Fix bugs in ppt and googlescholar (#167)

* fix bugs in ppt and googlescholar

* Format required parameters
Author: liujiangning30
Date: 2024-03-04 13:52:06 +08:00
Committed by: GitHub
Parent: 605a921878
Commit: e16a6bfc3a
5 changed files with 26 additions and 16 deletions

.gitignore
View File

@@ -160,3 +160,4 @@ cython_debug/
#.idea/
.vscode/
docs/*/_build/
+tmp_dir/

View File

@@ -142,7 +142,7 @@ class StreamlitUI:
belong='assistant',
end='<|action_end|>\n',
), ),
-)
+max_turn=7)
def render_user(self, prompt: str):
with st.chat_message('user'):

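Reviewer note: the `max_turn=7` added above appears to bound how many think/act cycles the Streamlit demo's agent may run before it must return. A minimal, self-contained sketch of such a turn-capped loop (`step` is a hypothetical stand-in for one reasoning/tool cycle, not an API from this repo):

```python
from typing import Callable, Tuple

def run_agent(step: Callable[[str], Tuple[str, bool]],
              query: str,
              max_turn: int = 7) -> str:
    """Drive an agent for at most `max_turn` think/act cycles.

    `step` maps the current state to (new_state, finished); the cap
    keeps a tool loop that never finishes from running forever.
    """
    state = query
    for _ in range(max_turn):
        state, finished = step(state)
        if finished:
            break
    return state

# Toy usage: a "step" that finishes once the state grows long enough.
print(run_agent(lambda s: (s + '.', len(s) >= 10), 'hello'))
```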
View File

@@ -8,7 +8,7 @@ THEME_MAPPING = {
'template': None,
'title': 'Title Slide',
'single': 'Title and Content',
-'two': 'Tow content',
+'two': 'Two Content',
}
}
@@ -31,7 +31,7 @@ class PPT(BaseAction):
"""Create a pptx file with specific themes.
Args:
-theme (:class:`str`): the theme used
+theme (:class:`str`): the theme used. The value should be one of ['Default'].
abs_location (:class:`str`): the ppt file's absolute location
Returns:
@@ -115,6 +115,7 @@ class PPT(BaseAction):
:class:`dict`: operation status
* status: the result of the execution
"""
+from PIL import Image
layout_name = self.theme_mapping[self.pointer.slide_master.name]['two']
layout = next(i for i in self.pointer.slide_master.slide_layouts
if i.name == layout_name)
@@ -122,6 +123,7 @@ class PPT(BaseAction):
ph_title, ph_body1, ph_body2 = slide.placeholders
ph_title.text = title
ph = ph_body2
+image = Image.open(image)
image_pil = image.to_pil()
left = ph.left
width = ph.width

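Reviewer note: the three PPT hunks fix the misspelled 'Two Content' layout name, document the 'Default' theme constraint, and open the image with PIL so the placeholder-fitting math sees real pixel sizes. A hedged sketch of the same slide-building idea against python-pptx's stock template (paths and titles are illustrative; the aspect-ratio scaling mirrors the `left`/`width` lines the last hunk leads into):

```python
from PIL import Image
from pptx import Presentation

def add_text_image_slide(prs, title, image_path):
    # The stock template ships a layout literally named 'Two Content'.
    layout = next(l for l in prs.slide_layouts if l.name == 'Two Content')
    slide = prs.slides.add_slide(layout)
    ph_title, ph_body1, ph_body2 = slide.placeholders
    ph_title.text = title

    # Open the image to get pixel dimensions, then scale it to the
    # right-hand placeholder's width while keeping the aspect ratio.
    image = Image.open(image_path)
    ph = ph_body2
    height = int(ph.width / image.width * image.height)
    slide.shapes.add_picture(
        image_path, ph.left, ph.top, width=ph.width, height=height)
    return slide

prs = Presentation()  # default theme, matching the ['Default'] docstring
add_text_image_slide(prs, 'Results', 'figure.png')  # figure.png: any image
prs.save('demo.pptx')
```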
View File

@@ -141,6 +141,12 @@ class Internlm2Protocol:
tool_name = api_info['name'].split('.')[0]
plugin['description'] = API_PREFIX.format(
tool_name=tool_name, description=plugin['description'])
+# only keep required parameters
+required_parameters = [
+    param for param in plugin['parameters']
+    if param['name'] in plugin['required']
+]
+plugin['parameters'] = required_parameters
plugin_descriptions.append(plugin)
plugin_prompt = self.plugin_prompt.format(
prompt=json.dumps(

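Reviewer note: this hunk is the "Format required parameters" half of the commit: before the plugin schema is JSON-serialized into the prompt, optional parameters are stripped so the model only sees arguments it must supply (GoogleScholar's long optional list was presumably the trigger). A self-contained sketch over a hypothetical plugin dict of the same shape:

```python
import json

# Hypothetical plugin schema shaped like the `plugin` dict in the hunk.
plugin = {
    'name': 'GoogleScholar.search',
    'description': 'Search scholarly literature.',
    'parameters': [
        {'name': 'query', 'type': 'STRING', 'description': 'search terms'},
        {'name': 'year_low', 'type': 'NUMBER', 'description': 'earliest year'},
        {'name': 'patents', 'type': 'BOOLEAN', 'description': 'include patents'},
    ],
    'required': ['query'],
}

# Only keep required parameters, exactly as the diff does.
plugin['parameters'] = [
    param for param in plugin['parameters']
    if param['name'] in plugin['required']
]

print(json.dumps(plugin, indent=2))  # 'query' is now the only parameter
```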
View File

@@ -50,8 +50,8 @@ class HFTransformer(BaseModel):
self.gen_params.update(stop_words_id=stop_words_id)
if self.gen_params['stop_words'] is not None and \
self.gen_params['stop_words_id'] is not None:
logger.warning("Both stop_words and stop_words_id are specified,"
"only stop_words_id will be used.")
logger.warning('Both stop_words and stop_words_id are specified,'
'only stop_words_id will be used.')
self._load_tokenizer(
path=path,
@@ -80,7 +80,7 @@ class HFTransformer(BaseModel):
tokenizer_path if tokenizer_path else path,
trust_remote_code=True,
**tokenizer_kwargs)
if self.tokenizer.pad_token_id is None:
if self.tokenizer.eos_token is not None:
logger.warning(
@@ -101,7 +101,7 @@ class HFTransformer(BaseModel):
'pad_token_id is not set for this tokenizer. Try to '
'set pad_token_id via passing '
'`pad_token_id={PAD_TOKEN_ID}` in model_cfg.')
def _load_model(self, path: str, model_kwargs: dict):
import torch
from transformers import AutoModel
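Reviewer note: these two hunks only normalize quote style in the warnings, but the logic around them is the interesting part: if the tokenizer ships without a pad token, the loader falls back to the EOS token, and if there is no EOS token either it asks for an explicit `pad_token_id` in `model_cfg`. A sketch of that fallback against a plain Hugging Face tokenizer (the 'gpt2' checkpoint is illustrative, chosen because it has an EOS token but no pad token):

```python
from transformers import AutoTokenizer

def ensure_pad_token(tokenizer):
    """Fall back to EOS as the pad token when none is configured.

    A sketch of the behaviour the warnings above describe, not the
    repo's exact code.
    """
    if tokenizer.pad_token_id is None:
        if tokenizer.eos_token is not None:
            tokenizer.pad_token = tokenizer.eos_token  # reuse EOS for padding
        else:
            raise ValueError(
                'pad_token_id is not set for this tokenizer; pass '
                'pad_token_id=... explicitly.')
    return tokenizer

tokenizer = ensure_pad_token(AutoTokenizer.from_pretrained('gpt2'))
print(tokenizer.pad_token)  # '<|endoftext|>'
```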
@@ -302,13 +302,16 @@ class HFTransformerCasualLM(HFTransformer):
path, trust_remote_code=True, **model_kwargs)
self.model.eval()
class HFTransformerChat(HFTransformerCasualLM):
-def __init__(self,
-             template_parser=APITemplateParser,
-             **kwargs):
+def __init__(self, template_parser=APITemplateParser, **kwargs):
super().__init__(template_parser=template_parser, **kwargs)
-def chat(self, inputs: Union[List[dict], List[List[dict]]], do_sample: bool = True, **kwargs):
+def chat(self,
+         inputs: Union[List[dict], List[List[dict]]],
+         do_sample: bool = True,
+         **kwargs):
"""Return the chat completions in stream mode.
Args:
@@ -327,12 +330,10 @@ class HFTransformerChat(HFTransformerCasualLM):
query = prompt[-1]['content']
history = prompt[:-1]
try:
-response, history = self.model.chat(self.tokenizer,
-                                    query,
-                                    history=history)
+response, history = self.model.chat(
+    self.tokenizer, query, history=history)
except Exception as e:
# handle over-length input error
logger.warning(str(e))
response = ""
response = ''
return response
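Reviewer note: the last hunks are pure formatting (yapf-style call reflow and quote normalization), but the `chat` flow they touch is worth restating: the OpenAI-style message list is split into history plus the latest user query, handed to the model's built-in `chat` API, and over-length inputs degrade to an empty reply instead of raising. A sketch with `model` and `tokenizer` left as parameters, assuming an InternLM-style `model.chat(tokenizer, query, history=...)` method:

```python
import logging

logger = logging.getLogger(__name__)

def chat_once(model, tokenizer, prompt):
    """Split messages into (history, latest query) and call model.chat.

    `prompt` is a list of {'role': ..., 'content': ...} dicts whose last
    entry is the pending user message, as in the hunk above.
    """
    query = prompt[-1]['content']
    history = prompt[:-1]
    try:
        response, history = model.chat(tokenizer, query, history=history)
    except Exception as e:
        # e.g. an over-length input: log it and return an empty reply
        logger.warning(str(e))
        response = ''
    return response
```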