Fix requests and update readme

This commit is contained in:
M
2023-11-12 19:02:41 +01:00
parent 928471b50b
commit 202accc7af
3 changed files with 21 additions and 13 deletions

View File

@@ -20,7 +20,11 @@ Configure `assistant.yaml` settings. (It is set up to work in French with ollama
Run `assistant.py`
Hold the `space` key to talk; the AI will interpret the query when you release the key.
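To picture that push-to-talk flow, here is a minimal sketch of a hold-space/release loop. It uses `pynput` purely for illustration (the key handling in `assistant.py` may be implemented differently), and `start_recording`/`stop_and_transcribe` are hypothetical placeholders, not functions from this repository:

```python
from pynput import keyboard  # illustration only; assistant.py may use a different library

recording = False

def start_recording():
    # placeholder: real code would begin capturing microphone audio here
    print("recording...")

def stop_and_transcribe():
    # placeholder: real code would stop capture, run Whisper, then query Ollama
    print("transcribing...")

def on_press(key):
    global recording
    if key == keyboard.Key.space and not recording:  # ignore key auto-repeat
        recording = True
        start_recording()

def on_release(key):
    global recording
    if key == keyboard.Key.space and recording:
        recording = False
        stop_and_transcribe()

with keyboard.Listener(on_press=on_press, on_release=on_release) as listener:
    listener.join()
```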
## Todo
- Allow a full conversation with a "press to talk" function between requests
- Process Ollama JSON responses in stream mode to generate voice at the end of each sentence
- Fix the prompt
- Rearrange the code base
- Add some audio visualization in the UI
- Multi-threading to overlap queries/rendering with response generation

View File

@@ -59,8 +59,8 @@ class Assistant:
        self.display_message(self.config.messages.loadingModel)
        self.model = whisper.load_model(self.config.whisperRecognition.modelPath)
        self.tts = pyttsx3.init()
        self.conversation_history = [self.config.conversation.context,
                                     self.config.conversation.greeting]
        #self.conversation_history = [self.config.conversation.context,
        #                             self.config.conversation.greeting]
        self.context = []
        self.display_ready()
@@ -175,30 +175,34 @@ class Assistant:
    def ask_ollama(self, prompt, responseCallback):
        self.text_to_speech(self.config.conversation.llmWaitMsg)
        self.conversation_history.append(prompt)
        full_prompt = "\n".join(self.conversation_history)
        #self.conversation_history.append(prompt)
        #full_prompt = "\n".join(self.conversation_history)
        full_prompt = prompt if hasattr(self, "contextSent") else (self.config.conversation.context + "\n" + prompt)
        self.contextSent = True

        jsonParam = {"model": self.config.ollama.model,
                     "stream": True,
                     "context": self.context,
                     "prompt": full_prompt}
        print(jsonParam)
        response = requests.post(self.config.ollama.url,
                                 json=jsonParam,
                                 headers=OLLAMA_REST_HEADERS,
                                 stream=True)
        response.raise_for_status()
        print(jsonParam)
        self.text_to_speech(self.config.conversation.llmWaitMsg)

        tokens = []
        for line in response.iter_lines():
            print(line)
            body = json.loads(line)
            token = body.get('response', '')
            tokens.append(token)

            # the response streams one token at a time, print that as we receive it
            if token == "." or token == ":":
                responseCallback("".join(tokens))
            # the response streams one token at a time, process only at end of sentences
            if token == "." or token == ":" or token == "!" or token == "?":
                current_response = "".join(tokens)
                #self.conversation_history.append(current_response)
                responseCallback(current_response)
                tokens = []

            if 'error' in body:
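To make the streaming change above easier to follow, here is a small standalone sketch of the same idea: read Ollama's newline-delimited JSON chunks, flush text to a speech callback at sentence boundaries, and reuse the `context` array Ollama returns with its final chunk instead of resending the whole conversation. The endpoint URL, model name, and `say` callback are assumptions for the example, not values taken from this repository:

```python
import json
import requests

OLLAMA_URL = "http://localhost:11434/api/generate"  # assumed default endpoint

def ask(prompt, context, say):
    """Stream one answer, voicing each completed sentence via say(); returns the updated context."""
    payload = {"model": "mistral",  # assumed model name
               "prompt": prompt,
               "stream": True,
               "context": context}
    with requests.post(OLLAMA_URL, json=payload, stream=True) as response:
        response.raise_for_status()
        tokens = []
        for line in response.iter_lines():
            if not line:
                continue
            body = json.loads(line)
            token = body.get("response", "")
            tokens.append(token)
            # flush at sentence boundaries so speech can start before the answer is complete
            if token in (".", ":", "!", "?"):
                say("".join(tokens))
                tokens = []
            if body.get("done"):
                # the final chunk carries an opaque context array; passing it back on the
                # next request keeps the conversation going without a manual history string
                context = body.get("context", context)
        if tokens:
            say("".join(tokens))  # voice any trailing text without closing punctuation
    return context
```

Each call would then look like `context = ask(question, context, tts_callback)`, mirroring how `self.context` is sent in the request above.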

View File

@@ -12,7 +12,7 @@ ollama:
model: "mistral"
conversation:
context: "This is a discussion in french."
context: "Switch to french."
greeting: "Je vous écoute."
recognitionWaitMsg: "Oui."
llmWaitMsg: "Laissez moi réfléchir."