implementing simple ui & separate brain cli and service

Author: Alihan
Date: 2023-05-18 22:21:46 +03:00
parent 6116fa37e4
commit c3c05f784e
6 changed files with 110 additions and 1 deletion

brain/service_fastapi.py (new file, 34 lines added)

@@ -0,0 +1,34 @@
import sys

from fastapi import FastAPI
import uvicorn

from brain_openai import CloudChatBrain


#def start_application():
#    app = FastAPI()
#    # app.include_router(api_router)
#    # brain = CloudChatBrain()
#    return app


if __name__ == "__main__":
    if len(sys.argv) == 1:
        port = 8890
    else:
        port = int(sys.argv[1])

    app = FastAPI()

    @app.post("/command/")
    def post_command(payload: dict):
        prompt = payload.get("prompt")
        return {"response": prompt}

    #app = start_application()
    uvicorn.run(app, host="0.0.0.0", port=port, log_level="debug")
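For now the endpoint only echoes the prompt back (CloudChatBrain is imported but not yet wired in). A minimal way to poke it, as a sketch assuming the service is running locally on the default port 8890:

# Hypothetical smoke test, not part of this commit: exercise the /command/ endpoint.
import requests

resp = requests.post(
    "http://localhost:8890/command/",
    json={"prompt": "take off and hover"},
)
print(resp.json())  # expected: {"response": "take off and hover"}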

brain/service_gradio.py (new file, 33 lines added)

@@ -0,0 +1,33 @@
import random

import gradio as gr
from loguru import logger


def make_completion(history):
    return "ok"


def answer(input, history):
    history.append({"role": "user", "content": input})
    response = random.choice(["How are you?", "I love you", "I'm very hungry"])
    history.append({"role": "assistant", "content": response})
    messages = [(history[i]["content"], history[i+1]["content"]) for i in range(0, len(history)-1, 2)]
    logger.debug(history)
    return messages, history


with gr.Blocks() as demo:
    chatbot = gr.Chatbot(label="Fly my drone with chatGPT as copilot")
    state = gr.State([])

    with gr.Row():
        prompt = gr.Textbox(
            show_label=True,
            label="what should I do now?\n(enter q for emergency)",
            placeholder="Enter text and press enter")\
            .style(container=True)

    prompt.submit(answer, [prompt, state], [chatbot, state])

demo.launch(server_name="0.0.0.0", server_port=8890, debug=True)
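The list comprehension in answer folds the flat role/content history into the (user, assistant) tuples that gr.Chatbot renders. A standalone illustration of that folding, with example data only (not taken from the commit):

# Illustration only: how the flat history becomes chatbot message pairs.
history = [
    {"role": "user", "content": "take off"},
    {"role": "assistant", "content": "How are you?"},
    {"role": "user", "content": "land"},
    {"role": "assistant", "content": "I'm very hungry"},
]
pairs = [(history[i]["content"], history[i+1]["content"]) for i in range(0, len(history)-1, 2)]
print(pairs)  # [('take off', 'How are you?'), ('land', "I'm very hungry")]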


@@ -0,0 +1,37 @@
import gradio as gr
import requests

conversation = []


def generate_response(prompt, params):
    #response = requests.get(f"http://localhost:8000/command/{prompt}").json()
    response = f"I got {prompt}"
    conversation.append({"prompt": prompt, "response": response})
    print(params)
    #params["conversation_history"].description = f"Conversation History:\n\n{populate_chat_history(conversation)}"
    return f"I got: {prompt}"


def populate_chat_history(conversation):
    history = ""
    for i, chat in enumerate(conversation):
        history += f"User: {chat['prompt']}\nBot: {chat['response']}\n\n"
    return history


iface = gr.Interface(
    fn=generate_response,
    inputs=["text"],
    outputs=["text"],
    parameters=[
        {
            "type": "textbox",
            "key": "conversation_history",
            "label": "Conversation:",
            "default": ""
        }
    ],
    title="fly my drone by talking",
    description="what should I do now?\n(enter q for emergency)"
)

iface.launch(server_name="0.0.0.0", server_port=8890, debug=True)
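The commented-out line in generate_response hints at wiring this UI to the brain service, but service_fastapi.py exposes POST /command/ with a JSON body rather than GET with a path parameter, and it defaults to port 8890 rather than the 8000 in the comment. A rough sketch of what the wired-up handler could look like under those assumptions (not part of this commit):

import requests

conversation = []

def generate_response(prompt, params):
    # Sketch only: assumes the FastAPI service from service_fastapi.py is running on port 8890.
    resp = requests.post("http://localhost:8890/command/", json={"prompt": prompt})
    response = resp.json()["response"]
    conversation.append({"prompt": prompt, "response": response})
    return response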


@@ -23,4 +23,5 @@ if __name__ == "__main__":
        port = int(sys.argv[1])
    app = start_application()
    uvicorn.run(app, host="0.0.0.0", port=port, log_level="debug")


@@ -69,8 +69,12 @@ start_commander_service() {
    venv/bin/python commander/commander.py $1
}

start_brain_service() {
    venv/bin/python brain/service.py $1
}

talk_to_drone() {
    ENV_FOR_DYNACONF=$1 python brain/brain.py
    ENV_FOR_DYNACONF=$1 python brain/cli.py
}

kill_everything() {