implementing simple ui & separate brain cli and service
This commit is contained in:
34
brain/service_fastapi.py
Normal file
34
brain/service_fastapi.py
Normal file
@@ -0,0 +1,34 @@
|
||||
import sys
|
||||
|
||||
from fastapi import FastAPI
|
||||
import uvicorn
|
||||
|
||||
from brain_openai import CloudChatBrain
|
||||
|
||||
|
||||
|
||||
#def start_application():
|
||||
# app = FastAPI()
|
||||
# # app.include_router(api_router)
|
||||
# # brain = CloudChatBrain()
|
||||
# return app
|
||||
|
||||
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) == 1:
|
||||
port = 8890
|
||||
else:
|
||||
port = int(sys.argv[1])
|
||||
|
||||
app = FastAPI()
|
||||
|
||||
@app.post("/command/")
|
||||
def post_command(payload: dict):
|
||||
prompt = payload.get("prompt")
|
||||
return {"response": prompt}
|
||||
|
||||
|
||||
#app = start_application()
|
||||
uvicorn.run(app, host="0.0.0.0", port=port, log_level="debug")
|
||||
33
brain/service_gradio.py
Normal file
33
brain/service_gradio.py
Normal file
@@ -0,0 +1,33 @@
|
||||
import random
|
||||
|
||||
import gradio as gr
|
||||
from loguru import logger
|
||||
|
||||
|
||||
|
||||
def make_completion(history):
|
||||
return "ok"
|
||||
|
||||
def answer(input, history):
|
||||
history.append({"role": "user", "content": input})
|
||||
response = random.choice(["How are you?", "I love you", "I'm very hungry"])
|
||||
history.append({"role": "assistant", "content": response})
|
||||
messages = [(history[i]["content"], history[i+1]["content"]) for i in range(0, len(history)-1, 2)]
|
||||
logger.debug(history)
|
||||
return messages, history
|
||||
|
||||
|
||||
|
||||
with gr.Blocks() as demo:
|
||||
chatbot = gr.Chatbot(label="Fly my drone with chatGPT as copilot")
|
||||
state = gr.State([])
|
||||
with gr.Row():
|
||||
prompt = gr.Textbox(
|
||||
show_label=True,
|
||||
label="what should I do now?\n(enter q for emergency)",
|
||||
placeholder="Enter text and press enter")\
|
||||
.style(container=True)
|
||||
prompt.submit(answer, [prompt, state], [chatbot, state])
|
||||
|
||||
|
||||
demo.launch(server_name="0.0.0.0", server_port=8890, debug=True)
|
||||
37
brain/service_gradio_nicetry_1.py
Normal file
37
brain/service_gradio_nicetry_1.py
Normal file
@@ -0,0 +1,37 @@
|
||||
import gradio as gr
|
||||
import requests
|
||||
|
||||
|
||||
conversation = []
|
||||
|
||||
def generate_response(prompt, params):
|
||||
#response = requests.get(f"http://localhost:8000/command/{prompt}").json()
|
||||
response = f"I got {prompt}"
|
||||
conversation.append({"prompt": prompt, "response": response})
|
||||
print(params)
|
||||
#params["conversation_history"].description = f"Conversation History:\n\n{populate_chat_history(conversation)}"
|
||||
return f"I got: {prompt}"
|
||||
|
||||
def populate_chat_history(conversation):
|
||||
history = ""
|
||||
for i, chat in enumerate(conversation):
|
||||
history += f"User: {chat['prompt']}\nBot: {chat['response']}\n\n"
|
||||
return history
|
||||
|
||||
iface = gr.Interface(
|
||||
fn=generate_response,
|
||||
inputs=["text"],
|
||||
outputs=["text"],
|
||||
parameters=[
|
||||
{
|
||||
"type": "textbox",
|
||||
"key": "conversation_history",
|
||||
"label": "Conversation:",
|
||||
"default": ""
|
||||
}
|
||||
],
|
||||
title="fly my drone by talking",
|
||||
description="what should I do now?\n(enter q for emergency)"
|
||||
)
|
||||
|
||||
iface.launch(server_name="0.0.0.0", server_port=8890, debug=True)
|
||||
@@ -23,4 +23,5 @@ if __name__ == "__main__":
|
||||
port = int(sys.argv[1])
|
||||
|
||||
app = start_application()
|
||||
|
||||
uvicorn.run(app, host="0.0.0.0", port=port, log_level="debug")
|
||||
|
||||
Reference in New Issue
Block a user