feat: add OpenAI API wrapper with retries, agent depth tracking, and LLM-generated agent prompts

This commit is contained in:
Aymen
2023-12-13 06:48:54 +00:00
committed by GitHub
parent a9cf231946
commit 0a06a342d1
6 changed files with 144 additions and 31 deletions

Binary file not shown.

58
main.py
View File

@@ -1,4 +1,6 @@
from microagent import MicroAgent
from openaiwrapper import OpenAIAPIWrapper
import os
import openai
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
@@ -8,16 +10,16 @@ class MicroAgentManager:
self.agents = []
self.api_key = api_key
self.max_agents = max_agents
openai.api_key = api_key
self.openai_wrapper = OpenAIAPIWrapper(api_key)
self.create_prime_agent()
def create_prime_agent(self):
    """Create the bootstrap "prime" agent at depth 0 and register it.

    The prime agent only delegates work to other agents; its prompt is
    fixed and must never be evolved. The manager itself is passed so the
    agent can look up / create sibling agents.
    """
    prime_agent = MicroAgent("Initial Prompt for General Tasks. This is the prime agent. You are only allowed to call other agents. Prime Agent's prompt may not be changed", "General", self, self.api_key, 0)
    self.agents.append(prime_agent)
def get_embedding(self, text):
    """Return the embedding vector for *text* as a NumPy array.

    Delegates the API call to the shared OpenAIAPIWrapper so that
    retry/timeout handling lives in one place.

    :param text: The text to embed.
    :return: 1-D numpy array with the embedding values.
    """
    response = self.openai_wrapper.get_embedding(text)
    # Response layout follows the openai==0.28 Embedding API:
    # {'data': [{'embedding': [...]}], ...}
    return np.array(response['data'][0]['embedding'])
def calculate_similarity_threshold(self):
@@ -29,7 +31,6 @@ class MicroAgentManager:
return avg_similarity
def find_closest_agent(self, purpose_embedding):
print("Finding closest agent for purpose embedding:", purpose_embedding)
closest_agent = None
highest_similarity = -np.inf
@@ -44,7 +45,7 @@ class MicroAgentManager:
return closest_agent, highest_similarity
def get_or_create_agent(self, purpose):
def get_or_create_agent(self, purpose, depth, sample_input):
purpose_embedding = self.get_embedding(purpose)
closest_agent, highest_similarity = self.find_closest_agent(purpose_embedding)
similarity_threshold = self.calculate_similarity_threshold()
@@ -58,7 +59,8 @@ class MicroAgentManager:
self.agents.pop(0)
print("Creating new agent for purpose:", purpose)
new_agent = MicroAgent("Initial Prompt for " + purpose, purpose, self, self.api_key)
prompt = self.generate_llm_prompt(purpose, sample_input)
new_agent = MicroAgent(prompt, purpose, self, self.api_key, depth=depth)
new_agent.usage_count = 1
self.agents.append(new_agent)
return new_agent
@@ -69,27 +71,55 @@ class MicroAgentManager:
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": evaluation_prompt}
]
evaluation = openai.ChatCompletion.create(
evaluation = self.openai_wrapper.chat_completion(
model="gpt-4",
messages=messages
)
return "goal achieved" in evaluation.choices[0].message['content'].lower()
def generate_llm_prompt(self, goal, sample_input):
    """
    Generate a high-quality prompt for an LLM based on the goal, incorporating
    prompt engineering best practices and detailed examples, including a Python
    code snippet in Markdown format. Never use API keys or passwords in the
    code. Code must be runnable, besides the dynamic part coming as input
    (e.g. weather code must work but may contain a placeholder with the
    location to be filled). Keep the code simple.

    :param goal: The primary goal or purpose of the LLM's response.
    :param sample_input: Sample input the prompt will later be combined with;
        the generated prompt must stay generic and not contain it.
    :return: A structured prompt for the LLM.
    """
    # Few-shot examples steering the model toward short, code-backed prompts.
    examples = [
        "Goal: Your purpose is to be able to write blog posts. Generated Prompt: You are an expert writer on the topic of blog posts.",
        "Goal: Your purpose is to be able to count the words of the input. Generated Prompt: # You are a useful assistant that is able to count words. You can use the following code during execution to count word frequencies. Here is sample code, adopt as needed:```python\nfrom collections import Counter\n\ndef count_words(text):\n words = text.split()\n word_counts = Counter(words)\n return word_counts\n```.",
        "Goal: Your purpose is to solve basic arithmetic problems. Generated Prompt: You are a proficient calculator. Here's a Python function to solve a basic arithmetic problem, here is some sample code, adopt as needed.: ```python\ndef solve_arithmetic_problem(problem):\n return eval(problem)\n\n# Example problem: What is 15 times 4?\nresult = solve_arithmetic_problem('15 * 4')\n```.",
        "Goal: Your purpose is to generate creative writing prompts. Generated Prompt: You are a creative muse who can come up with engaging and unique writing prompts. Provide an intriguing prompt for a science fiction story set in a distant galaxy.",
        "Goal: Your purpose is to translate sentences from English to Spanish. Generated Prompt: You are an efficient language translator. Translate the following sentence into Spanish: 'The sun rises early in the morning.'",
        "Goal: Your purpose is to get the weather based on a location. Generated Prompt: You are a weather assistant capable of retrieving weather information using web scraping. Here is Python code to scrape weather data for a given location, here is some sample code for you to use: ```python\nimport requests\nfrom bs4 import BeautifulSoup\n\ndef get_weather(location):\n # Replace 'location' with the actual location parameter\n response = requests.get(f'https://wttr.in/{location}?format=3')\n return response.text\n\n# Example usage\n# weather = get_weather('<location>')\n```."
    ]

    # NOTE: fixed typo "genertic" -> "generic" in the instruction text so the
    # model receives the intended constraint.
    messages = [
        {"role": "system", "content": "You are a helpful assistant knowledgeable in prompt engineering."},
        {"role": "user", "content": f"Using best practices in prompt engineering, create a detailed prompt for the goal '{goal}'. This generated prompt will be combined with the following context later (but must be generic and is forbidden to contain any of the following context): '{sample_input}'\n Examples: {examples}. Aim for maximum 50 words. Important: Any problems must be solved through sample code or learned information provided in the prompt."}
    ]

    response = self.openai_wrapper.chat_completion(
        model="gpt-4-1106-preview",  # Using the specified model
        messages=messages
    )
    return response.choices[0].message['content'].strip()
def respond(self, input_text):
prime_agent = self.agents[0]
# Pass the manager to the generate_response method
purpose = prime_agent.generate_response(f"Your Goal: {input_text}")
agent = self.get_or_create_agent(purpose)
# Pass the manager to the agent's respond method
response = agent.respond(input_text)
response = prime_agent.generate_response(f"Your Goal: {input_text}")
while not self.goal_reached(response, input_text):
response = agent.respond(input_text + " // Previous response: " + response)
response = prime_agent.respond(input_text + " // Previous response: " + response)
return response
def main():
api_key = os.environ["OPENAI_KEY"]
manager = MicroAgentManager(api_key)
user_input = "Who is the current president in 2023 of france? how is the weather in france? What is the date in france?"

View File

@@ -3,6 +3,7 @@ import subprocess
import shlex
import logging
import datetime
from openaiwrapper import OpenAIAPIWrapper
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
@@ -11,20 +12,24 @@ class MicroAgent:
currentDatetime = datetime.datetime.now()
static_pre_prompt = (
"Current Time: " + currentDatetime.strftime("%H:%M:%S") + ". "
"Current Date: " + currentDatetime.strftime("%d/%m/%Y") + ". "
"You are an autonomous agent capable of processing various tasks, "
"including executing simple Python code within code blocks with an internet connection (i.e. to get real-time information) and deciding when to use other agents (to break down tasks). "
"including executing simple Python code within code blocks with an internet connection and deciding when to use other agents (to break down tasks). "
"Agents are invoked using: 'Use Agent[Purpose of the agent as sentence:parameter]'."
"Example: Use Agent[GetWeatherForLocation:Zurich]"
"NEVER call an agent with the same purpose as yourself, if you call another agent you must break the task down. "
"Write code to solve the task."
"Write code to solve the task. You can only use the following frameworks: numpy, pandas, requests, beautifulsoup4, matplotlib, seaborn, sqlalchemy, pymysql, scipy, scikit-learn, statsmodels, click, python-dotenv, virtualenv, scrapy, oauthlib, tweepy, datetime, openpyxl, xlrd, loguru, pytest, paramiko, cryptography, lxml"
"A purpose MUST be reuseable and generic. Use names as you would call microservices."
"At depth=2, use agents only for tasks that are not well suited for your purpose."
"Below depth=3, using other agents is NOT allowed. Agents must only use other agents below their depth"
)
def __init__(self, initial_prompt, purpose, manager, api_key, depth=0, max_depth=5):
    """Initialize a MicroAgent.

    :param initial_prompt: Starting system prompt; evolves over the agent's life.
    :param purpose: Short reusable description of what this agent does.
    :param manager: The MicroAgentManager that owns this agent.
    :param api_key: OpenAI API key, also handed to the per-agent wrapper.
    :param depth: Delegation depth of this agent (0 = prime agent); kept as a
        keyword with a default so older call sites without a depth still work.
    :param max_depth: Maximum ReAct iterations / delegation depth allowed.
    """
    self.dynamic_prompt = initial_prompt
    self.purpose = purpose
    self.manager = manager
    self.api_key = api_key
    self.openai_wrapper = OpenAIAPIWrapper(api_key)
    self.depth = depth
    self.max_depth = max_depth
    # Incremented each time the agent is reused; used for eviction decisions.
    self.usage_count = 0
@@ -34,10 +39,10 @@ class MicroAgent:
def generate_runtime_context(self):
available_agents_arr = [agent for agent in self.manager.agents if agent.purpose != "General" and agent.purpose != self.purpose]
available_agents = ', '.join([agent.purpose for agent in available_agents_arr])
logging.info(f"Your Purpose: {self.purpose}. Queue Depth: {self.depth}. Available agents: {available_agents}.")
available_agents_with_depth = ', '.join([f"{agent.purpose} (depth={agent.depth})" for agent in available_agents_arr])
logging.info(f"Your Purpose: {self.purpose}. Queue Depth: {self.depth}. Available agents: {available_agents_with_depth}.")
return f"Your Purpose: {self.purpose}. Queue Depth: {self.depth}. Available agents: {available_agents}."
return f"Your Purpose: {self.purpose}. Current Agent Depth: {self.depth}. Available agents: {available_agents_with_depth}."
def generate_response(self, input_text):
runtime_context = self.generate_runtime_context()
@@ -51,9 +56,9 @@ class MicroAgent:
action_number = 1
for iteration in range(self.max_depth):
react_prompt = f"Question: {input_text}\n{conversation_accumulator}\nThought {thought_number}: [Decompose the task. Identify if another agent or Python code execution is needed. Write 'Query Solved' once the task is completed.]\nAction {action_number}: [Specify action based on the thought, e.g., 'Use Agent[Purpose of the agent as sentence]' for delegation or '```python\n# Python code here\n```' for execution]"
react_prompt = f"Question: {input_text}\n{conversation_accumulator}\nThought {thought_number}: [Decompose the task. Identify if another agent or Python code execution is needed. Write 'Query Solved: <formulate detailed answer>' once the task is completed.]\nAction {action_number}: [Specify action based on the thought, e.g., 'Use Agent[Purpose of the agent as sentence:Input Paramter for agent]' for delegation or '```python\n# Python code here\n```' for execution]"
response = openai.ChatCompletion.create(
response = self.openai_wrapper.chat_completion(
model="gpt-4-1106-preview",
messages=[
{"role": "system", "content": system_prompt},
@@ -66,12 +71,14 @@ class MicroAgent:
if "Use Agent[" in response:
agent_name = response.split('Use Agent[')[1].split(']')[0]
# agent name is split by ":", left is name of agent, right is input
input =
input_text = ""
if ":" in agent_name:
input_text = agent_name.split(":")[1]
agent_name = agent_name.split(":")[0]
logging.info(f"Delegating task to Agent: {agent_name}")
delegated_agent = self.manager.get_or_create_agent(agent_name)
delegated_response = delegated_agent.respond(agent_name)
delegated_agent = self.manager.get_or_create_agent(agent_name, depth=self.depth + 1, sample_input=input_text)
delegated_response = delegated_agent.respond(input_text)
conversation_accumulator += f"\nThought {thought_number}: Delegated task to Agent {agent_name}\nAction {action_number}: {delegated_response}"
logging.info(f"Conversation: {conversation_accumulator}")
logging.info(f"Delegated task to Agent {agent_name}")
@@ -92,11 +99,11 @@ class MicroAgent:
thought_number += 1
action_number += 1
if "Query Solved" in response:
break
final_answer = "Final Response: " + conversation_accumulator
logging.info(f"Final Response: {final_answer}")
return final_answer
@@ -115,9 +122,9 @@ class MicroAgent:
feedback = self.evaluate_agent(input_text, self.dynamic_prompt, output)
runtime_context = self.generate_runtime_context()
if "poor" in feedback.lower():
evolve_prompt_query = f"How should the GPT-4 prompt evolve based on this input and feedback? If you don't know something, write sample code in the prompt to solve it. Break down complex tasks by calling other agents if required. Please include python code that should be used to solve a certain task as per purpose or list other agents that should be called. A purpose is always a sentence long. ONLY RESPONSE WITH THE NEW PROMPT NO OTHER TEXT! Current Prompt: {input_text}, User Feedback: {feedback}"
evolve_prompt_query = f"How should the GPT-4 prompt evolve based on this input and feedback? If you don't know something, write sample code in the prompt to solve it. Break down complex tasks by calling other agents if required. Please include python code that should be used to solve a certain task as per purpose or list other agents that should be called. A purpose is always a sentence long. Important: Any problems must be solved through sample code or learned information provided in the prompt.ONLY RESPONSE WITH THE REVISED PROMPT NO OTHER TEXT! Current Prompt: {input_text}, User Feedback: {feedback}"
logging.info(f"Evolve prompt query: {evolve_prompt_query}")
new_prompt = openai.ChatCompletion.create(
new_prompt = self.openai_wrapper.chat_completion(
model="gpt-4-1106-preview",
messages=[{"role": "system", "content": evolve_prompt_query + runtime_context}]
).choices[0].message['content'].strip() or self.dynamic_prompt
@@ -134,7 +141,7 @@ class MicroAgent:
logging.info(f"Evaluation query: {evaluation_query}")
logging.info(f"Current prompt: {prompt}")
logging.info(f"Current output: {output}")
evaluation = openai.ChatCompletion.create(
evaluation = self.openai_wrapper.chat_completion(
model="gpt-4-1106-preview",
messages=[{"role": "system", "content": evaluation_query}]
).choices[0].message['content']

25
openaiwrapper.py Normal file
View File

@@ -0,0 +1,25 @@
import openai
import time
class OpenAIAPIWrapper:
    """Thin wrapper around the openai==0.28 module that retries failed calls.

    Each API call is retried (with a 1-second pause between attempts) until it
    succeeds or *timeout* seconds have elapsed since the first attempt.
    """

    def __init__(self, api_key, timeout=10):
        # openai 0.28 uses a module-level API key shared by all calls.
        openai.api_key = api_key
        self.timeout = timeout

    def _retry(self, call):
        """Invoke *call* repeatedly until success or the time budget is spent.

        :param call: Zero-argument callable performing the API request.
        :raises TimeoutError: when the budget runs out; chained to the last
            underlying exception so the real failure cause is not lost.
        """
        last_error = None
        deadline = time.time() + self.timeout
        while time.time() < deadline:
            try:
                return call()
            except Exception as error:  # API/network errors are assumed transient
                last_error = error
                time.sleep(1)  # Wait for 1 second before retrying
        raise TimeoutError("API call timed out") from last_error

    def get_embedding(self, text):
        """Return the raw Embedding API response for *text*."""
        return self._retry(
            lambda: openai.Embedding.create(input=text, engine="text-embedding-ada-002")
        )

    def chat_completion(self, **kwargs):
        """Return the raw ChatCompletion API response for *kwargs*."""
        return self._retry(lambda: openai.ChatCompletion.create(**kwargs))

View File

@@ -1 +1,52 @@
openai==28
openai==0.28.1
# Core Libraries
numpy
pandas
requests
beautifulsoup4
matplotlib
seaborn
# Database
sqlalchemy
pymysql
# Data Serialization
# Data Analysis and Visualization
scipy
scikit-learn
statsmodels
# Utility Libraries
click
python-dotenv
virtualenv
# Web Scraping
scrapy
# API Integration
oauthlib
tweepy
# Date and Time
datetime
# File Handling
openpyxl
xlrd
# Logging
loguru
# Testing
pytest
# Networking
paramiko
# Security
cryptography
# Parsing
lxml