Mirror of https://github.com/aymenfurter/microagents.git
Synced 2023-12-30 16:47:11 +03:00

feat: major refactor

This commit is contained in:
BIN  __pycache__/agent_creation.cpython-310.pyc      Normal file (binary file not shown)
BIN  __pycache__/agent_evaluation.cpython-310.pyc    Normal file (binary file not shown)
BIN  __pycache__/agent_response.cpython-310.pyc      Normal file (binary file not shown)
BIN  __pycache__/agent_similarity.cpython-310.pyc    Normal file (binary file not shown)
BIN  __pycache__/code_execution.cpython-310.pyc      Normal file (binary file not shown)
BIN  __pycache__/main.cpython-310.pyc                Normal file (binary file not shown)
BIN  __pycache__/microagent_manager.cpython-310.pyc  Normal file (binary file not shown)
BIN  __pycache__/prompt_evolution.cpython-310.pyc    Normal file (binary file not shown)
BIN  __pycache__/response_extraction.cpython-310.pyc Normal file (binary file not shown)
BIN  __pycache__/utility.cpython-310.pyc             Normal file (binary file not shown)
agent_creation.py  (new file, 56 lines)
@@ -0,0 +1,56 @@
from microagent import MicroAgent
from openaiwrapper import OpenAIAPIWrapper
from agent_similarity import AgentSimilarity
from prompts import PRIME_PROMPT, PRIME_NAME, PROMPT_ENGINEERING_SYSTEM_PROMPT, PROMPT_ENGINEERING_TEMPLATE, EXAMPLES

class AgentCreation:
    def __init__(self, openai_wrapper, max_agents=20):
        self.agents = []
        self.openai_wrapper = openai_wrapper
        self.max_agents = max_agents

    def create_prime_agent(self, openai_wrapper):
        """
        Creates the prime agent and adds it to the agent list.
        """
        prime_agent = MicroAgent(PRIME_PROMPT, PRIME_NAME, 0, self, self.openai_wrapper, 25, True)
        self.agents.append(prime_agent)

    def get_or_create_agent(self, purpose, depth, sample_input):
        """
        Retrieves or creates an agent based on the given purpose.
        """
        agent_similarity = AgentSimilarity(self.openai_wrapper, self.agents)
        purpose_embedding = agent_similarity.get_embedding(purpose)
        closest_agent, highest_similarity = agent_similarity.find_closest_agent(purpose_embedding)
        similarity_threshold = agent_similarity.calculate_similarity_threshold()

        if highest_similarity >= similarity_threshold:
            closest_agent.usage_count += 1
            return closest_agent

        if len(self.agents) >= self.max_agents:
            self.agents.sort(key=lambda x: x.usage_count)
            self.agents.pop(0)

        prompt = self.generate_llm_prompt(purpose, sample_input)
        new_agent = MicroAgent(prompt, purpose, depth, self, self.openai_wrapper)
        new_agent.usage_count = 1
        self.agents.append(new_agent)
        return new_agent

    def generate_llm_prompt(self, goal, sample_input):
        """
        Generates a prompt for the LLM based on the given goal and sample input.
        """
        messages = [
            {"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
            {"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
        ]

        response = self.openai_wrapper.chat_completion(
            model="gpt-4-1106-preview",
            messages=messages
        )

        return response.choices[0].message['content'].strip()
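For orientation (not part of the commit): a minimal sketch of how AgentCreation's reuse logic behaves, assuming the repository modules are on the path and a valid API key — both calls hit the OpenAI embedding API, and the purposes shown are made up.

    from openaiwrapper import OpenAIAPIWrapper
    from agent_creation import AgentCreation

    wrapper = OpenAIAPIWrapper("sk-...")             # placeholder key
    creator = AgentCreation(wrapper, max_agents=20)
    creator.create_prime_agent(wrapper)

    # The first call creates a new agent; a second purpose whose embedding
    # similarity clears calculate_similarity_threshold() reuses it and only
    # bumps usage_count instead of creating another agent.
    a1 = creator.get_or_create_agent("Fetch population data", depth=1, sample_input="Thailand")
    a2 = creator.get_or_create_agent("Fetch population figures", depth=1, sample_input="Sweden")
    print(a1 is a2, a1.usage_count)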
agent_evaluation.py  (new file, 19 lines)
@@ -0,0 +1,19 @@
import logging
from openaiwrapper import OpenAIAPIWrapper

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

class AgentEvaluation:
    def __init__(self, openai_wrapper):
        self.openai_wrapper = openai_wrapper

    def evaluate_agent(self, input_text, prompt, output):
        """
        Evaluates the performance of an agent based on given input, prompt, and output.
        """
        evaluation_query = f"Evaluate the generated LLM Output: '{input_text}' with the current prompt '{prompt}' for quality and relevance (Possible Answers: Poor, Good, Perfect), LLM output with current prompt: '{output}'"
        evaluation = self.openai_wrapper.chat_completion(
            model="gpt-4-1106-preview",
            messages=[{"role": "system", "content": evaluation_query}]
        ).choices[0].message['content']
        return evaluation
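The verdict comes back as free text containing Poor, Good, or Perfect, so callers gate on a substring check — the same "poor" in feedback.lower() test the pre-refactor microagent.py used. A sketch (openai_wrapper is assumed to be an already-constructed OpenAIAPIWrapper; the inputs are made up):

    evaluator = AgentEvaluation(openai_wrapper)
    feedback = evaluator.evaluate_agent(
        input_text="What is 2+2?",
        prompt="You are a calculator.",
        output="4",
    )
    needs_evolution = "poor" in feedback.lower()  # gate used by prompt evolution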
agent_response.py  (new file, 87 lines)
@@ -0,0 +1,87 @@
import logging
from openaiwrapper import OpenAIAPIWrapper
from prompts import REACT_STEP_POST, REACT_STEP_PROMPT, REACT_SYSTEM_PROMPT, REACT_PLAN_PROMPT, STATIC_PRE_PROMPT

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

class AgentResponse:
    def __init__(self, openai_wrapper, manager, code_execution, creator, depth):
        self.openai_wrapper = openai_wrapper
        self.manager = manager
        self.code_execution = code_execution
        self.creator = creator
        self.depth = depth

    def generate_response(self, input_text, dynamic_prompt, max_depth):
        runtime_context = self.generate_runtime_context(dynamic_prompt)
        system_prompt = STATIC_PRE_PROMPT + runtime_context + dynamic_prompt + "\nDELIVER THE NEXT PACKAGE."
        conversation_accumulator = ""
        thought_number = 0
        action_number = 0
        found_new_solution = False
        plan_step = True

        for iteration in range(max_depth):
            react_prompt = self.build_react_prompt(input_text, conversation_accumulator, thought_number, action_number)

            if plan_step:
                react_prompt = react_prompt + REACT_PLAN_PROMPT
                plan_step = False

            response = self.openai_wrapper.chat_completion(
                model="gpt-4-1106-preview",
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": react_prompt}
                ]
            ).choices[0].message['content']

            conversation_accumulator += f"\n{response}"
            thought_number += 1
            action_number += 1

            if "```python" in response:
                exec_response = self.code_execution.execute_external_code(response)
                print(exec_response)
                conversation_accumulator += f"\nObservation: Executed Python code\nOutput: {exec_response}"

            if "Use Agent[" in response:
                agent_name = response.split('Use Agent[')[1].split(']')[0]
                input_text = ""
                if ":" in agent_name:
                    input_text = agent_name.split(":")[1]
                    agent_name = agent_name.split(":")[0]

                delegated_agent = self.creator.get_or_create_agent(agent_name, depth=self.depth + 1, sample_input=input_text)
                delegated_response = delegated_agent.respond(input_text)
                conversation_accumulator += f"\nOutput {thought_number}: Delegated task to Agent {agent_name}\nOutput of Agent: {action_number}: {delegated_response}"

            if "Query Solved" in response:
                found_new_solution = True
                break

        return self.conclude_output(conversation_accumulator), conversation_accumulator, found_new_solution, thought_number

    def generate_runtime_context(self, dynamic_prompt):
        available_agents_arr = [agent for agent in self.manager.agents if agent.purpose != "General"]
        available_agents_with_depth = ', '.join([f"{agent.purpose} (depth={agent.depth})" for agent in available_agents_arr])
        return f"Your Purpose: {dynamic_prompt}. Available agents: {available_agents_with_depth}."

    def build_react_prompt(self, input_text, conversation_accumulator, thought_number, action_number):
        return (
            f"Question: {input_text}\n"
            f"{conversation_accumulator}\n"
            f"Thought {thought_number}: {REACT_STEP_PROMPT}\n"
            f"Action {action_number}: {REACT_STEP_POST}"
        )

    def conclude_output(self, conversation):
        react_prompt = conversation
        response = self.openai_wrapper.chat_completion(
            model="gpt-4-1106-preview",
            messages=[
                {"role": "system", "content": REACT_SYSTEM_PROMPT},
                {"role": "user", "content": react_prompt}
            ]
        ).choices[0].message['content']
        return response
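The delegation protocol is plain string surgery on the model's ReAct output: a directive like Use Agent[name:input] is split apart before the named agent is created or fetched. The same parsing in isolation (runnable as-is; the example response text is made up):

    response = "Thought 1: I need a sub-agent.\nUse Agent[Population lookup:Thailand]"
    if "Use Agent[" in response:
        agent_name = response.split('Use Agent[')[1].split(']')[0]
        input_text = ""
        if ":" in agent_name:
            input_text = agent_name.split(":")[1]    # -> "Thailand"
            agent_name = agent_name.split(":")[0]    # -> "Population lookup"
    print(agent_name, "|", input_text)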
agent_similarity.py  (new file, 43 lines)
@@ -0,0 +1,43 @@
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from openaiwrapper import OpenAIAPIWrapper

class AgentSimilarity:
    def __init__(self, openai_wrapper, agents):
        self.openai_wrapper = openai_wrapper
        self.agents = agents

    def get_embedding(self, text):
        """
        Retrieves the embedding for a given text.
        """
        response = self.openai_wrapper.get_embedding(text)
        return np.array(response['data'][0]['embedding'])

    def calculate_similarity_threshold(self):
        """
        Calculates the average similarity threshold across all agents.
        """
        embeddings = [self.get_embedding(agent.purpose) for agent in self.agents]
        if len(embeddings) < 2:
            return 0.9

        avg_similarity = np.mean([np.dot(e1, e2) / (np.linalg.norm(e1) * np.linalg.norm(e2)) for e1 in embeddings for e2 in embeddings if not np.array_equal(e1, e2)])
        return avg_similarity

    def find_closest_agent(self, purpose_embedding):
        """
        Finds the closest agent based on the given purpose embedding.
        """
        closest_agent = None
        highest_similarity = -np.inf

        for agent in self.agents:
            agent_embedding = self.get_embedding(agent.purpose)
            similarity = cosine_similarity([agent_embedding], [purpose_embedding])[0][0]

            if similarity > highest_similarity:
                highest_similarity = similarity
                closest_agent = agent

        return closest_agent, highest_similarity
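The dynamic threshold in calculate_similarity_threshold is just the mean pairwise cosine similarity over the agents' purpose embeddings, with a 0.9 fallback when fewer than two agents exist. The same computation on toy vectors (runnable with numpy alone; real embeddings come from the OpenAI API):

    import numpy as np

    embeddings = [np.array([1.0, 0.0]), np.array([0.8, 0.6]), np.array([0.0, 1.0])]
    avg_similarity = np.mean([
        np.dot(e1, e2) / (np.linalg.norm(e1) * np.linalg.norm(e2))
        for e1 in embeddings for e2 in embeddings
        if not np.array_equal(e1, e2)   # skips value-identical pairs, incl. self-pairs
    ])
    print(avg_similarity)               # ~0.467 for these three vectors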
code_execution.py  (new file, 35 lines)
@@ -0,0 +1,35 @@
import subprocess
import shlex
from contextlib import redirect_stdout, redirect_stderr
from io import StringIO

class CodeExecution:
    def __init__(self):
        self.code_block_start = "```python"
        self.code_block_end = "```"

    def execute_external_code(self, text_with_code):
        """
        Executes Python code as an external process.
        """
        try:
            code_to_execute = text_with_code.split(self.code_block_start)[1].split(self.code_block_end)[0]
            exec_globals = {}
            with StringIO() as stdout_buffer, StringIO() as stderr_buffer:
                with redirect_stdout(stdout_buffer), redirect_stderr(stderr_buffer):
                    exec(code_to_execute, exec_globals)
                stdout = stdout_buffer.getvalue()
                stderr = stderr_buffer.getvalue()

            exec_response = "Executed Python Code Successfully."
            if stdout:
                exec_response += "\nStandard Output:\n" + stdout
            if stderr:
                exec_response += "\nStandard Error:\n" + stderr

        except Exception as e:
            exec_response = f"Error in executing external code: {e}"

        if len(exec_response) > 4000:
            exec_response = exec_response[:600] + "..." + exec_response[-3000:]
        return exec_response
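Note the docstring oversells slightly: despite "external process", the snippet runs via exec() in the current interpreter, with stdout/stderr captured. Usage in isolation (assuming the repository is on the path; the embedded code string is made up):

    from code_execution import CodeExecution

    executor = CodeExecution()
    text = "Here you go:\n```python\nprint(sum(range(10)))\n```"
    print(executor.execute_external_code(text))
    # -> Executed Python Code Successfully.
    #    Standard Output:
    #    45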
main.py  (64 lines changed)
@@ -1,28 +1,56 @@
 import time
 import os
-from microagentmanager import MicroAgentManager
+from microagent import MicroAgent
+from microagent_manager import MicroAgentManager
+from utility import get_env_variable, time_function
 from prompts import USER_INPUTS
+from colorama import Fore, Style
+from terminaltables import AsciiTable

 def main():
-    try:
-        api_key = os.environ["OPENAI_KEY"]
-    except KeyError:
-        print("Error: OPENAI_KEY environment variable is not set.")
+    api_key = get_env_variable("OPENAI_KEY", raise_error=False)
+    if not api_key:
+        print(Fore.RED + "🚫 Error: OPENAI_KEY environment variable is not set." + Style.RESET_ALL)
         return

     manager = MicroAgentManager(api_key)
+    manager.create_agents() # Initialize agents

-    user_inputs = USER_INPUTS
-    for user_input in user_inputs:
-        start_time = time.time()
-        response = manager.respond(user_input)
-        final_response = manager.extractResponseFromPrompt(response, user_input)
-        print("Question:", user_input)
-        print("Response:", final_response)
-        end_time = time.time() - start_time
-        print("Time taken:", end_time)
-        print("Number of Agents:", len(manager.agents))
+    for user_input in USER_INPUTS:
+        process_user_input(manager, user_input)

+@time_function
+def process_user_input(manager, user_input):
+    """
+    Processes a single user input and generates a response.
+    """
+    agent = manager.get_or_create_agent("General", depth=1, sample_input=user_input)
+    response = agent.respond(user_input)
+    print(Fore.YELLOW + "🔍 Question:" + Style.RESET_ALL, user_input)
+    print(Fore.GREEN + "💡 Response:" + Style.RESET_ALL, response)
+
+    agents_data = [["Agent", "Depth", "Max Depth", "Usage Count", "Working Agent", "Agent Creator"]]
+    agents = manager.get_agents()
+    for agent in agents:
+        agents_data.append([
+            f"{agent.purpose}",
+            f"{agent.depth}",
+            f"{agent.max_depth}",
+            f"{agent.usage_count}",
+            f"{agent.working_agent}",
+            f"{agent.agent_creator}"
+        ])
+
+    table = AsciiTable(agents_data)
+    print(Fore.CYAN + "🤖 Agents Overview:" + Style.RESET_ALL)
+    print(table.table)
+
+    print(Fore.BLUE + "📝 Dynamic Prompts:" + Style.RESET_ALL)
+    for agent in agents:
+        print(Fore.MAGENTA + f"Prompt for {agent.purpose}:" + Style.RESET_ALL)
+        print(agent.dynamic_prompt + "\n")

 def microagent_factory(initial_prompt, purpose, api_key, depth, max_depth, bootstrap_agent):
     return MicroAgent(initial_prompt, purpose, api_key, depth, max_depth, bootstrap_agent)

 if __name__ == "__main__":
     main()
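The refactored entrypoint fails soft instead of raising: get_env_variable (added in utility.py further down) returns the default when raise_error=False. The check in isolation (runnable with the repository on the path):

    import os
    os.environ.pop("OPENAI_KEY", None)   # simulate a missing key
    from utility import get_env_variable

    api_key = get_env_variable("OPENAI_KEY", raise_error=False)
    print(api_key)   # None -> main() prints the red error message and returns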
microagent.py  (189 lines changed)
@@ -1,173 +1,44 @@
-import openai
-import subprocess
-import shlex
 import logging
-import datetime
 from openaiwrapper import OpenAIAPIWrapper
-from contextlib import redirect_stdout, redirect_stderr
-from io import StringIO
-from prompts import STATIC_PRE_PROMPT
-from prompts import REACT_PLAN_PROMPT
-from prompts import REACT_SYSTEM_PROMPT
-from prompts import REACT_STEP_PROMPT
-from prompts import REACT_STEP_POST
+from agent_evaluation import AgentEvaluation
+from agent_response import AgentResponse
+from agent_similarity import AgentSimilarity
+from code_execution import CodeExecution
+from prompt_evolution import PromptEvolution
+from response_extraction import ResponseExtraction
+from utility import get_env_variable, time_function, log_exception

 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

 class MicroAgent:
-    def __init__(self, initial_prompt, purpose, manager, api_key, depth, max_depth=3, bootstrap_agent=False):
+    def __init__(self, initial_prompt, purpose, depth, agent_creator, openai_wrapper, max_depth=3, bootstrap_agent=False):
         self.dynamic_prompt = initial_prompt
         self.purpose = purpose
-        self.manager = manager
-        self.api_key = api_key
-        self.openai_wrapper = OpenAIAPIWrapper(api_key)
         self.depth = depth
         self.max_depth = max_depth
         self.usage_count = 0
-        openai.api_key = api_key
-        self.working_agent = True
-        self.code_block_start = "```python"
-        self.code_block_end = "```"
-        if (bootstrap_agent):
-            self.working_agent = True
+        self.working_agent = bootstrap_agent
+        self.agent_creator = agent_creator
+        self.openai_wrapper = openai_wrapper

-    def generate_runtime_context(self):
-        available_agents_arr = [agent for agent in self.manager.agents if agent.purpose != "General" and agent.purpose != self.purpose]
-        available_agents_with_depth = ', '.join([f"{agent.purpose} (depth={agent.depth})" for agent in available_agents_arr])
-
-        runtime_context = f"Your Purpose: {self.purpose}. Current Agent Depth: {self.depth}. Available agents: {available_agents_with_depth}."
-        return runtime_context
-
-    def conclude_output(self, conversation):
-        react_prompt = conversation
-        response = self.openai_wrapper.chat_completion(
-            model="gpt-4-1106-preview",
-            messages=[
-                {"role": "system", "content": REACT_SYSTEM_PROMPT},
-                {"role": "user", "content": react_prompt}
-            ]
-        ).choices[0].message['content']
-        return response
-
-    def generate_response(self, input_text):
-        runtime_context = self.generate_runtime_context()
-        system_prompt = STATIC_PRE_PROMPT + runtime_context + self.dynamic_prompt + "\nDELIVER THE NEXT PACKAGE."
-        conversation_accumulator = ""
-        thought_number = 1
-        action_number = 1
-        found_new_solution = False
-        plan_step = True
-
-        for iteration in range(self.max_depth):
-            react_prompt = (
-                f"Question: {input_text}\n"
-                f"{conversation_accumulator}\n"
-                f"Thought {thought_number}: {REACT_STEP_PROMPT}"
-                f"Action {action_number}{REACT_STEP_POST}"
-            )
-
-            if plan_step:
-                react_prompt = react_prompt + REACT_PLAN_PROMPT
-                plan_step = False
-
-            response = self.openai_wrapper.chat_completion(
-                model="gpt-4-1106-preview",
-                messages=[
-                    {"role": "system", "content": system_prompt},
-                    {"role": "user", "content": react_prompt}
-                ]
-            ).choices[0].message['content']
-
-            conversation_accumulator += f"\n{response}"
-
-            if "Use Agent[" in response:
-                agent_name = response.split('Use Agent[')[1].split(']')[0]
-                input_text = ""
-                if ":" in agent_name:
-                    input_text = agent_name.split(":")[1]
-                    agent_name = agent_name.split(":")[0]
-
-                delegated_agent = self.manager.get_or_create_agent(agent_name, depth=self.depth + 1, sample_input=input_text)
-                delegated_response = delegated_agent.respond(input_text)
-                conversation_accumulator += f"\Output {thought_number}: Delegated task to Agent {agent_name}\nOutput of Agent: {action_number}: {delegated_response}"
-
-            elif "```python" in response:
-                code_to_execute = response.split("```python")[1].split("```")[0]
-                try:
-                    exec_globals = {}
-                    with StringIO() as stdout_buffer, StringIO() as stderr_buffer:
-                        with redirect_stdout(stdout_buffer), redirect_stderr(stderr_buffer):
-                            exec(code_to_execute, exec_globals)
-                        stdout = stdout_buffer.getvalue()
-                        stderr = stderr_buffer.getvalue()
-
-                    exec_response = "Executed Python Code Successfully."
-                    if stdout:
-                        exec_response += "\nStandard Output:\n" + stdout
-                    if stderr:
-                        exec_response += "\nStandard Error:\n" + stderr
-
-                except Exception as e:
-                    exec_response = f"Error in executing code: {e}"
-
-                if len(exec_response) > 4000:
-                    exec_response = exec_response[:600] + "..." + exec_response[-3000:]
-                conversation_accumulator += f"\nThought {thought_number}: Executed Python code\nAction {action_number}: {exec_response}"
-
-            thought_number += 1
-            action_number += 1
-            if "Query Solved" in response:
-                if iteration != 1 and self.working_agent is True:
-                    found_new_solution = True
-                if iteration == 1:
-                    self.working_agent = True
-                break
-
-        final_answer = self.conclude_output(conversation_accumulator)
-
-        return final_answer, conversation_accumulator, found_new_solution
-
-    def execute_code(self, text_with_code):
-        try:
-            code_start_index = text_with_code.find(self.code_block_start) + len(self.code_block_start)
-            code_end_index = text_with_code.find(self.code_block_end, code_start_index)
-            code_to_execute = text_with_code[code_start_index:code_end_index].strip()
-            result = subprocess.run(shlex.split(code_to_execute), capture_output=True, text=True, shell=True, timeout=30)
-            return result.stdout or result.stderr
-        except Exception as e:
-            return f"Error in executing code: {e}"
-
-    def evolve_prompt(self, input_text, output, full_conversation, new_solution):
-        if self.manager.self_optimization is False:
-            return
-        if len(full_conversation) > 1000:
-            full_conversation = full_conversation[:200] + "..." + full_conversation[-1000:]
-
-        feedback = self.evaluate_agent(input_text, self.dynamic_prompt, output)
-        runtime_context = self.generate_runtime_context()
-        if "poor" in feedback.lower() or new_solution:
-            evolve_prompt_query = f"How should the GPT-4 prompt evolve based on this input and feedback? If you don't know something, write sample code in the prompt to solve it. Sample code must always print out the result! Break down complex tasks by calling other agents if required. Please include python code that should be used to solve a certain task as per purpose or list other agents that should be called. A purpose is always a sentence long. Important: Any problems must be solved through sample code or learned information provided in the new, updated prompt. It's ok to also put data in the prompt. Add any learnings or information that might be useful for the future. ONLY RESPONSE WITH THE REVISED PROMPT NO OTHER TEXT! Current Prompt: {self.dynamic_prompt}, User Feedback: {feedback}, full conversation: {full_conversation}"
-            if (new_solution and self.working_agent is False):
-                evolve_prompt_query = f"How should the GPT-4 prompt evolve based on this input and feedback? Take a look at the solution provided in later on in the _full conversation_ section. As you can see, the problem has been solved. We need to learn from this. Adopt the code or solution found, make it reusable and compile a new, updated system prompt, so the solution can be reused in the future. Sample code must always print out the solution! Remember: Problems are solved through sample code or learned information provided in the new, updatedprompt. It's ok to also put data in the prompt. Add any learnings or information that might be useful for the future. ONLY RESPONSE WITH THE REVISED PROMPT NO OTHER TEXT! Current Prompt: {dynamic_prompt}, User Feedback: {feedback}, full conversation: {full_conversation}"
-                self.working_agent = True
-            new_prompt = self.openai_wrapper.chat_completion(
-                model="gpt-4-1106-preview",
-                messages=[{"role": "system", "content": evolve_prompt_query + runtime_context}]
-            ).choices[0].message['content'].strip() or self.dynamic_prompt
-            self.dynamic_prompt = new_prompt

+        self.agent_evaluator = AgentEvaluation(self.openai_wrapper)
+        self.code_executor = CodeExecution()
+        self.agent_responder = AgentResponse(self.openai_wrapper, self.agent_creator, self.code_executor, agent_creator, depth)
+        self.agent_similarity = AgentSimilarity(self.openai_wrapper, self.agent_creator.agents)
+        self.prompt_evolver = PromptEvolution(self.openai_wrapper, self.agent_creator)
+        self.response_extractor = ResponseExtraction(self.openai_wrapper)

+    @time_function
     def respond(self, input_text):
-        response, full_conversation, new_solution = self.generate_response(input_text)
-        if self.working_agent is False:
-            self.evolve_prompt(input_text, response, full_conversation, new_solution)
-        return response
-
-    def evaluate_agent(self, input_text, prompt, output):
-        evaluation_query = f"Evaluate the generated LLM Output: '{input_text}' with the current prompt '{prompt}' for quality and relevance (Possible Answers: Poor, Good, Perfect), LLM output with current prompt: '{output}'"
-        evaluation = self.openai_wrapper.chat_completion(
-            model="gpt-4-1106-preview",
-            messages=[{"role": "system", "content": evaluation_query}]
-        ).choices[0].message['content']
-        return evaluation
+        try:
+            response, full_conversation, new_solution, no_of_iterations = self.agent_responder.generate_response(input_text, self.dynamic_prompt, self.max_depth)
+            if not self.working_agent:
+                if (no_of_iterations > 2):
+                    self.dynamic_prompt = self.prompt_evolver.evolve_prompt(input_text, self.dynamic_prompt, response, full_conversation, new_solution, self.depth)
+                elif new_solution:
+                    self.working_agent = True
+                # TODO Kill agent if not new_solution and no_of_iterations > 10:
+            return response
+        except Exception as e:
+            log_exception(e)
+            return "An error occurred while generating the response."
microagent_manager.py  (new file, 36 lines)
@@ -0,0 +1,36 @@
import logging
from agent_creation import AgentCreation
from agent_similarity import AgentSimilarity
from openaiwrapper import OpenAIAPIWrapper

class MicroAgentManager:
    def __init__(self, api_key, max_agents=20):
        self.api_key = api_key
        self.max_agents = max_agents
        self.openai_wrapper = OpenAIAPIWrapper(api_key)
        self.agent_creator = AgentCreation(self.openai_wrapper, max_agents)
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.INFO)
        self.logger.addHandler(logging.StreamHandler())

    def get_agents(self):
        return self.agent_creator.agents

    def create_agents(self):
        self.logger.info("Creating agents...")
        self.agent_creator.create_prime_agent(self.openai_wrapper)
        self.logger.info("Agents created successfully.")

    def get_or_create_agent(self, purpose, depth, sample_input):
        self.logger.info(f"Getting or creating agent for purpose: {purpose}")
        agent = self.agent_creator.get_or_create_agent(purpose, depth, sample_input)
        self.logger.info(f"Agent for purpose '{purpose}' retrieved or created.")
        return agent

    def find_closest_agent(self, purpose):
        self.logger.info(f"Finding closest agent for purpose: {purpose}")
        agent_similarity = AgentSimilarity(self.api_key, self.agent_creator.agents)
        purpose_embedding = agent_similarity.get_embedding(purpose)
        closest_agent = agent_similarity.find_closest_agent(purpose_embedding)
        self.logger.info(f"Closest agent for purpose '{purpose}' found.")
        return closest_agent
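Putting the refactor together (not part of the commit): the manager is now the only object a caller touches, and it wires OpenAIAPIWrapper into AgentCreation. A minimal end-to-end sketch, assuming the repository modules are importable and a real key — respond() makes live API calls:

    from microagent_manager import MicroAgentManager

    manager = MicroAgentManager("sk-...")   # placeholder key
    manager.create_agents()                 # bootstraps the prime agent
    agent = manager.get_or_create_agent("Population lookup", depth=1, sample_input="Thailand")
    print(agent.respond("What is the population of Thailand?"))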
microagentmanager.py  (deleted, 123 lines)
@@ -1,123 +0,0 @@
from microagent import MicroAgent
from openaiwrapper import OpenAIAPIWrapper
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from prompts import PRIME_PROMPT
from prompts import PRIME_NAME
from prompts import EXAMPLES
from prompts import PROMPT_ENGINEERING_SYSTEM_PROMPT
from prompts import PROMPT_ENGINEERING_TEMPLATE
from prompts import GOAL_REACHED_PROMPT_TEMPLATE
from prompts import EXTRACTION_PROMPT_TEMPLATE
from prompts import STANDARD_SYSTEM_PROMPT

class MicroAgentManager:
    def __init__(self, api_key, max_agents=20):
        self.agents = []
        self.api_key = api_key
        self.max_agents = max_agents
        self.openai_wrapper = OpenAIAPIWrapper(api_key)
        self.create_prime_agent()
        self.self_optimization = True

    def create_prime_agent(self):
        prime_agent = MicroAgent(PRIME_PROMPT, PRIME_NAME, self, self.api_key, 0, 25, True)
        self.agents.append(prime_agent)

    def get_embedding(self, text):
        response = self.openai_wrapper.get_embedding(text)
        return np.array(response['data'][0]['embedding'])

    def calculate_similarity_threshold(self):
        if len(self.agents) < 2:
            return 0.9

        embeddings = [self.get_embedding(agent.purpose) for agent in self.agents]
        avg_similarity = np.mean([np.dot(e1, e2) / (np.linalg.norm(e1) * np.linalg.norm(e2)) for e1 in embeddings for e2 in embeddings if not np.array_equal(e1, e2)])
        return avg_similarity

    def find_closest_agent(self, purpose_embedding):
        closest_agent = None
        highest_similarity = -np.inf

        available_agents = [agent for agent in self.agents if agent.purpose != PRIME_NAME]
        for agent in available_agents:
            agent_embedding = self.get_embedding(agent.purpose)
            similarity = cosine_similarity([agent_embedding], [purpose_embedding])[0][0]

            if similarity > highest_similarity:
                highest_similarity = similarity
                closest_agent = agent

        return closest_agent, highest_similarity

    def get_or_create_agent(self, purpose, depth, sample_input):
        purpose_embedding = self.get_embedding(purpose)
        closest_agent, highest_similarity = self.find_closest_agent(purpose_embedding)
        similarity_threshold = self.calculate_similarity_threshold()

        if highest_similarity >= similarity_threshold:
            closest_agent.usage_count += 1
            return closest_agent

        if len(self.agents) >= self.max_agents:
            self.agents.sort(key=lambda x: x.usage_count)
            self.agents.pop(0)

        prompt = self.generate_llm_prompt(purpose, sample_input)
        new_agent = MicroAgent(prompt, purpose, self, self.api_key, depth=depth)
        new_agent.usage_count = 1
        self.agents.append(new_agent)
        return new_agent

    def extractResponseFromPrompt(self, prompt, question):
        extraction_prompt = EXTRACTION_PROMPT_TEMPLATE.format(question=question, prompt=prompt)

        messages = [
            {"role": "system", "content": STANDARD_SYSTEM_PROMPT},
            {"role": "user", "content": extraction_prompt}
        ]
        extraction = self.openai_wrapper.chat_completion(
            model="gpt-4",
            messages=messages,
            max_tokens=100,
        )

        return extraction.choices[0].message['content'].strip()

    def goal_reached(self, response, user_input):
        goal_reached_prompt = GOAL_REACHED_PROMPT_TEMPLATE.format(user_input=user_input, response=response)
        messages = [
            {"role": "system", "content": STANDARD_SYSTEM_PROMPT},
            {"role": "user", "content": goal_reached_prompt}
        ]
        evaluation = self.openai_wrapper.chat_completion(
            model="gpt-4",
            messages=messages
        )
        return "goal achieved" in evaluation.choices[0].message['content'].lower()

    def generate_llm_prompt(self, goal, sample_input):
        messages = [
            {"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
            {"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, EXAMPLES=EXAMPLES)}
        ]

        response = self.openai_wrapper.chat_completion(
            model="gpt-4-1106-preview",
            messages=messages
        )

        generated_prompt = response.choices[0].message['content'].strip()

        return generated_prompt

    def respond(self, input_text):
        prime_agent = self.agents[0]
        response = prime_agent.generate_response(f"Your Goal: {input_text}")

        while not self.goal_reached(response, input_text):
            response = prime_agent.respond(input_text + " // Previous response: " + response)

        return response
openaiwrapper.py
@@ -3,10 +3,11 @@ import time
class OpenAIAPIWrapper:
    def __init__(self, api_key, timeout=10):
        self.api_key = api_key
        openai.api_key = api_key
        self.timeout = timeout
        self.cache = {}

    def get_embedding(self, text):
        if text in self.cache:
            return self.cache[text]
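The wrapper memoizes embeddings by exact input text. The cache behavior in isolation, with the API round-trip stubbed out (hypothetical stand-in, not the commit's code):

    class CachedEmbeddings:
        # Mirrors OpenAIAPIWrapper's cache check, minus the real API call.
        def __init__(self):
            self.cache = {}
            self.calls = 0

        def get_embedding(self, text):
            if text in self.cache:
                return self.cache[text]
            self.calls += 1                        # stands in for the API round-trip
            self.cache[text] = [float(len(text))]  # fake embedding
            return self.cache[text]

    w = CachedEmbeddings()
    w.get_embedding("hello")
    w.get_embedding("hello")
    print(w.calls)   # 1 -- the repeat lookup never leaves the cache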
prompt_evolution.py  (new file, 45 lines)
@@ -0,0 +1,45 @@
import logging
from openaiwrapper import OpenAIAPIWrapper

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

class PromptEvolution:
    def __init__(self, openai_wrapper, manager):
        self.openai_wrapper = openai_wrapper
        self.manager = manager

    def evolve_prompt(self, input_text, dynamic_prompt, output, full_conversation, new_solution, depth):
        """
        Evolves the prompt based on the feedback from the output and full conversation.
        """
        if len(full_conversation) > 1000:
            full_conversation = full_conversation[:200] + "..." + full_conversation[-1000:]

        runtime_context = self.generate_runtime_context(dynamic_prompt, depth)
        evolve_prompt_query = self.build_evolve_prompt_query(dynamic_prompt, output, full_conversation, new_solution)
        new_prompt = self.openai_wrapper.chat_completion(
            model="gpt-4-1106-preview",
            messages=[{"role": "system", "content": evolve_prompt_query + runtime_context}]
        ).choices[0].message['content'].strip()

        return new_prompt or dynamic_prompt

    def generate_runtime_context(self, dynamic_prompt, depth):
        """
        Generates runtime context for the evolve prompt query.
        """
        available_agents_arr = [agent for agent in self.manager.agents if agent.purpose != "General"]
        available_agents_with_depth = ', '.join([f"{agent.purpose} (depth={agent.depth})" for agent in available_agents_arr])
        runtime_context = f"Current Agent Depth: {depth}. Available agents: {available_agents_with_depth}."
        return runtime_context

    def build_evolve_prompt_query(self, dynamic_prompt, output, full_conversation, new_solution):
        """
        Builds the query for evolving the prompt.
        """
        feedback_query_part = "How should the GPT-4 prompt evolve based on this input and feedback?"
        if new_solution:
            feedback_query_part += " Take a look at the solution provided in the full conversation section. Adopt the code or solution found, make it reusable and compile a new, updated system prompt."

        evolve_prompt_query = f"{feedback_query_part} Current Prompt: {dynamic_prompt}, Full Conversation: {full_conversation}"
        return evolve_prompt_query
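Before the conversation reaches the evolver it is squeezed to head + "..." + tail so the prompt stays small. The slicing in isolation (pure Python, runnable as-is):

    full_conversation = "x" * 5000
    if len(full_conversation) > 1000:
        full_conversation = full_conversation[:200] + "..." + full_conversation[-1000:]
    print(len(full_conversation))   # 1203: 200 head + 3 dots + 1000 tail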
prompts.py
@@ -15,7 +15,7 @@ EXAMPLES = [
 PROMPT_ENGINEERING_TEMPLATE = (
     "Using best practices in prompt engineering, create a detailed prompt for the goal '{goal}'. "
     "This generated prompt will be combined with the following context later (but must be generic and is forbidden to contain any of the following context): '{sample_input}'\n"
-    "Examples: {EXAMPLES}. Aim for maximum 50 words. Important: Any problems must be solved through sample code or learned information provided in the prompt. "
+    "Examples: {examples}. Aim for maximum 50 words. Important: Any problems must be solved through sample code or learned information provided in the prompt. "
     "Any sample code provided must be executable in isolation. Avoid unresolvable placeholders for URLs and API Keys. "
     "If you retrieve information from the web, avoid parsing HTML Code or use regex, just process the text data and print it out (As shown in the examples)!!! "
     "As long as the answer is somewhere in the output, and it is below 1k characters, its a perfect solution. Use real existing services and websites. Don't invent services or use example.com."
@@ -59,8 +59,6 @@ STATIC_PRE_PROMPT = (
 )

 USER_INPUTS = [
-    "What is the population of Thailand?",
-    "What is the population of Sweden?",
-    "What is the population of the smallest country on earth?"
+    "What is the population of Thailand?"
 ]
requirements.txt
@@ -1,3 +1,7 @@
-prettytable
-termcolor
+colorama
+terminaltables
 openai==0.28.1

 # Core Libraries
 numpy
@@ -38,7 +42,7 @@ openpyxl
 xlrd

 # Logging
-wikpedia
+wikipedia
 loguru

 # Testing
@@ -51,4 +55,4 @@ paramiko
 cryptography

 # Parsing
-lxml
+lxml
response_extraction.py  (new file, 23 lines)
@@ -0,0 +1,23 @@
from openaiwrapper import OpenAIAPIWrapper
from prompts import STANDARD_SYSTEM_PROMPT, EXTRACTION_PROMPT_TEMPLATE

class ResponseExtraction:
    def __init__(self, openai_wrapper):
        self.openai_wrapper = openai_wrapper

    def extract_response_from_prompt(self, prompt, question):
        """
        Extracts a response based on the given prompt and question.
        """
        extraction_prompt = EXTRACTION_PROMPT_TEMPLATE.format(question=question, prompt=prompt)
        messages = [
            {"role": "system", "content": STANDARD_SYSTEM_PROMPT},
            {"role": "user", "content": extraction_prompt}
        ]
        extraction = self.openai_wrapper.chat_completion(
            model="gpt-4",
            messages=messages,
            max_tokens=100,
        )

        return extraction.choices[0].message['content'].strip()
utility.py  (new file, 33 lines)
@@ -0,0 +1,33 @@
import os
import time
import logging

def get_env_variable(var_name, default=None, raise_error=True):
    """
    Retrieves an environment variable. If not found, returns the default
    value or raises an error based on the raise_error flag.
    """
    try:
        return os.environ[var_name]
    except KeyError:
        if raise_error:
            raise EnvironmentError(f"Error: {var_name} environment variable is not set.")
        return default

def time_function(func):
    """
    Decorator to measure the execution time of a function.
    """
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time() - start_time
        logging.info(f"Time taken by {func.__name__}: {end_time} seconds")
        return result
    return wrapper

def log_exception(e, message="An error occurred"):
    """
    Logs exceptions with a custom message.
    """
    logging.error(f"{message}: {str(e)}")
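Usage of the new helpers (runnable with the repository on the path; time_function logs at INFO, so logging must be configured first):

    import logging
    import time
    logging.basicConfig(level=logging.INFO)

    from utility import time_function

    @time_function
    def slow_add(a, b):
        time.sleep(0.1)
        return a + b

    print(slow_add(2, 3))   # logs "Time taken by slow_add: 0.10... seconds", then prints 5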