Update moa.py

add more logging
Asankhaya Sharma
2024-10-21 20:21:07 +08:00
parent 83b53419dc
commit 0df52917a4


@@ -3,9 +3,11 @@ import logging
 logger = logging.getLogger(__name__)
 
 def mixture_of_agents(system_prompt: str, initial_query: str, client, model: str) -> str:
+    logger.info(f"Starting mixture_of_agents function with model: {model}")
     moa_completion_tokens = 0
     completions = []
 
+    logger.debug(f"Generating initial completions for query: {initial_query}")
     response = client.chat.completions.create(
         model=model,
         messages=[
@@ -18,7 +20,9 @@ def mixture_of_agents(system_prompt: str, initial_query: str, client, model: str
     )
     completions = [choice.message.content for choice in response.choices]
     moa_completion_tokens += response.usage.completion_tokens
+    logger.info(f"Generated {len(completions)} initial completions. Tokens used: {response.usage.completion_tokens}")
 
+    logger.debug("Preparing critique prompt")
     critique_prompt = f"""
     Original query: {initial_query}
 
@@ -36,6 +40,7 @@ def mixture_of_agents(system_prompt: str, initial_query: str, client, model: str
     Please provide your critique for each candidate:
     """
 
+    logger.debug("Generating critiques")
     critique_response = client.chat.completions.create(
         model=model,
         messages=[
@@ -48,7 +53,9 @@ def mixture_of_agents(system_prompt: str, initial_query: str, client, model: str
     )
     critiques = critique_response.choices[0].message.content
     moa_completion_tokens += critique_response.usage.completion_tokens
+    logger.info(f"Generated critiques. Tokens used: {critique_response.usage.completion_tokens}")
 
+    logger.debug("Preparing final prompt")
     final_prompt = f"""
     Original query: {initial_query}
 
@@ -69,6 +76,7 @@ def mixture_of_agents(system_prompt: str, initial_query: str, client, model: str
     Please provide a final, optimized response to the original query:
     """
 
+    logger.debug("Generating final response")
     final_response = client.chat.completions.create(
         model=model,
         messages=[
@@ -80,4 +88,7 @@ def mixture_of_agents(system_prompt: str, initial_query: str, client, model: str
         temperature=0.1
     )
     moa_completion_tokens += final_response.usage.completion_tokens
-    return final_response.choices[0].message.content, moa_completion_tokens
+    logger.info(f"Generated final response. Tokens used: {final_response.usage.completion_tokens}")
+    logger.info(f"Total completion tokens used: {moa_completion_tokens}")
+
+    return final_response.choices[0].message.content, moa_completion_tokens
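
Note: the logger.debug messages added here are suppressed under Python's default WARNING log level. Below is a minimal sketch of how a caller might surface the new output, assuming an OpenAI-compatible client and that mixture_of_agents is importable from moa.py; the import path, model name, and query are placeholders, not part of this commit.

import logging

from openai import OpenAI  # any client exposing chat.completions.create works

from moa import mixture_of_agents  # adjust to wherever moa.py lives in the repo

# Configure the root logger at DEBUG so both the new logger.info and
# logger.debug lines added by this commit are actually emitted.
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s %(name)s %(levelname)s %(message)s",
)

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# Per the diff above, mixture_of_agents returns (final_answer, completion_tokens).
answer, tokens = mixture_of_agents(
    system_prompt="You are a helpful assistant.",
    initial_query="Summarize the trade-offs between lists and tuples in Python.",
    client=client,
    model="gpt-4o-mini",  # placeholder model name
)
print(f"Completion tokens used: {tokens}")
print(answer)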