Add files via upload
This commit is contained in:
64
AutoKG/Autokg.py
Normal file
64
AutoKG/Autokg.py
Normal file
@@ -0,0 +1,64 @@
|
||||
import os
|
||||
from colorama import Fore
|
||||
from LC_CAMEL import starting_convo,get_sys_msgs,CAMELAgent
|
||||
from RE_CAMEL import Retrieval_Msg
|
||||
from langchain.chat_models import ChatOpenAI
|
||||
from langchain.schema import HumanMessage
|
||||
|
||||
|
||||
# OpenAI credentials must be set before any ChatOpenAI call is made.
# NOTE(review): this is a hard-coded placeholder — the script fails at the
# first API call unless the user edits this line; consider reading the key
# from the environment instead of overwriting it.
os.environ["OPENAI_API_KEY"] = "Your_api_key"

word_limit = 50  # word limit for task brainstorming
|
||||
|
||||
def main() -> None:
    """Run a two-agent CAMEL role-playing loop that builds a Knowledge Graph.

    A "user" agent (the domain expert) issues one Instruction/Input pair per
    turn and an "assistant" agent (the consultant) answers it, optionally
    enriched with web-retrieved facts from ``Retrieval_Msg``.  The loop ends
    after ``chat_turn_limit`` turns or when the user agent emits
    ``CAMEL_TASK_DONE``.
    """
    assistant_role_name = "Consultant"
    user_role_name = "Knowledge Graph Domain Expert"

    task = "Construct a Knowledge Graph about the movie \"Spider-Man: Across the Spider-Verse\"."

    # Make the raw task concrete, then render the two inception prompts into
    # system messages for the two agents.
    specified_task, assistant_inception_prompt, user_inception_prompt = starting_convo(
        assistant_role_name, user_role_name, task, word_limit)
    assistant_sys_msg, user_sys_msg = get_sys_msgs(
        assistant_role_name, user_role_name, specified_task,
        assistant_inception_prompt, user_inception_prompt)
    assistant_agent = CAMELAgent(assistant_sys_msg, ChatOpenAI(temperature=0.2))
    user_agent = CAMELAgent(user_sys_msg, ChatOpenAI(temperature=0.2))

    # Reset agents so each history starts from just its system message.
    assistant_agent.reset()
    user_agent.reset()

    # Initialize chats.
    # BUG FIX: the original prompt said "introductions"; the CAMEL protocol
    # (see the user inception prompt) expects "instructions".
    assistant_msg = HumanMessage(
        content=(f"{user_sys_msg.content}. "
                 "Now start to give me instructions one by one. "
                 "Only reply with Instruction and Input."))

    user_msg = HumanMessage(content=f"{assistant_sys_msg.content}")
    user_msg = assistant_agent.step(user_msg)

    print(Fore.RED + f"Original task prompt:\n{task}\n")
    print(Fore.GREEN + f"Specified task prompt:\n{specified_task}\n")

    chat_turn_limit, n = 30, 0
    while n < chat_turn_limit:
        n += 1
        user_ai_msg = user_agent.step(assistant_msg)
        user_msg = HumanMessage(content=user_ai_msg.content)
        print(Fore.BLUE + f"AI User ({user_role_name}):\n\n{user_msg.content}\n\n")

        Supplement_info = Retrieval_Msg(assistant_role_name, user_role_name, user_msg.content, 20)
        # BUG FIX: the original condition used `or` (and misspelled "Stoped"),
        # which is True for *every* string — empty strings fall through to the
        # second clause — so the plain-assistant `else` branch was dead code.
        # Use the supplement only when retrieval produced something and the
        # tool agent did not bail out (langchain returns
        # "Agent stopped due to iteration limit or time limit.").
        if Supplement_info != "" and "Agent stopped" not in Supplement_info:
            Supplement_info_new = user_msg.content + "\n Additional information for the Instruction: " + Supplement_info
            Supplement_info_new = HumanMessage(content=Supplement_info_new)
            assistant_ai_msg = assistant_agent.step(Supplement_info_new)
            assistant_msg = HumanMessage(content=assistant_ai_msg.content)
            print(Fore.YELLOW + f"AI Assistant With Tool ({assistant_role_name}):\n\n{assistant_msg.content}\n\n")
        else:
            assistant_ai_msg = assistant_agent.step(user_msg)
            assistant_msg = HumanMessage(content=assistant_ai_msg.content)
            print(Fore.CYAN + f"AI Assistant ({assistant_role_name}):\n\n{assistant_msg.content}\n\n")

        if "CAMEL_TASK_DONE" in user_msg.content:
            break
|
||||
# Script entry point: run the role-playing loop only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
|
||||
130
AutoKG/LC_CAMEL.py
Normal file
130
AutoKG/LC_CAMEL.py
Normal file
@@ -0,0 +1,130 @@
|
||||
from typing import List
|
||||
from langchain.chat_models import ChatOpenAI
|
||||
|
||||
|
||||
from langchain.prompts.chat import (
|
||||
SystemMessagePromptTemplate,
|
||||
HumanMessagePromptTemplate,
|
||||
)
|
||||
from langchain.schema import (
|
||||
AIMessage,
|
||||
HumanMessage,
|
||||
SystemMessage,
|
||||
BaseMessage,
|
||||
)
|
||||
|
||||
|
||||
class CAMELAgent:
    """Minimal CAMEL chat agent: a fixed system message plus rolling history.

    Each :meth:`step` appends the incoming message to the history, queries
    the chat model with the entire history, appends the model's reply, and
    returns that reply.
    """

    def __init__(
        self,
        system_message: SystemMessage,
        model: ChatOpenAI,
    ) -> None:
        self.system_message = system_message
        self.model = model
        self.init_messages()

    def reset(self) -> List[BaseMessage]:
        """Clear the history back to just the system message and return it.

        BUG FIX: this method returns ``self.stored_messages``, so the
        original ``-> None`` annotation was wrong.
        """
        self.init_messages()
        return self.stored_messages

    def init_messages(self) -> None:
        """Start (or restart) the history containing only the system message."""
        self.stored_messages = [self.system_message]

    def update_messages(self, message: BaseMessage) -> List[BaseMessage]:
        """Append *message* to the history and return the full history."""
        self.stored_messages.append(message)
        return self.stored_messages

    def step(
        self,
        input_message: HumanMessage,
    ) -> AIMessage:
        """Send *input_message* plus all prior context to the model.

        Both the input and the model's reply are recorded in the history;
        the reply is returned.
        """
        messages = self.update_messages(input_message)

        output_message = self.model(messages)
        self.update_messages(output_message)

        return output_message
|
||||
|
||||
def starting_convo(assistant_role_name, user_role_name, task, word_limit):
    """Specify the task via an LLM and build the two CAMEL inception prompts.

    Returns a tuple ``(specified_task, assistant_inception_prompt,
    user_inception_prompt)``.  The two prompts are still *templates* — they
    keep their ``{assistant_role_name}``/``{user_role_name}``/``{task}``
    placeholders for ``get_sys_msgs`` to fill in later.
    """
    # A throwaway high-temperature agent rewrites the raw task into a more
    # specific one within the given word limit.
    task_specifier_sys_msg = SystemMessage(content="You can make a task more specific.")
    task_specifier_prompt = (
        """Here is a task that {assistant_role_name} will help {user_role_name} to complete: {task}.
Please make it more specific. Be creative and imaginative.
Please reply with the specified task in {word_limit} words or less. Do not add anything else."""
    )
    task_specifier_template = HumanMessagePromptTemplate.from_template(template=task_specifier_prompt)
    task_specify_agent = CAMELAgent(task_specifier_sys_msg, ChatOpenAI(temperature=1.0))
    task_specifier_msg = task_specifier_template.format_messages(assistant_role_name=assistant_role_name,
                                                                 user_role_name=user_role_name,
                                                                 task=task, word_limit=word_limit)[0]
    specified_task_msg = task_specify_agent.step(task_specifier_msg)
    # print(Fore.GREEN+f"Specified task: {specified_task_msg.content}")
    specified_task = specified_task_msg.content
    # specified_task = task

    # System prompt for the solution-writing assistant agent.  The exact
    # wording is part of the protocol (e.g. "Solution:" / "Next request.").
    assistant_inception_prompt = (
        """Never forget you are a {assistant_role_name} and I am a {user_role_name}. Never flip roles! Never instruct me!
We share a common interest in collaborating to successfully complete a task.
You must help me to complete the task.
Here is the task: {task}. Never forget our task!
I must instruct you based on your expertise and my needs to complete the task.

I must give you one instruction at a time.
You must write a specific solution that appropriately completes the requested instruction.
You must decline my instruction honestly if you cannot perform the instruction due to physical, moral, legal reasons or your capability and explain the reasons.
Do not add anything else other than your solution to my instruction.
You are never supposed to ask me any questions you only answer questions.
You are never supposed to reply with a flake solution. Explain your solutions.
Your solution must be declarative sentences and simple present tense.
Unless I say the task is completed, you should always answer start with:

Solution: <YOUR_SOLUTION>

<YOUR_SOLUTION> should be specific and provide preferable implementations and examples for task-solving.
Always end <YOUR_SOLUTION> with: Next request."""
    )

    # System prompt for the instruction-issuing user agent.  The
    # "Instruction:/Input:" format and the <CAMEL_TASK_DONE> terminator are
    # relied upon by the main loop.
    user_inception_prompt = (
        """Never forget you are a {user_role_name} and I am a {assistant_role_name}. Never flip roles! You will always instruct me.
We share a common interest in collaborating to successfully complete a task.
I must help you to complete the task.
Here is the task: {task}. Never forget our task!
You must instruct me based on my expertise and your needs to complete the task ONLY in the following two ways:

1. Instruct with a necessary input:
Instruction: <YOUR_INSTRUCTION>
Input: <YOUR_INPUT>

2. Instruct without any input:
Instruction: <YOUR_INSTRUCTION>
Input: None


The "Instruction" describes a task or question.
The paired "Input" provides further context or information for the requested "Instruction".

You must give me one instruction at a time.
I must write a response that appropriately completes the requested instruction.
I must decline your instruction honestly if I cannot perform the instruction due to physical, moral, legal reasons or my capability and explain the reasons.
You should instruct me not ask me questions.
Now you must start to instruct me using the two ways described above.
Do not add anything else other than your instruction and the optional corresponding input!
Keep giving me instructions and necessary inputs until you think the task is completed.
When the task is completed, you must only reply with a single word <CAMEL_TASK_DONE>.
Never say <CAMEL_TASK_DONE> unless my responses have solved your task."""
    )
    return specified_task, assistant_inception_prompt, user_inception_prompt
|
||||
|
||||
def get_sys_msgs(assistant_role_name: str, user_role_name: str, task: str, assistant_inception_prompt,
                 user_inception_prompt):
    """Render both inception prompt templates into system messages.

    Each template's {assistant_role_name}/{user_role_name}/{task}
    placeholders are substituted with the given values; the first (and
    only) rendered message of each template is returned as the pair
    (assistant_sys_msg, user_sys_msg).
    """
    def render(inception_prompt):
        # Build a system-message template and fill in the shared values.
        template = SystemMessagePromptTemplate.from_template(template=inception_prompt)
        return template.format_messages(assistant_role_name=assistant_role_name,
                                        user_role_name=user_role_name,
                                        task=task)[0]

    return render(assistant_inception_prompt), render(user_inception_prompt)
||||
61
AutoKG/RE_CAMEL.py
Normal file
61
AutoKG/RE_CAMEL.py
Normal file
@@ -0,0 +1,61 @@
|
||||
from langchain.agents import load_tools
|
||||
from langchain.agents import initialize_agent
|
||||
from langchain.llms import OpenAI
|
||||
from langchain.agents import AgentType
|
||||
from LC_CAMEL import CAMELAgent
|
||||
from colorama import Fore
|
||||
from langchain.chat_models import ChatOpenAI
|
||||
import os
|
||||
from langchain.prompts.chat import (
|
||||
HumanMessagePromptTemplate,
|
||||
)
|
||||
from langchain.schema import (
|
||||
SystemMessage,
|
||||
)
|
||||
|
||||
# SerpAPI credentials for the google-search tool used by Retrieval_Msg.
# NOTE(review): hard-coded placeholder — must be replaced (or exported
# externally) before the search agent can run.
os.environ["SERPAPI_API_KEY"] = "your_serpapi_api_key"
||||
|
||||
def Retrieval_Msg(assistant_role_name, user_role_name, task, word_limit):
    """Optionally augment an instruction with facts fetched via Google search.

    A high-temperature LLM first decides whether the instruction (*task*)
    needs a factual web lookup and, if so, phrases a search question in the
    form "Browsing Question: ...".  A zero-shot ReAct agent equipped with
    the serpapi tool then answers that question.  Returns the retrieved
    text, or "" when no browsing is needed (question is "none") or the LLM
    produced no "Browsing Question:" line at all.
    """
    retrieval_sys_msg = SystemMessage(content="You are an assistant who can use Google search to gather information")

    # Prompt that asks the LLM to distill the instruction into a single
    # Google-searchable factual question (or "none").
    retrieval_specifier_prompt = (
        """Here is a task that {assistant_role_name} will help {user_role_name} to complete a Knowledge Graph Construction task based on the {user_role_name}'s instruction and input: {task}.
Suppose you are the {assistant_role_name}.
You must know that you are able to perform web searches.
You are never supposed to search for information about methodology questions.
You must only search factual knowledge on the Internet.

Please summarize the key information of the task and answer only in this form:

Browsing Question: <YOUR_QUESTION>

<YOUR_QUESTION> should be your browsing question suitable for Google search.
If you think browsing is not necessary, then the answer should be \"none\".
Be creative and imaginative. Please reply in {word_limit} words or less. Do not add anything else."""
    )

    retrieval_specifier_template = HumanMessagePromptTemplate.from_template(template=retrieval_specifier_prompt)
    retrieval_specify_agent = CAMELAgent(retrieval_sys_msg, ChatOpenAI(temperature=1.0))
    retrieval_specifier_msg = retrieval_specifier_template.format_messages(assistant_role_name=assistant_role_name,
                                                                           user_role_name=user_role_name,
                                                                           task=task, word_limit=word_limit)[0]
    specified_retrieval_msg = retrieval_specify_agent.step(retrieval_specifier_msg)
    print(Fore.GREEN+f"Specified retrieval:\n{specified_retrieval_msg.content}")
    specified_retrieval = specified_retrieval_msg.content
    response = ""

    if "Browsing Question:" in specified_retrieval:
        if "Browsing Question: none" in specified_retrieval:
            # LLM decided no browsing is needed — return the empty string.
            return response
        else:
            # Strip the label so only the bare question is passed to the agent.
            specified_retrieval = specified_retrieval.replace("Browsing Question:","")
            # Load the OpenAI completion model used by the search agent.
            llm = OpenAI(temperature=0,max_tokens=2048)
            # Load the serpapi (Google search) tool.
            tools = load_tools(["serpapi"])
            # Tools must be wrapped in an initialized agent; verbose=True
            # prints the full ReAct reasoning trace.
            agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
            # Run the agent on the extracted browsing question.
            response = agent.run(specified_retrieval)

    return response
||||
Reference in New Issue
Block a user