#!/usr/bin/env python3
"""Query a LightRAG knowledge base (Azure OpenAI backend) from a local storage dir."""
import asyncio
import glob
import os

import aiofiles
from tqdm import tqdm

from lightrag import LightRAG, QueryParam
from lightrag.kg.shared_storage import initialize_pipeline_status
from lightrag.llm.azure_openai import azure_openai_complete, azure_openai_embed
from lightrag.llm.openai import gpt_4o_complete, gpt_4o_mini_complete, openai_embed
from lightrag.utils import EmbeddingFunc, setup_logger

setup_logger("lightrag", level="INFO")
async def initialize_rag(
    working_dir: str = "/Users/tcudikel/Dev/ancient-history/data/storage/base_gpt4o",
) -> LightRAG:
    """Build and initialize a LightRAG instance backed by Azure OpenAI.

    Args:
        working_dir: Root directory of the persisted knowledge base. The Chroma
            vector DB is stored in a ``vdb`` subdirectory of this path, so the
            two locations stay consistent instead of being hard-coded twice.

    Returns:
        A LightRAG instance with storages and pipeline status initialized.
    """
    rag = LightRAG(
        working_dir=working_dir,
        graph_storage="NetworkXStorage",
        vector_storage="ChromaVectorDBStorage",
        vector_db_storage_cls_kwargs={
            # Derived from working_dir rather than duplicated as a literal.
            "local_path": os.path.join(working_dir, "vdb"),
            "cosine_better_than_threshold": 0.5,
        },
        embedding_func=EmbeddingFunc(
            embedding_dim=3072,
            max_token_size=8192,
            # Pass the callable directly; the lambda wrapper added nothing.
            func=azure_openai_embed,
        ),
        llm_model_func=azure_openai_complete,
    )

    # Storage backends and the shared pipeline status must be initialized
    # before any query/insert calls.
    await rag.initialize_storages()
    await initialize_pipeline_status()

    return rag
def main() -> None:
    """Run a single query against the pre-built knowledge base and print the answer."""
    rag = asyncio.run(initialize_rag())
    try:
        response = rag.query(
            "Which prophets exist before Noah?",
            param=QueryParam(
                mode="mix",
                response_type="Single Paragraphs",
                only_need_context=False,
            ),
        )
        print(response)
    finally:
        # Release the storage handles opened by initialize_storages(); the
        # original script leaked them on exit.
        asyncio.run(rag.finalize_storages())


if __name__ == "__main__":
    main()