Merge pull request #79 from speedwagon1299/FixServiceContext

Migrated from ServiceContext to LlamaIndex Settings
Author: NirDiamant
Date: 2025-02-03 00:50:05 +02:00
Committed by: GitHub

@@ -19,7 +19,7 @@
 "nest_asyncio.apply()\n",
 "from dotenv import load_dotenv\n",
 "\n",
-"from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n",
+"from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n",
 "from llama_index.core.prompts import PromptTemplate\n",
 "\n",
 "from llama_index.core.evaluation import (\n",
@@ -28,6 +28,7 @@
 " RelevancyEvaluator\n",
 ")\n",
 "from llama_index.llms.openai import OpenAI\n",
+"from llama_index.core import Settings\n",
 "\n",
 "import openai\n",
 "import time\n",
@@ -90,11 +91,11 @@
 "# We will use GPT-4 for evaluating the responses\n",
 "gpt4 = OpenAI(temperature=0, model=\"gpt-4o\")\n",
 "\n",
-"# Define service context for GPT-4 for evaluation\n",
-"service_context_gpt4 = ServiceContext.from_defaults(llm=gpt4)\n",
+"# Set appropriate settings for the LLM\n",
+"Settings.llm = gpt4\n",
 "\n",
-"# Define Faithfulness and Relevancy Evaluators which are based on GPT-4\n",
-"faithfulness_gpt4 = FaithfulnessEvaluator(service_context=service_context_gpt4)\n",
+"# Define Faithfulness Evaluators which are based on GPT-4\n",
+"faithfulness_gpt4 = FaithfulnessEvaluator()\n",
 "\n",
 "faithfulness_new_prompt_template = PromptTemplate(\"\"\" Please tell if a given piece of information is directly supported by the context.\n",
 " You need to answer with either YES or NO.\n",
@@ -123,7 +124,9 @@
 " \"\"\")\n",
 "\n",
 "faithfulness_gpt4.update_prompts({\"your_prompt_key\": faithfulness_new_prompt_template}) # Update the prompts dictionary with the new prompt template\n",
-"relevancy_gpt4 = RelevancyEvaluator(service_context=service_context_gpt4)"
+"\n",
+"# Define Relevancy Evaluators which are based on GPT-4\n",
+"relevancy_gpt4 = RelevancyEvaluator()"
 ]
 },
 {
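
Worth flagging: the diff keeps the placeholder key `"your_prompt_key"` in `update_prompts`, and the prompt-mixin machinery skips keys an object does not expose, so the custom template likely never takes effect. A safer sketch (the key names below are what current `FaithfulnessEvaluator` versions report; verify with `get_prompts()`):

```python
# Discover the evaluator's real prompt keys before updating, since
# update_prompts() quietly skips keys the object does not expose.
print(faithfulness_gpt4.get_prompts().keys())
# e.g. dict_keys(['eval_template', 'refine_template'])

faithfulness_gpt4.update_prompts(
    {"eval_template": faithfulness_new_prompt_template}
)
```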
@@ -159,10 +162,12 @@
 " # create vector index\n",
 " llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
 "\n",
-" service_context = ServiceContext.from_defaults(llm=llm, chunk_size=chunk_size, chunk_overlap=chunk_size//5) \n",
-" vector_index = VectorStoreIndex.from_documents(\n",
-" eval_documents, service_context=service_context\n",
-" )\n",
+" Settings.llm = llm\n",
+" Settings.chunk_size = chunk_size\n",
+" Settings.chunk_overlap = chunk_size // 5 \n",
+"\n",
+" vector_index = VectorStoreIndex.from_documents(eval_documents)\n",
+" \n",
 " # build query engine\n",
 " query_engine = vector_index.as_query_engine(similarity_top_k=5)\n",
 " num_questions = len(eval_questions)\n",
@@ -234,7 +239,7 @@
 ],
 "metadata": {
 "kernelspec": {
-"display_name": ".venv",
+"display_name": "Python 3",
 "language": "python",
 "name": "python3"
 },
@@ -248,7 +253,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.12.0"
+"version": "3.11.0"
 }
 },
 "nbformat": 4,