graphiti/graphiti_core/llm_client/client.py
Last commit: 895afc7be1 "implement diskcache (#39)" by Daniel Chalef, 2024-08-26

"""
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import hashlib
import json
import logging
import typing
from abc import ABC, abstractmethod

from diskcache import Cache

from ..prompts.models import Message
from .config import LLMConfig

DEFAULT_TEMPERATURE = 0
DEFAULT_CACHE_DIR = './llm_cache'

logger = logging.getLogger(__name__)


class LLMClient(ABC):
    def __init__(self, config: LLMConfig | None, cache: bool = False):
        if config is None:
            config = LLMConfig()

        self.config = config
        self.model = config.model
        self.temperature = config.temperature
        self.max_tokens = config.max_tokens
        self.cache_enabled = cache
        self.cache_dir = Cache(DEFAULT_CACHE_DIR)  # Create a cache directory

    @abstractmethod
    def get_embedder(self) -> typing.Any:
        pass

    @abstractmethod
    async def _generate_response(self, messages: list[Message]) -> dict[str, typing.Any]:
        pass

    def _get_cache_key(self, messages: list[Message]) -> str:
        # Create a unique cache key based on the messages and model
        message_str = json.dumps([m.model_dump() for m in messages], sort_keys=True)
        key_str = f'{self.model}:{message_str}'
        return hashlib.md5(key_str.encode()).hexdigest()

    async def generate_response(self, messages: list[Message]) -> dict[str, typing.Any]:
        if self.cache_enabled:
            cache_key = self._get_cache_key(messages)

            cached_response = self.cache_dir.get(cache_key)
            if cached_response is not None:
                logger.debug(f'Cache hit for {cache_key}')
                return cached_response

        response = await self._generate_response(messages)

        if self.cache_enabled:
            self.cache_dir.set(cache_key, response)

        return response
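
For context, here is a minimal sketch of how a concrete client could plug into this base class and exercise the diskcache layer. The EchoLLMClient subclass is hypothetical (it fabricates a response locally rather than calling a provider), and the Message(role=..., content=...) constructor is an assumption about the pydantic model in graphiti_core.prompts.models; only LLMClient, LLMConfig, and the caching behaviour of generate_response come from the file above.

import asyncio

from graphiti_core.llm_client.client import LLMClient
from graphiti_core.llm_client.config import LLMConfig
from graphiti_core.prompts.models import Message


class EchoLLMClient(LLMClient):
    """Hypothetical client that 'answers' by echoing the last message.

    A real implementation (OpenAI, Anthropic, Groq, ...) would call its
    provider's API inside _generate_response instead.
    """

    def get_embedder(self):
        return None  # no embedder in this sketch

    async def _generate_response(self, messages: list[Message]) -> dict:
        # Pretend to be an LLM: wrap the last message's content in a dict.
        return {'content': messages[-1].content}


async def main() -> None:
    # cache=True turns on the diskcache layer in LLMClient.generate_response,
    # so a second call with identical messages is served from ./llm_cache.
    client = EchoLLMClient(LLMConfig(), cache=True)
    messages = [Message(role='user', content='Hello, Graphiti!')]  # assumed field names

    first = await client.generate_response(messages)   # computed, then cached
    second = await client.generate_response(messages)  # cache hit
    print(first == second)  # True


if __name__ == '__main__':
    asyncio.run(main())

Because the cache key hashes the model name together with the serialized messages, changing either produces a fresh entry; identical requests against the same model are the only ones that hit the cache.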