Mirror of https://github.com/transformerlab/transformerlab-api.git, synced 2025-04-19 19:36:18 +03:00
Resolve merge conflict and remove wandb from requirements-gpu
This commit is contained in:
@@ -1,2 +1 @@
 bitsandbytes
-wandb==0.19.8

@@ -86,6 +86,8 @@ gitdb==4.0.12
    # via gitpython
gitpython==3.1.44
    # via wandb
greenlet==3.1.1
    # via sqlalchemy
grpcio==1.70.0
    # via tensorboard
h11==0.14.0
@@ -217,7 +219,6 @@ pydantic==2.10.6
    # -r requirements.in
    # fastapi
    # fschat
    # sqlmodel
    # wandb
pydantic-core==2.27.2
    # via pydantic
@@ -293,8 +294,6 @@ smmap==5.0.2
sniffio==1.3.1
    # via anyio
sqlalchemy==2.0.38
    # via sqlmodel
sqlmodel==0.0.23
    # via -r requirements.in
starlette==0.45.3
    # via fastapi

@@ -251,7 +251,6 @@ pydantic==2.10.6
    # -r requirements.in
    # fastapi
    # fschat
    # sqlmodel
    # wandb
pydantic-core==2.27.2
    # via pydantic
@@ -327,8 +326,6 @@ smmap==5.0.2
sniffio==1.3.1
    # via anyio
sqlalchemy==2.0.38
    # via sqlmodel
sqlmodel==0.0.23
    # via -r requirements.in
starlette==0.45.3
    # via fastapi

@@ -26,4 +26,4 @@ peft
watchfiles
wandb==0.19.8
werkzeug
sqlmodel
sqlalchemy[asyncio]
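
The requirements hunks above pin wandb==0.19.8 in requirements.in, drop it from the GPU set, and refresh the pip-compile annotation blocks to match. After re-syncing an environment, a quick sanity check is to compare installed distributions against the pins; a minimal sketch, with the PINS dict abridged by hand to packages shown in this diff rather than generated from the lock file:

# Minimal sketch: compare installed versions against the pins in this diff.
# PINS is hand-abridged for illustration; extend it from the full lock file.
from importlib.metadata import PackageNotFoundError, version

PINS = {
    "wandb": "0.19.8",
    "sqlmodel": "0.0.23",
    "sqlalchemy": "2.0.38",
    "starlette": "0.45.3",
}

for name, expected in PINS.items():
    try:
        installed = version(name)
    except PackageNotFoundError:
        print(f"{name}: not installed")
        continue
    status = "OK" if installed == expected else f"MISMATCH (pinned {expected})"
    print(f"{name} {installed}: {status}")
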
@@ -12,6 +12,7 @@ from deepeval.models import DeepEvalBaseEmbeddingModel
 from deepeval.models.base_model import DeepEvalBaseLLM
 from deepeval.synthesizer import Evolution, Synthesizer
 from deepeval.synthesizer.config import EvolutionConfig, StylingConfig
+from langchain.schema import HumanMessage, SystemMessage
 from langchain_openai import ChatOpenAI
 from openai import OpenAI
 from pydantic import BaseModel
@@ -120,6 +121,16 @@ class TRLAB_MODEL(DeepEvalBaseLLM):
         res = await chat_model.ainvoke(prompt)
         return res.content
 
+    def generate_without_instructor(self, messages: List[dict]) -> BaseModel:
+        chat_model = self.load_model()
+        modified_messages = []
+        for message in messages:
+            if message["role"] == "system":
+                modified_messages.append(SystemMessage(**message))
+            else:
+                modified_messages.append(HumanMessage(**message))
+        return chat_model.invoke(modified_messages).content
+
     def get_model_name(self):
         return args.model_name
 
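
For reference, the new TRLAB_MODEL helper translates OpenAI-style role dicts into LangChain message objects before invoking the chat model. A self-contained sketch of the same conversion; the model name and prompts are placeholders, not values from the commit, and messages are built from content only here where the commit passes the whole dict through:

# Standalone sketch of the conversion performed by the new method.
from langchain.schema import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI

def to_langchain_messages(messages: list) -> list:
    # Same mapping as the new method: "system" -> SystemMessage,
    # everything else -> HumanMessage.
    converted = []
    for message in messages:
        if message["role"] == "system":
            converted.append(SystemMessage(content=message["content"]))
        else:
            converted.append(HumanMessage(content=message["content"]))
    return converted

chat_model = ChatOpenAI(model="gpt-4o-mini")  # placeholder model name
reply = chat_model.invoke(
    to_langchain_messages([
        {"role": "system", "content": "You are a concise assistant."},
        {"role": "user", "content": "Say hello."},
    ])
).content
print(reply)

This keeps call sites OpenAI-shaped while LangChain receives typed messages, and the synchronous invoke mirrors the existing async ainvoke path.
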
@@ -185,6 +196,14 @@ class CustomCommercialModel(DeepEvalBaseLLM):
         )
         return resp
 
+    def generate_without_instructor(self, messages: List[dict]) -> BaseModel:
+        client = self.load_model()
+        response = client.chat.completions.create(
+            model=self.model_name,
+            messages=messages,
+        )
+        return response.choices[0].message.content
+
     async def a_generate(self, prompt: str, schema: BaseModel) -> BaseModel:
         return self.generate(prompt, schema)
 
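
The CustomCommercialModel counterpart bypasses LangChain and calls the OpenAI SDK directly. A minimal standalone sketch of the same call pattern; the client construction and model name are assumptions for illustration, not part of the commit:

# Minimal sketch of the same plain chat-completions call, outside the class.
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

response = client.chat.completions.create(
    model="gpt-4o-mini",  # placeholder; the commit uses self.model_name
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Say hello."},
    ],
)
print(response.choices[0].message.content)
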
@@ -316,7 +335,8 @@ def generate_expected_outputs(input_values, styling_config):
         \n\nExpected Input Format: {styling_config["input_format"]}
         \n\n Generate the output for the following input: {input_val}.
         \n Output: """
-        expected_output = trlab_model.generate_without_instructor(prompt)
+        messages = [{"role": "system", "content": prompt}]
+        expected_output = trlab_model.generate_without_instructor(messages)
         expected_outputs.append(expected_output)
     return expected_outputs
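
The call-site change in this last hunk exists because generate_without_instructor iterates over role dicts; passing the bare prompt string would iterate its characters and fail on message["role"]. A toy illustration, with a simplified stand-in helper rather than the real model call:

# Toy illustration of the call-site fix; the helper below is a stand-in
# that only echoes roles, not the real model call from the commit.
def generate_without_instructor(messages: list) -> str:
    return " | ".join(f"{m['role']}: {m['content']}" for m in messages)

prompt = "Generate the output for the following input: 2 + 2"

# Old call: generate_without_instructor(prompt) iterates the string,
# so m["role"] raises TypeError on each character.

# New call: wrap the prompt as a single system-role message first.
messages = [{"role": "system", "content": prompt}]
print(generate_without_instructor(messages))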