From 733d53625ba0242e0061e7c2dfb7bc27c44e0c93 Mon Sep 17 00:00:00 2001
From: arcticfly <41524992+arcticfly@users.noreply.github.com>
Date: Fri, 18 Aug 2023 00:37:16 -0700
Subject: [PATCH] Add Gryphe/MythoMax-L2-13b (#173)

---
 .../modelProviders/openpipe-chat/frontend.ts  | 10 ++++
 .../openpipe-chat/getCompletion.ts            |  1 +
 app/src/modelProviders/openpipe-chat/index.ts |  1 +
 .../openpipe-chat/input.schema.json           |  1 +
 .../openpipe-chat/templatePrompt.ts           | 49 +++++++++++++++++++
 5 files changed, 62 insertions(+)

diff --git a/app/src/modelProviders/openpipe-chat/frontend.ts b/app/src/modelProviders/openpipe-chat/frontend.ts
index cbefd84..a0f64ba 100644
--- a/app/src/modelProviders/openpipe-chat/frontend.ts
+++ b/app/src/modelProviders/openpipe-chat/frontend.ts
@@ -7,6 +7,7 @@ import {
   // templateSystemUserAssistantPrompt,
   templateInstructionInputResponsePrompt,
   templateAiroborosPrompt,
+  templateGryphePrompt,
   templateVicunaPrompt,
 } from "./templatePrompt";
 
@@ -69,6 +70,15 @@
   "NousResearch/Nous-Hermes-Llama2-13b": "https://ncv8pw3u0vb8j2-8000.proxy.runpod.net/v1",
   "jondurbin/airoboros-l2-13b-gpt4-2.0": "https://9nrbx7oph4btou-8000.proxy.runpod.net/v1",
   "lmsys/vicuna-13b-v1.5": "https://h88hkt3ux73rb7-8000.proxy.runpod.net/v1",
+  "Gryphe/MythoMax-L2-13b": "https://3l5jvhnxdgky3v-8000.proxy.runpod.net/v1",
   "NousResearch/Nous-Hermes-llama-2-7b": "https://ua1bpc6kv3dgge-8000.proxy.runpod.net/v1",
 };
diff --git a/app/src/modelProviders/openpipe-chat/index.ts b/app/src/modelProviders/openpipe-chat/index.ts
index e183b91..b27de0c 100644
--- a/app/src/modelProviders/openpipe-chat/index.ts
+++ b/app/src/modelProviders/openpipe-chat/index.ts
@@ -11,6 +11,7 @@ const supportedModels = [
   "NousResearch/Nous-Hermes-Llama2-13b",
   "jondurbin/airoboros-l2-13b-gpt4-2.0",
   "lmsys/vicuna-13b-v1.5",
+  "Gryphe/MythoMax-L2-13b",
   "NousResearch/Nous-Hermes-llama-2-7b",
 ] as const;
 
diff --git a/app/src/modelProviders/openpipe-chat/input.schema.json b/app/src/modelProviders/openpipe-chat/input.schema.json
index dd4eede..6d6324b 100644
--- a/app/src/modelProviders/openpipe-chat/input.schema.json
+++ b/app/src/modelProviders/openpipe-chat/input.schema.json
@@ -11,6 +11,7 @@
         "NousResearch/Nous-Hermes-Llama2-13b",
         "jondurbin/airoboros-l2-13b-gpt4-2.0",
         "lmsys/vicuna-13b-v1.5",
+        "Gryphe/MythoMax-L2-13b",
         "NousResearch/Nous-Hermes-llama-2-7b"
       ]
     },
diff --git a/app/src/modelProviders/openpipe-chat/templatePrompt.ts b/app/src/modelProviders/openpipe-chat/templatePrompt.ts
index f9bf4a2..33201ee 100644
--- a/app/src/modelProviders/openpipe-chat/templatePrompt.ts
+++ b/app/src/modelProviders/openpipe-chat/templatePrompt.ts
@@ -223,3 +223,52 @@ export const templateVicunaPrompt = (messages: OpenpipeChatInput["messages"]) =>
 
   return prompt.trim();
 };
+
+// <System prompt/Character Card>
+
+// ### Instruction:
+// Your instruction or question here.
+// For roleplay purposes, I suggest the following - Write <CHAR NAME>'s next reply in a chat between <YOUR NAME> and <CHAR NAME>. Write a single reply only.
+
+// ### Response:
+export const templateGryphePrompt = (messages: OpenpipeChatInput["messages"]) => {
+  const splitter = "\n\n";
+
+  const instructionTag = "### Instruction:\n";
+  const responseTag = "### Response:\n";
+
+  let combinedSystemMessage = "";
+  const conversationMessages = [];
+
+  for (const message of messages) {
+    if (message.role === "system") {
+      combinedSystemMessage += message.content;
+    } else if (message.role === "user") {
+      conversationMessages.push(instructionTag + message.content);
+    } else {
+      conversationMessages.push(responseTag + message.content);
+    }
+  }
+
+  let systemMessage = "";
+
+  if (combinedSystemMessage) {
+    // If there is no user message, add a user tag to the system message
+    if (conversationMessages.find((message) => message.startsWith(instructionTag))) {
+      systemMessage = `${combinedSystemMessage}\n\n`;
+    } else {
+      conversationMessages.unshift(instructionTag + combinedSystemMessage);
+    }
+  }
+
+  let prompt = `${systemMessage}${conversationMessages.join(splitter)}`;
+
+  // Ensure that the prompt ends with an assistant message
+  const lastInstructionIndex = prompt.lastIndexOf(instructionTag);
+  const lastAssistantIndex = prompt.lastIndexOf(responseTag);
+  if (lastInstructionIndex > lastAssistantIndex) {
+    prompt += splitter + responseTag;
+  }
+
+  return prompt;
+};
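
For reference, a minimal sketch (not part of the patch) of the prompt that templateGryphePrompt assembles for a typical conversation. The ChatMessage type below is an assumed stand-in for the element type of OpenpipeChatInput["messages"]:

// Assumed stand-in for OpenpipeChatInput["messages"][number]; the real type
// is defined by the provider's input.schema.json above.
type ChatMessage = { role: "system" | "user" | "assistant"; content: string };

const messages: ChatMessage[] = [
  { role: "system", content: "You are a helpful assistant." },
  { role: "user", content: "What is the capital of France?" },
];

// templateGryphePrompt(messages) prefixes user turns with "### Instruction:\n"
// and assistant turns with "### Response:\n", prepends the system message
// (since a user message exists), and appends a trailing "### Response:\n"
// because the prompt would otherwise end on an instruction. Expected output:
//
// You are a helpful assistant.
//
// ### Instruction:
// What is the capital of France?
//
// ### Response:

Appending the trailing response tag matches the Alpaca-style format documented in the comment block above the function, so the model always generates from the assistant position.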