diff --git a/app/src/components/OutputsTable/OutputCell/OutputCell.tsx b/app/src/components/OutputsTable/OutputCell/OutputCell.tsx
index f3da232..3803495 100644
--- a/app/src/components/OutputsTable/OutputCell/OutputCell.tsx
+++ b/app/src/components/OutputsTable/OutputCell/OutputCell.tsx
@@ -147,9 +147,10 @@ export default function OutputCell({
)}
diff --git a/app/src/modelProviders/openpipe-chat/getCompletion.ts b/app/src/modelProviders/openpipe-chat/getCompletion.ts
index 9f669db..472d356 100644
--- a/app/src/modelProviders/openpipe-chat/getCompletion.ts
+++ b/app/src/modelProviders/openpipe-chat/getCompletion.ts
@@ -17,10 +17,24 @@ const modelEndpoints: Record<OpenpipeChatInput["model"], string> = {
"NousResearch/Nous-Hermes-llama-2-7b": "https://ua1bpc6kv3dgge-8000.proxy.runpod.net/v1",
};
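+// Gates the RunPod-hosted model endpoints above; set to true to re-enable them once GPU capacity is available.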
+const CUSTOM_MODELS_ENABLED = false;
+
export async function getCompletion(
input: OpenpipeChatInput,
onStream: ((partialOutput: OpenpipeChatOutput) => void) | null,
): Promise<CompletionResponse<OpenpipeChatOutput>> {
+  // Temporarily disable these models because of GPU constraints
+
+  if (!CUSTOM_MODELS_ENABLED) {
+    return {
+      type: "error",
+      message:
+        "We've disabled this model temporarily because of GPU capacity constraints. Check back later.",
+      autoRetry: false,
+    };
+  }
+
const { model, messages, ...rest } = input;
const templatedPrompt = frontendModelProvider.models[model].templatePrompt?.(messages);