Cap the number of waiting messages we try to render

If a cell was attempted several hours ago and never resolved, the UI crashes because we try to render thousands of log messages once a second (e.g. https://app.openpipe.ai/experiments/372d0827-186e-4a7d-a8a6-1bf7050eb5fd). We should probably have a different UI for cells that have hung for a long time, telling you to just retry, but this quick fix should work for now.
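For a sense of scale, here is a rough sketch of the failure mode (illustration only, not code from this commit; the interval value is assumed): a cell requested four hours ago has 14,400 seconds of waiting time, and dividing by a roughly one-second WAITING_MESSAGE_INTERVAL yields thousands of placeholder rows, all rebuilt on every 1000 ms refetch.

// Rough TypeScript illustration of the unbounded growth described above.
// WAITING_MESSAGE_INTERVAL's real value lives in the component; 1000 ms is assumed here.
const WAITING_MESSAGE_INTERVAL = 1000;
const requestedAt = Date.now() - 4 * 60 * 60 * 1000; // cell attempted four hours ago
const numWaitingMessages = Math.floor((Date.now() - requestedAt) / WAITING_MESSAGE_INTERVAL);
// ~14,400 waiting messages, re-rendered once a second, which locks up the page.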
Author: Kyle Corbitt
Date: 2023-08-14 15:44:03 -07:00
Parent: 99f305483b
Commit: f41e2229ca


@@ -33,7 +33,7 @@ export default function OutputCell({
   if (!templateHasVariables) disabledReason = "Add a value to the scenario variables to see output";
-  const [refetchInterval, setRefetchInterval] = useState(0);
+  const [refetchInterval, setRefetchInterval] = useState<number | false>(false);
   const { data: cell, isLoading: queryLoading } = api.scenarioVariantCells.get.useQuery(
     { scenarioId: scenario.id, variantId: variant.id },
     { refetchInterval },
@@ -64,7 +64,8 @@ export default function OutputCell({
     cell.retrievalStatus === "PENDING" ||
     cell.retrievalStatus === "IN_PROGRESS" ||
     hardRefetching;
-  useEffect(() => setRefetchInterval(awaitingOutput ? 1000 : 0), [awaitingOutput]);
+  useEffect(() => setRefetchInterval(awaitingOutput ? 1000 : false), [awaitingOutput]);
   // TODO: disconnect from socket if we're not streaming anymore
   const streamedMessage = useSocket<OutputSchema>(cell?.id);
@@ -120,8 +121,13 @@ export default function OutputCell({
     ? response.receivedAt.getTime()
     : Date.now();
   if (response.requestedAt) {
-    numWaitingMessages = Math.floor(
-      (relativeWaitingTime - response.requestedAt.getTime()) / WAITING_MESSAGE_INTERVAL,
+    numWaitingMessages = Math.min(
+      Math.floor(
+        (relativeWaitingTime - response.requestedAt.getTime()) / WAITING_MESSAGE_INTERVAL,
+      ),
+      // Don't try to render more than 15, it'll use too much CPU and
+      // break the page
+      15,
     );
   }
   return (
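Two follow-up notes, not part of this commit: react-query's refetchInterval option is typed number | false, so typing the state the same way and passing false makes "stop polling" explicit once the cell is no longer awaiting output. And the hard cap of 15 could later be lifted into a named constant beside WAITING_MESSAGE_INTERVAL, roughly like the hypothetical sketch below.

// Hypothetical refactor sketch; MAX_WAITING_MESSAGES does not exist in this commit.
const MAX_WAITING_MESSAGES = 15;

numWaitingMessages = Math.min(
  Math.floor((relativeWaitingTime - response.requestedAt.getTime()) / WAITING_MESSAGE_INTERVAL),
  MAX_WAITING_MESSAGES,
);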