Skip to content

Commit d9dcdef

Browse files
committed
🤖 fix: prevent new chat streaming flash
Keep a newly created workspace in an optimistic starting state until the first real send reaches onChat, suppressing the transient catch-up and empty-state placeholders that could flash during the handoff from project creation to the workspace chat. Add a focused regression test that delays the initial send and verifies the starting barrier stays visible throughout the transition. --- _Generated with `mux` • Model: `openai:gpt-5.4` • Thinking: `xhigh` • Cost: `$17.56`_ <!-- mux-attribution: model=openai:gpt-5.4 thinking=xhigh costs=17.56 -->
1 parent d352efc commit d9dcdef

8 files changed

Lines changed: 533 additions & 16 deletions

File tree

src/browser/components/ChatPane/ChatPane.tsx

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -630,8 +630,14 @@ export const ChatPane: React.FC<ChatPaneProps> = (props) => {
630630

631631
const hasInterruptedStream = interruption?.hasInterruptedStream ?? false;
632632
// Keep rendering cached transcript rows during incremental catch-up so workspace switches
633-
// feel stable; only show the full placeholder when there's no transcript content yet.
634-
const showTranscriptHydrationPlaceholder = isHydratingTranscript && deferredMessages.length === 0;
633+
// feel stable, but a brand-new chat should keep its starting barrier visible instead of
634+
// flashing transcript placeholders before the first send reaches the workspace history.
635+
const showTranscriptHydrationPlaceholder =
636+
isHydratingTranscript && deferredMessages.length === 0 && !workspaceState.isStreamStarting;
637+
const showEmptyTranscriptPlaceholder =
638+
deferredMessages.length === 0 &&
639+
!showTranscriptHydrationPlaceholder &&
640+
!workspaceState.isStreamStarting;
635641
const showRetryBarrier =
636642
!isHydratingTranscript &&
637643
!workspaceState.canInterrupt &&
@@ -805,7 +811,7 @@ export const ChatPane: React.FC<ChatPaneProps> = (props) => {
805811
ref={innerRef}
806812
className={cn(
807813
"max-w-4xl mx-auto",
808-
(showTranscriptHydrationPlaceholder || deferredMessages.length === 0) && "h-full"
814+
(showTranscriptHydrationPlaceholder || showEmptyTranscriptPlaceholder) && "h-full"
809815
)}
810816
>
811817
{showTranscriptHydrationPlaceholder ? (
@@ -816,7 +822,7 @@ export const ChatPane: React.FC<ChatPaneProps> = (props) => {
816822
<h3>Loading transcript...</h3>
817823
<p>Syncing recent messages for this workspace</p>
818824
</div>
819-
) : deferredMessages.length === 0 ? (
825+
) : showEmptyTranscriptPlaceholder ? (
820826
<div className="text-placeholder flex h-full flex-1 flex-col items-center justify-center text-center [&_h3]:m-0 [&_h3]:mb-2.5 [&_h3]:text-base [&_h3]:font-medium [&_p]:m-0 [&_p]:text-[13px]">
821827
<h3>No Messages Yet</h3>
822828
<p>Send a message below to begin</p>

src/browser/components/WorkspaceShell/WorkspaceShell.tsx

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -182,7 +182,7 @@ export const WorkspaceShell: React.FC<WorkspaceShellProps> = (props) => {
182182
});
183183
const backgroundBashError = useBackgroundBashError();
184184

185-
if (!workspaceState || workspaceState.loading) {
185+
if (!workspaceState || (workspaceState.loading && !workspaceState.isStreamStarting)) {
186186
return (
187187
<WorkspacePlaceholder
188188
title="Loading workspace..."
@@ -192,10 +192,12 @@ export const WorkspaceShell: React.FC<WorkspaceShellProps> = (props) => {
192192
);
193193
}
194194

195+
// User rationale: a just-created chat should keep showing its startup barrier instead of
196+
// flashing generic loading/catch-up placeholders before the first send reaches onChat.
195197
// Web-only: during workspace switches, the WebSocket subscription needs time to
196198
// catch up. Show a splash instead of flashing stale cached messages.
197199
// Electron's MessageChannel is near-instant so this gate is unnecessary there.
198-
if (workspaceState.isHydratingTranscript && !window.api) {
200+
if (workspaceState.isHydratingTranscript && !window.api && !workspaceState.isStreamStarting) {
199201
return (
200202
<WorkspacePlaceholder
201203
title="Catching up with the agent..."

src/browser/features/ChatInput/useCreationWorkspace.test.tsx

Lines changed: 72 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@ import type {
2727
import { act, cleanup, render, waitFor } from "@testing-library/react";
2828
import { afterEach, beforeEach, describe, expect, mock, spyOn, test } from "bun:test";
2929
import { GlobalWindow } from "happy-dom";
30+
import { workspaceStore } from "@/browser/stores/WorkspaceStore";
3031
import { useCreationWorkspace, type CreationSendResult } from "./useCreationWorkspace";
3132

3233
const readPersistedStateCalls: Array<[string, unknown]> = [];
@@ -492,6 +493,9 @@ describe("useCreationWorkspace", () => {
492493
updatePersistedStateCalls.length = 0;
493494
draftSettingsInvocations = [];
494495
draftSettingsState = createDraftSettingsHarness();
496+
routerState.currentWorkspaceId = null;
497+
routerState.currentProjectId = null;
498+
routerState.pendingDraftId = null;
495499
});
496500

497501
afterEach(() => {
@@ -915,6 +919,69 @@ describe("useCreationWorkspace", () => {
915919
expect(handleSendResult).toEqual({ success: true });
916920
});
917921

922+
test("marks pending initial send only for auto-navigated creations", async () => {
923+
const listBranchesMock = mock(
924+
(): Promise<BranchListResult> =>
925+
Promise.resolve({
926+
branches: ["main"],
927+
recommendedTrunk: "main",
928+
})
929+
);
930+
const sendMessageMock = mock(
931+
(_args: WorkspaceSendMessageArgs): Promise<WorkspaceSendMessageResult> =>
932+
Promise.resolve({ success: true, data: {} } as WorkspaceSendMessageResult)
933+
);
934+
const createMock = mock(
935+
(_args: WorkspaceCreateArgs): Promise<WorkspaceCreateResult> =>
936+
Promise.resolve({
937+
success: true,
938+
metadata: TEST_METADATA,
939+
} as WorkspaceCreateResult)
940+
);
941+
const nameGenerationMock = mock(
942+
(_args: NameGenerationArgs): Promise<NameGenerationResult> =>
943+
Promise.resolve({
944+
success: true,
945+
data: { name: "generated-name", modelUsed: "anthropic:claude-haiku-4-5" },
946+
} as NameGenerationResult)
947+
);
948+
setupWindow({
949+
listBranches: listBranchesMock,
950+
sendMessage: sendMessageMock,
951+
create: createMock,
952+
nameGeneration: nameGenerationMock,
953+
});
954+
955+
draftSettingsState = createDraftSettingsHarness({ trunkBranch: "main" });
956+
routerState.pendingDraftId = "different-draft";
957+
const onWorkspaceCreated = mock(
958+
(metadata: FrontendWorkspaceMetadata, options?: { autoNavigate?: boolean }) => ({
959+
metadata,
960+
options,
961+
})
962+
);
963+
const markPendingInitialSendSpy = spyOn(workspaceStore, "markPendingInitialSend");
964+
965+
const getHook = renderUseCreationWorkspace({
966+
projectPath: TEST_PROJECT_PATH,
967+
onWorkspaceCreated,
968+
message: "test message",
969+
draftId: "draft-being-created",
970+
});
971+
972+
await waitFor(() => expect(getHook().branches).toEqual(["main"]));
973+
974+
let handleSendResult: CreationSendResult | undefined;
975+
await act(async () => {
976+
handleSendResult = await getHook().handleSend("test message");
977+
});
978+
979+
expect(handleSendResult).toEqual({ success: true });
980+
expect(onWorkspaceCreated.mock.calls.length).toBe(1);
981+
expect(onWorkspaceCreated.mock.calls[0][1]).toEqual({ autoNavigate: false });
982+
expect(markPendingInitialSendSpy.mock.calls.length).toBe(0);
983+
});
984+
918985
test("handleSend surfaces backend errors and resets state", async () => {
919986
const createMock = mock(
920987
(_args: WorkspaceCreateArgs): Promise<WorkspaceCreateResult> =>
@@ -1078,8 +1145,12 @@ function createDraftSettingsHarness(
10781145

10791146
interface HookOptions {
10801147
projectPath: string;
1081-
onWorkspaceCreated: (metadata: FrontendWorkspaceMetadata) => void;
1148+
onWorkspaceCreated: (
1149+
metadata: FrontendWorkspaceMetadata,
1150+
options?: { autoNavigate?: boolean }
1151+
) => void;
10821152
message?: string;
1153+
draftId?: string | null;
10831154
}
10841155

10851156
function renderUseCreationWorkspace(options: HookOptions) {

src/browser/features/ChatInput/useCreationWorkspace.ts

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,7 @@ import { normalizeModelInput } from "@/browser/utils/models/normalizeModelInput"
5454
import { resolveDevcontainerSelection } from "@/browser/utils/devcontainerSelection";
5555
import { getErrorMessage } from "@/common/utils/errors";
5656
import { normalizeAgentId } from "@/common/utils/agentIds";
57+
import { workspaceStore } from "@/browser/stores/WorkspaceStore";
5758
import { WORKSPACE_DEFAULTS } from "@/constants/workspaceDefaults";
5859

5960
export type CreationSendResult = { success: true } | { success: false; error?: SendMessageError };
@@ -392,6 +393,8 @@ export function useCreationWorkspace({
392393
: null
393394
);
394395

396+
let createdWorkspaceId: string | null = null;
397+
395398
try {
396399
// Wait for identity generation to complete (blocks if still in progress)
397400
// Returns null if generation failed or manual name is empty (error already set in hook)
@@ -506,6 +509,7 @@ export function useCreationWorkspace({
506509
}
507510

508511
const { metadata } = createResult;
512+
createdWorkspaceId = metadata.id;
509513

510514
// Best-effort: persist the initial AI settings to the backend immediately so this workspace
511515
// is portable across devices even before the first stream starts.
@@ -560,6 +564,13 @@ export function useCreationWorkspace({
560564
})();
561565

562566
onWorkspaceCreated(metadata, { autoNavigate: shouldAutoNavigate });
567+
if (shouldAutoNavigate) {
568+
// User rationale: after creating a brand-new chat, keep the workspace in a visible
569+
// "starting" state until onChat observes the first real user message or error.
570+
// Background-created workspaces should skip this optimistic flag so they don't open later
571+
// looking like a stale in-flight startup.
572+
workspaceStore.markPendingInitialSend(metadata.id, baseModel);
573+
}
563574

564575
if (typeof draftId === "string" && draftId.trim().length > 0 && promoteWorkspaceDraft) {
565576
// UI-only: show the created workspace in-place where the draft was rendered.
@@ -594,6 +605,9 @@ export function useCreationWorkspace({
594605
});
595606

596607
if (!sendResult.success) {
608+
if (createdWorkspaceId) {
609+
workspaceStore.clearPendingInitialSendState(createdWorkspaceId);
610+
}
597611
if (sendResult.error) {
598612
// Persist the failure so the workspace view can surface a toast after navigation.
599613
updatePersistedState(getPendingWorkspaceSendErrorKey(metadata.id), sendResult.error);
@@ -603,6 +617,9 @@ export function useCreationWorkspace({
603617

604618
return { success: true };
605619
} catch (err) {
620+
if (createdWorkspaceId) {
621+
workspaceStore.clearPendingInitialSendState(createdWorkspaceId);
622+
}
606623
const errorMessage = getErrorMessage(err);
607624
setToast({
608625
id: Date.now().toString(),

src/browser/stores/WorkspaceStore.test.ts

Lines changed: 137 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1571,6 +1571,88 @@ describe("WorkspaceStore", () => {
15711571
expect(store.getWorkspaceState(workspaceId).isStreamStarting).toBe(false);
15721572
});
15731573

1574+
it("ignores non-streaming activity snapshots while optimistic start awaits replay", async () => {
1575+
const workspaceId = "optimistic-pending-start-activity-list";
1576+
const requestedModel = "openai:gpt-4o-mini";
1577+
let releaseCaughtUp!: () => void;
1578+
const caughtUpReady = new Promise<void>((resolve) => {
1579+
releaseCaughtUp = resolve;
1580+
});
1581+
1582+
mockActivityList.mockResolvedValue({
1583+
[workspaceId]: {
1584+
recency: 3_000,
1585+
streaming: false,
1586+
lastModel: requestedModel,
1587+
lastThinkingLevel: null,
1588+
},
1589+
});
1590+
recreateStore();
1591+
mockOnChat.mockImplementation(async function* (
1592+
input?: { workspaceId: string; mode?: unknown },
1593+
options?: { signal?: AbortSignal }
1594+
): AsyncGenerator<WorkspaceChatMessage, void, unknown> {
1595+
if (input?.workspaceId !== workspaceId) {
1596+
await waitForAbortSignal(options?.signal);
1597+
return;
1598+
}
1599+
1600+
await caughtUpReady;
1601+
yield { type: "caught-up", replay: "full" };
1602+
await waitForAbortSignal(options?.signal);
1603+
});
1604+
1605+
createAndAddWorkspace(store, workspaceId);
1606+
store.markPendingInitialSend(workspaceId, requestedModel);
1607+
1608+
const keptStartingBeforeReplay = await waitUntil(() => {
1609+
const state = store.getWorkspaceState(workspaceId);
1610+
return state.loading === true && state.isStreamStarting === true;
1611+
});
1612+
expect(keptStartingBeforeReplay).toBe(true);
1613+
1614+
releaseCaughtUp();
1615+
});
1616+
1617+
it("clears optimistic starting state when caught-up reports no active stream", async () => {
1618+
const workspaceId = "optimistic-pending-start-caught-up-clear";
1619+
const requestedModel = "openai:gpt-4o-mini";
1620+
let releaseCaughtUp!: () => void;
1621+
const caughtUpReady = new Promise<void>((resolve) => {
1622+
releaseCaughtUp = resolve;
1623+
});
1624+
1625+
mockOnChat.mockImplementation(async function* (
1626+
input?: { workspaceId: string; mode?: unknown },
1627+
options?: { signal?: AbortSignal }
1628+
): AsyncGenerator<WorkspaceChatMessage, void, unknown> {
1629+
if (input?.workspaceId !== workspaceId) {
1630+
await waitForAbortSignal(options?.signal);
1631+
return;
1632+
}
1633+
1634+
await caughtUpReady;
1635+
yield { type: "caught-up", replay: "full" };
1636+
await waitForAbortSignal(options?.signal);
1637+
});
1638+
1639+
createAndAddWorkspace(store, workspaceId);
1640+
store.markPendingInitialSend(workspaceId, requestedModel);
1641+
1642+
const sawStarting = await waitUntil(
1643+
() => store.getWorkspaceState(workspaceId).isStreamStarting
1644+
);
1645+
expect(sawStarting).toBe(true);
1646+
1647+
releaseCaughtUp();
1648+
1649+
const clearedStarting = await waitUntil(() => {
1650+
const state = store.getWorkspaceState(workspaceId);
1651+
return !state.loading && state.isStreamStarting === false;
1652+
});
1653+
expect(clearedStarting).toBe(true);
1654+
});
1655+
15741656
it("replays runtime-status before caught-up when switching back to a preparing workspace", async () => {
15751657
const workspaceId = "stream-starting-runtime-status-replay";
15761658
const otherWorkspaceId = "stream-starting-runtime-status-other";
@@ -1818,6 +1900,61 @@ describe("WorkspaceStore", () => {
18181900
expect(sawStarting).toBe(true);
18191901
});
18201902

1903+
it("keeps optimistic starting state until buffered first-turn history finishes catching up", async () => {
1904+
const workspaceId = "optimistic-pending-start-replay";
1905+
const requestedModel = "openai:gpt-4o-mini";
1906+
let releaseBufferedUser!: () => void;
1907+
let releaseCaughtUp!: () => void;
1908+
const bufferedUserReady = new Promise<void>((resolve) => {
1909+
releaseBufferedUser = resolve;
1910+
});
1911+
const caughtUpReady = new Promise<void>((resolve) => {
1912+
releaseCaughtUp = resolve;
1913+
});
1914+
1915+
mockOnChat.mockImplementation(async function* (
1916+
input?: { workspaceId: string; mode?: unknown },
1917+
options?: { signal?: AbortSignal }
1918+
): AsyncGenerator<WorkspaceChatMessage, void, unknown> {
1919+
if (input?.workspaceId !== workspaceId) {
1920+
await waitForAbortSignal(options?.signal);
1921+
return;
1922+
}
1923+
1924+
await bufferedUserReady;
1925+
yield createUserMessageEvent("buffered-first-turn", "hello", 1, 2_750, requestedModel);
1926+
await caughtUpReady;
1927+
yield { type: "caught-up", replay: "full" };
1928+
await waitForAbortSignal(options?.signal);
1929+
});
1930+
1931+
createAndAddWorkspace(store, workspaceId);
1932+
store.markPendingInitialSend(workspaceId, requestedModel);
1933+
releaseBufferedUser();
1934+
1935+
const keptStartingWhileBuffered = await waitUntil(() => {
1936+
const state = store.getWorkspaceState(workspaceId);
1937+
return (
1938+
state.loading === true &&
1939+
state.isStreamStarting === true &&
1940+
state.pendingStreamModel === requestedModel
1941+
);
1942+
});
1943+
expect(keptStartingWhileBuffered).toBe(true);
1944+
1945+
releaseCaughtUp();
1946+
1947+
const renderedBufferedHistoryAfterCaughtUp = await waitUntil(() => {
1948+
const state = store.getWorkspaceState(workspaceId);
1949+
return (
1950+
state.loading === false &&
1951+
state.isStreamStarting === false &&
1952+
state.messages.some((message) => message.type === "user")
1953+
);
1954+
});
1955+
expect(renderedBufferedHistoryAfterCaughtUp).toBe(true);
1956+
});
1957+
18211958
it("exposes the pending requested model in sidebar state during startup", async () => {
18221959
const workspaceId = "stream-starting-pending-model-workspace";
18231960
const requestedModel = "openai:gpt-4o-mini";

0 commit comments

Comments (0)