Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
64 changes: 53 additions & 11 deletions lib/codex-cli/sync.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,12 @@ import {
getLastAccountsSaveTimestamp,
getStoragePath,
} from "../storage.js";
import {
appendSyncHistoryEntry,
cloneSyncHistoryEntry,
readLatestSyncHistorySync,
readSyncHistory,
} from "../sync-history.js";
import {
incrementCodexCliMetric,
makeAccountFingerprint,
Expand Down Expand Up @@ -100,6 +106,7 @@ interface ReconcileResult {
}

let lastCodexCliSyncRun: CodexCliSyncRun | null = null;
let lastHistoryLoadAttempted = false;

function createEmptySyncSummary(): CodexCliSyncSummary {
return {
Expand All @@ -114,21 +121,56 @@ function createEmptySyncSummary(): CodexCliSyncSummary {
};
}

function setLastCodexCliSyncRun(run: CodexCliSyncRun): void {
async function setLastCodexCliSyncRun(run: CodexCliSyncRun): Promise<void> {
lastCodexCliSyncRun = run;
try {
await appendSyncHistoryEntry({
kind: "codex-cli-sync",
recordedAt: run.runAt,
run,
});
} catch (error) {
log.debug("Failed to record codex-cli sync history", {
error: error instanceof Error ? error.message : String(error),
});
}
}

export function getLastCodexCliSyncRun(): CodexCliSyncRun | null {
return lastCodexCliSyncRun
? {
...lastCodexCliSyncRun,
summary: { ...lastCodexCliSyncRun.summary },
}
: null;
if (lastCodexCliSyncRun) {
return {
...lastCodexCliSyncRun,
summary: { ...lastCodexCliSyncRun.summary },
};
}

if (!lastHistoryLoadAttempted) {
lastHistoryLoadAttempted = true;
const latest = readLatestSyncHistorySync();
const cloned = cloneSyncHistoryEntry(latest);
if (cloned?.kind === "codex-cli-sync") {
lastCodexCliSyncRun = cloned.run;
return {
...cloned.run,
summary: { ...cloned.run.summary },
};
}
void readSyncHistory({ kind: "codex-cli-sync", limit: 1 })
.then((entries) => {
const latestCodexEntry = entries.at(-1);
if (latestCodexEntry?.kind === "codex-cli-sync") {
lastCodexCliSyncRun = latestCodexEntry.run;
Copy link

@cubic-dev-ai cubic-dev-ai bot Mar 13, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P2: Async history fallback can overwrite a newer in-memory sync run with stale data.

Prompt for AI agents
Check if this issue is valid — if so, understand the root cause and fix it. At lib/codex-cli/sync.ts, line 162:

<comment>Async history fallback can overwrite a newer in-memory sync run with stale data.</comment>

<file context>
@@ -154,6 +155,14 @@ export function getLastCodexCliSyncRun(): CodexCliSyncRun | null {
+			.then((entries) => {
+				const latestCodexEntry = entries.at(-1);
+				if (latestCodexEntry?.kind === "codex-cli-sync") {
+					lastCodexCliSyncRun = latestCodexEntry.run;
+				}
+			})
</file context>
Suggested change
lastCodexCliSyncRun = latestCodexEntry.run;
if (
!lastCodexCliSyncRun ||
latestCodexEntry.run.runAt >= lastCodexCliSyncRun.runAt
) {
lastCodexCliSyncRun = latestCodexEntry.run;
}
Fix with Cubic

}
})
.catch(() => undefined);
Comment on lines +158 to +165
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Background async read can silently overwrite a fresher in-memory result

the fire-and-forget readSyncHistory callback unconditionally overwrites lastCodexCliSyncRun. if a sync run completes (calling setLastCodexCliSyncRun) between the moment this background read is queued and the moment its .then resolves, the fresh in-memory result gets clobbered by the stale on-disk entry. this is an async ordering race unique to node's event loop interleaving.

fix: guard the write with a check for whether lastCodexCliSyncRun has already been set:

Suggested change
void readSyncHistory({ kind: "codex-cli-sync", limit: 1 })
.then((entries) => {
const latestCodexEntry = entries.at(-1);
if (latestCodexEntry?.kind === "codex-cli-sync") {
lastCodexCliSyncRun = latestCodexEntry.run;
}
})
.catch(() => undefined);
void readSyncHistory({ kind: "codex-cli-sync", limit: 1 })
.then((entries) => {
if (lastCodexCliSyncRun) return; // a real sync already ran; keep it
const latestCodexEntry = entries.at(-1);
if (latestCodexEntry?.kind === "codex-cli-sync") {
lastCodexCliSyncRun = latestCodexEntry.run;
}
})
.catch(() => undefined);

no vitest coverage for this race today — a test that interleaves syncAccountStorageFromCodexCli with a simultaneous cold getLastCodexCliSyncRun would reproduce it.

Prompt To Fix With AI
This is a comment left during a code review.
Path: lib/codex-cli/sync.ts
Line: 158-165

Comment:
**Background async read can silently overwrite a fresher in-memory result**

the fire-and-forget `readSyncHistory` callback unconditionally overwrites `lastCodexCliSyncRun`. if a sync run completes (calling `setLastCodexCliSyncRun`) between the moment this background read is queued and the moment its `.then` resolves, the fresh in-memory result gets clobbered by the stale on-disk entry. this is an async ordering race unique to node's event loop interleaving.

fix: guard the write with a check for whether `lastCodexCliSyncRun` has already been set:

```suggestion
		void readSyncHistory({ kind: "codex-cli-sync", limit: 1 })
			.then((entries) => {
				if (lastCodexCliSyncRun) return; // a real sync already ran; keep it
				const latestCodexEntry = entries.at(-1);
				if (latestCodexEntry?.kind === "codex-cli-sync") {
					lastCodexCliSyncRun = latestCodexEntry.run;
				}
			})
			.catch(() => undefined);
```

no vitest coverage for this race today — a test that interleaves `syncAccountStorageFromCodexCli` with a simultaneous cold `getLastCodexCliSyncRun` would reproduce it.

How can I resolve this? If you propose a fix, please make it concise.

Fix in Codex

}

return null;
}
Comment on lines 139 to 169
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
# check if callers of getLastCodexCliSyncRun can tolerate async
rg -n 'getLastCodexCliSyncRun\s*\(' --type=ts

Repository: ndycode/codex-multi-auth

Length of output: 384


🏁 Script executed:

#!/bin/bash
# Check context around line 486 in lib/codex-cli/sync.ts
sed -n '480,495n' lib/codex-cli/sync.ts

Repository: ndycode/codex-multi-auth

Length of output: 50


🏁 Script executed:

#!/bin/bash
# Check the implementation of readLatestSyncHistorySync
sed -n '150,160n' lib/sync-history.ts

Repository: ndycode/codex-multi-auth

Length of output: 50


🏁 Script executed:

#!/bin/bash
# Check test coverage for lazy-load behavior
sed -n '835,855n' test/codex-cli-sync.test.ts

Repository: ndycode/codex-multi-auth

Length of output: 50


🏁 Script executed:

#!/bin/bash
# Verify if getLastCodexCliSyncRun is called from an async context that could be converted
rg -B5 'getLastCodexCliSyncRun\(\)' lib/codex-cli/sync.ts | head -20

Repository: ndycode/codex-multi-auth

Length of output: 394


🏁 Script executed:

#!/bin/bash
# Read the full getLastCodexCliSyncRun function implementation
sed -n '139,175n' lib/codex-cli/sync.ts

Repository: ndycode/codex-multi-auth

Length of output: 50


🏁 Script executed:

#!/bin/bash
# Check the readLatestSyncHistorySync implementation
rg -A10 'function readLatestSyncHistorySync' lib/sync-history.ts

Repository: ndycode/codex-multi-auth

Length of output: 520


🏁 Script executed:

#!/bin/bash
# Check previewCodexCliSync function signature and whether it's async
rg -B2 'const lastSync = getLastCodexCliSyncRun' lib/codex-cli/sync.ts

Repository: ndycode/codex-multi-auth

Length of output: 165


🏁 Script executed:

#!/bin/bash
# Find readLatestSyncHistorySync to verify readFileSync usage
rg 'readLatestSyncHistorySync|readFileSync' lib/sync-history.ts

Repository: ndycode/codex-multi-auth

Length of output: 268


🏁 Script executed:

#!/bin/bash
# Read full getLastCodexCliSyncRun implementation
sed -n '139,175n' lib/codex-cli/sync.ts | cat -n

Repository: ndycode/codex-multi-auth

Length of output: 50


🏁 Script executed:

#!/bin/bash
# Check if previewCodexCliSync is async
rg -B5 -A15 'export function previewCodexCliSync' lib/codex-cli/sync.ts | head -30

Repository: ndycode/codex-multi-auth

Length of output: 50


🏁 Script executed:

#!/bin/bash
# Check readSyncHistory call and fire-and-forget pattern
sed -n '158,167n' lib/codex-cli/sync.ts | cat -n

Repository: ndycode/codex-multi-auth

Length of output: 50


🏁 Script executed:

#!/bin/bash
# Check test coverage - what does the test verify?
sed -n '835,860n' test/codex-cli-sync.test.ts | cat -n

Repository: ndycode/codex-multi-auth

Length of output: 50


🏁 Script executed:

#!/bin/bash
# Check actual file sizes and line counts
wc -l lib/codex-cli/sync.ts test/codex-cli-sync.test.ts

Repository: ndycode/codex-multi-auth

Length of output: 141


🏁 Script executed:

#!/bin/bash
# Read the entire getLastCodexCliSyncRun function using rg
rg -A30 'export function getLastCodexCliSyncRun' lib/codex-cli/sync.ts

Repository: ndycode/codex-multi-auth

Length of output: 875


🏁 Script executed:

#!/bin/bash
# Check previewCodexCliSync function
rg -A20 'export function previewCodexCliSync' lib/codex-cli/sync.ts

Repository: ndycode/codex-multi-auth

Length of output: 50


🏁 Script executed:

#!/bin/bash
# Check how readSyncHistory is called in the function
rg -B5 'readSyncHistory.*codex-cli-sync' lib/codex-cli/sync.ts

Repository: ndycode/codex-multi-auth

Length of output: 209


🏁 Script executed:

#!/bin/bash
# Get full previewCodexCliSync function signature and context
rg -B2 -A40 'function previewCodexCliSync' lib/codex-cli/sync.ts | head -50

Repository: ndycode/codex-multi-auth

Length of output: 1259


🏁 Script executed:

#!/bin/bash
# Read the test cases in detail
sed -n '835,875n' test/codex-cli-sync.test.ts

Repository: ndycode/codex-multi-auth

Length of output: 50


🏁 Script executed:

#!/bin/bash
# Check what kind of entries could be in sync history
rg 'kind.*codex-cli-sync|kind.*live-account-sync' lib/sync-history.ts | head -10

Repository: ndycode/codex-multi-auth

Length of output: 120


🏁 Script executed:

#!/bin/bash
# Check if there are other async calls near getLastCodexCliSyncRun to assess hot path claim
sed -n '480,510n' lib/codex-cli/sync.ts

Repository: ndycode/codex-multi-auth

Length of output: 50


🏁 Script executed:

#!/bin/bash
# Read the test file content to understand coverage
cat test/codex-cli-sync.test.ts | grep -A20 -B5 'getLastCodexCliSyncRun'

Repository: ndycode/codex-multi-auth

Length of output: 2125


🏁 Script executed:

#!/bin/bash
# Check if there are any tests for the lazy-load behavior or race condition
rg -n 'lastHistoryLoadAttempted|readLatestSyncHistorySync' test/codex-cli-sync.test.ts

Repository: ndycode/codex-multi-auth

Length of output: 50


🏁 Script executed:

#!/bin/bash
# Verify if __resetLastCodexCliSyncRunForTests exists and is used
rg -n '__resetLastCodexCliSyncRunForTests' lib/codex-cli/sync.ts test/codex-cli-sync.test.ts

Repository: ndycode/codex-multi-auth

Length of output: 435


🏁 Script executed:

#!/bin/bash
# Check the __resetLastCodexCliSyncRunForTests implementation
sed -n '171,175n' lib/codex-cli/sync.ts

Repository: ndycode/codex-multi-auth

Length of output: 50


🏁 Script executed:

#!/bin/bash
# Check if lastHistoryLoadAttempted is reset
rg -B2 -A5 '__resetLastCodexCliSyncRunForTests' lib/codex-cli/sync.ts

Repository: ndycode/codex-multi-auth

Length of output: 230


blocking sync i/o in async hot path and fire-and-forget race

readLatestSyncHistorySync() at lib/codex-cli/sync.ts:149 uses readFileSync (lib/sync-history.ts) and blocks the event loop on the first call to getLastCodexCliSyncRun(), which is called from the async previewCodexCliSync() (lib/codex-cli/sync.ts:486) on every preview request.

the fire-and-forget async call at lines 158-165 creates a subtle race: if readLatestSyncHistorySync() returns a live-account-sync entry (kind check fails at line 154), the async readSyncHistory() is kicked off without awaiting. a second call to getLastCodexCliSyncRun() before that resolves returns null even though history contains a valid codex-cli-sync entry. the test at test/codex-cli-sync.test.ts:849-860 covers the lazy-load behavior but doesn't verify the concurrent-call scenario.

consider converting the entire function to async or caching the hydration promise to avoid both the blocking i/o and the race condition.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@lib/codex-cli/sync.ts` around lines 139 - 169, The function
getLastCodexCliSyncRun currently does blocking I/O via readLatestSyncHistorySync
and kicks off a fire-and-forget readSyncHistory, causing event-loop blocking and
a race; fix by making the hydration non-blocking and de-duplicated: either
convert getLastCodexCliSyncRun to async and replace readLatestSyncHistorySync
with an awaited async reader (or readLatestSyncHistory), or introduce a cached
promise (e.g. lastHistoryLoadPromise) that is set when the first load starts and
awaited on subsequent calls; ensure you update/await this promise before
returning or setting lastCodexCliSyncRun, remove fire-and-forget usage of
readSyncHistory, and update callers such as previewCodexCliSync to await the new
async behavior so callers no longer get null spuriously.


/**
 * Test hook: clears the cached sync run and re-arms the one-shot history
 * hydration so tests can exercise the cold-start path again.
 */
export function __resetLastCodexCliSyncRunForTests(): void {
	lastHistoryLoadAttempted = false;
	lastCodexCliSyncRun = null;
}

function buildIndexByAccountId(
Expand Down Expand Up @@ -527,7 +569,7 @@ export async function syncAccountStorageFromCodexCli(
const state = await loadCodexCliState();
if (!state) {
incrementCodexCliMetric("reconcileNoops");
setLastCodexCliSyncRun({
await setLastCodexCliSyncRun({
outcome:
(process.env.CODEX_MULTI_AUTH_SYNC_CODEX_CLI ?? "").trim() === "0"
? "disabled"
Expand All @@ -554,7 +596,7 @@ export async function syncAccountStorageFromCodexCli(

if (next.accounts.length === 0) {
incrementCodexCliMetric(changed ? "reconcileChanges" : "reconcileNoops");
setLastCodexCliSyncRun({
await setLastCodexCliSyncRun({
outcome: changed ? "changed" : "noop",
runAt: Date.now(),
sourcePath: state.path,
Expand All @@ -574,7 +616,7 @@ export async function syncAccountStorageFromCodexCli(

incrementCodexCliMetric(changed ? "reconcileChanges" : "reconcileNoops");
const activeFromSnapshots = readActiveFromSnapshots(state.accounts);
setLastCodexCliSyncRun({
await setLastCodexCliSyncRun({
outcome: changed ? "changed" : "noop",
runAt: Date.now(),
sourcePath: state.path,
Expand All @@ -596,7 +638,7 @@ export async function syncAccountStorageFromCodexCli(
};
} catch (error) {
incrementCodexCliMetric("reconcileFailures");
setLastCodexCliSyncRun({
await setLastCodexCliSyncRun({
outcome: "error",
runAt: Date.now(),
sourcePath: null,
Expand Down
26 changes: 25 additions & 1 deletion lib/live-account-sync.ts
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import { type FSWatcher, promises as fs, watch as fsWatch } from "node:fs";
import { basename, dirname } from "node:path";
import { createLogger } from "./logger.js";
import { appendSyncHistoryEntry } from "./sync-history.js";

const log = createLogger("live-account-sync");

Expand Down Expand Up @@ -99,7 +100,6 @@ export class LiveAccountSync {
private reloadCount = 0;
private errorCount = 0;
private reloadInFlight: Promise<void> | null = null;

constructor(
reload: () => Promise<void>,
options: LiveAccountSyncOptions = {},
Expand Down Expand Up @@ -194,6 +194,24 @@ export class LiveAccountSync {
lastLiveAccountSyncSnapshot = this.getSnapshot();
}

private async recordHistory(
reason: "watch" | "poll" | "manual",
outcome: "success" | "error",
message?: string,
): Promise<void> {
const snapshot = this.getSnapshot();
const entry = {
kind: "live-account-sync" as const,
recordedAt: Date.now(),
reason,
outcome,
path: this.currentPath,
message,
snapshot,
};
await appendSyncHistoryEntry(entry);
}

private scheduleReload(reason: "watch" | "poll"): void {
if (!this.running) return;
if (this.debounceTimer) {
Expand Down Expand Up @@ -240,13 +258,19 @@ export class LiveAccountSync {
reason,
path: summarizeWatchPath(targetPath),
});
await this.recordHistory(reason, "success");
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

History write blocks publishSnapshot — latency risk on Windows

await this.recordHistory(reason, "success") is called inside this.reloadInFlight before the outer finally block calls this.publishSnapshot(). on windows, appendFile on a file that an AV scanner is momentarily holding can stall for hundreds of milliseconds. the snapshot (and therefore any status surface reading getLastLiveAccountSyncSnapshot) won't update until that stall resolves. subsequent runReload calls that check this.reloadInFlight will queue behind this I/O too.

recordHistory should be fire-and-forget here, matching the same pattern used in setLastCodexCliSyncRun (try/catch + no await on the caller side):

// after reloadCount/mtime update, before the try block exits:
void this.recordHistory(reason, "success").catch(() => {});
// and in the catch block:
void this.recordHistory(reason, "error", error instanceof Error ? error.message : String(error)).catch(() => {});

since appendSyncHistoryEntry already swallows its own errors, the void+catch is purely defensive.

Prompt To Fix With AI
This is a comment left during a code review.
Path: lib/live-account-sync.ts
Line: 261

Comment:
**History write blocks `publishSnapshot` — latency risk on Windows**

`await this.recordHistory(reason, "success")` is called inside `this.reloadInFlight` before the outer `finally` block calls `this.publishSnapshot()`. on windows, `appendFile` on a file that an AV scanner is momentarily holding can stall for hundreds of milliseconds. the snapshot (and therefore any status surface reading `getLastLiveAccountSyncSnapshot`) won't update until that stall resolves. subsequent `runReload` calls that check `this.reloadInFlight` will queue behind this I/O too.

`recordHistory` should be fire-and-forget here, matching the same pattern used in `setLastCodexCliSyncRun` (try/catch + no await on the caller side):

```ts
// after reloadCount/mtime update, before the try block exits:
void this.recordHistory(reason, "success").catch(() => {});
// and in the catch block:
void this.recordHistory(reason, "error", error instanceof Error ? error.message : String(error)).catch(() => {});
```

since `appendSyncHistoryEntry` already swallows its own errors, the void+catch is purely defensive.

How can I resolve this? If you propose a fix, please make it concise.

Fix in Codex

} catch (error) {
this.errorCount += 1;
log.warn("Live account sync reload failed", {
reason,
path: summarizeWatchPath(targetPath),
error: error instanceof Error ? error.message : String(error),
});
await this.recordHistory(
reason,
"error",
error instanceof Error ? error.message : String(error),
);
Comment on lines +261 to +273
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

awaiting history write blocks reload completion

recordHistory is awaited inside runReload, so any delay in appendSyncHistoryEntry (e.g., windows file locks, slow disk) extends the time before reloadInFlight clears. during that window, subsequent poll/watch triggers queue behind it.

the current appendSyncHistoryEntry does not retry on EBUSY — it logs and moves on. on windows, where file locks are more common, you may see sporadic history gaps.

if history write latency becomes a concern, consider fire-and-forget with a bounded in-memory queue or at least logging when writes are slow.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@lib/live-account-sync.ts` around lines 261 - 273, The current await
this.recordHistory(...) inside runReload causes reloadInFlight to remain set
until history I/O completes; change recordHistory (and callers in runReload) to
be non-blocking by enqueuing history entries to a bounded in-memory queue
serviced by a background worker instead of awaiting appendSyncHistoryEntry
directly—update recordHistory to push {reason,status,message} into the queue and
return immediately, implement a background flush loop that retries on EBUSY
(referencing appendSyncHistoryEntry) with backoff and emits warnings when writes
are slow or queue is near capacity, and ensure runReload/reloadInFlight logic no
longer awaits the write so subsequent reload triggers aren’t delayed.

}
})();

Expand Down
210 changes: 210 additions & 0 deletions lib/sync-history.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,210 @@
import { promises as fs, readFileSync } from "node:fs";
import { join } from "node:path";
import type { CodexCliSyncRun } from "./codex-cli/sync.js";
import type { LiveAccountSyncSnapshot } from "./live-account-sync.js";
import { createLogger } from "./logger.js";
import { getCodexLogDir } from "./runtime-paths.js";

const log = createLogger("sync-history");

// Discriminator for the two kinds of history records this module persists.
type SyncHistoryKind = "codex-cli-sync" | "live-account-sync";

/** History record for one codex-cli sync run. */
export interface CodexCliSyncHistoryEntry {
	kind: "codex-cli-sync";
	// Epoch-ms timestamp when the entry was written.
	recordedAt: number;
	// Full run result as produced by the codex-cli sync (see codex-cli/sync.js).
	run: CodexCliSyncRun;
}

/** History record for one live-account-sync reload attempt. */
export interface LiveAccountSyncHistoryEntry {
	kind: "live-account-sync";
	// Epoch-ms timestamp when the entry was written.
	recordedAt: number;
	// What triggered the reload: a file-watch event, a poll tick, or a manual call.
	reason: "watch" | "poll" | "manual";
	outcome: "success" | "error";
	// Watched file path, or null if none was active at the time.
	path: string | null;
	// Optional detail, typically the error message for "error" outcomes.
	message?: string;
	// Metrics snapshot taken when the entry was recorded.
	snapshot: LiveAccountSyncSnapshot;
}

/** Any record that can appear in the sync history log (discriminated on `kind`). */
export type SyncHistoryEntry =
	| CodexCliSyncHistoryEntry
	| LiveAccountSyncHistoryEntry;

/** Resolved filesystem locations for the history files. */
interface SyncHistoryPaths {
	directory: string;
	historyPath: string;
	latestPath: string;
}

// Append-only newline-delimited JSON log of every entry.
const HISTORY_FILE_NAME = "sync-history.ndjson";
// Pretty-printed copy of only the most recent entry, for cheap synchronous reads.
const LATEST_FILE_NAME = "sync-history-latest.json";
Comment on lines +38 to +39
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🧹 Nitpick | 🔵 Trivial

unbounded history file growth

sync-history.ndjson grows without limit. readSyncHistory loads the entire file into memory (line 126-129). after weeks/months of usage, this could become large.

consider adding rotation (e.g., keep last N entries or last X bytes) or truncating old entries periodically. alternatively, if only the latest entry is typically needed, consider if the ndjson history is even necessary.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@lib/sync-history.ts` around lines 38 - 39, The sync-history.ndjson file
(HISTORY_FILE_NAME) can grow unbounded because readSyncHistory currently loads
the entire file; update the implementation to limit growth by rotating or
truncating old entries: modify readSyncHistory to stream the file and only
retain the last N records (or last X bytes) instead of reading the whole file
into memory, and update the append-side logic (where new entries are written) to
trigger rotation/truncation when file size or line count exceeds a threshold
(e.g., roll to a timestamped backup or rewrite file keeping only the most recent
N entries); reference HISTORY_FILE_NAME and the readSyncHistory/any
appendSyncHistory functions to locate the code to change.


// Optional override of the history directory (null → default codex log dir).
let historyDirOverride: string | null = null;
// Promise chain acting as an in-process mutex that serializes history writes.
let historyMutex: Promise<void> = Promise.resolve();
// Message of the most recent failed append; reset to null on a successful append.
let lastAppendError: string | null = null;
// Paths targeted by the most recent append attempt.
let lastAppendPaths: SyncHistoryPaths | null = null;
// Appends currently in flight; readers await these so reads see queued writes.
const pendingHistoryWrites = new Set<Promise<void>>();

/**
 * Blocks until every append currently in flight has settled, regardless of
 * whether it succeeded or failed.
 */
async function waitForPendingHistoryWrites(): Promise<void> {
	if (pendingHistoryWrites.size > 0) {
		await Promise.allSettled([...pendingHistoryWrites]);
	}
}

/** Resolves the directory for history files, honoring any active override. */
function getHistoryDirectory(): string {
	if (historyDirOverride !== null) {
		return historyDirOverride;
	}
	return getCodexLogDir();
}

/**
 * Returns the history directory together with the full paths of the ndjson
 * log and the latest-entry file inside it.
 */
export function getSyncHistoryPaths(): SyncHistoryPaths {
	const directory = getHistoryDirectory();
	const historyPath = join(directory, HISTORY_FILE_NAME);
	const latestPath = join(directory, LATEST_FILE_NAME);
	return { directory, historyPath, latestPath };
}

/**
 * Runs `fn` after all previously queued history operations have finished.
 *
 * Implements a promise-chain mutex: each call captures the current tail of
 * the chain, synchronously installs a new unresolved tail, and resolves that
 * tail only once `fn` settles (the `.finally` fires on success or failure),
 * so queued operations execute one at a time in call order. This serializes
 * writes within this process only — it is not a cross-process file lock.
 */
function withHistoryLock<T>(fn: () => Promise<T>): Promise<T> {
	const previous = historyMutex;
	let release: () => void = () => {};
	historyMutex = new Promise<void>((resolve) => {
		release = resolve;
	});
	return previous.then(fn).finally(() => release());
}
Comment on lines +65 to +72
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

in-process-only mutex

lib/sync-history.ts:65-72 serializes writes within a single process, but multiple extension hosts or codex-cli processes can run concurrently. on windows especially, concurrent appendFile calls to the same file can interleave bytes or fail with EBUSY.

consider using proper-lockfile or advisory file locks if cross-process safety is required. if not, document the single-process assumption.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@lib/sync-history.ts` around lines 65 - 72, The current in-process mutex
implemented in withHistoryLock (using historyMutex and release) only serializes
writes within a single process but does not prevent concurrent access from other
processes; replace this with a cross-process advisory lock (e.g., use the
proper-lockfile package) around the critical section that writes to the history
file (the code that calls appendFile / the function wrapped by withHistoryLock)
so you Acquire a file lock before writing and Release it after, or if
cross-process safety is not required, add a clear comment/docstring on
withHistoryLock/historyMutex stating the single-process assumption and the risk
on Windows (EBUSY/interleaved writes). Ensure you update call sites that rely on
withHistoryLock to use the new lock API (acquire/release or try/finally) and
handle lock acquisition failures appropriately.


/** Creates the history directory (and any missing parents) with owner-only access. */
async function ensureHistoryDir(directory: string): Promise<void> {
	const mkdirOptions = { recursive: true, mode: 0o700 } as const;
	await fs.mkdir(directory, mkdirOptions);
}

/** Encodes one history entry as a single-line JSON string (one ndjson record). */
function serializeEntry(entry: SyncHistoryEntry): string {
	const encoded = JSON.stringify(entry);
	return encoded;
}

/** Deep-copies an entry via a JSON round-trip; null flows through unchanged. */
function cloneEntry<T extends SyncHistoryEntry | null>(entry: T): T {
	return entry ? (JSON.parse(JSON.stringify(entry)) as T) : entry;
}

export async function appendSyncHistoryEntry(
entry: SyncHistoryEntry,
): Promise<void> {
const writePromise = withHistoryLock(async () => {
const paths = getSyncHistoryPaths();
lastAppendPaths = paths;
await ensureHistoryDir(paths.directory);

const line = `${serializeEntry(entry)}\n`;
const latestContent = `${JSON.stringify(entry, null, 2)}\n`;
await fs.appendFile(paths.historyPath, line, {
encoding: "utf8",
mode: 0o600,
});
await fs.writeFile(paths.latestPath, latestContent, {
encoding: "utf8",
mode: 0o600,
});
Comment on lines +97 to +104
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Non-atomic two-phase write leaves latestPath inconsistent on failure

appendFile and writeFile are two separate syscalls inside the same lock. if appendFile succeeds but writeFile fails (e.g., windows AV holds the file, disk full, process killed mid-write), the ndjson log grows but sync-history-latest.json is stale or partially written. readLatestSyncHistorySync will silently return old data with no indication the inconsistency exists. lastAppendError does get set, but callers of getLastCodexCliSyncRun have no visibility into it.

the safe fix is to write latestPath via a temp file + rename, which is atomic at the fs level (on posix) and far more resilient on windows:

const tmp = `${paths.latestPath}.tmp`;
await fs.writeFile(tmp, latestContent, { encoding: "utf8", mode: 0o600 });
await fs.rename(tmp, paths.latestPath);

windows fs.rename over an existing file can still fail with EPERM when another process holds the target — callers should handle that, but it's still better than a partial in-place write.

Prompt To Fix With AI
This is a comment left during a code review.
Path: lib/sync-history.ts
Line: 97-104

Comment:
**Non-atomic two-phase write leaves `latestPath` inconsistent on failure**

`appendFile` and `writeFile` are two separate syscalls inside the same lock. if `appendFile` succeeds but `writeFile` fails (e.g., windows AV holds the file, disk full, process killed mid-write), the ndjson log grows but `sync-history-latest.json` is stale or partially written. `readLatestSyncHistorySync` will silently return old data with no indication the inconsistency exists. `lastAppendError` does get set, but callers of `getLastCodexCliSyncRun` have no visibility into it.

the safe fix is to write `latestPath` via a temp file + rename, which is atomic at the fs level (on posix) and far more resilient on windows:

```ts
const tmp = `${paths.latestPath}.tmp`;
await fs.writeFile(tmp, latestContent, { encoding: "utf8", mode: 0o600 });
await fs.rename(tmp, paths.latestPath);
```

windows `fs.rename` over an existing file can still fail with `EPERM` when another process holds the target — callers should handle that, but it's still better than a partial in-place write.

How can I resolve this? If you propose a fix, please make it concise.

Fix in Codex

Comment on lines +97 to +104
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

non-atomic write sequence and no EBUSY retry

lib/sync-history.ts:97-104 does appendFile then writeFile. if the process crashes between them, sync-history-latest.json will be stale compared to the ndjson.

additionally, neither call retries on EBUSY. on windows, if another process holds the file, the write fails and is logged but lost. since __resetSyncHistoryForTests already has EBUSY retry logic, consider extracting that into a reusable helper and applying it here.

for atomicity, consider writing to sync-history-latest.json.tmp then renaming.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@lib/sync-history.ts` around lines 97 - 104, The appendFile/writeFile sequence
in lib/sync-history.ts (using paths.historyPath and paths.latestPath with
latestContent) is non-atomic and lacks EBUSY retry; extract the EBUSY retry
logic from __resetSyncHistoryForTests into a reusable helper (e.g.,
writeFileWithRetry or withEbusyRetry) and use it for both operations, then make
the latest update atomic by writing latestContent to a temporary file (e.g.,
sync-history-latest.json.tmp) and renaming it to paths.latestPath after
fs.appendFile completes successfully so the ndjson and latest JSON stay
consistent; ensure both the append and the temp-write/rename use the retry
helper to handle EBUSY on Windows.

lastAppendError = null;
});
pendingHistoryWrites.add(writePromise);
await writePromise
.catch((error) => {
lastAppendError = error instanceof Error ? error.message : String(error);
log.debug("Failed to append sync history", {
error: lastAppendError,
});
})
.finally(() => {
pendingHistoryWrites.delete(writePromise);
});
}

export async function readSyncHistory(
options: { limit?: number; kind?: SyncHistoryKind } = {},
): Promise<SyncHistoryEntry[]> {
const { limit, kind } = options;
await waitForPendingHistoryWrites();
try {
const content = await fs.readFile(
getSyncHistoryPaths().historyPath,
"utf8",
);
const lines = content
.split(/\r?\n/)
.map((line) => line.trim())
.filter(Boolean);
const parsed = lines.map((line) => JSON.parse(line) as SyncHistoryEntry);
Comment on lines +130 to +134
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

corrupt line crashes entire read

lib/sync-history.ts:134 parses every line with JSON.parse without try/catch. a single malformed line (disk corruption, partial write) throws and the entire read returns [] via the outer catch.

consider parsing each line individually and skipping invalid ones:

🔧 proposed fix
-		const parsed = lines.map((line) => JSON.parse(line) as SyncHistoryEntry);
+		const parsed: SyncHistoryEntry[] = [];
+		for (const line of lines) {
+			try {
+				parsed.push(JSON.parse(line) as SyncHistoryEntry);
+			} catch {
+				log.debug("Skipping corrupt sync history line", { line: line.slice(0, 100) });
+			}
+		}
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
const lines = content
.split(/\r?\n/)
.map((line) => line.trim())
.filter(Boolean);
const parsed = lines.map((line) => JSON.parse(line) as SyncHistoryEntry);
const lines = content
.split(/\r?\n/)
.map((line) => line.trim())
.filter(Boolean);
const parsed: SyncHistoryEntry[] = [];
for (const line of lines) {
try {
parsed.push(JSON.parse(line) as SyncHistoryEntry);
} catch {
log.debug("Skipping corrupt sync history line", { line: line.slice(0, 100) });
}
}
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@lib/sync-history.ts` around lines 130 - 134, The current code maps every line
to JSON.parse and a single malformed line will throw and abort the whole read;
update the parsing so each line from lines (the variable produced above) is
parsed inside a try/catch: iterate lines, for each call JSON.parse(line) and
cast to SyncHistoryEntry inside the try block, push valid entries into parsed
(or a new array), and in the catch skip the malformed line (optionally log or
count skipped entries) so one corrupt line no longer crashes the entire read
operation.

const filtered = kind
? parsed.filter((entry) => entry.kind === kind)
: parsed;
return typeof limit === "number" && limit > 0
? filtered.slice(-limit)
: filtered;
} catch (error) {
if ((error as NodeJS.ErrnoException).code !== "ENOENT") {
log.debug("Failed to read sync history", {
error: error instanceof Error ? error.message : String(error),
});
}
return [];
}
}

/**
 * Synchronously load the most recently recorded history entry from the
 * `latest` snapshot file, returning a defensive clone.
 *
 * @returns The cloned latest entry, or `null` when the snapshot is missing
 *          or unreadable. A missing file (ENOENT) is expected and silent;
 *          other failures are logged at debug level.
 */
export function readLatestSyncHistorySync(): SyncHistoryEntry | null {
  try {
    const raw = readFileSync(getSyncHistoryPaths().latestPath, "utf8");
    const entry = JSON.parse(raw) as SyncHistoryEntry;
    return cloneEntry(entry);
  } catch (error) {
    const code = (error as NodeJS.ErrnoException).code;
    if (code && code !== "ENOENT") {
      log.debug("Failed to read latest sync history", {
        error: error instanceof Error ? error.message : String(error),
      });
    }
    return null;
  }
}

/**
 * Test hook: redirect history storage to `directory`, or restore the
 * default location when `directory` is null (or an empty string).
 */
export function configureSyncHistoryForTests(directory: string | null): void {
  if (directory) {
    historyDirOverride = directory.trim();
  } else {
    historyDirOverride = null;
  }
}

/**
 * Test hook: delete both history files and clear the cached error/path
 * bookkeeping. Deletion retries transient Windows-style sharing failures
 * (EBUSY/EPERM/ENOTEMPTY) with exponential backoff before giving up.
 */
export async function __resetSyncHistoryForTests(): Promise<void> {
  const paths = getSyncHistoryPaths();
  await waitForPendingHistoryWrites();
  await withHistoryLock(async () => {
    const retryableCodes = new Set(["EBUSY", "EPERM", "ENOTEMPTY"]);
    const maxAttempts = 5;
    // Remove one file, retrying retryable errors with 25ms * 2^attempt
    // backoff; rethrows immediately on non-retryable errors or when the
    // final attempt fails.
    const removeWithRetry = async (target: string): Promise<void> => {
      for (let attempt = 0; attempt < maxAttempts; attempt += 1) {
        try {
          await fs.rm(target, { force: true });
          return;
        } catch (error) {
          const code = (error as NodeJS.ErrnoException).code;
          const retryable = code !== undefined && retryableCodes.has(code);
          if (!retryable || attempt === maxAttempts - 1) {
            throw error;
          }
          await new Promise((resolve) =>
            setTimeout(resolve, 25 * 2 ** attempt),
          );
        }
      }
    };
    await removeWithRetry(paths.historyPath);
    await removeWithRetry(paths.latestPath);
  });
  lastAppendError = null;
  lastAppendPaths = null;
}

/**
 * Public wrapper over the module's deep-clone helper so callers can copy a
 * history entry (or pass through null) without sharing mutable state.
 */
export function cloneSyncHistoryEntry(
  entry: SyncHistoryEntry | null,
): SyncHistoryEntry | null {
  const copy = cloneEntry(entry);
  return copy;
}

/**
 * Test hook: expose the message of the most recent failed history append,
 * or null when the last append succeeded (or none has run yet).
 */
export function __getLastSyncHistoryErrorForTests(): string | null {
return lastAppendError;
}

/**
 * Test hook: return a shallow copy of the paths used by the most recent
 * history append, or null when no append has been recorded.
 */
export function __getLastSyncHistoryPathsForTests(): SyncHistoryPaths | null {
  if (!lastAppendPaths) {
    return null;
  }
  return { ...lastAppendPaths };
}
Loading