Skip to content

Commit d9e66e2

Browse files
authored
Merge pull request #131 from PytorchConnectomics/fix/pytc-v2-runtime-and-schema
Stabilize PyTC v2 runtime and dual-schema config flows
2 parents b4f113d + 0487b0a commit d9e66e2

16 files changed

Lines changed: 1654 additions & 433 deletions

File tree

client/src/api.js

Lines changed: 79 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,11 @@
11
import axios from "axios";
22
import { message } from "antd";
3+
import yaml from "js-yaml";
4+
import {
5+
setInferenceExecutionDefaults,
6+
setInferenceOutputPath,
7+
setTrainingOutputPath,
8+
} from "./configSchema";
39

410
const BASE_URL = `${process.env.REACT_APP_SERVER_PROTOCOL || "http"}://${process.env.REACT_APP_SERVER_URL || "localhost:4242"}`;
511

@@ -22,6 +28,30 @@ const buildFilePath = (file) => {
2228

2329
const hasBrowserFile = (file) => file && file.originFileObj instanceof File;
2430

31+
/**
 * Flatten an API error `detail` payload into one human-readable string.
 *
 * Handles the shapes the backend is observed to send: a plain string, an
 * array of details (joined with "; "), or an object carrying any of
 * `message`, `detail`, `reason`, `upstream_body`, `error` (joined with
 * " | "). Backward compatible with the previous behavior for strings,
 * arrays, and flat objects; additionally recurses into EVERY candidate
 * field (not just `upstream_body`), so a nested object in e.g.
 * `detail.detail` is flattened instead of being rendered by
 * Array.prototype.join as "[object Object]".
 *
 * @param {*} detail - error detail from a server response body.
 * @returns {string} the joined message, or "" when `detail` is falsy.
 */
const getErrorDetailMessage = (detail) => {
  if (!detail) return "";
  if (typeof detail === "string") return detail;
  if (Array.isArray(detail)) {
    // Flatten each element recursively and drop empties.
    return detail.map(getErrorDetailMessage).filter(Boolean).join("; ");
  }
  if (typeof detail === "object") {
    // Recurse into every candidate field so nested objects are
    // flattened rather than stringified as "[object Object]".
    const parts = [
      detail.message,
      detail.detail,
      detail.reason,
      detail.upstream_body,
      detail.error,
    ].map((part) => (part === undefined ? "" : getErrorDetailMessage(part)));
    return parts.filter(Boolean).join(" | ");
  }
  // Numbers, booleans, etc. — fall back to default stringification.
  return String(detail);
};
54+
2555
export async function getNeuroglancerViewer(image, label, scales) {
2656
try {
2757
const url = `${BASE_URL}/neuroglancer`;
@@ -76,7 +106,7 @@ export async function checkFile(file) {
76106
function handleError(error) {
77107
if (error.response) {
78108
const detail = error.response.data?.detail;
79-
const detailMessage = typeof detail === "string" ? detail : detail?.data;
109+
const detailMessage = getErrorDetailMessage(detail);
80110
throw new Error(
81111
`${error.response.status}: ${detailMessage || error.response.statusText}`,
82112
);
@@ -106,7 +136,12 @@ export async function makeApiRequest(url, method, data = null) {
106136
}
107137
}
108138

109-
export async function startModelTraining(trainingConfig, logPath, outputPath) {
139+
export async function startModelTraining(
140+
trainingConfig,
141+
logPath,
142+
outputPath,
143+
configOriginPath = "",
144+
) {
110145
try {
111146
console.log("[API] ===== Starting Training Configuration =====");
112147
console.log("[API] logPath:", logPath);
@@ -115,26 +150,13 @@ export async function startModelTraining(trainingConfig, logPath, outputPath) {
115150
// Parse the YAML config and inject the outputPath
116151
let configToSend = trainingConfig;
117152

118-
if (outputPath) {
153+
if (outputPath && trainingConfig) {
119154
try {
120-
// Parse YAML to object
121-
const yaml = require("js-yaml");
122-
const configObj = yaml.load(trainingConfig);
123-
124-
console.log(
125-
"[API] Original DATASET.OUTPUT_PATH:",
126-
configObj.DATASET?.OUTPUT_PATH,
127-
);
155+
const configObj = yaml.load(trainingConfig) || {};
156+
setTrainingOutputPath(configObj, outputPath);
128157

129-
// Inject the output path from UI
130-
if (!configObj.DATASET) {
131-
configObj.DATASET = {};
132-
}
133-
configObj.DATASET.OUTPUT_PATH = outputPath;
134-
135-
// Convert back to YAML
136158
configToSend = yaml.dump(configObj);
137-
console.log("[API] Injected DATASET.OUTPUT_PATH:", outputPath);
159+
console.log("[API] Injected training output path:", outputPath);
138160
console.log(
139161
"[API] Modified config preview:",
140162
configToSend.substring(0, 500),
@@ -155,6 +177,7 @@ export async function startModelTraining(trainingConfig, logPath, outputPath) {
155177
logPath, // Keep for backwards compatibility, but won't be used for TensorBoard
156178
outputPath, // TensorBoard will use this instead
157179
trainingConfig: configToSend,
180+
configOriginPath,
158181
});
159182

160183
console.log("[API] Request payload size:", data.length, "bytes");
@@ -180,19 +203,35 @@ export async function getTrainingStatus() {
180203
const res = await axios.get(`${BASE_URL}/training_status`);
181204
return res.data;
182205
} catch (error) {
183-
console.error("Failed to get training status:", error);
184-
return { isRunning: false, error: true };
206+
handleError(error);
207+
}
208+
}
209+
210+
/**
 * Fetch the accumulated training log output from the server.
 *
 * @returns {Promise<*>} the response body of GET /training_logs.
 * @throws {Error} via handleError when the request fails.
 */
export async function getTrainingLogs() {
  try {
    const response = await axios.get(`${BASE_URL}/training_logs`);
    return response.data;
  } catch (err) {
    handleError(err);
  }
}
187218

188219
/**
 * Ask the server for the URL of the running TensorBoard instance.
 *
 * @returns {Promise<*>} result of the get_tensorboard_url endpoint.
 */
export async function getTensorboardURL() {
  const endpoint = "get_tensorboard_url";
  return makeApiRequest(endpoint, "get");
}
191222

223+
/**
 * Request that the server start TensorBoard, optionally pointing it at
 * a specific log directory.
 *
 * @param {string} [logPath] - optional log directory; when truthy it is
 *   URL-encoded and sent as a `logPath` query parameter.
 * @returns {Promise<*>} result of the start_tensorboard endpoint.
 */
export async function startTensorboard(logPath) {
  let query = "";
  if (logPath) {
    query = `?${new URLSearchParams({ logPath }).toString()}`;
  }
  return makeApiRequest(`start_tensorboard${query}`, "get");
}
229+
192230
export async function startModelInference(
193231
inferenceConfig,
194232
outputPath,
195233
checkpointPath,
234+
configOriginPath = "",
196235
) {
197236
console.log("\n========== API.JS: START_MODEL_INFERENCE CALLED ==========");
198237
console.log("[API] Function arguments:");
@@ -212,36 +251,18 @@ export async function startModelInference(
212251
// Parse the YAML config and inject the outputPath
213252
let configToSend = inferenceConfig;
214253

215-
if (outputPath) {
216-
console.log("[API] outputPath provided, will inject into YAML");
254+
if (inferenceConfig) {
217255
try {
218-
// Parse YAML to object
219-
const yaml = require("js-yaml");
220256
console.log("[API] Parsing YAML config...");
221-
const configObj = yaml.load(inferenceConfig);
257+
const configObj = yaml.load(inferenceConfig) || {};
222258
console.log("[API] ✓ YAML parsed successfully");
223259

224-
console.log("[API] Original config structure:");
225-
console.log("[API] - Has INFERENCE section?", !!configObj.INFERENCE);
226-
console.log(
227-
"[API] - Original INFERENCE.OUTPUT_PATH:",
228-
configObj.INFERENCE?.OUTPUT_PATH,
229-
);
230-
231-
// Inject the output path from UI
232-
if (!configObj.INFERENCE) {
233-
console.log("[API] INFERENCE section missing, creating it");
234-
configObj.INFERENCE = {};
235-
}
236-
configObj.INFERENCE.OUTPUT_PATH = outputPath;
237-
// Ensure SYSTEM section exists and set NUM_GPUS to 1 for CPU inference
238-
if (!configObj.SYSTEM) {
239-
console.log("[API] SYSTEM section missing, creating it");
240-
configObj.SYSTEM = {};
260+
if (outputPath) {
261+
setInferenceOutputPath(configObj, outputPath);
262+
console.log("[API] ✓ Injected inference output path:", outputPath);
241263
}
242-
configObj.SYSTEM.NUM_GPUS = 1;
243-
console.log("[API] ✓ Set SYSTEM.NUM_GPUS = 1");
244-
console.log("[API] ✓ Injected INFERENCE.OUTPUT_PATH:", outputPath);
264+
setInferenceExecutionDefaults(configObj);
265+
console.log("[API] ✓ Applied inference runtime defaults");
245266

246267
// Convert back to YAML
247268
console.log("[API] Converting back to YAML...");
@@ -260,7 +281,7 @@ export async function startModelInference(
260281
}
261282
} else {
262283
console.warn(
263-
"[API] ⚠ No outputPath provided, config will use its original OUTPUT_PATH",
284+
"[API] ⚠ No inferenceConfig provided, request will use raw payload value",
264285
);
265286
}
266287

@@ -271,6 +292,7 @@ export async function startModelInference(
271292
},
272293
outputPath,
273294
inferenceConfig: configToSend,
295+
configOriginPath,
274296
};
275297

276298
console.log("[API] Payload structure:");
@@ -318,8 +340,16 @@ export async function getInferenceStatus() {
318340
const res = await axios.get(`${BASE_URL}/inference_status`);
319341
return res.data;
320342
} catch (error) {
321-
console.error("Failed to get inference status:", error);
322-
return { isRunning: false, error: true };
343+
handleError(error);
344+
}
345+
}
346+
347+
/**
 * Fetch the accumulated inference log output from the server.
 *
 * @returns {Promise<*>} the response body of GET /inference_logs.
 * @throws {Error} via handleError when the request fails.
 */
export async function getInferenceLogs() {
  try {
    const response = await axios.get(`${BASE_URL}/inference_logs`);
    return response.data;
  } catch (err) {
    handleError(err);
  }
}
325355

client/src/components/Configurator.js

Lines changed: 4 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -51,9 +51,6 @@ function Configurator(props) {
5151
if (!getPathValue(context.inputImage)) missing.push("input image");
5252
if (!getPathValue(context.inputLabel)) missing.push("input label");
5353
if (!getPathValue(context.outputPath)) missing.push("output path");
54-
if (type === "training" && !getPathValue(context.logPath)) {
55-
missing.push("log path");
56-
}
5754
if (type === "inference" && !getPathValue(context.checkpointPath)) {
5855
missing.push("checkpoint path");
5956
}
@@ -62,7 +59,6 @@ function Configurator(props) {
6259
context.inputImage,
6360
context.inputLabel,
6461
context.outputPath,
65-
context.logPath,
6662
context.checkpointPath,
6763
type,
6864
]);
@@ -72,9 +68,10 @@ function Configurator(props) {
7268
? Boolean(context.trainingConfig)
7369
: Boolean(context.inferenceConfig);
7470

75-
const missingBase = hasConfig
76-
? []
77-
: ["base configuration (preset or upload)"];
71+
const missingBase = useMemo(
72+
() => (hasConfig ? [] : ["base configuration (preset or upload)"]),
73+
[hasConfig],
74+
);
7875

7976
const missingByStep = useMemo(() => {
8077
if (current === 0) return missingInputs;

client/src/components/InputSelector.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -197,7 +197,7 @@ function InputSelector(props) {
197197
placeholder="Model checkpoint file (e.g., /path/to/checkpoint_00010.pth.tar)"
198198
value={context.checkpointPath || ""}
199199
onChange={handleCheckpointPathChange}
200-
selectionType="directory"
200+
selectionType="file"
201201
/>
202202
</Form.Item>
203203
)}

0 commit comments

Comments (0)