-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathanalyser.py
More file actions
438 lines (408 loc) · 18.6 KB
/
analyser.py
File metadata and controls
438 lines (408 loc) · 18.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
"""
Contains functions for analysing the various types of files hauled by Murfey, and
assigning to them the correct contexts (CLEM, SPA, tomography, etc.) for processing
on the server side.
Individual contexts can be found in murfey.client.contexts.
"""
from __future__ import annotations
import logging
import queue
import threading
from pathlib import Path
from typing import Type
from murfey.client.context import Context
from murfey.client.contexts.atlas import AtlasContext
from murfey.client.contexts.clem import CLEMContext
from murfey.client.contexts.spa import SPAModularContext
from murfey.client.contexts.spa_metadata import SPAMetadataContext
from murfey.client.contexts.tomo import TomographyContext
from murfey.client.contexts.tomo_metadata import TomographyMetadataContext
from murfey.client.destinations import find_longest_data_directory
from murfey.client.instance_environment import MurfeyInstanceEnvironment
from murfey.client.rsync import RSyncerUpdate, TransferResult
from murfey.util.client import Observer, get_machine_config_client
from murfey.util.mdoc import get_block
from murfey.util.models import ProcessingParametersSPA, ProcessingParametersTomo
# Module-level logger; child of the "murfey.client" logger hierarchy.
logger = logging.getLogger("murfey.client.analyser")
class Analyser(Observer):
    """
    Consumes file paths produced by Murfey's rsync transfers and assigns them to
    the correct processing context (CLEM, SPA, tomography, atlas, ...).

    Files are pushed onto an internal queue via ``enqueue`` and processed by a
    background thread (``_analyse``), which determines the acquisition context,
    gathers data-collection metadata, and notifies registered observers.
    """

    def __init__(
        self,
        basepath_local: Path,
        token: str,
        environment: MurfeyInstanceEnvironment | None = None,
        force_mdoc_metadata: bool = False,
        limited: bool = False,
    ):
        """
        Args:
            basepath_local: Local base directory being watched; incoming relative
                paths from rsync updates are resolved against this.
            token: Authentication token passed through to contexts and the
                machine-config client.
            environment: Murfey instance environment; when absent, no machine
                config is fetched and defaults are used.
            force_mdoc_metadata: Prefer metadata gathered from .mdoc files over
                XML sidecar files.
            limited: Restrict analysis to metadata-context detection only
                (no extension/metadata gathering).
        """
        super().__init__()
        self._basepath = basepath_local.absolute()
        self._limited = limited
        self._experiment_type = ""
        self._acquisition_software = ""
        self._extension: str = ""
        # Data files seen before their metadata could be gathered successfully
        self._unseen_xml: list = []
        self._context: Context | None = None
        self._batch_store: dict = {}
        self._environment = environment
        self._force_mdoc_metadata = force_mdoc_metadata
        self._token = token
        # Set once the workflow (SPA vs tomography) has been identified
        self.parameters_model: (
            Type[ProcessingParametersSPA] | Type[ProcessingParametersTomo] | None
        ) = None
        self.queue: queue.Queue = queue.Queue()
        self.thread = threading.Thread(name="Analyser", target=self._analyse)
        self._stopping = False
        self._halt_thread = False
        # Machine configuration is only available when an environment is provided
        self._murfey_config = (
            get_machine_config_client(
                str(environment.url.geturl()),
                self._token,
                instrument_name=environment.instrument_name,
            )
            if environment
            else {}
        )

    def __repr__(self) -> str:
        return f"<Analyser ({self._basepath})>"

    def _find_extension(self, file_path: Path) -> bool:
        """
        Identifies the file extension and stores that information in the class.

        Returns True if a usable extension was determined for this file.
        """
        if "atlas" in file_path.parts:
            self._extension = file_path.suffix
            return True
        # Reject files that lack a required substring for their suffix
        # (per-software rules from the machine config)
        if (
            required_substrings := self._murfey_config.get(
                "data_required_substrings", {}
            )
            .get(self._acquisition_software, {})
            .get(file_path.suffix)
        ):
            if not any(r in file_path.name for r in required_substrings):
                return False
        # Checks for MRC, TIFF, TIF, and EER files
        if file_path.suffix in (".mrc", ".tiff", ".tif", ".eer"):
            if not self._extension:
                logger.info(f"File extension determined: {file_path.suffix}")
                self._extension = file_path.suffix
            elif self._extension != file_path.suffix:
                logger.info(f"File extension re-evaluated: {file_path.suffix}")
                self._extension = file_path.suffix
            return True
        # If we see an .mdoc file first, use that to determine the file extensions
        elif file_path.suffix == ".mdoc":
            with open(file_path, "r") as md:
                md.seek(0)
                mdoc_data_block = get_block(md)
            if subframe_path := mdoc_data_block.get("SubFramePath"):
                self._extension = Path(subframe_path).suffix
                return True
        # Check for LIF files separately
        elif file_path.suffix == ".lif":
            self._extension = file_path.suffix
            return True
        return False

    def _find_context(self, file_path: Path) -> bool:
        """
        Using various conditionals, identifies what workflow the file is part of, and
        assigns the correct context class to that batch of rsync files for subsequent
        stages of processing. Actions to take for individual files will be determined
        in the Context classes themselves.

        Returns True if a context could be assigned from this file.
        """
        logger.debug(f"Finding context using file {str(file_path)!r}")
        # CLEM workflow checks
        # Look for LIF and XLIF files
        if file_path.suffix in (".lif", ".xlif"):
            self._context = CLEMContext("leica", self._basepath, self._token)
            return True
        # Look for TIFF files associated with CLEM workflow
        # CLEM TIFF files will have "--Stage", "--Z", and/or "--C" in their file stem
        if any(
            pattern in file_path.stem for pattern in ("--Stage", "--Z", "--C")
        ) and file_path.suffix in (".tiff", ".tif"):
            self._context = CLEMContext("leica", self._basepath, self._token)
            return True
        # Tomography and SPA workflow checks
        if "atlas" in file_path.parts:
            self._context = AtlasContext("epu", self._basepath, self._token)
            return True
        if "Metadata" in file_path.parts or file_path.name == "EpuSession.dm":
            self._context = SPAMetadataContext("epu", self._basepath, self._token)
            return True
        elif (
            "Batch" in file_path.parts
            or "SearchMaps" in file_path.parts
            or "Thumbnails" in file_path.parts
            or file_path.name == "Session.dm"
        ):
            self._context = TomographyMetadataContext(
                "tomo", self._basepath, self._token
            )
            return True
        split_file_stem = file_path.stem.split("_")
        if split_file_stem:
            # Gain reference files carry no workflow information
            if split_file_stem[-1] == "gain":
                return False
            # Files starting with "FoilHole" belong to the SPA workflow
            if split_file_stem[0].startswith("FoilHole") and split_file_stem[-1] in [
                "Fractions",
                "fractions",
                "EER",
            ]:
                if not self._context:
                    logger.info("Acquisition software: EPU")
                    self._context = SPAModularContext(
                        "epu", self._basepath, self._token
                    )
                    self.parameters_model = ProcessingParametersSPA
                return True
            # Files starting with "Position" belong to the standard tomography workflow
            # NOTE: not completely reliable, mdocs can be in tomography metadata as well
            if (
                split_file_stem[0] == "Position"
                or "[" in file_path.name
                or split_file_stem[-1] in ["Fractions", "fractions", "EER"]
                or file_path.suffix == ".mdoc"
            ):
                if not self._context:
                    logger.info("Acquisition software: tomo")
                    self._context = TomographyContext(
                        "tomo", self._basepath, self._token
                    )
                    self.parameters_model = ProcessingParametersTomo
                return True
        return False

    def post_transfer(self, transferred_file: Path):
        """
        Forwards a transferred file to the current context's post-transfer hook.
        Exceptions are logged rather than propagated so the analyser thread
        keeps running.
        """
        try:
            if self._context:
                self._context.post_transfer(
                    transferred_file, environment=self._environment
                )
        except Exception as e:
            logger.error(
                f"An exception was encountered post transfer: {e}", exc_info=True
            )

    def _analyse(self):
        """
        Main loop of the analyser thread.

        Pulls file paths off the queue, determines the appropriate context and
        file extension on first sight, gathers data-collection metadata (from
        .mdoc or XML sidecars), and dispatches files to the active context.
        A falsy queue item (e.g. None) acts as a sentinel that stops the loop.
        """
        logger.info("Analyser thread started")
        mdoc_for_reading = None
        while not self._halt_thread:
            transferred_file = self.queue.get()
            transferred_file = (
                Path(transferred_file)
                if isinstance(transferred_file, str)
                else transferred_file
            )
            if not transferred_file:
                # Sentinel received: exit the loop
                self._halt_thread = True
                continue
            if self._limited:
                # Limited mode: only assign metadata contexts, once.
                # NOTE: parentheses are required here; without them the
                # "not self._context" guard only applied to the second
                # alternative, clobbering an existing context.
                if (
                    "Metadata" in transferred_file.parts
                    or transferred_file.name == "EpuSession.dm"
                ) and not self._context:
                    self._context = SPAMetadataContext(
                        "epu", self._basepath, self._token
                    )
                elif (
                    "Batch" in transferred_file.parts
                    or "SearchMaps" in transferred_file.parts
                    or transferred_file.name == "Session.dm"
                ) and not self._context:
                    self._context = TomographyMetadataContext(
                        "tomo", self._basepath, self._token
                    )
                self.post_transfer(transferred_file)
            else:
                dc_metadata = {}
                # Prefer mdoc-derived metadata when forced, or when an mdoc
                # has previously been seen and is awaiting a context
                if (
                    self._force_mdoc_metadata
                    and transferred_file.suffix == ".mdoc"
                ) or mdoc_for_reading:
                    if self._context:
                        try:
                            dc_metadata = self._context.gather_metadata(
                                mdoc_for_reading or transferred_file,
                                environment=self._environment,
                            )
                        except KeyError as e:
                            logger.error(
                                f"Metadata gathering failed with a key error for key: {e.args[0]}"
                            )
                            raise e
                        if not dc_metadata:
                            mdoc_for_reading = None
                    elif transferred_file.suffix == ".mdoc":
                        # No context yet; remember the mdoc for later
                        mdoc_for_reading = transferred_file
                if not self._context:
                    # First usable file: determine extension and context
                    valid_extension = self._find_extension(transferred_file)
                    if not valid_extension:
                        logger.error(f"No extension found for {transferred_file}")
                        continue
                    found = self._find_context(transferred_file)
                    if not found:
                        logger.debug(
                            f"Couldn't find context for {str(transferred_file)!r}"
                        )
                        self.queue.task_done()
                        continue
                    elif self._extension:
                        logger.info(
                            f"Context found successfully for {transferred_file}"
                        )
                        try:
                            self._context.post_first_transfer(
                                transferred_file,
                                environment=self._environment,
                            )
                        except Exception as e:
                            logger.error(f"Exception encountered: {e}")
                        if not isinstance(self._context, AtlasContext):
                            if not dc_metadata:
                                try:
                                    dc_metadata = self._context.gather_metadata(
                                        self._xml_file(transferred_file),
                                        environment=self._environment,
                                    )
                                except NotImplementedError:
                                    dc_metadata = {}
                                except KeyError as e:
                                    logger.error(
                                        f"Metadata gathering failed with a key error for key: {e.args[0]}"
                                    )
                                    raise e
                                except ValueError as e:
                                    logger.error(
                                        f"Metadata gathering failed with a value error: {e}"
                                    )
                            if not dc_metadata or not self._force_mdoc_metadata:
                                # Metadata incomplete; revisit this file later
                                self._unseen_xml.append(transferred_file)
                            else:
                                self._unseen_xml = []
                                if dc_metadata.get("file_extension"):
                                    self._extension = dc_metadata["file_extension"]
                                else:
                                    dc_metadata["file_extension"] = self._extension
                                dc_metadata["acquisition_software"] = (
                                    self._context._acquisition_software
                                )
                                self.notify(
                                    {
                                        "form": dc_metadata,
                                    }
                                )
                # If a file with a CLEM context is identified, immediately post it
                elif isinstance(self._context, CLEMContext):
                    logger.debug(
                        f"File {transferred_file.name!r} will be processed as part of CLEM workflow"
                    )
                    self.post_transfer(transferred_file)
                elif isinstance(self._context, AtlasContext):
                    logger.debug(f"File {transferred_file.name!r} is part of the atlas")
                    self.post_transfer(transferred_file)
                # Handle files with tomography and SPA context differently
                elif not self._extension or self._unseen_xml:
                    valid_extension = self._find_extension(transferred_file)
                    if not valid_extension:
                        logger.error(f"No extension found for {transferred_file}")
                        continue
                    if self._extension:
                        logger.info(
                            f"Extension found successfully for {transferred_file}"
                        )
                        try:
                            self._context.post_first_transfer(
                                transferred_file,
                                environment=self._environment,
                            )
                        except Exception as e:
                            logger.error(f"Exception encountered: {e}")
                        if not dc_metadata:
                            try:
                                dc_metadata = self._context.gather_metadata(
                                    mdoc_for_reading
                                    or self._xml_file(transferred_file),
                                    environment=self._environment,
                                )
                            except KeyError as e:
                                logger.error(
                                    f"Metadata gathering failed with a key error for key: {e.args[0]}"
                                )
                                raise e
                        if not dc_metadata or not self._force_mdoc_metadata:
                            mdoc_for_reading = None
                            self._unseen_xml.append(transferred_file)
                        if dc_metadata:
                            self._unseen_xml = []
                            if dc_metadata.get("file_extension"):
                                self._extension = dc_metadata["file_extension"]
                            else:
                                dc_metadata["file_extension"] = self._extension
                            dc_metadata["acquisition_software"] = (
                                self._context._acquisition_software
                            )
                            self.notify(
                                {
                                    "form": dc_metadata,
                                }
                            )
                elif isinstance(
                    self._context,
                    (
                        SPAModularContext,
                        SPAMetadataContext,
                        TomographyContext,
                        TomographyMetadataContext,
                    ),
                ):
                    context = str(self._context).split(" ")[0].split(".")[-1]
                    logger.debug(
                        f"Transferring file {str(transferred_file)} with context {context!r}"
                    )
                    self.post_transfer(transferred_file)
            self.queue.task_done()
        logger.debug("Analyser thread has stopped analysing incoming files")
        self.notify(final=True)

    def _xml_file(self, data_file: Path) -> Path:
        """
        Works out the path of the XML metadata file associated with a data file,
        falling back to a sibling .xml when no environment or matching data
        directory is available.
        """
        if not self._environment:
            return data_file.with_suffix(".xml")
        # Drop the trailing stem component (e.g. "_Fractions") to get the XML name
        file_name = f"{'_'.join(p for p in data_file.stem.split('_')[:-1])}.xml"
        data_directories = self._murfey_config.get("data_directories", [])
        base_dir, mid_dir = find_longest_data_directory(data_file, data_directories)
        if not base_dir:
            return data_file.with_suffix(".xml")
        return base_dir / self._environment.visit / mid_dir / file_name

    def enqueue(self, rsyncer: RSyncerUpdate):
        """Queues a successfully transferred file for analysis."""
        if not self._stopping and rsyncer.outcome == TransferResult.SUCCESS:
            absolute_path = (self._basepath / rsyncer.file_path).absolute()
            self.queue.put(absolute_path)

    def start(self):
        """Starts the analyser thread. Raises if already running or stopped."""
        if self.thread.is_alive():
            raise RuntimeError("Analyser already running")
        if self._stopping:
            raise RuntimeError("Analyser has already stopped")
        logger.info(f"Analyser thread starting for {self}")
        self.thread.start()

    def request_stop(self):
        """Flags the analyser thread to stop without joining it."""
        self._stopping = True
        self._halt_thread = True

    def is_safe_to_stop(self):
        """
        Checks that the analyser thread is safe to stop
        """
        return self._stopping and self._halt_thread and not self.queue.qsize()

    def stop(self):
        """Stops the analyser thread, unblocking it with a sentinel and joining."""
        logger.debug("Analyser thread stop requested")
        self._stopping = True
        self._halt_thread = True
        try:
            if self.thread.is_alive():
                # None acts as a sentinel to unblock the queue.get() call
                self.queue.put(None)
                self.thread.join()
        except Exception as e:
            logger.error(
                f"Exception encountered while stopping Analyser: {e}",
                exc_info=True,
            )
        logger.debug("Analyser thread stop completed")