-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathrun.py
More file actions
1946 lines (1656 loc) · 65.6 KB
/
run.py
File metadata and controls
1946 lines (1656 loc) · 65.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
"""
Simvue Run
==========
Main class for recording metrics and information to Simvue during code execution.
This forms the central API for users.
"""
import contextlib
import logging
import pathlib
import mimetypes
import multiprocessing.synchronize
import threading
import humanfriendly
import os
import multiprocessing
import pydantic
import re
import sys
import traceback as tb
import time
import functools
import platform
import typing
import warnings
import uuid
import click
import psutil
from simvue.api.objects.alert.fetch import Alert
from simvue.api.objects.folder import Folder
from simvue.exception import SimvueRunError
from simvue.utilities import prettify_pydantic
from .config.user import SimvueConfiguration
from .factory.dispatch import Dispatcher
from .executor import Executor
from .metrics import get_gpu_metrics, get_process_cpu, get_process_memory
from .models import FOLDER_REGEX, NAME_REGEX, MetricKeyString
from .system import get_system
from .metadata import git_info, environment
from .eco import SimvueEmissionsTracker
from .utilities import (
skip_if_failed,
validate_timestamp,
simvue_timestamp,
)
from .api.objects import (
Run as RunObject,
FileArtifact,
ObjectArtifact,
MetricsThresholdAlert,
MetricsRangeAlert,
UserAlert,
EventsAlert,
Events,
Metrics,
)
try:
from typing import Self
except ImportError:
from typing_extensions import Self # noqa: F401
if typing.TYPE_CHECKING:
from .factory.dispatch import DispatcherBaseClass
UPLOAD_TIMEOUT: int = 30
HEARTBEAT_INTERVAL: int = 60
RESOURCES_METRIC_PREFIX: str = "resources"
logger = logging.getLogger(__name__)
def check_run_initialised(
    function: typing.Callable[..., typing.Any],
) -> typing.Callable[..., typing.Any]:
    """Decorator guarding Run methods against use before initialisation.

    Wraps a ``Run`` method so that it:

    * becomes a no-op returning ``True`` when monitoring is disabled;
    * raises ``RuntimeError`` if the run's retention period has expired
      (also marking the run inactive);
    * raises ``RuntimeError`` if ``init()`` has not yet created the
      underlying server object (``self._sv_obj``).

    Parameters
    ----------
    function : Callable
        the ``Run`` method to wrap

    Returns
    -------
    Callable
        the guarded method
    """
    # Tidy pydantic errors once at decoration time rather than rebuilding
    # the prettified wrapper on every single call
    _function = prettify_pydantic(function)

    @functools.wraps(function)
    def _wrapper(self: Self, *args: typing.Any, **kwargs: typing.Any) -> typing.Any:
        if self._user_config.run.mode == "disabled":
            return True
        if self._retention and time.time() - self._timer > self._retention:
            self._active = False
            raise RuntimeError("Cannot update expired Simvue Run")
        if not self._sv_obj:
            raise RuntimeError(
                f"Simvue Run must be initialised before calling '{function.__name__}'"
            )
        return _function(self, *args, **kwargs)

    return _wrapper
class Run:
    """Track simulation details based on token and URL.

    The Run class provides a way of monitoring simulation runs by logging
    metrics and creating alerts based on such metrics. The recommended usage
    is as a context manager to ensure the run is closed upon completion, e.g.:

        with Run() as run:
            run.init(...)
    """
@pydantic.validate_call
def __init__(
    self,
    mode: typing.Literal["online", "offline", "disabled"] = "online",
    abort_callback: typing.Callable[[Self], None] | None = None,
    server_token: pydantic.SecretStr | None = None,
    server_url: str | None = None,
    debug: bool = False,
) -> None:
    """Initialise a new Simvue run

    If `abort_callback` is provided the first argument must be this Run instance

    Parameters
    ----------
    mode : Literal['online', 'offline', 'disabled'], optional
        mode of running
            online - objects sent directly to Simvue server
            offline - everything is written to disk for later dispatch
            disabled - disable monitoring completely
    abort_callback : Callable | None, optional
        callback executed when the run is aborted
    server_token : str, optional
        overwrite value for server token, default is None
    server_url : str, optional
        overwrite value for server URL, default is None
    debug : bool, optional
        run in debug mode, default is False
    """
    # Locally generated identifier, used before (or instead of) a server id
    self._uuid: str = f"{uuid.uuid4()}"
    self._name: str | None = None

    # monitor duration with respect to retention period
    self._timer: float = 0
    self._retention: float | None = None

    self._testing: bool = False
    self._abort_on_alert: typing.Literal["run", "terminate", "ignore"] = "terminate"
    self._abort_callback: typing.Callable[[Self], None] | None = abort_callback
    self._dispatch_mode: typing.Literal["direct", "queued"] = "queued"

    self._executor = Executor(self)
    self._dispatcher: DispatcherBaseClass | None = None

    self._id: str | None = None
    self._folder: Folder | None = None
    self._term_color: bool = True
    self._suppress_errors: bool = False
    self._queue_blocking: bool = False
    self._status: (
        typing.Literal[
            "created", "running", "completed", "failed", "terminated", "lost"
        ]
        | None
    ) = None
    self._data: dict[str, typing.Any] = {}
    self._step: int = 0
    self._active: bool = False

    # Merge any explicit overrides with values from config file/environment
    self._user_config = SimvueConfiguration.fetch(
        server_url=server_url, server_token=server_token, mode=mode
    )

    # NOTE(review): 'debug' defaults to False (not None), so the
    # '(debug is None and ...)' branch below can never be taken and the
    # 'client.debug' configuration value is effectively ignored — confirm
    # whether the default should be 'debug: bool | None = None'.
    logging.getLogger(self.__class__.__module__).setLevel(
        logging.DEBUG
        if (debug is not None and debug)
        or (debug is None and self._user_config.client.debug)
        else logging.INFO
    )

    self._aborted: bool = False

    # Fall back to the heartbeat interval when the configured resource
    # metric interval is below one second
    self._resources_metrics_interval: int | None = (
        HEARTBEAT_INTERVAL
        if self._user_config.metrics.resources_metrics_interval < 1
        else self._user_config.metrics.resources_metrics_interval
    )

    # Offline mode never talks to the server so needs no auth header
    self._headers: dict[str, str] = (
        {
            "Authorization": f"Bearer {self._user_config.server.token.get_secret_value()}"
        }
        if mode != "offline"
        else {}
    )
    self._sv_obj: RunObject | None = None

    # 0 acts as a sentinel meaning "use os.getpid() when the run starts"
    self._pid: int | None = 0

    self._shutdown_event: threading.Event | None = None
    self._configuration_lock = threading.Lock()
    self._heartbeat_termination_trigger: threading.Event | None = None
    self._storage_id: str | None = None
    self._heartbeat_thread: threading.Thread | None = None
    self._heartbeat_interval: int = HEARTBEAT_INTERVAL

    # As above: clamp sub-second emission intervals up to the heartbeat
    self._emission_metrics_interval: int | None = (
        HEARTBEAT_INTERVAL
        if (
            (_interval := self._user_config.metrics.emission_metrics_interval)
            and _interval < 1
        )
        else self._user_config.metrics.emission_metrics_interval
    )
    self._emissions_tracker: SimvueEmissionsTracker | None = (
        SimvueEmissionsTracker("simvue", self, self._emission_metrics_interval)
        if self._user_config.metrics.enable_emission_metrics
        else None
    )
def __enter__(self) -> Self:
    """Enter the run context manager, returning this Run instance."""
    return self
def _handle_exception_throw(
    self,
    exc_type: typing.Type[BaseException] | None,
    value: BaseException,
    traceback: typing.Type[BaseException] | BaseException | None,
) -> None:
    """React to an exception raised inside the run context.

    Kills any executor processes, logs the exception as a run event and
    marks the run 'terminated' (for KeyboardInterrupt) or 'failed'.
    Called from ``__exit__`` with the standard exception triple.

    Parameters
    ----------
    exc_type : Type[BaseException] | None
        class of the raised exception, None if no exception occurred
    value : BaseException
        the exception instance
    traceback : traceback | None
        the associated traceback object
    """
    _exception_thrown: str | None = exc_type.__name__ if exc_type else None
    _is_running: bool = self._status == "running"
    _is_running_online: bool = self._id is not None and _is_running
    _is_running_offline: bool = (
        self._user_config.run.mode == "offline" and _is_running
    )
    # A KeyboardInterrupt maps to 'terminated', anything else to 'failed'
    _is_terminated: bool = (
        _exception_thrown is not None and _exception_thrown == "KeyboardInterrupt"
    )

    # Clean exit of an active run: nothing to report
    if not _exception_thrown and _is_running:
        return

    # Abort executor processes
    self._executor.kill_all()

    # Runs that never started (or already closed) need no status change
    if not _is_running:
        return

    if not self._active:
        return

    _traceback_out: list[str] = tb.format_exception(exc_type, value, traceback)
    _event_msg: str = (
        "\n".join(_traceback_out)
        if _traceback_out
        else f"An exception was thrown: {_exception_thrown}"
    )

    self.log_event(_event_msg)
    self.set_status("terminated" if _is_terminated else "failed")

    # If the dispatcher has already been aborted then this will
    # fail so just continue without the event
    with contextlib.suppress(RuntimeError):
        self.log_event(f"{_exception_thrown}: {value}")

    if not traceback:
        return

    with contextlib.suppress(RuntimeError):
        self.log_event(f"Traceback: {traceback}")
def __exit__(
    self,
    exc_type: typing.Type[BaseException] | None,
    value: BaseException,
    traceback: typing.Type[BaseException] | BaseException | None,
) -> None:
    """Close the run on context-manager exit.

    Records any exception raised within the context as run events/status
    via ``_handle_exception_throw`` and then tidies up threads, dispatcher
    and server state via ``_tidy_run``.
    """
    logger.debug(
        "Automatically closing run '%s' in status %s",
        self._id if self._user_config.run.mode == "online" else "unregistered",
        self._status,
    )

    # Exception handling
    self._handle_exception_throw(exc_type, value, traceback)

    self._tidy_run()
@property
def duration(self) -> float:
    """Elapsed wall-clock time, in seconds, since this run was started."""
    _now: float = time.time()
    return _now - self._start_time
@property
def time_stamp(self) -> str:
    """Current timestamp in Simvue's canonical string format."""
    _stamp: str = simvue_timestamp()
    return _stamp
@property
def processes(self) -> list[psutil.Process]:
    """All processes monitored by this run.

    Combines the executor's own processes with the manually attached
    parent process (see ``set_pid``) and that parent's descendants.
    Duplicates are removed; ordering is unspecified.
    """
    _monitored = self._executor.processes

    # No manually attached process: the executor list is everything
    if not self._parent_process:
        return _monitored

    _monitored += [self._parent_process]

    # Include descendants of the attached process; these may vanish at
    # any moment, hence the suppression of race-condition errors
    with contextlib.suppress(psutil.NoSuchProcess, psutil.ZombieProcess):
        _descendants = self._parent_process.children(recursive=True)
        for _proc in _descendants:
            if _proc in _monitored:
                continue
            _monitored.append(_proc)

    return list(set(_monitored))
def _get_sysinfo(self) -> dict[str, typing.Any]:
    """Collect resource statistics for the monitored processes.

    Gathers CPU, memory and (if available) GPU usage for all processes
    associated with this run, keyed as resource metrics.

    Returns
    -------
    dict[str, typing.Any]
        retrieved system specifications; empty if CPU or memory
        statistics could not be obtained
    """
    # 'processes' walks the psutil process tree — evaluate it once and
    # reuse, rather than recomputing for each of the three metric calls
    _processes = self.processes
    cpu = get_process_cpu(_processes)
    memory = get_process_memory(_processes)
    gpu = get_gpu_metrics(_processes)

    data: dict[str, typing.Any] = {}

    # Only report if both CPU and memory readings succeeded
    if memory is not None and cpu is not None:
        data = {
            f"{RESOURCES_METRIC_PREFIX}/cpu.usage.percent": cpu,
            f"{RESOURCES_METRIC_PREFIX}/memory.usage": memory,
        }
        if gpu:
            data.update(gpu)
    return data
def _create_heartbeat_callback(
    self,
) -> typing.Callable[[threading.Event], None]:
    """Build the function executed by the heartbeat thread.

    The returned callable loops until the termination trigger is set,
    periodically dispatching resource metrics, sending heartbeats to the
    server and reacting to server-side abort requests.

    Raises
    ------
    RuntimeError
        if the run has not been initialised sufficiently for heartbeats
    """
    if (
        self._user_config.run.mode == "online"
        and (not self._user_config.server.url or not self._id)
    ) or not self._heartbeat_termination_trigger:
        raise RuntimeError("Could not commence heartbeat, run not initialised")

    # Current trigger/callback are bound as defaults so the thread keeps
    # working even if the attributes are later reassigned on the instance
    def _heartbeat(
        heartbeat_trigger: typing.Optional[
            threading.Event
        ] = self._heartbeat_termination_trigger,
        abort_callback: typing.Optional[
            typing.Callable[[Self], None]
        ] = self._abort_callback,
    ) -> None:
        if not heartbeat_trigger:
            raise RuntimeError("Expected initialisation of heartbeat")

        last_heartbeat = time.time()
        last_res_metric_call = time.time()

        # Poll at 100ms so trigger-set is noticed promptly without busy-waiting
        while not heartbeat_trigger.is_set():
            time.sleep(0.1)

            with self._configuration_lock:
                if (
                    self._resources_metrics_interval
                    and (res_time := time.time()) - last_res_metric_call
                    > self._resources_metrics_interval
                ):
                    # Set join on fail to false as if an error is thrown
                    # join would be called on this thread and a thread cannot
                    # join itself!
                    self._add_metrics_to_dispatch(
                        self._get_sysinfo(), join_on_fail=False
                    )
                    last_res_metric_call = res_time

            # Throttle the remaining work to the heartbeat interval
            if time.time() - last_heartbeat < self._heartbeat_interval:
                continue

            last_heartbeat = time.time()

            # Check if the user has aborted the run
            with self._configuration_lock:
                if self._sv_obj and self._sv_obj.abort_trigger:
                    self._alert_raised_trigger.set()
                    logger.debug("Received abort request from server")

                    if abort_callback is not None:
                        abort_callback(self)  # type: ignore

                    if self._abort_on_alert != "ignore":
                        self.kill_all_processes()
                        if self._dispatcher and self._shutdown_event:
                            self._shutdown_event.set()
                            self._dispatcher.purge()
                            self._dispatcher.join()
                        if self._active:
                            self.set_status("terminated")
                        click.secho(
                            "[simvue] Run was aborted.",
                            fg="red" if self._term_color else None,
                            bold=self._term_color,
                        )
                    # 'terminate' aborts the whole interpreter, not just the run
                    if self._abort_on_alert == "terminate":
                        os._exit(1)

            if self._sv_obj:
                self._sv_obj.send_heartbeat()

    return _heartbeat
def _create_dispatch_callback(
    self,
) -> typing.Callable:
    """Generates the relevant callback for posting of metrics and events

    The generated callback is assigned to the dispatcher instance and is
    executed on metrics and events objects held in a buffer.

    Raises
    ------
    RuntimeError
        if the run lacks an identifier (online) or a server object
    """
    if self._user_config.run.mode == "online" and not self._id:
        raise RuntimeError("Expected identifier for run")

    if (
        self._user_config.run.mode != "offline" and not self._user_config.server.url
    ) or not self._sv_obj:
        raise RuntimeError("Cannot commence dispatch, run not initialised")

    # NOTE(review): 'run_obj' binds self._sv_obj at creation time but is
    # never referenced in the body — confirm whether it can be removed or
    # was intended to be used instead of 'self.id'.
    def _dispatch_callback(
        buffer: list[typing.Any],
        category: typing.Literal["events", "metrics"],
        run_obj: RunObject = self._sv_obj,
    ) -> None:
        if category == "events":
            _events = Events.new(
                run=self.id,
                offline=self._user_config.run.mode == "offline",
                events=buffer,
            )
            _events.commit()
        else:
            _metrics = Metrics.new(
                run=self.id,
                offline=self._user_config.run.mode == "offline",
                metrics=buffer,
            )
            _metrics.commit()

    return _dispatch_callback
def _start(self, reconnect: bool = False) -> bool:
    """Start a run

    Commits the 'running' status to the server object, records the start
    time, resolves the monitored parent process, then launches the
    dispatcher and heartbeat threads.

    Parameters
    ----------
    reconnect : bool, optional
        whether this is a reconnect to an existing run, by default False.
        NOTE(review): this flag is currently unused in the body — confirm
        whether reconnect-specific behaviour is still required.

    Returns
    -------
    bool
        if successful
    """
    if self._user_config.run.mode == "disabled":
        return True

    # The local uuid is only meaningful for offline staging files;
    # mark it as unused for online/disabled modes
    if self._user_config.run.mode != "offline":
        self._uuid = "notused"

    logger.debug("Starting run")

    if self._sv_obj and self._sv_obj.status != "running":
        self._sv_obj.status = self._status
        self._sv_obj.commit()

    self._start_time = time.time()

    # _pid == 0 is the sentinel for "monitor this script's own process"
    if self._pid == 0:
        self._pid = os.getpid()

    self._parent_process = psutil.Process(self._pid) if self._pid else None

    self._shutdown_event = threading.Event()
    self._heartbeat_termination_trigger = threading.Event()
    self._alert_raised_trigger = threading.Event()

    try:
        self._dispatcher = Dispatcher(
            mode=self._dispatch_mode,
            termination_trigger=self._shutdown_event,
            object_types=["events", "metrics"],
            callback=self._create_dispatch_callback(),
        )

        self._heartbeat_thread = threading.Thread(
            target=self._create_heartbeat_callback()
        )
    except RuntimeError as e:
        self._error(e.args[0])
        return False

    # Mark active before starting threads so their callbacks see the flag
    self._active = True

    self._dispatcher.start()
    self._heartbeat_thread.start()

    return True
def _error(self, message: str, join_threads: bool = True) -> None:
    """Raise an exception if necessary and log error

    Shuts down the emissions tracker, heartbeat and dispatcher threads,
    then either raises ``SimvueRunError`` or — when errors are
    suppressed — marks the run 'lost' and enters a dormant state.

    Parameters
    ----------
    message : str
        message to display in exception or logger message
    join_threads : bool
        whether to join the threads on failure. This option exists to
        prevent join being called in nested thread calls to this function.

    Raises
    ------
    SimvueRunError
        exception throw (only when error suppression is disabled)
    """
    # Best-effort shutdown of the emissions tracker; never let it mask
    # the original error
    if self._emissions_tracker:
        with contextlib.suppress(Exception):
            self._emissions_tracker.stop()

    # Stop heartbeat
    if self._heartbeat_termination_trigger and self._heartbeat_thread:
        self._heartbeat_termination_trigger.set()
        if join_threads:
            self._heartbeat_thread.join()

    # Finish stopping all threads
    if self._shutdown_event:
        self._shutdown_event.set()

    # Purge the queue as we can no longer send metrics
    if self._dispatcher and self._dispatcher.is_alive():
        self._dispatcher.purge()
        if join_threads:
            self._dispatcher.join()

    if not self._suppress_errors:
        raise SimvueRunError(message)

    # Simvue support now terminated as the instance of Run has entered
    # the dormant state due to exception throw so set listing to be 'lost'
    if self._status == "running" and self._sv_obj:
        self._sv_obj.name = self._name
        self._sv_obj.status = "lost"
        self._sv_obj.commit()

    logger.error(message)

    self._aborted = True
@skip_if_failed("_aborted", "_suppress_errors", False)
@pydantic.validate_call
def init(
    self,
    name: typing.Annotated[str | None, pydantic.Field(pattern=NAME_REGEX)] = None,
    *,
    metadata: dict[str, typing.Any] = None,
    tags: list[str] | None = None,
    description: str | None = None,
    folder: typing.Annotated[
        str, pydantic.Field(None, pattern=FOLDER_REGEX)
    ] = None,
    notification: typing.Literal["none", "all", "error", "lost"] = "none",
    running: bool = True,
    retention_period: str | None = None,
    timeout: int | None = 180,
    visibility: typing.Literal["public", "tenant"] | list[str] | None = None,
    no_color: bool = False,
) -> bool:
    """Initialise a Simvue run

    Parameters
    ----------
    name : str, optional
        the name to allocate this run, if not specified a name will be
        selected at random, by default None
    metadata : typing.dict[str, typing.Any], optional
        any metadata relating to the run as key-value pairs, by default None
    tags : typing.list[str], optional
        a list of tags for this run, by default None
    description : str, optional
        description of the run, by default None
    folder : str, optional
        folder within which to store the run, by default "/"
    notification: typing.Literal["none", "all", "error", "lost"], optional
        whether to notify the user by email upon completion of the run if
        the run is in the specified state, by default "none"
    running : bool, optional
        whether to set the status as running or created, the latter implying
        the run will be commenced at a later time. Default is True.
    retention_period : str, optional
        describer for time period to retain run, the default of None
        removes this constraint.
    timeout: int, optional
        specify the timeout of the run, if None there is no timeout
    visibility : Literal['public', 'tenant'] | list[str], optional
        set visibility options for this run, either:
            * public - run viewable to all.
            * tenant - run viewable to all within the current tenant.
            * A list of usernames with which to share this run
    no_color : bool, optional
        disable terminal colors. Default False.

    Returns
    -------
    bool
        whether the initialisation was successful
    """
    if self._user_config.run.mode == "disabled":
        logger.warning(
            "Simvue monitoring has been deactivated for this run, metrics and artifacts will not be recorded."
        )
        return True

    # Arguments take precedence; config-file values fill any gaps
    description = description or self._user_config.run.description
    tags = (tags or []) + (self._user_config.run.tags or [])
    folder = folder or self._user_config.run.folder
    name = name or self._user_config.run.name
    metadata = (metadata or {}) | (self._user_config.run.metadata or {})

    self._term_color = not no_color

    self._folder = Folder.new(
        path=folder, offline=self._user_config.run.mode == "offline"
    )
    self._folder.commit()  # type: ignore

    # NOTE(review): unlike the checks below, this error path has no
    # 'return False', so with suppress_errors=True execution continues
    # with an invalid visibility value — confirm intended.
    if isinstance(visibility, str) and visibility not in ("public", "tenant"):
        self._error(
            "invalid visibility option, must be either None, 'public', 'tenant' or a list of users"
        )

    if self._user_config.run.mode not in ("online", "offline"):
        self._error("invalid mode specified, must be online, offline or disabled")
        return False

    if self._user_config.run.mode != "offline" and (
        not self._user_config.server.token or not self._user_config.server.url
    ):
        self._error(
            "Unable to get URL and token from environment variables or config file"
        )
        return False

    if name and not re.match(r"^[a-zA-Z0-9\-\_\s\/\.:]+$", name):
        self._error("specified name is invalid")
        return False

    self._name = name

    self._status = "running" if running else "created"

    # Parse the time to live/retention time if specified
    try:
        if retention_period:
            self._retention: int | None = int(
                humanfriendly.parse_timespan(retention_period)
            )
        else:
            self._retention = None
    except humanfriendly.InvalidTimespan as e:
        self._error(e.args[0])
        return False

    self._timer = time.time()

    self._sv_obj = RunObject.new(
        folder=folder, offline=self._user_config.run.mode == "offline"
    )

    if description:
        self._sv_obj.description = description

    if name:
        self._sv_obj.name = name

    self._sv_obj.visibility = {
        "users": visibility if isinstance(visibility, list) else [],
        "tenant": visibility == "tenant",
        "public": visibility == "public",
    }
    self._sv_obj.ttl = self._retention
    self._sv_obj.status = self._status
    self._sv_obj.tags = tags
    # Merge user metadata with git repository and environment details
    self._sv_obj.metadata = (metadata or {}) | git_info(os.getcwd()) | environment()
    self._sv_obj.heartbeat_timeout = timeout
    self._sv_obj.alerts = []
    self._sv_obj.notifications = notification

    if self._status == "running":
        self._sv_obj.system = get_system()

    self._data = self._sv_obj._staging
    self._sv_obj.commit()

    # Online runs may have been allocated a server-generated name
    if self._user_config.run.mode == "online":
        name = self._sv_obj.name

    self._id = self._sv_obj.id

    if not name:
        return False

    # NOTE(review): 'name' is str | None here so 'name is not True' is
    # always true — likely a leftover from an older API; confirm.
    elif name is not True:
        self._name = name

    if self._status == "running":
        self._start()

    if self._user_config.run.mode == "online":
        click.secho(
            f"[simvue] Run {self._name} created",
            bold=self._term_color,
            fg="green" if self._term_color else None,
        )
        click.secho(
            f"[simvue] Monitor in the UI at {self._user_config.server.url}/dashboard/runs/run/{self._id}",
            bold=self._term_color,
            fg="green" if self._term_color else None,
        )

    if self._emissions_tracker and self._status == "running":
        self._emissions_tracker.post_init()
        self._emissions_tracker.start()

    return True
@skip_if_failed("_aborted", "_suppress_errors", None)
@pydantic.validate_call(config={"arbitrary_types_allowed": True})
def add_process(
    self,
    identifier: str,
    *cmd_args,
    executable: str | pathlib.Path | None = None,
    script: pydantic.FilePath | None = None,
    input_file: pydantic.FilePath | None = None,
    completion_callback: typing.Optional[
        typing.Callable[[int, str, str], None]
    ] = None,
    completion_trigger: multiprocessing.synchronize.Event | None = None,
    env: dict[str, str] | None = None,
    cwd: pathlib.Path | None = None,
    **cmd_kwargs,
) -> None:
    """Add a process to be executed to the executor.

    This process can take many forms, for example a be a set of positional arguments:

    ```python
    executor.add_process("my_process", "ls", "-ltr")
    ```

    Provide explicitly the components of the command:

    ```python
    executor.add_process("my_process", executable="bash", debug=True, c="return 1")
    executor.add_process("my_process", executable="bash", script="my_script.sh", input="parameters.dat")
    ```

    or a mixture of both. In the latter case arguments which are not 'executable', 'script', 'input'
    are taken to be options to the command, for flags `flag=True` can be used to set the option and
    for options taking values `option=value`.

    When the process has completed if a function has been provided for the `completion_callback` argument
    this will be called, this callback is expected to take the following form:

    ```python
    def callback_function(status_code: int, std_out: str, std_err: str) -> None:
        ...
    ```

    Note `completion_callback` is not supported on Windows operating systems.

    Alternatively you can use `completion_trigger` to create a multiprocessing event which will be set
    when the process has completed.

    Parameters
    ----------
    identifier : str
        A unique identifier for this process
    executable : str | None, optional
        the main executable for the command, if not specified this is taken to be the first
        positional argument, by default None
    *positional_arguments : Any, ..., optional
        all other positional arguments are taken to be part of the command to execute
    script : pydantic.FilePath | None, optional
        the script to run, note this only work if the script is not an option, if this is the case
        you should provide it as such and perform the upload manually, by default None
    input_file : pydantic.FilePath | None, optional
        the input file to run, note this only work if the input file is not an option, if this is the case
        you should provide it as such and perform the upload manually, by default None
    completion_callback : typing.Callable | None, optional
        callback to run when process terminates (not supported on Windows)
    completion_trigger : multiprocessing.Event | None, optional
        this trigger event is set when the processes completes
    env : dict[str, str], optional
        environment variables for process
    cwd: pathlib.Path | None, optional
        working directory to execute the process within. Note that executable, input and script file paths should
        be absolute or relative to the directory where this method is called, not relative to the new working directory.
    **kwargs : Any, ..., optional
        all other keyword arguments are interpreted as options to the command

    Raises
    ------
    RuntimeError
        if 'completion_trigger' is used on a Windows system
    FileNotFoundError
        if an explicit executable path does not point at a file
    ValueError
        if neither an executable nor any positional arguments are given
    """
    if platform.system() == "Windows" and completion_trigger:
        raise RuntimeError(
            "Use of 'completion_trigger' on Windows based operating systems is unsupported "
            "due to function pickling restrictions for multiprocessing"
        )

    if isinstance(executable, pathlib.Path) and not executable.is_file():
        raise FileNotFoundError(f"Executable '{executable}' is not a valid file")

    pos_args = list(cmd_args)

    # Without an explicit executable the first positional argument must
    # supply the command; previously this fell through to an IndexError
    if not executable and not pos_args:
        raise ValueError(
            "An executable must be provided either explicitly or as the "
            "first positional argument"
        )

    cmd_list: list[str] = []
    executable_str: str | None = None

    # Assemble the command for saving to metadata as string
    if executable:
        executable_str = f"{executable}"
        cmd_list += [executable_str]
    else:
        cmd_list += [f"{pos_args[0]}"]
        executable = pos_args[0]
        pos_args.pop(0)

    for kwarg, val in cmd_kwargs.items():
        _quoted_val: str = f'"{val}"'
        if len(kwarg) == 1:
            # Single-character keyword -> short option, e.g. '-c "value"'
            if isinstance(val, bool) and val:
                cmd_list += [f"-{kwarg}"]
            else:
                cmd_list += [f"-{kwarg}{(f' {_quoted_val}') if val else ''}"]
        else:
            # Long option: Python identifiers use '_' where CLIs use '-'
            kwarg = kwarg.replace("_", "-")
            if isinstance(val, bool) and val:
                cmd_list += [f"--{kwarg}"]
            else:
                cmd_list += [f"--{kwarg}{(f' {_quoted_val}') if val else ''}"]

    # Stringify remaining positional arguments so the metadata join below
    # cannot fail on non-string values (e.g. ints)
    cmd_list += [f"{arg}" for arg in pos_args]
    cmd_str = " ".join(cmd_list)

    # Store the command executed in metadata
    self.update_metadata({f"{identifier}_command": cmd_str})

    # Add the process to the executor
    self._executor.add_process(
        identifier,
        *cmd_args,
        executable=executable_str,
        script=script,
        input_file=input_file,
        completion_callback=completion_callback,  # type: ignore
        completion_trigger=completion_trigger,
        env=env,
        cwd=cwd,
        **cmd_kwargs,
    )
@pydantic.validate_call
def kill_process(self, process_id: str) -> None:
    """Kill a running process by ID

    Delegates to the executor, which owns the process table.

    Parameters
    ----------
    process_id : str
        the unique identifier for the added process
    """
    self._executor.kill_process(process_id)
def kill_all_processes(self) -> None:
    """Kill all currently running processes.

    Terminates every executor-managed process, plus the manually attached
    parent process. When that parent is this very script, only its
    children are killed so the current interpreter survives.
    """
    _attached = self._parent_process
    if _attached:
        # The kill process method of executor by default refers to its
        # own processes but can also be used on a PID
        _is_own_script = _attached.pid == os.getpid()
        self._executor.kill_process(
            process_id=_attached.pid,
            kill_children_only=_is_own_script,
        )
    self._executor.kill_all()
@property
def executor(self) -> Executor:
    """The Executor instance managing this run's attached processes."""
    return self._executor
@property
def name(self) -> str | None:
    """Name of the run, or None if not yet initialised/assigned."""
    return self._name
@property
def uid(self) -> str:
    """Locally generated unique identifier of the run."""
    return self._uuid
@property
def id(self) -> str | None:
    """Server-assigned unique id of the run, or None before registration."""
    return self._id
@skip_if_failed("_aborted", "_suppress_errors", False)
@pydantic.validate_call
def reconnect(self, run_id: str) -> bool:
    """Reconnect to a run in the created state

    Retrieves the existing run object and restarts monitoring for it.

    Parameters
    ----------
    run_id : str
        identifier of run to connect to

    Returns
    -------
    bool
        whether reconnection succeeded
    """
    self._status = "running"
    self._id = run_id
    self._sv_obj = RunObject(identifier=self._id)
    # Propagate the start-up result instead of unconditionally reporting
    # success — previously this returned True even if _start() failed
    return self._start(reconnect=True)
@skip_if_failed("_aborted", "_suppress_errors", None)
@pydantic.validate_call
def set_pid(self, pid: int) -> None:
    """Set pid of process to be monitored

    The process (and its children) will be resolved via psutil when the
    run starts; resource metrics are then gathered for it.

    Parameters
    ----------
    pid : int
        PID of the process to be monitored
    """
    self._pid = pid
@skip_if_failed("_aborted", "_suppress_errors", False)
@pydantic.validate_call
def config(
self,
*,
suppress_errors: bool | None = None,
queue_blocking: bool | None = None,
resources_metrics_interval: pydantic.PositiveInt | None = None,
emission_metrics_interval: pydantic.PositiveInt | None = None,
enable_emission_metrics: bool | None = None,
disable_resources_metrics: bool | None = None,
storage_id: str | None = None,
abort_on_alert: typing.Literal["run", "all", "ignore"] | bool | None = None,
) -> bool:
"""Optional configuration
Parameters
----------
suppress_errors : bool, optional
disable exception throwing instead putting Simvue into a
dormant state if an error occurs
queue_blocking : bool, optional
block thread queues during metric/event recording
resources_metrics_interval : int, optional
frequency at which to collect resource metrics
enable_emission_metrics : bool, optional
enable monitoring of emission metrics
disable_resources_metrics : bool, optional
disable monitoring of resource metrics
storage_id : str, optional
identifier of storage to use, by default None
abort_on_alert : Literal['ignore', run', 'terminate'], optional
whether to abort when an alert is triggered.
If 'run' then the current run is aborted.
If 'terminate' then the script itself is terminated.
If 'ignore' then alerts will not affect this run
Returns
-------
bool
if configuration was successful
"""
with self._configuration_lock:
if suppress_errors is not None:
self._suppress_errors = suppress_errors
if queue_blocking is not None:
self._queue_blocking = queue_blocking
if resources_metrics_interval and disable_resources_metrics:
self._error(
"Setting of resource metric interval and disabling resource metrics is ambiguous"