-
Notifications
You must be signed in to change notification settings - Fork 10
Expand file tree
/
Copy path device.py
More file actions
533 lines (426 loc) · 18.6 KB
/
device.py
File metadata and controls
533 lines (426 loc) · 18.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
import os
import time
import ctypes
from ctypes import *
import numpy
from . import autosave
from . import alarm
from . import fields
from .imports import (
create_callback_capsule,
dbLoadDatabase,
signal_processing_complete,
recGblResetAlarms,
db_put_field_process,
db_get_field,
)
from .device_core import DeviceSupportCore, RecordLookup
# This is set from softioc.iocInit
dispatcher = None

# Global blocking flag, used to mark asynchronous (False) or synchronous (True)
# processing modes for Out records.
# Default False to maintain behaviour from previous versions.
blocking = False

def SetBlocking(new_val):
    '''Sets the global blocking flag and returns the value it previously
    held.'''
    global blocking
    blocking, previous = new_val, blocking
    return previous

# EPICS processing return codes
EPICS_OK = 0
EPICS_ERROR = 1
NO_CONVERT = 2
class ProcessDeviceSupportCore(DeviceSupportCore, RecordLookup):
    '''Implements canonical default processing for records with a _process
    method.  Processing typically either copies a locally set value into the
    record, or else reads a value from the record and triggers an update.
    '''

    # Most records just have an extra process method, but unfortunately ai/ao
    # will have to override this to also add their special_linconv method.
    _dset_extra_ = ([('process', CFUNCTYPE(c_int, c_void_p))], [0])

    # For some record types we want to return a different return code either
    # from record init or processing
    _epics_rc_ = EPICS_OK

    def __init__(self, name, **kargs):
        # All record types can support autosave: pull the option out of the
        # keyword arguments before handing the rest up the chain.
        autosave_fields = kargs.pop("autosave", None)
        autosave.add_pv_to_autosave(self, name, autosave_fields)
        super().__init__(name, **kargs)

    # Most subclasses (all except waveforms) define a ctypes constructor for
    # the underlying EPICS compatible value.
    def _value_to_epics(self, value):
        return self._ctype_(value)

    def _epics_to_value(self, epics):
        return epics.value

    def _default_value(self):
        return self._ctype_()

    def _compare_values(self, lhs, rhs):
        return lhs.value == rhs.value

    def _read_value(self, record):
        '''Called during Out record processing to return the underlying value
        in EPICS format.  A true copy of the value is taken to avoid
        accidental sharing.'''
        copied = self._ctype_()
        copied.value = record.read_val().value
        return copied

    def _write_value(self, record, value):
        '''Called during In record processing to update the underlying value
        (which must be in EPICS compatible format).  Also called during Out
        record initialisation and value reversion when required.'''
        record.write_val(value)

    def get_field(self, field):
        ''' Returns the given field value as a string.'''
        assert hasattr(self, "_record"), \
            'get_field may only be called after iocInit'
        buffer = (c_char * 40)()
        full_name = self._name + '.' + field
        db_get_field(full_name, fields.DBF_STRING, addressof(buffer), 1)
        return _string_at(buffer, 40)

    def set_field(self, field, value):
        '''Sets the given field to the given value. Value will be transported as
        a DBF_STRING.'''
        assert hasattr(self, "_record"), \
            'set_field may only be called after iocInit'
        buffer = (c_char * 40)()
        buffer.value = str(value).encode() + b'\0'
        full_name = self._name + '.' + field
        db_put_field_process(
            full_name, fields.DBF_STRING, addressof(buffer), 1, True)
class ProcessDeviceSupportIn(ProcessDeviceSupportCore):
    '''Processing support for In records: values set from Python are copied
    into the record when it processes.'''
    _link_ = 'INP'

    def __init__(self, name, **kargs):
        if 'initial_value' in kargs:
            initial = self._value_to_epics(kargs.pop('initial_value'))
        else:
            initial = self._default_value()
        # We implement update locking via a simple trick which relies on the
        # Python global interpreter lock: this ensures that assigning or
        # reading a single value is atomic.  We therefore cluster all our
        # variable state into a single tuple which represents a single value
        # to be processed.  The tuple contains everything needed to be
        # written: the value, severity, alarm and optional timestamp.
        self._value = (initial, alarm.NO_ALARM, alarm.UDF_ALARM, None)
        super().__init__(name, **kargs)

    def _process(self, record):
        # For input processing copy the value stored in the instance into the
        # record, update the alarm status, and optionally apply a custom
        # timestamp.
        value, severity, alarm_status, timestamp = self._value
        self._write_value(record, value)
        self.process_severity(record, severity, alarm_status)
        if timestamp is not None:
            record.TIME = timestamp
        record.UDF = 0
        return self._epics_rc_

    def set(self, value,
            severity=alarm.NO_ALARM, alarm=alarm.NO_ALARM, timestamp=None):
        '''Updates the stored value and triggers an update. The alarm
        severity and timestamp can also be specified if appropriate.'''
        self._value = (
            self._value_to_epics(value), severity, alarm, timestamp)
        self.trigger()

    def set_alarm(self, severity, alarm, timestamp=None):
        '''Updates the alarm status without changing the stored value. An
        update is triggered, and a timestamp can optionally be specified.'''
        self._value = (self._value[0], severity, alarm, timestamp)
        self.trigger()

    def get(self):
        '''Returns the last written value.'''
        return self._epics_to_value(self._value[0])
class ProcessDeviceSupportOut(ProcessDeviceSupportCore):
    # Processing support for Out records: values written through EPICS are
    # validated, stored, and handed to an optional on_update callback.
    _link_ = 'OUT'

    def __init__(self, name, **kargs):
        # Consumed keyword arguments (popped before chaining to super):
        #   on_update, on_update_name, validate, always_update,
        #   initial_value, blocking.
        on_update = kargs.pop('on_update', None)
        on_update_name = kargs.pop('on_update_name', None)
        # At most one of on_update and on_update_name can be specified
        assert on_update is None or on_update_name is None, \
            'Cannot specify on_update and on_update_name together'
        if on_update:
            self.__on_update = on_update
        elif on_update_name:
            # The named variant also passes the record name to the callback
            self.__on_update = lambda value: on_update_name(value, name)
        else:
            self.__on_update = None
        self.__validate = kargs.pop('validate', None)
        self.__always_update = kargs.pop('always_update', False)
        if 'initial_value' in kargs:
            value = self._value_to_epics(kargs.pop('initial_value'))
            initial_severity = alarm.NO_ALARM
            initial_status = alarm.NO_ALARM
        else:
            value = self._default_value()
            # To maintain backwards compatibility, if there is no initial value
            # we mark the record as invalid
            initial_severity = alarm.INVALID_ALARM
            initial_status = alarm.UDF_ALARM
        self._value = (value, initial_severity, initial_status)
        # Per-record blocking mode, defaulting to the global flag as it stands
        # at construction time
        self._blocking = kargs.pop('blocking', blocking)
        if self._blocking:
            # Asynchronous completion signalling needs a callback capsule
            self._callback = create_callback_capsule()
        super().__init__(name, **kargs)

    def init_record(self, record):
        '''Special record initialisation for out records only: implements
        special record initialisation if an initial value has been specified,
        allowing out records to have a sensible initial value.'''
        self._write_value(record, self._value[0])
        if 'MLST' in self._fields_:
            record.MLST = self._value[0]
        record.TIME = time.time()
        record.UDF = 0
        record.NSEV = self._value[1]
        record.NSTA = self._value[2]
        recGblResetAlarms(record)
        return self._epics_rc_

    def __completion(self, record):
        '''Signals that all on_update processing is finished'''
        if self._blocking:
            signal_processing_complete(record, self._callback)

    def _process(self, record):
        '''Processing suitable for output records.  Performs immediate value
        validation and asynchronous update notification.'''
        if record.PACT:
            # Second phase of asynchronous processing: nothing more to do
            return EPICS_OK

        # Ignore memoized value, retrieve it from the VAL field instead
        value = self._read_value(record)
        _, severity, alarm = self._value
        self.process_severity(record, severity, alarm)
        if not self.__always_update and \
                self._compare_values(value, self._value[0]):
            # If the value isn't making a change then don't do anything.
            return EPICS_OK

        python_value = self._epics_to_value(value)
        if self.__validate and not self.__validate(self, python_value):
            # Asynchronous validation rejects value, so restore the last good
            # value.
            self._write_value(record, self._value[0])
            return EPICS_ERROR
        else:
            # Value is good. Hang onto it, let users know the value has changed
            self._value = (value, severity, alarm)
            record.UDF = 0
            if self.__on_update:
                # In blocking mode PACT stays set until __completion is
                # invoked by the dispatcher.
                record.PACT = self._blocking
                dispatcher(
                    self.__on_update,
                    func_args=(python_value,),
                    completion = self.__completion,
                    completion_args=(record,))
            return EPICS_OK

    def _value_to_dbr(self, value):
        # Returns (dbf code, element count, data address, value); the value
        # itself is included so callers can keep the buffer alive.
        return self._dbf_type_, 1, addressof(value), value

    def set_alarm(self, severity, alarm):
        '''Updates the alarm status without changing the stored value.  An
        update is triggered by re-writing the current value.'''
        self._value = (self._value[0], severity, alarm)
        self.set(
            self.get(),
            severity=severity,
            alarm=alarm)

    def set(self, value, process=True,
            severity=alarm.NO_ALARM, alarm=alarm.NO_ALARM):
        '''Special routine to set the value directly.'''
        value = self._value_to_epics(value)
        try:
            _record = self._record
        except AttributeError:
            # Record not initialised yet. Record data for when
            # initialisation occurs
            self._value = (value, severity, alarm)
        else:
            # The array parameter is used to keep the raw pointer alive
            dbf_code, length, data, array = self._value_to_dbr(value)
            # If we do process we instead do this inside _process, allowing
            # validation to potentially refuse the update.
            # However if we do not process, we must do this here to keep the
            # Python and EPICS values in line
            if not process:
                self._value = (value, severity, alarm)
            db_put_field_process(_record.NAME, dbf_code, data, length, process)

    def get(self):
        '''Returns the last written value.'''
        return self._epics_to_value(self._value[0])
def _Device(Base, record_type, ctype, dbf_type, epics_rc, mlst = False):
'''Wrapper for generating simple records.'''
class GenericDevice(Base):
_record_type_ = record_type
_device_name_ = 'devPython_' + record_type
_fields_ = ['UDF', 'VAL']
_epics_rc_ = epics_rc
_ctype_ = staticmethod(ctype)
_dbf_type_ = dbf_type
if mlst:
_fields_.append('MLST')
GenericDevice.__name__ = record_type
return GenericDevice
# Short aliases for the two processing base classes.
_In = ProcessDeviceSupportIn
_Out = ProcessDeviceSupportOut

def _Device_In(*args, **kargs):
    # Generates an In record device type (no MLST field).
    return _Device(_In, mlst = False, *args, **kargs)

def _Device_Out(*args, **kargs):
    # Generates an Out record device type; MLST is included so init_record
    # can initialise it.
    return _Device(_Out, mlst = True, *args, **kargs)

# Simple scalar record types: each is built from its record type name, the
# ctypes storage type, the DBF transport type and the processing return code.
longin = _Device_In('longin', c_int32, fields.DBF_LONG, EPICS_OK)
longout = _Device_Out('longout', c_int32, fields.DBF_LONG, EPICS_OK)
int64in = _Device_In('int64in', c_int64, fields.DBF_INT64, EPICS_OK)
int64out = _Device_Out('int64out', c_int64, fields.DBF_INT64, EPICS_OK)
bi = _Device_In('bi', c_uint16, fields.DBF_CHAR, NO_CONVERT)
bo = _Device_Out('bo', c_uint16, fields.DBF_CHAR, NO_CONVERT)
mbbi = _Device_In('mbbi', c_uint16, fields.DBF_SHORT, NO_CONVERT)
mbbo = _Device_Out('mbbo', c_uint16, fields.DBF_SHORT, NO_CONVERT)
def _string_at(value, count):
# Need string_at() twice to ensure string is size limited *and* null
# terminated.
value = ctypes.string_at(ctypes.string_at(value, count))
# Convert bytes to unicode string
return value.decode(errors = 'replace')
class EpicsString:
    '''Mixin providing EPICS 40 character string storage for stringin and
    stringout records.'''
    _fields_ = ['UDF', 'VAL']
    _epics_rc_ = EPICS_OK
    _ctype_ = c_char * 40
    _dbf_type_ = fields.DBF_STRING

    def _value_to_epics(self, value):
        # It's a little odd: we can't simply construct a value from the byte
        # string, but we can update the array in an existing value.  The value
        # being written must be a string, and will be automatically null
        # terminated where possible.
        epics = self._ctype_()
        epics.value = value.encode() + b'\0'
        return epics

    def _epics_to_value(self, epics):
        return _string_at(epics, 40)
class stringin(EpicsString, ProcessDeviceSupportIn):
    # 40 character string input record
    _record_type_ = 'stringin'
    _device_name_ = 'devPython_stringin'

class stringout(EpicsString, ProcessDeviceSupportOut):
    # 40 character string output record
    _record_type_ = 'stringout'
    _device_name_ = 'devPython_stringout'
# Device support table entries shared by ai and ao: alongside the standard
# process method these record types also carry a special_linconv method.
dset_process_linconv = (
    [('process', CFUNCTYPE(c_int, c_void_p)),
     ('special_linconv', CFUNCTYPE(c_int, c_void_p, c_int))],
    [0, 0])

# For ai and ao there's no point in supporting RVAL <-> VAL conversion, so
# for these we support no conversion directly.
class ai(ProcessDeviceSupportIn):
    # Analogue input record: double precision value, no RVAL conversion.
    _record_type_ = 'ai'
    _device_name_ = 'devPython_ai'
    _fields_ = ['UDF', 'VAL']
    _dset_extra_ = dset_process_linconv
    _epics_rc_ = NO_CONVERT
    _ctype_ = c_double
    _dbf_type_ = fields.DBF_DOUBLE

    def _process(self, record):
        # Because we're returning NO_CONVERT we need to do the .UDF updating
        # ourself (otherwise the record support layer does this): the record
        # is undefined while the stored value is NaN.
        record.UDF = int(numpy.isnan(self._value[0]))
        return super()._process(record)
class ao(ProcessDeviceSupportOut):
    # Analogue output record: double precision value, no RVAL conversion.
    _record_type_ = 'ao'
    _device_name_ = 'devPython_ao'
    # MLST is included so init_record can initialise it alongside VAL
    _fields_ = ['UDF', 'VAL', 'MLST']
    _dset_extra_ = dset_process_linconv
    _epics_rc_ = NO_CONVERT
    _ctype_ = c_double
    _dbf_type_ = fields.DBF_DOUBLE
def to_epics_str_array(value):
    """Convert the given array of Python strings to an array of EPICS
    nul-terminated strings"""
    result = numpy.empty(len(value), 'S40')
    for index, item in enumerate(value):
        if isinstance(item, str):
            # Writing through a 40 character buffer both length-checks the
            # string and null terminates it where possible.
            cell = (c_char * 40)()
            cell.value = item.encode() + b'\0'
            result[index] = cell.value
        else:
            # Already raw bytes: store directly
            result[index] = item
    return result
def _require_waveform(value, dtype):
if dtype and dtype.char == 'S':
return to_epics_str_array(value)
else:
if isinstance(value, bytes):
# Special case hack for byte arrays. Surprisingly tricky:
value = numpy.frombuffer(value, dtype = numpy.uint8)
value = numpy.require(value, dtype = dtype)
if value.shape == ():
value.shape = (1,)
assert value.ndim == 1, 'Can\'t write multidimensional arrays'
return value
class WaveformBase(ProcessDeviceSupportCore):
    _link_ = 'INP'
    # In the waveform record class, the following four fields are key:
    #   FTVL    Type of stored waveform (as a DBF_ code)
    #   BPTR    Pointer to raw array containing waveform data
    #   NELM    Length of allocated array in number of elements
    #   NORD    Currently reported length of array (0 <= NORD <= NELM)
    _fields_ = ['UDF', 'FTVL', 'BPTR', 'NELM', 'NORD']

    def __init__(self, name, _wf_nelm, _wf_dtype, **kargs):
        # _wf_dtype: numpy dtype of the stored waveform elements
        # _wf_nelm: maximum number of elements in the waveform
        self._dtype = _wf_dtype
        self._nelm = _wf_nelm
        super().__init__(name, **kargs)

    def init_record(self, record):
        # The DBF transport type is read from the record's FTVL field, which
        # is only available once the record exists.
        self._dbf_type_ = record.FTVL
        return super().init_record(record)

    def _read_value(self, record):
        # Copies NORD elements out of the record's raw buffer into a fresh
        # numpy array.
        nord = record.NORD
        result = numpy.empty(nord, dtype = self._dtype)
        memmove(
            result.ctypes.data_as(c_void_p), record.BPTR,
            self._dtype.itemsize * nord)
        return result

    def _write_value(self, record, value):
        # Copies the value into the record's raw buffer, then updates the
        # reported length (length against NELM is checked in _value_to_epics).
        nord = len(value)
        memmove(
            record.BPTR, value.ctypes.data_as(c_void_p),
            self._dtype.itemsize * nord)
        record.NORD = nord

    def _compare_values(self, value, other):
        return numpy.array_equal(value, other)

    def _value_to_epics(self, value):
        # Ensure we always convert incoming value into numpy array, regardless
        # of whether the record has been initialised or not
        value = _require_waveform(value, self._dtype)
        # Because arrays are mutable values it's ever so easy to accidentally
        # call set() with a value which subsequently changes. To avoid this
        # common class of bug, at the cost of duplicated code and data, here we
        # ensure a copy is taken of the value.
        assert len(value) <= self._nelm, 'Value too long for waveform'
        value = numpy.copy(value)
        # As we return a reference to the numpy array, ensure it cannot be
        # modified under our noses
        value.flags.writeable = False
        return value

    def _epics_to_value(self, value):
        if self._dtype.char == 'S':
            # String waveforms are returned as a list of Python strings
            return [_string_at(s, 40) for s in value]
        else:
            return value

    def _value_to_dbr(self, value):
        # Returns (dbf code, element count, data address, value); the value
        # itself is included so callers can keep the buffer alive.
        return self._dbf_type_, len(value), value.ctypes.data, value
class waveform(WaveformBase, ProcessDeviceSupportIn):
    # Waveform input record
    _record_type_ = 'waveform'
    _device_name_ = 'devPython_waveform'

class waveform_out(WaveformBase, ProcessDeviceSupportOut):
    # Waveform output: the same underlying record type, but with Out record
    # processing and a distinct device name.
    _record_type_ = 'waveform'
    _device_name_ = 'devPython_waveform_out'
class LongStringBase(WaveformBase):
    '''Stores a string as a character waveform, supporting strings longer
    than the EPICS 40 character limit.'''
    _dtype = numpy.dtype('uint8')

    def _value_to_epics(self, value):
        # Convert a string into an array of characters. This will produce
        # the correct behaviour when treating a character array as a string.
        # Note that the trailing null is needed to work around problems with
        # some clients.
        encoded = value.encode(errors = 'replace') + b'\0'
        chars = numpy.frombuffer(encoded, dtype = numpy.uint8)
        # Ensure string isn't too long to fit into waveform
        assert len(chars) <= self._nelm, 'Value too long for waveform'
        return chars

    def _epics_to_value(self, value):
        return _string_at(value.ctypes, len(value))
class long_stringin(LongStringBase, ProcessDeviceSupportIn):
    # Long string input, implemented as a character waveform
    _record_type_ = 'waveform'
    _device_name_ = 'devPython_long_stringin'

class long_stringout(LongStringBase, ProcessDeviceSupportOut):
    # Long string output, implemented as a character waveform
    _record_type_ = 'waveform'
    _device_name_ = 'devPython_long_stringout'
# Ensure the .dbd file describing the device support entries is loaded.
dbLoadDatabase('device.dbd', os.path.dirname(__file__), None)