# cre_agent.py
from numba.types import f8, string, boolean
import numpy as np
from cre import MemSet, CREFunc, UntypedCREFunc, Fact, FactProxy
from apprentice.agents.base import BaseAgent
from apprentice.agents.cre_agents.state import State, encode_neighbors
from apprentice.agents.cre_agents.dipl_base import BaseDIPLAgent
from apprentice.shared import rand_skill_uid, rand_skill_app_uid, rand_state_uid, ActionLike, Action
from cre.transform import MemSetBuilder, Flattener, FeatureApplier, RelativeEncoder, Vectorizer, Enumerizer
from cre.utils import PrintElapse
from cre import TF
from cre.gval import new_gval
import itertools
from itertools import chain
from copy import copy
from numba.core.runtime.nrt import rtsys
import gc
import hashlib
import base64
import json
from datetime import datetime
from typing import Union, List, Tuple
def used_bytes(garbage_collect=True):
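    """Return the bytes currently allocated by numba's runtime
    (allocations minus frees), optionally garbage collecting first."""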
    if(garbage_collect): gc.collect()
stats = rtsys.get_allocation_stats()
# print(stats)
return stats.alloc-stats.free
# -----------------------
# : Function minimal_str + get_info
def minimal_func_str(func, ignore_funcs=[]):
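    """Return the minimal string form of a CREFunc, or str(func) for anything else."""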
if(isinstance(func, CREFunc)):
return func.minimal_str(ignore_funcs=ignore_funcs)
else:
return str(func)
def func_get_info(func, ignore_funcs=[]):
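    """Summarize a function as a dict with its repr, argument variables
    (alias and type, for CREFuncs), and minimal string form."""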
var_infos = []
if(isinstance(func, CREFunc)):
for alias, typ in zip(func.base_var_aliases, func.arg_types):
var_infos.append({"alias" : alias, "type" : str(typ)})
min_str = minimal_func_str(func, ignore_funcs)
else:
min_str = str(func)
return {
"repr" : repr(func),
"vars" : var_infos,
"minimal_str" : min_str
}
def action_uid(state, action):
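    """Build a deterministic uid for a (state, action) pair by hashing the
    state's '__uid__' together with the action's repr."""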
h = hashlib.sha224()
h.update(state.get("__uid__", None).encode('utf-8'))
h.update(repr(action).encode('utf-8'))
# Limit length to 30 chars to be consistent with other hashes
return f"AC_{base64.b64encode(h.digest(), altchars=b'AB')[:30].decode('utf-8')}"
# -----------------------
# : Skill
class Skill(object):
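    """A learned skill: a how-part (a CREFunc or constant value) paired with
    where/when/which learning mechanisms that determine where the skill can
    apply, when it should fire, and how competing applications are ranked."""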
def __init__(self, agent, action_type, how_part,
uid=None, label=None, explanation_set=None):
self.agent = agent
self.label = label
self.explanation_set = explanation_set
self.how_part = how_part
self.action_type = action_type
# self.input_attr = input_attr
self.uid = rand_skill_uid() if(uid is None) else uid
self.where_lrn_mech = agent.where_cls(self,**agent.where_args)
self.when_lrn_mech = agent.when_cls(self,**agent.when_args)
self.which_lrn_mech = agent.which_cls(self,*agent.which_args)
if(agent.process_lrn_mech and getattr(agent, 'sep_in_proc_when', False)):
self.in_proc_when_lrn_mech = agent.when_cls(self,**agent.when_args)
self.skill_apps = {}
def get_applications(self, state, skip_when=False):
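        """Return a SkillApplication for each match of the where-part in
        `state` whose when-part prediction is positive (or merely uncertain,
        if the agent is configured to suggest uncertain negatives)."""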
# print(str(self.how_part),":")
# print(self.where_lrn_mech.conds)
# with PrintElapse("get_matches"):
applications = []
matches = list(self.where_lrn_mech.get_matches(state))
# with PrintElapse("iter_matches"):
for match in matches:
when_pred = 1 if skip_when else self.when_lrn_mech.predict(state, match)
# print(f"{when_pred:.2f}", str(self.how_part), ": ", match)
            if(when_pred > 0 or (self.agent.suggest_uncert_neg and when_pred != -1.0)):
skill_app = SkillApplication(self, match, state, when_pred=when_pred)
if(skill_app is not None):
applications.append(skill_app)
return applications
def get_info(self, where_kwargs={}, when_kwargs={},
which_kwargs={}, **kwargs):
info = { "uid" : self.uid,
"how": {
"func" : func_get_info(self.how_part, ignore_funcs=self.agent.conversions)
},
"where": self.where_lrn_mech.get_info(**where_kwargs),
"when": self.when_lrn_mech.get_info(**when_kwargs),
"which": self.which_lrn_mech.get_info(**which_kwargs),
}
return info
def __call__(self, *match):
args = match[1:]
if(hasattr(self.how_part, '__call__')):
if(len(args) != self.how_part.n_args):
raise ValueError(f"Incorrect number of args: {len(args)}, for skill how-part {self.how_part} with {self.how_part.n_args} positional arguments.")
try:
val = self.how_part(*args)
            except Exception:
return None
else:
val = self.how_part
return Action(match[0], self.action_type, val)
# print("FIT", reward)
# if(not hasattr(self.how_part,'__call__') and self.how_part == -1):
# print("<<", self.how_part, match)
# raise ValueError()
def __repr__(self):
return f"Skill({self.how_part}, uid={self.uid!r})"
def __str__(self):
min_str = minimal_func_str(self.how_part, ignore_funcs=self.agent.conversions)
return f"Skill_{self.uid[3:8]}({min_str})"
# -----------------------
# : SkillApplication
# TODO: Need to think about whether it makes sense for SkillApps
#  to keep references to their states and next_states, since doing
#  so is a possible source of memory leaks.
KEEP_STATE_REFS = True
class SkillApplication(object):
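    """A concrete application of a Skill to a particular match in a particular
    state. Wraps the resulting Action and tracks explicit rewards (from
    training feedback) and implicit rewards (inferred from other apps)."""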
skill : Skill
state_uid : str
match : list #TODO: Be More specific
args : list
action: Action
uid: str
# ActionLike interface
@property
def selection(self):
return self.action.selection
@property
def action_type(self):
return self.action.action_type
@property
def input(self):
return self.action.input
def as_tuple(self):
return self.action.as_tuple()
# __slots__ = ("skill", "state_uid", "match", "args", "action", "uid")
def __new__(cls, skill, match, state, uid=None,
next_state=None, prob_uid=None, short_name=None,
reward=None, when_pred=None):
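        """Create a SkillApplication of `skill` to `match` in `state`, or
        return the agent's cached instance if an identical application (same
        skill uid, state uid, and match ids) already exists. Returns None if
        the skill's how-part fails on the match."""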
# print(skill, [m.id for m in match])
action = skill(*match)
# print("HERE >> ", type(action.selection), type(action.action_type), type(action.input))
# Find the unique id for this skill_app
state_uid = state.get("__uid__", None)
h = hashlib.sha224()
h.update(skill.uid.encode('utf-8'))
h.update(state_uid.encode('utf-8'))
h.update(",".join([m.id for m in match]).encode('utf-8'))
uid = f"A_{base64.b64encode(h.digest(), altchars=b'AB')[:30].decode('utf-8')}"
agent = getattr(skill,'agent', None)
# If this skill has been fit with this skill_app then return the previous instance
# if(uid in skill.skill_apps):
# self = skill.skill_apps[uid]
        if(agent and uid in agent.skill_apps_by_uid):
            self = agent.skill_apps_by_uid[uid]
            # If given a new prediction probability then overwrite the old one.
            if(when_pred is not None):
                self.when_pred = when_pred
            return self
if(action is None):
return None
self = super().__new__(cls)
self.skill = skill
self.state_uid = state_uid
self.match = match
self.args = match[1:]
self.action = action
self.uid = uid
self.implicit_rewards = {}
self.implicit_dependants = {}
self.explicit_reward = reward
self.implicit_reward = None
if(not hasattr(self, "when_pred") or when_pred is not None):
self.when_pred = when_pred
if(not hasattr(self, "prob_uid") or prob_uid is not None):
self.prob_uid = prob_uid
if(KEEP_STATE_REFS):
self.state = state
self.next_state = next_state
return self
@property
def reward(self):
explicit_reward = getattr(self, 'explicit_reward', None)
implicit_reward = getattr(self, 'implicit_reward', None)
if(explicit_reward is not None):
return explicit_reward
elif(implicit_reward is not None):
return implicit_reward
return None
def annotate_train_data(self, reward, arg_foci, skill_label, skill_uid,
how_help, explanation_selected, is_demo=False, **kwargs):
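        """Record training feedback for this application: an explicit reward
        plus any argument foci, skill label/uid, how-help, and selected
        explanation."""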
# self.reward = reward
self.explicit_reward = reward
self.arg_foci = arg_foci
self.skill_label = skill_label
self.skill_uid = skill_uid
self.how_help = how_help
self.explanation_selected = explanation_selected
self.is_demo = is_demo
def add_seq_tracking(self, prob_uid=None):
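        """Register this application in the agent's uid and state-uid indices
        and, if the agent tracks rollouts, with its preseq tracker. Returns
        False if the next state could not be predicted."""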
if(prob_uid is not None):
self.prob_uid = prob_uid
agent = self.skill.agent
if(agent and self.uid not in agent.skill_apps_by_uid):
agent.skill_apps_by_uid[self.uid] = self
by_s_uid = agent.skill_apps_by_state_uid.get(self.state_uid, set())
by_s_uid.add(self)
agent.skill_apps_by_state_uid[self.state_uid] = by_s_uid
if(hasattr(agent, 'rollout_preseq_tracker')):
# print(self)
            if(self.next_state is None):
                # If the next state cannot be predicted, or the skill_app
                #  doesn't change the state, then don't keep tracking it.
                try:
                    self.next_state = agent.predict_next_state(self.state, self.action)
                except Exception:
                    print("Failed to predict next state for", self)
                    return False
# print("NEXT STATE", self.sai[0], self.sai[2]['value'], self.next_state.get('__uid__')[:5])
# print(repr(self.next_state.get("working_memory")))
# print("THIS IS START", state.is_start, getattr(state,'is_start', None))
agent.rollout_preseq_tracker.add_skill_app(self, getattr(self.state,'is_start', None), do_update=False)
return True
def remove_seq_tracking(self):
agent = self.skill.agent
if(self.state_uid in agent.skill_apps_by_state_uid):
by_s_uid = agent.skill_apps_by_state_uid[self.state_uid]
by_s_uid.remove(self)
del agent.skill_apps_by_uid[self.uid]
agent.rollout_preseq_tracker.remove_skill_app(self, getattr(self.state,'is_start', None), do_update=False)
def get_info(self):
action = self.action
info = {
'uid' : self.uid,
'skill_uid' : self.skill.uid,
'skill_label' : self.skill.label,
'selection' : action.selection,
# 'action' : sai.action_type,
'action_type' : action.action_type,
'input' : action.input,
'args' : [m.id for m in self.args],
'when_pred' : self.when_pred,
'in_process' : getattr(self, 'in_process', False),
}
if(self.skill and len(self.match) > 1):
hvs = self.skill.how_part.head_vars
head_vals = [hv[0](m) for hv, m in zip(hvs, self.match[1:])]
info['head_vals'] = head_vals
if(getattr(self, 'path', None)):
info['path'] = self.path.get_info()
info['internal_unordered'] = self.path.is_internal_unordered
info['initial_unordered'] = self.path.is_initial_unordered
if(getattr(self, 'certainty', None)):
info['certainty'] = self.certainty
info['cert_diff'] = self.cert_diff
if(getattr(self, 'removed', None)):
info['removed'] = self.removed
if(getattr(self, 'unordered_group', None)):
info['unordered_group'] = self.unordered_group
# info['group_next_state_uid'] = self.group_next_state_uid
if(hasattr(self, 'train_time')):
train_data = {}
train_data['train_time'] = getattr(self, 'train_time', None)
train_data['explicit_reward'] = getattr(self, 'explicit_reward', None)
train_data['implicit_reward'] = getattr(self, 'implicit_reward', None)
train_data['reward'] = self.reward #getattr(self, 'reward', None)
train_data['arg_foci'] = getattr(self, 'arg_foci', None)
train_data['skill_label'] = getattr(self, 'skill_label', None)
train_data['skill_uid'] = getattr(self, 'skill_uid', None)
train_data['how_help'] = getattr(self, 'how_help', None)
train_data['explanation_selected'] = getattr(self, 'explanation_selected', None)
train_data['is_demo'] = getattr(self, 'is_demo', False)
train_data = {k:v for k,v in train_data.items() if v is not None}
if('arg_foci' in train_data):
# print("ARG FOCI", train_data['arg_foci'])
# TODO: Fix whatever causing this to be necessary
# arg_foci = train_data['arg_foci']
# if(len(arg_foci) > 0 and isinstance(arg_foci[0], list)):
# arg_foci = arg_foci[0]
train_data['arg_foci'] = [m if isinstance(m,str) else m.id for m in train_data['arg_foci']]
if('explicit_reward' in train_data):
train_data['confirmed'] = True
info.update(train_data)
return info
def as_train_kwargs(self):
return {'action': Action(*self.as_tuple()),
'arg_foci' : [m.id for m in self.args],
'how_help' : self.how_help}
def __repr__(self, add_action=True):
app_str = f'{self.skill}({", ".join([m.id for m in self.args])})'
if(add_action):
return f'{app_str} -> {self.action}'
else:
return app_str
def __eq__(self, other):
return getattr(self, 'uid', None) == getattr(other, 'uid', None)
def __hash__(self):
return hash(self.uid)
def update_implicit_reward(self):
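        """Recompute implicit_reward as the maximum reward among implicit
        sources whose own explicit_reward is positive. Returns a tuple of
        (did_change, new_implicit_reward)."""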
old_implicit_reward = self.implicit_reward
if(len(self.implicit_rewards) == 0):
self.implicit_reward = None
else:
max_rew = None
for src, (_, r) in self.implicit_rewards.items():
if( src.explicit_reward is None or
src.explicit_reward <= 0):
continue
if(max_rew is None or r > max_rew):
max_rew = r
self.implicit_reward = max_rew
        return old_implicit_reward != self.implicit_reward, self.implicit_reward
def add_implicit_reward(self, depends, reward, update=False):
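        """Attach an implicit `reward` to this application contingent on the
        applications in `depends`, registering reverse links in their
        implicit_dependants. If `update`, recompute implicit_reward."""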
for other in depends:
self.implicit_rewards[other] = (depends, reward)
other.implicit_dependants[self] = (depends, reward)
# Recalculate the value of implicit_reward
if(update):
return self.update_implicit_reward()
return False
def remove_implicit_reward(self, other, update=False):
if(other in self.implicit_rewards):
depends, reward = self.implicit_rewards[other]
for dep in depends:
del self.implicit_rewards[dep]
if(self in dep.implicit_dependants):
del dep.implicit_dependants[self]
# Recalculate the value of implicit_reward
if(update):
return self.update_implicit_reward()
return False
def clear_implicit_dependants(self, update=True):
changed_depends = []
for dep in [*self.implicit_dependants]:
did_change = dep.remove_implicit_reward(self, update)
if(did_change):
changed_depends.append(dep)
# if(clear_fit):
# for sa, (depends,reward) in self.implicit_rewards.items():
# sa.skill.ifit(sa.state, sa, None)
# old_implicit_rewards = self.implicit_rewards
# self.implicit_rewards = {}
return changed_depends
def ifit_implicit_dependants(self):
agent = self.skill.agent
for dep in self.implicit_dependants:
did_update, impl_rew = dep.update_implicit_reward()
if(did_update and dep.explicit_reward is None):
# print("IMPLICIT", dep.state.get("__uid__")[:5], dep, impl_rew)
agent._ifit_skill_app(dep, impl_rew)
# if(self.explicit_reward is not None and self.explicit_reward > 0):
# elif(self.explicit_reward is None or self.explicit_reward <= 0):
# for dep in self.implicit_dependants:
# did_update, impl_rew = dep.update_implicit_reward()
# if(did_update and dep.explicit_reward is None):
# agent._ifit_skill_app(dep, impl_rew)
# for sa, (depends, reward) in self.implicit_rewards.items():
# # Don't override explicit rewards
# if(hasattr(sa, 'explicit_reward')):
# continue
# if(old_implicit_rewards.get(sa, None) == reward):
# continue
# self.agent._ifit_skill_app(self.state, self, reward)
# if(getattr(imp_neg_sa, "reward", 0) == 0):
# sa.skill.ifit(sa.state, sa, reward)
# self.train(imp_neg_sa.state, reward=-1, skill_app=imp_neg_sa)
def ensure_when_pred(self):
# print("ENSURE", self)
if(getattr(self.skill.agent, "sep_in_proc_when", False) and
getattr(self, "in_process", False)):
self.when_pred = self.skill.in_proc_when_lrn_mech.predict(self.state, self.match)
else:
self.when_pred = self.skill.when_lrn_mech.predict(self.state, self.match)
# ----------------------
# : AppGroupAnnotation
class AppGroupAnnotation():
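    """An annotation that tags a group of skill applications with a kind."""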
def __init__(self, app_group, kind):
self.app_group = app_group
self.kind = kind
# -----------------------
# : CREAgent
class CREAgent(BaseDIPLAgent):
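    """An agent that learns skills (how/where/when/which parts) from
    demonstrations and feedback, using CRE MemSets as working memory and CRE
    transforms (flattening, featurization) for its state representations."""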
# ------------------------------------------------
# : __init__
    def init_processors(self):
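        """Build the state-processing pipeline (MemSetBuilder, Enumerizer,
        Flattener, FeatureApplier) and register the 'flat', 'flat_featurized',
        and 'py_dict' state transforms."""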
# The types that visible attributes / features can take.
val_types = set([f8,string,boolean])
for fact_type in self.fact_types:
for _, attr_spec in fact_type.filter_spec("visible").items():
# print(attr_spec)
val_types.add(attr_spec['type'])
for func in self.feature_set:
# print(op, type(op), isinstance(op, Op))
if(isinstance(func, UntypedCREFunc)):
raise ValueError(
"Feature functions must be typed. Specify signature in definition. " +
"For instance @CREFunc(signature = unicode_type(unicode_type,unicode_type))."
)
val_types.add(func.signature.return_type)
self.memset_builder = MemSetBuilder()
self.enumerizer = Enumerizer()
self.flattener = Flattener(self.fact_types,
in_memset=None, id_attr="id", enumerizer=self.enumerizer)
self.feature_applier = FeatureApplier(self.feature_set)
# self.relative_encoder = RelativeEncoder(self.fact_types, in_memset=None, id_attr='id')
# self.vectorizer = Vectorizer(val_types)
state_cls = self.state_cls = State(self)
@state_cls.register_transform(is_incremental=True, prereqs=['working_memory'])
def flat(state):
wm = state.get('working_memory')
flattener = self.flattener
return flattener(wm)
@state_cls.register_transform(is_incremental=len(self.extra_features)==0, prereqs=['flat'])
def flat_featurized(state):
flat = state.get('flat')
feature_applier = self.feature_applier
featurized_state = feature_applier(flat)
featurized_state = featurized_state.copy()
for extra_feature in self.extra_features:
featurized_state = extra_feature(self, state, featurized_state)
return featurized_state
@state_cls.register_transform(is_incremental=False, prereqs=['working_memory'])
def py_dict(state):
wm = state.get('working_memory')
return wm.as_dict(key_attr='id')
def __init__(self, encode_neighbors=True, **config):
# Parent defines learning-mechanism classes and args + action_chooser
super().__init__(**config)
self.how_lrn_mech = self.how_cls(self, **self.how_args)
self.process_lrn_mech = None
if(self.process_cls):
self.process_lrn_mech = self.process_cls(self, **self.process_args)
if(self.track_rollout_preseqs):
from .learning_mechs.process.process import PreseqTracker
self.rollout_preseq_tracker = PreseqTracker()
self.working_memory = MemSet()
        self.init_processors()
self.skills = {}
self.skill_apps_by_uid = {}
self.skill_apps_by_state_uid = {}
# self.implicit_negs = {}
self.skills_by_label = {}
self.prev_skill_app = None
self.episodic_memory = {}
self.group_annotations = []
def standardize_state(self, state, is_start=None, **kwargs):
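        """Coerce `state` (a dict, MemSet, or State) into this agent's state
        class, ensuring it has a '__uid__' and a 'working_memory' MemSet, and
        re-bind prev_skill_app's match facts to the new working memory."""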
if(not isinstance(state, self.state_cls)):
if(isinstance(state, State)):
state = self.state_cls(state.state_formats)
else:
state_uid = kwargs.get('state_uid', None)
if(isinstance(state, dict)):
# NOTE This is a fix for legacy attribute 'contentEditable'
for k,obj in state.items():
if('contentEditable' in obj):
obj['locked'] = not obj['contentEditable']
del obj['contentEditable']
state_uid = state.get("__uid__", None) if(state_uid is None) else state_uid
if self.should_find_neighbors:
state = encode_neighbors(state)
# print()
# for _id, obj in state.items():
# print(obj)
wm = self.memset_builder(state, MemSet())
elif(isinstance(state, MemSet)):
wm = state
else:
raise ValueError(f"Unrecognized State Type: \n{state}")
if(state_uid is None):
state_uid = f"S_{wm.long_hash()}"
state = self.state_cls({'__uid__' : state_uid, 'working_memory' : wm})
        # Ensure that prev_skill_app references the current state's working_memory
        prev_skill_app = getattr(self,'prev_skill_app',None)
        if(prev_skill_app):
            wm = state.get("working_memory")
            # Try to recover the facts matched by the previous skill app in the new state
            try:
                self.prev_skill_app.match = [wm.get_fact(id=m.id) for m in prev_skill_app.match]
            # However if that is impossible then ignore prev_skill_app
            except Exception:
# print([m.id for m in prev_skill_app.match])
# print(wm)
self.prev_skill_app = None
self.state = state
if(getattr(state, 'is_start',None) is None):
state.is_start = is_start
return state
def standardize_action(self, action):
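        """Coerce `action` into an Action and resolve its selection and
        action_type to their instances in working memory / the agent."""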
action = Action(action)
selection, action_type, inp = action.as_tuple()
try:
# print(selection, type(selection))
action.selection_inst = self.state.get('working_memory').get_fact(id=selection)
except KeyError:
# print(self.state.get('working_memory'))
raise KeyError(f"Bad Action: Element {selection!r} not found in state.")
# if(isinstance(action_type, str)):
action.action_type_inst = self.action_types[action_type]
return action
def standardize_arg_foci(self, arg_foci, kwargs={}):
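        """Resolve arg_foci (also accepted under the legacy kwargs
        'foci_of_attention' and 'args') from ids to working-memory facts."""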
# Allow for legacy name 'foci_of_attention'
if(arg_foci is None):
arg_foci = kwargs.get('foci_of_attention', None)
if(arg_foci is None):
arg_foci = kwargs.get('args', None)
if(arg_foci is None):
return None
new_arg_foci = []
wm = self.state.get('working_memory')
for fact in arg_foci:
if(isinstance(fact, str)):
fact = wm.get_fact(id=fact)
new_arg_foci.append(fact)
return new_arg_foci
def standardize_halt_policy(self, halt_policy):
pass
# ------------------------------------------------
# : Act, Act_All
    def _organize_mut_excl(self, in_process_grps):
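        """Filter in-process skill apps by their where-parts, then regroup
        each group into mutually exclusive subgroups by path prefix."""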
mut_excl_grps = []
for i, grp in enumerate(in_process_grps):
# print("<<", i)
filtered_grp = []
for sa in grp:
                # Skip apps whose match no longer passes the where-part check
                if(not sa.skill.where_lrn_mech.check_match(sa.state, sa.match)):
continue
sa.in_process = True
sa.ensure_when_pred()
filtered_grp.append(sa)
prefix_groups = group_by_path(filtered_grp)
me_grp = []
for pre_grp in prefix_groups.values():
me_grp.append(pre_grp)
mut_excl_grps.append(me_grp)
return mut_excl_grps
def _add_process_implicit_rewards(self, mut_excl_grps):
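        """Install implicit rewards among in-process skill apps: apps in
        disjoint alternative groups get implicit negatives against each
        other, and apps get implicit negatives against apps that precede
        them (which in turn get implicit no-rewards)."""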
# for i, disj_grps in enumerate(mut_excl_grps):
# for j, grp in enumerate(disj_grps):
# # Reset implicit rewards of this skill app on other
# # skill apps
# for sa_a in grp:
# sa_a.clear_implicit_rewards()
apps_so_far = []
for i, disj_grps in enumerate(mut_excl_grps):
for j, grp in enumerate(disj_grps):
# Add implicit negatives between skill_apps
# that are part of disjoint groups.
for k, other_grp in enumerate(disj_grps):
if(j == k):
continue
# other_grp = disj_grps[k]
for sa_a in grp:
for sa_b in other_grp:
# print("This doesn't go with this:")
# print("\t", a)
# print("\t", b)
                            sa_a.add_implicit_reward([sa_b], -1)
                for sa_a in grp:
                    # Add an implicit negative against each skill_app that
                    #  precedes those in the current group, and give each
                    #  preceding skill_app an implicit no-reward.
                    for sa_b in apps_so_far:
                        sa_a.add_implicit_reward([sa_b], -1)
                        sa_b.add_implicit_reward([sa_a], None)
# print("DISJ", disj_grps)
# print("CHAIN", list(chain(disj_grps)))
apps_so_far += chain(*disj_grps)
# for i, disj_grps in enumerate(mut_excl_grps):
# for j, grp in enumerate(disj_grps):
# # If a skill app already has reward == 1 then
# # apply it's implicit rewards.
# for sa_a in grp:
# if(getattr(sa_a, 'reward', 0) == 1):
# sa_a.apply_implicit_rewards()
def _add_conflict_certainty(self, skill_apps):
# Calculate certainty of each skill_app in a conflict set of possible
# next skill_apps. Helpful for choosing apps to show to user.
if(len(skill_apps) == 0):
return
# Add n_path_apps
for skill_app in skill_apps:
path = getattr(skill_app, 'path', None)
if(path is not None):
skill_app.n_path_apps = len(path.get_item().skill_apps)
        # Collect when_preds, in_process flags, and n_apps
when_preds = np.array([sa.when_pred for sa in skill_apps], dtype=np.float64)
in_process = np.array([getattr(sa,'in_process', False) for sa in skill_apps],dtype=np.bool_)
        n_apps = np.array([getattr(sa,'n_path_apps', 0) for sa in skill_apps], dtype=np.float64)
def is_mid_unordered_grp(sa):
path = getattr(sa, "path", None)
if(path):
return path.is_initial_unordered
# macro, meth_ind, item_ind, cov = path.steps[-1]
# return cov and len(cov) > 0
return False
mid_unord = np.array([is_mid_unordered_grp(sa) for sa in skill_apps],dtype=np.bool_)
# print("MID UNORD", mid_unord)
avg_iproc_pred, avg_iproc_n_apps = 0, 0
mask = in_process & (when_preds > 0)
if(np.sum(mask) > 0):
avg_iproc_pred = np.average(when_preds[mask])
avg_iproc_n_apps = np.average(n_apps[mask])
# Initial certainty is the when predictions
certainty = when_preds.copy()
# Reduce out-of-process certainties by a function of the certainties of
# in-process action's and their numbers of supporting skill_apps.
# If there are any mid-unordered group apps then double the reduction.
certainty[~in_process] /= (1.0 + max(avg_iproc_pred-1/(1+avg_iproc_n_apps), 0))*(1+mid_unord.any())
max_cert = np.max(certainty)
cert_diffs = max_cert-certainty
for sa, cert, cert_diff in zip(skill_apps, certainty, cert_diffs):
            sa.certainty = np.nan_to_num(cert, nan=0.0)
            sa.cert_diff = np.nan_to_num(cert_diff, nan=1.0)
def get_skill_applications(self, state,
is_start=None,
prob_uid=None,
eval_mode=False,
add_out_of_process=False,
ignore_filter=False,
add_conflict_certainty=False,
add_known=True,
hard_cert_thresh=None,
**kwargs):
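        """Collect candidate skill applications for `state`. Prefers
        "in-process" apps proposed by the process learning mechanism,
        falling back to (or, with add_out_of_process, augmented by) direct
        applications of every known skill, plus previously trained apps for
        this state. Optionally adds conflict certainty, filters, and sorts."""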
skill_apps = set()
if(prob_uid is None and is_start):
prob_uid = state.get("__uid__")
# print("\nGET SKILL APPS", state.get('__uid__')[:5])
# If there is a process learning mechanism then use it to
# generate "in-process" skill applications from its grammar.
apps_in_process = False
if(self.process_lrn_mech):
preseq_tracker = getattr(self,'rollout_preseq_tracker', None)
# try:
# preseq = preseq_tracker.get_good_preseq(state)
# print("--", preseq)
# except RuntimeError as e:
# print("--FAIL")
# print("PROB UID", prob_uid)
try:
in_process_grps = self.process_lrn_mech.get_next_skill_apps(
state, preseq_tracker,
prob_uid=prob_uid, group_by_depends=True)
            except Exception:
in_process_grps = []
filtered_skill_apps = []
            # Regroup in_process_grps into mut_excl_grps of the form
            #  [ [[...skill_apps for items0],[...skill_apps for items1]], [[...skill_apps for items2]] ]
            #  where items0 and items1 belong to disjoint methods that share a macro,
            #  and items2 is another set of items contiguous with those.
            mut_excl_grps = self._organize_mut_excl(in_process_grps)
if("process" in self.implicit_reward_kinds):
self._add_process_implicit_rewards(mut_excl_grps)
apps_in_process = len(in_process_grps) > 0
if(not apps_in_process):
print("NOT IN PROCESS", state.get("__uid__")[:5], f"PROB={prob_uid[:5] if prob_uid else prob_uid}")
# print(self.rollout_preseq_tracker.get_good_preseq(state))
# if(len(in_process_grps) > 1):
# print("in_process_grps", in_process_grps)
# Find the best skill_apps in each disjunction and record the
# maximum value among them.
vals = [-2]*len(mut_excl_grps)
contig_grps = [None]*len(mut_excl_grps)
for i, disj_grps in enumerate(mut_excl_grps):
# Find the best group among disj_grps on the basis of the
# maximum skill_app reward (or predicted reward) in the group.
best_d_ind = -1
best_d_val = -2
disj_apps = []
for j, grp in enumerate(disj_grps):
# Prefer to compute val on basis of the non-optional
# members of the group
# def is_optional(sa):
# macro, meth_ind, item_ind, _ = sa.path.steps[-1]
# return macro.methods[meth_ind].optional_mask[item_ind]
# if(any([not is_optional(sa) for sa in grp])):
# print("ONLY NO OPTS")
# itr_grp = [sa for sa in grp if not is_optional(sa)]
# else:
# itr_grp = grp
max_val = -1
for skill_app in grp:
val = getattr(skill_app, 'reward', None)
if(val is None):
val = getattr(skill_app, 'when_pred', None)
else:
# Prefer verified skill_apps over predictions
val *= 2
max_val = max(val, max_val)
# If max_val is not that different than the best one
# then keep skill_apps from both disjoint groups so
# that the user can resolve the ambiguity.
diff = max_val-best_d_val
if(abs(diff) < .25):
disj_apps += grp
elif(diff > 0):
disj_apps = [*grp]
if(max_val > best_d_val):
best_d_val = max_val
best_d_ind = j
vals[i] = best_d_val
contig_grps[i] = disj_apps
            # Determine the subset of skill_apps to present: just the
            #  highest-prediction group, unless there is some ambiguity
            #  (i.e. the difference in vals is < .25) as to which group is best.
            best_val = -2
            skill_apps = set()
            for i, val in enumerate(vals):
                diff = val-best_val
                # When nearly tied, keep apps from both groups so the user
                #  can resolve the ambiguity (mirrors the grp logic above).
                if(abs(diff) < .25):
                    skill_apps = skill_apps.union(contig_grps[i])
                elif(diff > 0):
                    skill_apps = set([*contig_grps[i]])
if(val > best_val):
best_val = val
# When in eval mode just apply the first skill_app group
# which has positive reward
if(eval_mode and val > 0):
break
# if(len(in_process_grps) >= 1):
# best_grp_pred = -2
# best_grp_ind = -1
# for i, in_process_apps in enumerate(in_process_grps):
# if(len(in_process_apps) == 0):
# continue
# total = 0
# for skill_app in in_process_apps:
# val = getattr(skill_app, 'reward', None)
# if(val is None):
# val = getattr(skill_app, 'when_pred', None)
# total += val
# # if(reward != -1):
# # filtered_skill_apps.append(skill_app)
# avg = total / len(in_process_apps)
# if(avg > best_grp_pred):
# best_grp_pred = avg
# best_grp_ind = i
# # if(best_grp_pred > 0):
# # print("BEST GRP IND", best_grp_ind, "/", len(in_process_grps), best_grp_pred)
# if(best_grp_ind >= 0):
# skill_apps = list(in_process_grps[best_grp_ind])
# if(len(filtered_skill_apps) > 0):
# # if(len(in_process_grps) > 1):
# # print("<<", filtered_skill_apps)
# skill_apps = filtered_skill_apps
# skills = {sa.skill for sa in in_process_apps}
# for skill in skills:
# for skill_app in skill.get_applications(state):
# if(skill_app in in_process_apps):
# path = in_process_apps[skill_app]
# skill_app.path = path
# skill_apps.append(skill_app)
if(add_out_of_process or len(skill_apps) == 0):
# print("BACKUP", len(skill_apps), add_out_of_process)
for skill in self.skills.values():
for skill_app in skill.get_applications(state):
if (skill_app not in skill_apps):
skill_apps.add(skill_app)
skill_app.in_process = False
if(prob_uid is not None):
skill_app.prob_uid = prob_uid
# print('---', add_out_of_process)
# for skill_app in skill_apps:
# skill, match, when_pred = skill_app.skill, skill_app.match, skill_app.when_pred
# when_pred = 1 if when_pred is None else when_pred
# print(f"{getattr(skill_app, 'in_process', False)}{' ' if (when_pred >= 0) else ''}{when_pred:.2f} {skill_app}")
# if(not apps_in_process):
# print("AN SKILL APPS", len(skill_apps))
# print("BN SKILL APPS", len(skill_apps))
if(add_known):
s_uid = state.get('__uid__')
known_sas = self.skill_apps_by_state_uid.get(s_uid, [])
for skill_app in known_sas:
rew = skill_app.reward
if(rew is None):
if(getattr(skill_app, "removed", False) and
skill_app in skill_apps):
skill_apps.remove(skill_app)
continue
skill_apps.add(skill_app)
skill_app.ensure_when_pred()
# # Always show skill apps which have positive reward
# if(rew > 0):
# skill_apps.add(skill_app)
# # Don't keep skill apps which have negative reward
# elif(rew < 0 and
# skill_app in skill_apps):
# skill_apps.remove(skill_app)
if(add_conflict_certainty):
self._add_conflict_certainty(skill_apps)
if(hard_cert_thresh is not None):
skill_apps = [sa for sa in skill_apps if getattr(sa,"explicit_reward", None) is not None or sa.certainty >= hard_cert_thresh]
if(not ignore_filter):
# print("DO FILTER", ignore_filter)
# print("BEFORE FILTER", [getattr(sa,'when_pred', None) for sa in skill_apps])
skill_apps = self.action_filter(state, skill_apps, **self.action_filter_args)
# print("AFTER FILTER:")
# for sa in skill_apps:
# print(sa.reward, getattr(sa,'when_pred', None), skill_app)
# print("CN SKILL APPS", len(skill_apps))
skill_apps = self.which_cls.sort(state, skill_apps)