-
Notifications
You must be signed in to change notification settings - Fork 11
Expand file tree
/
Copy pathmodel_run.py
More file actions
206 lines (179 loc) · 7.3 KB
/
model_run.py
File metadata and controls
206 lines (179 loc) · 7.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
"""
Model Runs are deprecated and will be removed in a future version of the python client.
It is now possible to upload model predictions without needing to create a model run.
For example::
import nucleus
client = nucleus.NucleusClient(YOUR_SCALE_API_KEY)
prediction_1 = nucleus.BoxPrediction(label="label", x=0, y=0, width=10, height=10, reference_id="1", confidence=0.9, class_pdf={'label': 0.9, 'other_label': 0.1})
prediction_2 = nucleus.BoxPrediction(label="label", x=0, y=0, width=10, height=10, reference_id="2", confidence=0.2, class_pdf={'label': 0.2, 'other_label': 0.8})
model = client.add_model(name="My Model", reference_id="My-CNN", metadata={"timestamp": "121012401"})
response = dataset.upload_predictions(model, [prediction_1, prediction_2])
"""
from typing import List, Optional, Union
import requests
from nucleus.annotation import check_all_mask_paths_remote
from nucleus.job import AsyncJob
from nucleus.utils import (
format_prediction_response,
serialize_and_write_to_presigned_url,
)
from .constants import (
ANNOTATIONS_KEY,
DEFAULT_ANNOTATION_UPDATE_MODE,
REQUEST_IDS_KEY,
UPDATE_KEY,
)
from .prediction import (
BoxPrediction,
CuboidPrediction,
PolygonPrediction,
SegmentationPrediction,
from_json,
)
class ModelRun:
    """
    Deprecated handle for a Nucleus model run.

    This class is deprecated and will be removed from the python client.
    Upload predictions directly via ``Dataset.upload_predictions`` with a
    ``Model`` instead (see the module docstring for an example).
    """

    def __init__(self, model_run_id: str, dataset_id: str, client):
        # `client` is the NucleusClient used for every API request below.
        self.model_run_id = model_run_id
        self._client = client
        self.dataset_id = dataset_id

    def __repr__(self):
        return f"ModelRun(model_run_id='{self.model_run_id}', dataset_id='{self.dataset_id}', client={self._client})"

    def __eq__(self, other):
        # Two ModelRun handles are equal when they refer to the same run
        # through the same client. Comparing against an unrelated type
        # returns NotImplemented (so Python falls back to identity) instead
        # of raising AttributeError as the previous implementation did.
        # NOTE: defining __eq__ without __hash__ makes instances unhashable;
        # left as-is to preserve existing behavior.
        if not isinstance(other, ModelRun):
            return NotImplemented
        return (
            self.model_run_id == other.model_run_id
            and self._client == other._client
        )

    def info(self) -> dict:
        """
        Provides information about the Model Run:
        model_id -- Model Id corresponding to the run
        name -- A human-readable name of the model project.
        status -- Status of the Model Run.
        metadata -- An arbitrary metadata blob specified for the run.
        :return:
        {
            "model_id": str,
            "name": str,
            "status": str,
            "metadata": Dict[str, Any],
        }
        """
        return self._client.model_run_info(self.model_run_id)

    def commit(self, payload: Optional[dict] = None) -> dict:
        """
        Commits the model run. Starts matching algorithm defined by payload.
        class_agnostic -- A flag to specify if matching algorithm should be class-agnostic or not.
                          Default value: True
        allowed_label_matches -- An optional list of AllowedMatch objects to specify allowed matches
                                 for ground truth and model predictions.
                                 If specified, 'class_agnostic' flag is assumed to be False
        Type 'AllowedMatch':
        {
            ground_truth_label: string,       # A label for ground truth annotation.
            model_prediction_label: string,   # A label for model prediction that can be matched with
                                              # corresponding ground truth label.
        }
        payload:
        {
            "class_agnostic": boolean,
            "allowed_label_matches": List[AllowedMatch],
        }
        :return: {"model_run_id": str}
        """
        # Avoid a mutable default argument; an empty payload means
        # "use server-side defaults".
        if payload is None:
            payload = {}
        return self._client.commit_model_run(self.model_run_id, payload)

    def predict(
        self,
        annotations: List[
            Union[
                BoxPrediction,
                PolygonPrediction,
                CuboidPrediction,
                SegmentationPrediction,
            ]
        ],
        update: Optional[bool] = DEFAULT_ANNOTATION_UPDATE_MODE,
        asynchronous: bool = False,
    ) -> Union[dict, AsyncJob]:
        """
        Uploads model outputs as predictions for a model_run. Returns info about the upload.

        :param annotations: predictions to upload for this run.
        :param update: whether existing predictions with the same ids are overwritten.
        :param asynchronous: when True, payloads are serialized to a presigned URL
            and processed server-side; an AsyncJob handle is returned instead of
            the synchronous upload summary.
        :return:
        {
            "model_run_id": str,
            "predictions_processed": int,
            "predictions_ignored": int,
        }
        """
        if asynchronous:
            # Async ingest requires segmentation mask paths to be remote URLs,
            # since the server cannot read local files.
            check_all_mask_paths_remote(annotations)

            request_ids = serialize_and_write_to_presigned_url(
                annotations, self.dataset_id, self._client, can_shard=True
            )
            response = self._client.make_request(
                payload={REQUEST_IDS_KEY: request_ids, UPDATE_KEY: update},
                route=f"modelRun/{self.model_run_id}/predict?async=1",
            )
            return AsyncJob.from_json(response, self._client)
        return self._client.predict(
            model_run_id=self.model_run_id,
            annotations=annotations,
            update=update,
        )

    def iloc(self, i: int):
        """
        Returns Model Run Info For Dataset Item by its number.
        :param i: absolute number of Dataset Item for a dataset corresponding to the model run.
        :return: List[Union[BoxPrediction, PolygonPrediction, CuboidPrediction, SegmentationPrediction]]
        """
        response = self._client.predictions_iloc(self.model_run_id, i)
        return format_prediction_response(response)

    def refloc(self, reference_id: str):
        """
        Returns Model Run Info For Dataset Item by its reference_id.
        :param reference_id: reference_id of a dataset item.
        :return: List[Union[BoxPrediction, PolygonPrediction, CuboidPrediction, SegmentationPrediction]]
        """
        response = self._client.get(
            f"modelRun/{self.model_run_id}/refloc/{reference_id}"
        )
        return format_prediction_response(response)

    def loc(self, dataset_item_id: str):
        """
        Returns Model Run Info For Dataset Item by its id.
        :param dataset_item_id: internally controlled id for dataset item.
        :return:
        {
            "annotations": List[Box2DPrediction],
        }
        """
        response = self._client.predictions_loc(
            self.model_run_id, dataset_item_id
        )
        return format_prediction_response(response)

    def prediction_loc(self, reference_id: str, annotation_id: str):
        """
        Returns info for single Prediction by its reference id and annotation id.
        :param reference_id: the user specified id for the image
        :param annotation_id: the user specified id for the prediction, or if one was not provided, the Scale internally generated id for the prediction
        :return:
        BoxPrediction | PolygonPrediction | CuboidPrediction
        """
        response = self._client.make_request(
            {},
            f"modelRun/{self.model_run_id}/prediction/loc/{reference_id}/{annotation_id}",
            requests.get,
        )
        return from_json(response)

    def ungrouped_export(self):
        # Fetches every prediction of the run in one flat (ungrouped) list
        # and normalizes the payload shape expected by the formatter.
        json_response = self._client.make_request(
            payload={},
            route=f"modelRun/{self.model_run_id}/ungrouped",
            requests_command=requests.get,
        )
        return format_prediction_response({ANNOTATIONS_KEY: json_response})