-
Notifications
You must be signed in to change notification settings - Fork 11
Expand file tree
/
Copy pathscene.py
More file actions
736 lines (632 loc) · 26.7 KB
/
scene.py
File metadata and controls
736 lines (632 loc) · 26.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
import json
import warnings
from abc import ABC
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
from nucleus.constants import (
FRAME_FORMAT_KEY,
FRAME_RATE_KEY,
FRAMES_KEY,
IMAGE_LOCATION_KEY,
LENGTH_KEY,
METADATA_KEY,
NUM_SENSORS_KEY,
POINTCLOUD_LOCATION_KEY,
REFERENCE_ID_KEY,
TRACKS_KEY,
UPLOAD_TO_SCALE_KEY,
VIDEO_LOCATION_KEY,
VIDEO_URL_KEY,
)
from nucleus.track import Track
from .annotation import is_local_path
from .dataset_item import (
DatasetItem,
DatasetItemType,
check_for_duplicate_reference_ids,
)
if TYPE_CHECKING:
from . import NucleusClient
class Frame:
    """Collection of sensor data pertaining to a single time step.

    For 3D data, each Frame houses a sensor-to-data mapping and must have exactly
    one pointcloud with any number of camera images.

    Parameters:
        **kwargs (Dict[str, :class:`DatasetItem`]): Mappings from sensor name
          to dataset item. Each frame of a lidar scene must contain exactly one
          pointcloud and any number of images (e.g. from different angles).

    Refer to our `guide to uploading 3D data
    <https://docs.nucleus.scale.com/docs/uploading-3d-data>`_ for more info!
    """

    def __init__(self, **kwargs):
        self.items: Dict[str, DatasetItem] = {}
        for key, value in kwargs.items():
            assert isinstance(key, str), "All keys must be names of sensors"
            assert isinstance(
                value, DatasetItem
            ), f"All values must be DatasetItems, instead got type {type(value)}"
            self.items[key] = value

        # Reference IDs must be unique across all items in a frame.
        check_for_duplicate_reference_ids(list(self.items.values()))

    def __repr__(self) -> str:
        return f"Frame(items={self.items})"

    def __eq__(self, other):
        # Compare the full sensor -> item mapping. The previous key-by-key
        # loop only verified that self.items was a subset of other.items, so
        # a frame with fewer sensors compared equal to one with extra sensors
        # (and equality was asymmetric). Dict equality checks both directions.
        return self.items == other.items

    def add_item(self, item: DatasetItem, sensor_name: str) -> None:
        """Adds DatasetItem object to frame as sensor data.

        Parameters:
            item (:class:`DatasetItem`): Pointcloud or camera image item to add.
            sensor_name: Name of the sensor, e.g. "lidar" or "front_cam."
        """
        self.items[sensor_name] = item

    def get_item(self, sensor_name: str) -> DatasetItem:
        """Fetches the DatasetItem object associated with the given sensor.

        Parameters:
            sensor_name: Name of the sensor, e.g. "lidar" or "front_cam."

        Returns:
            :class:`DatasetItem`: DatasetItem object pertaining to the sensor.

        Raises:
            ValueError: If the frame has no item for ``sensor_name``.
        """
        if sensor_name not in self.items:
            raise ValueError(
                f"This frame does not have a {sensor_name} sensor"
            )
        return self.items[sensor_name]

    def get_items(self) -> List[DatasetItem]:
        """Fetches all items in the frame.

        Returns:
            List[:class:`DatasetItem`]: List of all DatasetItem objects in the frame.
        """
        return list(self.items.values())

    def get_sensors(self) -> List[str]:
        """Fetches all sensor names of the frame.

        Returns:
            List of all sensor names of the frame."""
        return list(self.items.keys())

    @classmethod
    def from_json(cls, payload: dict):
        """Instantiates frame object from schematized JSON dict payload."""
        items = {
            sensor: DatasetItem.from_json(item)
            for sensor, item in payload.items()
        }
        return cls(**items)

    def to_payload(self) -> dict:
        """Serializes frame object to schematized JSON dict."""
        return {
            sensor: dataset_item.to_payload(is_scene=True)
            for sensor, dataset_item in self.items.items()
        }
@dataclass
class Scene(ABC):
    """Abstract base class for a sequence of :class:`Frame` objects.

    Attributes:
        reference_id: User-specified identifier to reference the scene.
        frames: Frames initially in the scene, indexed 0..N-1 in given order.
        metadata: Optional metadata to attach to the scene (``None`` becomes ``{}``).
        tracks: Tracks associated with the scene.
        skip_validate: If True, skips structural validation on construction.
    """

    reference_id: str
    frames: List[Frame] = field(default_factory=list)
    metadata: Optional[dict] = field(default_factory=dict)
    tracks: List[Track] = field(default_factory=list)
    skip_validate: Optional[bool] = False

    def __post_init__(self):
        # frames_dict (index -> Frame) is the source of truth after init:
        # add_frame/add_item mutate it rather than the self.frames list.
        self.sensors = set(
            flatten([frame.get_sensors() for frame in self.frames])
        )
        self.frames_dict = dict(enumerate(self.frames))
        if self.metadata is None:
            self.metadata = {}

        if not self.skip_validate:
            self.validate()

    def __eq__(self, other):
        # Compare via get_frames() (backed by frames_dict) rather than the
        # constructor-time self.frames list: frames added after construction
        # through add_frame/add_item exist only in frames_dict and were
        # previously ignored by equality.
        return all(
            [
                self.reference_id == other.reference_id,
                self.get_frames() == other.get_frames(),
                self.metadata == other.metadata,
                self.tracks == other.tracks,
            ]
        )

    @property
    def length(self) -> int:
        """Number of frames in the scene."""
        return len(self.frames_dict)

    @property
    def num_sensors(self) -> int:
        """Number of sensors in the scene."""
        return len(self.get_sensors())

    def validate(self):
        # TODO: make private
        assert self.length > 0, "Must have at least 1 frame in a scene"
        all_items = []
        for frame in self.frames_dict.values():
            assert isinstance(
                frame, Frame
            ), "Each frame in a scene must be a Frame object"
            all_items.extend(frame.get_items())

        # Reference IDs must be unique across every item of every frame.
        check_for_duplicate_reference_ids(all_items)

    def add_item(
        self, index: int, sensor_name: str, item: DatasetItem
    ) -> None:
        """Adds DatasetItem to the specified frame as sensor data.

        Parameters:
            index: Serial index of the frame to which to add the item.
            item (:class:`DatasetItem`): Pointcloud or camera image item to add.
            sensor_name: Name of the sensor, e.g. "lidar" or "front_cam."
        """
        self.sensors.add(sensor_name)
        if index not in self.frames_dict:
            new_frame = Frame(**{sensor_name: item})
            self.frames_dict[index] = new_frame
        else:
            self.frames_dict[index].items[sensor_name] = item

    def add_frame(
        self, frame: Frame, index: int, update: bool = False
    ) -> None:
        """Adds frame to scene at the specified index.

        Parameters:
            frame (:class:`Frame`): Frame object to add.
            index: Serial index at which to add the frame.
            update: Whether to overwrite the frame at the specified index, if it
              exists. Default is False.
        """
        if (
            index not in self.frames_dict
            or index in self.frames_dict
            and update
        ):
            self.frames_dict[index] = frame
            self.sensors.update(frame.get_sensors())

    def get_frame(self, index: int) -> Frame:
        """Fetches the Frame object at the specified index.

        Parameters:
            index: Serial index for which to retrieve the Frame.

        Return:
            :class:`Frame`: Frame object at the specified index.

        Raises:
            ValueError: If no frame exists at ``index``.
        """
        if index not in self.frames_dict:
            raise ValueError(
                f"This scene does not have a frame at index {index}"
            )
        return self.frames_dict[index]

    def get_frames(self) -> List[Frame]:
        """Fetches a sorted list of Frames of the scene.

        Returns:
            List[:class:`Frame`]: List of Frames, sorted by index ascending.
        """
        return [
            frame
            for _, frame in sorted(
                self.frames_dict.items(), key=lambda x: x[0]
            )
        ]

    def get_sensors(self) -> List[str]:
        """Fetches all sensor names of the scene.

        Returns:
            List of all sensor names associated with frames in the scene."""
        return list(self.sensors)

    def get_item(self, index: int, sensor_name: str) -> DatasetItem:
        """Fetches the DatasetItem object of the given frame and sensor.

        Parameters:
            index: Serial index of the frame from which to fetch the item.
            sensor_name: Name of the sensor, e.g. "lidar" or "front_cam."

        Returns:
            :class:`DatasetItem`: DatasetItem object of the frame and sensor.
        """
        frame = self.get_frame(index)
        return frame.get_item(sensor_name)

    def get_items_from_sensor(self, sensor_name: str) -> List[DatasetItem]:
        """Fetches all DatasetItem objects of the given sensor.

        Parameters:
            sensor_name: Name of the sensor, e.g. "lidar" or "front_cam."

        Returns:
            List[:class:`DatasetItem`]: List of DatasetItem objects associated
            with the specified sensor. Contains ``None`` for frames where the
            sensor is absent.
        """
        if sensor_name not in self.sensors:
            raise ValueError(
                f"This scene does not have a {sensor_name} sensor"
            )
        items_from_sensor = []
        for frame in self.frames_dict.values():
            try:
                sensor_item = frame.get_item(sensor_name)
                items_from_sensor.append(sensor_item)
            except ValueError:
                # This sensor is not present at current frame
                items_from_sensor.append(None)
        return items_from_sensor

    def get_items(self) -> List[DatasetItem]:
        """Fetches all items in the scene.

        Returns:
            List[:class:`DatasetItem`]: Unordered list of all DatasetItem
            objects in the scene.
        """
        return flatten([frame.get_items() for frame in self.get_frames()])

    def info(self):
        """Fetches information about the scene.

        Returns:
            Payload containing::

                {
                    "reference_id": str,
                    "length": int,
                    "num_sensors": int
                }
        """
        return {
            REFERENCE_ID_KEY: self.reference_id,
            LENGTH_KEY: self.length,
            NUM_SENSORS_KEY: self.num_sensors,
        }

    def validate_frames_dict(self):
        # TODO: make private
        # Frame indices must be exactly 0..N-1 (no gaps, no negatives).
        is_continuous = set(range(len(self.frames_dict))) == set(
            self.frames_dict.keys()
        )
        assert (
            is_continuous
        ), "frames must be 0-indexed and continuous (no missing frames)"

    @classmethod
    def from_json(
        cls,
        payload: dict,
        client: Optional["NucleusClient"] = None,
        skip_validate: Optional[bool] = False,
    ):
        """Instantiates scene object from schematized JSON dict payload."""
        frames_payload = payload.get(FRAMES_KEY, [])
        frames = [Frame.from_json(frame) for frame in frames_payload]
        tracks_payload = payload.get(TRACKS_KEY, [])
        # Tracks need a client connection to deserialize; without a client
        # they are simply dropped.
        tracks = (
            [
                Track.from_json(track, connection=client.connection)
                for track in tracks_payload
            ]
            if client
            else []
        )
        return cls(
            reference_id=payload[REFERENCE_ID_KEY],
            frames=frames,
            metadata=payload.get(METADATA_KEY, {}),
            skip_validate=skip_validate,
            tracks=tracks,
        )

    def to_payload(self) -> dict:
        """Serializes scene object to schematized JSON dict."""
        self.validate_frames_dict()
        ordered_frames = self.get_frames()
        frames_payload = [frame.to_payload() for frame in ordered_frames]
        payload: Dict[str, Any] = {
            REFERENCE_ID_KEY: self.reference_id,
            FRAMES_KEY: frames_payload,
        }
        if self.tracks:
            payload[TRACKS_KEY] = [track.to_payload() for track in self.tracks]
        if self.metadata:
            payload[METADATA_KEY] = self.metadata
        return payload

    def to_json(self) -> str:
        """Serializes scene object to schematized JSON string."""
        return json.dumps(self.to_payload(), allow_nan=False)
@dataclass
class LidarScene(Scene):
    """Sequence of lidar pointcloud and camera images over time.

    Nucleus 3D datasets are comprised of LidarScenes, which are sequences of
    lidar pointclouds and camera images over time. These sequences are in turn
    comprised of :class:`Frames <Frame>`.

    By organizing data across multiple sensors over time, LidarScenes make it
    easier to interpret pointclouds, allowing you to see objects move over time
    by clicking through frames and providing context in the form of corresponding
    images.

    You can think of scenes and frames as nested groupings of sensor data across
    time:

    * LidarScene for a given location
        * Frame at timestep 0
            * DatasetItem of pointcloud
            * DatasetItem of front camera image
            * DatasetItem of rear camera image
        * Frame at timestep 1
            * ...
        * ...
    * LidarScene for another location
        * ...

    LidarScenes are uploaded to a :class:`Dataset` with any accompanying
    metadata. Frames do not accept metadata, but each of its constituent
    :class:`DatasetItems <DatasetItem>` does.

    Note: Uploads with a different number of frames/items will error out (only
    on scenes that now differ). Existing scenes are expected to retain the
    same structure, i.e. the same number of frames, and same items per frame.
    If a scene definition is changed (for example, additional frames added) the
    update operation will be ignored. If you would like to alter the structure
    of a scene, please delete the scene and re-upload.

    Parameters:
        reference_id (str): User-specified identifier to reference the scene.
        frames (Optional[List[:class:`Frame`]]): List of frames to be a part of
          the scene. A scene can be created before frames or items have been
          added to it, but must be non-empty when uploading to a :class:`Dataset`.
        metadata (Optional[Dict]):
          Optional metadata to include with the scene.
          Coordinate metadata may be provided to enable the Map Chart in the Nucleus Dataset charts page.
          These values can be specified as `{ "lat": 52.5, "lon": 13.3, ... }`.
          Context Attachments may be provided to display the attachments side by side with the dataset
          item in the Detail View by specifying
          `{ "context_attachments": [ { "attachment": 'https://example.com/1' }, { "attachment": 'https://example.com/2' }, ... ] }`.

    Refer to our `guide to uploading 3D data
    <https://docs.nucleus.scale.com/docs/uploading-3d-data>`_ for more info!
    """

    def __repr__(self) -> str:
        return f"LidarScene(reference_id='{self.reference_id}', frames={self.get_frames()}, metadata={self.metadata})"

    def validate(self):
        # TODO: make private
        super().validate()

        # Collect the names of every sensor that ever carries a pointcloud;
        # across the whole scene there must be exactly one such sensor.
        pointcloud_sensors = set()
        for frame in self.frames_dict.values():
            for sensor, item in frame.items.items():
                if item.type == DatasetItemType.POINTCLOUD:
                    pointcloud_sensors.add(sensor)
        assert (
            len(pointcloud_sensors) == 1
        ), "Each lidar scene must have exactly one lidar sensor"

        # Additionally, every individual frame must carry exactly one
        # pointcloud item (no frame may omit or duplicate the lidar sweep).
        for frame in self.frames_dict.values():
            pointcloud_count = sum(
                1
                for item in frame.get_items()
                if item.type == DatasetItemType.POINTCLOUD
            )
            assert (
                pointcloud_count == 1
            ), "Each frame of a lidar scene must have exactly 1 pointcloud"
def flatten(t):
    """Flatten an iterable of iterables into a single list (one level deep)."""
    flat = []
    for sublist in t:
        flat.extend(sublist)
    return flat
@dataclass
class VideoScene(ABC):
    """Video or sequence of images over time.

    Nucleus video datasets are comprised of VideoScenes. These can be
    comprised of a single video, or a sequence of :class:`DatasetItems <DatasetItem>`
    which are equivalent to frames.

    VideoScenes are uploaded to a :class:`Dataset` with any accompanying
    metadata. Each of :class:`DatasetItems <DatasetItem>` representing a frame
    also accepts metadata.

    Note: Updates with different items will error out (only on scenes that
    now differ). Existing video are expected to retain the same frames, and only
    metadata can be updated. If a video definition is changed (for example,
    additional frames added) the update operation will be ignored. If you would
    like to alter the structure of a video scene, please delete the scene and
    re-upload.

    Parameters:
        reference_id (str): User-specified identifier to reference the scene.
        frame_rate (Optional[int]): Required if uploading items. Frame rate of the video.
        video_location (Optional[str]): Required if not uploading items. The remote URL
          containing the video MP4. Remote formats supported include any URL (``http://``
          or ``https://``) or URIs for AWS S3, Azure, or GCS (i.e. ``s3://``, ``gcs://``).
        items (Optional[List[:class:`DatasetItem`]]): Required if not uploading video_location.
          List of items representing frames, to be a part of the scene. A scene can be created
          before items have been added to it, but must be non-empty when uploading to
          a :class:`Dataset`. A video scene can contain a maximum of 3000 items.
        metadata (Optional[Dict]):
          Optional metadata to include with the scene.
          Coordinate metadata may be provided to enable the Map Chart in the Nucleus Dataset charts page.
          These values can be specified as `{ "lat": 52.5, "lon": 13.3, ... }`.
          Context Attachments may be provided to display the attachments side by side with the dataset
          item in the Detail View by specifying
          `{ "context_attachments": [ { "attachment": 'https://example.com/1' }, { "attachment": 'https://example.com/2' }, ... ] }`.
        frame_format (Optional[str]):
          Format for extracted video frames. When uploading a video_location, this determines
          the format used when extracting frames from the video. Valid values are "jpeg" (default)
          or "png" for lossless frame extraction.

    Refer to our `guide to uploading video data
    <https://nucleus.scale.com/docs/uploading-video-data>`_ for more info!
    """

    reference_id: str
    frame_rate: Optional[int] = None
    video_location: Optional[str] = None
    items: List[DatasetItem] = field(default_factory=list)
    metadata: Optional[dict] = field(default_factory=dict)
    attachment_type: Optional[str] = None
    tracks: List[Track] = field(default_factory=list)
    use_privacy_mode: bool = False
    frame_format: Optional[str] = None

    def __post_init__(self):
        if self.attachment_type:
            warnings.warn(
                "The attachment_type parameter is no longer required and will be deprecated soon.",
                DeprecationWarning,
            )
        if self.metadata is None:
            self.metadata = {}

    def __eq__(self, other):
        return all(
            [
                self.reference_id == other.reference_id,
                self.items == other.items,
                self.video_location == other.video_location,
                self.metadata == other.metadata,
                self.tracks == other.tracks,
            ]
        )

    @property
    def length(self) -> int:
        """Gets number of items in the scene for videos uploaded with an array of images."""
        return len(self.items)

    def validate(self):
        # TODO: make private
        assert (
            self.items or self.video_location
        ), "Please upload either a video_location or an array of dataset items representing frames"
        if self.items:
            # Guard against frame_rate=None before the numeric comparison, so
            # a missing frame rate fails with this message instead of raising
            # a TypeError from `None > 0`.
            assert (
                self.frame_rate is not None and self.frame_rate > 0
            ), "When uploading an array of items frame rate must be at least 1"
            assert (
                self.length > 0
            ), "When uploading an array of items scene must have a list of items of length at least 1"
            if not self.use_privacy_mode:
                assert (
                    not self.video_location
                ), "No video location is accepted when uploading an array of items unless you are using privacy mode"
            for item in self.items:
                assert isinstance(
                    item, DatasetItem
                ), "Each item in a scene must be a DatasetItem object"
                assert (
                    item.image_location is not None
                ), "Each item in a video scene must have an image_location"
        else:
            assert (
                not self.frame_rate
            ), "No frame rate is accepted when uploading a video_location"
            assert (
                not self.items
            ), "No list of items is accepted when uploading a video_location unless you are using privacy mode"

    def add_item(
        self,
        item: DatasetItem,
        index: Optional[int] = None,
        update: bool = False,
    ) -> None:
        """Adds DatasetItem to the specified index for videos uploaded as an array of images.

        Parameters:
            item (:class:`DatasetItem`): Video item to add.
            index: Serial index at which to add the item. Defaults to appending.
            update: Whether to overwrite the item at the specified index, if it
              exists. Default is False.
        """
        if not self.use_privacy_mode:
            assert (
                not self.video_location
            ), "Cannot add item to a video without items"
        if index is None:
            index = len(self.items)
        assert (
            0 <= index <= len(self.items)
        ), f"Video scenes must be contiguous so index must be at least 0 and at most {len(self.items)}."
        if index < len(self.items) and update:
            self.items[index] = item
        else:
            self.items.append(item)

    def get_item(self, index: int) -> DatasetItem:
        """Fetches the DatasetItem at the specified index for videos uploaded as an array of images.

        Parameters:
            index: Serial index for which to retrieve the DatasetItem.

        Return:
            :class:`DatasetItem`: DatasetItem at the specified index.

        Raises:
            ValueError: If ``index`` is out of range.
        """
        if not self.use_privacy_mode:
            # This accessor only makes sense for scenes built from items, not
            # from a remote video_location.
            assert (
                not self.video_location
            ), "Cannot get item from a video without items"
        # Use >= so that index == len(self.items) raises the intended
        # ValueError instead of leaking an IndexError from list indexing.
        if index < 0 or index >= len(self.items):
            raise ValueError(
                f"This scene does not have an item at index {index}"
            )
        return self.items[index]

    def get_items(self) -> List[DatasetItem]:
        """Fetches a sorted list of DatasetItems of the scene for videos uploaded as an array of images.

        Returns:
            List[:class:`DatasetItem`]: List of DatasetItems, sorted by index ascending.
        """
        if not self.use_privacy_mode:
            assert (
                not self.video_location
            ), "Cannot get items from a video without items"
        return self.items

    def info(self):
        """Fetches information about the video scene.

        Returns:
            Payload containing::

                {
                    "reference_id": str,
                    "length": Optional[int],
                    "frame_rate": int,
                    "video_url": Optional[str],
                }
        """
        payload: Dict[str, Any] = {
            REFERENCE_ID_KEY: self.reference_id,
        }
        if self.frame_rate:
            payload[FRAME_RATE_KEY] = self.frame_rate
        if self.video_location:
            payload[VIDEO_URL_KEY] = self.video_location
        if self.items:
            payload[LENGTH_KEY] = self.length
        return payload

    @classmethod
    def from_json(
        cls, payload: dict, client: Optional["NucleusClient"] = None
    ):
        """Instantiates scene object from schematized JSON dict payload."""
        items_payload = payload.get(FRAMES_KEY, [])
        items = [DatasetItem.from_json(item) for item in items_payload]
        tracks_payload = payload.get(TRACKS_KEY, [])
        # Tracks need a client connection to deserialize; without a client
        # they are simply dropped.
        tracks = (
            [
                Track.from_json(track, connection=client.connection)
                for track in tracks_payload
            ]
            if client
            else []
        )
        return cls(
            reference_id=payload[REFERENCE_ID_KEY],
            frame_rate=payload.get(FRAME_RATE_KEY, None),
            items=items,
            metadata=payload.get(METADATA_KEY, {}),
            video_location=payload.get(VIDEO_URL_KEY, None),
            tracks=tracks,
        )

    def to_payload(self) -> dict:
        """Serializes scene object to schematized JSON dict."""
        self.validate()
        payload: Dict[str, Any] = {
            REFERENCE_ID_KEY: self.reference_id,
        }
        if self.frame_rate:
            payload[FRAME_RATE_KEY] = self.frame_rate
        if self.metadata:
            payload[METADATA_KEY] = self.metadata
        if self.video_location:
            payload[VIDEO_URL_KEY] = self.video_location
        # needed in order for the backed validation to work
        if self.use_privacy_mode is not None:
            payload[UPLOAD_TO_SCALE_KEY] = not self.use_privacy_mode
        if self.items:
            items_payload = [
                item.to_payload(is_scene=True) for item in self.items
            ]
            payload[FRAMES_KEY] = items_payload
        if self.tracks:
            payload[TRACKS_KEY] = [track.to_payload() for track in self.tracks]
        if self.frame_format:
            payload[FRAME_FORMAT_KEY] = self.frame_format
        return payload

    def to_json(self) -> str:
        """Serializes scene object to schematized JSON string."""
        return json.dumps(self.to_payload(), allow_nan=False)
def check_all_scene_paths_remote(
    scenes: Union[List[LidarScene], List[VideoScene]]
):
    """Raise ValueError if any video, pointcloud, or image path in the given
    scenes is a local path rather than a supported remote URL."""

    def _require_remote(location, message):
        # Shared guard: a falsy location is fine (nothing to upload); a
        # local path is rejected with the caller-supplied message.
        if location and is_local_path(location):
            raise ValueError(message)

    for scene in scenes:
        if isinstance(scene, VideoScene) and scene.video_location:
            _require_remote(
                getattr(scene, VIDEO_LOCATION_KEY),
                f"All paths for videos must be remote, but {scene.video_location} is either "
                "local, or a remote URL type that is not supported.",
            )
        if isinstance(scene, LidarScene) or scene.items:
            for item in scene.get_items():
                _require_remote(
                    getattr(item, POINTCLOUD_LOCATION_KEY),
                    f"All paths for DatasetItems in a Scene must be remote, but {item.pointcloud_location} is either "
                    "local, or a remote URL type that is not supported.",
                )
                _require_remote(
                    getattr(item, IMAGE_LOCATION_KEY),
                    f"All paths for DatasetItems in a Scene must be remote, but {item.image_location} is either "
                    "local, or a remote URL type that is not supported.",
                )