forked from Geopipe/gltf2glb
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathbatchtable.py
More file actions
112 lines (91 loc) · 3.28 KB
/
batchtable.py
File metadata and controls
112 lines (91 loc) · 3.28 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
#!/usr/bin/env python3
#--------------------------------------------------
# batchtable.py: Component of GLTF to GLB converter
# (c) 2016 - 2019 Geopipe, Inc.
# All rights reserved. See LICENSE.
#--------------------------------------------------
import struct
import json
import numpy as np
class BatchTable:
    """Accumulates per-feature ("batch") attribute data and serializes it
    into the JSON (and, eventually, binary) batch-table sections used by
    3D Tiles b3dm-style containers.

    Typical usage: loadJSONBatch(...) one or more times, then finalize(),
    then read the padded sections via getBatchJSON() / getBatchBin().
    """

    def __init__(self):
        # Maps attribute key -> list of per-object values (one slot per feature)
        self.batch_in = {}
        # Serialized (and, after finalize(), space-padded) JSON section
        self.batch_json = bytearray()
        # Serialized (and, after finalize(), space-padded) binary section.
        # NOTE(review): nothing in this file ever writes into batch_bin, so
        # it stays empty; retained for interface compatibility.
        self.batch_bin = bytearray()
        # Number of batched objects (features) currently loaded
        self.num_features = 0

    def loadJSONBatch(self, data_in, object_wise = True):
        """Load object batch data from a dict/object. The data could,
        for example, have been decoded from a JSON string.

        If object_wise is True, then the input should be a list (or an
        int-keyed dict) of batched objects, each of which has a series
        of keys and values. This method transposes the data to map keys
        to arrays of values, one per object, handling keys that only
        exist for a subset of the batched objects (missing slots are
        left as None).

        If object_wise is False, then it is assumed that the input data
        already maps keys to arrays of values, one for each object, with
        the necessary null/None placeholder values for keys that don't
        have a real value for that particular object.
        """
        if object_wise:
            # Total number of features; every per-key column must have
            # exactly this many slots.
            n_objs = len(data_in)
            # Normalize a list of objects into an index-keyed dict so
            # both input shapes share one code path.
            if type(data_in) is list:
                data_in = {i: data_in[i] for i in range(len(data_in))}
            for obj, objval in data_in.items():
                obj = int(obj)
                # Add this object's key-vals, creating a full-length
                # column of Nones the first time a key is seen.
                # BUGFIX: the previous code incremented n_objs once per
                # object here, so columns created for later objects were
                # too long; n_objs is already the total and must not grow.
                for key, val in objval.items():
                    if key not in self.batch_in:
                        self.batch_in[key] = [None] * n_objs
                    self.batch_in[key][obj] = val
        else:
            self.batch_in = data_in
        if len(self.batch_in):
            # All columns are the same length; measure any one of them.
            first_key = next(iter(self.batch_in))
            self.num_features = len(self.batch_in[first_key])

    def writeOutput(self):
        """Serialize the accumulated batch data into self.batch_json and
        reset the accumulator."""
        # TODO: Add proper encoding to JSON + binary, rather than just
        # punting to the naive method
        data_out = self.batch_in
        self.batch_json = bytearray(json.dumps(data_out, separators=(',', ':'), sort_keys=True), encoding='utf8')
        # BUGFIX: reset to an empty dict (the previous code assigned a
        # bytearray, which would break any subsequent object_wise load).
        # TODO: Why do we clear this?
        self.batch_in = {}
        self.num_features = 0

    def finalize(self):
        """Serialize the batch data and pad both sections to 4-byte
        multiples, as required for embedding in a tile's binary body."""
        # Create the actual batch JSON (and binary)
        self.writeOutput()
        # Pad with spaces to a multiple of 4 bytes
        padded_batch_json_len = (len(self.batch_json) + 3) & ~3
        self.batch_json.extend([ord(' ')] * (padded_batch_json_len - len(self.batch_json)))
        padded_batch_bin_len = (len(self.batch_bin) + 3) & ~3
        self.batch_bin.extend([ord(' ')] * (padded_batch_bin_len - len(self.batch_bin)))

    def getBatchJSON(self):
        """Return a bytearray of the JSON for the batch, ready to embed
        in another binary stream (padded only after finalize())."""
        return self.batch_json

    def getBatchBin(self):
        """Return a bytearray of the binary section for the batch."""
        return self.batch_bin

    def getNumFeatures(self):
        """Return the number of batched objects loaded so far.

        NOTE(review): writeOutput()/finalize() reset this to 0, so call
        this before finalizing if you need the count.
        """
        return self.num_features

    """ A few utilities """
    def nestedListToBin(self, val, val_type):
        """Recursively pack a scalar, (nested) list, or numpy array into
        little-endian binary form.

        val_type is one of 'f32', 'u16', 'u8'.
        Raises TypeError for any other val_type.
        """
        val_codes = {'f32' : 'f', 'u16' : 'H', 'u8' : 'B'}
        if val_type not in val_codes:
            raise TypeError("Don't know how to pack type '%s'" % val_type)
        val_code = val_codes[val_type]
        if type(val) is list:
            # Flatten nested lists by packing each element in order.
            output = bytearray()
            for item in val:
                output.extend(self.nestedListToBin(item, val_type))
            return output
        elif type(val) is np.ndarray:
            # Let numpy cast and dump the whole array in one shot.
            return bytearray(val.astype(val_code).tobytes())
        else:
            return struct.pack('<%s' % val_code, val)