diff --git a/system-test/managed_writer_client_test.ts b/system-test/managed_writer_client_test.ts
index fbe7d1d7..b0638b7f 100644
--- a/system-test/managed_writer_client_test.ts
+++ b/system-test/managed_writer_client_test.ts
@@ -694,6 +694,133 @@ describe('managedwriter.WriterClient', () => {
     }
   });
 
+  it('should invoke appendRows with picosecond precision timestamp without errors', async () => {
+    const picosTableId = generateUuid();
+    const picosSchema: any = {
+      fields: [
+        {
+          name: 'customer_name',
+          type: 'STRING',
+          mode: 'REQUIRED',
+        },
+        {
+          name: 'row_num',
+          type: 'INTEGER',
+          mode: 'REQUIRED',
+        },
+        {
+          name: 'created_at',
+          type: 'TIMESTAMP',
+          mode: 'NULLABLE',
+          timestampPrecision: 12,
+        },
+      ],
+    };
+    const [table] = await bigquery
+      .dataset(datasetId)
+      .createTable(picosTableId, {schema: picosSchema});
+    const picosParent = `projects/${projectId}/datasets/${datasetId}/tables/${table.id}`;
+
+    bqWriteClient.initialize().catch(err => {
+      throw err;
+    });
+    const streamType: WriteStream['type'] = managedwriter.PendingStream;
+    const client = new WriterClient();
+    client.setClient(bqWriteClient);
+
+    const storageSchema =
+      adapt.convertBigQuerySchemaToStorageTableSchema(picosSchema);
+    const protoDescriptor: DescriptorProto =
+      adapt.convertStorageSchemaToProto2Descriptor(storageSchema, 'root');
+
+    // Row 1
+    const row1 = {
+      customer_name: 'Ada Lovelace',
+      row_num: 1,
+      created_at: '2023-10-10 12:00:00.123456789012',
+    };
+
+    const offset: IInt64Value['value'] = '0';
+
+    const streamId = await client.createWriteStream({
+      streamType,
+      destinationTable: picosParent,
+    });
+    const appendRowsResponsesResult: AppendRowsResponse[] = [
+      {
+        appendResult: {
+          offset: {
+            value: offset,
+          },
+        },
+        writeStream: streamId,
+      },
+    ];
+    try {
+      const connection = await client.createStreamConnection({
+        streamId,
+      });
+      const writer = new JSONWriter({
+        connection,
+        protoDescriptor,
+      });
+      const pw = writer.appendRows([row1], offset);
+      const result = await pw.getResult();
+      const responses: AppendRowsResponse[] = [
+        {
+          appendResult: result.appendResult,
+          writeStream: result.writeStream,
+        },
+      ];
+
+      assert.deepEqual(appendRowsResponsesResult, responses);
+
+      const res = await connection.finalize();
+      connection.close();
+      assert.equal(res?.rowCount, 1);
+
+      const commitResponse = await client.batchCommitWriteStream({
+        parent: picosParent,
+        writeStreams: [streamId],
+      });
+      assert.equal(commitResponse.streamErrors?.length, 0);
+
+      writer.close();
+
+      // Now read to make sure the written data is correct:
+      const options: {[key: string]: any} = {};
+      const timestampOutputFormat = 'ISO8601_STRING';
+      const useInt64Timestamp = false;
+      const expectedTsValue = '2023-10-10T12:00:00.123456789012Z';
+      options['formatOptions.timestampOutputFormat'] = timestampOutputFormat;
+      options['formatOptions.useInt64Timestamp'] = useInt64Timestamp;
+
+      await new Promise<void>((resolve, reject) => {
+        (table as any).request(
+          {
+            uri: '/data',
+            qs: options,
+          },
+          (err: any, resp: any) => {
+            if (err) {
+              reject(err);
+              return;
+            }
+            try {
+              assert(resp.rows && resp.rows.length > 0);
+              assert.strictEqual(resp.rows[0].f[0].v, expectedTsValue);
+              resolve();
+            } catch (e) {
+              reject(e);
+            }
+          },
+        );
+      });
+    } finally {
+      client.close();
+    }
+  });
+
   it('should update proto descriptor automatically with appendRows without errors', async () => {
     bqWriteClient.initialize().catch(err => {
       throw err;
diff --git a/system-test/timestamp_output_format.ts b/system-test/timestamp_output_format.ts
index f6188d79..15a7a933 100644
--- a/system-test/timestamp_output_format.ts
+++ b/system-test/timestamp_output_format.ts
@@ -27,7 +27,7 @@ interface TestCase {
   expectedTsValue?: string;
 }
 
-describe.only('Timestamp Output Format System Tests', () => {
+describe('Timestamp Output Format System Tests', () => {
  const datasetId = `timestamp_test_${randomUUID().replace(/-/g, '_')}`;
  const tableId = `timestamp_table_${randomUUID().replace(/-/g, '_')}`;
  const dataset = bigquery.dataset(datasetId);