From abb292bc7e994c806ef9974b0dc4a0417918538d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 10 Jan 2025 15:59:54 -0500 Subject: [PATCH 001/448] Add the metrics tracer factory class --- src/metrics-tracer-factory.ts | 106 +++++++++++++++++++++++++++++ system-test/client-side-metrics.ts | 0 2 files changed, 106 insertions(+) create mode 100644 src/metrics-tracer-factory.ts create mode 100644 system-test/client-side-metrics.ts diff --git a/src/metrics-tracer-factory.ts b/src/metrics-tracer-factory.ts new file mode 100644 index 000000000..eff1f8ef4 --- /dev/null +++ b/src/metrics-tracer-factory.ts @@ -0,0 +1,106 @@ +// import * as SDKMetrics from '@opentelemetry/sdk-metrics'; +const { MeterProvider, Histogram, Counter, PeriodicExportingMetricReader } = require('@opentelemetry/sdk-metrics'); +// import { MeterProvider, PeriodicExportingMetricReader, Histogram} from '@opentelemetry/sdk-metrics'; +import * as Resources from '@opentelemetry/resources'; +import { MetricExporter } from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; +import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; + +interface OperationInfo { + retries: number; +} + +const buckets = [0.001, 0.01, 0.1, 1, 10, 100] +const count = 0; + +interface Metrics { + operationLatencies: typeof Histogram; + attemptLatencies: typeof Histogram; + retryCount: typeof Counter; + applicationBlockingLatencies: typeof Histogram; + firstResponseLatencies: typeof Histogram; + serverLatencies: typeof Histogram; + connectivityErrorCount: typeof Histogram; + clientBlockingLatencies: typeof Histogram; +} + +class MetricsTracer { // TODO: Consider rename. + private metrics: Metrics; + + constructor(metrics: Metrics) { + this.metrics = metrics; + } + + onAttemptComplete(info: OperationInfo) { + console.log('onAttemptComplete'); + } + + onOperationComplete(info: OperationInfo) { + console.log('onOperationComplete'); + } +} + +export class MetricsTracerFactory { + private metrics: Metrics; + + constructor() { + // Create MeterProvider + const meterProvider = new MeterProvider({ + // Create a resource. Fill the `service.*` attributes in with real values for your service. + // GcpDetectorSync will add in resource information about the current environment if you are + // running on GCP. These resource attributes will be translated to a specific GCP monitored + // resource if running on GCP. Otherwise, metrics will be sent with monitored resource + // `generic_task`. + resource: new Resources.Resource({ + "service.name": "example-metric-service", + "service.namespace": "samples", + "service.instance.id": "12345", + "cloud.resource_manager.project_id": "cloud-native-db-dpes-shared" + }).merge(new ResourceUtil.GcpDetectorSync().detect()), + readers: [ // Register the exporter + new PeriodicExportingMetricReader({ + // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by + // Cloud Monitoring. + exportIntervalMillis: 10_000, + exporter: new MetricExporter({ + projectId: 'cloud-native-db-dpes-shared' // TODO: Replace later + }), + }) + ] + }); + const meter = meterProvider.getMeter('bigtable.googleapis.com'); + this.metrics = { + operationLatencies: meter.createHistogram('operation_latencies', { + description: 'The total end-to-end latency across all RPC attempts associated with a Bigtable operation. 
This metric measures an operation\'s round trip from the client to Bigtable and back to the client and includes all retries.', + }), + attemptLatencies: meter.createHistogram('attempt_latencies', { + description: "The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.", + unit: 'ms', + }), + retryCount: meter.createCounter('retry_count', { + description: 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', + }), + applicationBlockingLatencies: meter.createHistogram('application_blocking_latencies', { + description: 'The time from when the client receives the response to a request until the application reads the response. This metric is most relevant for ReadRows requests. The start and stop times for this metric depend on the way that you send the read request; see Application blocking latencies timer examples for details.', + unit: 'ms', + }), + firstResponseLatencies: meter.createHistogram('first_response_latencies', { + description: 'Latencies from when a client sends a request and receives the first row of the response.', + unit: 'ms', + }), + serverLatencies: meter.createHistogram('server_latencies', { + description: 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', + }), + connectivityErrorCount: meter.createHistogram('connectivity_error_count', { + description: 'The number of requests that failed to reach Google\'s network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.', + }), + clientBlockingLatencies: meter.createHistogram('client_blocking_latencies', { + description: 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', + unit: 'ms', + }), + }; + } + + getMetricsTracer() { + return new MetricsTracer(this.metrics); + } +} diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts new file mode 100644 index 000000000..e69de29bb From 051b488ec6ffda36ca96c7b4117366b838f691d6 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 10 Jan 2025 16:08:32 -0500 Subject: [PATCH 002/448] Add system tests for the client side metrics --- system-test/client-side-metrics.ts | 95 ++++++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index e69de29bb..6e41740f4 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -0,0 +1,95 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ + + +import {Bigtable} from '../src'; +import {Mutation} from '../src/mutation'; +import * as assert from 'assert'; +import {describe, it, before, after} from 'mocha'; + +describe.only('Bigtable/Table#getRows', () => { + const bigtable = new Bigtable(); + const instanceId = 'emulator-test-instance'; + const tableId = 'my-table'; + const clusterId = 'test-cluster'; + const location = 'us-central1-c'; + + before(async () => { + const instance = bigtable.instance(instanceId); + try { + const [instanceInfo] = await instance.exists(); + if (!instanceInfo) { + const [,operation] = await instance.create({ // Fix: Destructure correctly + clusters: { // Fix: Use computed property name + [clusterId]: { + location, + nodes: 3, + }, + }, + } as any); // any cast resolves type mismatch for options. + await operation.promise(); + } + + const table = instance.table(tableId); + const [tableExists] = await table.exists(); + if (!tableExists) { + await table.create(); + } + } catch (error) { + console.error('Error during setup:', error); + // Consider re-throwing error, to actually stop tests. + } + }); + + after(async () => { + const instance = bigtable.instance(instanceId); + await instance.delete({}); + }); + + + it('should read rows after inserting data', async () => { + const instance = bigtable.instance(instanceId); + const table = instance.table(tableId); + const rows = [ + { + key: 'row1', + data: { + cf1: { + q1: 'value1', + }, + }, + }, + { + key: 'row2', + data: { + cf1: { + q2: 'value2', + }, + }, + }, + ]; + await table.insert(rows); + const retrievedRows = await table.getRows(); + assert.strictEqual(retrievedRows[0].length, 2); + const row1 = retrievedRows[0].find(row => row.key === 'row1'); + assert(row1); + const row1Data = row1.data; + assert.deepStrictEqual(row1Data, rows[0].data); + const row2 = retrievedRows[0].find(row => row.key === 'row2'); + assert(row2); + const row2Data = row2.data; + assert.deepStrictEqual(row2Data, rows[1].data); + }); +}); From 1c49f8651376604fce987ea26bbf739a944899a7 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 10 Jan 2025 16:09:37 -0500 Subject: [PATCH 003/448] Add open telemetry packages --- package.json | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/package.json b/package.json index c0b1a07e9..7b1c30d4a 100644 --- a/package.json +++ b/package.json @@ -47,9 +47,13 @@ "precompile": "gts clean" }, "dependencies": { + "@google-cloud/opentelemetry-cloud-monitoring-exporter": "^0.20.0", + "@google-cloud/opentelemetry-resource-util": "^2.4.0", "@google-cloud/precise-date": "^4.0.0", "@google-cloud/projectify": "^4.0.0", "@google-cloud/promisify": "^4.0.0", + "@opentelemetry/resources": "^1.30.0", + "@opentelemetry/sdk-metrics": "^1.30.0", "arrify": "^2.0.0", "concat-stream": "^2.0.0", "dot-prop": "^6.0.0", From 7a5be3bfbbbb345423c85b30f5297994c7a95a29 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 10 Jan 2025 16:10:27 -0500 Subject: [PATCH 004/448] Add a metrics tracer factory --- src/index.ts | 3 +++ src/tabular-api-surface.ts | 10 ++++++++++ 2 files changed, 13 insertions(+) diff --git a/src/index.ts b/src/index.ts index dc4143c99..07147a0c1 100644 --- a/src/index.ts +++ b/src/index.ts @@ -35,6 +35,7 @@ import * as v2 from './v2'; import {PassThrough, Duplex} from 'stream'; import grpcGcpModule = require('grpc-gcp'); import {ClusterUtils} from './utils/cluster'; +import {MetricsTracerFactory} from './metrics-tracer-factory'; // eslint-disable-next-line @typescript-eslint/no-var-requires const streamEvents = require('stream-events'); @@ 
-395,8 +396,10 @@ export class Bigtable { static AppProfile: AppProfile; static Instance: Instance; static Cluster: Cluster; + metricsTracerFactory: MetricsTracerFactory; constructor(options: BigtableOptions = {}) { + this.metricsTracerFactory = new MetricsTracerFactory(); // Determine what scopes are needed. // It is the union of the scopes on all three clients. const scopes: string[] = []; diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index a7f86e0a2..9cb164f70 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -210,6 +210,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); * region_tag:bigtable_api_table_readstream */ createReadStream(opts?: GetRowsOptions) { + const metricsTracer = this.bigtable.metricsTracerFactory.getMetricsTracer(); const options = opts || {}; const maxRetries = is.number(this.maxRetries) ? this.maxRetries! : 10; let activeRequestStream: AbortableDuplex | null; @@ -506,6 +507,13 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); } return false; }; + /* + function onCallComplete() { + this.metricsTracer.onOperationComplete({ + retries: numConsecutiveErrors, + }); + } + */ rowStream .on('error', (error: ServiceError) => { @@ -548,6 +556,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); error.code = grpc.status.CANCELLED; } userStream.emit('error', error); + //onCallComplete(); } }) .on('data', _ => { @@ -557,6 +566,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); }) .on('end', () => { activeRequestStream = null; + //onCallComplete(); }); rowStreamPipe(rowStream, userStream); }; From 5390411f10c59d588bb955efc851dc8476016a61 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 13 Jan 2025 11:05:17 -0500 Subject: [PATCH 005/448] metadata experimentation --- src/index.ts | 4 ++- src/tabular-api-surface.ts | 7 +++-- system-test/client-side-metrics.ts | 44 ++++++++++++++---------------- 3 files changed, 29 insertions(+), 26 deletions(-) diff --git a/src/index.ts b/src/index.ts index 07147a0c1..595bc267c 100644 --- a/src/index.ts +++ b/src/index.ts @@ -870,7 +870,9 @@ export class Bigtable { gaxStream = requestFn!(); gaxStream .on('error', stream.destroy.bind(stream)) - .on('metadata', stream.emit.bind(stream, 'metadata')) + .on('metadata', (arg1, arg2) => { + stream.emit.bind(stream, 'metadata')(arg1, arg2); + }) .on('request', stream.emit.bind(stream, 'request')) .pipe(stream); }); diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 9cb164f70..cea03d859 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -559,12 +559,15 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); //onCallComplete(); } }) - .on('data', _ => { + .on('data', (something: any) => { // Reset error count after a successful read so the backoff // time won't keep increasing when as stream had multiple errors numConsecutiveErrors = 0; }) - .on('end', () => { + .on('metadata', (something: any) => { + console.log(something); + }) + .on('end', (something: any) => { activeRequestStream = null; //onCallComplete(); }); diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index 6e41740f4..9ac299773 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -12,17 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. 
- - import {Bigtable} from '../src'; -import {Mutation} from '../src/mutation'; import * as assert from 'assert'; import {describe, it, before, after} from 'mocha'; describe.only('Bigtable/Table#getRows', () => { - const bigtable = new Bigtable(); + const bigtable = new Bigtable({ + projectId: 'cloud-native-db-dpes-shared', + }); const instanceId = 'emulator-test-instance'; const tableId = 'my-table'; + const columnFamilyId = 'cf1'; const clusterId = 'test-cluster'; const location = 'us-central1-c'; @@ -31,21 +31,27 @@ describe.only('Bigtable/Table#getRows', () => { try { const [instanceInfo] = await instance.exists(); if (!instanceInfo) { - const [,operation] = await instance.create({ // Fix: Destructure correctly - clusters: { // Fix: Use computed property name - [clusterId]: { - location, - nodes: 3, - }, + const [, operation] = await instance.create({ + clusters: { + id: 'fake-cluster3', + location: 'us-west1-c', + nodes: 1, }, - } as any); // any cast resolves type mismatch for options. + }); await operation.promise(); } const table = instance.table(tableId); const [tableExists] = await table.exists(); if (!tableExists) { - await table.create(); + await table.create({families: [columnFamilyId]}); // Create column family + } else { + // Check if column family exists and create it if not. + const [families] = await table.getFamilies(); + + if (!families.some(family => family.id === columnFamilyId)) { + await table.createFamily(columnFamilyId); + } } } catch (error) { console.error('Error during setup:', error); @@ -58,7 +64,6 @@ describe.only('Bigtable/Table#getRows', () => { await instance.delete({}); }); - it('should read rows after inserting data', async () => { const instance = bigtable.instance(instanceId); const table = instance.table(tableId); @@ -81,15 +86,8 @@ describe.only('Bigtable/Table#getRows', () => { }, ]; await table.insert(rows); - const retrievedRows = await table.getRows(); - assert.strictEqual(retrievedRows[0].length, 2); - const row1 = retrievedRows[0].find(row => row.key === 'row1'); - assert(row1); - const row1Data = row1.data; - assert.deepStrictEqual(row1Data, rows[0].data); - const row2 = retrievedRows[0].find(row => row.key === 'row2'); - assert(row2); - const row2Data = row2.data; - assert.deepStrictEqual(row2Data, rows[1].data); + for (let i = 0; i < 100; i++) { + console.log(await table.getRows()); + } }); }); From a7f2fd433cef5af6774814e54680b867f4f4bfdf Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 13 Jan 2025 13:37:32 -0500 Subject: [PATCH 006/448] Pass metadata, status along --- src/index.ts | 5 ++--- src/tabular-api-surface.ts | 17 +++++++++++++---- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/src/index.ts b/src/index.ts index 595bc267c..4fc86fbee 100644 --- a/src/index.ts +++ b/src/index.ts @@ -870,9 +870,8 @@ export class Bigtable { gaxStream = requestFn!(); gaxStream .on('error', stream.destroy.bind(stream)) - .on('metadata', (arg1, arg2) => { - stream.emit.bind(stream, 'metadata')(arg1, arg2); - }) + .on('metadata', stream.emit.bind(stream, 'metadata')) + .on('status', stream.emit.bind(stream, 'status')) .on('request', stream.emit.bind(stream, 'request')) .pipe(stream); }); diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index cea03d859..da9fe3528 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -514,7 +514,19 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); }); } */ - + requestStream + .on( + 'metadata', + (metadata: {internalRepr: Map; options: 
{}}) => { + console.log(metadata); + } + ) + .on( + 'status', + (status: {internalRepr: Map; options: {}}) => { + console.log(status); + } + ); rowStream .on('error', (error: ServiceError) => { rowStreamUnpipe(rowStream, userStream); @@ -564,9 +576,6 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // time won't keep increasing when as stream had multiple errors numConsecutiveErrors = 0; }) - .on('metadata', (something: any) => { - console.log(something); - }) .on('end', (something: any) => { activeRequestStream = null; //onCallComplete(); From 9e3e5f5679c3d3fac0b698f2449354916d408bb0 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 13 Jan 2025 14:21:48 -0500 Subject: [PATCH 007/448] Get mapped entries --- src/tabular-api-surface.ts | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index da9fe3528..635d1cdbd 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -518,13 +518,23 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); .on( 'metadata', (metadata: {internalRepr: Map; options: {}}) => { - console.log(metadata); + const mappedEntries = Array.from( + metadata.internalRepr.entries(), + ([key, value]) => [key, value.toString()] + ); + console.log(mappedEntries); } ) .on( 'status', - (status: {internalRepr: Map; options: {}}) => { - console.log(status); + (status: { + metadata: {internalRepr: Map; options: {}}; + }) => { + const mappedEntries = Array.from( + status.metadata.internalRepr.entries(), + ([key, value]) => [key, value.toString()] + ); + console.log(mappedEntries); } ); rowStream From 9f93172983223d617468adf6b5c0c7d6dd51ae41 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 13 Jan 2025 17:25:19 -0500 Subject: [PATCH 008/448] Start collecting a few metrics in the metrics trac --- src/metrics-tracer-factory.ts | 194 ++++++++++++++++++++++++++++------ src/tabular-api-surface.ts | 41 ++++--- 2 files changed, 179 insertions(+), 56 deletions(-) diff --git a/src/metrics-tracer-factory.ts b/src/metrics-tracer-factory.ts index eff1f8ef4..9a04025b4 100644 --- a/src/metrics-tracer-factory.ts +++ b/src/metrics-tracer-factory.ts @@ -1,16 +1,36 @@ // import * as SDKMetrics from '@opentelemetry/sdk-metrics'; -const { MeterProvider, Histogram, Counter, PeriodicExportingMetricReader } = require('@opentelemetry/sdk-metrics'); +import {Table} from './table'; + +const { + MeterProvider, + Histogram, + Counter, + PeriodicExportingMetricReader, +} = require('@opentelemetry/sdk-metrics'); // import { MeterProvider, PeriodicExportingMetricReader, Histogram} from '@opentelemetry/sdk-metrics'; import * as Resources from '@opentelemetry/resources'; -import { MetricExporter } from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; +import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; +import {TabularApiSurface} from './tabular-api-surface'; interface OperationInfo { retries: number; + finalOperationStatus: string; } -const buckets = [0.001, 0.01, 0.1, 1, 10, 100] +const buckets = [0.001, 0.01, 0.1, 1, 10, 100]; const count = 0; +interface Dimensions { + projectId: string; + instanceId: string; + table: string; + cluster?: string | null; + zone?: string | null; + appProfileId?: string; + methodName: string; + finalOperationStatus: string; + clientName: string; +} interface Metrics { operationLatencies: typeof Histogram; @@ 
-23,11 +43,43 @@ interface Metrics { clientBlockingLatencies: typeof Histogram; } -class MetricsTracer { // TODO: Consider rename. +class MetricsTracer { + // TODO: Consider rename. + private startTime: Date; private metrics: Metrics; + private zone: string | null | undefined; + private cluster: string | null | undefined; + private tabularApiSurface: TabularApiSurface; + private methodName: string; - constructor(metrics: Metrics) { + constructor( + metrics: Metrics, + tabularApiSurface: TabularApiSurface, + methodName: string + ) { this.metrics = metrics; + this.zone = null; + this.cluster = null; + this.startTime = new Date(); + this.tabularApiSurface = tabularApiSurface; + this.methodName = methodName; + } + + private getDimensions( + projectId: string, + finalOperationStatus: string + ): Dimensions { + return { + projectId, + instanceId: this.tabularApiSurface.instance.id, + table: this.tabularApiSurface.id, + cluster: this.cluster, + zone: this.zone, + appProfileId: this.tabularApiSurface.bigtable.appProfileId, + methodName: this.methodName, + finalOperationStatus: finalOperationStatus, + clientName: 'nodejs-bigtable', + }; } onAttemptComplete(info: OperationInfo) { @@ -35,8 +87,61 @@ class MetricsTracer { // TODO: Consider rename. } onOperationComplete(info: OperationInfo) { + const endTime = new Date(); + const totalTime = endTime.getTime() - this.startTime.getTime(); + this.tabularApiSurface.bigtable.getProjectId_( + (err: Error | null, projectId?: string) => { + if (projectId) { + const dimensions = this.getDimensions( + projectId, + info.finalOperationStatus + ); + this.metrics.operationLatencies.record(totalTime, dimensions); + this.metrics.retryCount.add(info.retries, dimensions); + } + } + ); console.log('onOperationComplete'); } + + onMetadataReceived(metadata: { + internalRepr: Map; + options: {}; + }) { + const mappedEntries = new Map( + Array.from(metadata.internalRepr.entries(), ([key, value]) => [ + key, + value.toString(), + ]) + ); + const durationValues = mappedEntries.get('server-timing')?.split('dur='); + if (durationValues && durationValues[1]) { + const serverTime = parseInt(durationValues[1]); + } + console.log(mappedEntries); + } + + onStatusReceived(status: { + metadata: {internalRepr: Map; options: {}}; + }) { + const mappedEntries = new Map( + Array.from(status.metadata.internalRepr.entries(), ([key, value]) => [ + key, + value.toString(), + ]) + ); + const instanceInformation = mappedEntries + .get('x-goog-ext-425905942-bin') + ?.replace(new RegExp('\\n', 'g'), '') + .split('\r'); + if (instanceInformation && instanceInformation[0]) { + this.zone = instanceInformation[0]; + } + if (instanceInformation && instanceInformation[1]) { + this.cluster = instanceInformation[0]; + } + console.log(mappedEntries); + } } export class MetricsTracerFactory { @@ -51,56 +156,77 @@ export class MetricsTracerFactory { // resource if running on GCP. Otherwise, metrics will be sent with monitored resource // `generic_task`. 
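      // NOTE: the `service.*` attributes and the project ids below are sample
      // placeholder values (see the TODO on the exporter's projectId).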
resource: new Resources.Resource({ - "service.name": "example-metric-service", - "service.namespace": "samples", - "service.instance.id": "12345", - "cloud.resource_manager.project_id": "cloud-native-db-dpes-shared" + 'service.name': 'example-metric-service', + 'service.namespace': 'samples', + 'service.instance.id': '12345', + 'cloud.resource_manager.project_id': 'cloud-native-db-dpes-shared', }).merge(new ResourceUtil.GcpDetectorSync().detect()), - readers: [ // Register the exporter + readers: [ + // Register the exporter new PeriodicExportingMetricReader({ // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by // Cloud Monitoring. exportIntervalMillis: 10_000, exporter: new MetricExporter({ - projectId: 'cloud-native-db-dpes-shared' // TODO: Replace later + projectId: 'cloud-native-db-dpes-shared', // TODO: Replace later }), - }) - ] + }), + ], }); const meter = meterProvider.getMeter('bigtable.googleapis.com'); this.metrics = { operationLatencies: meter.createHistogram('operation_latencies', { - description: 'The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation\'s round trip from the client to Bigtable and back to the client and includes all retries.', + description: + "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", }), attemptLatencies: meter.createHistogram('attempt_latencies', { - description: "The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.", + description: + 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', unit: 'ms', }), retryCount: meter.createCounter('retry_count', { - description: 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', - }), - applicationBlockingLatencies: meter.createHistogram('application_blocking_latencies', { - description: 'The time from when the client receives the response to a request until the application reads the response. This metric is most relevant for ReadRows requests. The start and stop times for this metric depend on the way that you send the read request; see Application blocking latencies timer examples for details.', - unit: 'ms', - }), - firstResponseLatencies: meter.createHistogram('first_response_latencies', { - description: 'Latencies from when a client sends a request and receives the first row of the response.', - unit: 'ms', + description: + 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', }), + applicationBlockingLatencies: meter.createHistogram( + 'application_blocking_latencies', + { + description: + 'The time from when the client receives the response to a request until the application reads the response. This metric is most relevant for ReadRows requests. 
The start and stop times for this metric depend on the way that you send the read request; see Application blocking latencies timer examples for details.', + unit: 'ms', + } + ), + firstResponseLatencies: meter.createHistogram( + 'first_response_latencies', + { + description: + 'Latencies from when a client sends a request and receives the first row of the response.', + unit: 'ms', + } + ), serverLatencies: meter.createHistogram('server_latencies', { - description: 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', - }), - connectivityErrorCount: meter.createHistogram('connectivity_error_count', { - description: 'The number of requests that failed to reach Google\'s network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.', - }), - clientBlockingLatencies: meter.createHistogram('client_blocking_latencies', { - description: 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', - unit: 'ms', + description: + 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', }), + connectivityErrorCount: meter.createHistogram( + 'connectivity_error_count', + { + description: + "The number of requests that failed to reach Google's network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.", + } + ), + clientBlockingLatencies: meter.createHistogram( + 'client_blocking_latencies', + { + description: + 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', + unit: 'ms', + } + ), }; } - getMetricsTracer() { - return new MetricsTracer(this.metrics); + getMetricsTracer(tabularApiSurface: TabularApiSurface, methodName: string) { + return new MetricsTracer(this.metrics, tabularApiSurface, methodName); } } diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 635d1cdbd..57fd73135 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -210,7 +210,18 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); * region_tag:bigtable_api_table_readstream */ createReadStream(opts?: GetRowsOptions) { - const metricsTracer = this.bigtable.metricsTracerFactory.getMetricsTracer(); + // Initialize objects for collecting client side metrics. + const metricsTracer = this.bigtable.metricsTracerFactory.getMetricsTracer( + this, + 'readRows' + ); + function onCallComplete(finalOperationStatus: string) { + metricsTracer.onOperationComplete({ + retries: numRequestsMade - 1, + finalOperationStatus, + }); + } + const options = opts || {}; const maxRetries = is.number(this.maxRetries) ? this.maxRetries! 
: 10; let activeRequestStream: AbortableDuplex | null; @@ -507,22 +518,11 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); } return false; }; - /* - function onCallComplete() { - this.metricsTracer.onOperationComplete({ - retries: numConsecutiveErrors, - }); - } - */ requestStream .on( 'metadata', (metadata: {internalRepr: Map; options: {}}) => { - const mappedEntries = Array.from( - metadata.internalRepr.entries(), - ([key, value]) => [key, value.toString()] - ); - console.log(mappedEntries); + metricsTracer.onMetadataReceived(metadata); } ) .on( @@ -530,11 +530,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); (status: { metadata: {internalRepr: Map; options: {}}; }) => { - const mappedEntries = Array.from( - status.metadata.internalRepr.entries(), - ([key, value]) => [key, value.toString()] - ); - console.log(mappedEntries); + metricsTracer.onStatusReceived(status); } ); rowStream @@ -578,17 +574,18 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); error.code = grpc.status.CANCELLED; } userStream.emit('error', error); - //onCallComplete(); + onCallComplete('ERROR'); } }) - .on('data', (something: any) => { + .on('data', () => { // Reset error count after a successful read so the backoff // time won't keep increasing when as stream had multiple errors numConsecutiveErrors = 0; }) - .on('end', (something: any) => { + .on('end', () => { + numRequestsMade++; activeRequestStream = null; - //onCallComplete(); + onCallComplete('SUCCESS'); }); rowStreamPipe(rowStream, userStream); }; From b32aea01f24c668d043545123eb8870c0e465fcb Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 14 Jan 2025 09:50:03 -0500 Subject: [PATCH 009/448] on attempt start --- src/metrics-tracer-factory.ts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/metrics-tracer-factory.ts b/src/metrics-tracer-factory.ts index 9a04025b4..68f249683 100644 --- a/src/metrics-tracer-factory.ts +++ b/src/metrics-tracer-factory.ts @@ -86,6 +86,10 @@ class MetricsTracer { console.log('onAttemptComplete'); } + onAttemptStart() { + console.log('onAttemptStart'); + } + onOperationComplete(info: OperationInfo) { const endTime = new Date(); const totalTime = endTime.getTime() - this.startTime.getTime(); From 750c1e7e2b3aa2e5ef61b394493fd8d561c42bc1 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 15 Jan 2025 10:57:33 -0500 Subject: [PATCH 010/448] Adding more metrics --- src/metrics-tracer-factory.ts | 114 ++++++++++++++++++++++++++++------ src/tabular-api-surface.ts | 11 ++++ 2 files changed, 107 insertions(+), 18 deletions(-) diff --git a/src/metrics-tracer-factory.ts b/src/metrics-tracer-factory.ts index 68f249683..b8c1e5e96 100644 --- a/src/metrics-tracer-factory.ts +++ b/src/metrics-tracer-factory.ts @@ -14,12 +14,11 @@ import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; import {TabularApiSurface} from './tabular-api-surface'; interface OperationInfo { - retries: number; + retries?: number; finalOperationStatus: string; + connectivityErrorCount?: number; } -const buckets = [0.001, 0.01, 0.1, 1, 10, 100]; -const count = 0; interface Dimensions { projectId: string; instanceId: string; @@ -45,12 +44,15 @@ interface Metrics { class MetricsTracer { // TODO: Consider rename. 
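  // Two separate timers are tracked below: the operation timer spans all
  // attempts of a call, while the attempt timer is reset by onAttemptStart
  // on every retry.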
- private startTime: Date; + private operationStartTime: Date | null; + private attemptStartTime: Date | null; private metrics: Metrics; private zone: string | null | undefined; private cluster: string | null | undefined; private tabularApiSurface: TabularApiSurface; private methodName: string; + private receivedFirstResponse: boolean; + private serverTimeRead: boolean; constructor( metrics: Metrics, @@ -60,15 +62,15 @@ class MetricsTracer { this.metrics = metrics; this.zone = null; this.cluster = null; - this.startTime = new Date(); this.tabularApiSurface = tabularApiSurface; this.methodName = methodName; + this.operationStartTime = null; + this.attemptStartTime = null; + this.receivedFirstResponse = false; + this.serverTimeRead = false; } - private getDimensions( - projectId: string, - finalOperationStatus: string - ): Dimensions { + private getBasicDimensions(projectId: string) { return { projectId, instanceId: this.tabularApiSurface.instance.id, @@ -77,35 +79,98 @@ class MetricsTracer { zone: this.zone, appProfileId: this.tabularApiSurface.bigtable.appProfileId, methodName: this.methodName, - finalOperationStatus: finalOperationStatus, clientName: 'nodejs-bigtable', }; } + private getFinalOperationDimensions( + projectId: string, + finalOperationStatus: string + ): Dimensions { + return Object.assign( + { + finalOperationStatus: finalOperationStatus, + }, + this.getBasicDimensions(projectId) + ); + } + + private getAttemptDimensions(projectId: string, attemptStatus: string) { + return Object.assign( + { + attemptStatus: attemptStatus, + }, + this.getBasicDimensions(projectId) + ); + } + + onOperationStart() { + this.operationStartTime = new Date(); + } + onAttemptComplete(info: OperationInfo) { - console.log('onAttemptComplete'); + const endTime = new Date(); + this.tabularApiSurface.bigtable.getProjectId_( + (err: Error | null, projectId?: string) => { + if (projectId && this.attemptStartTime) { + const dimensions = this.getAttemptDimensions( + projectId, + info.finalOperationStatus + ); + const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); + this.metrics.operationLatencies.record(totalTime, dimensions); + } + } + ); } onAttemptStart() { - console.log('onAttemptStart'); + this.attemptStartTime = new Date(); + } + + onFirstResponse() { + const endTime = new Date(); + this.tabularApiSurface.bigtable.getProjectId_( + (err: Error | null, projectId?: string) => { + if (projectId && this.operationStartTime) { + const dimensions = this.getFinalOperationDimensions( + projectId, + 'PENDING' + ); + const totalTime = + endTime.getTime() - this.operationStartTime.getTime(); + if (!this.receivedFirstResponse) { + this.receivedFirstResponse = true; + this.metrics.operationLatencies.record(totalTime, dimensions); + } + } + } + ); } onOperationComplete(info: OperationInfo) { const endTime = new Date(); - const totalTime = endTime.getTime() - this.startTime.getTime(); + this.onAttemptComplete(info); this.tabularApiSurface.bigtable.getProjectId_( (err: Error | null, projectId?: string) => { - if (projectId) { - const dimensions = this.getDimensions( + if (projectId && this.operationStartTime) { + const totalTime = + endTime.getTime() - this.operationStartTime.getTime(); + const dimensions = this.getFinalOperationDimensions( projectId, info.finalOperationStatus ); this.metrics.operationLatencies.record(totalTime, dimensions); this.metrics.retryCount.add(info.retries, dimensions); + if (info.connectivityErrorCount) { + this.metrics.connectivityErrorCount.record( + 
info.connectivityErrorCount, + dimensions + ); + } } } ); - console.log('onOperationComplete'); } onMetadataReceived(metadata: { @@ -120,9 +185,22 @@ class MetricsTracer { ); const durationValues = mappedEntries.get('server-timing')?.split('dur='); if (durationValues && durationValues[1]) { - const serverTime = parseInt(durationValues[1]); + if (!this.serverTimeRead) { + this.serverTimeRead = true; + const serverTime = parseInt(durationValues[1]); + this.tabularApiSurface.bigtable.getProjectId_( + (err: Error | null, projectId?: string) => { + if (projectId) { + const dimensions = this.getAttemptDimensions( + projectId, + 'PENDING' // TODO: Adjust this + ); + this.metrics.operationLatencies.record(serverTime, dimensions); + } + } + ); + } } - console.log(mappedEntries); } onStatusReceived(status: { diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 57fd73135..92935f143 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -219,6 +219,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); metricsTracer.onOperationComplete({ retries: numRequestsMade - 1, finalOperationStatus, + connectivityErrorCount, }); } @@ -230,6 +231,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); const rowsLimit = options.limit || 0; const hasLimit = rowsLimit !== 0; + let connectivityErrorCount = 0; let numConsecutiveErrors = 0; let numRequestsMade = 0; let retryTimer: NodeJS.Timeout | null; @@ -345,7 +347,9 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); return originalEnd(chunk, encoding, cb); }; + metricsTracer.onOperationStart(); const makeNewRequest = () => { + metricsTracer.onAttemptStart(); // Avoid cancelling an expired timer if user // cancelled the stream in the middle of a retry retryTimer = null; @@ -537,6 +541,11 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); .on('error', (error: ServiceError) => { rowStreamUnpipe(rowStream, userStream); activeRequestStream = null; + if (new Set([10, 14, 15]).has(error.code)) { + // The following grpc errors will be considered connectivity errors: + // ABORTED, UNAVAILABLE, DATA_LOSS + connectivityErrorCount++; + } if (IGNORED_STATUS_CODES.has(error.code)) { // We ignore the `cancelled` "error", since we are the ones who cause // it when the user calls `.abort()`. 
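
The magic numbers in the connectivity-error check above are grpc status codes.
A minimal standalone sketch of the same classification, assuming the grpc
status enum that google-gax re-exports (already used elsewhere in this file as
grpc.status.CANCELLED):

    import {grpc} from 'google-gax';

    // ABORTED = 10, UNAVAILABLE = 14 and DATA_LOSS = 15 in the grpc status
    // enum, matching the codes hard-coded in the patch above.
    const CONNECTIVITY_ERROR_CODES = new Set<number>([
      grpc.status.ABORTED,
      grpc.status.UNAVAILABLE,
      grpc.status.DATA_LOSS,
    ]);

    const isConnectivityError = (code: number): boolean =>
      CONNECTIVITY_ERROR_CODES.has(code);
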
@@ -558,6 +567,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); numConsecutiveErrors, backOffSettings ); + metricsTracer.onAttemptComplete({finalOperationStatus: 'ERROR'}); // TODO: Replace ERROR with enum retryTimer = setTimeout(makeNewRequest, nextRetryDelay); } else { if ( @@ -581,6 +591,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // Reset error count after a successful read so the backoff // time won't keep increasing when as stream had multiple errors numConsecutiveErrors = 0; + metricsTracer.onFirstResponse(); }) .on('end', () => { numRequestsMade++; From b88512674b9748a9251fb21044697591b1ef1c3d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 15 Jan 2025 15:19:43 -0500 Subject: [PATCH 011/448] Add support for application blocking latencies --- src/metrics-tracer-factory.ts | 24 ++++++++++++++++++++++++ src/tabular-api-surface.ts | 4 ++++ 2 files changed, 28 insertions(+) diff --git a/src/metrics-tracer-factory.ts b/src/metrics-tracer-factory.ts index b8c1e5e96..1d6ff0c2e 100644 --- a/src/metrics-tracer-factory.ts +++ b/src/metrics-tracer-factory.ts @@ -53,6 +53,7 @@ class MetricsTracer { private methodName: string; private receivedFirstResponse: boolean; private serverTimeRead: boolean; + private lastReadTime: Date | null; constructor( metrics: Metrics, @@ -67,6 +68,7 @@ class MetricsTracer { this.operationStartTime = null; this.attemptStartTime = null; this.receivedFirstResponse = false; + this.lastReadTime = null; this.serverTimeRead = false; } @@ -108,6 +110,28 @@ class MetricsTracer { this.operationStartTime = new Date(); } + onRead() { + const currentTime = new Date(); + if (this.lastReadTime) { + this.tabularApiSurface.bigtable.getProjectId_( + (err: Error | null, projectId?: string) => { + if (projectId && this.lastReadTime) { + const dimensions = this.getAttemptDimensions(projectId, 'PENDING'); + const difference = + currentTime.getTime() - this.lastReadTime.getTime(); + this.metrics.applicationBlockingLatencies.record( + difference, + dimensions + ); + this.lastReadTime = currentTime; + } + } + ); + } else { + this.lastReadTime = currentTime; + } + } + onAttemptComplete(info: OperationInfo) { const endTime = new Date(); this.tabularApiSurface.bigtable.getProjectId_( diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 92935f143..5df73b1ac 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -315,6 +315,10 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); rowsRead++; callback(null, row); }, + read(size) { + metricsTracer.onRead(); + return this.read(size); + }, }); // The caller should be able to call userStream.end() to stop receiving From 5fb300bdae800f45907dc021f6bf146bb6f8d22c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 15 Jan 2025 16:37:09 -0500 Subject: [PATCH 012/448] Add a TODO for date wrapper --- src/metrics-tracer-factory.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/metrics-tracer-factory.ts b/src/metrics-tracer-factory.ts index 1d6ff0c2e..f19260015 100644 --- a/src/metrics-tracer-factory.ts +++ b/src/metrics-tracer-factory.ts @@ -1,6 +1,8 @@ // import * as SDKMetrics from '@opentelemetry/sdk-metrics'; import {Table} from './table'; +// TODO: Mock out Date - ie. 
DateWrapper + const { MeterProvider, Histogram, From feb36e7db4886b57f5ef4c0b9f0c4ed31e603d0b Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 16 Jan 2025 17:23:11 -0500 Subject: [PATCH 013/448] Add first unit test for the metrics tracer --- src/metrics-tracer-factory.ts | 158 +++++++++++++++++------- src/tabular-api-surface.ts | 2 +- test/metrics-tracer.ts | 218 ++++++++++++++++++++++++++++++++++ 3 files changed, 334 insertions(+), 44 deletions(-) create mode 100644 test/metrics-tracer.ts diff --git a/src/metrics-tracer-factory.ts b/src/metrics-tracer-factory.ts index f19260015..1354777da 100644 --- a/src/metrics-tracer-factory.ts +++ b/src/metrics-tracer-factory.ts @@ -44,23 +44,77 @@ interface Metrics { clientBlockingLatencies: typeof Histogram; } +interface DateLike { + getTime(): number; +} + +interface DateProvider { + getDate(): DateLike; +} + +class DefaultDateProvider { + getDate() { + return new Date(); + } +} + +interface ICounter { + add(retries: number, dimensions: {}): void; +} + +interface IHistogram { + record(value: number, dimensions: {}): void; +} + +interface IMeter { + createCounter(instrument: string, attributes: {}): ICounter; + createHistogram(instrument: string, attributes: {}): IHistogram; +} + +interface IMeterProvider { + getMeter(name: string): IMeter; +} + +export interface ObservabilityOptions { + meterProvider: IMeterProvider; +} + +interface IBigtable { + appProfileId?: string; + getProjectId_( + callback: (err: Error | null, projectId?: string) => void + ): void; +} + +interface IInstance { + id: string; +} + +interface ITabularApiSurface { + instance: IInstance; + id: string; + bigtable: IBigtable; +} + class MetricsTracer { // TODO: Consider rename. - private operationStartTime: Date | null; - private attemptStartTime: Date | null; + private operationStartTime: DateLike | null; + private attemptStartTime: DateLike | null; private metrics: Metrics; private zone: string | null | undefined; private cluster: string | null | undefined; - private tabularApiSurface: TabularApiSurface; + private tabularApiSurface: ITabularApiSurface; private methodName: string; private receivedFirstResponse: boolean; private serverTimeRead: boolean; - private lastReadTime: Date | null; + private lastReadTime: DateLike | null; + private dateProvider: DateProvider; constructor( metrics: Metrics, - tabularApiSurface: TabularApiSurface, - methodName: string + tabularApiSurface: ITabularApiSurface, + methodName: string, + dateProvider?: DateProvider ) { this.metrics = metrics; this.zone = null; @@ -72,6 +126,11 @@ class MetricsTracer { this.receivedFirstResponse = false; this.lastReadTime = null; this.serverTimeRead = false; + if (dateProvider) { + this.dateProvider = dateProvider; + } else { + this.dateProvider = new DefaultDateProvider(); + } } private getBasicDimensions(projectId: string) { @@ -109,11 +168,11 @@ class MetricsTracer { } onOperationStart() { - this.operationStartTime = new Date(); + this.operationStartTime = this.dateProvider.getDate(); } onRead() { - const currentTime = new Date(); + const currentTime = this.dateProvider.getDate(); if (this.lastReadTime) { this.tabularApiSurface.bigtable.getProjectId_( (err: Error | null, projectId?: string) => { @@ -135,7 +194,7 @@ class MetricsTracer { } onAttemptComplete(info: OperationInfo) { - const endTime = new Date(); + const endTime = this.dateProvider.getDate(); this.tabularApiSurface.bigtable.getProjectId_( (err: Error | null, projectId?: string) => { if (projectId && this.attemptStartTime) { @@ -151,11 +210,11 @@ 
class MetricsTracer { } onAttemptStart() { - this.attemptStartTime = new Date(); + this.attemptStartTime = this.dateProvider.getDate(); } - onFirstResponse() { - const endTime = new Date(); + onResponse() { + const endTime = this.dateProvider.getDate(); this.tabularApiSurface.bigtable.getProjectId_( (err: Error | null, projectId?: string) => { if (projectId && this.operationStartTime) { @@ -167,7 +226,7 @@ class MetricsTracer { endTime.getTime() - this.operationStartTime.getTime(); if (!this.receivedFirstResponse) { this.receivedFirstResponse = true; - this.metrics.operationLatencies.record(totalTime, dimensions); + this.metrics.firstResponseLatencies.record(totalTime, dimensions); } } } @@ -175,7 +234,7 @@ class MetricsTracer { } onOperationComplete(info: OperationInfo) { - const endTime = new Date(); + const endTime = this.dateProvider.getDate(); this.onAttemptComplete(info); this.tabularApiSurface.bigtable.getProjectId_( (err: Error | null, projectId?: string) => { @@ -221,7 +280,7 @@ class MetricsTracer { projectId, 'PENDING' // TODO: Adjust this ); - this.metrics.operationLatencies.record(serverTime, dimensions); + this.metrics.serverLatencies.record(serverTime, dimensions); } } ); @@ -248,39 +307,43 @@ class MetricsTracer { if (instanceInformation && instanceInformation[1]) { this.cluster = instanceInformation[0]; } - console.log(mappedEntries); } } export class MetricsTracerFactory { private metrics: Metrics; - constructor() { + constructor(observabilityOptions?: ObservabilityOptions) { // Create MeterProvider - const meterProvider = new MeterProvider({ - // Create a resource. Fill the `service.*` attributes in with real values for your service. - // GcpDetectorSync will add in resource information about the current environment if you are - // running on GCP. These resource attributes will be translated to a specific GCP monitored - // resource if running on GCP. Otherwise, metrics will be sent with monitored resource - // `generic_task`. - resource: new Resources.Resource({ - 'service.name': 'example-metric-service', - 'service.namespace': 'samples', - 'service.instance.id': '12345', - 'cloud.resource_manager.project_id': 'cloud-native-db-dpes-shared', - }).merge(new ResourceUtil.GcpDetectorSync().detect()), - readers: [ - // Register the exporter - new PeriodicExportingMetricReader({ - // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by - // Cloud Monitoring. - exportIntervalMillis: 10_000, - exporter: new MetricExporter({ - projectId: 'cloud-native-db-dpes-shared', // TODO: Replace later - }), - }), - ], - }); + const meterProvider = + observabilityOptions && observabilityOptions.meterProvider + ? observabilityOptions.meterProvider + : new MeterProvider({ + // This is the default meter provider + // Create a resource. Fill the `service.*` attributes in with real values for your service. + // GcpDetectorSync will add in resource information about the current environment if you are + // running on GCP. These resource attributes will be translated to a specific GCP monitored + // resource if running on GCP. Otherwise, metrics will be sent with monitored resource + // `generic_task`. 
+ resource: new Resources.Resource({ + 'service.name': 'example-metric-service', + 'service.namespace': 'samples', + 'service.instance.id': '12345', + 'cloud.resource_manager.project_id': + 'cloud-native-db-dpes-shared', + }).merge(new ResourceUtil.GcpDetectorSync().detect()), + readers: [ + // Register the exporter + new PeriodicExportingMetricReader({ + // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by + // Cloud Monitoring. + exportIntervalMillis: 10_000, + exporter: new MetricExporter({ + projectId: 'cloud-native-db-dpes-shared', // TODO: Replace later + }), + }), + ], + }); const meter = meterProvider.getMeter('bigtable.googleapis.com'); this.metrics = { operationLatencies: meter.createHistogram('operation_latencies', { @@ -334,7 +397,16 @@ export class MetricsTracerFactory { }; } - getMetricsTracer(tabularApiSurface: TabularApiSurface, methodName: string) { - return new MetricsTracer(this.metrics, tabularApiSurface, methodName); + getMetricsTracer( + tabularApiSurface: ITabularApiSurface, + methodName: string, + dateProvider?: DateProvider + ) { + return new MetricsTracer( + this.metrics, + tabularApiSurface, + methodName, + dateProvider + ); } } diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 5df73b1ac..7484f84b9 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -595,7 +595,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // Reset error count after a successful read so the backoff // time won't keep increasing when as stream had multiple errors numConsecutiveErrors = 0; - metricsTracer.onFirstResponse(); + metricsTracer.onResponse(); }) .on('end', () => { numRequestsMade++; diff --git a/test/metrics-tracer.ts b/test/metrics-tracer.ts new file mode 100644 index 000000000..433f119f9 --- /dev/null +++ b/test/metrics-tracer.ts @@ -0,0 +1,218 @@ +import {describe} from 'mocha'; +import {MetricsTracerFactory} from '../src/metrics-tracer-factory'; + +// TODO: Shared folder + +class Logger { + private messages: string[] = []; + + log(message: string) { + this.messages.push(message); + } + + getMessages() { + return this.messages; + } +} + +class TestDateLike { + private fakeDate; + constructor(fakeDate: number) { + this.fakeDate = fakeDate; + } + getTime() { + return this.fakeDate; + } +} + +class TestDateProvider { + private dateCounter = 0; + private logger: Logger; + + constructor(logger: Logger) { + this.logger = logger; + } + getDate() { + // The test assumes exactly 1ms passes between each getDate call. 
+ this.dateCounter++; + this.logger.log(`getDate call returns ${this.dateCounter.toString()} ms`); + return new TestDateLike(this.dateCounter); + } +} + +class TestMeterProvider { + private logger: Logger; + constructor(logger: Logger) { + this.logger = logger; + } + getMeter(name: string) { + return new TestMeter(this.logger, name); + } +} + +class TestMeter { + private logger: Logger; + private name: string; + constructor(logger: Logger, name: string) { + this.logger = logger; + this.name = name; + } + createHistogram(instrument: string) { + return new TestHistogram(this.logger, `${this.name}:${instrument}`); + } + createCounter(instrument: string) { + return new TestCounter(this.logger, `${this.name}:${instrument}`); + } +} + +class TestCounter { + private logger: Logger; + private name: string; + constructor(logger: Logger, name: string) { + this.logger = logger; + this.name = name; + } + add(value: number) { + this.logger.log( + `Value added to counter ${this.name} = ${value.toString()} ` + ); + } +} + +class TestHistogram { + private logger: Logger; + private name: string; + constructor(logger: Logger, name: string) { + this.logger = logger; + this.name = name; + } + record(value: number) { + this.logger.log( + `Value added to histogram ${this.name} = ${value.toString()} ` + ); + } +} + +class FakeBigtable { + appProfileId?: string; + metricsTracerFactory: MetricsTracerFactory; + constructor(observabilityOptions: {meterProvider: TestMeterProvider}) { + this.metricsTracerFactory = new MetricsTracerFactory({ + meterProvider: observabilityOptions.meterProvider, + }); + } + + getProjectId_( + callback: (err: Error | null, projectId?: string) => void + ): void { + callback(null, 'my-project'); + } +} +// TODO: Put fixtures into a shared folder that are going to be used +// by system tests. + +class FakeInstance { + id = 'fakeInstanceId'; +} + +class FakeTable { + private logger: Logger; + id = 'fakeTableId'; + instance = new FakeInstance(); + bigtable: FakeBigtable; + + constructor(logger: Logger) { + this.logger = logger; + this.bigtable = new FakeBigtable({ + meterProvider: new TestMeterProvider(this.logger), + }); + } +} +// TODO: Check that there is a server latency for each attempt + +describe.only('Bigtable/MetricsTracer', () => { + it('should record the right metrics with a typical method call', () => { + const logger = new Logger(); + class FakeTable { + id = 'fakeTableId'; + instance = new FakeInstance(); + bigtable = new FakeBigtable({ + meterProvider: new TestMeterProvider(logger), + }); + + fakeMethod(): void { + function createMetadata(duration: string) { + return { + internalRepr: new Map([ + ['server-timing', Buffer.from(`dur=${duration}`)], + ]), + options: {}, + }; + } + const status = { + metadata: { + internalRepr: new Map([ + ['x-goog-ext-425905942-bin', Buffer.from('doLater')], + ]), + options: {}, + }, + }; + const metricsTracer = + this.bigtable.metricsTracerFactory.getMetricsTracer( + this, + 'fakeMethod', + new TestDateProvider(logger) + ); + // In this method we simulate a series of events that might happen + // when a user calls one of the Table methods. + // Here is an example of what might happen in a method call: + logger.log('1. The operation starts'); + metricsTracer.onOperationStart(); + logger.log('2. The attempt starts.'); + metricsTracer.onAttemptStart(); + logger.log('3. Client receives status information.'); + metricsTracer.onStatusReceived(status); + logger.log('4. 
Client receives metadata.'); + metricsTracer.onMetadataReceived(createMetadata('1001')); + logger.log('5. Client receives first row.'); + metricsTracer.onResponse(); + logger.log('6. Client receives metadata.'); + metricsTracer.onMetadataReceived(createMetadata('1002')); + logger.log('7. Client receives second row.'); + metricsTracer.onResponse(); + logger.log('8. A transient error occurs.'); + metricsTracer.onAttemptComplete({finalOperationStatus: 'ERROR'}); + logger.log('9. After a timeout, the second attempt is made.'); + metricsTracer.onAttemptStart(); + logger.log('10. Client receives status information.'); + metricsTracer.onStatusReceived(status); + logger.log('11. Client receives metadata.'); + metricsTracer.onMetadataReceived(createMetadata('1003')); + logger.log('12. Client receives third row.'); + metricsTracer.onResponse(); + logger.log('13. Client receives metadata.'); + metricsTracer.onMetadataReceived(createMetadata('1004')); + logger.log('14. Client receives fourth row.'); + metricsTracer.onResponse(); + logger.log('15. User reads row 1'); + metricsTracer.onRead(); + logger.log('16. User reads row 2'); + metricsTracer.onRead(); + logger.log('17. User reads row 3'); + metricsTracer.onRead(); + logger.log('18. User reads row 4'); + metricsTracer.onRead(); + logger.log('19. Stream ends, operation completes'); + metricsTracer.onOperationComplete({ + retries: 1, + finalOperationStatus: 'SUCCESS', + connectivityErrorCount: 1, + }); + } + } + const table = new FakeTable(); + table.fakeMethod(); + // Ensure events occurred in the right order here: + console.log('test'); + }); +}); From 8465b3a79e78fa9cffd5c6da074f7cd6d70306a2 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 16 Jan 2025 17:32:05 -0500 Subject: [PATCH 014/448] Move the code for the TestMeterProvider to separate file --- common/test-meter-provider.ts | 56 +++++++++++++++++++++++++++++ test/metrics-tracer.ts | 67 +---------------------------------- 2 files changed, 57 insertions(+), 66 deletions(-) create mode 100644 common/test-meter-provider.ts diff --git a/common/test-meter-provider.ts b/common/test-meter-provider.ts new file mode 100644 index 000000000..40bcee4a2 --- /dev/null +++ b/common/test-meter-provider.ts @@ -0,0 +1,56 @@ +export class TestMeterProvider { + private logger: ILogger; + constructor(logger: ILogger) { + this.logger = logger; + } + getMeter(name: string) { + return new TestMeter(this.logger, name); + } +} + +interface ILogger { + log(message: string): void; +} + +class TestMeter { + private logger: ILogger; + private name: string; + constructor(logger: ILogger, name: string) { + this.logger = logger; + this.name = name; + } + createHistogram(instrument: string) { + return new TestHistogram(this.logger, `${this.name}:${instrument}`); + } + createCounter(instrument: string) { + return new TestCounter(this.logger, `${this.name}:${instrument}`); + } +} + +class TestCounter { + private logger: ILogger; + private name: string; + constructor(logger: ILogger, name: string) { + this.logger = logger; + this.name = name; + } + add(value: number) { + this.logger.log( + `Value added to counter ${this.name} = ${value.toString()} ` + ); + } +} + +class TestHistogram { + private logger: ILogger; + private name: string; + constructor(logger: ILogger, name: string) { + this.logger = logger; + this.name = name; + } + record(value: number) { + this.logger.log( + `Value added to histogram ${this.name} = ${value.toString()} ` + ); + } +} diff --git a/test/metrics-tracer.ts b/test/metrics-tracer.ts index 
433f119f9..f23d65201 100644 --- a/test/metrics-tracer.ts +++ b/test/metrics-tracer.ts @@ -1,5 +1,6 @@ import {describe} from 'mocha'; import {MetricsTracerFactory} from '../src/metrics-tracer-factory'; +import {TestMeterProvider} from '../common/test-meter-provider'; // TODO: Shared folder @@ -40,59 +41,6 @@ class TestDateProvider { } } -class TestMeterProvider { - private logger: Logger; - constructor(logger: Logger) { - this.logger = logger; - } - getMeter(name: string) { - return new TestMeter(this.logger, name); - } -} - -class TestMeter { - private logger: Logger; - private name: string; - constructor(logger: Logger, name: string) { - this.logger = logger; - this.name = name; - } - createHistogram(instrument: string) { - return new TestHistogram(this.logger, `${this.name}:${instrument}`); - } - createCounter(instrument: string) { - return new TestCounter(this.logger, `${this.name}:${instrument}`); - } -} - -class TestCounter { - private logger: Logger; - private name: string; - constructor(logger: Logger, name: string) { - this.logger = logger; - this.name = name; - } - add(value: number) { - this.logger.log( - `Value added to counter ${this.name} = ${value.toString()} ` - ); - } -} - -class TestHistogram { - private logger: Logger; - private name: string; - constructor(logger: Logger, name: string) { - this.logger = logger; - this.name = name; - } - record(value: number) { - this.logger.log( - `Value added to histogram ${this.name} = ${value.toString()} ` - ); - } -} - class FakeBigtable { appProfileId?: string; metricsTracerFactory: MetricsTracerFactory; @@ -115,19 +63,6 @@ class FakeInstance { id = 'fakeInstanceId'; } -class FakeTable { - private logger: Logger; - id = 'fakeTableId'; - instance = new FakeInstance(); - bigtable: FakeBigtable; - - constructor(logger: Logger) { - this.logger = logger; - this.bigtable = new FakeBigtable({ - meterProvider: new TestMeterProvider(this.logger), - }); - } -} // TODO: Check that there is a server latency for each attempt describe.only('Bigtable/MetricsTracer', () => { From 7a97aab6d7306a103ef585f6843fe80896e182e3 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 16 Jan 2025 17:38:05 -0500 Subject: [PATCH 015/448] Move the Date provider to a second file --- common/test-date-provider.ts | 29 +++++++++++++++++++++++++++++ test/metrics-tracer.ts | 26 +------------------------- 2 files changed, 30 insertions(+), 25 deletions(-) create mode 100644 common/test-date-provider.ts diff --git a/common/test-date-provider.ts b/common/test-date-provider.ts new file mode 100644 index 000000000..b078c8dac --- /dev/null +++ b/common/test-date-provider.ts @@ -0,0 +1,29 @@ +interface ILogger { + log(message: string): void; +} + +class TestDateLike { + private fakeDate; + constructor(fakeDate: number) { + this.fakeDate = fakeDate; + } + getTime() { + return this.fakeDate; + } +} + +// TODO: ILogger in separate file +export class TestDateProvider { + private dateCounter = 0; + private logger: ILogger; + + constructor(logger: ILogger) { + this.logger = logger; + } + getDate() { + // The test assumes exactly 1ms passes between each getDate call. 
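+    // For example, three consecutive calls return fake times of 1 ms,
+    // 2 ms and 3 ms, so any latency computed from two of these values is
+    // known in advance and can be asserted exactly.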
+ this.dateCounter++; + this.logger.log(`getDate call returns ${this.dateCounter.toString()} ms`); + return new TestDateLike(this.dateCounter); + } +} diff --git a/test/metrics-tracer.ts b/test/metrics-tracer.ts index f23d65201..fc6e4fb63 100644 --- a/test/metrics-tracer.ts +++ b/test/metrics-tracer.ts @@ -1,6 +1,7 @@ import {describe} from 'mocha'; import {MetricsTracerFactory} from '../src/metrics-tracer-factory'; import {TestMeterProvider} from '../common/test-meter-provider'; +import {TestDateProvider} from '../common/test-date-provider'; // TODO: Shared folder @@ -16,31 +17,6 @@ class Logger { } } -class TestDateLike { - private fakeDate; - constructor(fakeDate: number) { - this.fakeDate = fakeDate; - } - getTime() { - return this.fakeDate; - } -} - -class TestDateProvider { - private dateCounter = 0; - private logger: Logger; - - constructor(logger: Logger) { - this.logger = logger; - } - getDate() { - // The test assumes exactly 1ms passes between each getDate call. - this.dateCounter++; - this.logger.log(`getDate call returns ${this.dateCounter.toString()} ms`); - return new TestDateLike(this.dateCounter); - } -} - class FakeBigtable { appProfileId?: string; metricsTracerFactory: MetricsTracerFactory; From 51d3dd3b5bb3171e4dd0244d8ec604dcc28bf33e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 16 Jan 2025 17:40:31 -0500 Subject: [PATCH 016/448] Fix attempt latencies bug --- src/metrics-tracer-factory.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/metrics-tracer-factory.ts b/src/metrics-tracer-factory.ts index 1354777da..fdf602aad 100644 --- a/src/metrics-tracer-factory.ts +++ b/src/metrics-tracer-factory.ts @@ -203,7 +203,7 @@ class MetricsTracer { info.finalOperationStatus ); const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); - this.metrics.operationLatencies.record(totalTime, dimensions); + this.metrics.attemptLatencies.record(totalTime, dimensions); } } ); From ee8c272cdbe259afa6b816c2a48b9bf59574f9fb Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 10:34:34 -0500 Subject: [PATCH 017/448] Add assertion check against text file --- test/{ => metrics-tracer}/metrics-tracer.ts | 13 +++++-- test/metrics-tracer/typical-method-call.txt | 43 +++++++++++++++++++++ 2 files changed, 53 insertions(+), 3 deletions(-) rename test/{ => metrics-tracer}/metrics-tracer.ts (90%) create mode 100644 test/metrics-tracer/typical-method-call.txt diff --git a/test/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts similarity index 90% rename from test/metrics-tracer.ts rename to test/metrics-tracer/metrics-tracer.ts index fc6e4fb63..14b7c8420 100644 --- a/test/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -1,7 +1,9 @@ import {describe} from 'mocha'; -import {MetricsTracerFactory} from '../src/metrics-tracer-factory'; -import {TestMeterProvider} from '../common/test-meter-provider'; -import {TestDateProvider} from '../common/test-date-provider'; +import {MetricsTracerFactory} from '../../src/metrics-tracer-factory'; +import {TestMeterProvider} from '../../common/test-meter-provider'; +import {TestDateProvider} from '../../common/test-date-provider'; +import * as assert from 'assert'; +import * as fs from 'fs'; // TODO: Shared folder @@ -123,7 +125,12 @@ describe.only('Bigtable/MetricsTracer', () => { } const table = new FakeTable(); table.fakeMethod(); + const expectedOutput = fs.readFileSync( + './test/metrics-tracer/typical-method-call.txt', + 'utf8' + ); // Ensure events occurred in the right order here: + 
assert.strictEqual(logger.getMessages().join('\n'), expectedOutput); console.log('test'); }); }); diff --git a/test/metrics-tracer/typical-method-call.txt b/test/metrics-tracer/typical-method-call.txt new file mode 100644 index 000000000..106002085 --- /dev/null +++ b/test/metrics-tracer/typical-method-call.txt @@ -0,0 +1,43 @@ +1. The operation starts +getDate call returns 1 ms +2. The attempt starts. +getDate call returns 2 ms +3. Client receives status information. +4. Client receives metadata. +Value added to histogram bigtable.googleapis.com:server_latencies = 1001 +5. Client receives first row. +getDate call returns 3 ms +Value added to histogram bigtable.googleapis.com:first_response_latencies = 2 +6. Client receives metadata. +7. Client receives second row. +getDate call returns 4 ms +8. A transient error occurs. +getDate call returns 5 ms +Value added to histogram bigtable.googleapis.com:attempt_latencies = 3 +9. After a timeout, the second attempt is made. +getDate call returns 6 ms +10. Client receives status information. +11. Client receives metadata. +12. Client receives third row. +getDate call returns 7 ms +13. Client receives metadata. +14. Client receives fourth row. +getDate call returns 8 ms +15. User reads row 1 +getDate call returns 9 ms +16. User reads row 2 +getDate call returns 10 ms +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1 +17. User reads row 3 +getDate call returns 11 ms +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1 +18. User reads row 4 +getDate call returns 12 ms +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1 +19. Stream ends, operation completes +getDate call returns 13 ms +getDate call returns 14 ms +Value added to histogram bigtable.googleapis.com:attempt_latencies = 8 +Value added to histogram bigtable.googleapis.com:operation_latencies = 12 +Value added to counter bigtable.googleapis.com:retry_count = 1 +Value added to histogram bigtable.googleapis.com:connectivity_error_count = 1 From b7413e881c0d8e6373d3119951e3c67f291ad1de Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 10:42:52 -0500 Subject: [PATCH 018/448] More realistic seconds increment --- common/test-date-provider.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/common/test-date-provider.ts b/common/test-date-provider.ts index b078c8dac..0b6974cfc 100644 --- a/common/test-date-provider.ts +++ b/common/test-date-provider.ts @@ -21,8 +21,8 @@ export class TestDateProvider { this.logger = logger; } getDate() { - // The test assumes exactly 1ms passes between each getDate call. - this.dateCounter++; + // The test assumes exactly 1s passes between each getDate call. 
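+    // Stepping the fake clock by a full 1000 ms models a more realistic
+    // gap between events than the previous 1 ms increment.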
+ this.dateCounter = this.dateCounter + 1000; this.logger.log(`getDate call returns ${this.dateCounter.toString()} ms`); return new TestDateLike(this.dateCounter); } From 7c8877b436ec02694f2ac837980738d28787a212 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 10:43:02 -0500 Subject: [PATCH 019/448] Remove imports --- src/metrics-tracer-factory.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/metrics-tracer-factory.ts b/src/metrics-tracer-factory.ts index fdf602aad..c6d3e4dd3 100644 --- a/src/metrics-tracer-factory.ts +++ b/src/metrics-tracer-factory.ts @@ -9,11 +9,9 @@ const { Counter, PeriodicExportingMetricReader, } = require('@opentelemetry/sdk-metrics'); -// import { MeterProvider, PeriodicExportingMetricReader, Histogram} from '@opentelemetry/sdk-metrics'; import * as Resources from '@opentelemetry/resources'; import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; -import {TabularApiSurface} from './tabular-api-surface'; interface OperationInfo { retries?: number; From 503a2a9e013add0e2d25f9118fcabe2f813b3851 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 11:03:34 -0500 Subject: [PATCH 020/448] Adjust event timings to be more realistic --- common/test-meter-provider.ts | 4 +- test/metrics-tracer/metrics-tracer.ts | 10 ++--- test/metrics-tracer/typical-method-call.txt | 44 ++++++++++----------- 3 files changed, 29 insertions(+), 29 deletions(-) diff --git a/common/test-meter-provider.ts b/common/test-meter-provider.ts index 40bcee4a2..8eca67f35 100644 --- a/common/test-meter-provider.ts +++ b/common/test-meter-provider.ts @@ -36,7 +36,7 @@ class TestCounter { } add(value: number) { this.logger.log( - `Value added to counter ${this.name} = ${value.toString()} ` + `Value added to counter ${this.name} = ${value.toString()}` ); } } @@ -50,7 +50,7 @@ class TestHistogram { } record(value: number) { this.logger.log( - `Value added to histogram ${this.name} = ${value.toString()} ` + `Value added to histogram ${this.name} = ${value.toString()}` ); } } diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 14b7c8420..12c84e9c6 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -86,11 +86,11 @@ describe.only('Bigtable/MetricsTracer', () => { logger.log('3. Client receives status information.'); metricsTracer.onStatusReceived(status); logger.log('4. Client receives metadata.'); - metricsTracer.onMetadataReceived(createMetadata('1001')); + metricsTracer.onMetadataReceived(createMetadata('101')); logger.log('5. Client receives first row.'); metricsTracer.onResponse(); logger.log('6. Client receives metadata.'); - metricsTracer.onMetadataReceived(createMetadata('1002')); + metricsTracer.onMetadataReceived(createMetadata('102')); logger.log('7. Client receives second row.'); metricsTracer.onResponse(); logger.log('8. A transient error occurs.'); @@ -100,11 +100,11 @@ describe.only('Bigtable/MetricsTracer', () => { logger.log('10. Client receives status information.'); metricsTracer.onStatusReceived(status); logger.log('11. Client receives metadata.'); - metricsTracer.onMetadataReceived(createMetadata('1003')); + metricsTracer.onMetadataReceived(createMetadata('103')); logger.log('12. Client receives third row.'); metricsTracer.onResponse(); logger.log('13. 
Client receives metadata.'); - metricsTracer.onMetadataReceived(createMetadata('1004')); + metricsTracer.onMetadataReceived(createMetadata('104')); logger.log('14. Client receives fourth row.'); metricsTracer.onResponse(); logger.log('15. User reads row 1'); @@ -130,7 +130,7 @@ describe.only('Bigtable/MetricsTracer', () => { 'utf8' ); // Ensure events occurred in the right order here: - assert.strictEqual(logger.getMessages().join('\n'), expectedOutput); + assert.strictEqual(logger.getMessages().join('\n') + '\n', expectedOutput); console.log('test'); }); }); diff --git a/test/metrics-tracer/typical-method-call.txt b/test/metrics-tracer/typical-method-call.txt index 106002085..96cec94ac 100644 --- a/test/metrics-tracer/typical-method-call.txt +++ b/test/metrics-tracer/typical-method-call.txt @@ -1,43 +1,43 @@ 1. The operation starts -getDate call returns 1 ms +getDate call returns 1000 ms 2. The attempt starts. -getDate call returns 2 ms +getDate call returns 2000 ms 3. Client receives status information. 4. Client receives metadata. -Value added to histogram bigtable.googleapis.com:server_latencies = 1001 +Value added to histogram bigtable.googleapis.com:server_latencies = 101 5. Client receives first row. -getDate call returns 3 ms -Value added to histogram bigtable.googleapis.com:first_response_latencies = 2 +getDate call returns 3000 ms +Value added to histogram bigtable.googleapis.com:first_response_latencies = 2000 6. Client receives metadata. 7. Client receives second row. -getDate call returns 4 ms +getDate call returns 4000 ms 8. A transient error occurs. -getDate call returns 5 ms -Value added to histogram bigtable.googleapis.com:attempt_latencies = 3 +getDate call returns 5000 ms +Value added to histogram bigtable.googleapis.com:attempt_latencies = 3000 9. After a timeout, the second attempt is made. -getDate call returns 6 ms +getDate call returns 6000 ms 10. Client receives status information. 11. Client receives metadata. 12. Client receives third row. -getDate call returns 7 ms +getDate call returns 7000 ms 13. Client receives metadata. 14. Client receives fourth row. -getDate call returns 8 ms +getDate call returns 8000 ms 15. User reads row 1 -getDate call returns 9 ms +getDate call returns 9000 ms 16. User reads row 2 -getDate call returns 10 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1 +getDate call returns 10000 ms +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 17. User reads row 3 -getDate call returns 11 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1 +getDate call returns 11000 ms +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 18. User reads row 4 -getDate call returns 12 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1 +getDate call returns 12000 ms +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 19. 
Stream ends, operation completes -getDate call returns 13 ms -getDate call returns 14 ms -Value added to histogram bigtable.googleapis.com:attempt_latencies = 8 -Value added to histogram bigtable.googleapis.com:operation_latencies = 12 +getDate call returns 13000 ms +getDate call returns 14000 ms +Value added to histogram bigtable.googleapis.com:attempt_latencies = 8000 +Value added to histogram bigtable.googleapis.com:operation_latencies = 12000 Value added to counter bigtable.googleapis.com:retry_count = 1 Value added to histogram bigtable.googleapis.com:connectivity_error_count = 1 From 938cb2ce1364f1e6447ce45d3076bd24d0ab12c3 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 11:23:09 -0500 Subject: [PATCH 021/448] Remove only --- test/metrics-tracer/metrics-tracer.ts | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 12c84e9c6..16e0e6bd9 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -5,8 +5,6 @@ import {TestDateProvider} from '../../common/test-date-provider'; import * as assert from 'assert'; import * as fs from 'fs'; -// TODO: Shared folder - class Logger { private messages: string[] = []; @@ -43,7 +41,7 @@ class FakeInstance { // TODO: Check that there is a server latency for each attempt -describe.only('Bigtable/MetricsTracer', () => { +describe('Bigtable/MetricsTracer', () => { it('should record the right metrics with a typical method call', () => { const logger = new Logger(); class FakeTable { From 854e1d1e054de67baf3246891d0d84afb373aa3e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 11:28:24 -0500 Subject: [PATCH 022/448] Add comments to the table class --- src/tabular-api-surface.ts | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 7484f84b9..87f18edeb 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -210,6 +210,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); * region_tag:bigtable_api_table_readstream */ createReadStream(opts?: GetRowsOptions) { + /* // Initialize objects for collecting client side metrics. const metricsTracer = this.bigtable.metricsTracerFactory.getMetricsTracer( this, @@ -222,6 +223,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); connectivityErrorCount, }); } + */ const options = opts || {}; const maxRetries = is.number(this.maxRetries) ? this.maxRetries! 
: 10; @@ -315,10 +317,12 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); rowsRead++; callback(null, row); }, + /* read(size) { metricsTracer.onRead(); return this.read(size); }, + */ }); // The caller should be able to call userStream.end() to stop receiving @@ -351,9 +355,9 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); return originalEnd(chunk, encoding, cb); }; - metricsTracer.onOperationStart(); + // metricsTracer.onOperationStart(); const makeNewRequest = () => { - metricsTracer.onAttemptStart(); + // metricsTracer.onAttemptStart(); // Avoid cancelling an expired timer if user // cancelled the stream in the middle of a retry retryTimer = null; @@ -526,6 +530,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); } return false; }; + /* requestStream .on( 'metadata', @@ -541,6 +546,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); metricsTracer.onStatusReceived(status); } ); + */ rowStream .on('error', (error: ServiceError) => { rowStreamUnpipe(rowStream, userStream); @@ -571,7 +577,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); numConsecutiveErrors, backOffSettings ); - metricsTracer.onAttemptComplete({finalOperationStatus: 'ERROR'}); // TODO: Replace ERROR with enum + // metricsTracer.onAttemptComplete({finalOperationStatus: 'ERROR'}); // TODO: Replace ERROR with enum retryTimer = setTimeout(makeNewRequest, nextRetryDelay); } else { if ( @@ -588,19 +594,19 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); error.code = grpc.status.CANCELLED; } userStream.emit('error', error); - onCallComplete('ERROR'); + // onCallComplete('ERROR'); } }) .on('data', () => { // Reset error count after a successful read so the backoff // time won't keep increasing when as stream had multiple errors numConsecutiveErrors = 0; - metricsTracer.onResponse(); + // metricsTracer.onResponse(); }) .on('end', () => { numRequestsMade++; activeRequestStream = null; - onCallComplete('SUCCESS'); + // onCallComplete('SUCCESS'); }); rowStreamPipe(rowStream, userStream); }; From db7d1b1f8eaa1b8240f719051dd53c930ea8760d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 11:35:35 -0500 Subject: [PATCH 023/448] More comments in table --- src/tabular-api-surface.ts | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 87f18edeb..d62b13175 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -233,7 +233,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); const rowsLimit = options.limit || 0; const hasLimit = rowsLimit !== 0; - let connectivityErrorCount = 0; + // let connectivityErrorCount = 0; let numConsecutiveErrors = 0; let numRequestsMade = 0; let retryTimer: NodeJS.Timeout | null; @@ -530,6 +530,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); } return false; }; + /* requestStream .on( @@ -551,11 +552,13 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); .on('error', (error: ServiceError) => { rowStreamUnpipe(rowStream, userStream); activeRequestStream = null; + /* if (new Set([10, 14, 15]).has(error.code)) { // The following grpc errors will be considered connectivity errors: // ABORTED, UNAVAILABLE, DATA_LOSS connectivityErrorCount++; } + */ if (IGNORED_STATUS_CODES.has(error.code)) { // We ignore the `cancelled` "error", since we are the ones who cause // it when the user 
calls `.abort()`. @@ -597,14 +600,14 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // onCallComplete('ERROR'); } }) - .on('data', () => { + .on('data', _ => { // Reset error count after a successful read so the backoff // time won't keep increasing when as stream had multiple errors numConsecutiveErrors = 0; // metricsTracer.onResponse(); }) .on('end', () => { - numRequestsMade++; + // numRequestsMade++; activeRequestStream = null; // onCallComplete('SUCCESS'); }); From ee670378fdb095c8bd91655cc1a6f647a9438bb6 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 11:38:22 -0500 Subject: [PATCH 024/448] Remove TODO --- src/metrics-tracer-factory.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/metrics-tracer-factory.ts b/src/metrics-tracer-factory.ts index c6d3e4dd3..1390775b3 100644 --- a/src/metrics-tracer-factory.ts +++ b/src/metrics-tracer-factory.ts @@ -95,7 +95,6 @@ interface ITabularApiSurface { } class MetricsTracer { - // TODO: Consider rename. private operationStartTime: DateLike | null; private attemptStartTime: DateLike | null; private metrics: Metrics; From ea2fbe28eeda9e4d8068e0868f1b9585ec4e466d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 11:47:08 -0500 Subject: [PATCH 025/448] Move observability options into a separate file --- .../metrics-tracer-factory.ts | 24 ++----------------- .../observability-options.ts | 20 ++++++++++++++++ src/index.ts | 2 +- test/metrics-tracer/metrics-tracer.ts | 2 +- 4 files changed, 24 insertions(+), 24 deletions(-) rename src/{ => client-side-metrics}/metrics-tracer-factory.ts (96%) create mode 100644 src/client-side-metrics/observability-options.ts diff --git a/src/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts similarity index 96% rename from src/metrics-tracer-factory.ts rename to src/client-side-metrics/metrics-tracer-factory.ts index 1390775b3..2bf433f14 100644 --- a/src/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -1,5 +1,5 @@ // import * as SDKMetrics from '@opentelemetry/sdk-metrics'; -import {Table} from './table'; +import {Table} from '../table'; // TODO: Mock out Date - ie. 
DateWrapper @@ -12,6 +12,7 @@ const { import * as Resources from '@opentelemetry/resources'; import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; +import {ObservabilityOptions} from './observability-options'; interface OperationInfo { retries?: number; @@ -56,27 +57,6 @@ class DefaultDateProvider { } } -interface ICounter { - add(retries: number, dimensions: {}): void; -} - -interface IHistogram { - record(value: number, dimensions: {}): void; -} - -interface IMeter { - createCounter(instrument: string, attributes: {}): ICounter; - createHistogram(instrument: string, attributes: {}): IHistogram; -} - -interface IMeterProvider { - getMeter(name: string): IMeter; -} - -export interface ObservabilityOptions { - meterProvider: IMeterProvider; -} - interface IBigtable { appProfileId?: string; getProjectId_( diff --git a/src/client-side-metrics/observability-options.ts b/src/client-side-metrics/observability-options.ts new file mode 100644 index 000000000..ae0698b64 --- /dev/null +++ b/src/client-side-metrics/observability-options.ts @@ -0,0 +1,20 @@ +interface ICounter { + add(retries: number, dimensions: {}): void; +} + +interface IHistogram { + record(value: number, dimensions: {}): void; +} + +interface IMeter { + createCounter(instrument: string, attributes: {}): ICounter; + createHistogram(instrument: string, attributes: {}): IHistogram; +} + +interface IMeterProvider { + getMeter(name: string): IMeter; +} + +export interface ObservabilityOptions { + meterProvider: IMeterProvider; +} diff --git a/src/index.ts b/src/index.ts index 4fc86fbee..fc6020494 100644 --- a/src/index.ts +++ b/src/index.ts @@ -35,7 +35,7 @@ import * as v2 from './v2'; import {PassThrough, Duplex} from 'stream'; import grpcGcpModule = require('grpc-gcp'); import {ClusterUtils} from './utils/cluster'; -import {MetricsTracerFactory} from './metrics-tracer-factory'; +import {MetricsTracerFactory} from './client-side-metrics/metrics-tracer-factory'; // eslint-disable-next-line @typescript-eslint/no-var-requires const streamEvents = require('stream-events'); diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 16e0e6bd9..f666964cf 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -1,5 +1,5 @@ import {describe} from 'mocha'; -import {MetricsTracerFactory} from '../../src/metrics-tracer-factory'; +import {MetricsTracerFactory} from '../../src/client-side-metrics/metrics-tracer-factory'; import {TestMeterProvider} from '../../common/test-meter-provider'; import {TestDateProvider} from '../../common/test-date-provider'; import * as assert from 'assert'; From c22eb5ba33ce818a695c91043fe3936df112eb51 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 13:00:32 -0500 Subject: [PATCH 026/448] inline definitions for the tabular api surface --- .../metrics-tracer-factory.ts | 29 +++++++------------ 1 file changed, 10 insertions(+), 19 deletions(-) diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index 2bf433f14..c1ddfcc8e 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -1,8 +1,3 @@ -// import * as SDKMetrics from '@opentelemetry/sdk-metrics'; -import {Table} from '../table'; - -// TODO: Mock out Date - ie. 
DateWrapper - const { MeterProvider, Histogram, @@ -57,21 +52,17 @@ class DefaultDateProvider { } } -interface IBigtable { - appProfileId?: string; - getProjectId_( - callback: (err: Error | null, projectId?: string) => void - ): void; -} - -interface IInstance { - id: string; -} - -interface ITabularApiSurface { - instance: IInstance; +export interface ITabularApiSurface { + instance: { + id: string; + }; id: string; - bigtable: IBigtable; + bigtable: { + appProfileId?: string; + getProjectId_( + callback: (err: Error | null, projectId?: string) => void + ): void; + }; } class MetricsTracer { From a658a39e0aa41eb83c44ea40202ef99138cb7371 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 13:02:50 -0500 Subject: [PATCH 027/448] Comment source code out for now --- src/index.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/index.ts b/src/index.ts index fc6020494..136cf4a8d 100644 --- a/src/index.ts +++ b/src/index.ts @@ -396,10 +396,10 @@ export class Bigtable { static AppProfile: AppProfile; static Instance: Instance; static Cluster: Cluster; - metricsTracerFactory: MetricsTracerFactory; + // metricsTracerFactory: MetricsTracerFactory; constructor(options: BigtableOptions = {}) { - this.metricsTracerFactory = new MetricsTracerFactory(); + // this.metricsTracerFactory = new MetricsTracerFactory(); // Determine what scopes are needed. // It is the union of the scopes on all three clients. const scopes: string[] = []; @@ -871,7 +871,7 @@ export class Bigtable { gaxStream .on('error', stream.destroy.bind(stream)) .on('metadata', stream.emit.bind(stream, 'metadata')) - .on('status', stream.emit.bind(stream, 'status')) + // .on('status', stream.emit.bind(stream, 'status')) .on('request', stream.emit.bind(stream, 'request')) .pipe(stream); }); From 960b402cb52fa0753d213043950ec8db5711f5e9 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 13:13:29 -0500 Subject: [PATCH 028/448] Add abstractions for classes that have a logger --- common/logger.ts | 19 ++++++++++++++++++ common/test-date-provider.ts | 11 ++--------- common/test-meter-provider.ts | 36 ++++++----------------------------- 3 files changed, 27 insertions(+), 39 deletions(-) create mode 100644 common/logger.ts diff --git a/common/logger.ts b/common/logger.ts new file mode 100644 index 000000000..24cfc2d1a --- /dev/null +++ b/common/logger.ts @@ -0,0 +1,19 @@ +interface ILogger { + log(message: string): void; +} + +export abstract class WithLogger { + protected logger: ILogger; + constructor(logger: ILogger) { + this.logger = logger; + } +} + +export abstract class WithLoggerAndName { + protected logger: ILogger; + protected name: string; + constructor(logger: ILogger, name: string) { + this.logger = logger; + this.name = name; + } +} diff --git a/common/test-date-provider.ts b/common/test-date-provider.ts index 0b6974cfc..6cc74e83c 100644 --- a/common/test-date-provider.ts +++ b/common/test-date-provider.ts @@ -1,6 +1,4 @@ -interface ILogger { - log(message: string): void; -} +import {WithLogger} from './logger'; class TestDateLike { private fakeDate; @@ -13,13 +11,8 @@ class TestDateLike { } // TODO: ILogger in separate file -export class TestDateProvider { +export class TestDateProvider extends WithLogger { private dateCounter = 0; - private logger: ILogger; - - constructor(logger: ILogger) { - this.logger = logger; - } getDate() { // The test assumes exactly 1s passes between each getDate call. 
this.dateCounter = this.dateCounter + 1000; diff --git a/common/test-meter-provider.ts b/common/test-meter-provider.ts index 8eca67f35..8764e7a30 100644 --- a/common/test-meter-provider.ts +++ b/common/test-meter-provider.ts @@ -1,24 +1,12 @@ -export class TestMeterProvider { - private logger: ILogger; - constructor(logger: ILogger) { - this.logger = logger; - } +import {WithLogger, WithLoggerAndName} from './logger'; + +export class TestMeterProvider extends WithLogger { getMeter(name: string) { return new TestMeter(this.logger, name); } } -interface ILogger { - log(message: string): void; -} - -class TestMeter { - private logger: ILogger; - private name: string; - constructor(logger: ILogger, name: string) { - this.logger = logger; - this.name = name; - } +class TestMeter extends WithLoggerAndName { createHistogram(instrument: string) { return new TestHistogram(this.logger, `${this.name}:${instrument}`); } @@ -27,13 +15,7 @@ class TestMeter { } } -class TestCounter { - private logger: ILogger; - private name: string; - constructor(logger: ILogger, name: string) { - this.logger = logger; - this.name = name; - } +class TestCounter extends WithLoggerAndName { add(value: number) { this.logger.log( `Value added to counter ${this.name} = ${value.toString()}` @@ -41,13 +23,7 @@ class TestCounter { } } -class TestHistogram { - private logger: ILogger; - private name: string; - constructor(logger: ILogger, name: string) { - this.logger = logger; - this.name = name; - } +class TestHistogram extends WithLoggerAndName { record(value: number) { this.logger.log( `Value added to histogram ${this.name} = ${value.toString()}` From 23a7c14c0eaa4f938a536ba510a44d543583c13c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 13:20:34 -0500 Subject: [PATCH 029/448] Generate documentation for meter provider --- common/test-meter-provider.ts | 39 +++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/common/test-meter-provider.ts b/common/test-meter-provider.ts index 8764e7a30..609f458ae 100644 --- a/common/test-meter-provider.ts +++ b/common/test-meter-provider.ts @@ -1,21 +1,52 @@ import {WithLogger, WithLoggerAndName} from './logger'; +/** + * A test implementation of a MeterProvider. This MeterProvider is used for testing purposes. + * It doesn't send metrics to a backend, but instead logs metric updates for verification. + */ export class TestMeterProvider extends WithLogger { + /** + * Returns a TestMeter, that logs metric updates for verification. + * @param {string} name The name of the meter. + * @returns {TestMeter} + */ getMeter(name: string) { return new TestMeter(this.logger, name); } } +/** + * A test implementation of a Meter. Used for testing purposes. It doesn't send metrics to a backend, + * but instead logs metric updates for verification. + */ class TestMeter extends WithLoggerAndName { + /** + * Creates a test histogram. The TestHistogram logs when values are recorded. + * @param {string} instrument The name of the instrument. + * @returns {TestHistogram} + */ createHistogram(instrument: string) { return new TestHistogram(this.logger, `${this.name}:${instrument}`); } + /** + * Creates a test counter. The TestCounter logs when values are added. + * @param {string} instrument The name of the instrument. + * @returns {TestCounter} + */ createCounter(instrument: string) { return new TestCounter(this.logger, `${this.name}:${instrument}`); } } +/** + * A test implementation of a Counter. Used for testing purposes. 
It doesn't send metrics to a backend, + * but instead logs value additions for verification. + */ class TestCounter extends WithLoggerAndName { + /** + * Simulates adding a value to the counter. Logs the value and the counter name. + * @param {number} value The value to be added to the counter. + */ add(value: number) { this.logger.log( `Value added to counter ${this.name} = ${value.toString()}` @@ -23,7 +54,15 @@ class TestCounter extends WithLoggerAndName { } } +/** + * A test implementation of a Histogram. Used for testing purposes. It doesn't send metrics to a backend, + * but instead logs recorded values for verification. + */ class TestHistogram extends WithLoggerAndName { + /** + * Simulates recording a value in the histogram. Logs the value and the histogram name. + * @param {number} value The value to be recorded in the histogram. + */ record(value: number) { this.logger.log( `Value added to histogram ${this.name} = ${value.toString()}` From 0ac6d15aa8436027e042f2e6e5f1594cf6636e3f Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 13:26:09 -0500 Subject: [PATCH 030/448] Generate documentation for the date provider --- common/test-date-provider.ts | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/common/test-date-provider.ts b/common/test-date-provider.ts index 6cc74e83c..931fbc8eb 100644 --- a/common/test-date-provider.ts +++ b/common/test-date-provider.ts @@ -1,18 +1,38 @@ import {WithLogger} from './logger'; +/** + * A test implementation of a Date-like object. Used for testing purposes. It provides a + * getTime method that returns a pre-determined fake date value, allowing for + * deterministic testing of time-dependent functionality. + */ class TestDateLike { private fakeDate; + /** + * @param {number} fakeDate The fake date value to be returned by getTime(), in milliseconds. + */ constructor(fakeDate: number) { this.fakeDate = fakeDate; } + /** + * Returns the fake date value that this object was created with. + * @returns {number} The fake date, in milliseconds. + */ getTime() { return this.fakeDate; } } -// TODO: ILogger in separate file +/** + * A test implementation of a DateProvider. Used for testing purposes. Provides + * a deterministic series of fake dates, with each call to getDate() returning a date 1000ms later than the last. + * Logs each date value returned for verification purposes. + */ export class TestDateProvider extends WithLogger { private dateCounter = 0; + /** + * Returns a new fake date 1000ms later than the last. Logs the date for test verification. + * @returns {TestDateLike} A fake date object. + */ getDate() { // The test assumes exactly 1s passes between each getDate call. this.dateCounter = this.dateCounter + 1000; From bad23b276d67acf4e30ae1cc0c4106d902cf57bf Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 13:29:46 -0500 Subject: [PATCH 031/448] Generate logger documentation --- common/logger.ts | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/common/logger.ts b/common/logger.ts index 24cfc2d1a..691eaefdc 100644 --- a/common/logger.ts +++ b/common/logger.ts @@ -1,17 +1,36 @@ +/** + * A simple logger interface for logging messages. Implementations of this interface + * can provide various logging mechanisms (e.g., console logging, file logging, etc.). + */ interface ILogger { log(message: string): void; } +/** + * An abstract base class that provides a logger instance. Subclasses can use this logger + * for logging messages. 
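+ *
+ * A minimal, hypothetical subclass looks like:
+ *
+ *   class NoisyWorker extends WithLogger {
+ *     doWork() {
+ *       this.logger.log('work done');
+ *     }
+ *   }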
+ */ export abstract class WithLogger { protected logger: ILogger; + /** + * @param logger The logger instance to be used by this object. + */ constructor(logger: ILogger) { this.logger = logger; } } +/** + * An abstract base class that provides a logger instance and a name. Subclasses + * can use the logger for logging messages, incorporating the name for context. + */ export abstract class WithLoggerAndName { protected logger: ILogger; protected name: string; + /** + * @param logger The logger instance to be used by this object. + * @param name The name associated with this object. + */ constructor(logger: ILogger, name: string) { this.logger = logger; this.name = name; From 49bd7cad2593cbc99b5c3d271251932320c0c192 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 13:36:03 -0500 Subject: [PATCH 032/448] Observability options documentation --- .../observability-options.ts | 45 +++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/src/client-side-metrics/observability-options.ts b/src/client-side-metrics/observability-options.ts index ae0698b64..91552ef81 100644 --- a/src/client-side-metrics/observability-options.ts +++ b/src/client-side-metrics/observability-options.ts @@ -1,20 +1,65 @@ +/** + * The Counter interface for recording increments of a metric. + */ interface ICounter { + /** + * Adds a value to the counter. + * @param retries The value to be added to the counter. + * @param dimensions The dimensions associated with this value. + */ add(retries: number, dimensions: {}): void; } +/** + * The Histogram interface for recording distributions of values of a metric. + */ interface IHistogram { + /** + * Records a value in the histogram. + * @param value The value to be recorded in the histogram. + * @param dimensions The dimensions associated with this value. + */ record(value: number, dimensions: {}): void; } +/** + * The Meter interface. Meters are responsible for creating and managing instruments (Counters, Histograms, etc.). + */ interface IMeter { + /** + * Creates a Counter instrument, which counts increments of a given metric. + * @param instrument The name of the counter instrument. + * @param attributes The attributes associated with this counter. + * @returns {ICounter} A Counter instance. + */ createCounter(instrument: string, attributes: {}): ICounter; + /** + * Creates a Histogram instrument, which records distributions of values for a given metric. + * @param instrument The name of the histogram instrument. + * @param attributes The attributes associated with this histogram. + * @returns {IHistogram} A Histogram instance. + */ createHistogram(instrument: string, attributes: {}): IHistogram; } +/** + * The MeterProvider interface. A MeterProvider creates and manages Meters. + */ interface IMeterProvider { + /** + * Returns a Meter, which can be used to create instruments for recording measurements. + * @param name The name of the Meter. + * @returns {IMeter} A Meter instance. + */ getMeter(name: string): IMeter; } +/** + * Options for configuring client-side metrics observability. Allows users to provide their own MeterProvider. + */ export interface ObservabilityOptions { + /** + * The MeterProvider to use for recording metrics. If not provided, a default MeterProvider will be used. 
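+   *
+   * For example, a test can supply the logging TestMeterProvider from
+   * common/ instead of a real OpenTelemetry provider (a sketch):
+   *
+   *   const options: ObservabilityOptions = {
+   *     meterProvider: new TestMeterProvider(logger),
+   *   };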
+ */ meterProvider: IMeterProvider; } From 129e8fde3d0d41466e5ada160b4d355335c55370 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 14:14:17 -0500 Subject: [PATCH 033/448] Add more documentation for various MTF methods --- .../metrics-tracer-factory.ts | 98 +++++++++++++++++++ 1 file changed, 98 insertions(+) diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index c1ddfcc8e..840166ae1 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -9,12 +9,28 @@ import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-expor import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; import {ObservabilityOptions} from './observability-options'; +/** + * Information about a Bigtable operation. + */ interface OperationInfo { + /** + * The number of retries attempted for the operation. + */ retries?: number; + /** + * The final status of the operation (e.g., 'OK', 'ERROR'). + */ finalOperationStatus: string; + /** + * Number of times a connectivity error occurred during the operation. + */ connectivityErrorCount?: number; } +/** + * Dimensions (labels) associated with a Bigtable metric. These + * dimensions provide context for the metric values. + */ interface Dimensions { projectId: string; instanceId: string; @@ -27,6 +43,10 @@ interface Dimensions { clientName: string; } +/** + * A collection of OpenTelemetry metric instruments used to record + * Bigtable client-side metrics. + */ interface Metrics { operationLatencies: typeof Histogram; attemptLatencies: typeof Histogram; @@ -38,20 +58,46 @@ interface Metrics { clientBlockingLatencies: typeof Histogram; } +/** + * An interface representing a Date-like object. Provides a `getTime` method + * for retrieving the time value in milliseconds. Used for abstracting time + * in tests. + */ interface DateLike { + /** + * Returns the time value in milliseconds. + * @returns The time value in milliseconds. + */ getTime(): number; } +/** + * Interface for a provider that returns DateLike objects. Used for mocking dates in tests. + */ interface DateProvider { + /** + * Returns a DateLike object. + * @returns A DateLike object representing the current time or a fake time value. + */ getDate(): DateLike; } +/** + * The default DateProvider implementation. Returns the current date and time. + */ class DefaultDateProvider { + /** + * Returns a new Date object representing the current time. + * @returns {Date} The current date and time. + */ getDate() { return new Date(); } } +/** + * An interface representing a tabular API surface, such as a Bigtable table. + */ export interface ITabularApiSurface { instance: { id: string; @@ -65,6 +111,9 @@ export interface ITabularApiSurface { }; } +/** + * A class for tracing and recording client-side metrics related to Bigtable operations. + */ class MetricsTracer { private operationStartTime: DateLike | null; private attemptStartTime: DateLike | null; @@ -78,6 +127,12 @@ class MetricsTracer { private lastReadTime: DateLike | null; private dateProvider: DateProvider; + /** + * @param metrics The metrics instruments to record data with. + * @param tabularApiSurface Information about the Bigtable table being accessed. + * @param methodName The name of the method being traced. + * @param dateProvider A provider for date/time information (for testing). 
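+   *
+   * Instances are normally obtained through
+   * MetricsTracerFactory.getMetricsTracer rather than by calling this
+   * constructor directly.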
+ */ constructor( metrics: Metrics, tabularApiSurface: ITabularApiSurface, @@ -135,10 +190,16 @@ class MetricsTracer { ); } + /** + * Called when the operation starts. Records the start time. + */ onOperationStart() { this.operationStartTime = this.dateProvider.getDate(); } + /** + * Called after the client reads a row. Records application blocking latencies. + */ onRead() { const currentTime = this.dateProvider.getDate(); if (this.lastReadTime) { @@ -161,6 +222,10 @@ class MetricsTracer { } } + /** + * Called when an attempt (e.g., an RPC attempt) completes. Records attempt latencies. + * @param info Information about the completed attempt. + */ onAttemptComplete(info: OperationInfo) { const endTime = this.dateProvider.getDate(); this.tabularApiSurface.bigtable.getProjectId_( @@ -177,10 +242,16 @@ class MetricsTracer { ); } + /** + * Called when a new attempt starts. Records the start time of the attempt. + */ onAttemptStart() { this.attemptStartTime = this.dateProvider.getDate(); } + /** + * Called when the first response is received. Records first response latencies. + */ onResponse() { const endTime = this.dateProvider.getDate(); this.tabularApiSurface.bigtable.getProjectId_( @@ -201,6 +272,11 @@ class MetricsTracer { ); } + /** + * Called when an operation completes (successfully or unsuccessfully). + * Records operation latencies, retry counts, and connectivity error counts. + * @param info Information about the completed operation. + */ onOperationComplete(info: OperationInfo) { const endTime = this.dateProvider.getDate(); this.onAttemptComplete(info); @@ -226,6 +302,10 @@ class MetricsTracer { ); } + /** + * Called when metadata is received. Extracts server timing information if available. + * @param metadata The received metadata. + */ onMetadataReceived(metadata: { internalRepr: Map; options: {}; @@ -256,6 +336,10 @@ class MetricsTracer { } } + /** + * Called when status information is received. Extracts zone and cluster information. + * @param status The received status information. + */ onStatusReceived(status: { metadata: {internalRepr: Map; options: {}}; }) { @@ -278,9 +362,16 @@ class MetricsTracer { } } +/** + * A factory class for creating MetricsTracer instances. Initializes + * OpenTelemetry metrics instruments. + */ export class MetricsTracerFactory { private metrics: Metrics; + /** + * @param observabilityOptions Options for configuring client-side metrics observability. + */ constructor(observabilityOptions?: ObservabilityOptions) { // Create MeterProvider const meterProvider = @@ -365,6 +456,13 @@ export class MetricsTracerFactory { }; } + /** + * Creates a new MetricsTracer instance. + * @param tabularApiSurface The Bigtable table being accessed. + * @param methodName The name of the method being traced. + * @param dateProvider An optional DateProvider for testing purposes. + * @returns A new MetricsTracer instance. 
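+   *
+   * A typical lifecycle for the returned tracer (the names `factory`,
+   * `table` and the method name are illustrative):
+   *
+   *   const tracer = factory.getMetricsTracer(table, 'readRows');
+   *   tracer.onOperationStart();
+   *   tracer.onAttemptStart();
+   *   // ... rows stream back ...
+   *   tracer.onOperationComplete({retries: 0, finalOperationStatus: 'SUCCESS'});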
+ */ getMetricsTracer( tabularApiSurface: ITabularApiSurface, methodName: string, From 052c7bbd349e74d1d8e4d44d52cc22f0a32d62df Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 14:30:36 -0500 Subject: [PATCH 034/448] Comment out Metrics --- src/index.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/index.ts b/src/index.ts index 136cf4a8d..f277a4cbf 100644 --- a/src/index.ts +++ b/src/index.ts @@ -35,7 +35,7 @@ import * as v2 from './v2'; import {PassThrough, Duplex} from 'stream'; import grpcGcpModule = require('grpc-gcp'); import {ClusterUtils} from './utils/cluster'; -import {MetricsTracerFactory} from './client-side-metrics/metrics-tracer-factory'; +// import {MetricsTracerFactory} from './client-side-metrics/metrics-tracer-factory'; // eslint-disable-next-line @typescript-eslint/no-var-requires const streamEvents = require('stream-events'); From ac27a9563ae4e49f1bf186519365599e013268c1 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 14:37:44 -0500 Subject: [PATCH 035/448] Add a bunch of TODOs in front of the comments --- src/index.ts | 5 +++++ src/tabular-api-surface.ts | 13 +++++++++++++ 2 files changed, 18 insertions(+) diff --git a/src/index.ts b/src/index.ts index f277a4cbf..e77a67822 100644 --- a/src/index.ts +++ b/src/index.ts @@ -35,6 +35,7 @@ import * as v2 from './v2'; import {PassThrough, Duplex} from 'stream'; import grpcGcpModule = require('grpc-gcp'); import {ClusterUtils} from './utils/cluster'; +// TODO: Uncomment the next line after client-side metrics are well tested. // import {MetricsTracerFactory} from './client-side-metrics/metrics-tracer-factory'; // eslint-disable-next-line @typescript-eslint/no-var-requires @@ -396,10 +397,13 @@ export class Bigtable { static AppProfile: AppProfile; static Instance: Instance; static Cluster: Cluster; + // TODO: Uncomment the next line after client-side metrics are well tested. // metricsTracerFactory: MetricsTracerFactory; constructor(options: BigtableOptions = {}) { + // TODO: Uncomment the next line after client-side metrics are well tested. // this.metricsTracerFactory = new MetricsTracerFactory(); + // Determine what scopes are needed. // It is the union of the scopes on all three clients. const scopes: string[] = []; @@ -871,6 +875,7 @@ export class Bigtable { gaxStream .on('error', stream.destroy.bind(stream)) .on('metadata', stream.emit.bind(stream, 'metadata')) + // TODO: Uncomment the next line after client-side metrics are well tested. // .on('status', stream.emit.bind(stream, 'status')) .on('request', stream.emit.bind(stream, 'request')) .pipe(stream); diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index d62b13175..fda3c1eae 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -210,6 +210,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); * region_tag:bigtable_api_table_readstream */ createReadStream(opts?: GetRowsOptions) { + // TODO: Uncomment the next line after client-side metrics are well tested. /* // Initialize objects for collecting client side metrics. const metricsTracer = this.bigtable.metricsTracerFactory.getMetricsTracer( @@ -233,6 +234,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); const rowsLimit = options.limit || 0; const hasLimit = rowsLimit !== 0; + // TODO: Uncomment the next line after client-side metrics are well tested. 
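+    // (When re-enabled, connectivityErrorCount tallies gRPC ABORTED,
+    // UNAVAILABLE and DATA_LOSS failures, i.e. requests that never
+    // reached Google's network; see the error handler further down.)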
// let connectivityErrorCount = 0; let numConsecutiveErrors = 0; let numRequestsMade = 0; @@ -317,6 +319,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); rowsRead++; callback(null, row); }, + // TODO: Uncomment the next line after client-side metrics are well tested. /* read(size) { metricsTracer.onRead(); @@ -355,9 +358,12 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); return originalEnd(chunk, encoding, cb); }; + // TODO: Uncomment the next line after client-side metrics are well tested. // metricsTracer.onOperationStart(); const makeNewRequest = () => { + // TODO: Uncomment the next line after client-side metrics are well tested. // metricsTracer.onAttemptStart(); + // Avoid cancelling an expired timer if user // cancelled the stream in the middle of a retry retryTimer = null; @@ -531,6 +537,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); return false; }; + // TODO: Uncomment the next line after client-side metrics are well tested. /* requestStream .on( @@ -552,6 +559,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); .on('error', (error: ServiceError) => { rowStreamUnpipe(rowStream, userStream); activeRequestStream = null; + // TODO: Uncomment the next line after client-side metrics are well tested. /* if (new Set([10, 14, 15]).has(error.code)) { // The following grpc errors will be considered connectivity errors: @@ -580,6 +588,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); numConsecutiveErrors, backOffSettings ); + // TODO: Uncomment the next line after client-side metrics are well tested. // metricsTracer.onAttemptComplete({finalOperationStatus: 'ERROR'}); // TODO: Replace ERROR with enum retryTimer = setTimeout(makeNewRequest, nextRetryDelay); } else { @@ -597,6 +606,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); error.code = grpc.status.CANCELLED; } userStream.emit('error', error); + // TODO: Uncomment the next line after client-side metrics are well tested. // onCallComplete('ERROR'); } }) @@ -604,11 +614,14 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // Reset error count after a successful read so the backoff // time won't keep increasing when as stream had multiple errors numConsecutiveErrors = 0; + // TODO: Uncomment the next line after client-side metrics are well tested. // metricsTracer.onResponse(); }) .on('end', () => { + // TODO: Uncomment the next line after client-side metrics are well tested. // numRequestsMade++; activeRequestStream = null; + // TODO: Uncomment the next line after client-side metrics are well tested. // onCallComplete('SUCCESS'); }); rowStreamPipe(rowStream, userStream); From 18c942e9a0a696468bc94780de26c3eb04b86a46 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 14:53:28 -0500 Subject: [PATCH 036/448] Delete client-side-metrics file --- system-test/client-side-metrics.ts | 93 ------------------------------ 1 file changed, 93 deletions(-) delete mode 100644 system-test/client-side-metrics.ts diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts deleted file mode 100644 index 9ac299773..000000000 --- a/system-test/client-side-metrics.ts +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2024 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import {Bigtable} from '../src'; -import * as assert from 'assert'; -import {describe, it, before, after} from 'mocha'; - -describe.only('Bigtable/Table#getRows', () => { - const bigtable = new Bigtable({ - projectId: 'cloud-native-db-dpes-shared', - }); - const instanceId = 'emulator-test-instance'; - const tableId = 'my-table'; - const columnFamilyId = 'cf1'; - const clusterId = 'test-cluster'; - const location = 'us-central1-c'; - - before(async () => { - const instance = bigtable.instance(instanceId); - try { - const [instanceInfo] = await instance.exists(); - if (!instanceInfo) { - const [, operation] = await instance.create({ - clusters: { - id: 'fake-cluster3', - location: 'us-west1-c', - nodes: 1, - }, - }); - await operation.promise(); - } - - const table = instance.table(tableId); - const [tableExists] = await table.exists(); - if (!tableExists) { - await table.create({families: [columnFamilyId]}); // Create column family - } else { - // Check if column family exists and create it if not. - const [families] = await table.getFamilies(); - - if (!families.some(family => family.id === columnFamilyId)) { - await table.createFamily(columnFamilyId); - } - } - } catch (error) { - console.error('Error during setup:', error); - // Consider re-throwing error, to actually stop tests. - } - }); - - after(async () => { - const instance = bigtable.instance(instanceId); - await instance.delete({}); - }); - - it('should read rows after inserting data', async () => { - const instance = bigtable.instance(instanceId); - const table = instance.table(tableId); - const rows = [ - { - key: 'row1', - data: { - cf1: { - q1: 'value1', - }, - }, - }, - { - key: 'row2', - data: { - cf1: { - q2: 'value2', - }, - }, - }, - ]; - await table.insert(rows); - for (let i = 0; i < 100; i++) { - console.log(await table.getRows()); - } - }); -}); From 7a3aabc3b236f7c509a10401c9fd5e1c18093d91 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 14:53:46 -0500 Subject: [PATCH 037/448] Revert "Delete client-side-metrics file" This reverts commit 18c942e9a0a696468bc94780de26c3eb04b86a46. --- system-test/client-side-metrics.ts | 93 ++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 system-test/client-side-metrics.ts diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts new file mode 100644 index 000000000..9ac299773 --- /dev/null +++ b/system-test/client-side-metrics.ts @@ -0,0 +1,93 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import {Bigtable} from '../src'; +import * as assert from 'assert'; +import {describe, it, before, after} from 'mocha'; + +describe.only('Bigtable/Table#getRows', () => { + const bigtable = new Bigtable({ + projectId: 'cloud-native-db-dpes-shared', + }); + const instanceId = 'emulator-test-instance'; + const tableId = 'my-table'; + const columnFamilyId = 'cf1'; + const clusterId = 'test-cluster'; + const location = 'us-central1-c'; + + before(async () => { + const instance = bigtable.instance(instanceId); + try { + const [instanceInfo] = await instance.exists(); + if (!instanceInfo) { + const [, operation] = await instance.create({ + clusters: { + id: 'fake-cluster3', + location: 'us-west1-c', + nodes: 1, + }, + }); + await operation.promise(); + } + + const table = instance.table(tableId); + const [tableExists] = await table.exists(); + if (!tableExists) { + await table.create({families: [columnFamilyId]}); // Create column family + } else { + // Check if column family exists and create it if not. + const [families] = await table.getFamilies(); + + if (!families.some(family => family.id === columnFamilyId)) { + await table.createFamily(columnFamilyId); + } + } + } catch (error) { + console.error('Error during setup:', error); + // Consider re-throwing error, to actually stop tests. + } + }); + + after(async () => { + const instance = bigtable.instance(instanceId); + await instance.delete({}); + }); + + it('should read rows after inserting data', async () => { + const instance = bigtable.instance(instanceId); + const table = instance.table(tableId); + const rows = [ + { + key: 'row1', + data: { + cf1: { + q1: 'value1', + }, + }, + }, + { + key: 'row2', + data: { + cf1: { + q2: 'value2', + }, + }, + }, + ]; + await table.insert(rows); + for (let i = 0; i < 100; i++) { + console.log(await table.getRows()); + } + }); +}); From 5906c29987fee2c55be44a9e9e0a931930051db8 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 14:53:51 -0500 Subject: [PATCH 038/448] Revert "Revert "Delete client-side-metrics file"" This reverts commit 7a3aabc3b236f7c509a10401c9fd5e1c18093d91. --- system-test/client-side-metrics.ts | 93 ------------------------------ 1 file changed, 93 deletions(-) delete mode 100644 system-test/client-side-metrics.ts diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts deleted file mode 100644 index 9ac299773..000000000 --- a/system-test/client-side-metrics.ts +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2024 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import {Bigtable} from '../src'; -import * as assert from 'assert'; -import {describe, it, before, after} from 'mocha'; - -describe.only('Bigtable/Table#getRows', () => { - const bigtable = new Bigtable({ - projectId: 'cloud-native-db-dpes-shared', - }); - const instanceId = 'emulator-test-instance'; - const tableId = 'my-table'; - const columnFamilyId = 'cf1'; - const clusterId = 'test-cluster'; - const location = 'us-central1-c'; - - before(async () => { - const instance = bigtable.instance(instanceId); - try { - const [instanceInfo] = await instance.exists(); - if (!instanceInfo) { - const [, operation] = await instance.create({ - clusters: { - id: 'fake-cluster3', - location: 'us-west1-c', - nodes: 1, - }, - }); - await operation.promise(); - } - - const table = instance.table(tableId); - const [tableExists] = await table.exists(); - if (!tableExists) { - await table.create({families: [columnFamilyId]}); // Create column family - } else { - // Check if column family exists and create it if not. - const [families] = await table.getFamilies(); - - if (!families.some(family => family.id === columnFamilyId)) { - await table.createFamily(columnFamilyId); - } - } - } catch (error) { - console.error('Error during setup:', error); - // Consider re-throwing error, to actually stop tests. - } - }); - - after(async () => { - const instance = bigtable.instance(instanceId); - await instance.delete({}); - }); - - it('should read rows after inserting data', async () => { - const instance = bigtable.instance(instanceId); - const table = instance.table(tableId); - const rows = [ - { - key: 'row1', - data: { - cf1: { - q1: 'value1', - }, - }, - }, - { - key: 'row2', - data: { - cf1: { - q2: 'value2', - }, - }, - }, - ]; - await table.insert(rows); - for (let i = 0; i < 100; i++) { - console.log(await table.getRows()); - } - }); -}); From be731af7d428825e5afe6cf99f50bf4bc3bdacc2 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 14:58:32 -0500 Subject: [PATCH 039/448] Add headers --- common/logger.ts | 14 ++++++++++++++ common/test-date-provider.ts | 14 ++++++++++++++ common/test-meter-provider.ts | 14 ++++++++++++++ src/client-side-metrics/metrics-tracer-factory.ts | 14 ++++++++++++++ src/client-side-metrics/observability-options.ts | 14 ++++++++++++++ test/metrics-tracer/metrics-tracer.ts | 14 ++++++++++++++ 6 files changed, 84 insertions(+) diff --git a/common/logger.ts b/common/logger.ts index 691eaefdc..82baa0f9c 100644 --- a/common/logger.ts +++ b/common/logger.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + /** * A simple logger interface for logging messages. Implementations of this interface * can provide various logging mechanisms (e.g., console logging, file logging, etc.). 
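The hunk above ends at the doc comment for the logger abstraction, so the interface body itself is out of frame in this patch. As a rough sketch of the kind of implementation that comment describes (the single log(message) method is an assumption inferred from the logger.log(...) calls in the tests later in this series, not the confirmed shape of common/logger.ts):

// Sketch only: assumes the logger contract is a single log(message) method.
interface SketchLogger {
  log(message: string): void;
}

// A console-backed implementation of the assumed contract. The test suite
// instead buffers these messages so they can be compared against a golden
// file such as typical-method-call.txt further down in this series.
class ConsoleLogger implements SketchLogger {
  log(message: string): void {
    console.log(message);
  }
}
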
diff --git a/common/test-date-provider.ts b/common/test-date-provider.ts index 931fbc8eb..71ef66aee 100644 --- a/common/test-date-provider.ts +++ b/common/test-date-provider.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import {WithLogger} from './logger'; /** diff --git a/common/test-meter-provider.ts b/common/test-meter-provider.ts index 609f458ae..7a2494a92 100644 --- a/common/test-meter-provider.ts +++ b/common/test-meter-provider.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import {WithLogger, WithLoggerAndName} from './logger'; /** diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index 840166ae1..f0b76fb97 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + const { MeterProvider, Histogram, diff --git a/src/client-side-metrics/observability-options.ts b/src/client-side-metrics/observability-options.ts index 91552ef81..7d54fe623 100644 --- a/src/client-side-metrics/observability-options.ts +++ b/src/client-side-metrics/observability-options.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + /** * The Counter interface for recording increments of a metric. 
*/ diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index f666964cf..2cb8d9af6 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import {describe} from 'mocha'; import {MetricsTracerFactory} from '../../src/client-side-metrics/metrics-tracer-factory'; import {TestMeterProvider} from '../../common/test-meter-provider'; From c26640f5e2f914afe2f3006d8056ce7cf11b6bbb Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 15:01:01 -0500 Subject: [PATCH 040/448] Remove TODOs --- test/metrics-tracer/metrics-tracer.ts | 4 ---- 1 file changed, 4 deletions(-) diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 2cb8d9af6..91587b28c 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -46,15 +46,11 @@ class FakeBigtable { callback(null, 'my-project'); } } -// TODO: Put fixtures into a shared folder that are going to be used -// by system tests. class FakeInstance { id = 'fakeInstanceId'; } -// TODO: Check that there is a server latency for each attempt - describe('Bigtable/MetricsTracer', () => { it('should record the right metrics with a typical method call', () => { const logger = new Logger(); From 3011e5069da2b534edd3c530e0f66e8199daa0f0 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 15:28:14 -0500 Subject: [PATCH 041/448] Add AttemptInfo to distinguish from OperationInfo --- src/client-side-metrics/metrics-tracer-factory.ts | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index f0b76fb97..790ecfc68 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -39,6 +39,12 @@ interface OperationInfo { * Number of times a connectivity error occurred during the operation. */ connectivityErrorCount?: number; + isStreaming: string; +} + +interface AttemptInfo { + finalOperationStatus: string; + isStreaming: string; } /** @@ -240,7 +246,7 @@ class MetricsTracer { * Called when an attempt (e.g., an RPC attempt) completes. Records attempt latencies. * @param info Information about the completed attempt. 
*/ - onAttemptComplete(info: OperationInfo) { + onAttemptComplete(info: AttemptInfo) { const endTime = this.dateProvider.getDate(); this.tabularApiSurface.bigtable.getProjectId_( (err: Error | null, projectId?: string) => { From 945f237524db1037cca9bc09841ded725bc73f35 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 17:03:49 -0500 Subject: [PATCH 042/448] Adjust dimensions to match documentation --- common/client-side-metrics-dimensions.ts | 22 ++++ common/test-meter-provider.ts | 11 +- .../metrics-tracer-factory.ts | 114 ++++++++++++------ test/metrics-tracer/metrics-tracer.ts | 38 ++++-- 4 files changed, 135 insertions(+), 50 deletions(-) create mode 100644 common/client-side-metrics-dimensions.ts diff --git a/common/client-side-metrics-dimensions.ts b/common/client-side-metrics-dimensions.ts new file mode 100644 index 000000000..8c85ee388 --- /dev/null +++ b/common/client-side-metrics-dimensions.ts @@ -0,0 +1,22 @@ +/** + * Dimensions (labels) associated with a Bigtable metric. These + * dimensions provide context for the metric values. + */ +export interface Dimensions { + projectId: string; + instanceId: string; + table: string; + cluster?: string | null; + zone?: string | null; + appProfileId?: string; + methodName: string; + attemptStatus?: string; + finalOperationStatus?: string; + streamingOperation?: string; + clientName: string; +} + +export function dimensionsToString(d: Dimensions) { + const p = (dimension?: string | null) => (dimension ? dimension : ''); + return `${p(d.projectId)};${p(d.instanceId)};${p(d.table)};${p(d.cluster)};${p(d.zone)};${p(d.appProfileId)};${p(d.methodName)};${p(d.attemptStatus)};${p(d.finalOperationStatus)};${p(d.streamingOperation)};${p(d.clientName)}`; +} diff --git a/common/test-meter-provider.ts b/common/test-meter-provider.ts index 7a2494a92..dc9cb741e 100644 --- a/common/test-meter-provider.ts +++ b/common/test-meter-provider.ts @@ -13,6 +13,7 @@ // limitations under the License. import {WithLogger, WithLoggerAndName} from './logger'; +import {Dimensions, dimensionsToString} from './client-side-metrics-dimensions'; /** * A test implementation of a MeterProvider. This MeterProvider is used for testing purposes. @@ -60,10 +61,11 @@ class TestCounter extends WithLoggerAndName { /** * Simulates adding a value to the counter. Logs the value and the counter name. * @param {number} value The value to be added to the counter. + * @param {Dimensions} dimensions The dimensions associated with the value. */ - add(value: number) { + add(value: number, dimensions: Dimensions) { this.logger.log( - `Value added to counter ${this.name} = ${value.toString()}` + `Value added to counter ${this.name} = ${value.toString()} with dimensions ${dimensionsToString(dimensions)}` ); } } @@ -76,10 +78,11 @@ class TestHistogram extends WithLoggerAndName { /** * Simulates recording a value in the histogram. Logs the value and the histogram name. * @param {number} value The value to be recorded in the histogram. + * @param {Dimensions} dimensions The dimensions associated with the value. 
*/ - record(value: number) { + record(value: number, dimensions: Dimensions) { this.logger.log( - `Value added to histogram ${this.name} = ${value.toString()}` + `Value added to histogram ${this.name} = ${value.toString()} with dimensions ${dimensionsToString(dimensions)}` ); } } diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index 790ecfc68..3bf843829 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +import {Dimensions} from '../../common/client-side-metrics-dimensions'; + const { MeterProvider, Histogram, @@ -39,28 +41,12 @@ interface OperationInfo { * Number of times a connectivity error occurred during the operation. */ connectivityErrorCount?: number; - isStreaming: string; + streamingOperation: string; } interface AttemptInfo { finalOperationStatus: string; - isStreaming: string; -} - -/** - * Dimensions (labels) associated with a Bigtable metric. These - * dimensions provide context for the metric values. - */ -interface Dimensions { - projectId: string; - instanceId: string; - table: string; - cluster?: string | null; - zone?: string | null; - appProfileId?: string; - methodName: string; - finalOperationStatus: string; - clientName: string; + streamingOperation: string; } /** @@ -189,7 +175,21 @@ class MetricsTracer { }; } - private getFinalOperationDimensions( + private getOperationLatencyDimensions( + projectId: string, + finalOperationStatus: string, + streamOperation?: string + ): Dimensions { + return Object.assign( + { + finalOperationStatus: finalOperationStatus, + streamingOperation: streamOperation, + }, + this.getBasicDimensions(projectId) + ); + } + + private getFinalOpDimensions( projectId: string, finalOperationStatus: string ): Dimensions { @@ -201,7 +201,21 @@ class MetricsTracer { ); } - private getAttemptDimensions(projectId: string, attemptStatus: string) { + private getAttemptDimensions( + projectId: string, + attemptStatus: string, + streamingOperation: string + ) { + return Object.assign( + { + attemptStatus: attemptStatus, + streamingOperation: streamingOperation, + }, + this.getBasicDimensions(projectId) + ); + } + + private getAttemptStatusDimensions(projectId: string, attemptStatus: string) { return Object.assign( { attemptStatus: attemptStatus, @@ -226,7 +240,7 @@ class MetricsTracer { this.tabularApiSurface.bigtable.getProjectId_( (err: Error | null, projectId?: string) => { if (projectId && this.lastReadTime) { - const dimensions = this.getAttemptDimensions(projectId, 'PENDING'); + const dimensions = this.getBasicDimensions(projectId); const difference = currentTime.getTime() - this.lastReadTime.getTime(); this.metrics.applicationBlockingLatencies.record( @@ -253,7 +267,8 @@ class MetricsTracer { if (projectId && this.attemptStartTime) { const dimensions = this.getAttemptDimensions( projectId, - info.finalOperationStatus + info.finalOperationStatus, + info.streamingOperation ); const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); this.metrics.attemptLatencies.record(totalTime, dimensions); @@ -272,14 +287,14 @@ class MetricsTracer { /** * Called when the first response is received. Records first response latencies. 
*/ - onResponse() { + onResponse(finalOperationStatus: string) { const endTime = this.dateProvider.getDate(); this.tabularApiSurface.bigtable.getProjectId_( (err: Error | null, projectId?: string) => { if (projectId && this.operationStartTime) { - const dimensions = this.getFinalOperationDimensions( + const dimensions = this.getFinalOpDimensions( projectId, - 'PENDING' + finalOperationStatus ); const totalTime = endTime.getTime() - this.operationStartTime.getTime(); @@ -305,16 +320,36 @@ class MetricsTracer { if (projectId && this.operationStartTime) { const totalTime = endTime.getTime() - this.operationStartTime.getTime(); - const dimensions = this.getFinalOperationDimensions( - projectId, - info.finalOperationStatus - ); - this.metrics.operationLatencies.record(totalTime, dimensions); - this.metrics.retryCount.add(info.retries, dimensions); + { + // This block records operation latency metrics. + const operationLatencyDimensions = + this.getOperationLatencyDimensions( + projectId, + info.finalOperationStatus, + info.streamingOperation + ); + this.metrics.operationLatencies.record( + totalTime, + operationLatencyDimensions + ); + } + if (info.retries) { + // This block records the retry count metrics + const retryCountDimensions = this.getFinalOpDimensions( + projectId, + info.finalOperationStatus + ); + this.metrics.retryCount.add(info.retries, retryCountDimensions); + } if (info.connectivityErrorCount) { + // This block records the connectivity error count metrics + const connectivityCountDimensions = this.getAttemptStatusDimensions( + projectId, + info.finalOperationStatus + ); this.metrics.connectivityErrorCount.record( info.connectivityErrorCount, - dimensions + connectivityCountDimensions ); } } @@ -324,12 +359,16 @@ class MetricsTracer { /** * Called when metadata is received. Extracts server timing information if available. + * @param info Information about the completed attempt. * @param metadata The received metadata. */ - onMetadataReceived(metadata: { - internalRepr: Map; - options: {}; - }) { + onMetadataReceived( + info: AttemptInfo, + metadata: { + internalRepr: Map; + options: {}; + } + ) { const mappedEntries = new Map( Array.from(metadata.internalRepr.entries(), ([key, value]) => [ key, @@ -346,7 +385,8 @@ class MetricsTracer { if (projectId) { const dimensions = this.getAttemptDimensions( projectId, - 'PENDING' // TODO: Adjust this + info.finalOperationStatus, + info.streamingOperation ); this.metrics.serverLatencies.record(serverTime, dimensions); } diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 91587b28c..78163c771 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -62,6 +62,10 @@ describe('Bigtable/MetricsTracer', () => { }); fakeMethod(): void { + const standardAttemptInfo = { + finalOperationStatus: 'PENDING', + streamingOperation: 'YES', + }; function createMetadata(duration: string) { return { internalRepr: new Map([ @@ -94,27 +98,42 @@ describe('Bigtable/MetricsTracer', () => { logger.log('3. Client receives status information.'); metricsTracer.onStatusReceived(status); logger.log('4. Client receives metadata.'); - metricsTracer.onMetadataReceived(createMetadata('101')); + metricsTracer.onMetadataReceived( + standardAttemptInfo, + createMetadata('101') + ); logger.log('5. Client receives first row.'); - metricsTracer.onResponse(); + metricsTracer.onResponse('PENDING'); logger.log('6. 
Client receives metadata.'); - metricsTracer.onMetadataReceived(createMetadata('102')); + metricsTracer.onMetadataReceived( + standardAttemptInfo, + createMetadata('102') + ); logger.log('7. Client receives second row.'); - metricsTracer.onResponse(); + metricsTracer.onResponse('PENDING'); logger.log('8. A transient error occurs.'); - metricsTracer.onAttemptComplete({finalOperationStatus: 'ERROR'}); + metricsTracer.onAttemptComplete({ + finalOperationStatus: 'ERROR', + streamingOperation: 'YES', + }); logger.log('9. After a timeout, the second attempt is made.'); metricsTracer.onAttemptStart(); logger.log('10. Client receives status information.'); metricsTracer.onStatusReceived(status); logger.log('11. Client receives metadata.'); - metricsTracer.onMetadataReceived(createMetadata('103')); + metricsTracer.onMetadataReceived( + standardAttemptInfo, + createMetadata('103') + ); logger.log('12. Client receives third row.'); - metricsTracer.onResponse(); + metricsTracer.onResponse('PENDING'); logger.log('13. Client receives metadata.'); - metricsTracer.onMetadataReceived(createMetadata('104')); + metricsTracer.onMetadataReceived( + {finalOperationStatus: 'PENDING', streamingOperation: 'YES'}, + createMetadata('104') + ); logger.log('14. Client receives fourth row.'); - metricsTracer.onResponse(); + metricsTracer.onResponse('PENDING'); logger.log('15. User reads row 1'); metricsTracer.onRead(); logger.log('16. User reads row 2'); @@ -128,6 +147,7 @@ describe('Bigtable/MetricsTracer', () => { retries: 1, finalOperationStatus: 'SUCCESS', connectivityErrorCount: 1, + streamingOperation: 'YES', }); } } From b04c3c469df258e028418775911da02251585e42 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 17:09:31 -0500 Subject: [PATCH 043/448] Update tests with dimension metrics --- test/metrics-tracer/metrics-tracer.ts | 2 +- test/metrics-tracer/typical-method-call.txt | 20 ++++++++++---------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 78163c771..9ed72e155 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -51,7 +51,7 @@ class FakeInstance { id = 'fakeInstanceId'; } -describe('Bigtable/MetricsTracer', () => { +describe.only('Bigtable/MetricsTracer', () => { it('should record the right metrics with a typical method call', () => { const logger = new Logger(); class FakeTable { diff --git a/test/metrics-tracer/typical-method-call.txt b/test/metrics-tracer/typical-method-call.txt index 96cec94ac..8a177824e 100644 --- a/test/metrics-tracer/typical-method-call.txt +++ b/test/metrics-tracer/typical-method-call.txt @@ -4,16 +4,16 @@ getDate call returns 1000 ms getDate call returns 2000 ms 3. Client receives status information. 4. Client receives metadata. -Value added to histogram bigtable.googleapis.com:server_latencies = 101 +Value added to histogram bigtable.googleapis.com:server_latencies = 101 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;PENDING;;YES;nodejs-bigtable 5. Client receives first row. getDate call returns 3000 ms -Value added to histogram bigtable.googleapis.com:first_response_latencies = 2000 +Value added to histogram bigtable.googleapis.com:first_response_latencies = 2000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;;PENDING;;nodejs-bigtable 6. Client receives metadata. 7. Client receives second row. getDate call returns 4000 ms 8. A transient error occurs. 
getDate call returns 5000 ms -Value added to histogram bigtable.googleapis.com:attempt_latencies = 3000 +Value added to histogram bigtable.googleapis.com:attempt_latencies = 3000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;ERROR;;YES;nodejs-bigtable 9. After a timeout, the second attempt is made. getDate call returns 6000 ms 10. Client receives status information. @@ -27,17 +27,17 @@ getDate call returns 8000 ms getDate call returns 9000 ms 16. User reads row 2 getDate call returns 10000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;;;;nodejs-bigtable 17. User reads row 3 getDate call returns 11000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;;;;nodejs-bigtable 18. User reads row 4 getDate call returns 12000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;;;;nodejs-bigtable 19. Stream ends, operation completes getDate call returns 13000 ms getDate call returns 14000 ms -Value added to histogram bigtable.googleapis.com:attempt_latencies = 8000 -Value added to histogram bigtable.googleapis.com:operation_latencies = 12000 -Value added to counter bigtable.googleapis.com:retry_count = 1 -Value added to histogram bigtable.googleapis.com:connectivity_error_count = 1 +Value added to histogram bigtable.googleapis.com:attempt_latencies = 8000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 12000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;;SUCCESS;YES;nodejs-bigtable +Value added to counter bigtable.googleapis.com:retry_count = 1 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;;SUCCESS;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:connectivity_error_count = 1 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;SUCCESS;;;nodejs-bigtable From 2417e80844ba59d2b04f8053cd75a61f1f707c35 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 17:12:28 -0500 Subject: [PATCH 044/448] Revert "Revert "Revert "Delete client-side-metrics file""" This reverts commit 5906c29987fee2c55be44a9e9e0a931930051db8. --- system-test/client-side-metrics.ts | 93 ++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 system-test/client-side-metrics.ts diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts new file mode 100644 index 000000000..9ac299773 --- /dev/null +++ b/system-test/client-side-metrics.ts @@ -0,0 +1,93 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import {Bigtable} from '../src'; +import * as assert from 'assert'; +import {describe, it, before, after} from 'mocha'; + +describe.only('Bigtable/Table#getRows', () => { + const bigtable = new Bigtable({ + projectId: 'cloud-native-db-dpes-shared', + }); + const instanceId = 'emulator-test-instance'; + const tableId = 'my-table'; + const columnFamilyId = 'cf1'; + const clusterId = 'test-cluster'; + const location = 'us-central1-c'; + + before(async () => { + const instance = bigtable.instance(instanceId); + try { + const [instanceInfo] = await instance.exists(); + if (!instanceInfo) { + const [, operation] = await instance.create({ + clusters: { + id: 'fake-cluster3', + location: 'us-west1-c', + nodes: 1, + }, + }); + await operation.promise(); + } + + const table = instance.table(tableId); + const [tableExists] = await table.exists(); + if (!tableExists) { + await table.create({families: [columnFamilyId]}); // Create column family + } else { + // Check if column family exists and create it if not. + const [families] = await table.getFamilies(); + + if (!families.some(family => family.id === columnFamilyId)) { + await table.createFamily(columnFamilyId); + } + } + } catch (error) { + console.error('Error during setup:', error); + // Consider re-throwing error, to actually stop tests. + } + }); + + after(async () => { + const instance = bigtable.instance(instanceId); + await instance.delete({}); + }); + + it('should read rows after inserting data', async () => { + const instance = bigtable.instance(instanceId); + const table = instance.table(tableId); + const rows = [ + { + key: 'row1', + data: { + cf1: { + q1: 'value1', + }, + }, + }, + { + key: 'row2', + data: { + cf1: { + q2: 'value2', + }, + }, + }, + ]; + await table.insert(rows); + for (let i = 0; i < 100; i++) { + console.log(await table.getRows()); + } + }); +}); From df59d88bf7a15edc07c9517633fc97b6e4cf4223 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 10:00:18 -0500 Subject: [PATCH 045/448] Do some measurements --- myFile.txt | 146 +++++++++++++++++++++++++++++ src/index.ts | 13 ++- src/tabular-api-surface.ts | 38 ++++---- system-test/client-side-metrics.ts | 34 ++++++- 4 files changed, 207 insertions(+), 24 deletions(-) create mode 100644 myFile.txt diff --git a/myFile.txt b/myFile.txt new file mode 100644 index 000000000..8e932f96f --- /dev/null +++ b/myFile.txt @@ -0,0 +1,146 @@ +Done attempt 0 +Value added to histogram bigtable.googleapis.com:server_latencies = 124 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 146 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 150 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 
150 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 1 +Value added to histogram bigtable.googleapis.com:server_latencies = 58 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 83 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 85 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 85 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 2 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 78 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 78 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 3 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 4 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 76 with dimensions 
cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 5 +Value added to histogram bigtable.googleapis.com:server_latencies = 58 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 6 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;;;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;;;;readRows;;PENDING;;nodejs-bigtable +Done attempt 7 +Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 8 +Value added to histogram bigtable.googleapis.com:server_latencies = 58 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 9 +Value added 
to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 91 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 92 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 92 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 10 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 11 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 78 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 80 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 80 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 12 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 72 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 13 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions 
cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 14 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 15 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 16 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 17 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions 
cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 18 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 19 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 83 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 84 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 84 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 20 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 21 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions 
cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:first_response_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:attempt_latencies = 79 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:operation_latencies = 79 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
+Done attempt 22
+Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:first_response_latencies = 84 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:attempt_latencies = 86 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:operation_latencies = 86 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
+Done attempt 23
+Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
+Done attempt 24
+Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
+Done attempt 25
+Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:first_response_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:attempt_latencies = 78 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:operation_latencies = 78 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
+Done attempt 26
+Value added to histogram bigtable.googleapis.com:server_latencies = 58 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:attempt_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:operation_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
+Done attempt 27
+Value added to histogram bigtable.googleapis.com:server_latencies = 58 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:first_response_latencies = 78 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:attempt_latencies = 81 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:operation_latencies = 81 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;;;;readRows;PENDING;;YES;nodejs-bigtable
+Done attempt 28
+Value added to histogram bigtable.googleapis.com:first_response_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:attempt_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:operation_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
+Done attempt 29
diff --git a/src/index.ts b/src/index.ts
index e77a67822..86d40ef3c 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -36,7 +36,8 @@ import {PassThrough, Duplex} from 'stream';
 import grpcGcpModule = require('grpc-gcp');
 import {ClusterUtils} from './utils/cluster';
 // TODO: Uncomment the next line after client-side metrics are well tested.
-// import {MetricsTracerFactory} from './client-side-metrics/metrics-tracer-factory';
+import {MetricsTracerFactory} from './client-side-metrics/metrics-tracer-factory';
+import {ObservabilityOptions} from './client-side-metrics/observability-options';
 // eslint-disable-next-line @typescript-eslint/no-var-requires
 const streamEvents = require('stream-events');
@@ -102,6 +103,8 @@ export interface BigtableOptions extends gax.GoogleAuthOptions {
    * Internal only.
    */
   BigtableTableAdminClient?: gax.ClientOptions;
+
+  observabilityOptions?: ObservabilityOptions;
 }
 
 /**
@@ -398,11 +401,13 @@ export class Bigtable {
   static Instance: Instance;
   static Cluster: Cluster;
   // TODO: Uncomment the next line after client-side metrics are well tested.
-  // metricsTracerFactory: MetricsTracerFactory;
+  metricsTracerFactory: MetricsTracerFactory;
 
   constructor(options: BigtableOptions = {}) {
     // TODO: Uncomment the next line after client-side metrics are well tested.
-    // this.metricsTracerFactory = new MetricsTracerFactory();
+    this.metricsTracerFactory = new MetricsTracerFactory(
+      options.observabilityOptions
+    );
 
     // Determine what scopes are needed.
     // It is the union of the scopes on all three clients.
@@ -876,7 +881,7 @@ export class Bigtable {
         .on('error', stream.destroy.bind(stream))
         .on('metadata', stream.emit.bind(stream, 'metadata'))
         // TODO: Uncomment the next line after client-side metrics are well tested.
-        // .on('status', stream.emit.bind(stream, 'status'))
+        .on('status', stream.emit.bind(stream, 'status'))
         .on('request', stream.emit.bind(stream, 'request'))
         .pipe(stream);
     });
diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts
index fda3c1eae..c701194a2 100644
--- a/src/tabular-api-surface.ts
+++ b/src/tabular-api-surface.ts
@@ -12,6 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+let attemptCounter = 0;
+
 import {promisifyAll} from '@google-cloud/promisify';
 import arrify = require('arrify');
 import {Instance} from './instance';
@@ -210,8 +212,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
    * region_tag:bigtable_api_table_readstream
    */
   createReadStream(opts?: GetRowsOptions) {
-    // TODO: Uncomment the next line after client-side metrics are well tested.
-    /*
+    attemptCounter++;
     // Initialize objects for collecting client side metrics.
     const metricsTracer = this.bigtable.metricsTracerFactory.getMetricsTracer(
       this,
@@ -222,9 +223,9 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
         retries: numRequestsMade - 1,
         finalOperationStatus,
         connectivityErrorCount,
+        streamingOperation: 'YES',
       });
     }
-    */
 
     const options = opts || {};
     const maxRetries = is.number(this.maxRetries) ? this.maxRetries! : 10;
@@ -235,7 +236,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
     const hasLimit = rowsLimit !== 0;
 
     // TODO: Uncomment the next line after client-side metrics are well tested.
-    // let connectivityErrorCount = 0;
+    let connectivityErrorCount = 0;
     let numConsecutiveErrors = 0;
     let numRequestsMade = 0;
     let retryTimer: NodeJS.Timeout | null;
@@ -359,10 +360,10 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
     };
 
     // TODO: Uncomment the next line after client-side metrics are well tested.
-    // metricsTracer.onOperationStart();
+    metricsTracer.onOperationStart();
     const makeNewRequest = () => {
       // TODO: Uncomment the next line after client-side metrics are well tested.
-      // metricsTracer.onAttemptStart();
+      metricsTracer.onAttemptStart();
 
       // Avoid cancelling an expired timer if user
       // cancelled the stream in the middle of a retry
@@ -538,12 +539,17 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
     };
 
     // TODO: Uncomment the next line after client-side metrics are well tested.
-    /*
     requestStream
       .on(
         'metadata',
         (metadata: {internalRepr: Map; options: {}}) => {
-          metricsTracer.onMetadataReceived(metadata);
+          metricsTracer.onMetadataReceived(
+            {
+              finalOperationStatus: 'PENDING',
+              streamingOperation: 'YES',
+            },
+            metadata
+          );
         }
       )
       .on(
@@ -554,19 +560,16 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
           metricsTracer.onStatusReceived(status);
         }
       );
-    */
     rowStream
       .on('error', (error: ServiceError) => {
         rowStreamUnpipe(rowStream, userStream);
         activeRequestStream = null;
         // TODO: Uncomment the next line after client-side metrics are well tested.
-        /*
         if (new Set([10, 14, 15]).has(error.code)) {
           // The following grpc errors will be considered connectivity errors:
           // ABORTED, UNAVAILABLE, DATA_LOSS
           connectivityErrorCount++;
         }
-        */
         if (IGNORED_STATUS_CODES.has(error.code)) {
           // We ignore the `cancelled` "error", since we are the ones who cause
           // it when the user calls `.abort()`.
@@ -589,7 +592,10 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
             backOffSettings
           );
           // TODO: Uncomment the next line after client-side metrics are well tested.
-          // metricsTracer.onAttemptComplete({finalOperationStatus: 'ERROR'}); // TODO: Replace ERROR with enum
+          metricsTracer.onAttemptComplete({
+            finalOperationStatus: 'ERROR',
+            streamingOperation: 'YES',
+          }); // TODO: Replace ERROR with enum
           retryTimer = setTimeout(makeNewRequest, nextRetryDelay);
         } else {
           if (
@@ -607,7 +613,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
           }
           userStream.emit('error', error);
           // TODO: Uncomment the next line after client-side metrics are well tested.
-          // onCallComplete('ERROR');
+          onCallComplete('ERROR');
         }
       })
       .on('data', _ => {
@@ -615,14 +621,14 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
         // time won't keep increasing when a stream has had multiple errors
         numConsecutiveErrors = 0;
         // TODO: Uncomment the next line after client-side metrics are well tested.
-        // metricsTracer.onResponse();
+        metricsTracer.onResponse('PENDING');
       })
       .on('end', () => {
         // TODO: Uncomment the next line after client-side metrics are well tested.
-        // numRequestsMade++;
+        numRequestsMade++;
         activeRequestStream = null;
         // TODO: Uncomment the next line after client-side metrics are well tested.
-        // onCallComplete('SUCCESS');
+        onCallComplete('SUCCESS');
       });
     rowStreamPipe(rowStream, userStream);
   };
diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts
index 9ac299773..70ae9d844 100644
--- a/system-test/client-side-metrics.ts
+++ b/system-test/client-side-metrics.ts
@@ -15,16 +15,34 @@
 import {Bigtable} from '../src';
 import * as assert from 'assert';
 import {describe, it, before, after} from 'mocha';
+import {TestMeterProvider} from '../common/test-meter-provider';
+import * as fs from 'node:fs';
+
+class Logger {
+  private messages = '';
+
+  log(message: string) {
+    console.log(message);
+    this.messages = this.messages + message + '\n';
+  }
+
+  getMessages() {
+    return this.messages;
+  }
+}
 
 describe.only('Bigtable/Table#getRows', () => {
+  const logger = new Logger();
+  const meterProvider = new TestMeterProvider(logger);
   const bigtable = new Bigtable({
     projectId: 'cloud-native-db-dpes-shared',
+    observabilityOptions: {
+      meterProvider,
+    },
   });
   const instanceId = 'emulator-test-instance';
   const tableId = 'my-table';
   const columnFamilyId = 'cf1';
-  const clusterId = 'test-cluster';
-  const location = 'us-central1-c';
 
   before(async () => {
     const instance = bigtable.instance(instanceId);
@@ -86,8 +104,16 @@ describe.only('Bigtable/Table#getRows', () => {
       },
     ];
     await table.insert(rows);
-    for (let i = 0; i < 100; i++) {
-      console.log(await table.getRows());
+    for (let i = 0; i < 30; i++) {
+      console.log(`Doing attempt ${i}`);
+      const rows = await table.getRows();
+      console.log(`Done attempt ${i}`);
+      logger.log(`Done attempt ${i}`);
     }
+    const myString = logger.getMessages(); // 'This is the string I want to write to the file.';
+    const filename = 'myFile.txt';
+
+    // Write the string to the file
+    fs.writeFileSync(filename, myString);
   });
 });

From 8ad51f55a4009c573c64f2ee88d0551e20e08820 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Mon, 20 Jan 2025 10:00:55 -0500
Subject: [PATCH 046/448] Revert "Do some measurements"

This reverts commit df59d88bf7a15edc07c9517633fc97b6e4cf4223.
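
For reference, each line in the myFile.txt capture deleted below has the shape
`Value added to histogram <metric> = <value> with dimensions <labels>`. A rough
parser for those lines follows; parseMetricLine is a hypothetical helper, and
the label names are assumptions inferred from the recorded values and the
tracer's dimension interfaces, not a documented format:

    // Hypothetical helper, for illustration only: splits one captured log
    // line into its metric name, value, and semicolon-separated labels.
    function parseMetricLine(line: string) {
      const match = line.match(
        /^Value added to histogram (\S+) = (\d+) with dimensions (.*)$/
      );
      if (!match) {
        return null;
      }
      const [, metric, value, dims] = match;
      // Assumed label order, judging by the recorded values.
      const [
        projectId, instanceId, table, cluster, zone, appProfileId,
        methodName, attemptStatus, finalOperationStatus,
        streamingOperation, clientName,
      ] = dims.split(';');
      return {
        metric, value: Number(value), projectId, instanceId, table,
        cluster, zone, appProfileId, methodName, attemptStatus,
        finalOperationStatus, streamingOperation, clientName,
      };
    }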
---
 myFile.txt                         | 146 -----------------------------
 src/index.ts                       |  13 +--
 src/tabular-api-surface.ts         |  38 ++++----
 system-test/client-side-metrics.ts |  34 +------
 4 files changed, 24 insertions(+), 207 deletions(-)
 delete mode 100644 myFile.txt

diff --git a/myFile.txt b/myFile.txt
deleted file mode 100644
index 8e932f96f..000000000
--- a/myFile.txt
+++ /dev/null
@@ -1,146 +0,0 @@
-Done attempt 0
-Value added to histogram bigtable.googleapis.com:server_latencies = 124 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 146 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 150 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 150 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 1
-Value added to histogram bigtable.googleapis.com:server_latencies = 58 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 83 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 85 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 85 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 2
-Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 78 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 78 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 3
-Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 4
-Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 5
-Value added to histogram bigtable.googleapis.com:server_latencies = 58 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 6
-Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;;;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;;;;readRows;;PENDING;;nodejs-bigtable
-Done attempt 7
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 8
-Value added to histogram bigtable.googleapis.com:server_latencies = 58 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 9
-Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 91 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 92 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 92 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 10
-Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 11
-Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 78 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 80 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 80 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 12
-Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 72 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 13
-Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 14
-Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 15
-Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 16
-Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 17
-Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 18
-Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 19
-Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 83 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 84 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 84 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 20
-Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 21
-Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 79 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 79 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 22
-Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 84 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 86 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 86 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 23
-Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 24
-Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 25
-Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 78 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 78 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 26
-Value added to histogram bigtable.googleapis.com:server_latencies = 58 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 27
-Value added to histogram bigtable.googleapis.com:server_latencies = 58 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 78 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 81 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 81 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;;;;readRows;PENDING;;YES;nodejs-bigtable
-Done attempt 28
-Value added to histogram bigtable.googleapis.com:first_response_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable
-Done attempt 29
diff --git a/src/index.ts b/src/index.ts
index 86d40ef3c..e77a67822 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -36,8 +36,7 @@ import {PassThrough, Duplex} from 'stream';
 import grpcGcpModule = require('grpc-gcp');
 import {ClusterUtils} from './utils/cluster';
 // TODO: Uncomment the next line after client-side metrics are well tested.
-import {MetricsTracerFactory} from './client-side-metrics/metrics-tracer-factory';
-import {ObservabilityOptions} from './client-side-metrics/observability-options';
+// import {MetricsTracerFactory} from './client-side-metrics/metrics-tracer-factory';
 // eslint-disable-next-line @typescript-eslint/no-var-requires
 const streamEvents = require('stream-events');
@@ -103,8 +102,6 @@ export interface BigtableOptions extends gax.GoogleAuthOptions {
    * Internal only.
    */
   BigtableTableAdminClient?: gax.ClientOptions;
-
-  observabilityOptions?: ObservabilityOptions;
 }
 
 /**
@@ -401,13 +398,11 @@ export class Bigtable {
   static Instance: Instance;
   static Cluster: Cluster;
   // TODO: Uncomment the next line after client-side metrics are well tested.
-  metricsTracerFactory: MetricsTracerFactory;
+  // metricsTracerFactory: MetricsTracerFactory;
 
   constructor(options: BigtableOptions = {}) {
     // TODO: Uncomment the next line after client-side metrics are well tested.
-    this.metricsTracerFactory = new MetricsTracerFactory(
-      options.observabilityOptions
-    );
+    // this.metricsTracerFactory = new MetricsTracerFactory();
 
     // Determine what scopes are needed.
     // It is the union of the scopes on all three clients.
@@ -881,7 +876,7 @@ export class Bigtable {
         .on('error', stream.destroy.bind(stream))
         .on('metadata', stream.emit.bind(stream, 'metadata'))
         // TODO: Uncomment the next line after client-side metrics are well tested.
-        .on('status', stream.emit.bind(stream, 'status'))
+        // .on('status', stream.emit.bind(stream, 'status'))
         .on('request', stream.emit.bind(stream, 'request'))
         .pipe(stream);
     });
diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts
index c701194a2..fda3c1eae 100644
--- a/src/tabular-api-surface.ts
+++ b/src/tabular-api-surface.ts
@@ -12,8 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-let attemptCounter = 0;
-
 import {promisifyAll} from '@google-cloud/promisify';
 import arrify = require('arrify');
 import {Instance} from './instance';
@@ -212,7 +210,8 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
    * region_tag:bigtable_api_table_readstream
    */
   createReadStream(opts?: GetRowsOptions) {
-    attemptCounter++;
+    // TODO: Uncomment the next line after client-side metrics are well tested.
+    /*
     // Initialize objects for collecting client side metrics.
     const metricsTracer = this.bigtable.metricsTracerFactory.getMetricsTracer(
       this,
@@ -223,9 +222,9 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
         retries: numRequestsMade - 1,
         finalOperationStatus,
         connectivityErrorCount,
-        streamingOperation: 'YES',
       });
     }
+    */
 
     const options = opts || {};
     const maxRetries = is.number(this.maxRetries) ? this.maxRetries! : 10;
@@ -236,7 +235,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
     const hasLimit = rowsLimit !== 0;
 
     // TODO: Uncomment the next line after client-side metrics are well tested.
-    let connectivityErrorCount = 0;
+    // let connectivityErrorCount = 0;
     let numConsecutiveErrors = 0;
     let numRequestsMade = 0;
     let retryTimer: NodeJS.Timeout | null;
@@ -360,10 +359,10 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
     };
 
     // TODO: Uncomment the next line after client-side metrics are well tested.
-    metricsTracer.onOperationStart();
+    // metricsTracer.onOperationStart();
     const makeNewRequest = () => {
       // TODO: Uncomment the next line after client-side metrics are well tested.
-      metricsTracer.onAttemptStart();
+      // metricsTracer.onAttemptStart();
 
       // Avoid cancelling an expired timer if user
       // cancelled the stream in the middle of a retry
@@ -539,17 +538,12 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
     };
 
     // TODO: Uncomment the next line after client-side metrics are well tested.
+    /*
     requestStream
       .on(
         'metadata',
         (metadata: {internalRepr: Map; options: {}}) => {
-          metricsTracer.onMetadataReceived(
-            {
-              finalOperationStatus: 'PENDING',
-              streamingOperation: 'YES',
-            },
-            metadata
-          );
+          metricsTracer.onMetadataReceived(metadata);
         }
       )
       .on(
@@ -560,16 +554,19 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
           metricsTracer.onStatusReceived(status);
         }
       );
+    */
     rowStream
       .on('error', (error: ServiceError) => {
         rowStreamUnpipe(rowStream, userStream);
         activeRequestStream = null;
         // TODO: Uncomment the next line after client-side metrics are well tested.
+        /*
         if (new Set([10, 14, 15]).has(error.code)) {
           // The following grpc errors will be considered connectivity errors:
           // ABORTED, UNAVAILABLE, DATA_LOSS
           connectivityErrorCount++;
         }
+        */
         if (IGNORED_STATUS_CODES.has(error.code)) {
           // We ignore the `cancelled` "error", since we are the ones who cause
           // it when the user calls `.abort()`.
@@ -592,10 +589,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
             backOffSettings
           );
           // TODO: Uncomment the next line after client-side metrics are well tested.
-          metricsTracer.onAttemptComplete({
-            finalOperationStatus: 'ERROR',
-            streamingOperation: 'YES',
-          }); // TODO: Replace ERROR with enum
+          // metricsTracer.onAttemptComplete({finalOperationStatus: 'ERROR'}); // TODO: Replace ERROR with enum
           retryTimer = setTimeout(makeNewRequest, nextRetryDelay);
         } else {
           if (
@@ -613,7 +607,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
           }
           userStream.emit('error', error);
           // TODO: Uncomment the next line after client-side metrics are well tested.
-          onCallComplete('ERROR');
+          // onCallComplete('ERROR');
         }
       })
       .on('data', _ => {
@@ -621,14 +615,14 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
         // time won't keep increasing when a stream has had multiple errors
         numConsecutiveErrors = 0;
         // TODO: Uncomment the next line after client-side metrics are well tested.
-        metricsTracer.onResponse('PENDING');
+        // metricsTracer.onResponse();
       })
      .on('end', () => {
         // TODO: Uncomment the next line after client-side metrics are well tested.
-        numRequestsMade++;
+        // numRequestsMade++;
         activeRequestStream = null;
         // TODO: Uncomment the next line after client-side metrics are well tested.
-        onCallComplete('SUCCESS');
+        // onCallComplete('SUCCESS');
       });
     rowStreamPipe(rowStream, userStream);
   };
diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts
index 70ae9d844..9ac299773 100644
--- a/system-test/client-side-metrics.ts
+++ b/system-test/client-side-metrics.ts
@@ -15,34 +15,16 @@
 import {Bigtable} from '../src';
 import * as assert from 'assert';
 import {describe, it, before, after} from 'mocha';
-import {TestMeterProvider} from '../common/test-meter-provider';
-import * as fs from 'node:fs';
-
-class Logger {
-  private messages = '';
-
-  log(message: string) {
-    console.log(message);
-    this.messages = this.messages + message + '\n';
-  }
-
-  getMessages() {
-    return this.messages;
-  }
-}
 
 describe.only('Bigtable/Table#getRows', () => {
-  const logger = new Logger();
-  const meterProvider = new TestMeterProvider(logger);
   const bigtable = new Bigtable({
     projectId: 'cloud-native-db-dpes-shared',
-    observabilityOptions: {
-      meterProvider,
-    },
   });
   const instanceId = 'emulator-test-instance';
   const tableId = 'my-table';
   const columnFamilyId = 'cf1';
+  const clusterId = 'test-cluster';
+  const location = 'us-central1-c';
 
   before(async () => {
     const instance = bigtable.instance(instanceId);
@@ -104,16 +86,8 @@ describe.only('Bigtable/Table#getRows', () => {
       },
     ];
     await table.insert(rows);
-    for (let i = 0; i < 30; i++) {
-      console.log(`Doing attempt ${i}`);
-      const rows = await table.getRows();
-      console.log(`Done attempt ${i}`);
-      logger.log(`Done attempt ${i}`);
+    for (let i = 0; i < 100; i++) {
+      console.log(await table.getRows());
     }
-    const myString = logger.getMessages(); // 'This is the string I want to write to the file.';
-    const filename = 'myFile.txt';
-
-    // Write the string to the file
-    fs.writeFileSync(filename, myString);
   });
 });

From 6868f5a9d4d5b1b53f826a68419bea62aef4d1a9 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Mon, 20 Jan 2025 10:01:35 -0500
Subject: [PATCH 047/448] Revert "Revert "Revert "Revert "Delete client-side-metrics file""""

This reverts commit 2417e80844ba59d2b04f8053cd75a61f1f707c35.
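
For context while this test is out of the tree: the instrumented variant
reverted above wired a test meter provider into the client roughly as
follows (a minimal sketch; TestMeterProvider, observabilityOptions, and the
Logger shape come from the reverted code and are not present after this
commit):

    import {Bigtable} from '../src';
    import {TestMeterProvider} from '../common/test-meter-provider';

    // Collects every metric update as a log line instead of exporting it.
    class Logger {
      private messages = '';
      log(message: string) {
        this.messages += message + '\n';
      }
      getMessages() {
        return this.messages;
      }
    }

    const logger = new Logger();
    const bigtable = new Bigtable({
      projectId: 'cloud-native-db-dpes-shared',
      observabilityOptions: {meterProvider: new TestMeterProvider(logger)},
    });
    // Reads issued through `bigtable` now record client-side metrics via
    // the test meter provider; logger.getMessages() returns the capture.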
---
 system-test/client-side-metrics.ts | 93 ------------------------------
 1 file changed, 93 deletions(-)
 delete mode 100644 system-test/client-side-metrics.ts

diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts
deleted file mode 100644
index 9ac299773..000000000
--- a/system-test/client-side-metrics.ts
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-import {Bigtable} from '../src';
-import * as assert from 'assert';
-import {describe, it, before, after} from 'mocha';
-
-describe.only('Bigtable/Table#getRows', () => {
-  const bigtable = new Bigtable({
-    projectId: 'cloud-native-db-dpes-shared',
-  });
-  const instanceId = 'emulator-test-instance';
-  const tableId = 'my-table';
-  const columnFamilyId = 'cf1';
-  const clusterId = 'test-cluster';
-  const location = 'us-central1-c';
-
-  before(async () => {
-    const instance = bigtable.instance(instanceId);
-    try {
-      const [instanceInfo] = await instance.exists();
-      if (!instanceInfo) {
-        const [, operation] = await instance.create({
-          clusters: {
-            id: 'fake-cluster3',
-            location: 'us-west1-c',
-            nodes: 1,
-          },
-        });
-        await operation.promise();
-      }
-
-      const table = instance.table(tableId);
-      const [tableExists] = await table.exists();
-      if (!tableExists) {
-        await table.create({families: [columnFamilyId]}); // Create column family
-      } else {
-        // Check if column family exists and create it if not.
-        const [families] = await table.getFamilies();
-
-        if (!families.some(family => family.id === columnFamilyId)) {
-          await table.createFamily(columnFamilyId);
-        }
-      }
-    } catch (error) {
-      console.error('Error during setup:', error);
-      // Consider re-throwing error, to actually stop tests.
-    }
-  });
-
-  after(async () => {
-    const instance = bigtable.instance(instanceId);
-    await instance.delete({});
-  });
-
-  it('should read rows after inserting data', async () => {
-    const instance = bigtable.instance(instanceId);
-    const table = instance.table(tableId);
-    const rows = [
-      {
-        key: 'row1',
-        data: {
-          cf1: {
-            q1: 'value1',
-          },
-        },
-      },
-      {
-        key: 'row2',
-        data: {
-          cf1: {
-            q2: 'value2',
-          },
-        },
-      },
-    ];
-    await table.insert(rows);
-    for (let i = 0; i < 100; i++) {
-      console.log(await table.getRows());
-    }
-  });
-});

From 7cc36a226b0652f76ee16dba15c333e5b6a7b61a Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Mon, 20 Jan 2025 10:08:21 -0500
Subject: [PATCH 048/448] Add header
---
 common/client-side-metrics-dimensions.ts | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/common/client-side-metrics-dimensions.ts b/common/client-side-metrics-dimensions.ts
index 8c85ee388..2797b4499 100644
--- a/common/client-side-metrics-dimensions.ts
+++ b/common/client-side-metrics-dimensions.ts
@@ -1,3 +1,17 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 /**
  * Dimensions (labels) associated with a Bigtable metric. These
  * dimensions provide context for the metric values.

From 62a4b8be16fdd16705c2b1954c72c9da614ad89f Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Mon, 20 Jan 2025 10:14:52 -0500
Subject: [PATCH 049/448] Remove the TODOs
---
 src/index.ts               |  9 -----
 src/tabular-api-surface.ts | 67 --------------------------------
 2 files changed, 76 deletions(-)

diff --git a/src/index.ts b/src/index.ts
index e77a67822..dc4143c99 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -35,8 +35,6 @@ import * as v2 from './v2';
 import {PassThrough, Duplex} from 'stream';
 import grpcGcpModule = require('grpc-gcp');
 import {ClusterUtils} from './utils/cluster';
-// TODO: Uncomment the next line after client-side metrics are well tested.
-// import {MetricsTracerFactory} from './client-side-metrics/metrics-tracer-factory';
 // eslint-disable-next-line @typescript-eslint/no-var-requires
 const streamEvents = require('stream-events');
@@ -397,13 +395,8 @@ export class Bigtable {
   static AppProfile: AppProfile;
   static Instance: Instance;
   static Cluster: Cluster;
-  // TODO: Uncomment the next line after client-side metrics are well tested.
-  // metricsTracerFactory: MetricsTracerFactory;
 
   constructor(options: BigtableOptions = {}) {
-    // TODO: Uncomment the next line after client-side metrics are well tested.
-    // this.metricsTracerFactory = new MetricsTracerFactory();
-
     // Determine what scopes are needed.
     // It is the union of the scopes on all three clients.
     const scopes: string[] = [];
@@ -875,8 +868,6 @@ export class Bigtable {
       gaxStream
         .on('error', stream.destroy.bind(stream))
         .on('metadata', stream.emit.bind(stream, 'metadata'))
-        // TODO: Uncomment the next line after client-side metrics are well tested.
-        // .on('status', stream.emit.bind(stream, 'status'))
         .on('request', stream.emit.bind(stream, 'request'))
         .pipe(stream);
     });
diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts
index fda3c1eae..b15a08766 100644
--- a/src/tabular-api-surface.ts
+++ b/src/tabular-api-surface.ts
@@ -210,22 +210,6 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
    * region_tag:bigtable_api_table_readstream
    */
   createReadStream(opts?: GetRowsOptions) {
-    // TODO: Uncomment the next line after client-side metrics are well tested.
-    /*
-    // Initialize objects for collecting client side metrics.
-    const metricsTracer = this.bigtable.metricsTracerFactory.getMetricsTracer(
-      this,
-      'readRows'
-    );
-    function onCallComplete(finalOperationStatus: string) {
-      metricsTracer.onOperationComplete({
-        retries: numRequestsMade - 1,
-        finalOperationStatus,
-        connectivityErrorCount,
-      });
-    }
-    */
-
     const options = opts || {};
     const maxRetries = is.number(this.maxRetries) ? this.maxRetries! : 10;
     let activeRequestStream: AbortableDuplex | null;
@@ -234,8 +218,6 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
     const rowsLimit = options.limit || 0;
     const hasLimit = rowsLimit !== 0;
 
-    // TODO: Uncomment the next line after client-side metrics are well tested.
-    // let connectivityErrorCount = 0;
     let numConsecutiveErrors = 0;
     let numRequestsMade = 0;
     let retryTimer: NodeJS.Timeout | null;
@@ -319,13 +301,6 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
         rowsRead++;
         callback(null, row);
       },
-      // TODO: Uncomment the next line after client-side metrics are well tested.
-      /*
-      read(size) {
-        metricsTracer.onRead();
-        return this.read(size);
-      },
-      */
     });
 
     // The caller should be able to call userStream.end() to stop receiving
@@ -357,13 +332,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
       }
       return originalEnd(chunk, encoding, cb);
     };
-
-    // TODO: Uncomment the next line after client-side metrics are well tested.
-    // metricsTracer.onOperationStart();
     const makeNewRequest = () => {
-      // TODO: Uncomment the next line after client-side metrics are well tested.
-      // metricsTracer.onAttemptStart();
-
       // Avoid cancelling an expired timer if user
       // cancelled the stream in the middle of a retry
       retryTimer = null;
@@ -537,36 +506,10 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
       return false;
     };
-    // TODO: Uncomment the next line after client-side metrics are well tested.
-    /*
-    requestStream
-      .on(
-        'metadata',
-        (metadata: {internalRepr: Map; options: {}}) => {
-          metricsTracer.onMetadataReceived(metadata);
-        }
-      )
-      .on(
-        'status',
-        (status: {
-          metadata: {internalRepr: Map; options: {}};
-        }) => {
-          metricsTracer.onStatusReceived(status);
-        }
-      );
-    */
     rowStream
       .on('error', (error: ServiceError) => {
         rowStreamUnpipe(rowStream, userStream);
         activeRequestStream = null;
-        // TODO: Uncomment the next line after client-side metrics are well tested.
-        /*
-        if (new Set([10, 14, 15]).has(error.code)) {
-          // The following grpc errors will be considered connectivity errors:
-          // ABORTED, UNAVAILABLE, DATA_LOSS
-          connectivityErrorCount++;
-        }
-        */
         if (IGNORED_STATUS_CODES.has(error.code)) {
           // We ignore the `cancelled` "error", since we are the ones who cause
           // it when the user calls `.abort()`.
@@ -588,8 +531,6 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
             numConsecutiveErrors,
             backOffSettings
           );
-          // TODO: Uncomment the next line after client-side metrics are well tested.
-          // metricsTracer.onAttemptComplete({finalOperationStatus: 'ERROR'}); // TODO: Replace ERROR with enum
           retryTimer = setTimeout(makeNewRequest, nextRetryDelay);
         } else {
           if (
@@ -606,23 +547,15 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
             error.code = grpc.status.CANCELLED;
           }
           userStream.emit('error', error);
-          // TODO: Uncomment the next line after client-side metrics are well tested.
-          // onCallComplete('ERROR');
         }
       })
       .on('data', _ => {
         // Reset error count after a successful read so the backoff
         // time won't keep increasing when a stream has had multiple errors
         numConsecutiveErrors = 0;
-        // TODO: Uncomment the next line after client-side metrics are well tested.
-        // metricsTracer.onResponse();
       })
       .on('end', () => {
-        // TODO: Uncomment the next line after client-side metrics are well tested.
-        // numRequestsMade++;
         activeRequestStream = null;
-        // TODO: Uncomment the next line after client-side metrics are well tested.
-        // onCallComplete('SUCCESS');
       });
     rowStreamPipe(rowStream, userStream);
   };

From 7c4f414c373a6b2f8a5219f6efcd6f42e81afbf9 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Mon, 20 Jan 2025 10:16:04 -0500
Subject: [PATCH 050/448] Add line back
---
 src/tabular-api-surface.ts | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts
index b15a08766..a7f86e0a2 100644
--- a/src/tabular-api-surface.ts
+++ b/src/tabular-api-surface.ts
@@ -332,6 +332,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
       }
       return originalEnd(chunk, encoding, cb);
     };
+
     const makeNewRequest = () => {
       // Avoid cancelling an expired timer if user
       // cancelled the stream in the middle of a retry

From 83f53ae88613e5a5bcef0907463b80f7e537a648 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Mon, 20 Jan 2025 10:20:50 -0500
Subject: [PATCH 051/448] Add comment
---
 src/client-side-metrics/metrics-tracer-factory.ts | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts
index 3bf843829..69bd03b0b 100644
--- a/src/client-side-metrics/metrics-tracer-factory.ts
+++ b/src/client-side-metrics/metrics-tracer-factory.ts
@@ -433,7 +433,8 @@ export class MetricsTracerFactory {
    * @param observabilityOptions Options for configuring client-side metrics observability.
    */
   constructor(observabilityOptions?: ObservabilityOptions) {
-    // Create MeterProvider
+    // Use the MeterProvider provided by the user.
+    // If a MeterProvider was not provided, then use the default meter provider.
     const meterProvider =
       observabilityOptions && observabilityOptions.meterProvider
         ? observabilityOptions.meterProvider

From 610eec01c1f99bb8b954cb41d83254a29a09b958 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Mon, 20 Jan 2025 10:34:49 -0500
Subject: [PATCH 052/448] Add version
---
 src/client-side-metrics/metrics-tracer-factory.ts | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts
index 69bd03b0b..b87eeb04e 100644
--- a/src/client-side-metrics/metrics-tracer-factory.ts
+++ b/src/client-side-metrics/metrics-tracer-factory.ts
@@ -171,7 +171,7 @@ class MetricsTracer {
       zone: this.zone,
       appProfileId: this.tabularApiSurface.bigtable.appProfileId,
       methodName: this.methodName,
-      clientName: 'nodejs-bigtable',
+      clientName: 'nodejs-bigtable', // TODO: Add version
     };
   }
@@ -446,11 +446,7 @@ export class MetricsTracerFactory {
         // resource if running on GCP. Otherwise, metrics will be sent with monitored resource
         // `generic_task`.
         resource: new Resources.Resource({
-          'service.name': 'example-metric-service',
-          'service.namespace': 'samples',
-          'service.instance.id': '12345',
-          'cloud.resource_manager.project_id':
-            'cloud-native-db-dpes-shared',
+          'service.name': 'bigtable-metrics',
         }).merge(new ResourceUtil.GcpDetectorSync().detect()),
         readers: [
           // Register the exporter

From a2b5951b58c5652185a2204623921751c9c5507f Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Mon, 20 Jan 2025 10:42:21 -0500
Subject: [PATCH 053/448] Add version to client side metrics
---
 src/client-side-metrics/metrics-tracer-factory.ts | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts
index b87eeb04e..4eab84feb 100644
--- a/src/client-side-metrics/metrics-tracer-factory.ts
+++ b/src/client-side-metrics/metrics-tracer-factory.ts
@@ -24,6 +24,7 @@ import * as Resources from '@opentelemetry/resources';
 import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter';
 import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util';
 import {ObservabilityOptions} from './observability-options';
+const { version } = require('../../package.json');
 
 /**
  * Information about a Bigtable operation.
@@ -171,7 +172,7 @@ class MetricsTracer {
       zone: this.zone,
       appProfileId: this.tabularApiSurface.bigtable.appProfileId,
       methodName: this.methodName,
-      clientName: 'nodejs-bigtable', // TODO: Add version
+      clientName: `nodejs-bigtable/${version}`,
     };
   }

From 5f67cad8693f82c0b42c0611e6ca5e847cd6afbe Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Mon, 20 Jan 2025 11:15:24 -0500
Subject: [PATCH 054/448] linter
---
 src/client-side-metrics/metrics-tracer-factory.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts
index 4eab84feb..002339535 100644
--- a/src/client-side-metrics/metrics-tracer-factory.ts
+++ b/src/client-side-metrics/metrics-tracer-factory.ts
@@ -24,7 +24,7 @@ import * as Resources from '@opentelemetry/resources';
 import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter';
 import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util';
 import {ObservabilityOptions} from './observability-options';
-const { version } = require('../../package.json');
+const {version} = require('../../package.json');
 
 /**
  * Information about a Bigtable operation.

From 8f20c78c3c2ccb954745d40690ec60ba94065308 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Mon, 20 Jan 2025 11:20:34 -0500
Subject: [PATCH 055/448] Generate documentation for AttemptInfo interface
---
 src/client-side-metrics/metrics-tracer-factory.ts | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts
index 002339535..41985f8c5 100644
--- a/src/client-side-metrics/metrics-tracer-factory.ts
+++ b/src/client-side-metrics/metrics-tracer-factory.ts
@@ -45,8 +45,17 @@ interface OperationInfo {
   streamingOperation: string;
 }
 
+/**
+ * Information about a single attempt of a Bigtable operation.
+ */
 interface AttemptInfo {
+  /**
+   * The final status of the attempt (e.g., 'OK', 'ERROR').
+ */ finalOperationStatus: string; + /** + * Whether the operation is a streaming operation or not + */ streamingOperation: string; } From 9b1ba9d29ec4d4431c1a569aebe48cb0f9f6ff20 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 11:24:25 -0500 Subject: [PATCH 056/448] Logger documentation --- test/metrics-tracer/metrics-tracer.ts | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 9ed72e155..206f86385 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -19,13 +19,24 @@ import {TestDateProvider} from '../../common/test-date-provider'; import * as assert from 'assert'; import * as fs from 'fs'; +/** + * A basic logger class that stores log messages in an array. Useful for testing. + */ class Logger { private messages: string[] = []; + /** + * Logs a message by adding it to the internal message array. + * @param message The message to be logged. + */ log(message: string) { this.messages.push(message); } + /** + * Retrieves all logged messages. + * @returns An array of logged messages. + */ getMessages() { return this.messages; } From 88e96c34172aadf36a871945b812f05e6555ba6a Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 11:27:29 -0500 Subject: [PATCH 057/448] Generate more documentation --- test/metrics-tracer/metrics-tracer.ts | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 206f86385..336b6a485 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -42,15 +42,28 @@ class Logger { } } +/** + * A fake implementation of the Bigtable client for testing purposes. Provides a + * metricsTracerFactory and a stubbed getProjectId_ method. + */ class FakeBigtable { appProfileId?: string; metricsTracerFactory: MetricsTracerFactory; + /** + * @param observabilityOptions Options for configuring client-side metrics + * observability, including a TestMeterProvider. + */ constructor(observabilityOptions: {meterProvider: TestMeterProvider}) { this.metricsTracerFactory = new MetricsTracerFactory({ meterProvider: observabilityOptions.meterProvider, }); } + /** + * A stubbed method that simulates retrieving the project ID. Always returns + * 'my-project'. + * @param callback A callback function that receives the project ID (or an error). + */ getProjectId_( callback: (err: Error | null, projectId?: string) => void ): void { @@ -58,7 +71,13 @@ class FakeBigtable { } } +/** + * A fake implementation of a Bigtable instance for testing purposes. Provides only an ID. + */ class FakeInstance { + /** + * The ID of the fake instance. + */ id = 'fakeInstanceId'; } From ed39628cfdbe0091dbf6a25344584628b816e9f6 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 11:49:35 -0500 Subject: [PATCH 058/448] Generate documentation --- src/client-side-metrics/metrics-tracer-factory.ts | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index 41985f8c5..ae3c12cf6 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -172,6 +172,12 @@ class MetricsTracer { } } + /** + * Assembles the basic dimensions for metrics. These dimensions provide + * context about the Bigtable environment and the operation being performed. 
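+   * The assembled object carries projectId, instanceId, table, cluster,
+   * zone, appProfileId, methodName and clientName.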
+ * @param {string} projectId The Google Cloud project ID. + * @returns {object} An object containing the basic dimensions. + */ private getBasicDimensions(projectId: string) { return { projectId, @@ -185,6 +191,15 @@ class MetricsTracer { }; } + /** + * Assembles the dimensions for operation latency metrics. These dimensions + * provide context about the Bigtable environment, the operation being performed, and the final status of the operation. + * Includes whether the operation was a streaming operation or not. + * @param {string} projectId The Google Cloud project ID. + * @param {string} finalOperationStatus The final status of the operation. + * @param {string} streamOperation Whether the operation was a streaming operation or not. + * @returns An object containing the dimensions for operation latency metrics. + */ private getOperationLatencyDimensions( projectId: string, finalOperationStatus: string, From 76b1249f3126d9bf4c86c31d78405523d694222d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 11:59:21 -0500 Subject: [PATCH 059/448] Make sure test reports correct duration, zone cluster --- test/metrics-tracer/metrics-tracer.ts | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 336b6a485..43c0fbf48 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -99,7 +99,7 @@ describe.only('Bigtable/MetricsTracer', () => { function createMetadata(duration: string) { return { internalRepr: new Map([ - ['server-timing', Buffer.from(`dur=${duration}`)], + ['server-timing', Buffer.from(`gfet4t7; dur=${duration}`)], ]), options: {}, }; @@ -107,7 +107,10 @@ describe.only('Bigtable/MetricsTracer', () => { const status = { metadata: { internalRepr: new Map([ - ['x-goog-ext-425905942-bin', Buffer.from('doLater')], + [ + 'x-goog-ext-425905942-bin', + Buffer.from('\n\nus-west1-c \rfake-cluster3'), + ], ]), options: {}, }, From 8d60cb1a7839ea6ff485e1a339a38247612d97e7 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 12:59:59 -0500 Subject: [PATCH 060/448] Generate documentation for the dimensions to strin --- common/client-side-metrics-dimensions.ts | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/common/client-side-metrics-dimensions.ts b/common/client-side-metrics-dimensions.ts index 2797b4499..967ff9113 100644 --- a/common/client-side-metrics-dimensions.ts +++ b/common/client-side-metrics-dimensions.ts @@ -30,6 +30,15 @@ export interface Dimensions { clientName: string; } +/** + * Converts a Dimensions object to a string representation. + * This string representation is suitable for use as labels or tags. + * The order of dimensions in the output string is fixed: + * projectId;instanceId;table;cluster;zone;appProfileId;methodName;attemptStatus;finalOperationStatus;streamingOperation;clientName + * If a dimension is null or undefined, the empty string is used. + * @param {Dimensions} d The Dimensions object to convert. + * @returns A string representation of the dimensions. + */ export function dimensionsToString(d: Dimensions) { const p = (dimension?: string | null) => (dimension ? 
dimension : ''); return `${p(d.projectId)};${p(d.instanceId)};${p(d.table)};${p(d.cluster)};${p(d.zone)};${p(d.appProfileId)};${p(d.methodName)};${p(d.attemptStatus)};${p(d.finalOperationStatus)};${p(d.streamingOperation)};${p(d.clientName)}`; From 19fef92054551dc08dba85c104e9145a07fd6d56 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 13:12:28 -0500 Subject: [PATCH 061/448] Add version to the dimensions --- src/client-side-metrics/metrics-tracer-factory.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index ae3c12cf6..69eb96452 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -24,7 +24,7 @@ import * as Resources from '@opentelemetry/resources'; import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; import {ObservabilityOptions} from './observability-options'; -const {version} = require('../../package.json'); +import * as fs from 'fs'; /** * Information about a Bigtable operation. @@ -127,6 +127,9 @@ export interface ITabularApiSurface { }; } +const packageJSON = fs.readFileSync('package.json'); +const version = JSON.parse(packageJSON.toString()).version; + /** * A class for tracing and recording client-side metrics related to Bigtable operations. */ From 1ecfb1c1eb87a67f1c6c9cdb6112037d38434e7e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 13:13:01 -0500 Subject: [PATCH 062/448] Fix the client name. The version is going to chan --- common/client-side-metrics-dimensions.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/client-side-metrics-dimensions.ts b/common/client-side-metrics-dimensions.ts index 967ff9113..0e06d365b 100644 --- a/common/client-side-metrics-dimensions.ts +++ b/common/client-side-metrics-dimensions.ts @@ -41,5 +41,5 @@ export interface Dimensions { */ export function dimensionsToString(d: Dimensions) { const p = (dimension?: string | null) => (dimension ? dimension : ''); - return `${p(d.projectId)};${p(d.instanceId)};${p(d.table)};${p(d.cluster)};${p(d.zone)};${p(d.appProfileId)};${p(d.methodName)};${p(d.attemptStatus)};${p(d.finalOperationStatus)};${p(d.streamingOperation)};${p(d.clientName)}`; + return `${p(d.projectId)};${p(d.instanceId)};${p(d.table)};${p(d.cluster)};${p(d.zone)};${p(d.appProfileId)};${p(d.methodName)};${p(d.attemptStatus)};${p(d.finalOperationStatus)};${p(d.streamingOperation)};nodejs-bigtable`; } From d8a3960e93ef0d0f838305d91be12f485c8b2705 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 13:13:24 -0500 Subject: [PATCH 063/448] Update the expected output file. --- test/metrics-tracer/typical-method-call.txt | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/test/metrics-tracer/typical-method-call.txt b/test/metrics-tracer/typical-method-call.txt index 8a177824e..bdefbba01 100644 --- a/test/metrics-tracer/typical-method-call.txt +++ b/test/metrics-tracer/typical-method-call.txt @@ -4,16 +4,16 @@ getDate call returns 1000 ms getDate call returns 2000 ms 3. Client receives status information. 4. Client receives metadata. 
-Value added to histogram bigtable.googleapis.com:server_latencies = 101 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:server_latencies = 101 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;PENDING;;YES;nodejs-bigtable 5. Client receives first row. getDate call returns 3000 ms -Value added to histogram bigtable.googleapis.com:first_response_latencies = 2000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 2000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;;PENDING;;nodejs-bigtable 6. Client receives metadata. 7. Client receives second row. getDate call returns 4000 ms 8. A transient error occurs. getDate call returns 5000 ms -Value added to histogram bigtable.googleapis.com:attempt_latencies = 3000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;ERROR;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 3000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;ERROR;;YES;nodejs-bigtable 9. After a timeout, the second attempt is made. getDate call returns 6000 ms 10. Client receives status information. @@ -27,17 +27,17 @@ getDate call returns 8000 ms getDate call returns 9000 ms 16. User reads row 2 getDate call returns 10000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;;;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;;;;nodejs-bigtable 17. User reads row 3 getDate call returns 11000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;;;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;;;;nodejs-bigtable 18. User reads row 4 getDate call returns 12000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;;;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;;;;nodejs-bigtable 19. 
Stream ends, operation completes
 getDate call returns 13000 ms
 getDate call returns 14000 ms
-Value added to histogram bigtable.googleapis.com:attempt_latencies = 8000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;SUCCESS;;YES;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:operation_latencies = 12000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;;SUCCESS;YES;nodejs-bigtable
-Value added to counter bigtable.googleapis.com:retry_count = 1 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;;SUCCESS;;nodejs-bigtable
-Value added to histogram bigtable.googleapis.com:connectivity_error_count = 1 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;SUCCESS;;;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:attempt_latencies = 8000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;SUCCESS;;YES;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:operation_latencies = 12000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;;SUCCESS;YES;nodejs-bigtable
+Value added to counter bigtable.googleapis.com:retry_count = 1 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;;SUCCESS;;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:connectivity_error_count = 1 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;SUCCESS;;;nodejs-bigtable

From 1d6b64586b0da6d95380218868c20ed48ac03197 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Mon, 20 Jan 2025 13:35:41 -0500
Subject: [PATCH 064/448] Fix bug, get cluster

---
 src/client-side-metrics/metrics-tracer-factory.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts
index 69eb96452..21c4f7c02 100644
--- a/src/client-side-metrics/metrics-tracer-factory.ts
+++ b/src/client-side-metrics/metrics-tracer-factory.ts
@@ -445,7 +445,7 @@ class MetricsTracer {
       this.zone = instanceInformation[0];
     }
     if (instanceInformation && instanceInformation[1]) {
-      this.cluster = instanceInformation[0];
+      this.cluster = instanceInformation[1];
     }
   }
 }

From acb1d3a6026ac695a71e4ddfdb2ce9f6a1ada841 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Mon, 20 Jan 2025 13:37:25 -0500
Subject: [PATCH 065/448] Add fake cluster to tests

---
 test/metrics-tracer/typical-method-call.txt | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/test/metrics-tracer/typical-method-call.txt b/test/metrics-tracer/typical-method-call.txt
index bdefbba01..a7208bea5 100644
--- a/test/metrics-tracer/typical-method-call.txt
+++ b/test/metrics-tracer/typical-method-call.txt
@@ -4,16 +4,16 @@ getDate call returns 1000 ms
 getDate call returns 2000 ms
 3. Client receives status information.
 4. Client receives metadata.
-Value added to histogram bigtable.googleapis.com:server_latencies = 101 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;PENDING;;YES;nodejs-bigtable
+Value added to histogram bigtable.googleapis.com:server_latencies = 101 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;PENDING;;YES;nodejs-bigtable
 5. Client receives first row.
getDate call returns 3000 ms -Value added to histogram bigtable.googleapis.com:first_response_latencies = 2000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 2000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;PENDING;;nodejs-bigtable 6. Client receives metadata. 7. Client receives second row. getDate call returns 4000 ms 8. A transient error occurs. getDate call returns 5000 ms -Value added to histogram bigtable.googleapis.com:attempt_latencies = 3000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;ERROR;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 3000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;ERROR;;YES;nodejs-bigtable 9. After a timeout, the second attempt is made. getDate call returns 6000 ms 10. Client receives status information. @@ -27,17 +27,17 @@ getDate call returns 8000 ms getDate call returns 9000 ms 16. User reads row 2 getDate call returns 10000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;;;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;;;nodejs-bigtable 17. User reads row 3 getDate call returns 11000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;;;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;;;nodejs-bigtable 18. User reads row 4 getDate call returns 12000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;;;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;;;nodejs-bigtable 19. 
Stream ends, operation completes getDate call returns 13000 ms getDate call returns 14000 ms -Value added to histogram bigtable.googleapis.com:attempt_latencies = 8000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 12000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;;SUCCESS;YES;nodejs-bigtable -Value added to counter bigtable.googleapis.com:retry_count = 1 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;;SUCCESS;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:connectivity_error_count = 1 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;SUCCESS;;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 8000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 12000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;SUCCESS;YES;nodejs-bigtable +Value added to counter bigtable.googleapis.com:retry_count = 1 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;SUCCESS;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:connectivity_error_count = 1 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;SUCCESS;;;nodejs-bigtable From c30b0579b7a974bef3a5bb8d1a56f75fcd5f83e4 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 13:39:03 -0500 Subject: [PATCH 066/448] Remove console log --- test/metrics-tracer/metrics-tracer.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 43c0fbf48..8367d97ee 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -192,6 +192,5 @@ describe.only('Bigtable/MetricsTracer', () => { ); // Ensure events occurred in the right order here: assert.strictEqual(logger.getMessages().join('\n') + '\n', expectedOutput); - console.log('test'); }); }); From 9ef079b6873316e9c9b19a5744f0bf480c2efe21 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 13:51:34 -0500 Subject: [PATCH 067/448] Generate more documentation --- .../metrics-tracer-factory.ts | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index 21c4f7c02..dde89ef68 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -217,6 +217,13 @@ class MetricsTracer { ); } + /** + * Assembles the dimensions for final operation metrics. These dimensions provide + * context about the Bigtable environment and the operation being performed. + * @param projectId The Google Cloud project ID. + * @param finalOperationStatus The final status of the operation. + * @returns An object containing the dimensions for final operation metrics. + */ private getFinalOpDimensions( projectId: string, finalOperationStatus: string @@ -229,6 +236,15 @@ class MetricsTracer { ); } + /** + * Assembles the dimensions for attempt metrics. 
These dimensions provide context + * about the Bigtable environment, the operation being performed, and the status of the attempt. + * Includes whether the operation was a streaming operation or not. + * @param projectId The Google Cloud project ID. + * @param attemptStatus The status of the attempt. + * @param streamingOperation Whether the operation was a streaming operation or not. + * @returns An object containing the dimensions for attempt metrics. + */ private getAttemptDimensions( projectId: string, attemptStatus: string, @@ -243,6 +259,13 @@ class MetricsTracer { ); } + /** + * Assembles the dimensions for attempt status metrics. These dimensions provide context + * about the Bigtable environment and the operation being performed. + * @param projectId The Google Cloud project ID. + * @param attemptStatus The status of the attempt. + * @returns An object containing the dimensions for attempt status metrics. + */ private getAttemptStatusDimensions(projectId: string, attemptStatus: string) { return Object.assign( { From d5a0368497c61e8a56422f5ee6b7d94e3f1a6889 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 14:57:01 -0500 Subject: [PATCH 068/448] Require a call to fetch the project when using MT --- .../metrics-tracer-factory.ts | 164 ++++++++------- test/metrics-tracer/metrics-tracer.ts | 192 +++++++++--------- 2 files changed, 190 insertions(+), 166 deletions(-) diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index dde89ef68..6e2fd7ade 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -478,90 +478,103 @@ class MetricsTracer { * OpenTelemetry metrics instruments. */ export class MetricsTracerFactory { - private metrics: Metrics; + private metrics?: Metrics; + private observabilityOptions?: ObservabilityOptions; /** * @param observabilityOptions Options for configuring client-side metrics observability. */ constructor(observabilityOptions?: ObservabilityOptions) { - // Use MeterProvider provided by user - // If MeterProvider was not provided then use the default meter provider. - const meterProvider = - observabilityOptions && observabilityOptions.meterProvider - ? observabilityOptions.meterProvider - : new MeterProvider({ - // This is the default meter provider - // Create a resource. Fill the `service.*` attributes in with real values for your service. - // GcpDetectorSync will add in resource information about the current environment if you are - // running on GCP. These resource attributes will be translated to a specific GCP monitored - // resource if running on GCP. Otherwise, metrics will be sent with monitored resource - // `generic_task`. - resource: new Resources.Resource({ - 'service.name': 'bigtable-metrics', - }).merge(new ResourceUtil.GcpDetectorSync().detect()), - readers: [ - // Register the exporter - new PeriodicExportingMetricReader({ - // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by - // Cloud Monitoring. - exportIntervalMillis: 10_000, - exporter: new MetricExporter({ - projectId: 'cloud-native-db-dpes-shared', // TODO: Replace later + this.observabilityOptions = observabilityOptions; + } + + private initialize( + projectId?: string, + observabilityOptions?: ObservabilityOptions + ) { + if (this.metrics) { + return this.metrics; + } else { + // Use MeterProvider provided by user + // If MeterProvider was not provided then use the default meter provider. 
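+      // For orientation, a hedged usage sketch (names assumed, not from this
+      // patch): both construction paths end up here, e.g.
+      //   new MetricsTracerFactory({meterProvider: myMeterProvider});
+      //   new MetricsTracerFactory(); // falls back to the exporter below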
+ const meterProvider = + observabilityOptions && observabilityOptions.meterProvider + ? observabilityOptions.meterProvider + : new MeterProvider({ + // This is the default meter provider + // Create a resource. Fill the `service.*` attributes in with real values for your service. + // GcpDetectorSync will add in resource information about the current environment if you are + // running on GCP. These resource attributes will be translated to a specific GCP monitored + // resource if running on GCP. Otherwise, metrics will be sent with monitored resource + // `generic_task`. + resource: new Resources.Resource({ + 'service.name': 'bigtable-metrics', + }).merge(new ResourceUtil.GcpDetectorSync().detect()), + readers: [ + // Register the exporter + new PeriodicExportingMetricReader({ + // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by + // Cloud Monitoring. + exportIntervalMillis: 10_000, + exporter: new MetricExporter({ + projectId, + }), }), - }), - ], - }); - const meter = meterProvider.getMeter('bigtable.googleapis.com'); - this.metrics = { - operationLatencies: meter.createHistogram('operation_latencies', { - description: - "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", - }), - attemptLatencies: meter.createHistogram('attempt_latencies', { - description: - 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', - unit: 'ms', - }), - retryCount: meter.createCounter('retry_count', { - description: - 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', - }), - applicationBlockingLatencies: meter.createHistogram( - 'application_blocking_latencies', - { + ], + }); + const meter = meterProvider.getMeter('bigtable.googleapis.com'); + this.metrics = { + operationLatencies: meter.createHistogram('operation_latencies', { description: - 'The time from when the client receives the response to a request until the application reads the response. This metric is most relevant for ReadRows requests. The start and stop times for this metric depend on the way that you send the read request; see Application blocking latencies timer examples for details.', - unit: 'ms', - } - ), - firstResponseLatencies: meter.createHistogram( - 'first_response_latencies', - { + "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + }), + attemptLatencies: meter.createHistogram('attempt_latencies', { description: - 'Latencies from when a client sends a request and receives the first row of the response.', + 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. 
If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', unit: 'ms', - } - ), - serverLatencies: meter.createHistogram('server_latencies', { - description: - 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', - }), - connectivityErrorCount: meter.createHistogram( - 'connectivity_error_count', - { + }), + retryCount: meter.createCounter('retry_count', { description: - "The number of requests that failed to reach Google's network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.", - } - ), - clientBlockingLatencies: meter.createHistogram( - 'client_blocking_latencies', - { + 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', + }), + applicationBlockingLatencies: meter.createHistogram( + 'application_blocking_latencies', + { + description: + 'The time from when the client receives the response to a request until the application reads the response. This metric is most relevant for ReadRows requests. The start and stop times for this metric depend on the way that you send the read request; see Application blocking latencies timer examples for details.', + unit: 'ms', + } + ), + firstResponseLatencies: meter.createHistogram( + 'first_response_latencies', + { + description: + 'Latencies from when a client sends a request and receives the first row of the response.', + unit: 'ms', + } + ), + serverLatencies: meter.createHistogram('server_latencies', { description: - 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', - unit: 'ms', - } - ), - }; + 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', + }), + connectivityErrorCount: meter.createHistogram( + 'connectivity_error_count', + { + description: + "The number of requests that failed to reach Google's network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.", + } + ), + clientBlockingLatencies: meter.createHistogram( + 'client_blocking_latencies', + { + description: + 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', + unit: 'ms', + } + ), + }; + return this.metrics; + } } /** @@ -569,15 +582,18 @@ export class MetricsTracerFactory { * @param tabularApiSurface The Bigtable table being accessed. * @param methodName The name of the method being traced. * @param dateProvider An optional DateProvider for testing purposes. + * @param {string} projectId The project id * @returns A new MetricsTracer instance. 
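   * @example
   * // Hypothetical usage; `table` is any ITabularApiSurface and `projectId`
   * // comes from an earlier getProjectId_ lookup:
   * // const tracer = factory.getMetricsTracer(table, 'readRows', projectId);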
*/ getMetricsTracer( tabularApiSurface: ITabularApiSurface, methodName: string, + projectId?: string, dateProvider?: DateProvider ) { + const metrics = this.initialize(projectId, this.observabilityOptions); return new MetricsTracer( - this.metrics, + metrics, tabularApiSurface, methodName, dateProvider diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 8367d97ee..7ef1fd7ba 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -82,7 +82,7 @@ class FakeInstance { } describe.only('Bigtable/MetricsTracer', () => { - it('should record the right metrics with a typical method call', () => { + it('should record the right metrics with a typical method call', async () => { const logger = new Logger(); class FakeTable { id = 'fakeTableId'; @@ -91,101 +91,109 @@ describe.only('Bigtable/MetricsTracer', () => { meterProvider: new TestMeterProvider(logger), }); - fakeMethod(): void { - const standardAttemptInfo = { - finalOperationStatus: 'PENDING', - streamingOperation: 'YES', - }; - function createMetadata(duration: string) { - return { - internalRepr: new Map([ - ['server-timing', Buffer.from(`gfet4t7; dur=${duration}`)], - ]), - options: {}, - }; - } - const status = { - metadata: { - internalRepr: new Map([ - [ - 'x-goog-ext-425905942-bin', - Buffer.from('\n\nus-west1-c \rfake-cluster3'), - ], - ]), - options: {}, - }, - }; - const metricsTracer = - this.bigtable.metricsTracerFactory.getMetricsTracer( - this, - 'fakeMethod', - new TestDateProvider(logger) - ); - // In this method we simulate a series of events that might happen - // when a user calls one of the Table methods. - // Here is an example of what might happen in a method call: - logger.log('1. The operation starts'); - metricsTracer.onOperationStart(); - logger.log('2. The attempt starts.'); - metricsTracer.onAttemptStart(); - logger.log('3. Client receives status information.'); - metricsTracer.onStatusReceived(status); - logger.log('4. Client receives metadata.'); - metricsTracer.onMetadataReceived( - standardAttemptInfo, - createMetadata('101') - ); - logger.log('5. Client receives first row.'); - metricsTracer.onResponse('PENDING'); - logger.log('6. Client receives metadata.'); - metricsTracer.onMetadataReceived( - standardAttemptInfo, - createMetadata('102') - ); - logger.log('7. Client receives second row.'); - metricsTracer.onResponse('PENDING'); - logger.log('8. A transient error occurs.'); - metricsTracer.onAttemptComplete({ - finalOperationStatus: 'ERROR', - streamingOperation: 'YES', - }); - logger.log('9. After a timeout, the second attempt is made.'); - metricsTracer.onAttemptStart(); - logger.log('10. Client receives status information.'); - metricsTracer.onStatusReceived(status); - logger.log('11. Client receives metadata.'); - metricsTracer.onMetadataReceived( - standardAttemptInfo, - createMetadata('103') - ); - logger.log('12. Client receives third row.'); - metricsTracer.onResponse('PENDING'); - logger.log('13. Client receives metadata.'); - metricsTracer.onMetadataReceived( - {finalOperationStatus: 'PENDING', streamingOperation: 'YES'}, - createMetadata('104') - ); - logger.log('14. Client receives fourth row.'); - metricsTracer.onResponse('PENDING'); - logger.log('15. User reads row 1'); - metricsTracer.onRead(); - logger.log('16. User reads row 2'); - metricsTracer.onRead(); - logger.log('17. User reads row 3'); - metricsTracer.onRead(); - logger.log('18. User reads row 4'); - metricsTracer.onRead(); - logger.log('19. 
Stream ends, operation completes'); - metricsTracer.onOperationComplete({ - retries: 1, - finalOperationStatus: 'SUCCESS', - connectivityErrorCount: 1, - streamingOperation: 'YES', + async fakeMethod(): Promise { + return new Promise((resolve, reject) => { + this.bigtable.getProjectId_((err, projectId) => { + const standardAttemptInfo = { + finalOperationStatus: 'PENDING', + streamingOperation: 'YES', + }; + + function createMetadata(duration: string) { + return { + internalRepr: new Map([ + ['server-timing', Buffer.from(`gfet4t7; dur=${duration}`)], + ]), + options: {}, + }; + } + + const status = { + metadata: { + internalRepr: new Map([ + [ + 'x-goog-ext-425905942-bin', + Buffer.from('\n\nus-west1-c \rfake-cluster3'), + ], + ]), + options: {}, + }, + }; + const metricsTracer = + this.bigtable.metricsTracerFactory.getMetricsTracer( + this, + 'fakeMethod', + projectId, + new TestDateProvider(logger) + ); + // In this method we simulate a series of events that might happen + // when a user calls one of the Table methods. + // Here is an example of what might happen in a method call: + logger.log('1. The operation starts'); + metricsTracer.onOperationStart(); + logger.log('2. The attempt starts.'); + metricsTracer.onAttemptStart(); + logger.log('3. Client receives status information.'); + metricsTracer.onStatusReceived(status); + logger.log('4. Client receives metadata.'); + metricsTracer.onMetadataReceived( + standardAttemptInfo, + createMetadata('101') + ); + logger.log('5. Client receives first row.'); + metricsTracer.onResponse('PENDING'); + logger.log('6. Client receives metadata.'); + metricsTracer.onMetadataReceived( + standardAttemptInfo, + createMetadata('102') + ); + logger.log('7. Client receives second row.'); + metricsTracer.onResponse('PENDING'); + logger.log('8. A transient error occurs.'); + metricsTracer.onAttemptComplete({ + finalOperationStatus: 'ERROR', + streamingOperation: 'YES', + }); + logger.log('9. After a timeout, the second attempt is made.'); + metricsTracer.onAttemptStart(); + logger.log('10. Client receives status information.'); + metricsTracer.onStatusReceived(status); + logger.log('11. Client receives metadata.'); + metricsTracer.onMetadataReceived( + standardAttemptInfo, + createMetadata('103') + ); + logger.log('12. Client receives third row.'); + metricsTracer.onResponse('PENDING'); + logger.log('13. Client receives metadata.'); + metricsTracer.onMetadataReceived( + {finalOperationStatus: 'PENDING', streamingOperation: 'YES'}, + createMetadata('104') + ); + logger.log('14. Client receives fourth row.'); + metricsTracer.onResponse('PENDING'); + logger.log('15. User reads row 1'); + metricsTracer.onRead(); + logger.log('16. User reads row 2'); + metricsTracer.onRead(); + logger.log('17. User reads row 3'); + metricsTracer.onRead(); + logger.log('18. User reads row 4'); + metricsTracer.onRead(); + logger.log('19. 
Stream ends, operation completes'); + metricsTracer.onOperationComplete({ + retries: 1, + finalOperationStatus: 'SUCCESS', + connectivityErrorCount: 1, + streamingOperation: 'YES', + }); + resolve(); + }); }); } } const table = new FakeTable(); - table.fakeMethod(); + await table.fakeMethod(); const expectedOutput = fs.readFileSync( './test/metrics-tracer/typical-method-call.txt', 'utf8' From ae532d8e8a84b7bfbbd83d2b32399c454bc617f3 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 15:14:09 -0500 Subject: [PATCH 069/448] use same date provider for all metrics tracers --- .../metrics-tracer-factory.ts | 12 ++++++++---- test/metrics-tracer/metrics-tracer.ts | 19 ++++++++++++------- 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index 6e2fd7ade..41daa239e 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -480,12 +480,17 @@ class MetricsTracer { export class MetricsTracerFactory { private metrics?: Metrics; private observabilityOptions?: ObservabilityOptions; + private dateProvider: DateProvider; /** * @param observabilityOptions Options for configuring client-side metrics observability. */ - constructor(observabilityOptions?: ObservabilityOptions) { + constructor( + dateProvider: DateProvider, + observabilityOptions?: ObservabilityOptions + ) { this.observabilityOptions = observabilityOptions; + this.dateProvider = dateProvider; } private initialize( @@ -588,15 +593,14 @@ export class MetricsTracerFactory { getMetricsTracer( tabularApiSurface: ITabularApiSurface, methodName: string, - projectId?: string, - dateProvider?: DateProvider + projectId?: string ) { const metrics = this.initialize(projectId, this.observabilityOptions); return new MetricsTracer( metrics, tabularApiSurface, methodName, - dateProvider + this.dateProvider ); } } diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 7ef1fd7ba..04d5cba52 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -53,8 +53,11 @@ class FakeBigtable { * @param observabilityOptions Options for configuring client-side metrics * observability, including a TestMeterProvider. */ - constructor(observabilityOptions: {meterProvider: TestMeterProvider}) { - this.metricsTracerFactory = new MetricsTracerFactory({ + constructor( + observabilityOptions: {meterProvider: TestMeterProvider}, + dateProvider: TestDateProvider + ) { + this.metricsTracerFactory = new MetricsTracerFactory(dateProvider, { meterProvider: observabilityOptions.meterProvider, }); } @@ -87,9 +90,12 @@ describe.only('Bigtable/MetricsTracer', () => { class FakeTable { id = 'fakeTableId'; instance = new FakeInstance(); - bigtable = new FakeBigtable({ - meterProvider: new TestMeterProvider(logger), - }); + bigtable = new FakeBigtable( + { + meterProvider: new TestMeterProvider(logger), + }, + new TestDateProvider(logger) + ); async fakeMethod(): Promise { return new Promise((resolve, reject) => { @@ -123,8 +129,7 @@ describe.only('Bigtable/MetricsTracer', () => { this.bigtable.metricsTracerFactory.getMetricsTracer( this, 'fakeMethod', - projectId, - new TestDateProvider(logger) + projectId ); // In this method we simulate a series of events that might happen // when a user calls one of the Table methods. 
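The shared DateProvider above is what gives the test transcript its fixed
one-second steps. A minimal sketch of such a provider (an assumed shape; the
real TestDateProvider lives in common/test-date-provider.ts and additionally
logs each call):

class FakeDateProvider {
  private ms = 0;
  // Advance a fake clock by one second per call so recorded latencies come
  // out deterministic: 1000 ms, 2000 ms, and so on.
  getDate(): Date {
    this.ms += 1000;
    return new Date(this.ms);
  }
}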
From b2bced91ef55abd694c20bd40a3f8638a2ef3af1 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Mon, 20 Jan 2025 15:25:39 -0500
Subject: =?UTF-8?q?In=20the=20metrics=20tracer,=20don?=
 =?UTF-8?q?=E2=80=99t=20fetch=20the=20project?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

for each call
---
 .../metrics-tracer-factory.ts | 174 ++++++++----------
 1 file changed, 78 insertions(+), 96 deletions(-)

diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts
index 41daa239e..13fc7993d 100644
--- a/src/client-side-metrics/metrics-tracer-factory.ts
+++ b/src/client-side-metrics/metrics-tracer-factory.ts
@@ -121,9 +121,6 @@ export interface ITabularApiSurface {
   id: string;
   bigtable: {
     appProfileId?: string;
-    getProjectId_(
-      callback: (err: Error | null, projectId?: string) => void
-    ): void;
   };
 }
@@ -141,6 +138,7 @@ class MetricsTracer {
   private cluster: string | null | undefined;
   private tabularApiSurface: ITabularApiSurface;
   private methodName: string;
+  private projectId?: string;
   private receivedFirstResponse: boolean;
   private serverTimeRead: boolean;
   private lastReadTime: DateLike | null;
@@ -156,6 +154,7 @@ class MetricsTracer {
     metrics: Metrics,
     tabularApiSurface: ITabularApiSurface,
     methodName: string,
+    projectId?: string,
     dateProvider?: DateProvider
   ) {
     this.metrics = metrics;
@@ -168,6 +167,7 @@ class MetricsTracer {
     this.receivedFirstResponse = false;
     this.lastReadTime = null;
     this.serverTimeRead = false;
+    this.projectId = projectId;
     if (dateProvider) {
       this.dateProvider = dateProvider;
     } else {
@@ -287,21 +287,17 @@ class MetricsTracer {
    */
   onRead() {
     const currentTime = this.dateProvider.getDate();
+    const projectId = this.projectId;
     if (this.lastReadTime) {
-      this.tabularApiSurface.bigtable.getProjectId_(
-        (err: Error | null, projectId?: string) => {
-          if (projectId && this.lastReadTime) {
-            const dimensions = this.getBasicDimensions(projectId);
-            const difference =
-              currentTime.getTime() - this.lastReadTime.getTime();
-            this.metrics.applicationBlockingLatencies.record(
-              difference,
-              dimensions
-            );
-            this.lastReadTime = currentTime;
-          }
-        }
-      );
+      if (projectId && this.lastReadTime) {
+        const dimensions = this.getBasicDimensions(projectId);
+        const difference = currentTime.getTime() - this.lastReadTime.getTime();
+        this.metrics.applicationBlockingLatencies.record(
+          difference,
+          dimensions
+        );
+        this.lastReadTime = currentTime;
+      }
     } else {
       this.lastReadTime = currentTime;
     }
@@ -313,19 +309,16 @@ class MetricsTracer {
    */
   onAttemptComplete(info: AttemptInfo) {
     const endTime =
this.dateProvider.getDate(); - this.tabularApiSurface.bigtable.getProjectId_( - (err: Error | null, projectId?: string) => { - if (projectId && this.operationStartTime) { - const dimensions = this.getFinalOpDimensions( - projectId, - finalOperationStatus - ); - const totalTime = - endTime.getTime() - this.operationStartTime.getTime(); - if (!this.receivedFirstResponse) { - this.receivedFirstResponse = true; - this.metrics.firstResponseLatencies.record(totalTime, dimensions); - } - } + const projectId = this.projectId; + if (projectId && this.operationStartTime) { + const dimensions = this.getFinalOpDimensions( + projectId, + finalOperationStatus + ); + const totalTime = endTime.getTime() - this.operationStartTime.getTime(); + if (!this.receivedFirstResponse) { + this.receivedFirstResponse = true; + this.metrics.firstResponseLatencies.record(totalTime, dimensions); } - ); + } } /** @@ -365,47 +354,42 @@ class MetricsTracer { */ onOperationComplete(info: OperationInfo) { const endTime = this.dateProvider.getDate(); + const projectId = this.projectId; this.onAttemptComplete(info); - this.tabularApiSurface.bigtable.getProjectId_( - (err: Error | null, projectId?: string) => { - if (projectId && this.operationStartTime) { - const totalTime = - endTime.getTime() - this.operationStartTime.getTime(); - { - // This block records operation latency metrics. - const operationLatencyDimensions = - this.getOperationLatencyDimensions( - projectId, - info.finalOperationStatus, - info.streamingOperation - ); - this.metrics.operationLatencies.record( - totalTime, - operationLatencyDimensions - ); - } - if (info.retries) { - // This block records the retry count metrics - const retryCountDimensions = this.getFinalOpDimensions( - projectId, - info.finalOperationStatus - ); - this.metrics.retryCount.add(info.retries, retryCountDimensions); - } - if (info.connectivityErrorCount) { - // This block records the connectivity error count metrics - const connectivityCountDimensions = this.getAttemptStatusDimensions( - projectId, - info.finalOperationStatus - ); - this.metrics.connectivityErrorCount.record( - info.connectivityErrorCount, - connectivityCountDimensions - ); - } - } + if (projectId && this.operationStartTime) { + const totalTime = endTime.getTime() - this.operationStartTime.getTime(); + { + // This block records operation latency metrics. 
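+        // Operation latency runs from onOperationStart() through
+        // onOperationComplete(), so with a retry it covers every attempt
+        // plus the backoff between them (12000 ms in the test transcript).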
+ const operationLatencyDimensions = this.getOperationLatencyDimensions( + projectId, + info.finalOperationStatus, + info.streamingOperation + ); + this.metrics.operationLatencies.record( + totalTime, + operationLatencyDimensions + ); } - ); + if (info.retries) { + // This block records the retry count metrics + const retryCountDimensions = this.getFinalOpDimensions( + projectId, + info.finalOperationStatus + ); + this.metrics.retryCount.add(info.retries, retryCountDimensions); + } + if (info.connectivityErrorCount) { + // This block records the connectivity error count metrics + const connectivityCountDimensions = this.getAttemptStatusDimensions( + projectId, + info.finalOperationStatus + ); + this.metrics.connectivityErrorCount.record( + info.connectivityErrorCount, + connectivityCountDimensions + ); + } + } } /** @@ -431,18 +415,15 @@ class MetricsTracer { if (!this.serverTimeRead) { this.serverTimeRead = true; const serverTime = parseInt(durationValues[1]); - this.tabularApiSurface.bigtable.getProjectId_( - (err: Error | null, projectId?: string) => { - if (projectId) { - const dimensions = this.getAttemptDimensions( - projectId, - info.finalOperationStatus, - info.streamingOperation - ); - this.metrics.serverLatencies.record(serverTime, dimensions); - } - } - ); + const projectId = this.projectId; + if (projectId) { + const dimensions = this.getAttemptDimensions( + projectId, + info.finalOperationStatus, + info.streamingOperation + ); + this.metrics.serverLatencies.record(serverTime, dimensions); + } } } } @@ -600,6 +581,7 @@ export class MetricsTracerFactory { metrics, tabularApiSurface, methodName, + projectId, this.dateProvider ); } From e1dd61c97810fa1473d1fd8f7a9a39a27f87cdca Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 15:47:32 -0500 Subject: [PATCH 071/448] Remove only --- test/metrics-tracer/metrics-tracer.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 04d5cba52..61fc2dbbf 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -84,7 +84,7 @@ class FakeInstance { id = 'fakeInstanceId'; } -describe.only('Bigtable/MetricsTracer', () => { +describe('Bigtable/MetricsTracer', () => { it('should record the right metrics with a typical method call', async () => { const logger = new Logger(); class FakeTable { From 9ec98df4a541f47a62fed7afededc7dd7fa8425c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 15:53:01 -0500 Subject: [PATCH 072/448] Add open telemetry api --- package.json | 1 + 1 file changed, 1 insertion(+) diff --git a/package.json b/package.json index 7b1c30d4a..2dc24800e 100644 --- a/package.json +++ b/package.json @@ -52,6 +52,7 @@ "@google-cloud/precise-date": "^4.0.0", "@google-cloud/projectify": "^4.0.0", "@google-cloud/promisify": "^4.0.0", + "@opentelemetry/api": "^1.9.0", "@opentelemetry/resources": "^1.30.0", "@opentelemetry/sdk-metrics": "^1.30.0", "arrify": "^2.0.0", From 5a1a3aad7bb8cf41a8a357a00c486bf84900505b Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 15:56:36 -0500 Subject: [PATCH 073/448] Add TestExecuteQuery_EmptyResponse to failures --- testproxy/known_failures.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/testproxy/known_failures.txt b/testproxy/known_failures.txt index 979a31f98..8a7be2dcb 100644 --- a/testproxy/known_failures.txt +++ b/testproxy/known_failures.txt @@ -12,4 +12,5 @@ 
TestReadRows_Retry_WithRoutingCookie_MultipleErrorResponses\| TestReadRows_Retry_WithRetryInfo\| TestReadRows_Retry_WithRetryInfo_MultipleErrorResponse\| TestSampleRowKeys_Retry_WithRoutingCookie\| -TestSampleRowKeys_Generic_CloseClient +TestSampleRowKeys_Generic_CloseClient\| +TestExecuteQuery_EmptyResponse From 1bd2d2b9232956ddd2f20eb1a97336daa75aba0a Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 15:59:14 -0500 Subject: [PATCH 074/448] TestExecuteQuery_SingleSimpleRow known failures --- testproxy/known_failures.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/testproxy/known_failures.txt b/testproxy/known_failures.txt index 8a7be2dcb..664980c39 100644 --- a/testproxy/known_failures.txt +++ b/testproxy/known_failures.txt @@ -13,4 +13,5 @@ TestReadRows_Retry_WithRetryInfo\| TestReadRows_Retry_WithRetryInfo_MultipleErrorResponse\| TestSampleRowKeys_Retry_WithRoutingCookie\| TestSampleRowKeys_Generic_CloseClient\| -TestExecuteQuery_EmptyResponse +TestExecuteQuery_EmptyResponse|\ +TestExecuteQuery_SingleSimpleRow From c2be338038f074ec115885ec177a08fa8e3e787b Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 16:09:47 -0500 Subject: [PATCH 075/448] Fix syntax in known failures --- testproxy/known_failures.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testproxy/known_failures.txt b/testproxy/known_failures.txt index 664980c39..fc6e244af 100644 --- a/testproxy/known_failures.txt +++ b/testproxy/known_failures.txt @@ -13,5 +13,5 @@ TestReadRows_Retry_WithRetryInfo\| TestReadRows_Retry_WithRetryInfo_MultipleErrorResponse\| TestSampleRowKeys_Retry_WithRoutingCookie\| TestSampleRowKeys_Generic_CloseClient\| -TestExecuteQuery_EmptyResponse|\ +TestExecuteQuery_EmptyResponse\| TestExecuteQuery_SingleSimpleRow From cd0d774f882fdd592a1d66426a354e5cfb6688e0 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 16:14:21 -0500 Subject: [PATCH 076/448] Add two tests to the known failures --- testproxy/known_failures.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/testproxy/known_failures.txt b/testproxy/known_failures.txt index fc6e244af..5b74408c3 100644 --- a/testproxy/known_failures.txt +++ b/testproxy/known_failures.txt @@ -13,5 +13,7 @@ TestReadRows_Retry_WithRetryInfo\| TestReadRows_Retry_WithRetryInfo_MultipleErrorResponse\| TestSampleRowKeys_Retry_WithRoutingCookie\| TestSampleRowKeys_Generic_CloseClient\| +TestSampleRowKeys_Generic_Headers\| +TestSampleRowKeys_NoRetry_NoEmptyKey\| TestExecuteQuery_EmptyResponse\| TestExecuteQuery_SingleSimpleRow From e7caf36743beaa29ed57451b9e1e5e0de6b0eeb0 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 16:19:46 -0500 Subject: [PATCH 077/448] TestSampleRowKeys_Retry_WithRetryInfo to known fai --- testproxy/known_failures.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/testproxy/known_failures.txt b/testproxy/known_failures.txt index 5b74408c3..81fade6bd 100644 --- a/testproxy/known_failures.txt +++ b/testproxy/known_failures.txt @@ -14,6 +14,7 @@ TestReadRows_Retry_WithRetryInfo_MultipleErrorResponse\| TestSampleRowKeys_Retry_WithRoutingCookie\| TestSampleRowKeys_Generic_CloseClient\| TestSampleRowKeys_Generic_Headers\| +TestSampleRowKeys_Retry_WithRetryInfo\| TestSampleRowKeys_NoRetry_NoEmptyKey\| TestExecuteQuery_EmptyResponse\| TestExecuteQuery_SingleSimpleRow From 7fd86d2aebbe12209dfa405dde1d02ace3b4af34 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 17:09:29 -0500 Subject: [PATCH 078/448] Change word dimensions 
to attributes --- ...s.ts => client-side-metrics-attributes.ts} | 10 +-- common/test-meter-provider.ts | 14 ++-- .../metrics-tracer-factory.ts | 72 +++++++++---------- 3 files changed, 48 insertions(+), 48 deletions(-) rename common/{client-side-metrics-dimensions.ts => client-side-metrics-attributes.ts} (78%) diff --git a/common/client-side-metrics-dimensions.ts b/common/client-side-metrics-attributes.ts similarity index 78% rename from common/client-side-metrics-dimensions.ts rename to common/client-side-metrics-attributes.ts index 0e06d365b..d5e900f97 100644 --- a/common/client-side-metrics-dimensions.ts +++ b/common/client-side-metrics-attributes.ts @@ -16,7 +16,7 @@ * Dimensions (labels) associated with a Bigtable metric. These * dimensions provide context for the metric values. */ -export interface Dimensions { +export interface Attributes { projectId: string; instanceId: string; table: string; @@ -36,10 +36,10 @@ export interface Dimensions { * The order of dimensions in the output string is fixed: * projectId;instanceId;table;cluster;zone;appProfileId;methodName;attemptStatus;finalOperationStatus;streamingOperation;clientName * If a dimension is null or undefined, the empty string is used. - * @param {Dimensions} d The Dimensions object to convert. + * @param {Attributes} a The Dimensions object to convert. * @returns A string representation of the dimensions. */ -export function dimensionsToString(d: Dimensions) { - const p = (dimension?: string | null) => (dimension ? dimension : ''); - return `${p(d.projectId)};${p(d.instanceId)};${p(d.table)};${p(d.cluster)};${p(d.zone)};${p(d.appProfileId)};${p(d.methodName)};${p(d.attemptStatus)};${p(d.finalOperationStatus)};${p(d.streamingOperation)};nodejs-bigtable`; +export function attributesToString(a: Attributes) { + const p = (attribute?: string | null) => (attribute ? attribute : ''); + return `${p(a.projectId)};${p(a.instanceId)};${p(a.table)};${p(a.cluster)};${p(a.zone)};${p(a.appProfileId)};${p(a.methodName)};${p(a.attemptStatus)};${p(a.finalOperationStatus)};${p(a.streamingOperation)};nodejs-bigtable`; } diff --git a/common/test-meter-provider.ts b/common/test-meter-provider.ts index dc9cb741e..1590fe322 100644 --- a/common/test-meter-provider.ts +++ b/common/test-meter-provider.ts @@ -13,7 +13,7 @@ // limitations under the License. import {WithLogger, WithLoggerAndName} from './logger'; -import {Dimensions, dimensionsToString} from './client-side-metrics-dimensions'; +import {Attributes, attributesToString} from './client-side-metrics-attributes'; /** * A test implementation of a MeterProvider. This MeterProvider is used for testing purposes. @@ -61,11 +61,11 @@ class TestCounter extends WithLoggerAndName { /** * Simulates adding a value to the counter. Logs the value and the counter name. * @param {number} value The value to be added to the counter. - * @param {Dimensions} dimensions The dimensions associated with the value. + * @param {Attributes} attributes The attributes associated with the value. */ - add(value: number, dimensions: Dimensions) { + add(value: number, attributes: Attributes) { this.logger.log( - `Value added to counter ${this.name} = ${value.toString()} with dimensions ${dimensionsToString(dimensions)}` + `Value added to counter ${this.name} = ${value.toString()} with attributes ${attributesToString(attributes)}` ); } } @@ -78,11 +78,11 @@ class TestHistogram extends WithLoggerAndName { /** * Simulates recording a value in the histogram. Logs the value and the histogram name. 
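   * Each logged line has the form
   * `Value added to histogram <name> = <value> with attributes <attributes>`.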
* @param {number} value The value to be recorded in the histogram. - * @param {Dimensions} dimensions The dimensions associated with the value. + * @param {Attributes} attributes The attributes associated with the value. */ - record(value: number, dimensions: Dimensions) { + record(value: number, attributes: Attributes) { this.logger.log( - `Value added to histogram ${this.name} = ${value.toString()} with dimensions ${dimensionsToString(dimensions)}` + `Value added to histogram ${this.name} = ${value.toString()} with attributes ${attributesToString(attributes)}` ); } } diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index 13fc7993d..a355a17a2 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -import {Dimensions} from '../../common/client-side-metrics-dimensions'; +import {Attributes} from '../../common/client-side-metrics-attributes'; const { MeterProvider, @@ -176,12 +176,12 @@ class MetricsTracer { } /** - * Assembles the basic dimensions for metrics. These dimensions provide + * Assembles the basic attributes for metrics. These attributes provide * context about the Bigtable environment and the operation being performed. * @param {string} projectId The Google Cloud project ID. - * @returns {object} An object containing the basic dimensions. + * @returns {object} An object containing the basic attributes. */ - private getBasicDimensions(projectId: string) { + private getBasicAttributes(projectId: string) { return { projectId, instanceId: this.tabularApiSurface.instance.id, @@ -195,57 +195,57 @@ class MetricsTracer { } /** - * Assembles the dimensions for operation latency metrics. These dimensions + * Assembles the attributes for operation latency metrics. These attributes * provide context about the Bigtable environment, the operation being performed, and the final status of the operation. * Includes whether the operation was a streaming operation or not. * @param {string} projectId The Google Cloud project ID. * @param {string} finalOperationStatus The final status of the operation. * @param {string} streamOperation Whether the operation was a streaming operation or not. - * @returns An object containing the dimensions for operation latency metrics. + * @returns An object containing the attributes for operation latency metrics. */ - private getOperationLatencyDimensions( + private getOperationLatencyAttributes( projectId: string, finalOperationStatus: string, streamOperation?: string - ): Dimensions { + ): Attributes { return Object.assign( { finalOperationStatus: finalOperationStatus, streamingOperation: streamOperation, }, - this.getBasicDimensions(projectId) + this.getBasicAttributes(projectId) ); } /** - * Assembles the dimensions for final operation metrics. These dimensions provide + * Assembles the attributes for final operation metrics. These attributes provide * context about the Bigtable environment and the operation being performed. * @param projectId The Google Cloud project ID. * @param finalOperationStatus The final status of the operation. - * @returns An object containing the dimensions for final operation metrics. + * @returns An object containing the attributes for final operation metrics. 
*/ - private getFinalOpDimensions( + private getFinalOpAttributes( projectId: string, finalOperationStatus: string - ): Dimensions { + ): Attributes { return Object.assign( { finalOperationStatus: finalOperationStatus, }, - this.getBasicDimensions(projectId) + this.getBasicAttributes(projectId) ); } /** - * Assembles the dimensions for attempt metrics. These dimensions provide context + * Assembles the attributes for attempt metrics. These attributes provide context * about the Bigtable environment, the operation being performed, and the status of the attempt. * Includes whether the operation was a streaming operation or not. * @param projectId The Google Cloud project ID. * @param attemptStatus The status of the attempt. * @param streamingOperation Whether the operation was a streaming operation or not. - * @returns An object containing the dimensions for attempt metrics. + * @returns An object containing the attributes for attempt metrics. */ - private getAttemptDimensions( + private getAttemptAttributes( projectId: string, attemptStatus: string, streamingOperation: string @@ -255,23 +255,23 @@ class MetricsTracer { attemptStatus: attemptStatus, streamingOperation: streamingOperation, }, - this.getBasicDimensions(projectId) + this.getBasicAttributes(projectId) ); } /** - * Assembles the dimensions for attempt status metrics. These dimensions provide context + * Assembles the attributes for attempt status metrics. These attributes provide context * about the Bigtable environment and the operation being performed. * @param projectId The Google Cloud project ID. * @param attemptStatus The status of the attempt. - * @returns An object containing the dimensions for attempt status metrics. + * @returns An object containing the attributes for attempt status metrics. 
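
All of the assemblers above share one pattern: per-call status fields are overlaid onto the shared basic attributes. A small sketch with hypothetical values (the key sets are disjoint here, so Object.assign and object spread are interchangeable; with overlapping keys, Object.assign would let the basic attributes win, since later arguments overwrite earlier ones):

    const basic = {projectId: 'my-project', methodName: 'readRows'}; // stand-in for getBasicAttributes()
    const operationLatencyAttributes = Object.assign(
      {finalOperationStatus: 'SUCCESS', streamingOperation: 'YES'},
      basic
    );
    // Equivalent: {finalOperationStatus: 'SUCCESS', streamingOperation: 'YES', ...basic}
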
*/ - private getAttemptStatusDimensions(projectId: string, attemptStatus: string) { + private getAttemptStatusAttributes(projectId: string, attemptStatus: string) { return Object.assign( { attemptStatus: attemptStatus, }, - this.getBasicDimensions(projectId) + this.getBasicAttributes(projectId) ); } @@ -290,11 +290,11 @@ class MetricsTracer { const projectId = this.projectId; if (this.lastReadTime) { if (projectId && this.lastReadTime) { - const dimensions = this.getBasicDimensions(projectId); + const attributes = this.getBasicAttributes(projectId); const difference = currentTime.getTime() - this.lastReadTime.getTime(); this.metrics.applicationBlockingLatencies.record( difference, - dimensions + attributes ); this.lastReadTime = currentTime; } @@ -311,13 +311,13 @@ class MetricsTracer { const endTime = this.dateProvider.getDate(); const projectId = this.projectId; if (projectId && this.attemptStartTime) { - const dimensions = this.getAttemptDimensions( + const attributes = this.getAttemptAttributes( projectId, info.finalOperationStatus, info.streamingOperation ); const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); - this.metrics.attemptLatencies.record(totalTime, dimensions); + this.metrics.attemptLatencies.record(totalTime, attributes); } } @@ -335,14 +335,14 @@ class MetricsTracer { const endTime = this.dateProvider.getDate(); const projectId = this.projectId; if (projectId && this.operationStartTime) { - const dimensions = this.getFinalOpDimensions( + const attributes = this.getFinalOpAttributes( projectId, finalOperationStatus ); const totalTime = endTime.getTime() - this.operationStartTime.getTime(); if (!this.receivedFirstResponse) { this.receivedFirstResponse = true; - this.metrics.firstResponseLatencies.record(totalTime, dimensions); + this.metrics.firstResponseLatencies.record(totalTime, attributes); } } } @@ -360,33 +360,33 @@ class MetricsTracer { const totalTime = endTime.getTime() - this.operationStartTime.getTime(); { // This block records operation latency metrics. 
- const operationLatencyDimensions = this.getOperationLatencyDimensions( + const operationLatencyAttributes = this.getOperationLatencyAttributes( projectId, info.finalOperationStatus, info.streamingOperation ); this.metrics.operationLatencies.record( totalTime, - operationLatencyDimensions + operationLatencyAttributes ); } if (info.retries) { // This block records the retry count metrics - const retryCountDimensions = this.getFinalOpDimensions( + const retryCountAttributes = this.getFinalOpAttributes( projectId, info.finalOperationStatus ); - this.metrics.retryCount.add(info.retries, retryCountDimensions); + this.metrics.retryCount.add(info.retries, retryCountAttributes); } if (info.connectivityErrorCount) { // This block records the connectivity error count metrics - const connectivityCountDimensions = this.getAttemptStatusDimensions( + const connectivityCountAttributes = this.getAttemptStatusAttributes( projectId, info.finalOperationStatus ); this.metrics.connectivityErrorCount.record( info.connectivityErrorCount, - connectivityCountDimensions + connectivityCountAttributes ); } } @@ -417,12 +417,12 @@ class MetricsTracer { const serverTime = parseInt(durationValues[1]); const projectId = this.projectId; if (projectId) { - const dimensions = this.getAttemptDimensions( + const attributes = this.getAttemptAttributes( projectId, info.finalOperationStatus, info.streamingOperation ); - this.metrics.serverLatencies.record(serverTime, dimensions); + this.metrics.serverLatencies.record(serverTime, attributes); } } } From db05ff3771b641466816cf4cc36f0c3eef8fd856 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 17:19:27 -0500 Subject: [PATCH 079/448] Change more docs to use Attributes instead of dim --- common/client-side-metrics-attributes.ts | 14 +++++----- .../observability-options.ts | 26 ++++++++++--------- 2 files changed, 21 insertions(+), 19 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index d5e900f97..d1f31ab63 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -13,8 +13,8 @@ // limitations under the License. /** - * Dimensions (labels) associated with a Bigtable metric. These - * dimensions provide context for the metric values. + * Attributes (labels) associated with a Bigtable metric. These + * attributes provide context for the metric values. */ export interface Attributes { projectId: string; @@ -31,13 +31,13 @@ export interface Attributes { } /** - * Converts a Dimensions object to a string representation. + * Converts an Attributes object to a string representation. * This string representation is suitable for use as labels or tags. - * The order of dimensions in the output string is fixed: + * The order of attributes in the output string is fixed: * projectId;instanceId;table;cluster;zone;appProfileId;methodName;attemptStatus;finalOperationStatus;streamingOperation;clientName - * If a dimension is null or undefined, the empty string is used. - * @param {Attributes} a The Dimensions object to convert. - * @returns A string representation of the dimensions. + * If an attribute is null or undefined, the empty string is used. + * @param {Attributes} a The Attributes object to convert. + * @returns A string representation of the attribute. */ export function attributesToString(a: Attributes) { const p = (attribute?: string | null) => (attribute ? 
attribute : ''); diff --git a/src/client-side-metrics/observability-options.ts b/src/client-side-metrics/observability-options.ts index 7d54fe623..919899827 100644 --- a/src/client-side-metrics/observability-options.ts +++ b/src/client-side-metrics/observability-options.ts @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +import {Attributes} from '../../common/client-side-metrics-attributes'; + /** * The Counter interface for recording increments of a metric. */ @@ -19,9 +21,9 @@ interface ICounter { /** * Adds a value to the counter. * @param retries The value to be added to the counter. - * @param dimensions The dimensions associated with this value. + * @param attributes The attributes associated with this value. */ - add(retries: number, dimensions: {}): void; + add(retries: number, attributes: Attributes): void; } /** @@ -30,10 +32,10 @@ interface ICounter { interface IHistogram { /** * Records a value in the histogram. - * @param value The value to be recorded in the histogram. - * @param dimensions The dimensions associated with this value. + * @param {number} value The value to be recorded in the histogram. + * @param attributes The attributes associated with this value. */ - record(value: number, dimensions: {}): void; + record(value: number, attributes: Attributes): void; } /** @@ -42,18 +44,18 @@ interface IHistogram { interface IMeter { /** * Creates a Counter instrument, which counts increments of a given metric. - * @param instrument The name of the counter instrument. - * @param attributes The attributes associated with this counter. + * @param {string} instrument The name of the counter instrument. + * @param {Attributes} attributes The attributes associated with this counter. * @returns {ICounter} A Counter instance. */ - createCounter(instrument: string, attributes: {}): ICounter; + createCounter(instrument: string, attributes: Attributes): ICounter; /** * Creates a Histogram instrument, which records distributions of values for a given metric. - * @param instrument The name of the histogram instrument. - * @param attributes The attributes associated with this histogram. + * @param {string} instrument The name of the histogram instrument. + * @param {Attributes} attributes The attributes associated with this histogram. * @returns {IHistogram} A Histogram instance. */ - createHistogram(instrument: string, attributes: {}): IHistogram; + createHistogram(instrument: string, attributes: Attributes): IHistogram; } /** @@ -62,7 +64,7 @@ interface IMeter { interface IMeterProvider { /** * Returns a Meter, which can be used to create instruments for recording measurements. - * @param name The name of the Meter. + * @param {string} name The name of the Meter. * @returns {IMeter} A Meter instance. */ getMeter(name: string): IMeter; From 9cc4b15930a00ffcc0eee499d02f35698671b5e3 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 17:20:24 -0500 Subject: [PATCH 080/448] attributes --- src/client-side-metrics/observability-options.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/client-side-metrics/observability-options.ts b/src/client-side-metrics/observability-options.ts index 919899827..031c169da 100644 --- a/src/client-side-metrics/observability-options.ts +++ b/src/client-side-metrics/observability-options.ts @@ -20,8 +20,8 @@ import {Attributes} from '../../common/client-side-metrics-attributes'; interface ICounter { /** * Adds a value to the counter. 
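
Together these interfaces describe the smallest surface a caller-supplied meter provider must implement. A console-backed sketch, assuming the interfaces were exported from this module (the class names are hypothetical; only the method signatures come from the declarations above):

    class ConsoleCounter implements ICounter {
      constructor(private instrument: string) {}
      add(retries: number, attributes: Attributes): void {
        console.log(`${this.instrument} += ${retries}`, attributes);
      }
    }

    class ConsoleHistogram implements IHistogram {
      constructor(private instrument: string) {}
      record(value: number, attributes: Attributes): void {
        console.log(`${this.instrument} recorded ${value}`, attributes);
      }
    }

    class ConsoleMeter implements IMeter {
      createCounter(instrument: string, attributes: Attributes): ICounter {
        return new ConsoleCounter(instrument);
      }
      createHistogram(instrument: string, attributes: Attributes): IHistogram {
        return new ConsoleHistogram(instrument);
      }
    }

    class ConsoleMeterProvider implements IMeterProvider {
      getMeter(name: string): IMeter {
        return new ConsoleMeter();
      }
    }
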
- * @param retries The value to be added to the counter. - * @param attributes The attributes associated with this value. + * @param {number} retries The value to be added to the counter. + * @param {Attributes} attributes The attributes associated with this value. */ add(retries: number, attributes: Attributes): void; } @@ -33,7 +33,7 @@ interface IHistogram { /** * Records a value in the histogram. * @param {number} value The value to be recorded in the histogram. - * @param attributes The attributes associated with this value. + * @param {Attributes} attributes The attributes associated with this value. */ record(value: number, attributes: Attributes): void; } From 014232925a7e86aad4904b49b8519fe4388fa731 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 17:22:59 -0500 Subject: [PATCH 081/448] Test should use attributes as string --- test/metrics-tracer/typical-method-call.txt | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/test/metrics-tracer/typical-method-call.txt b/test/metrics-tracer/typical-method-call.txt index a7208bea5..4fa4fb6f4 100644 --- a/test/metrics-tracer/typical-method-call.txt +++ b/test/metrics-tracer/typical-method-call.txt @@ -4,16 +4,16 @@ getDate call returns 1000 ms getDate call returns 2000 ms 3. Client receives status information. 4. Client receives metadata. -Value added to histogram bigtable.googleapis.com:server_latencies = 101 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:server_latencies = 101 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;PENDING;;YES;nodejs-bigtable 5. Client receives first row. getDate call returns 3000 ms -Value added to histogram bigtable.googleapis.com:first_response_latencies = 2000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 2000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;PENDING;;nodejs-bigtable 6. Client receives metadata. 7. Client receives second row. getDate call returns 4000 ms 8. A transient error occurs. getDate call returns 5000 ms -Value added to histogram bigtable.googleapis.com:attempt_latencies = 3000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;ERROR;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 3000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;ERROR;;YES;nodejs-bigtable 9. After a timeout, the second attempt is made. getDate call returns 6000 ms 10. Client receives status information. @@ -27,17 +27,17 @@ getDate call returns 8000 ms getDate call returns 9000 ms 16. User reads row 2 getDate call returns 10000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;;;nodejs-bigtable 17. 
User reads row 3 getDate call returns 11000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;;;nodejs-bigtable 18. User reads row 4 getDate call returns 12000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;;;nodejs-bigtable 19. Stream ends, operation completes getDate call returns 13000 ms getDate call returns 14000 ms -Value added to histogram bigtable.googleapis.com:attempt_latencies = 8000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 12000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;SUCCESS;YES;nodejs-bigtable -Value added to counter bigtable.googleapis.com:retry_count = 1 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;SUCCESS;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:connectivity_error_count = 1 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;SUCCESS;;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 8000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 12000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;SUCCESS;YES;nodejs-bigtable +Value added to counter bigtable.googleapis.com:retry_count = 1 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;SUCCESS;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:connectivity_error_count = 1 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;SUCCESS;;;nodejs-bigtable From 15d6e4a1b781ca7af4984d4204f3792d9f6e88c4 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 21 Jan 2025 11:25:01 -0500 Subject: [PATCH 082/448] For Windows replace carriage return --- test/metrics-tracer/metrics-tracer.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 61fc2dbbf..e6f05ef8e 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -204,6 +204,9 @@ describe('Bigtable/MetricsTracer', () => { 'utf8' ); // Ensure events occurred in the right order here: - assert.strictEqual(logger.getMessages().join('\n') + '\n', expectedOutput); + assert.strictEqual( + logger.getMessages().join('\n') + '\n', + expectedOutput.replace(/\r/g, '') + ); }); }); From 865529ea4d732404c8fb9788225ee22c09bba662 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 21 Jan 2025 11:44:15 -0500 Subject: [PATCH 083/448] Update documentation with types --- 
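
The replace(/\r/g, '') added in the test above guards against the golden file acquiring CRLF line endings on Windows checkouts. A slightly broader normalization helper (hypothetical, same idea) also covers lone carriage returns and a trailing-newline mismatch:

    function normalizeGolden(text: string): string {
      return text.replace(/\r\n/g, '\n').replace(/\r/g, '').trimEnd() + '\n';
    }
    // assert.strictEqual(normalizeGolden(actual), normalizeGolden(expectedOutput));
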
common/logger.ts | 6 +-- .../metrics-tracer-factory.ts | 48 ++++++++++++------- test/metrics-tracer/metrics-tracer.ts | 11 +++-- 3 files changed, 39 insertions(+), 26 deletions(-) diff --git a/common/logger.ts b/common/logger.ts index 82baa0f9c..7b09b8737 100644 --- a/common/logger.ts +++ b/common/logger.ts @@ -27,7 +27,7 @@ interface ILogger { export abstract class WithLogger { protected logger: ILogger; /** - * @param logger The logger instance to be used by this object. + * @param {ILogger} logger The logger instance to be used by this object. */ constructor(logger: ILogger) { this.logger = logger; @@ -42,8 +42,8 @@ export abstract class WithLoggerAndName { protected logger: ILogger; protected name: string; /** - * @param logger The logger instance to be used by this object. - * @param name The name associated with this object. + * @param {ILogger} logger The logger instance to be used by this object. + * @param {string} name The name associated with this object. */ constructor(logger: ILogger, name: string) { this.logger = logger; diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index a355a17a2..a4155c241 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -179,7 +179,7 @@ class MetricsTracer { * Assembles the basic attributes for metrics. These attributes provide * context about the Bigtable environment and the operation being performed. * @param {string} projectId The Google Cloud project ID. - * @returns {object} An object containing the basic attributes. + * @returns {Attributes} An object containing the basic attributes. */ private getBasicAttributes(projectId: string) { return { @@ -201,7 +201,7 @@ class MetricsTracer { * @param {string} projectId The Google Cloud project ID. * @param {string} finalOperationStatus The final status of the operation. * @param {string} streamOperation Whether the operation was a streaming operation or not. - * @returns An object containing the attributes for operation latency metrics. + * @returns {Attributes} An object containing the attributes for operation latency metrics. */ private getOperationLatencyAttributes( projectId: string, @@ -220,9 +220,9 @@ class MetricsTracer { /** * Assembles the attributes for final operation metrics. These attributes provide * context about the Bigtable environment and the operation being performed. - * @param projectId The Google Cloud project ID. - * @param finalOperationStatus The final status of the operation. - * @returns An object containing the attributes for final operation metrics. + * @param {string} projectId The Google Cloud project ID. + * @param {string} finalOperationStatus The final status of the operation. + * @returns {Attributes} An object containing the attributes for final operation metrics. */ private getFinalOpAttributes( projectId: string, @@ -240,10 +240,10 @@ class MetricsTracer { * Assembles the attributes for attempt metrics. These attributes provide context * about the Bigtable environment, the operation being performed, and the status of the attempt. * Includes whether the operation was a streaming operation or not. - * @param projectId The Google Cloud project ID. - * @param attemptStatus The status of the attempt. - * @param streamingOperation Whether the operation was a streaming operation or not. - * @returns An object containing the attributes for attempt metrics. + * @param {string} projectId The Google Cloud project ID. 
+ * @param {string} attemptStatus The status of the attempt. + * @param {string} streamingOperation Whether the operation was a streaming operation or not. + * @returns {Attributes} An object containing the attributes for attempt metrics. */ private getAttemptAttributes( projectId: string, @@ -262,9 +262,9 @@ class MetricsTracer { /** * Assembles the attributes for attempt status metrics. These attributes provide context * about the Bigtable environment and the operation being performed. - * @param projectId The Google Cloud project ID. - * @param attemptStatus The status of the attempt. - * @returns An object containing the attributes for attempt status metrics. + * @param {string} projectId The Google Cloud project ID. + * @param {string} attemptStatus The status of the attempt. + * @returns {Attributes} An object containing the attributes for attempt status metrics. */ private getAttemptStatusAttributes(projectId: string, attemptStatus: string) { return Object.assign( @@ -305,7 +305,7 @@ class MetricsTracer { /** * Called when an attempt (e.g., an RPC attempt) completes. Records attempt latencies. - * @param info Information about the completed attempt. + * @param {AttemptInfo} info Information about the completed attempt. */ onAttemptComplete(info: AttemptInfo) { const endTime = this.dateProvider.getDate(); @@ -330,6 +330,7 @@ class MetricsTracer { /** * Called when the first response is received. Records first response latencies. + * @param {string} finalOperationStatus The final status of the operation. */ onResponse(finalOperationStatus: string) { const endTime = this.dateProvider.getDate(); @@ -350,7 +351,7 @@ class MetricsTracer { /** * Called when an operation completes (successfully or unsuccessfully). * Records operation latencies, retry counts, and connectivity error counts. - * @param info Information about the completed operation. + * @param {OperationInfo} info Information about the completed operation. */ onOperationComplete(info: OperationInfo) { const endTime = this.dateProvider.getDate(); @@ -394,8 +395,8 @@ class MetricsTracer { /** * Called when metadata is received. Extracts server timing information if available. - * @param info Information about the completed attempt. - * @param metadata The received metadata. + * @param {AttemptInfo} info Information about the completed attempt. + * @param {object} metadata The received metadata. */ onMetadataReceived( info: AttemptInfo, @@ -430,7 +431,7 @@ class MetricsTracer { /** * Called when status information is received. Extracts zone and cluster information. - * @param status The received status information. + * @param {object} status The received status information. */ onStatusReceived(status: { metadata: {internalRepr: Map; options: {}}; @@ -464,7 +465,8 @@ export class MetricsTracerFactory { private dateProvider: DateProvider; /** - * @param observabilityOptions Options for configuring client-side metrics observability. + * @param {DateProvider} dateProvider An object that provides dates for latency measurement. + * @param {ObservabilityOptions} observabilityOptions Options for configuring client-side metrics observability. */ constructor( dateProvider: DateProvider, @@ -474,6 +476,16 @@ export class MetricsTracerFactory { this.dateProvider = dateProvider; } + /** + * Initializes the OpenTelemetry metrics instruments if they haven't been already. + * If metrics already exist, this method returns early. 
Otherwise, it creates and registers + * metric instruments (histograms and counters) for various Bigtable client metrics. + * It handles the creation of a MeterProvider, either using a user-provided one or creating a default one, and + * configures a PeriodicExportingMetricReader for exporting metrics. + * @param {string} [projectId] The Google Cloud project ID. Used for metric export. + * @param {ObservabilityOptions} [observabilityOptions] Options for configuring client-side metrics observability, including a custom MeterProvider. + * @returns {Metrics} An object containing the initialized OpenTelemetry metric instruments. + */ private initialize( projectId?: string, observabilityOptions?: ObservabilityOptions diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index e6f05ef8e..180ad1bf7 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -18,6 +18,7 @@ import {TestMeterProvider} from '../../common/test-meter-provider'; import {TestDateProvider} from '../../common/test-date-provider'; import * as assert from 'assert'; import * as fs from 'fs'; +import {ObservabilityOptions} from '../../src/client-side-metrics/observability-options'; /** * A basic logger class that stores log messages in an array. Useful for testing. @@ -27,7 +28,7 @@ class Logger { /** * Logs a message by adding it to the internal message array. - * @param message The message to be logged. + * @param {string} message The message to be logged. */ log(message: string) { this.messages.push(message); @@ -35,7 +36,7 @@ class Logger { /** * Retrieves all logged messages. - * @returns An array of logged messages. + * @returns {string[]} An array of logged messages. */ getMessages() { return this.messages; @@ -50,11 +51,11 @@ class FakeBigtable { appProfileId?: string; metricsTracerFactory: MetricsTracerFactory; /** - * @param observabilityOptions Options for configuring client-side metrics + * @param {ObservabilityOptions} observabilityOptions Options for configuring client-side metrics * observability, including a TestMeterProvider. */ constructor( - observabilityOptions: {meterProvider: TestMeterProvider}, + observabilityOptions: ObservabilityOptions, dateProvider: TestDateProvider ) { this.metricsTracerFactory = new MetricsTracerFactory(dateProvider, { @@ -65,7 +66,7 @@ class FakeBigtable { /** * A stubbed method that simulates retrieving the project ID. Always returns * 'my-project'. - * @param callback A callback function that receives the project ID (or an error). + * @param {function} callback A callback function that receives the project ID (or an error). 
*/ getProjectId_( callback: (err: Error | null, projectId?: string) => void From 28fbfd8d5433d31106b0900adf75e440937a64c1 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 23 Jan 2025 17:09:12 -0500 Subject: [PATCH 084/448] Add metrics collector --- .../gcp-metrics-handler.ts | 0 src/client-side-metrics/metrics-collector.ts | 383 ++++++++++++++++++ src/client-side-metrics/metrics-handler.ts | 30 ++ .../metrics-tracer-factory.ts | 54 --- 4 files changed, 413 insertions(+), 54 deletions(-) create mode 100644 src/client-side-metrics/gcp-metrics-handler.ts create mode 100644 src/client-side-metrics/metrics-collector.ts create mode 100644 src/client-side-metrics/metrics-handler.ts diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts new file mode 100644 index 000000000..e69de29bb diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts new file mode 100644 index 000000000..47448ca85 --- /dev/null +++ b/src/client-side-metrics/metrics-collector.ts @@ -0,0 +1,383 @@ +import {Attributes} from '../../common/client-side-metrics-attributes'; +import * as fs from 'fs'; +import {IMetricsHandler} from './metrics-handler'; + +/** + * An interface representing a Date-like object. Provides a `getTime` method + * for retrieving the time value in milliseconds. Used for abstracting time + * in tests. + */ +interface DateLike { + /** + * Returns the time value in milliseconds. + * @returns The time value in milliseconds. + */ + getTime(): number; +} + +/** + * Interface for a provider that returns DateLike objects. Used for mocking dates in tests. + */ +interface DateProvider { + /** + * Returns a DateLike object. + * @returns A DateLike object representing the current time or a fake time value. + */ + getDate(): DateLike; +} + +/** + * The default DateProvider implementation. Returns the current date and time. + */ +class DefaultDateProvider { + /** + * Returns a new Date object representing the current time. + * @returns {Date} The current date and time. + */ + getDate() { + return new Date(); + } +} + +/** + * An interface representing a tabular API surface, such as a Bigtable table. + */ +export interface ITabularApiSurface { + instance: { + id: string; + }; + id: string; + bigtable: { + appProfileId?: string; + }; +} + +/** + * Information about a Bigtable operation. + */ +interface OperationInfo { + /** + * The number of retries attempted for the operation. + */ + retries?: number; + /** + * The final status of the operation (e.g., 'OK', 'ERROR'). + */ + finalOperationStatus: string; + /** + * Number of times a connectivity error occurred during the operation. + */ + connectivityErrorCount?: number; + streamingOperation: string; +} + +/** + * Information about a single attempt of a Bigtable operation. + */ +interface AttemptInfo { + /** + * The final status of the attempt (e.g., 'OK', 'ERROR'). + */ + finalOperationStatus: string; + /** + * Whether the operation is a streaming operation or not + */ + streamingOperation: string; +} + +const packageJSON = fs.readFileSync('package.json'); +const version = JSON.parse(packageJSON.toString()).version; + +// TODO: Check if metrics tracer method exists. + +/** + * A class for tracing and recording client-side metrics related to Bigtable operations. 
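
The DateLike/DateProvider indirection above exists so tests can substitute a deterministic clock; the TestDateProvider imported by the tests (not shown in this patch) plays that role. A minimal sketch of such a provider, assuming the two interfaces were exported:

    class FakeDateProvider implements DateProvider {
      private ms = 0;
      getDate(): DateLike {
        this.ms += 1000; // each call advances one second, matching the
        return new Date(this.ms); // "getDate call returns N ms" golden log
      }
    }
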
+ */ +export class MetricsCollector { + private operationStartTime: DateLike | null; + private attemptStartTime: DateLike | null; + private zone: string | null | undefined; + private cluster: string | null | undefined; + private tabularApiSurface: ITabularApiSurface; + private methodName: string; + private projectId?: string; + private receivedFirstResponse: boolean; + private metricsHandlers: IMetricsHandler[]; + private firstResponseLatency?: number; + private serverTimeRead: boolean; + private serverTime?: number; + private lastReadTime: DateLike | null; + private dateProvider: DateProvider; + + /** + * @param {ITabularApiSurface} tabularApiSurface Information about the Bigtable table being accessed. + * @param {string} methodName The name of the method being traced. + * @param {string} projectId The id of the project. + * @param {DateProvider} dateProvider A provider for date/time information (for testing). + */ + constructor( + tabularApiSurface: ITabularApiSurface, + metricsHandlers: IMetricsHandler[], + methodName: string, + projectId?: string, + dateProvider?: DateProvider + ) { + this.zone = null; + this.cluster = null; + this.tabularApiSurface = tabularApiSurface; + this.methodName = methodName; + this.operationStartTime = null; + this.attemptStartTime = null; + this.receivedFirstResponse = false; + this.metricsHandlers = metricsHandlers; + this.lastReadTime = null; + this.serverTimeRead = false; + this.projectId = projectId; + if (dateProvider) { + this.dateProvider = dateProvider; + } else { + this.dateProvider = new DefaultDateProvider(); + } + } + + /** + * Assembles the basic attributes for metrics. These attributes provide + * context about the Bigtable environment and the operation being performed. + * @param {string} projectId The Google Cloud project ID. + * @returns {Attributes} An object containing the basic attributes. + */ + private getBasicAttributes(projectId: string) { + return { + projectId, + instanceId: this.tabularApiSurface.instance.id, + table: this.tabularApiSurface.id, + cluster: this.cluster, + zone: this.zone, + appProfileId: this.tabularApiSurface.bigtable.appProfileId, + methodName: this.methodName, + clientName: `nodejs-bigtable/${version}`, + }; + } + + /** + * Assembles the attributes for operation latency metrics. These attributes + * provide context about the Bigtable environment, the operation being performed, and the final status of the operation. + * Includes whether the operation was a streaming operation or not. + * @param {string} projectId The Google Cloud project ID. + * @param {string} finalOperationStatus The final status of the operation. + * @param {string} streamOperation Whether the operation was a streaming operation or not. + * @returns {Attributes} An object containing the attributes for operation latency metrics. + */ + private getOperationLatencyAttributes( + projectId: string, + finalOperationStatus: string, + streamOperation?: string + ): Attributes { + return Object.assign( + { + finalOperationStatus: finalOperationStatus, + streamingOperation: streamOperation, + }, + this.getBasicAttributes(projectId) + ); + } + + /** + * Assembles the attributes for attempt metrics. These attributes provide context + * about the Bigtable environment, the operation being performed, and the status of the attempt. + * Includes whether the operation was a streaming operation or not. + * @param {string} projectId The Google Cloud project ID. + * @param {string} attemptStatus The status of the attempt. 
+ * @param {string} streamingOperation Whether the operation was a streaming operation or not. + * @returns {Attributes} An object containing the attributes for attempt metrics. + */ + private getAttemptAttributes( + projectId: string, + attemptStatus: string, + streamingOperation: string + ) { + return Object.assign( + { + attemptStatus: attemptStatus, + streamingOperation: streamingOperation, + }, + this.getBasicAttributes(projectId) + ); + } + + /** + * Called when the operation starts. Records the start time. + */ + onOperationStart() { + this.operationStartTime = this.dateProvider.getDate(); + } + + /** + * Called after the client reads a row. Records application blocking latencies. + */ + onRead() { + const currentTime = this.dateProvider.getDate(); + const projectId = this.projectId; + if (this.lastReadTime) { + if (projectId && this.lastReadTime) { + const attributes = this.getBasicAttributes(projectId); + const difference = currentTime.getTime() - this.lastReadTime.getTime(); + this.metricsHandlers.forEach(metricsHandler => { + if (metricsHandler.onRead) { + metricsHandler.onRead({latency: difference}, attributes); + } + }); + this.lastReadTime = currentTime; + } + } else { + this.lastReadTime = currentTime; + } + } + + /** + * Called when an attempt (e.g., an RPC attempt) completes. Records attempt latencies. + * @param {AttemptInfo} info Information about the completed attempt. + */ + onAttemptComplete(info: AttemptInfo) { + const endTime = this.dateProvider.getDate(); + const projectId = this.projectId; + if (projectId && this.attemptStartTime) { + const attributes = this.getAttemptAttributes( + projectId, + info.finalOperationStatus, + info.streamingOperation + ); + const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); + this.metricsHandlers.forEach(metricsHandler => { + if (metricsHandler.onAttemptComplete) { + metricsHandler.onAttemptComplete( + { + attemptLatency: totalTime, + serverLatency: this.serverTime, + }, + attributes + ); + } + }); + } + } + + /** + * Called when a new attempt starts. Records the start time of the attempt. + */ + onAttemptStart() { + this.attemptStartTime = this.dateProvider.getDate(); + this.serverTime = undefined; + this.serverTimeRead = false; + this.firstResponseLatency = undefined; + this.receivedFirstResponse = false; + } + + /** + * Called when the first response is received. Records first response latencies. + * @param {string} finalOperationStatus The final status of the operation. + */ + onResponse() { + const endTime = this.dateProvider.getDate(); + const projectId = this.projectId; + if (projectId && this.operationStartTime) { + const totalTime = endTime.getTime() - this.operationStartTime.getTime(); + if (!this.receivedFirstResponse) { + this.receivedFirstResponse = true; + this.firstResponseLatency = totalTime; + } + } + } + + /** + * Called when an operation completes (successfully or unsuccessfully). + * Records operation latencies, retry counts, and connectivity error counts. + * @param {OperationInfo} info Information about the completed operation. + */ + onOperationComplete(info: OperationInfo) { + const endTime = this.dateProvider.getDate(); + const projectId = this.projectId; + this.onAttemptComplete(info); + if (projectId && this.operationStartTime) { + const totalTime = endTime.getTime() - this.operationStartTime.getTime(); + { + // This block records operation latency metrics. 
+ const operationLatencyAttributes = this.getOperationLatencyAttributes( + projectId, + info.finalOperationStatus, + info.streamingOperation + ); + const metrics = { + operationLatency: totalTime, + firstResponseLatency: this.firstResponseLatency, + retryCount: info.retries, + connectivityErrorCount: info.connectivityErrorCount, + }; + this.metricsHandlers.forEach(metricsHandler => { + if (metricsHandler.onOperationComplete) { + metricsHandler.onOperationComplete( + metrics, + operationLatencyAttributes + ); + } + }); + } + } + } + + /** + * Called when metadata is received. Extracts server timing information if available. + * @param {AttemptInfo} info Information about the completed attempt. + * @param {object} metadata The received metadata. + */ + onMetadataReceived( + info: AttemptInfo, + metadata: { + internalRepr: Map; + options: {}; + } + ) { + const mappedEntries = new Map( + Array.from(metadata.internalRepr.entries(), ([key, value]) => [ + key, + value.toString(), + ]) + ); + const durationValues = mappedEntries.get('server-timing')?.split('dur='); + if (durationValues && durationValues[1]) { + if (!this.serverTimeRead) { + this.serverTimeRead = true; + const serverTime = parseInt(durationValues[1]); + const projectId = this.projectId; + if (projectId) { + this.serverTime = serverTime; + } + } + } + } + + /** + * Called when status information is received. Extracts zone and cluster information. + * @param {object} status The received status information. + */ + onStatusReceived(status: { + metadata: {internalRepr: Map; options: {}}; + }) { + const mappedEntries = new Map( + Array.from(status.metadata.internalRepr.entries(), ([key, value]) => [ + key, + value.toString(), + ]) + ); + const instanceInformation = mappedEntries + .get('x-goog-ext-425905942-bin') + ?.replace(new RegExp('\\n', 'g'), '') + .split('\r'); + if (instanceInformation && instanceInformation[0]) { + this.zone = instanceInformation[0]; + } + if (instanceInformation && instanceInformation[1]) { + this.cluster = instanceInformation[1]; + } + } +} diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts new file mode 100644 index 000000000..7247e3697 --- /dev/null +++ b/src/client-side-metrics/metrics-handler.ts @@ -0,0 +1,30 @@ +import {Attributes} from '../../common/client-side-metrics-attributes'; + +interface onOperationCompleteMetrics { + operationLatency: number; + retryCount?: number; +} + +interface onAttemptCompleteMetrics { + attemptLatency: number; + serverLatency?: number; + firstResponseLatency?: number; + connectivityErrorCount?: number; +} + +interface onReadMetrics { + latency: number; +} + +// TODO: Trim attributes so only necessary attributes are required. 
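
Taken together, the collector above is driven through a fixed lifecycle by the calling code. A hypothetical sequence for a single retried streaming read (the constructor arguments table and handler are placeholders; the method names and info shapes come from the class above):

    const collector = new MetricsCollector(
      table, // some ITabularApiSurface
      [handler], // IMetricsHandler implementations that receive the results
      'readRows',
      'my-project'
    );
    collector.onOperationStart();
    collector.onAttemptStart();
    // ...the first attempt fails with a transient error...
    collector.onAttemptComplete({
      finalOperationStatus: 'ERROR',
      streamingOperation: 'YES',
    });
    collector.onAttemptStart();
    collector.onResponse(); // first row arrives; firstResponseLatency is captured
    collector.onOperationComplete({
      finalOperationStatus: 'SUCCESS',
      streamingOperation: 'YES',
      retries: 1,
    }); // also closes out the final attempt via onAttemptComplete
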
+export interface IMetricsHandler { + onOperationComplete?( + metrics: onOperationCompleteMetrics, + attributes: Attributes + ): void; + onRead?(metrics: onReadMetrics, attributes: Attributes): void; + onAttemptComplete?( + metrics: onAttemptCompleteMetrics, + attributes: Attributes + ): void; +} diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index a4155c241..2d54df6be 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -24,7 +24,6 @@ import * as Resources from '@opentelemetry/resources'; import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; import {ObservabilityOptions} from './observability-options'; -import * as fs from 'fs'; /** * Information about a Bigtable operation. @@ -74,59 +73,6 @@ interface Metrics { clientBlockingLatencies: typeof Histogram; } -/** - * An interface representing a Date-like object. Provides a `getTime` method - * for retrieving the time value in milliseconds. Used for abstracting time - * in tests. - */ -interface DateLike { - /** - * Returns the time value in milliseconds. - * @returns The time value in milliseconds. - */ - getTime(): number; -} - -/** - * Interface for a provider that returns DateLike objects. Used for mocking dates in tests. - */ -interface DateProvider { - /** - * Returns a DateLike object. - * @returns A DateLike object representing the current time or a fake time value. - */ - getDate(): DateLike; -} - -/** - * The default DateProvider implementation. Returns the current date and time. - */ -class DefaultDateProvider { - /** - * Returns a new Date object representing the current time. - * @returns {Date} The current date and time. - */ - getDate() { - return new Date(); - } -} - -/** - * An interface representing a tabular API surface, such as a Bigtable table. - */ -export interface ITabularApiSurface { - instance: { - id: string; - }; - id: string; - bigtable: { - appProfileId?: string; - }; -} - -const packageJSON = fs.readFileSync('package.json'); -const version = JSON.parse(packageJSON.toString()).version; - /** * A class for tracing and recording client-side metrics related to Bigtable operations. 
*/ From 5995789849d9201caba13785f3abd9e2abeaac09 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 10:49:53 -0500 Subject: [PATCH 085/448] Metrics handler, GCPMetricsHandler and tests add --- common/test-metrics-handler.ts | 39 ++ .../gcp-metrics-handler.ts | 144 +++++ src/client-side-metrics/metrics-collector.ts | 24 - src/client-side-metrics/metrics-handler.ts | 9 +- .../metrics-tracer-factory.ts | 546 ------------------ .../metrics-collector.ts} | 82 +-- .../metrics-collector/typical-method-call.txt | 35 ++ test/metrics-tracer/typical-method-call.txt | 43 -- 8 files changed, 249 insertions(+), 673 deletions(-) create mode 100644 common/test-metrics-handler.ts delete mode 100644 src/client-side-metrics/metrics-tracer-factory.ts rename test/{metrics-tracer/metrics-tracer.ts => metrics-collector/metrics-collector.ts} (70%) create mode 100644 test/metrics-collector/typical-method-call.txt delete mode 100644 test/metrics-tracer/typical-method-call.txt diff --git a/common/test-metrics-handler.ts b/common/test-metrics-handler.ts new file mode 100644 index 000000000..737168edb --- /dev/null +++ b/common/test-metrics-handler.ts @@ -0,0 +1,39 @@ +import {WithLogger} from './logger'; +import { + onAttemptCompleteMetrics, + onOperationCompleteMetrics, +} from '../src/client-side-metrics/metrics-handler'; +import {Attributes} from './client-side-metrics-attributes'; + +/** + * A test implementation of the IMetricsHandler interface. Used for testing purposes. + * It logs the metrics and attributes received by the onOperationComplete and onAttemptComplete methods. + */ +export class TestMetricsHandler extends WithLogger { + /** + * Logs the metrics and attributes received for an operation completion. + * @param {onOperationCompleteMetrics} metrics Metrics related to the completed operation. + * @param {Attributes} attributes Attributes associated with the completed operation. + */ + onOperationComplete( + metrics: onOperationCompleteMetrics, + attributes: Attributes + ) { + attributes.clientName = 'nodejs-bigtable'; + this.logger.log('Recording parameters for onOperationComplete:'); + this.logger.log(`metrics: ${JSON.stringify(metrics)}`); + this.logger.log(`attributes: ${JSON.stringify(attributes)}`); + } + + /** + * Logs the metrics and attributes received for an attempt completion. + * @param {onAttemptCompleteMetrics} metrics Metrics related to the completed attempt. + * @param {Attributes} attributes Attributes associated with the completed attempt. 
+ */ + onAttemptComplete(metrics: onAttemptCompleteMetrics, attributes: Attributes) { + attributes.clientName = 'nodejs-bigtable'; + this.logger.log('Recording parameters for onAttemptComplete:'); + this.logger.log(`metrics: ${JSON.stringify(metrics)}`); + this.logger.log(`attributes: ${JSON.stringify(attributes)}`); + } +} diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index e69de29bb..aea683fe1 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -0,0 +1,144 @@ +import { + IMetricsHandler, + onAttemptCompleteMetrics, + onOperationCompleteMetrics, +} from './metrics-handler'; +import * as Resources from '@opentelemetry/resources'; +import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; +import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; +import {Attributes} from '../../common/client-side-metrics-attributes'; +const { + MeterProvider, + Histogram, + Counter, + PeriodicExportingMetricReader, +} = require('@opentelemetry/sdk-metrics'); + +/** + * A collection of OpenTelemetry metric instruments used to record + * Bigtable client-side metrics. + */ +interface Metrics { + operationLatencies: typeof Histogram; + attemptLatencies: typeof Histogram; + retryCount: typeof Counter; + applicationBlockingLatencies: typeof Histogram; + firstResponseLatencies: typeof Histogram; + serverLatencies: typeof Histogram; + connectivityErrorCount: typeof Histogram; + clientBlockingLatencies: typeof Histogram; +} + +export class GCPMetricsHandler implements IMetricsHandler { + private initialized = false; + private otelMetrics?: Metrics; + + private initialize(projectId?: string) { + if (!this.initialized) { + this.initialized = true; + // Use MeterProvider provided by user + // If MeterProvider was not provided then use the default meter provider. + const meterProvider = new MeterProvider({ + // This is the default meter provider + // Create a resource. Fill the `service.*` attributes in with real values for your service. + // GcpDetectorSync will add in resource information about the current environment if you are + // running on GCP. These resource attributes will be translated to a specific GCP monitored + // resource if running on GCP. Otherwise, metrics will be sent with monitored resource + // `generic_task`. + resource: new Resources.Resource({ + 'service.name': 'bigtable-metrics', + }).merge(new ResourceUtil.GcpDetectorSync().detect()), + readers: [ + // Register the exporter + new PeriodicExportingMetricReader({ + // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by + // Cloud Monitoring. + exportIntervalMillis: 100_000, + exporter: new MetricExporter({ + projectId, + }), + }), + ], + }); + const meter = meterProvider.getMeter('bigtable.googleapis.com'); + this.otelMetrics = { + operationLatencies: meter.createHistogram('operation_latencies', { + description: + "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + }), + attemptLatencies: meter.createHistogram('attempt_latencies', { + description: + 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. 
If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', + unit: 'ms', + }), + retryCount: meter.createCounter('retry_count', { + description: + 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', + }), + applicationBlockingLatencies: meter.createHistogram( + 'application_blocking_latencies', + { + description: + 'The time from when the client receives the response to a request until the application reads the response. This metric is most relevant for ReadRows requests. The start and stop times for this metric depend on the way that you send the read request; see Application blocking latencies timer examples for details.', + unit: 'ms', + } + ), + firstResponseLatencies: meter.createHistogram( + 'first_response_latencies', + { + description: + 'Latencies from when a client sends a request and receives the first row of the response.', + unit: 'ms', + } + ), + serverLatencies: meter.createHistogram('server_latencies', { + description: + 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', + }), + connectivityErrorCount: meter.createHistogram( + 'connectivity_error_count', + { + description: + "The number of requests that failed to reach Google's network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.", + } + ), + clientBlockingLatencies: meter.createHistogram( + 'client_blocking_latencies', + { + description: + 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', + unit: 'ms', + } + ), + }; + } + } + + onOperationComplete( + metrics: onOperationCompleteMetrics, + attributes: Attributes + ) { + this.initialize(); + this.otelMetrics?.operationLatencies.record( + metrics.operationLatency, + attributes + ); + this.otelMetrics?.retryCount.add(metrics.retryCount, attributes); + } + onAttemptComplete(metrics: onAttemptCompleteMetrics, attributes: Attributes) { + this.initialize(); + this.otelMetrics?.attemptLatencies.record( + metrics.attemptLatency, + attributes + ); + this.otelMetrics?.connectivityErrorCount.record( + metrics.connectivityErrorCount, + attributes + ); + this.otelMetrics?.serverLatencies.record(metrics.serverLatency, attributes); + this.otelMetrics?.firstResponseLatencies.record( + metrics.firstResponseLatency, + attributes + ); + } +} diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index 47448ca85..e7b4d7da9 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -106,7 +106,6 @@ export class MetricsCollector { private firstResponseLatency?: number; private serverTimeRead: boolean; private serverTime?: number; - private lastReadTime: DateLike | null; private dateProvider: DateProvider; /** @@ -130,7 +129,6 @@ export class MetricsCollector { this.attemptStartTime = null; this.receivedFirstResponse = false; this.metricsHandlers = metricsHandlers; - this.lastReadTime = null; this.serverTimeRead = false; this.projectId = projectId; if (dateProvider) { @@ -212,28 +210,6 @@ export class MetricsCollector { this.operationStartTime = this.dateProvider.getDate(); } - /** - * Called after the client reads a row. Records application blocking latencies. 
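
One inconsistency worth noting in the GCPMetricsHandler above: the comment on the PeriodicExportingMetricReader still says metrics are exported every 10 seconds, but exportIntervalMillis is set to 100_000 (100 seconds). A configuration that matches the comment would be:

    new PeriodicExportingMetricReader({
      // Export every 10 s; 5 s is the smallest sample period Cloud Monitoring allows.
      exportIntervalMillis: 10_000,
      exporter: new MetricExporter({projectId}),
    });
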
- */ - onRead() { - const currentTime = this.dateProvider.getDate(); - const projectId = this.projectId; - if (this.lastReadTime) { - if (projectId && this.lastReadTime) { - const attributes = this.getBasicAttributes(projectId); - const difference = currentTime.getTime() - this.lastReadTime.getTime(); - this.metricsHandlers.forEach(metricsHandler => { - if (metricsHandler.onRead) { - metricsHandler.onRead({latency: difference}, attributes); - } - }); - this.lastReadTime = currentTime; - } - } else { - this.lastReadTime = currentTime; - } - } - /** * Called when an attempt (e.g., an RPC attempt) completes. Records attempt latencies. * @param {AttemptInfo} info Information about the completed attempt. diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index 7247e3697..a3ae4840d 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -1,28 +1,23 @@ import {Attributes} from '../../common/client-side-metrics-attributes'; -interface onOperationCompleteMetrics { +export interface onOperationCompleteMetrics { operationLatency: number; retryCount?: number; } -interface onAttemptCompleteMetrics { +export interface onAttemptCompleteMetrics { attemptLatency: number; serverLatency?: number; firstResponseLatency?: number; connectivityErrorCount?: number; } -interface onReadMetrics { - latency: number; -} - // TODO: Trim attributes so only necessary attributes are required. export interface IMetricsHandler { onOperationComplete?( metrics: onOperationCompleteMetrics, attributes: Attributes ): void; - onRead?(metrics: onReadMetrics, attributes: Attributes): void; onAttemptComplete?( metrics: onAttemptCompleteMetrics, attributes: Attributes diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts deleted file mode 100644 index 2d54df6be..000000000 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ /dev/null @@ -1,546 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import {Attributes} from '../../common/client-side-metrics-attributes'; - -const { - MeterProvider, - Histogram, - Counter, - PeriodicExportingMetricReader, -} = require('@opentelemetry/sdk-metrics'); -import * as Resources from '@opentelemetry/resources'; -import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; -import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; -import {ObservabilityOptions} from './observability-options'; - -/** - * Information about a Bigtable operation. - */ -interface OperationInfo { - /** - * The number of retries attempted for the operation. - */ - retries?: number; - /** - * The final status of the operation (e.g., 'OK', 'ERROR'). - */ - finalOperationStatus: string; - /** - * Number of times a connectivity error occurred during the operation. 
- */ - connectivityErrorCount?: number; - streamingOperation: string; -} - -/** - * Information about a single attempt of a Bigtable operation. - */ -interface AttemptInfo { - /** - * The final status of the attempt (e.g., 'OK', 'ERROR'). - */ - finalOperationStatus: string; - /** - * Whether the operation is a streaming operation or not - */ - streamingOperation: string; -} - -/** - * A collection of OpenTelemetry metric instruments used to record - * Bigtable client-side metrics. - */ -interface Metrics { - operationLatencies: typeof Histogram; - attemptLatencies: typeof Histogram; - retryCount: typeof Counter; - applicationBlockingLatencies: typeof Histogram; - firstResponseLatencies: typeof Histogram; - serverLatencies: typeof Histogram; - connectivityErrorCount: typeof Histogram; - clientBlockingLatencies: typeof Histogram; -} - -/** - * A class for tracing and recording client-side metrics related to Bigtable operations. - */ -class MetricsTracer { - private operationStartTime: DateLike | null; - private attemptStartTime: DateLike | null; - private metrics: Metrics; - private zone: string | null | undefined; - private cluster: string | null | undefined; - private tabularApiSurface: ITabularApiSurface; - private methodName: string; - private projectId?: string; - private receivedFirstResponse: boolean; - private serverTimeRead: boolean; - private lastReadTime: DateLike | null; - private dateProvider: DateProvider; - - /** - * @param metrics The metrics instruments to record data with. - * @param tabularApiSurface Information about the Bigtable table being accessed. - * @param methodName The name of the method being traced. - * @param dateProvider A provider for date/time information (for testing). - */ - constructor( - metrics: Metrics, - tabularApiSurface: ITabularApiSurface, - methodName: string, - projectId?: string, - dateProvider?: DateProvider - ) { - this.metrics = metrics; - this.zone = null; - this.cluster = null; - this.tabularApiSurface = tabularApiSurface; - this.methodName = methodName; - this.operationStartTime = null; - this.attemptStartTime = null; - this.receivedFirstResponse = false; - this.lastReadTime = null; - this.serverTimeRead = false; - this.projectId = projectId; - if (dateProvider) { - this.dateProvider = dateProvider; - } else { - this.dateProvider = new DefaultDateProvider(); - } - } - - /** - * Assembles the basic attributes for metrics. These attributes provide - * context about the Bigtable environment and the operation being performed. - * @param {string} projectId The Google Cloud project ID. - * @returns {Attributes} An object containing the basic attributes. - */ - private getBasicAttributes(projectId: string) { - return { - projectId, - instanceId: this.tabularApiSurface.instance.id, - table: this.tabularApiSurface.id, - cluster: this.cluster, - zone: this.zone, - appProfileId: this.tabularApiSurface.bigtable.appProfileId, - methodName: this.methodName, - clientName: `nodejs-bigtable/${version}`, - }; - } - - /** - * Assembles the attributes for operation latency metrics. These attributes - * provide context about the Bigtable environment, the operation being performed, and the final status of the operation. - * Includes whether the operation was a streaming operation or not. - * @param {string} projectId The Google Cloud project ID. - * @param {string} finalOperationStatus The final status of the operation. - * @param {string} streamOperation Whether the operation was a streaming operation or not. 
- * @returns {Attributes} An object containing the attributes for operation latency metrics. - */ - private getOperationLatencyAttributes( - projectId: string, - finalOperationStatus: string, - streamOperation?: string - ): Attributes { - return Object.assign( - { - finalOperationStatus: finalOperationStatus, - streamingOperation: streamOperation, - }, - this.getBasicAttributes(projectId) - ); - } - - /** - * Assembles the attributes for final operation metrics. These attributes provide - * context about the Bigtable environment and the operation being performed. - * @param {string} projectId The Google Cloud project ID. - * @param {string} finalOperationStatus The final status of the operation. - * @returns {Attributes} An object containing the attributes for final operation metrics. - */ - private getFinalOpAttributes( - projectId: string, - finalOperationStatus: string - ): Attributes { - return Object.assign( - { - finalOperationStatus: finalOperationStatus, - }, - this.getBasicAttributes(projectId) - ); - } - - /** - * Assembles the attributes for attempt metrics. These attributes provide context - * about the Bigtable environment, the operation being performed, and the status of the attempt. - * Includes whether the operation was a streaming operation or not. - * @param {string} projectId The Google Cloud project ID. - * @param {string} attemptStatus The status of the attempt. - * @param {string} streamingOperation Whether the operation was a streaming operation or not. - * @returns {Attributes} An object containing the attributes for attempt metrics. - */ - private getAttemptAttributes( - projectId: string, - attemptStatus: string, - streamingOperation: string - ) { - return Object.assign( - { - attemptStatus: attemptStatus, - streamingOperation: streamingOperation, - }, - this.getBasicAttributes(projectId) - ); - } - - /** - * Assembles the attributes for attempt status metrics. These attributes provide context - * about the Bigtable environment and the operation being performed. - * @param {string} projectId The Google Cloud project ID. - * @param {string} attemptStatus The status of the attempt. - * @returns {Attributes} An object containing the attributes for attempt status metrics. - */ - private getAttemptStatusAttributes(projectId: string, attemptStatus: string) { - return Object.assign( - { - attemptStatus: attemptStatus, - }, - this.getBasicAttributes(projectId) - ); - } - - /** - * Called when the operation starts. Records the start time. - */ - onOperationStart() { - this.operationStartTime = this.dateProvider.getDate(); - } - - /** - * Called after the client reads a row. Records application blocking latencies. - */ - onRead() { - const currentTime = this.dateProvider.getDate(); - const projectId = this.projectId; - if (this.lastReadTime) { - if (projectId && this.lastReadTime) { - const attributes = this.getBasicAttributes(projectId); - const difference = currentTime.getTime() - this.lastReadTime.getTime(); - this.metrics.applicationBlockingLatencies.record( - difference, - attributes - ); - this.lastReadTime = currentTime; - } - } else { - this.lastReadTime = currentTime; - } - } - - /** - * Called when an attempt (e.g., an RPC attempt) completes. Records attempt latencies. - * @param {AttemptInfo} info Information about the completed attempt. 
- */ - onAttemptComplete(info: AttemptInfo) { - const endTime = this.dateProvider.getDate(); - const projectId = this.projectId; - if (projectId && this.attemptStartTime) { - const attributes = this.getAttemptAttributes( - projectId, - info.finalOperationStatus, - info.streamingOperation - ); - const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); - this.metrics.attemptLatencies.record(totalTime, attributes); - } - } - - /** - * Called when a new attempt starts. Records the start time of the attempt. - */ - onAttemptStart() { - this.attemptStartTime = this.dateProvider.getDate(); - } - - /** - * Called when the first response is received. Records first response latencies. - * @param {string} finalOperationStatus The final status of the operation. - */ - onResponse(finalOperationStatus: string) { - const endTime = this.dateProvider.getDate(); - const projectId = this.projectId; - if (projectId && this.operationStartTime) { - const attributes = this.getFinalOpAttributes( - projectId, - finalOperationStatus - ); - const totalTime = endTime.getTime() - this.operationStartTime.getTime(); - if (!this.receivedFirstResponse) { - this.receivedFirstResponse = true; - this.metrics.firstResponseLatencies.record(totalTime, attributes); - } - } - } - - /** - * Called when an operation completes (successfully or unsuccessfully). - * Records operation latencies, retry counts, and connectivity error counts. - * @param {OperationInfo} info Information about the completed operation. - */ - onOperationComplete(info: OperationInfo) { - const endTime = this.dateProvider.getDate(); - const projectId = this.projectId; - this.onAttemptComplete(info); - if (projectId && this.operationStartTime) { - const totalTime = endTime.getTime() - this.operationStartTime.getTime(); - { - // This block records operation latency metrics. - const operationLatencyAttributes = this.getOperationLatencyAttributes( - projectId, - info.finalOperationStatus, - info.streamingOperation - ); - this.metrics.operationLatencies.record( - totalTime, - operationLatencyAttributes - ); - } - if (info.retries) { - // This block records the retry count metrics - const retryCountAttributes = this.getFinalOpAttributes( - projectId, - info.finalOperationStatus - ); - this.metrics.retryCount.add(info.retries, retryCountAttributes); - } - if (info.connectivityErrorCount) { - // This block records the connectivity error count metrics - const connectivityCountAttributes = this.getAttemptStatusAttributes( - projectId, - info.finalOperationStatus - ); - this.metrics.connectivityErrorCount.record( - info.connectivityErrorCount, - connectivityCountAttributes - ); - } - } - } - - /** - * Called when metadata is received. Extracts server timing information if available. - * @param {AttemptInfo} info Information about the completed attempt. - * @param {object} metadata The received metadata. 
-   */
-  onMetadataReceived(
-    info: AttemptInfo,
-    metadata: {
-      internalRepr: Map<string, Buffer>;
-      options: {};
-    }
-  ) {
-    const mappedEntries = new Map(
-      Array.from(metadata.internalRepr.entries(), ([key, value]) => [
-        key,
-        value.toString(),
-      ])
-    );
-    const durationValues = mappedEntries.get('server-timing')?.split('dur=');
-    if (durationValues && durationValues[1]) {
-      if (!this.serverTimeRead) {
-        this.serverTimeRead = true;
-        const serverTime = parseInt(durationValues[1]);
-        const projectId = this.projectId;
-        if (projectId) {
-          const attributes = this.getAttemptAttributes(
-            projectId,
-            info.finalOperationStatus,
-            info.streamingOperation
-          );
-          this.metrics.serverLatencies.record(serverTime, attributes);
-        }
-      }
-    }
-  }
-
-  /**
-   * Called when status information is received. Extracts zone and cluster information.
-   * @param {object} status The received status information.
-   */
-  onStatusReceived(status: {
-    metadata: {internalRepr: Map<string, Buffer>; options: {}};
-  }) {
-    const mappedEntries = new Map(
-      Array.from(status.metadata.internalRepr.entries(), ([key, value]) => [
-        key,
-        value.toString(),
-      ])
-    );
-    const instanceInformation = mappedEntries
-      .get('x-goog-ext-425905942-bin')
-      ?.replace(new RegExp('\\n', 'g'), '')
-      .split('\r');
-    if (instanceInformation && instanceInformation[0]) {
-      this.zone = instanceInformation[0];
-    }
-    if (instanceInformation && instanceInformation[1]) {
-      this.cluster = instanceInformation[1];
-    }
-  }
-}
-
-/**
- * A factory class for creating MetricsTracer instances. Initializes
- * OpenTelemetry metrics instruments.
- */
-export class MetricsTracerFactory {
-  private metrics?: Metrics;
-  private observabilityOptions?: ObservabilityOptions;
-  private dateProvider: DateProvider;
-
-  /**
-   * @param {DateProvider} dateProvider An object that provides dates for latency measurement.
-   * @param {ObservabilityOptions} observabilityOptions Options for configuring client-side metrics observability.
-   */
-  constructor(
-    dateProvider: DateProvider,
-    observabilityOptions?: ObservabilityOptions
-  ) {
-    this.observabilityOptions = observabilityOptions;
-    this.dateProvider = dateProvider;
-  }
-
-  /**
-   * Initializes the OpenTelemetry metrics instruments if they haven't been already.
-   * If metrics already exist, this method returns early. Otherwise, it creates and registers
-   * metric instruments (histograms and counters) for various Bigtable client metrics.
-   * It handles the creation of a MeterProvider, either using a user-provided one or creating a default one, and
-   * configures a PeriodicExportingMetricReader for exporting metrics.
-   * @param {string} [projectId] The Google Cloud project ID. Used for metric export.
-   * @param {ObservabilityOptions} [observabilityOptions] Options for configuring client-side metrics observability, including a custom MeterProvider.
-   * @returns {Metrics} An object containing the initialized OpenTelemetry metric instruments.
-   */
-  private initialize(
-    projectId?: string,
-    observabilityOptions?: ObservabilityOptions
-  ) {
-    if (this.metrics) {
-      return this.metrics;
-    } else {
-      // Use MeterProvider provided by user
-      // If MeterProvider was not provided then use the default meter provider.
-      const meterProvider =
-        observabilityOptions && observabilityOptions.meterProvider
-          ? observabilityOptions.meterProvider
-          : new MeterProvider({
-              // This is the default meter provider
-              // Create a resource. Fill the `service.*` attributes in with real values for your service. 
- // GcpDetectorSync will add in resource information about the current environment if you are - // running on GCP. These resource attributes will be translated to a specific GCP monitored - // resource if running on GCP. Otherwise, metrics will be sent with monitored resource - // `generic_task`. - resource: new Resources.Resource({ - 'service.name': 'bigtable-metrics', - }).merge(new ResourceUtil.GcpDetectorSync().detect()), - readers: [ - // Register the exporter - new PeriodicExportingMetricReader({ - // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by - // Cloud Monitoring. - exportIntervalMillis: 10_000, - exporter: new MetricExporter({ - projectId, - }), - }), - ], - }); - const meter = meterProvider.getMeter('bigtable.googleapis.com'); - this.metrics = { - operationLatencies: meter.createHistogram('operation_latencies', { - description: - "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", - }), - attemptLatencies: meter.createHistogram('attempt_latencies', { - description: - 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', - unit: 'ms', - }), - retryCount: meter.createCounter('retry_count', { - description: - 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', - }), - applicationBlockingLatencies: meter.createHistogram( - 'application_blocking_latencies', - { - description: - 'The time from when the client receives the response to a request until the application reads the response. This metric is most relevant for ReadRows requests. The start and stop times for this metric depend on the way that you send the read request; see Application blocking latencies timer examples for details.', - unit: 'ms', - } - ), - firstResponseLatencies: meter.createHistogram( - 'first_response_latencies', - { - description: - 'Latencies from when a client sends a request and receives the first row of the response.', - unit: 'ms', - } - ), - serverLatencies: meter.createHistogram('server_latencies', { - description: - 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', - }), - connectivityErrorCount: meter.createHistogram( - 'connectivity_error_count', - { - description: - "The number of requests that failed to reach Google's network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.", - } - ), - clientBlockingLatencies: meter.createHistogram( - 'client_blocking_latencies', - { - description: - 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', - unit: 'ms', - } - ), - }; - return this.metrics; - } - } - - /** - * Creates a new MetricsTracer instance. - * @param tabularApiSurface The Bigtable table being accessed. - * @param methodName The name of the method being traced. - * @param dateProvider An optional DateProvider for testing purposes. - * @param {string} projectId The project id - * @returns A new MetricsTracer instance. 
- */ - getMetricsTracer( - tabularApiSurface: ITabularApiSurface, - methodName: string, - projectId?: string - ) { - const metrics = this.initialize(projectId, this.observabilityOptions); - return new MetricsTracer( - metrics, - tabularApiSurface, - methodName, - projectId, - this.dateProvider - ); - } -} diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-collector/metrics-collector.ts similarity index 70% rename from test/metrics-tracer/metrics-tracer.ts rename to test/metrics-collector/metrics-collector.ts index 180ad1bf7..34e7fa86a 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -13,12 +13,11 @@ // limitations under the License. import {describe} from 'mocha'; -import {MetricsTracerFactory} from '../../src/client-side-metrics/metrics-tracer-factory'; -import {TestMeterProvider} from '../../common/test-meter-provider'; import {TestDateProvider} from '../../common/test-date-provider'; import * as assert from 'assert'; import * as fs from 'fs'; -import {ObservabilityOptions} from '../../src/client-side-metrics/observability-options'; +import {TestMetricsHandler} from '../../common/test-metrics-handler'; +import {MetricsCollector} from '../../src/client-side-metrics/metrics-collector'; /** * A basic logger class that stores log messages in an array. Useful for testing. @@ -49,19 +48,6 @@ class Logger { */ class FakeBigtable { appProfileId?: string; - metricsTracerFactory: MetricsTracerFactory; - /** - * @param {ObservabilityOptions} observabilityOptions Options for configuring client-side metrics - * observability, including a TestMeterProvider. - */ - constructor( - observabilityOptions: ObservabilityOptions, - dateProvider: TestDateProvider - ) { - this.metricsTracerFactory = new MetricsTracerFactory(dateProvider, { - meterProvider: observabilityOptions.meterProvider, - }); - } /** * A stubbed method that simulates retrieving the project ID. Always returns @@ -85,21 +71,17 @@ class FakeInstance { id = 'fakeInstanceId'; } -describe('Bigtable/MetricsTracer', () => { +describe.only('Bigtable/MetricsCollector', () => { it('should record the right metrics with a typical method call', async () => { const logger = new Logger(); + const metricsHandlers = [new TestMetricsHandler(logger)]; class FakeTable { id = 'fakeTableId'; instance = new FakeInstance(); - bigtable = new FakeBigtable( - { - meterProvider: new TestMeterProvider(logger), - }, - new TestDateProvider(logger) - ); + bigtable = new FakeBigtable(); async fakeMethod(): Promise { - return new Promise((resolve, reject) => { + return new Promise(resolve => { this.bigtable.getProjectId_((err, projectId) => { const standardAttemptInfo = { finalOperationStatus: 'PENDING', @@ -126,68 +108,62 @@ describe('Bigtable/MetricsTracer', () => { options: {}, }, }; - const metricsTracer = - this.bigtable.metricsTracerFactory.getMetricsTracer( - this, - 'fakeMethod', - projectId - ); + const metricsCollector = new MetricsCollector( + this, + metricsHandlers, + 'fakeMethod', + projectId, + new TestDateProvider(logger) + ); // In this method we simulate a series of events that might happen // when a user calls one of the Table methods. // Here is an example of what might happen in a method call: logger.log('1. The operation starts'); - metricsTracer.onOperationStart(); + metricsCollector.onOperationStart(); logger.log('2. The attempt starts.'); - metricsTracer.onAttemptStart(); + metricsCollector.onAttemptStart(); logger.log('3. 
Client receives status information.'); - metricsTracer.onStatusReceived(status); + metricsCollector.onStatusReceived(status); logger.log('4. Client receives metadata.'); - metricsTracer.onMetadataReceived( + metricsCollector.onMetadataReceived( standardAttemptInfo, createMetadata('101') ); logger.log('5. Client receives first row.'); - metricsTracer.onResponse('PENDING'); + metricsCollector.onResponse(); logger.log('6. Client receives metadata.'); - metricsTracer.onMetadataReceived( + metricsCollector.onMetadataReceived( standardAttemptInfo, createMetadata('102') ); logger.log('7. Client receives second row.'); - metricsTracer.onResponse('PENDING'); + metricsCollector.onResponse(); logger.log('8. A transient error occurs.'); - metricsTracer.onAttemptComplete({ + metricsCollector.onAttemptComplete({ finalOperationStatus: 'ERROR', streamingOperation: 'YES', }); logger.log('9. After a timeout, the second attempt is made.'); - metricsTracer.onAttemptStart(); + metricsCollector.onAttemptStart(); logger.log('10. Client receives status information.'); - metricsTracer.onStatusReceived(status); + metricsCollector.onStatusReceived(status); logger.log('11. Client receives metadata.'); - metricsTracer.onMetadataReceived( + metricsCollector.onMetadataReceived( standardAttemptInfo, createMetadata('103') ); logger.log('12. Client receives third row.'); - metricsTracer.onResponse('PENDING'); + metricsCollector.onResponse(); logger.log('13. Client receives metadata.'); - metricsTracer.onMetadataReceived( + metricsCollector.onMetadataReceived( {finalOperationStatus: 'PENDING', streamingOperation: 'YES'}, createMetadata('104') ); logger.log('14. Client receives fourth row.'); - metricsTracer.onResponse('PENDING'); + metricsCollector.onResponse(); logger.log('15. User reads row 1'); - metricsTracer.onRead(); - logger.log('16. User reads row 2'); - metricsTracer.onRead(); - logger.log('17. User reads row 3'); - metricsTracer.onRead(); - logger.log('18. User reads row 4'); - metricsTracer.onRead(); logger.log('19. Stream ends, operation completes'); - metricsTracer.onOperationComplete({ + metricsCollector.onOperationComplete({ retries: 1, finalOperationStatus: 'SUCCESS', connectivityErrorCount: 1, @@ -201,7 +177,7 @@ describe('Bigtable/MetricsTracer', () => { const table = new FakeTable(); await table.fakeMethod(); const expectedOutput = fs.readFileSync( - './test/metrics-tracer/typical-method-call.txt', + './test/metrics-collector/typical-method-call.txt', 'utf8' ); // Ensure events occurred in the right order here: diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt new file mode 100644 index 000000000..9f10b0416 --- /dev/null +++ b/test/metrics-collector/typical-method-call.txt @@ -0,0 +1,35 @@ +1. The operation starts +getDate call returns 1000 ms +2. The attempt starts. +getDate call returns 2000 ms +3. Client receives status information. +4. Client receives metadata. +5. Client receives first row. +getDate call returns 3000 ms +6. Client receives metadata. +7. Client receives second row. +getDate call returns 4000 ms +8. A transient error occurs. +getDate call returns 5000 ms +Recording parameters for onAttemptComplete: +metrics: {"attemptLatency":3000,"serverLatency":101} +attributes: {"attemptStatus":"ERROR","streamingOperation":"YES","projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} +9. 
After a timeout, the second attempt is made. +getDate call returns 6000 ms +10. Client receives status information. +11. Client receives metadata. +12. Client receives third row. +getDate call returns 7000 ms +13. Client receives metadata. +14. Client receives fourth row. +getDate call returns 8000 ms +15. User reads row 1 +19. Stream ends, operation completes +getDate call returns 9000 ms +getDate call returns 10000 ms +Recording parameters for onAttemptComplete: +metrics: {"attemptLatency":4000,"serverLatency":103} +attributes: {"attemptStatus":"SUCCESS","streamingOperation":"YES","projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} +Recording parameters for onOperationComplete: +metrics: {"operationLatency":8000,"firstResponseLatency":6000,"retryCount":1,"connectivityErrorCount":1} +attributes: {"finalOperationStatus":"SUCCESS","streamingOperation":"YES","projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} diff --git a/test/metrics-tracer/typical-method-call.txt b/test/metrics-tracer/typical-method-call.txt deleted file mode 100644 index 4fa4fb6f4..000000000 --- a/test/metrics-tracer/typical-method-call.txt +++ /dev/null @@ -1,43 +0,0 @@ -1. The operation starts -getDate call returns 1000 ms -2. The attempt starts. -getDate call returns 2000 ms -3. Client receives status information. -4. Client receives metadata. -Value added to histogram bigtable.googleapis.com:server_latencies = 101 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;PENDING;;YES;nodejs-bigtable -5. Client receives first row. -getDate call returns 3000 ms -Value added to histogram bigtable.googleapis.com:first_response_latencies = 2000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;PENDING;;nodejs-bigtable -6. Client receives metadata. -7. Client receives second row. -getDate call returns 4000 ms -8. A transient error occurs. -getDate call returns 5000 ms -Value added to histogram bigtable.googleapis.com:attempt_latencies = 3000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;ERROR;;YES;nodejs-bigtable -9. After a timeout, the second attempt is made. -getDate call returns 6000 ms -10. Client receives status information. -11. Client receives metadata. -12. Client receives third row. -getDate call returns 7000 ms -13. Client receives metadata. -14. Client receives fourth row. -getDate call returns 8000 ms -15. User reads row 1 -getDate call returns 9000 ms -16. User reads row 2 -getDate call returns 10000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;;;nodejs-bigtable -17. User reads row 3 -getDate call returns 11000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;;;nodejs-bigtable -18. User reads row 4 -getDate call returns 12000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;;;nodejs-bigtable -19. 
Stream ends, operation completes -getDate call returns 13000 ms -getDate call returns 14000 ms -Value added to histogram bigtable.googleapis.com:attempt_latencies = 8000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 12000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;SUCCESS;YES;nodejs-bigtable -Value added to counter bigtable.googleapis.com:retry_count = 1 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;SUCCESS;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:connectivity_error_count = 1 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;SUCCESS;;;nodejs-bigtable From a62b124bc0a04d347f2cbf18b65150ea75100eab Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 10:51:45 -0500 Subject: [PATCH 086/448] Remove only --- test/metrics-collector/metrics-collector.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 34e7fa86a..734b3334f 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -71,7 +71,7 @@ class FakeInstance { id = 'fakeInstanceId'; } -describe.only('Bigtable/MetricsCollector', () => { +describe('Bigtable/MetricsCollector', () => { it('should record the right metrics with a typical method call', async () => { const logger = new Logger(); const metricsHandlers = [new TestMetricsHandler(logger)]; From 0996d3cd1d3a8047ec511d229f6cade2e66b5ab7 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 11:15:31 -0500 Subject: [PATCH 087/448] Add metrics handlers parameter to Doc --- src/client-side-metrics/metrics-collector.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index e7b4d7da9..7b71e53ca 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -110,6 +110,7 @@ export class MetricsCollector { /** * @param {ITabularApiSurface} tabularApiSurface Information about the Bigtable table being accessed. + * @param {IMetricsHandler[]} metricsHandlers The metrics handlers used for recording metrics. * @param {string} methodName The name of the method being traced. * @param {string} projectId The id of the project. * @param {DateProvider} dateProvider A provider for date/time information (for testing). 
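The pieces introduced so far compose as follows: MetricsCollector times operations and attempts, and fans the recorded values out to every IMetricsHandler it was constructed with. The sketch below is a reading aid only, not part of the patch series: a minimal custom handler wired into a collector, assuming the interfaces as they stand at this point (they are renamed to OnOperationCompleteMetrics and OnAttemptCompleteMetrics in a later patch). The ConsoleMetricsHandler name and the import paths are hypothetical.

// Sketch only, not part of this changeset: a handler that prints metric
// values instead of exporting them. A production handler, such as the
// GCPMetricsHandler in this series, records them on OpenTelemetry
// instruments instead.
import {
  IMetricsHandler,
  onOperationCompleteMetrics,
  onAttemptCompleteMetrics,
} from '../src/client-side-metrics/metrics-handler';
import {Attributes} from '../common/client-side-metrics-attributes';

export class ConsoleMetricsHandler implements IMetricsHandler {
  onOperationComplete(
    metrics: onOperationCompleteMetrics,
    attributes: Attributes
  ) {
    // Called once per operation, after all retries have settled.
    console.log('operation complete', metrics, attributes);
  }

  onAttemptComplete(
    metrics: onAttemptCompleteMetrics,
    attributes: Attributes
  ) {
    // Called once per RPC attempt, including the final one.
    console.log('attempt complete', metrics, attributes);
  }
}

A collector constructed as new MetricsCollector(table, [new ConsoleMetricsHandler()], 'fakeMethod', projectId) would then deliver every onAttemptComplete and onOperationComplete notification to this handler, the same fan-out pattern the test above exercises with TestMetricsHandler.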
From ef8e3fe28103bcfe53aff72a7de836d479749595 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Fri, 24 Jan 2025 11:51:39 -0500
Subject: =?UTF-8?q?Don=E2=80=99t=20require=20retries=20to?=
 =?UTF-8?q?=20be=20passed=20into=20metrics?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

collector
---
 src/client-side-metrics/metrics-collector.ts | 19 +++++++-----------
 test/metrics-collector/metrics-collector.ts  | 21 ++++----------------
 2 files changed, 11 insertions(+), 29 deletions(-)

diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts
index 7b71e53ca..f537f48ee 100644
--- a/src/client-side-metrics/metrics-collector.ts
+++ b/src/client-side-metrics/metrics-collector.ts
@@ -56,10 +56,6 @@ export interface ITabularApiSurface {
  * Information about a Bigtable operation.
  */
 interface OperationInfo {
-  /**
-   * The number of retries attempted for the operation.
-   */
-  retries?: number;
   /**
    * The final status of the operation (e.g., 'OK', 'ERROR').
    */
@@ -101,6 +97,7 @@ export class MetricsCollector {
   private tabularApiSurface: ITabularApiSurface;
   private methodName: string;
   private projectId?: string;
+  private attemptCount = 0;
   private receivedFirstResponse: boolean;
   private metricsHandlers: IMetricsHandler[];
   private firstResponseLatency?: number;
@@ -216,6 +213,7 @@ export class MetricsCollector {
    * @param {AttemptInfo} info Information about the completed attempt.
    */
   onAttemptComplete(info: AttemptInfo) {
+    this.attemptCount++;
     const endTime = this.dateProvider.getDate();
     const projectId = this.projectId;
     if (projectId && this.attemptStartTime) {
@@ -287,7 +285,7 @@ export class MetricsCollector {
       const metrics = {
         operationLatency: totalTime,
         firstResponseLatency: this.firstResponseLatency,
-        retryCount: info.retries,
+        retryCount: this.attemptCount - 1,
         connectivityErrorCount: info.connectivityErrorCount,
       };
       this.metricsHandlers.forEach(metricsHandler => {
@@ -307,13 +305,10 @@ export class MetricsCollector {
    * @param {AttemptInfo} info Information about the completed attempt.
    * @param {object} metadata The received metadata.
    */
-  onMetadataReceived(
-    info: AttemptInfo,
-    metadata: {
-      internalRepr: Map<string, Buffer>;
-      options: {};
-    }
-  ) {
+  onMetadataReceived(metadata: {
+    internalRepr: Map<string, Buffer>;
+    options: {};
+  }) {
     const mappedEntries = new Map(
       Array.from(metadata.internalRepr.entries(), ([key, value]) => [
         key,
diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts
index 734b3334f..02b3f91ed 100644
--- a/test/metrics-collector/metrics-collector.ts
+++ b/test/metrics-collector/metrics-collector.ts
@@ -125,17 +125,11 @@ describe('Bigtable/MetricsCollector', () => {
         logger.log('3. Client receives status information.');
         metricsCollector.onStatusReceived(status);
         logger.log('4. Client receives metadata.');
-        metricsCollector.onMetadataReceived(
-          standardAttemptInfo,
-          createMetadata('101')
-        );
+        metricsCollector.onMetadataReceived(createMetadata('101'));
         logger.log('5. Client receives first row.');
         metricsCollector.onResponse();
         logger.log('6. Client receives metadata.');
-        metricsCollector.onMetadataReceived(
-          standardAttemptInfo,
-          createMetadata('102')
-        );
+        metricsCollector.onMetadataReceived(createMetadata('102'));
         logger.log('7. Client receives second row.');
         metricsCollector.onResponse();
         logger.log('8. A transient error occurs.');
         metricsCollector.onAttemptComplete({
           finalOperationStatus: 'ERROR',
           streamingOperation: 'YES',
         });
         logger.log('9. After a timeout, the second attempt is made.');
         metricsCollector.onAttemptStart();
         logger.log('10. 
Client receives status information.'); metricsCollector.onStatusReceived(status); logger.log('11. Client receives metadata.'); - metricsCollector.onMetadataReceived( - standardAttemptInfo, - createMetadata('103') - ); + metricsCollector.onMetadataReceived(createMetadata('103')); logger.log('12. Client receives third row.'); metricsCollector.onResponse(); logger.log('13. Client receives metadata.'); - metricsCollector.onMetadataReceived( - {finalOperationStatus: 'PENDING', streamingOperation: 'YES'}, - createMetadata('104') - ); + metricsCollector.onMetadataReceived(createMetadata('104')); logger.log('14. Client receives fourth row.'); metricsCollector.onResponse(); logger.log('15. User reads row 1'); logger.log('19. Stream ends, operation completes'); metricsCollector.onOperationComplete({ - retries: 1, finalOperationStatus: 'SUCCESS', connectivityErrorCount: 1, streamingOperation: 'YES', From c68a76f2c8fdae5e90e876dc21c8cf164580d238 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 11:58:00 -0500 Subject: [PATCH 089/448] Remove testMeterProvider --- common/test-meter-provider.ts | 88 ----------------------------------- 1 file changed, 88 deletions(-) delete mode 100644 common/test-meter-provider.ts diff --git a/common/test-meter-provider.ts b/common/test-meter-provider.ts deleted file mode 100644 index 1590fe322..000000000 --- a/common/test-meter-provider.ts +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import {WithLogger, WithLoggerAndName} from './logger'; -import {Attributes, attributesToString} from './client-side-metrics-attributes'; - -/** - * A test implementation of a MeterProvider. This MeterProvider is used for testing purposes. - * It doesn't send metrics to a backend, but instead logs metric updates for verification. - */ -export class TestMeterProvider extends WithLogger { - /** - * Returns a TestMeter, that logs metric updates for verification. - * @param {string} name The name of the meter. - * @returns {TestMeter} - */ - getMeter(name: string) { - return new TestMeter(this.logger, name); - } -} - -/** - * A test implementation of a Meter. Used for testing purposes. It doesn't send metrics to a backend, - * but instead logs metric updates for verification. - */ -class TestMeter extends WithLoggerAndName { - /** - * Creates a test histogram. The TestHistogram logs when values are recorded. - * @param {string} instrument The name of the instrument. - * @returns {TestHistogram} - */ - createHistogram(instrument: string) { - return new TestHistogram(this.logger, `${this.name}:${instrument}`); - } - /** - * Creates a test counter. The TestCounter logs when values are added. - * @param {string} instrument The name of the instrument. - * @returns {TestCounter} - */ - createCounter(instrument: string) { - return new TestCounter(this.logger, `${this.name}:${instrument}`); - } -} - -/** - * A test implementation of a Counter. Used for testing purposes. 
It doesn't send metrics to a backend, - * but instead logs value additions for verification. - */ -class TestCounter extends WithLoggerAndName { - /** - * Simulates adding a value to the counter. Logs the value and the counter name. - * @param {number} value The value to be added to the counter. - * @param {Attributes} attributes The attributes associated with the value. - */ - add(value: number, attributes: Attributes) { - this.logger.log( - `Value added to counter ${this.name} = ${value.toString()} with attributes ${attributesToString(attributes)}` - ); - } -} - -/** - * A test implementation of a Histogram. Used for testing purposes. It doesn't send metrics to a backend, - * but instead logs recorded values for verification. - */ -class TestHistogram extends WithLoggerAndName { - /** - * Simulates recording a value in the histogram. Logs the value and the histogram name. - * @param {number} value The value to be recorded in the histogram. - * @param {Attributes} attributes The attributes associated with the value. - */ - record(value: number, attributes: Attributes) { - this.logger.log( - `Value added to histogram ${this.name} = ${value.toString()} with attributes ${attributesToString(attributes)}` - ); - } -} From 47a24b1e6a3463ce83d378c73c885fc05997d856 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 11:58:47 -0500 Subject: [PATCH 090/448] Remove the attributesToString function --- common/client-side-metrics-attributes.ts | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index d1f31ab63..1b7adecb2 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -29,17 +29,3 @@ export interface Attributes { streamingOperation?: string; clientName: string; } - -/** - * Converts an Attributes object to a string representation. - * This string representation is suitable for use as labels or tags. - * The order of attributes in the output string is fixed: - * projectId;instanceId;table;cluster;zone;appProfileId;methodName;attemptStatus;finalOperationStatus;streamingOperation;clientName - * If an attribute is null or undefined, the empty string is used. - * @param {Attributes} a The Attributes object to convert. - * @returns A string representation of the attribute. - */ -export function attributesToString(a: Attributes) { - const p = (attribute?: string | null) => (attribute ? attribute : ''); - return `${p(a.projectId)};${p(a.instanceId)};${p(a.table)};${p(a.cluster)};${p(a.zone)};${p(a.appProfileId)};${p(a.methodName)};${p(a.attemptStatus)};${p(a.finalOperationStatus)};${p(a.streamingOperation)};nodejs-bigtable`; -} From b2600f213ce9f089820717a8972670423d0babc7 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 13:05:23 -0500 Subject: [PATCH 091/448] Eliminate unused class --- common/logger.ts | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/common/logger.ts b/common/logger.ts index 7b09b8737..284005350 100644 --- a/common/logger.ts +++ b/common/logger.ts @@ -33,20 +33,3 @@ export abstract class WithLogger { this.logger = logger; } } - -/** - * An abstract base class that provides a logger instance and a name. Subclasses - * can use the logger for logging messages, incorporating the name for context. - */ -export abstract class WithLoggerAndName { - protected logger: ILogger; - protected name: string; - /** - * @param {ILogger} logger The logger instance to be used by this object. 
- * @param {string} name The name associated with this object. - */ - constructor(logger: ILogger, name: string) { - this.logger = logger; - this.name = name; - } -} From d4d3f6c053fbecf82d32148d770289ae61da39c6 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 13:11:24 -0500 Subject: [PATCH 092/448] Generate documentation for the IMetricsHandler --- src/client-side-metrics/metrics-handler.ts | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index a3ae4840d..d402fc39e 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -1,10 +1,16 @@ import {Attributes} from '../../common/client-side-metrics-attributes'; +/** + * Metrics related to the completion of a Bigtable operation. + */ export interface onOperationCompleteMetrics { operationLatency: number; retryCount?: number; } +/** + * Metrics related to the completion of a single attempt of a Bigtable operation. + */ export interface onAttemptCompleteMetrics { attemptLatency: number; serverLatency?: number; @@ -13,11 +19,25 @@ export interface onAttemptCompleteMetrics { } // TODO: Trim attributes so only necessary attributes are required. +/** + * An interface for handling client-side metrics related to Bigtable operations. + * Implementations of this interface can define how metrics are recorded and processed. + */ export interface IMetricsHandler { + /** + * Called when an operation completes (successfully or unsuccessfully). + * @param {onOperationCompleteMetrics} metrics Metrics related to the completed operation. + * @param {Attributes} attributes Attributes associated with the completed operation. + */ onOperationComplete?( metrics: onOperationCompleteMetrics, attributes: Attributes ): void; + /** + * Called when an attempt (e.g., an RPC attempt) completes. + * @param {onAttemptCompleteMetrics} metrics Metrics related to the completed attempt. + * @param {Attributes} attributes Attributes associated with the completed attempt. + */ onAttemptComplete?( metrics: onAttemptCompleteMetrics, attributes: Attributes From b8dff1c85088fdc4cba6d8038fa4e831da058954 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 13:16:06 -0500 Subject: [PATCH 093/448] Generate documentation for GCPMetricsHandler --- .../gcp-metrics-handler.ts | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index aea683fe1..86628ac39 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -29,10 +29,21 @@ interface Metrics { clientBlockingLatencies: typeof Histogram; } +/** + * A metrics handler implementation that uses OpenTelemetry to export metrics to Google Cloud Monitoring. + * This handler records metrics such as operation latency, attempt latency, retry count, and more, + * associating them with relevant attributes for detailed analysis in Cloud Monitoring. + */ export class GCPMetricsHandler implements IMetricsHandler { private initialized = false; private otelMetrics?: Metrics; + /** + * Initializes the OpenTelemetry metrics instruments if they haven't been already. + * Creates and registers metric instruments (histograms and counters) for various Bigtable client metrics. + * Sets up a MeterProvider and configures a PeriodicExportingMetricReader for exporting metrics to Cloud Monitoring. 
+ * @param {string} [projectId] The Google Cloud project ID. Used for metric export. If not provided, it will attempt to detect it from the environment. + */ private initialize(projectId?: string) { if (!this.initialized) { this.initialized = true; @@ -114,6 +125,12 @@ export class GCPMetricsHandler implements IMetricsHandler { } } + /** + * Records metrics for a completed Bigtable operation. + * This method records the operation latency and retry count, associating them with provided attributes. + * @param {onOperationCompleteMetrics} metrics Metrics related to the completed operation. + * @param {Attributes} attributes Attributes associated with the completed operation. + */ onOperationComplete( metrics: onOperationCompleteMetrics, attributes: Attributes @@ -125,6 +142,14 @@ export class GCPMetricsHandler implements IMetricsHandler { ); this.otelMetrics?.retryCount.add(metrics.retryCount, attributes); } + + /** + * Records metrics for a completed attempt of a Bigtable operation. + * This method records attempt latency, connectivity error count, server latency, and first response latency, + * along with the provided attributes. + * @param {onAttemptCompleteMetrics} metrics Metrics related to the completed attempt. + * @param {Attributes} attributes Attributes associated with the completed attempt. + */ onAttemptComplete(metrics: onAttemptCompleteMetrics, attributes: Attributes) { this.initialize(); this.otelMetrics?.attemptLatencies.record( From d50384f56d16a3ec00aced943e646bcf08c632cb Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 14:19:03 -0500 Subject: [PATCH 094/448] Restrict attributes interfaces and solve compile --- common/client-side-metrics-attributes.ts | 53 ++++++++++-- common/test-metrics-handler.ts | 19 +++-- .../gcp-metrics-handler.ts | 23 ++--- src/client-side-metrics/metrics-collector.ts | 84 +++++-------------- src/client-side-metrics/metrics-handler.ts | 25 +++--- .../observability-options.ts | 81 ------------------ test/metrics-collector/metrics-collector.ts | 12 +-- 7 files changed, 110 insertions(+), 187 deletions(-) delete mode 100644 src/client-side-metrics/observability-options.ts diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index 1b7adecb2..153bed3a1 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -12,11 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -/** - * Attributes (labels) associated with a Bigtable metric. These - * attributes provide context for the metric values. - */ -export interface Attributes { +interface StandardAttributes { projectId: string; instanceId: string; table: string; @@ -24,8 +20,49 @@ export interface Attributes { zone?: string | null; appProfileId?: string; methodName: string; - attemptStatus?: string; - finalOperationStatus?: string; - streamingOperation?: string; clientName: string; } + +/** + * Information about a Bigtable operation. + */ +export interface OperationOnlyAttributes { + /** + * The final status of the operation (e.g., 'OK', 'ERROR'). + */ + finalOperationStatus: string; + streamingOperation: string; +} + +/** + * Information about a single attempt of a Bigtable operation. + */ +export interface AttemptOnlyAttributes { + /** + * The final status of the operation (e.g., 'OK', 'ERROR'). + */ + finalOperationStatus: string; // TODO: enum + /** + * Whether the operation is a streaming operation or not. 
+ */ + streamingOperation: string; // TODO: enum + /** + * The attempt status of the operation. + */ + attemptStatus: string; // TODO: enum +} + +export interface OnOperationCompleteAttributes + extends StandardAttributes, + OperationOnlyAttributes { + finalOperationStatus: string; + streamingOperation: string; +} + +export interface OnAttemptCompleteAttributes + extends StandardAttributes, + AttemptOnlyAttributes { + attemptStatus: string; + finalOperationStatus: string; + streamingOperation: string; +} diff --git a/common/test-metrics-handler.ts b/common/test-metrics-handler.ts index 737168edb..7fd5be4d5 100644 --- a/common/test-metrics-handler.ts +++ b/common/test-metrics-handler.ts @@ -1,9 +1,9 @@ import {WithLogger} from './logger'; import { - onAttemptCompleteMetrics, - onOperationCompleteMetrics, + OnAttemptCompleteMetrics, + OnOperationCompleteMetrics, } from '../src/client-side-metrics/metrics-handler'; -import {Attributes} from './client-side-metrics-attributes'; +import {OnAttemptCompleteAttributes, OnOperationCompleteAttributes} from './client-side-metrics-attributes'; /** * A test implementation of the IMetricsHandler interface. Used for testing purposes. @@ -12,12 +12,12 @@ import {Attributes} from './client-side-metrics-attributes'; export class TestMetricsHandler extends WithLogger { /** * Logs the metrics and attributes received for an operation completion. - * @param {onOperationCompleteMetrics} metrics Metrics related to the completed operation. + * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. * @param {Attributes} attributes Attributes associated with the completed operation. */ onOperationComplete( - metrics: onOperationCompleteMetrics, - attributes: Attributes + metrics: OnOperationCompleteMetrics, + attributes: OnOperationCompleteAttributes ) { attributes.clientName = 'nodejs-bigtable'; this.logger.log('Recording parameters for onOperationComplete:'); @@ -27,10 +27,13 @@ export class TestMetricsHandler extends WithLogger { /** * Logs the metrics and attributes received for an attempt completion. - * @param {onAttemptCompleteMetrics} metrics Metrics related to the completed attempt. + * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. * @param {Attributes} attributes Attributes associated with the completed attempt. 
*/ - onAttemptComplete(metrics: onAttemptCompleteMetrics, attributes: Attributes) { + onAttemptComplete( + metrics: OnAttemptCompleteMetrics, + attributes: OnAttemptCompleteAttributes + ) { attributes.clientName = 'nodejs-bigtable'; this.logger.log('Recording parameters for onAttemptComplete:'); this.logger.log(`metrics: ${JSON.stringify(metrics)}`); diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 86628ac39..e62b711b1 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -1,12 +1,12 @@ import { IMetricsHandler, - onAttemptCompleteMetrics, - onOperationCompleteMetrics, + OnAttemptCompleteMetrics, + OnOperationCompleteMetrics, } from './metrics-handler'; import * as Resources from '@opentelemetry/resources'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; -import {Attributes} from '../../common/client-side-metrics-attributes'; +import {OnAttemptCompleteAttributes, OnOperationCompleteAttributes} from '../../common/client-side-metrics-attributes'; const { MeterProvider, Histogram, @@ -128,12 +128,12 @@ export class GCPMetricsHandler implements IMetricsHandler { /** * Records metrics for a completed Bigtable operation. * This method records the operation latency and retry count, associating them with provided attributes. - * @param {onOperationCompleteMetrics} metrics Metrics related to the completed operation. - * @param {Attributes} attributes Attributes associated with the completed operation. + * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. + * @param {OnOperationCompleteAttributes} attributes Attributes associated with the completed operation. */ onOperationComplete( - metrics: onOperationCompleteMetrics, - attributes: Attributes + metrics: OnOperationCompleteMetrics, + attributes: OnOperationCompleteAttributes ) { this.initialize(); this.otelMetrics?.operationLatencies.record( @@ -147,10 +147,13 @@ export class GCPMetricsHandler implements IMetricsHandler { * Records metrics for a completed attempt of a Bigtable operation. * This method records attempt latency, connectivity error count, server latency, and first response latency, * along with the provided attributes. - * @param {onAttemptCompleteMetrics} metrics Metrics related to the completed attempt. - * @param {Attributes} attributes Attributes associated with the completed attempt. + * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. + * @param {OnAttemptCompleteAttributes} attributes Attributes associated with the completed attempt. 
*/ - onAttemptComplete(metrics: onAttemptCompleteMetrics, attributes: Attributes) { + onAttemptComplete( + metrics: OnAttemptCompleteMetrics, + attributes: OnAttemptCompleteAttributes + ) { this.initialize(); this.otelMetrics?.attemptLatencies.record( metrics.attemptLatency, diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index f537f48ee..608fdbebc 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -1,6 +1,10 @@ -import {Attributes} from '../../common/client-side-metrics-attributes'; import * as fs from 'fs'; import {IMetricsHandler} from './metrics-handler'; +import { + AttemptOnlyAttributes, + OnOperationCompleteAttributes, + OperationOnlyAttributes, +} from '../../common/client-side-metrics-attributes'; /** * An interface representing a Date-like object. Provides a `getTime` method @@ -52,35 +56,6 @@ export interface ITabularApiSurface { }; } -/** - * Information about a Bigtable operation. - */ -interface OperationInfo { - /** - * The final status of the operation (e.g., 'OK', 'ERROR'). - */ - finalOperationStatus: string; - /** - * Number of times a connectivity error occurred during the operation. - */ - connectivityErrorCount?: number; - streamingOperation: string; -} - -/** - * Information about a single attempt of a Bigtable operation. - */ -interface AttemptInfo { - /** - * The final status of the attempt (e.g., 'OK', 'ERROR'). - */ - finalOperationStatus: string; - /** - * Whether the operation is a streaming operation or not - */ - streamingOperation: string; -} - const packageJSON = fs.readFileSync('package.json'); const version = JSON.parse(packageJSON.toString()).version; @@ -160,20 +135,15 @@ export class MetricsCollector { * provide context about the Bigtable environment, the operation being performed, and the final status of the operation. * Includes whether the operation was a streaming operation or not. * @param {string} projectId The Google Cloud project ID. - * @param {string} finalOperationStatus The final status of the operation. - * @param {string} streamOperation Whether the operation was a streaming operation or not. - * @returns {Attributes} An object containing the attributes for operation latency metrics. + * @param {OperationOnlyAttributes} operationOnlyAttributes The attributes of the operation. + * @returns {OnOperationCompleteAttributes} An object containing the attributes for operation latency metrics. */ private getOperationLatencyAttributes( projectId: string, - finalOperationStatus: string, - streamOperation?: string - ): Attributes { + operationOnlyAttributes: OperationOnlyAttributes + ): OnOperationCompleteAttributes { return Object.assign( - { - finalOperationStatus: finalOperationStatus, - streamingOperation: streamOperation, - }, + operationOnlyAttributes, this.getBasicAttributes(projectId) ); } @@ -183,20 +153,15 @@ export class MetricsCollector { * about the Bigtable environment, the operation being performed, and the status of the attempt. * Includes whether the operation was a streaming operation or not. * @param {string} projectId The Google Cloud project ID. - * @param {string} attemptStatus The status of the attempt. - * @param {string} streamingOperation Whether the operation was a streaming operation or not. - * @returns {Attributes} An object containing the attributes for attempt metrics. + * @param {AttemptOnlyAttributes} attemptOnlyAttributes The attributes of the attempt. 
+ * @returns {OnAttemptCompleteAttributes} An object containing the attributes for attempt metrics. */ private getAttemptAttributes( projectId: string, - attemptStatus: string, - streamingOperation: string + attemptOnlyAttributes: AttemptOnlyAttributes ) { return Object.assign( - { - attemptStatus: attemptStatus, - streamingOperation: streamingOperation, - }, + attemptOnlyAttributes, this.getBasicAttributes(projectId) ); } @@ -210,18 +175,14 @@ export class MetricsCollector { /** * Called when an attempt (e.g., an RPC attempt) completes. Records attempt latencies. - * @param {AttemptInfo} info Information about the completed attempt. + * @param {AttemptOnlyAttributes} info Information about the completed attempt. */ - onAttemptComplete(info: AttemptInfo) { + onAttemptComplete(info: AttemptOnlyAttributes) { this.attemptCount++; const endTime = this.dateProvider.getDate(); const projectId = this.projectId; if (projectId && this.attemptStartTime) { - const attributes = this.getAttemptAttributes( - projectId, - info.finalOperationStatus, - info.streamingOperation - ); + const attributes = this.getAttemptAttributes(projectId, info); const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); this.metricsHandlers.forEach(metricsHandler => { if (metricsHandler.onAttemptComplete) { @@ -267,26 +228,23 @@ export class MetricsCollector { /** * Called when an operation completes (successfully or unsuccessfully). * Records operation latencies, retry counts, and connectivity error counts. - * @param {OperationInfo} info Information about the completed operation. + * @param {OperationOnlyAttributes} info Information about the completed operation. */ - onOperationComplete(info: OperationInfo) { + onOperationComplete(info: OperationOnlyAttributes) { const endTime = this.dateProvider.getDate(); const projectId = this.projectId; - this.onAttemptComplete(info); if (projectId && this.operationStartTime) { const totalTime = endTime.getTime() - this.operationStartTime.getTime(); { // This block records operation latency metrics. const operationLatencyAttributes = this.getOperationLatencyAttributes( projectId, - info.finalOperationStatus, - info.streamingOperation + info ); const metrics = { operationLatency: totalTime, firstResponseLatency: this.firstResponseLatency, retryCount: this.attemptCount - 1, - connectivityErrorCount: info.connectivityErrorCount, }; this.metricsHandlers.forEach(metricsHandler => { if (metricsHandler.onOperationComplete) { @@ -302,7 +260,7 @@ export class MetricsCollector { /** * Called when metadata is received. Extracts server timing information if available. - * @param {AttemptInfo} info Information about the completed attempt. + * @param {AttemptOnlyAttributes} info Information about the completed attempt. * @param {object} metadata The received metadata. */ onMetadataReceived(metadata: { diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index d402fc39e..9a1ba558d 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -1,9 +1,12 @@ -import {Attributes} from '../../common/client-side-metrics-attributes'; +import { + OnAttemptCompleteAttributes, + OnOperationCompleteAttributes, +} from '../../common/client-side-metrics-attributes'; /** * Metrics related to the completion of a Bigtable operation. 
*/ -export interface onOperationCompleteMetrics { +export interface OnOperationCompleteMetrics { operationLatency: number; retryCount?: number; } @@ -11,7 +14,7 @@ export interface onOperationCompleteMetrics { /** * Metrics related to the completion of a single attempt of a Bigtable operation. */ -export interface onAttemptCompleteMetrics { +export interface OnAttemptCompleteMetrics { attemptLatency: number; serverLatency?: number; firstResponseLatency?: number; @@ -26,20 +29,20 @@ export interface onAttemptCompleteMetrics { export interface IMetricsHandler { /** * Called when an operation completes (successfully or unsuccessfully). - * @param {onOperationCompleteMetrics} metrics Metrics related to the completed operation. - * @param {Attributes} attributes Attributes associated with the completed operation. + * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. + * @param {OnOperationCompleteAttributes} attributes Attributes associated with the completed operation. */ onOperationComplete?( - metrics: onOperationCompleteMetrics, - attributes: Attributes + metrics: OnOperationCompleteMetrics, + attributes: OnOperationCompleteAttributes ): void; /** * Called when an attempt (e.g., an RPC attempt) completes. - * @param {onAttemptCompleteMetrics} metrics Metrics related to the completed attempt. - * @param {Attributes} attributes Attributes associated with the completed attempt. + * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. + * @param {OnAttemptCompleteAttributes} attributes Attributes associated with the completed attempt. */ onAttemptComplete?( - metrics: onAttemptCompleteMetrics, - attributes: Attributes + metrics: OnAttemptCompleteMetrics, + attributes: OnAttemptCompleteAttributes ): void; } diff --git a/src/client-side-metrics/observability-options.ts b/src/client-side-metrics/observability-options.ts deleted file mode 100644 index 031c169da..000000000 --- a/src/client-side-metrics/observability-options.ts +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import {Attributes} from '../../common/client-side-metrics-attributes'; - -/** - * The Counter interface for recording increments of a metric. - */ -interface ICounter { - /** - * Adds a value to the counter. - * @param {number} retries The value to be added to the counter. - * @param {Attributes} attributes The attributes associated with this value. - */ - add(retries: number, attributes: Attributes): void; -} - -/** - * The Histogram interface for recording distributions of values of a metric. - */ -interface IHistogram { - /** - * Records a value in the histogram. - * @param {number} value The value to be recorded in the histogram. - * @param {Attributes} attributes The attributes associated with this value. - */ - record(value: number, attributes: Attributes): void; -} - -/** - * The Meter interface. 
Meters are responsible for creating and managing instruments (Counters, Histograms, etc.). - */ -interface IMeter { - /** - * Creates a Counter instrument, which counts increments of a given metric. - * @param {string} instrument The name of the counter instrument. - * @param {Attributes} attributes The attributes associated with this counter. - * @returns {ICounter} A Counter instance. - */ - createCounter(instrument: string, attributes: Attributes): ICounter; - /** - * Creates a Histogram instrument, which records distributions of values for a given metric. - * @param {string} instrument The name of the histogram instrument. - * @param {Attributes} attributes The attributes associated with this histogram. - * @returns {IHistogram} A Histogram instance. - */ - createHistogram(instrument: string, attributes: Attributes): IHistogram; -} - -/** - * The MeterProvider interface. A MeterProvider creates and manages Meters. - */ -interface IMeterProvider { - /** - * Returns a Meter, which can be used to create instruments for recording measurements. - * @param {string} name The name of the Meter. - * @returns {IMeter} A Meter instance. - */ - getMeter(name: string): IMeter; -} - -/** - * Options for configuring client-side metrics observability. Allows users to provide their own MeterProvider. - */ -export interface ObservabilityOptions { - /** - * The MeterProvider to use for recording metrics. If not provided, a default MeterProvider will be used. - */ - meterProvider: IMeterProvider; -} diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 02b3f91ed..98410b2fd 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -83,11 +83,6 @@ describe('Bigtable/MetricsCollector', () => { async fakeMethod(): Promise { return new Promise(resolve => { this.bigtable.getProjectId_((err, projectId) => { - const standardAttemptInfo = { - finalOperationStatus: 'PENDING', - streamingOperation: 'YES', - }; - function createMetadata(duration: string) { return { internalRepr: new Map([ @@ -136,6 +131,7 @@ describe('Bigtable/MetricsCollector', () => { metricsCollector.onAttemptComplete({ finalOperationStatus: 'ERROR', streamingOperation: 'YES', + attemptStatus: 'ERROR', }); logger.log('9. After a timeout, the second attempt is made.'); metricsCollector.onAttemptStart(); @@ -151,9 +147,13 @@ describe('Bigtable/MetricsCollector', () => { metricsCollector.onResponse(); logger.log('15. User reads row 1'); logger.log('19. 
Stream ends, operation completes'); + metricsCollector.onAttemptComplete({ + finalOperationStatus: 'SUCCESS', + attemptStatus: 'SUCCESS', + streamingOperation: 'YES', + }); metricsCollector.onOperationComplete({ finalOperationStatus: 'SUCCESS', - connectivityErrorCount: 1, streamingOperation: 'YES', }); resolve(); From b5fc1f248e8c3aa1fb82cf3c193b056db2791213 Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Fri, 24 Jan 2025 19:23:58 +0000 Subject: [PATCH 095/448] =?UTF-8?q?=F0=9F=A6=89=20Updates=20from=20OwlBot?= =?UTF-8?q?=20post-processor?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --- common/test-metrics-handler.ts | 5 ++++- src/client-side-metrics/gcp-metrics-handler.ts | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/common/test-metrics-handler.ts b/common/test-metrics-handler.ts index 7fd5be4d5..479bdcbda 100644 --- a/common/test-metrics-handler.ts +++ b/common/test-metrics-handler.ts @@ -3,7 +3,10 @@ import { OnAttemptCompleteMetrics, OnOperationCompleteMetrics, } from '../src/client-side-metrics/metrics-handler'; -import {OnAttemptCompleteAttributes, OnOperationCompleteAttributes} from './client-side-metrics-attributes'; +import { + OnAttemptCompleteAttributes, + OnOperationCompleteAttributes, +} from './client-side-metrics-attributes'; /** * A test implementation of the IMetricsHandler interface. Used for testing purposes. diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index e62b711b1..0041db253 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -6,7 +6,10 @@ import { import * as Resources from '@opentelemetry/resources'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; -import {OnAttemptCompleteAttributes, OnOperationCompleteAttributes} from '../../common/client-side-metrics-attributes'; +import { + OnAttemptCompleteAttributes, + OnOperationCompleteAttributes, +} from '../../common/client-side-metrics-attributes'; const { MeterProvider, Histogram, From 1e5dc82b832e2679f97013ff7bea4bb1c1164191 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 14:24:54 -0500 Subject: [PATCH 096/448] use undefined instead of null --- common/client-side-metrics-attributes.ts | 4 ++-- src/client-side-metrics/metrics-collector.ts | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index 153bed3a1..0927425e8 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -16,8 +16,8 @@ interface StandardAttributes { projectId: string; instanceId: string; table: string; - cluster?: string | null; - zone?: string | null; + cluster?: string; + zone?: string; appProfileId?: string; methodName: string; clientName: string; diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index 608fdbebc..6d12d9b0d 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -67,8 +67,8 @@ const version = JSON.parse(packageJSON.toString()).version; export class MetricsCollector { private operationStartTime: DateLike | null; private attemptStartTime: DateLike | null; - 
private zone: string | null | undefined; - private cluster: string | null | undefined; + private zone: string | undefined; + private cluster: string | undefined; private tabularApiSurface: ITabularApiSurface; private methodName: string; private projectId?: string; @@ -94,8 +94,8 @@ export class MetricsCollector { projectId?: string, dateProvider?: DateProvider ) { - this.zone = null; - this.cluster = null; + this.zone = undefined; + this.cluster = undefined; this.tabularApiSurface = tabularApiSurface; this.methodName = methodName; this.operationStartTime = null; From c2ffbc649120befaa896aa6f7779c9aafaa9d87f Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 15:46:27 -0500 Subject: [PATCH 097/448] Introduce enums for allowable values --- common/client-side-metrics-attributes.ts | 38 ++++++++++++------- src/client-side-metrics/metrics-collector.ts | 3 +- test/metrics-collector/metrics-collector.ts | 25 +++++++----- .../metrics-collector/typical-method-call.txt | 12 +++--- 4 files changed, 49 insertions(+), 29 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index 0927425e8..d891d5841 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -30,8 +30,8 @@ export interface OperationOnlyAttributes { /** * The final status of the operation (e.g., 'OK', 'ERROR'). */ - finalOperationStatus: string; - streamingOperation: string; + finalOperationStatus: FinalOperationStatus; + streamingOperation: StreamingOperation; } /** @@ -41,28 +41,40 @@ export interface AttemptOnlyAttributes { /** * The final status of the operation (e.g., 'OK', 'ERROR'). */ - finalOperationStatus: string; // TODO: enum + finalOperationStatus: FinalOperationStatus; /** * Whether the operation is a streaming operation or not. */ - streamingOperation: string; // TODO: enum + streamingOperation: StreamingOperation; /** * The attempt status of the operation. */ - attemptStatus: string; // TODO: enum + attemptStatus: AttemptStatus; +} + +export enum FinalOperationStatus { + OK = 'OK', + ERROR = 'ERROR', +} + +export enum AttemptStatus { + OK = 'OK', + ERROR = 'ERROR', +} + +export enum StreamingOperation { + YES = 'YES', + NO = 'NO', } export interface OnOperationCompleteAttributes extends StandardAttributes, - OperationOnlyAttributes { - finalOperationStatus: string; - streamingOperation: string; -} + OperationOnlyAttributes {} export interface OnAttemptCompleteAttributes extends StandardAttributes, - AttemptOnlyAttributes { - attemptStatus: string; - finalOperationStatus: string; - streamingOperation: string; + AttemptOnlyAttributes {} + +export interface OnAttemptCompleteInfo extends AttemptOnlyAttributes { + connectivityErrorCount: number; } diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index 6d12d9b0d..ae1bedfcc 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -2,6 +2,7 @@ import * as fs from 'fs'; import {IMetricsHandler} from './metrics-handler'; import { AttemptOnlyAttributes, + OnAttemptCompleteInfo, OnOperationCompleteAttributes, OperationOnlyAttributes, } from '../../common/client-side-metrics-attributes'; @@ -177,7 +178,7 @@ export class MetricsCollector { * Called when an attempt (e.g., an RPC attempt) completes. Records attempt latencies. * @param {AttemptOnlyAttributes} info Information about the completed attempt. 
*/ - onAttemptComplete(info: AttemptOnlyAttributes) { + onAttemptComplete(info: OnAttemptCompleteInfo) { this.attemptCount++; const endTime = this.dateProvider.getDate(); const projectId = this.projectId; diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 98410b2fd..df1ca7321 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -18,6 +18,11 @@ import * as assert from 'assert'; import * as fs from 'fs'; import {TestMetricsHandler} from '../../common/test-metrics-handler'; import {MetricsCollector} from '../../src/client-side-metrics/metrics-collector'; +import { + AttemptStatus, + FinalOperationStatus, + StreamingOperation, +} from '../../common/client-side-metrics-attributes'; /** * A basic logger class that stores log messages in an array. Useful for testing. @@ -71,7 +76,7 @@ class FakeInstance { id = 'fakeInstanceId'; } -describe('Bigtable/MetricsCollector', () => { +describe.only('Bigtable/MetricsCollector', () => { it('should record the right metrics with a typical method call', async () => { const logger = new Logger(); const metricsHandlers = [new TestMetricsHandler(logger)]; @@ -129,9 +134,10 @@ describe('Bigtable/MetricsCollector', () => { metricsCollector.onResponse(); logger.log('8. A transient error occurs.'); metricsCollector.onAttemptComplete({ - finalOperationStatus: 'ERROR', - streamingOperation: 'YES', - attemptStatus: 'ERROR', + finalOperationStatus: FinalOperationStatus.ERROR, + streamingOperation: StreamingOperation.YES, + attemptStatus: AttemptStatus.ERROR, + connectivityErrorCount: 1, }); logger.log('9. After a timeout, the second attempt is made.'); metricsCollector.onAttemptStart(); @@ -148,13 +154,14 @@ describe('Bigtable/MetricsCollector', () => { logger.log('15. User reads row 1'); logger.log('19. Stream ends, operation completes'); metricsCollector.onAttemptComplete({ - finalOperationStatus: 'SUCCESS', - attemptStatus: 'SUCCESS', - streamingOperation: 'YES', + finalOperationStatus: FinalOperationStatus.ERROR, + attemptStatus: AttemptStatus.OK, + streamingOperation: StreamingOperation.YES, + connectivityErrorCount: 1, }); metricsCollector.onOperationComplete({ - finalOperationStatus: 'SUCCESS', - streamingOperation: 'YES', + finalOperationStatus: FinalOperationStatus.OK, + streamingOperation: StreamingOperation.YES, }); resolve(); }); diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index 9f10b0416..00d645e0b 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -13,7 +13,7 @@ getDate call returns 4000 ms getDate call returns 5000 ms Recording parameters for onAttemptComplete: metrics: {"attemptLatency":3000,"serverLatency":101} -attributes: {"attemptStatus":"ERROR","streamingOperation":"YES","projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} +attributes: {"finalOperationStatus":"ERROR","streamingOperation":"YES","attemptStatus":"ERROR","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} 9. After a timeout, the second attempt is made. getDate call returns 6000 ms 10. Client receives status information. 
@@ -26,10 +26,10 @@ getDate call returns 8000 ms 15. User reads row 1 19. Stream ends, operation completes getDate call returns 9000 ms -getDate call returns 10000 ms Recording parameters for onAttemptComplete: -metrics: {"attemptLatency":4000,"serverLatency":103} -attributes: {"attemptStatus":"SUCCESS","streamingOperation":"YES","projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} +metrics: {"attemptLatency":3000,"serverLatency":103} +attributes: {"finalOperationStatus":"ERROR","attemptStatus":"OK","streamingOperation":"YES","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} +getDate call returns 10000 ms Recording parameters for onOperationComplete: -metrics: {"operationLatency":8000,"firstResponseLatency":6000,"retryCount":1,"connectivityErrorCount":1} -attributes: {"finalOperationStatus":"SUCCESS","streamingOperation":"YES","projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} +metrics: {"operationLatency":9000,"firstResponseLatency":6000,"retryCount":1} +attributes: {"finalOperationStatus":"OK","streamingOperation":"YES","projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} From 9320149600f3f2f57c137d34dff23e3d484067c0 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 15:50:41 -0500 Subject: [PATCH 098/448] Add more headers --- common/test-metrics-handler.ts | 14 ++++++++++++++ src/client-side-metrics/gcp-metrics-handler.ts | 14 ++++++++++++++ src/client-side-metrics/metrics-collector.ts | 14 ++++++++++++++ src/client-side-metrics/metrics-handler.ts | 14 ++++++++++++++ 4 files changed, 56 insertions(+) diff --git a/common/test-metrics-handler.ts b/common/test-metrics-handler.ts index 479bdcbda..9ffabd7a0 100644 --- a/common/test-metrics-handler.ts +++ b/common/test-metrics-handler.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import {WithLogger} from './logger'; import { OnAttemptCompleteMetrics, diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 0041db253..d80734b63 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import { IMetricsHandler, OnAttemptCompleteMetrics, diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index ae1bedfcc..a3d1623e5 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import * as fs from 'fs'; import {IMetricsHandler} from './metrics-handler'; import { diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index 9a1ba558d..cde1ee803 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import { OnAttemptCompleteAttributes, OnOperationCompleteAttributes, From 40233611aa3e6db1e092e20297a09ff28772e4c7 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 15:54:45 -0500 Subject: [PATCH 099/448] Remove only --- test/metrics-collector/metrics-collector.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index df1ca7321..bdc58ed04 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -76,7 +76,7 @@ class FakeInstance { id = 'fakeInstanceId'; } -describe.only('Bigtable/MetricsCollector', () => { +describe('Bigtable/MetricsCollector', () => { it('should record the right metrics with a typical method call', async () => { const logger = new Logger(); const metricsHandlers = [new TestMetricsHandler(logger)]; From ef9173376c88deae0f547d1a81217820143cecdd Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 16:20:48 -0500 Subject: [PATCH 100/448] Use null to pass values around. 
Not undefined --- common/client-side-metrics-attributes.ts | 2 +- src/client-side-metrics/metrics-collector.ts | 12 ++++++++---- src/client-side-metrics/metrics-handler.ts | 14 ++++++++++---- 3 files changed, 19 insertions(+), 9 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index d891d5841..73aee51d5 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -76,5 +76,5 @@ export interface OnAttemptCompleteAttributes AttemptOnlyAttributes {} export interface OnAttemptCompleteInfo extends AttemptOnlyAttributes { - connectivityErrorCount: number; + connectivityErrorCount: number | null; } diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index a3d1623e5..e46efb580 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -90,9 +90,9 @@ export class MetricsCollector { private attemptCount = 0; private receivedFirstResponse: boolean; private metricsHandlers: IMetricsHandler[]; - private firstResponseLatency?: number; + private firstResponseLatency: number | null; private serverTimeRead: boolean; - private serverTime?: number; + private serverTime: number | null; private dateProvider: DateProvider; /** @@ -117,7 +117,9 @@ export class MetricsCollector { this.attemptStartTime = null; this.receivedFirstResponse = false; this.metricsHandlers = metricsHandlers; + this.firstResponseLatency = null; this.serverTimeRead = false; + this.serverTime = null; this.projectId = projectId; if (dateProvider) { this.dateProvider = dateProvider; @@ -205,6 +207,8 @@ export class MetricsCollector { { attemptLatency: totalTime, serverLatency: this.serverTime, + connectivityErrorCount: info.connectivityErrorCount, + firstResponseLatency: this.firstResponseLatency, }, attributes ); @@ -218,9 +222,9 @@ export class MetricsCollector { */ onAttemptStart() { this.attemptStartTime = this.dateProvider.getDate(); - this.serverTime = undefined; + this.serverTime = null; this.serverTimeRead = false; - this.firstResponseLatency = undefined; + this.firstResponseLatency = null; this.receivedFirstResponse = false; } diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index cde1ee803..24f81a6af 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -17,12 +17,18 @@ import { OnOperationCompleteAttributes, } from '../../common/client-side-metrics-attributes'; +/** + * The interfaces below use null instead of undefined to gain the advantage + * of being able to use the compiler to tell us when a property isn't being + * provided for easier debugging. + */ + /** * Metrics related to the completion of a Bigtable operation. */ export interface OnOperationCompleteMetrics { operationLatency: number; - retryCount?: number; + retryCount: number | null; } /** @@ -30,9 +36,9 @@ export interface OnOperationCompleteMetrics { */ export interface OnAttemptCompleteMetrics { attemptLatency: number; - serverLatency?: number; - firstResponseLatency?: number; - connectivityErrorCount?: number; + serverLatency: number | null; + firstResponseLatency: number | null; + connectivityErrorCount: number | null; } // TODO: Trim attributes so only necessary attributes are required. 
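A note on the null/undefined convention that this commit starts and that two later
commits in the series ("Expose interface allowing undefined not null" and "Better
explanations for design decision inline") settle: internal collector state is typed
number | null, so the compiler flags any field that is never assigned, while the
public handler interface keeps optional (undefined) fields, so new metrics can be
added later without breaking existing handlers. A minimal TypeScript sketch of that
split; the names here are illustrative only, not part of the patches:

// Internal state: null means "not measured yet"; the non-optional type forces
// an explicit initialization, which is the debugging aid described inline in a
// later commit.
class CollectorStateSketch {
  serverTime: number | null = null;
  firstResponseLatency: number | null = null;
}

// Public handler metrics: optional fields, so adding a metric later does not
// break existing IMetricsHandler implementations.
interface AttemptMetricsSketch {
  attemptLatency: number;
  serverLatency?: number;
  firstResponseLatency?: number;
}

// The conversion happens at the boundary, as onAttemptComplete eventually does
// with the ?? operator:
function toHandlerMetrics(
  state: CollectorStateSketch,
  attemptLatency: number
): AttemptMetricsSketch {
  return {
    attemptLatency,
    serverLatency: state.serverTime ?? undefined,
    firstResponseLatency: state.firstResponseLatency ?? undefined,
  };
}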
From 52b570ccca25b336e615610c3ab119a46970af11 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 16:26:13 -0500 Subject: [PATCH 101/448] Modify test step --- test/metrics-collector/metrics-collector.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index bdc58ed04..bfda40b00 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -152,7 +152,7 @@ describe('Bigtable/MetricsCollector', () => { logger.log('14. Client receives fourth row.'); metricsCollector.onResponse(); logger.log('15. User reads row 1'); - logger.log('19. Stream ends, operation completes'); + logger.log('16. Stream ends, operation completes'); metricsCollector.onAttemptComplete({ finalOperationStatus: FinalOperationStatus.ERROR, attemptStatus: AttemptStatus.OK, From 6a6774f90b82009bef03aea8395927612e86228d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 16:30:58 -0500 Subject: [PATCH 102/448] Add metrics --- test/metrics-collector/typical-method-call.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index 00d645e0b..4a94904f7 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -12,7 +12,7 @@ getDate call returns 4000 ms 8. A transient error occurs. getDate call returns 5000 ms Recording parameters for onAttemptComplete: -metrics: {"attemptLatency":3000,"serverLatency":101} +metrics: {"attemptLatency":3000,"serverLatency":101,"connectivityErrorCount":1,"firstResponseLatency":2000} attributes: {"finalOperationStatus":"ERROR","streamingOperation":"YES","attemptStatus":"ERROR","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} 9. After a timeout, the second attempt is made. getDate call returns 6000 ms @@ -24,10 +24,10 @@ getDate call returns 7000 ms 14. Client receives fourth row. getDate call returns 8000 ms 15. User reads row 1 -19. Stream ends, operation completes +16. 
Stream ends, operation completes getDate call returns 9000 ms Recording parameters for onAttemptComplete: -metrics: {"attemptLatency":3000,"serverLatency":103} +metrics: {"attemptLatency":3000,"serverLatency":103,"connectivityErrorCount":1,"firstResponseLatency":6000} attributes: {"finalOperationStatus":"ERROR","attemptStatus":"OK","streamingOperation":"YES","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} getDate call returns 10000 ms Recording parameters for onOperationComplete: From 10b6d3071e1f861b327a76d11a49d26e9885ea6d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 16:32:36 -0500 Subject: [PATCH 103/448] =?UTF-8?q?Don=E2=80=99t=20provide=20first=20respo?= =?UTF-8?q?nse=20latency?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/client-side-metrics/metrics-collector.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index e46efb580..e2bf95152 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -262,7 +262,6 @@ export class MetricsCollector { ); const metrics = { operationLatency: totalTime, - firstResponseLatency: this.firstResponseLatency, retryCount: this.attemptCount - 1, }; this.metricsHandlers.forEach(metricsHandler => { From 33c17c6f96d22d23cbea194662ad6367b2cb0277 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 16:34:48 -0500 Subject: [PATCH 104/448] Remove firstResponseLatency from operation metrics --- test/metrics-collector/typical-method-call.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index 4a94904f7..55210e357 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -31,5 +31,5 @@ metrics: {"attemptLatency":3000,"serverLatency":103,"connectivityErrorCount":1," attributes: {"finalOperationStatus":"ERROR","attemptStatus":"OK","streamingOperation":"YES","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} getDate call returns 10000 ms Recording parameters for onOperationComplete: -metrics: {"operationLatency":9000,"firstResponseLatency":6000,"retryCount":1} +metrics: {"operationLatency":9000,"retryCount":1} attributes: {"finalOperationStatus":"OK","streamingOperation":"YES","projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} From fbf2314a43362b817b7e0112375fe50b432cda76 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 16:52:33 -0500 Subject: [PATCH 105/448] Expose interface allowing undefined not null --- common/client-side-metrics-attributes.ts | 2 +- src/client-side-metrics/metrics-collector.ts | 4 ++-- src/client-side-metrics/metrics-handler.ts | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index 73aee51d5..d891d5841 100644 --- a/common/client-side-metrics-attributes.ts +++ 
b/common/client-side-metrics-attributes.ts @@ -76,5 +76,5 @@ export interface OnAttemptCompleteAttributes AttemptOnlyAttributes {} export interface OnAttemptCompleteInfo extends AttemptOnlyAttributes { - connectivityErrorCount: number | null; + connectivityErrorCount: number; } diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index e2bf95152..946fc723c 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -206,9 +206,9 @@ export class MetricsCollector { metricsHandler.onAttemptComplete( { attemptLatency: totalTime, - serverLatency: this.serverTime, + serverLatency: this.serverTime ?? undefined, connectivityErrorCount: info.connectivityErrorCount, - firstResponseLatency: this.firstResponseLatency, + firstResponseLatency: this.firstResponseLatency ?? undefined, }, attributes ); diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index 24f81a6af..97b6c5ff5 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -28,7 +28,7 @@ import { */ export interface OnOperationCompleteMetrics { operationLatency: number; - retryCount: number | null; + retryCount?: number; } /** @@ -36,9 +36,9 @@ export interface OnOperationCompleteMetrics { */ export interface OnAttemptCompleteMetrics { attemptLatency: number; - serverLatency: number | null; - firstResponseLatency: number | null; - connectivityErrorCount: number | null; + serverLatency?: number; + firstResponseLatency?: number; + connectivityErrorCount: number; } // TODO: Trim attributes so only necessary attributes are required. From 39fe8610d6a1940bac8476c8052c826948fc6730 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 17:15:28 -0500 Subject: [PATCH 106/448] Better explanations for design decision inline --- src/client-side-metrics/metrics-collector.ts | 2 -- src/client-side-metrics/metrics-handler.ts | 6 +++--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index 946fc723c..9868a673f 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -74,8 +74,6 @@ export interface ITabularApiSurface { const packageJSON = fs.readFileSync('package.json'); const version = JSON.parse(packageJSON.toString()).version; -// TODO: Check if metrics tracer method exists. - /** * A class for tracing and recording client-side metrics related to Bigtable operations. */ diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index 97b6c5ff5..735758be4 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -18,9 +18,9 @@ import { } from '../../common/client-side-metrics-attributes'; /** - * The interfaces below use null instead of undefined to gain the advantage - * of being able to use the compiler to tell us when a property isn't being - * provided for easier debugging. + * The interfaces below use undefined instead of null to indicate a metric is + * not available yet. The benefit of this is that new metrics can be added + * without requiring users to change the methods in their metrics handler. 
*/ /** From 8f131001a97f3c8701533ab4052defb10e6c27f2 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 27 Jan 2025 11:03:27 -0500 Subject: [PATCH 107/448] Use attempt start time not operation start time for firstResponseLatency --- src/client-side-metrics/metrics-collector.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index 9868a673f..24228cb33 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -233,8 +233,8 @@ export class MetricsCollector { onResponse() { const endTime = this.dateProvider.getDate(); const projectId = this.projectId; - if (projectId && this.operationStartTime) { - const totalTime = endTime.getTime() - this.operationStartTime.getTime(); + if (projectId && this.attemptStartTime) { + const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); if (!this.receivedFirstResponse) { this.receivedFirstResponse = true; this.firstResponseLatency = totalTime; From 48e0e95a7cbd4020959c68356e01b8960f1b67af Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 27 Jan 2025 11:06:50 -0500 Subject: [PATCH 108/448] Adjust tests for first response latency --- test/metrics-collector/typical-method-call.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index 55210e357..c883357da 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -12,7 +12,7 @@ getDate call returns 4000 ms 8. A transient error occurs. getDate call returns 5000 ms Recording parameters for onAttemptComplete: -metrics: {"attemptLatency":3000,"serverLatency":101,"connectivityErrorCount":1,"firstResponseLatency":2000} +metrics: {"attemptLatency":3000,"serverLatency":101,"connectivityErrorCount":1,"firstResponseLatency":1000} attributes: {"finalOperationStatus":"ERROR","streamingOperation":"YES","attemptStatus":"ERROR","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} 9. After a timeout, the second attempt is made. getDate call returns 6000 ms @@ -27,7 +27,7 @@ getDate call returns 8000 ms 16. 
Stream ends, operation completes getDate call returns 9000 ms Recording parameters for onAttemptComplete: -metrics: {"attemptLatency":3000,"serverLatency":103,"connectivityErrorCount":1,"firstResponseLatency":6000} +metrics: {"attemptLatency":3000,"serverLatency":103,"connectivityErrorCount":1,"firstResponseLatency":1000} attributes: {"finalOperationStatus":"ERROR","attemptStatus":"OK","streamingOperation":"YES","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} getDate call returns 10000 ms Recording parameters for onOperationComplete: From 66c4ab1efc5981332926c11218ba8ee8a4138101 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 27 Jan 2025 15:10:29 -0500 Subject: [PATCH 109/448] Remove TODO --- src/client-side-metrics/metrics-handler.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index 735758be4..acc2b88af 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -41,7 +41,6 @@ export interface OnAttemptCompleteMetrics { connectivityErrorCount: number; } -// TODO: Trim attributes so only necessary attributes are required. /** * An interface for handling client-side metrics related to Bigtable operations. * Implementations of this interface can define how metrics are recorded and processed. From e7c5b5f4926002e43fd6b5b151ce77491239b5b9 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 28 Jan 2025 09:54:44 -0500 Subject: [PATCH 110/448] Use the MethodName enum instead of string --- common/client-side-metrics-attributes.ts | 9 +++++++++ src/client-side-metrics/metrics-collector.ts | 7 ++++--- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index d891d5841..3c492da43 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -78,3 +78,12 @@ export interface OnAttemptCompleteAttributes export interface OnAttemptCompleteInfo extends AttemptOnlyAttributes { connectivityErrorCount: number; } + +export enum MethodName { + READ_ROWS = 'readRows', + MUTATE_ROW = 'mutateRow', + CHECK_AND_MUTATE_ROW = 'checkAndMutateRow', + READ_MODIFY_WRITE_ROW = 'readModifyWriteRow', + SAMPLE_ROW_KEYS = 'sampleRowKeys', + MUTATE_ROWS = 'mutateRows', +} diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index 24228cb33..5695b58d6 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -16,6 +16,7 @@ import * as fs from 'fs'; import {IMetricsHandler} from './metrics-handler'; import { AttemptOnlyAttributes, + MethodName, OnAttemptCompleteInfo, OnOperationCompleteAttributes, OperationOnlyAttributes, @@ -83,7 +84,7 @@ export class MetricsCollector { private zone: string | undefined; private cluster: string | undefined; private tabularApiSurface: ITabularApiSurface; - private methodName: string; + private methodName: MethodName; private projectId?: string; private attemptCount = 0; private receivedFirstResponse: boolean; @@ -96,14 +97,14 @@ export class MetricsCollector { /** * @param {ITabularApiSurface} tabularApiSurface Information about the Bigtable table being accessed. 
* @param {IMetricsHandler[]} metricsHandlers The metrics handlers used for recording metrics. - * @param {string} methodName The name of the method being traced. + * @param {MethodName} methodName The name of the method being traced. * @param {string} projectId The id of the project. * @param {DateProvider} dateProvider A provider for date/time information (for testing). */ constructor( tabularApiSurface: ITabularApiSurface, metricsHandlers: IMetricsHandler[], - methodName: string, + methodName: MethodName, projectId?: string, dateProvider?: DateProvider ) { From 98be3516ffa3aecbacdc9c81d531d062f59127d0 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 28 Jan 2025 10:09:57 -0500 Subject: [PATCH 111/448] =?UTF-8?q?Don=E2=80=99t=20use=20enum=20for=20stre?= =?UTF-8?q?aming=20operation?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- common/client-side-metrics-attributes.ts | 9 ++------- test/metrics-collector/metrics-collector.ts | 7 +++---- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index 3c492da43..8cbfdcfec 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -31,7 +31,7 @@ export interface OperationOnlyAttributes { * The final status of the operation (e.g., 'OK', 'ERROR'). */ finalOperationStatus: FinalOperationStatus; - streamingOperation: StreamingOperation; + streamingOperation: boolean; } /** @@ -45,7 +45,7 @@ export interface AttemptOnlyAttributes { /** * Whether the operation is a streaming operation or not. */ - streamingOperation: StreamingOperation; + streamingOperation: boolean; /** * The attempt status of the operation. */ @@ -62,11 +62,6 @@ export enum AttemptStatus { ERROR = 'ERROR', } -export enum StreamingOperation { - YES = 'YES', - NO = 'NO', -} - export interface OnOperationCompleteAttributes extends StandardAttributes, OperationOnlyAttributes {} diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index bfda40b00..89c9774c5 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -21,7 +21,6 @@ import {MetricsCollector} from '../../src/client-side-metrics/metrics-collector' import { AttemptStatus, FinalOperationStatus, - StreamingOperation, } from '../../common/client-side-metrics-attributes'; /** @@ -135,7 +134,7 @@ describe('Bigtable/MetricsCollector', () => { logger.log('8. 
A transient error occurs.'); metricsCollector.onAttemptComplete({ finalOperationStatus: FinalOperationStatus.ERROR, - streamingOperation: StreamingOperation.YES, + streamingOperation: true, attemptStatus: AttemptStatus.ERROR, connectivityErrorCount: 1, }); @@ -156,12 +155,12 @@ describe('Bigtable/MetricsCollector', () => { metricsCollector.onAttemptComplete({ finalOperationStatus: FinalOperationStatus.ERROR, attemptStatus: AttemptStatus.OK, - streamingOperation: StreamingOperation.YES, + streamingOperation: true, connectivityErrorCount: 1, }); metricsCollector.onOperationComplete({ finalOperationStatus: FinalOperationStatus.OK, - streamingOperation: StreamingOperation.YES, + streamingOperation: true, }); resolve(); }); From efdfcead853a7331e66eefd9884de8d9cb08ddc0 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 28 Jan 2025 10:40:59 -0500 Subject: [PATCH 112/448] Remove copy/pasted comment --- src/client-side-metrics/gcp-metrics-handler.ts | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index d80734b63..7e2945a48 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -67,12 +67,6 @@ export class GCPMetricsHandler implements IMetricsHandler { // Use MeterProvider provided by user // If MeterProvider was not provided then use the default meter provider. const meterProvider = new MeterProvider({ - // This is the default meter provider - // Create a resource. Fill the `service.*` attributes in with real values for your service. - // GcpDetectorSync will add in resource information about the current environment if you are - // running on GCP. These resource attributes will be translated to a specific GCP monitored - // resource if running on GCP. Otherwise, metrics will be sent with monitored resource - // `generic_task`. resource: new Resources.Resource({ 'service.name': 'bigtable-metrics', }).merge(new ResourceUtil.GcpDetectorSync().detect()), From 4a6a47669e8106b675fb9e736b9703bc7048dd21 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 28 Jan 2025 11:44:42 -0500 Subject: [PATCH 113/448] Rename to OperationMetricsCollector --- ...cs-collector.ts => operation-metrics-collector.ts} | 2 +- test/metrics-collector/metrics-collector.ts | 11 ++++------- 2 files changed, 5 insertions(+), 8 deletions(-) rename src/client-side-metrics/{metrics-collector.ts => operation-metrics-collector.ts} (99%) diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts similarity index 99% rename from src/client-side-metrics/metrics-collector.ts rename to src/client-side-metrics/operation-metrics-collector.ts index 5695b58d6..0e38c4fc4 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -78,7 +78,7 @@ const version = JSON.parse(packageJSON.toString()).version; /** * A class for tracing and recording client-side metrics related to Bigtable operations. 
*/ -export class MetricsCollector { +export class OperationMetricsCollector { private operationStartTime: DateLike | null; private attemptStartTime: DateLike | null; private zone: string | undefined; diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 89c9774c5..2894aa90f 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -17,11 +17,8 @@ import {TestDateProvider} from '../../common/test-date-provider'; import * as assert from 'assert'; import * as fs from 'fs'; import {TestMetricsHandler} from '../../common/test-metrics-handler'; -import {MetricsCollector} from '../../src/client-side-metrics/metrics-collector'; -import { - AttemptStatus, - FinalOperationStatus, -} from '../../common/client-side-metrics-attributes'; +import {OperationMetricsCollector} from '../../src/client-side-metrics/operation-metrics-collector'; +import {AttemptStatus, FinalOperationStatus, MethodName,} from '../../common/client-side-metrics-attributes'; /** * A basic logger class that stores log messages in an array. Useful for testing. @@ -107,10 +104,10 @@ describe('Bigtable/MetricsCollector', () => { options: {}, }, }; - const metricsCollector = new MetricsCollector( + const metricsCollector = new OperationMetricsCollector( this, metricsHandlers, - 'fakeMethod', + MethodName.READ_ROWS, projectId, new TestDateProvider(logger) ); From edfcf8aef02b5299d11b3d72f7e643861b31a3d8 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 28 Jan 2025 11:49:10 -0500 Subject: [PATCH 114/448] Rename the method to getOperationAttributes --- .../operation-metrics-collector.ts | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 0e38c4fc4..54a7a21ce 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -147,14 +147,16 @@ export class OperationMetricsCollector { } /** - * Assembles the attributes for operation latency metrics. These attributes - * provide context about the Bigtable environment, the operation being performed, and the final status of the operation. - * Includes whether the operation was a streaming operation or not. + * Assembles the attributes for an entire operation. These attributes + * provide context about the Bigtable environment, the operation being + * performed, and the final status of the operation. Includes whether the + * operation was a streaming operation or not. + * * @param {string} projectId The Google Cloud project ID. * @param {OperationOnlyAttributes} operationOnlyAttributes The attributes of the operation. * @returns {OnOperationCompleteAttributes} An object containing the attributes for operation latency metrics. */ - private getOperationLatencyAttributes( + private getOperationAttributes( projectId: string, operationOnlyAttributes: OperationOnlyAttributes ): OnOperationCompleteAttributes { @@ -255,7 +257,7 @@ export class OperationMetricsCollector { const totalTime = endTime.getTime() - this.operationStartTime.getTime(); { // This block records operation latency metrics. 
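The getOperationAttributes and getAttemptAttributes helpers renamed above assemble
the full attribute set by copying the shared per-client attributes onto the
per-call object. A small sketch of that merge, assuming the attribute types are
importable from common/client-side-metrics-attributes; one design note is that
Object.assign writes into its first argument, so the caller's info object is
mutated, whereas a spread into a fresh object would leave it intact:

import {
  OnOperationCompleteAttributes,
  OperationOnlyAttributes,
} from './common/client-side-metrics-attributes'; // assumed path

declare const operationOnly: OperationOnlyAttributes;
declare const basicAttributes: Omit<
  OnOperationCompleteAttributes,
  keyof OperationOnlyAttributes
>;

// As written in the patch: the basics are copied onto the operation-scoped
// object, so operationOnly itself now also carries projectId, table, etc.
const merged = Object.assign(operationOnly, basicAttributes);

// A non-mutating equivalent, if callers should keep their object intact:
const mergedCopy: OnOperationCompleteAttributes = {
  ...operationOnly,
  ...basicAttributes,
};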
- const operationLatencyAttributes = this.getOperationLatencyAttributes( + const operationLatencyAttributes = this.getOperationAttributes( projectId, info ); From bc4998f37b1d272c225cb1bec032c77b55842562 Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Tue, 28 Jan 2025 16:49:34 +0000 Subject: [PATCH 115/448] =?UTF-8?q?=F0=9F=A6=89=20Updates=20from=20OwlBot?= =?UTF-8?q?=20post-processor?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --- test/metrics-collector/metrics-collector.ts | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 2894aa90f..9009f477b 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -18,7 +18,11 @@ import * as assert from 'assert'; import * as fs from 'fs'; import {TestMetricsHandler} from '../../common/test-metrics-handler'; import {OperationMetricsCollector} from '../../src/client-side-metrics/operation-metrics-collector'; -import {AttemptStatus, FinalOperationStatus, MethodName,} from '../../common/client-side-metrics-attributes'; +import { + AttemptStatus, + FinalOperationStatus, + MethodName, +} from '../../common/client-side-metrics-attributes'; /** * A basic logger class that stores log messages in an array. Useful for testing. From 10b72ec98471f0bf9cb5a422eea6889a02ac3528 Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Tue, 28 Jan 2025 16:53:57 +0000 Subject: [PATCH 116/448] =?UTF-8?q?=F0=9F=A6=89=20Updates=20from=20OwlBot?= =?UTF-8?q?=20post-processor?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --- test/metrics-collector/metrics-collector.ts | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 2894aa90f..9009f477b 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -18,7 +18,11 @@ import * as assert from 'assert'; import * as fs from 'fs'; import {TestMetricsHandler} from '../../common/test-metrics-handler'; import {OperationMetricsCollector} from '../../src/client-side-metrics/operation-metrics-collector'; -import {AttemptStatus, FinalOperationStatus, MethodName,} from '../../common/client-side-metrics-attributes'; +import { + AttemptStatus, + FinalOperationStatus, + MethodName, +} from '../../common/client-side-metrics-attributes'; /** * A basic logger class that stores log messages in an array. Useful for testing. 
From 47fd9d03af168d038ea2afbcc9ab05090a8ea812 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 28 Jan 2025 14:58:42 -0500 Subject: [PATCH 117/448] Add aggregate views to the GCP metrics handler --- .../gcp-metrics-handler.ts | 29 +++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 7e2945a48..d0163b3d9 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -24,7 +24,10 @@ import { OnAttemptCompleteAttributes, OnOperationCompleteAttributes, } from '../../common/client-side-metrics-attributes'; +import {View} from '@opentelemetry/sdk-metrics'; const { + Aggregation, + ExplicitBucketHistogramAggregation, MeterProvider, Histogram, Counter, @@ -64,9 +67,31 @@ export class GCPMetricsHandler implements IMetricsHandler { private initialize(projectId?: string) { if (!this.initialized) { this.initialized = true; - // Use MeterProvider provided by user - // If MeterProvider was not provided then use the default meter provider. + const sumAggregation = Aggregation.Sum(); + const histogramAggregation = new ExplicitBucketHistogramAggregation([ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, + 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, + 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, + ]); + const viewList = [ + 'operation_latencies', + 'first_response_latencies', + 'attempt_latencies', + 'retry_count', + 'server_latencies', + 'connectivity_error_count', + 'application_latencies', + 'throttling_latencies', + ].map( + name => + new View({ + instrumentName: name, + name, + aggregation: name.slice(-9) ? sumAggregation : histogramAggregation, + }) + ); const meterProvider = new MeterProvider({ + views: viewList, resource: new Resources.Resource({ 'service.name': 'bigtable-metrics', }).merge(new ResourceUtil.GcpDetectorSync().detect()), From 9073f07182e362174ad0163e253b14042c143be7 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 28 Jan 2025 15:44:15 -0500 Subject: [PATCH 118/448] Adjust test based on enum changes --- test/metrics-collector/typical-method-call.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index c883357da..b51e98331 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -13,7 +13,7 @@ getDate call returns 4000 ms getDate call returns 5000 ms Recording parameters for onAttemptComplete: metrics: {"attemptLatency":3000,"serverLatency":101,"connectivityErrorCount":1,"firstResponseLatency":1000} -attributes: {"finalOperationStatus":"ERROR","streamingOperation":"YES","attemptStatus":"ERROR","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} +attributes: {"finalOperationStatus":"ERROR","streamingOperation":true,"attemptStatus":"ERROR","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} 9. After a timeout, the second attempt is made. getDate call returns 6000 ms 10. Client receives status information. 
@@ -28,8 +28,8 @@ getDate call returns 8000 ms getDate call returns 9000 ms Recording parameters for onAttemptComplete: metrics: {"attemptLatency":3000,"serverLatency":103,"connectivityErrorCount":1,"firstResponseLatency":1000} -attributes: {"finalOperationStatus":"ERROR","attemptStatus":"OK","streamingOperation":"YES","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} +attributes: {"finalOperationStatus":"ERROR","attemptStatus":"OK","streamingOperation":true,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} getDate call returns 10000 ms Recording parameters for onOperationComplete: metrics: {"operationLatency":9000,"retryCount":1} -attributes: {"finalOperationStatus":"OK","streamingOperation":"YES","projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} +attributes: {"finalOperationStatus":"OK","streamingOperation":true,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} From 32d3983c12b6feae22eb93121ffb2b159805ee19 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 28 Jan 2025 16:37:31 -0500 Subject: [PATCH 119/448] Update the documentation to be more descriptive --- .../operation-metrics-collector.ts | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 54a7a21ce..98ada061d 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -154,7 +154,8 @@ export class OperationMetricsCollector { * * @param {string} projectId The Google Cloud project ID. * @param {OperationOnlyAttributes} operationOnlyAttributes The attributes of the operation. - * @returns {OnOperationCompleteAttributes} An object containing the attributes for operation latency metrics. + * @returns {OnOperationCompleteAttributes} An object containing the attributes + * for operation latency metrics. */ private getOperationAttributes( projectId: string, @@ -168,11 +169,13 @@ export class OperationMetricsCollector { /** * Assembles the attributes for attempt metrics. These attributes provide context - * about the Bigtable environment, the operation being performed, and the status of the attempt. - * Includes whether the operation was a streaming operation or not. + * about the Bigtable environment, the operation being performed, the status + * of the attempt and whether the operation was a streaming operation or not. + * * @param {string} projectId The Google Cloud project ID. * @param {AttemptOnlyAttributes} attemptOnlyAttributes The attributes of the attempt. - * @returns {OnAttemptCompleteAttributes} An object containing the attributes for attempt metrics. + * @returns {OnAttemptCompleteAttributes} The attributes all metrics recorded + * in the onAttemptComplete handler. 
*/ private getAttemptAttributes( projectId: string, From 9716c4a097b81a432be7bab64b5503eaab2c7f3d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 29 Jan 2025 10:52:12 -0500 Subject: [PATCH 120/448] Add the state machine to the metrics collector --- .../operation-metrics-collector.ts | 144 ++++++++++++------ 1 file changed, 97 insertions(+), 47 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 98ada061d..9e63e3d3a 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -75,10 +75,29 @@ export interface ITabularApiSurface { const packageJSON = fs.readFileSync('package.json'); const version = JSON.parse(packageJSON.toString()).version; +// MetricsCollectorState is a list of states that the metrics collector can be in. +// Tracking the OperationMetricsCollector state is done so that the +// OperationMetricsCollector methods are not called in the wrong order. If the +// methods are called in the wrong order they will not execute and they will +// throw warnings. +// +// The following state transitions are allowed: +// OPERATION_NOT_STARTED -> OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS +// OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS -> OPERATION_STARTED_ATTEMPT_IN_PROGRESS +// OPERATION_STARTED_ATTEMPT_IN_PROGRESS -> OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS +// OPERATION_STARTED_ATTEMPT_IN_PROGRESS -> OPERATION_COMPLETE +enum MetricsCollectorState { + OPERATION_NOT_STARTED, + OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS, + OPERATION_STARTED_ATTEMPT_IN_PROGRESS, + OPERATION_COMPLETE, +} + /** * A class for tracing and recording client-side metrics related to Bigtable operations. */ export class OperationMetricsCollector { + private state: MetricsCollectorState; private operationStartTime: DateLike | null; private attemptStartTime: DateLike | null; private zone: string | undefined; @@ -108,6 +127,7 @@ export class OperationMetricsCollector { projectId?: string, dateProvider?: DateProvider ) { + this.state = MetricsCollectorState.OPERATION_NOT_STARTED; this.zone = undefined; this.cluster = undefined; this.tabularApiSurface = tabularApiSurface; @@ -191,7 +211,13 @@ export class OperationMetricsCollector { * Called when the operation starts. Records the start time. */ onOperationStart() { - this.operationStartTime = this.dateProvider.getDate(); + if (this.state === MetricsCollectorState.OPERATION_NOT_STARTED) { + this.operationStartTime = this.dateProvider.getDate(); + this.state = + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS; + } else { + console.warn('Invalid state transition'); + } } /** @@ -199,25 +225,33 @@ export class OperationMetricsCollector { * @param {AttemptOnlyAttributes} info Information about the completed attempt. */ onAttemptComplete(info: OnAttemptCompleteInfo) { - this.attemptCount++; - const endTime = this.dateProvider.getDate(); - const projectId = this.projectId; - if (projectId && this.attemptStartTime) { - const attributes = this.getAttemptAttributes(projectId, info); - const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); - this.metricsHandlers.forEach(metricsHandler => { - if (metricsHandler.onAttemptComplete) { - metricsHandler.onAttemptComplete( - { - attemptLatency: totalTime, - serverLatency: this.serverTime ?? undefined, - connectivityErrorCount: info.connectivityErrorCount, - firstResponseLatency: this.firstResponseLatency ?? 
undefined, - }, - attributes - ); - } - }); + if ( + this.state === MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS + ) { + this.state = + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS; + this.attemptCount++; + const endTime = this.dateProvider.getDate(); + const projectId = this.projectId; + if (projectId && this.attemptStartTime) { + const attributes = this.getAttemptAttributes(projectId, info); + const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); + this.metricsHandlers.forEach(metricsHandler => { + if (metricsHandler.onAttemptComplete) { + metricsHandler.onAttemptComplete( + { + attemptLatency: totalTime, + serverLatency: this.serverTime ?? undefined, + connectivityErrorCount: info.connectivityErrorCount, + firstResponseLatency: this.firstResponseLatency ?? undefined, + }, + attributes + ); + } + }); + } + } else { + console.warn('Invalid state transition attempted'); } } @@ -225,11 +259,19 @@ export class OperationMetricsCollector { * Called when a new attempt starts. Records the start time of the attempt. */ onAttemptStart() { - this.attemptStartTime = this.dateProvider.getDate(); - this.serverTime = null; - this.serverTimeRead = false; - this.firstResponseLatency = null; - this.receivedFirstResponse = false; + if ( + this.state === + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS + ) { + this.state = MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS; + this.attemptStartTime = this.dateProvider.getDate(); + this.serverTime = null; + this.serverTimeRead = false; + this.firstResponseLatency = null; + this.receivedFirstResponse = false; + } else { + console.warn('Invalid state transition attempted'); + } } /** @@ -254,29 +296,37 @@ export class OperationMetricsCollector { * @param {OperationOnlyAttributes} info Information about the completed operation. */ onOperationComplete(info: OperationOnlyAttributes) { - const endTime = this.dateProvider.getDate(); - const projectId = this.projectId; - if (projectId && this.operationStartTime) { - const totalTime = endTime.getTime() - this.operationStartTime.getTime(); - { - // This block records operation latency metrics. - const operationLatencyAttributes = this.getOperationAttributes( - projectId, - info - ); - const metrics = { - operationLatency: totalTime, - retryCount: this.attemptCount - 1, - }; - this.metricsHandlers.forEach(metricsHandler => { - if (metricsHandler.onOperationComplete) { - metricsHandler.onOperationComplete( - metrics, - operationLatencyAttributes - ); - } - }); + if ( + this.state === + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS + ) { + this.state = MetricsCollectorState.OPERATION_COMPLETE; + const endTime = this.dateProvider.getDate(); + const projectId = this.projectId; + if (projectId && this.operationStartTime) { + const totalTime = endTime.getTime() - this.operationStartTime.getTime(); + { + // This block records operation latency metrics. 
+ const operationLatencyAttributes = this.getOperationAttributes( + projectId, + info + ); + const metrics = { + operationLatency: totalTime, + retryCount: this.attemptCount - 1, + }; + this.metricsHandlers.forEach(metricsHandler => { + if (metricsHandler.onOperationComplete) { + metricsHandler.onOperationComplete( + metrics, + operationLatencyAttributes + ); + } + }); + } } + } else { + console.warn('Invalid state transition attempted'); } } From d2b93ee8260efa1b1dc2b9d9d26be10f44ab566a Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 29 Jan 2025 11:53:19 -0500 Subject: [PATCH 121/448] Use grpc code to report attempt/operation status --- common/client-side-metrics-attributes.ts | 12 ++++-------- test/metrics-collector/metrics-collector.ts | 11 ++++++----- test/metrics-collector/typical-method-call.txt | 6 +++--- 3 files changed, 13 insertions(+), 16 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index 8cbfdcfec..7da3d31b8 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +import {grpc} from 'google-gax'; + interface StandardAttributes { projectId: string; instanceId: string; @@ -52,15 +54,9 @@ export interface AttemptOnlyAttributes { attemptStatus: AttemptStatus; } -export enum FinalOperationStatus { - OK = 'OK', - ERROR = 'ERROR', -} +export type FinalOperationStatus = grpc.status; -export enum AttemptStatus { - OK = 'OK', - ERROR = 'ERROR', -} +export type AttemptStatus = grpc.status; export interface OnOperationCompleteAttributes extends StandardAttributes, diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 9009f477b..24ffa2645 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -23,6 +23,7 @@ import { FinalOperationStatus, MethodName, } from '../../common/client-side-metrics-attributes'; +import {grpc} from 'google-gax'; /** * A basic logger class that stores log messages in an array. Useful for testing. @@ -134,9 +135,9 @@ describe('Bigtable/MetricsCollector', () => { metricsCollector.onResponse(); logger.log('8. A transient error occurs.'); metricsCollector.onAttemptComplete({ - finalOperationStatus: FinalOperationStatus.ERROR, + finalOperationStatus: grpc.status.DEADLINE_EXCEEDED, streamingOperation: true, - attemptStatus: AttemptStatus.ERROR, + attemptStatus: grpc.status.DEADLINE_EXCEEDED, connectivityErrorCount: 1, }); logger.log('9. After a timeout, the second attempt is made.'); @@ -154,13 +155,13 @@ describe('Bigtable/MetricsCollector', () => { logger.log('15. User reads row 1'); logger.log('16. 
Stream ends, operation completes'); metricsCollector.onAttemptComplete({ - finalOperationStatus: FinalOperationStatus.ERROR, - attemptStatus: AttemptStatus.OK, + finalOperationStatus: grpc.status.OK, + attemptStatus: grpc.status.OK, streamingOperation: true, connectivityErrorCount: 1, }); metricsCollector.onOperationComplete({ - finalOperationStatus: FinalOperationStatus.OK, + finalOperationStatus: grpc.status.OK, streamingOperation: true, }); resolve(); diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index b51e98331..abf8de579 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -13,7 +13,7 @@ getDate call returns 4000 ms getDate call returns 5000 ms Recording parameters for onAttemptComplete: metrics: {"attemptLatency":3000,"serverLatency":101,"connectivityErrorCount":1,"firstResponseLatency":1000} -attributes: {"finalOperationStatus":"ERROR","streamingOperation":true,"attemptStatus":"ERROR","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} +attributes: {"finalOperationStatus":4,"streamingOperation":true,"attemptStatus":4,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} 9. After a timeout, the second attempt is made. getDate call returns 6000 ms 10. Client receives status information. @@ -28,8 +28,8 @@ getDate call returns 8000 ms getDate call returns 9000 ms Recording parameters for onAttemptComplete: metrics: {"attemptLatency":3000,"serverLatency":103,"connectivityErrorCount":1,"firstResponseLatency":1000} -attributes: {"finalOperationStatus":"ERROR","attemptStatus":"OK","streamingOperation":true,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} +attributes: {"finalOperationStatus":0,"attemptStatus":0,"streamingOperation":true,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} getDate call returns 10000 ms Recording parameters for onOperationComplete: metrics: {"operationLatency":9000,"retryCount":1} -attributes: {"finalOperationStatus":"OK","streamingOperation":true,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} +attributes: {"finalOperationStatus":0,"streamingOperation":true,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} From 99f95778ddf2dc5a5875c640a5bd6a448a1f9955 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 29 Jan 2025 13:13:55 -0500 Subject: [PATCH 122/448] Remove parameters from JS Documentation --- src/client-side-metrics/operation-metrics-collector.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 9e63e3d3a..413d3b087 100644 --- 
a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -276,7 +276,6 @@ export class OperationMetricsCollector { /** * Called when the first response is received. Records first response latencies. - * @param {string} finalOperationStatus The final status of the operation. */ onResponse() { const endTime = this.dateProvider.getDate(); @@ -332,7 +331,6 @@ export class OperationMetricsCollector { /** * Called when metadata is received. Extracts server timing information if available. - * @param {AttemptOnlyAttributes} info Information about the completed attempt. * @param {object} metadata The received metadata. */ onMetadataReceived(metadata: { From c82e72dc1493b9df8e357491ed61202224bb9618 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 30 Jan 2025 11:47:45 -0500 Subject: [PATCH 123/448] Update interfaces and some metrics - Introduce attributes interface for each metric. - First latency should be recorded per operation not per attempt - Update tests for firstResponseLatency change --- common/client-side-metrics-attributes.ts | 73 ++++++++++++++----- .../gcp-metrics-handler.ts | 8 +- src/client-side-metrics/metrics-handler.ts | 2 +- .../operation-metrics-collector.ts | 8 +- .../metrics-collector/typical-method-call.txt | 6 +- 5 files changed, 65 insertions(+), 32 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index 7da3d31b8..86a70ef68 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -25,6 +25,42 @@ interface StandardAttributes { clientName: string; } +interface OperationLatencyAttributes extends StandardAttributes { + finalOperationStatus: FinalOperationStatus; + StreamingOperation: boolean; +} + +interface AttemptLatencyAttributes extends StandardAttributes { + attemptStatus: AttemptStatus; + streamingOperation: boolean; +} + +interface RetryCountAttributes extends StandardAttributes { + finalOperationStatus: FinalOperationStatus; +} + +type ApplicationBlockingLatenciesAttributes = StandardAttributes; + +interface FirstResponseLatencyAttributes extends StandardAttributes { + finalOperationStatus: FinalOperationStatus; +} + +interface ServerLatenciesAttributes extends StandardAttributes { + attemptStatus: AttemptStatus; + streamingOperation: boolean; +} + +interface ConnectivityErrorCountAttributes extends StandardAttributes { + attemptStatus: AttemptStatus; +} + +type ClientBlockingLatenciesAttributes = StandardAttributes; + +export interface AttemptOnlyAttributes { + attemptStatus: AttemptStatus; + streamingOperation: boolean; +} + /** * Information about a Bigtable operation. */ @@ -36,10 +72,23 @@ export interface OperationOnlyAttributes { streamingOperation: boolean; } -/** - * Information about a single attempt of a Bigtable operation. - */ -export interface AttemptOnlyAttributes { +export type FinalOperationStatus = grpc.status; + +export type AttemptStatus = grpc.status; + +export type OnOperationCompleteAttributes = + | OperationLatencyAttributes + | FirstResponseLatencyAttributes + | RetryCountAttributes; + +export type OnAttemptCompleteAttributes = + | AttemptLatencyAttributes + | ConnectivityErrorCountAttributes + | ServerLatenciesAttributes + | ClientBlockingLatenciesAttributes; + +export interface OnAttemptCompleteInfo { + connectivityErrorCount: number; /** * The final status of the operation (e.g., 'OK', 'ERROR'). 
*/ @@ -54,22 +103,6 @@ export interface AttemptOnlyAttributes { attemptStatus: AttemptStatus; } -export type FinalOperationStatus = grpc.status; - -export type AttemptStatus = grpc.status; - -export interface OnOperationCompleteAttributes - extends StandardAttributes, - OperationOnlyAttributes {} - -export interface OnAttemptCompleteAttributes - extends StandardAttributes, - AttemptOnlyAttributes {} - -export interface OnAttemptCompleteInfo extends AttemptOnlyAttributes { - connectivityErrorCount: number; -} - export enum MethodName { READ_ROWS = 'readRows', MUTATE_ROW = 'mutateRow', diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index d0163b3d9..e1a462bb9 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -177,6 +177,10 @@ export class GCPMetricsHandler implements IMetricsHandler { attributes ); this.otelMetrics?.retryCount.add(metrics.retryCount, attributes); + this.otelMetrics?.firstResponseLatencies.record( + metrics.firstResponseLatency, + attributes + ); } /** @@ -200,9 +204,5 @@ export class GCPMetricsHandler implements IMetricsHandler { attributes ); this.otelMetrics?.serverLatencies.record(metrics.serverLatency, attributes); - this.otelMetrics?.firstResponseLatencies.record( - metrics.firstResponseLatency, - attributes - ); } } diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index acc2b88af..051b65394 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -27,6 +27,7 @@ import { * Metrics related to the completion of a Bigtable operation. */ export interface OnOperationCompleteMetrics { + firstResponseLatency?: number; operationLatency: number; retryCount?: number; } @@ -37,7 +38,6 @@ export interface OnOperationCompleteMetrics { export interface OnAttemptCompleteMetrics { attemptLatency: number; serverLatency?: number; - firstResponseLatency?: number; connectivityErrorCount: number; } diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 413d3b087..9a1b10568 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -213,6 +213,8 @@ export class OperationMetricsCollector { onOperationStart() { if (this.state === MetricsCollectorState.OPERATION_NOT_STARTED) { this.operationStartTime = this.dateProvider.getDate(); + this.firstResponseLatency = null; + this.receivedFirstResponse = false; this.state = MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS; } else { @@ -222,7 +224,7 @@ export class OperationMetricsCollector { /** * Called when an attempt (e.g., an RPC attempt) completes. Records attempt latencies. - * @param {AttemptOnlyAttributes} info Information about the completed attempt. + * @param {OnAttemptCompleteInfo} info Information about the completed attempt. */ onAttemptComplete(info: OnAttemptCompleteInfo) { if ( @@ -243,7 +245,6 @@ export class OperationMetricsCollector { attemptLatency: totalTime, serverLatency: this.serverTime ?? undefined, connectivityErrorCount: info.connectivityErrorCount, - firstResponseLatency: this.firstResponseLatency ?? 
undefined, }, attributes ); @@ -267,8 +268,6 @@ export class OperationMetricsCollector { this.attemptStartTime = this.dateProvider.getDate(); this.serverTime = null; this.serverTimeRead = false; - this.firstResponseLatency = null; - this.receivedFirstResponse = false; } else { console.warn('Invalid state transition attempted'); } @@ -313,6 +312,7 @@ export class OperationMetricsCollector { const metrics = { operationLatency: totalTime, retryCount: this.attemptCount - 1, + firstResponseLatency: this.firstResponseLatency ?? undefined, }; this.metricsHandlers.forEach(metricsHandler => { if (metricsHandler.onOperationComplete) { diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index abf8de579..d8f9a142a 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -12,7 +12,7 @@ getDate call returns 4000 ms 8. A transient error occurs. getDate call returns 5000 ms Recording parameters for onAttemptComplete: -metrics: {"attemptLatency":3000,"serverLatency":101,"connectivityErrorCount":1,"firstResponseLatency":1000} +metrics: {"attemptLatency":3000,"serverLatency":101,"connectivityErrorCount":1} attributes: {"finalOperationStatus":4,"streamingOperation":true,"attemptStatus":4,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} 9. After a timeout, the second attempt is made. getDate call returns 6000 ms @@ -27,9 +27,9 @@ getDate call returns 8000 ms 16. Stream ends, operation completes getDate call returns 9000 ms Recording parameters for onAttemptComplete: -metrics: {"attemptLatency":3000,"serverLatency":103,"connectivityErrorCount":1,"firstResponseLatency":1000} +metrics: {"attemptLatency":3000,"serverLatency":103,"connectivityErrorCount":1} attributes: {"finalOperationStatus":0,"attemptStatus":0,"streamingOperation":true,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} getDate call returns 10000 ms Recording parameters for onOperationComplete: -metrics: {"operationLatency":9000,"retryCount":1} +metrics: {"operationLatency":9000,"retryCount":1,"firstResponseLatency":1000} attributes: {"finalOperationStatus":0,"streamingOperation":true,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} From 759e8292960e3beb01239643c377dde47c7f6775 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 30 Jan 2025 12:06:46 -0500 Subject: [PATCH 124/448] Documentation for all the different interfaces --- common/client-side-metrics-attributes.ts | 72 ++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index 86a70ef68..84cdf9c74 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -14,6 +14,10 @@ import {grpc} from 'google-gax'; +/** + * Standard attributes common to various Bigtable client-side metrics. These attributes provide + * contextual information about the Bigtable environment and operation. 
+ */ interface StandardAttributes { projectId: string; instanceId: string; @@ -25,37 +29,73 @@ interface StandardAttributes { clientName: string; } +/** + * Attributes associated with operation latency metrics for Bigtable client operations. + * These attributes provide context about the Bigtable environment and the completed operation. + */ interface OperationLatencyAttributes extends StandardAttributes { finalOperationStatus: FinalOperationStatus; StreamingOperation: boolean; } +/** + * Attributes associated with attempt latency metrics for Bigtable client operations. + * These attributes provide context about the Bigtable environment, the specific attempt, and whether the operation was streaming. + */ interface AttemptLatencyAttributes extends StandardAttributes { attemptStatus: AttemptStatus; streamingOperation: boolean; } +/** + * Attributes associated with retry count metrics for Bigtable client operations. These attributes + * provide context about the Bigtable environment and the final status of the operation. + */ interface RetryCountAttributes extends StandardAttributes { finalOperationStatus: FinalOperationStatus; } +/** + * Attributes associated with application blocking latencies for Bigtable client operations. + * These attributes provide context about the Bigtable environment and the operation being performed. + */ type ApplicationBlockingLatenciesAttributes = StandardAttributes; +/** + * Attributes associated with first response latency metrics for Bigtable client operations. + * These attributes provide context about the Bigtable environment and the final status of the operation. + */ interface FirstResponseLatencyAttributes extends StandardAttributes { finalOperationStatus: FinalOperationStatus; } +/** + * Attributes associated with server latency metrics for Bigtable client operations. + * These attributes provide context about the Bigtable environment, the specific attempt, and whether the operation was streaming. + */ interface ServerLatenciesAttributes extends StandardAttributes { attemptStatus: AttemptStatus; streamingOperation: boolean; } +/** + * Attributes associated with connectivity error count metrics for Bigtable client operations. + * These attributes provide context about the Bigtable environment and the status of the attempt. + */ interface ConnectivityErrorCountAttributes extends StandardAttributes { attemptStatus: AttemptStatus; } +/** + * Attributes associated with client blocking latencies for Bigtable client operations. + * These attributes provide context about the Bigtable environment and the operation being performed. + */ type ClientBlockingLatenciesAttributes = StandardAttributes; +/** + * Attributes specific to a single attempt of a Bigtable operation. These attributes + * provide information about the attempt's status and whether it was part of a streaming operation. + */ export interface AttemptOnlyAttributes { attemptStatus: AttemptStatus; streamingOperation: boolean; @@ -72,21 +112,49 @@ export interface OperationOnlyAttributes { streamingOperation: boolean; } +/** + * The final status of a Bigtable operation. This represents the ultimate result + * of the operation, regardless of individual attempt statuses. It's represented + * as a gRPC status code. See the `google-gax` library's documentation on + * gRPC status codes for more information on specific values. + */ export type FinalOperationStatus = grpc.status; +/** + * The status of a single attempt of a Bigtable operation. This is represented as a + * gRPC status code. 
See the `google-gax` library's documentation on gRPC status + * codes for more information on specific values. + */ export type AttemptStatus = grpc.status; +/** + * Attributes associated with the completion of a Bigtable operation. These + * attributes provide context about the Bigtable environment, the completed + * operation, and its final status. They are used for recording metrics such as + * operation latency, first response latency, and retry count. + */ export type OnOperationCompleteAttributes = | OperationLatencyAttributes | FirstResponseLatencyAttributes | RetryCountAttributes; +/** + * Attributes associated with the completion of a single attempt of a Bigtable + * operation. These attributes provide context about the Bigtable environment, + * the specific attempt, its status, and whether the operation was streaming. They + * are used for recording metrics such as attempt latency, server latency, and + * connectivity errors. + */ export type OnAttemptCompleteAttributes = | AttemptLatencyAttributes | ConnectivityErrorCountAttributes | ServerLatenciesAttributes | ClientBlockingLatenciesAttributes; +/** + * Information about the completion of a single attempt of a Bigtable operation. + * This information is used for recording metrics. + */ export interface OnAttemptCompleteInfo { connectivityErrorCount: number; /** @@ -103,6 +171,10 @@ export interface OnAttemptCompleteInfo { attemptStatus: AttemptStatus; } +/** + * Represents the names of Bigtable methods. These are used as attributes for + * metrics, allowing for differentiation of performance by method. + */ export enum MethodName { READ_ROWS = 'readRows', MUTATE_ROW = 'mutateRow', From 76b6f5af37049c17cdbdd83cc3a3c7a98e2b81ae Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 30 Jan 2025 13:23:55 -0500 Subject: [PATCH 125/448] use operation start time as the benchmark --- src/client-side-metrics/operation-metrics-collector.ts | 4 ++-- test/metrics-collector/typical-method-call.txt | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 9a1b10568..cc9d10d3f 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -279,8 +279,8 @@ export class OperationMetricsCollector { onResponse() { const endTime = this.dateProvider.getDate(); const projectId = this.projectId; - if (projectId && this.attemptStartTime) { - const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); + if (projectId && this.operationStartTime) { + const totalTime = endTime.getTime() - this.operationStartTime.getTime(); if (!this.receivedFirstResponse) { this.receivedFirstResponse = true; this.firstResponseLatency = totalTime; diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index d8f9a142a..f261fcf55 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -31,5 +31,5 @@ metrics: {"attemptLatency":3000,"serverLatency":103,"connectivityErrorCount":1} attributes: {"finalOperationStatus":0,"attemptStatus":0,"streamingOperation":true,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} getDate call returns 10000 ms Recording parameters for onOperationComplete: -metrics: 
{"operationLatency":9000,"retryCount":1,"firstResponseLatency":1000} +metrics: {"operationLatency":9000,"retryCount":1,"firstResponseLatency":2000} attributes: {"finalOperationStatus":0,"streamingOperation":true,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} From 1e840a4ae5fbd08cd514fdef0f6bbd125dc96afa Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 30 Jan 2025 14:04:06 -0500 Subject: [PATCH 126/448] =?UTF-8?q?Final=20operation=20status=20shouldn?= =?UTF-8?q?=E2=80=99t=20be=20included=20per=20a?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- common/client-side-metrics-attributes.ts | 4 ---- test/metrics-collector/metrics-collector.ts | 2 -- test/metrics-collector/typical-method-call.txt | 4 ++-- 3 files changed, 2 insertions(+), 8 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index 84cdf9c74..d0cd2022c 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -157,10 +157,6 @@ export type OnAttemptCompleteAttributes = */ export interface OnAttemptCompleteInfo { connectivityErrorCount: number; - /** - * The final status of the operation (e.g., 'OK', 'ERROR'). - */ - finalOperationStatus: FinalOperationStatus; /** * Whether the operation is a streaming operation or not. */ diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 24ffa2645..342648163 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -135,7 +135,6 @@ describe('Bigtable/MetricsCollector', () => { metricsCollector.onResponse(); logger.log('8. A transient error occurs.'); metricsCollector.onAttemptComplete({ - finalOperationStatus: grpc.status.DEADLINE_EXCEEDED, streamingOperation: true, attemptStatus: grpc.status.DEADLINE_EXCEEDED, connectivityErrorCount: 1, @@ -155,7 +154,6 @@ describe('Bigtable/MetricsCollector', () => { logger.log('15. User reads row 1'); logger.log('16. Stream ends, operation completes'); metricsCollector.onAttemptComplete({ - finalOperationStatus: grpc.status.OK, attemptStatus: grpc.status.OK, streamingOperation: true, connectivityErrorCount: 1, diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index f261fcf55..4014b0125 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -13,7 +13,7 @@ getDate call returns 4000 ms getDate call returns 5000 ms Recording parameters for onAttemptComplete: metrics: {"attemptLatency":3000,"serverLatency":101,"connectivityErrorCount":1} -attributes: {"finalOperationStatus":4,"streamingOperation":true,"attemptStatus":4,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} +attributes: {"streamingOperation":true,"attemptStatus":4,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} 9. After a timeout, the second attempt is made. getDate call returns 6000 ms 10. Client receives status information. 
@@ -28,7 +28,7 @@ getDate call returns 8000 ms getDate call returns 9000 ms Recording parameters for onAttemptComplete: metrics: {"attemptLatency":3000,"serverLatency":103,"connectivityErrorCount":1} -attributes: {"finalOperationStatus":0,"attemptStatus":0,"streamingOperation":true,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} +attributes: {"attemptStatus":0,"streamingOperation":true,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} getDate call returns 10000 ms Recording parameters for onOperationComplete: metrics: {"operationLatency":9000,"retryCount":1,"firstResponseLatency":2000} From 7bf62e91a171737ff9b048766624eec7125bfb7b Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 31 Jan 2025 11:20:04 -0500 Subject: [PATCH 127/448] Move OnAttemptCompleteInfo Remove unused imports too --- common/client-side-metrics-attributes.ts | 16 ---------------- .../operation-metrics-collector.ts | 18 +++++++++++++++++- test/metrics-collector/metrics-collector.ts | 6 +----- 3 files changed, 18 insertions(+), 22 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index d0cd2022c..d8b5acacf 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -151,22 +151,6 @@ export type OnAttemptCompleteAttributes = | ServerLatenciesAttributes | ClientBlockingLatenciesAttributes; -/** - * Information about the completion of a single attempt of a Bigtable operation. - * This information is used for recording metrics. - */ -export interface OnAttemptCompleteInfo { - connectivityErrorCount: number; - /** - * Whether the operation is a streaming operation or not. - */ - streamingOperation: boolean; - /** - * The attempt status of the operation. - */ - attemptStatus: AttemptStatus; -} - /** * Represents the names of Bigtable methods. These are used as attributes for * metrics, allowing for differentiation of performance by method. diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index cc9d10d3f..0b5a2c610 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -16,8 +16,8 @@ import * as fs from 'fs'; import {IMetricsHandler} from './metrics-handler'; import { AttemptOnlyAttributes, + AttemptStatus, MethodName, - OnAttemptCompleteInfo, OnOperationCompleteAttributes, OperationOnlyAttributes, } from '../../common/client-side-metrics-attributes'; @@ -72,6 +72,22 @@ export interface ITabularApiSurface { }; } +/** + * Information about the completion of a single attempt of a Bigtable operation. + * This information is used for recording metrics. + */ +interface OnAttemptCompleteInfo { + connectivityErrorCount: number; + /** + * Whether the operation is a streaming operation or not. + */ + streamingOperation: boolean; + /** + * The attempt status of the operation. 
+ */ + attemptStatus: AttemptStatus; +} + const packageJSON = fs.readFileSync('package.json'); const version = JSON.parse(packageJSON.toString()).version; diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 342648163..0d7901698 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -18,11 +18,7 @@ import * as assert from 'assert'; import * as fs from 'fs'; import {TestMetricsHandler} from '../../common/test-metrics-handler'; import {OperationMetricsCollector} from '../../src/client-side-metrics/operation-metrics-collector'; -import { - AttemptStatus, - FinalOperationStatus, - MethodName, -} from '../../common/client-side-metrics-attributes'; +import {MethodName} from '../../common/client-side-metrics-attributes'; import {grpc} from 'google-gax'; /** From fca55b7b95d0ef30bb5721fca42fb27cc871bd3b Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 31 Jan 2025 11:37:48 -0500 Subject: [PATCH 128/448] Provide AttemptOnlyAttributes in the only file In the only file that it is needed --- common/client-side-metrics-attributes.ts | 9 --------- src/client-side-metrics/operation-metrics-collector.ts | 10 +++++++++- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index d8b5acacf..4b5be0e18 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -92,15 +92,6 @@ interface ConnectivityErrorCountAttributes extends StandardAttributes { */ type ClientBlockingLatenciesAttributes = StandardAttributes; -/** - * Attributes specific to a single attempt of a Bigtable operation. These attributes - * provide information about the attempt's status and whether it was part of a streaming operation. - */ -export interface AttemptOnlyAttributes { - attemptStatus: AttemptStatus; - streamingOperation: boolean; -} - /** * Information about a Bigtable operation. */ diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 0b5a2c610..119e38ac1 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -15,7 +15,6 @@ import * as fs from 'fs'; import {IMetricsHandler} from './metrics-handler'; import { - AttemptOnlyAttributes, AttemptStatus, MethodName, OnOperationCompleteAttributes, @@ -88,6 +87,15 @@ interface OnAttemptCompleteInfo { attemptStatus: AttemptStatus; } +/** + * Attributes specific to a single attempt of a Bigtable operation. These attributes + * provide information about the attempt's status and whether it was part of a streaming operation. 
+ */ +interface AttemptOnlyAttributes { + attemptStatus: AttemptStatus; + streamingOperation: boolean; +} + const packageJSON = fs.readFileSync('package.json'); const version = JSON.parse(packageJSON.toString()).version; From 51afdce11ef0ce53753af0e7a86755bf124c2c3d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 31 Jan 2025 11:45:50 -0500 Subject: [PATCH 129/448] Move over the OperationOnlyAttributes --- common/client-side-metrics-attributes.ts | 11 ----------- .../operation-metrics-collector.ts | 15 ++++++++++++--- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index 4b5be0e18..64772019b 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -92,17 +92,6 @@ interface ConnectivityErrorCountAttributes extends StandardAttributes { */ type ClientBlockingLatenciesAttributes = StandardAttributes; -/** - * Information about a Bigtable operation. - */ -export interface OperationOnlyAttributes { - /** - * The final status of the operation (e.g., 'OK', 'ERROR'). - */ - finalOperationStatus: FinalOperationStatus; - streamingOperation: boolean; -} - /** * The final status of a Bigtable operation. This represents the ultimate result * of the operation, regardless of individual attempt statuses. It's represented diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 119e38ac1..0583d6c3f 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -16,9 +16,9 @@ import * as fs from 'fs'; import {IMetricsHandler} from './metrics-handler'; import { AttemptStatus, + FinalOperationStatus, MethodName, OnOperationCompleteAttributes, - OperationOnlyAttributes, } from '../../common/client-side-metrics-attributes'; /** @@ -88,14 +88,23 @@ interface OnAttemptCompleteInfo { } /** - * Attributes specific to a single attempt of a Bigtable operation. These attributes - * provide information about the attempt's status and whether it was part of a streaming operation. + * Attributes specific to a single attempt of a Bigtable operation. These + * attributes provide information about the attempt's status and whether it was + * part of a streaming operation. */ interface AttemptOnlyAttributes { attemptStatus: AttemptStatus; streamingOperation: boolean; } +/** + * Information about a Bigtable operation to be recorded in client side metrics. + */ +interface OperationOnlyAttributes { + finalOperationStatus: FinalOperationStatus; + streamingOperation: boolean; +} + const packageJSON = fs.readFileSync('package.json'); const version = JSON.parse(packageJSON.toString()).version; From 57b1dc17947a3714218a0f561103ea70b5d5635f Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 31 Jan 2025 13:47:33 -0500 Subject: [PATCH 130/448] Adjust the guard so that it is earlier --- .../operation-metrics-collector.ts | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 0583d6c3f..ddefa0564 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -310,13 +310,13 @@ export class OperationMetricsCollector { * Called when the first response is received. Records first response latencies. 
*/ onResponse() { - const endTime = this.dateProvider.getDate(); - const projectId = this.projectId; - if (projectId && this.operationStartTime) { - const totalTime = endTime.getTime() - this.operationStartTime.getTime(); - if (!this.receivedFirstResponse) { - this.receivedFirstResponse = true; - this.firstResponseLatency = totalTime; + if (!this.receivedFirstResponse) { + this.receivedFirstResponse = true; + const endTime = this.dateProvider.getDate(); + const projectId = this.projectId; + if (projectId && this.operationStartTime) { + this.firstResponseLatency = + endTime.getTime() - this.operationStartTime.getTime(); } } } From 0f850b79d6a827424e1978edf1c7df16c424227f Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 31 Jan 2025 14:11:03 -0500 Subject: [PATCH 131/448] Adjust the test output file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Dates don’t get read for rows after the first row anymore. --- test/metrics-collector/typical-method-call.txt | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index 4014b0125..d08628efa 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -8,28 +8,25 @@ getDate call returns 2000 ms getDate call returns 3000 ms 6. Client receives metadata. 7. Client receives second row. -getDate call returns 4000 ms 8. A transient error occurs. -getDate call returns 5000 ms +getDate call returns 4000 ms Recording parameters for onAttemptComplete: -metrics: {"attemptLatency":3000,"serverLatency":101,"connectivityErrorCount":1} +metrics: {"attemptLatency":2000,"serverLatency":101,"connectivityErrorCount":1} attributes: {"streamingOperation":true,"attemptStatus":4,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} 9. After a timeout, the second attempt is made. -getDate call returns 6000 ms +getDate call returns 5000 ms 10. Client receives status information. 11. Client receives metadata. 12. Client receives third row. -getDate call returns 7000 ms 13. Client receives metadata. 14. Client receives fourth row. -getDate call returns 8000 ms 15. User reads row 1 16. 
Stream ends, operation completes -getDate call returns 9000 ms +getDate call returns 6000 ms Recording parameters for onAttemptComplete: -metrics: {"attemptLatency":3000,"serverLatency":103,"connectivityErrorCount":1} +metrics: {"attemptLatency":1000,"serverLatency":103,"connectivityErrorCount":1} attributes: {"attemptStatus":0,"streamingOperation":true,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} -getDate call returns 10000 ms +getDate call returns 7000 ms Recording parameters for onOperationComplete: -metrics: {"operationLatency":9000,"retryCount":1,"firstResponseLatency":2000} +metrics: {"operationLatency":6000,"retryCount":1,"firstResponseLatency":2000} attributes: {"finalOperationStatus":0,"streamingOperation":true,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} From 6c1e01b0b9fb1f590c1945cf0ec64192679b9651 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 31 Jan 2025 14:37:14 -0500 Subject: [PATCH 132/448] Change streaming back to STREAMING/UNARY --- common/client-side-metrics-attributes.ts | 11 ++++++++--- .../operation-metrics-collector.ts | 7 ++++--- test/metrics-collector/metrics-collector.ts | 8 ++++---- test/metrics-collector/typical-method-call.txt | 6 +++--- 4 files changed, 19 insertions(+), 13 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index 64772019b..5f2adf7e2 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -29,13 +29,18 @@ interface StandardAttributes { clientName: string; } +export enum StreamingState { + STREAMING = 'streaming', + UNARY = 'unary', +} + /** * Attributes associated with operation latency metrics for Bigtable client operations. * These attributes provide context about the Bigtable environment and the completed operation. */ interface OperationLatencyAttributes extends StandardAttributes { finalOperationStatus: FinalOperationStatus; - StreamingOperation: boolean; + streamingOperation: StreamingState; } /** @@ -44,7 +49,7 @@ interface OperationLatencyAttributes extends StandardAttributes { */ interface AttemptLatencyAttributes extends StandardAttributes { attemptStatus: AttemptStatus; - streamingOperation: boolean; + streamingOperation: StreamingState; } /** @@ -75,7 +80,7 @@ interface FirstResponseLatencyAttributes extends StandardAttributes { */ interface ServerLatenciesAttributes extends StandardAttributes { attemptStatus: AttemptStatus; - streamingOperation: boolean; + streamingOperation: StreamingState; } /** diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index ddefa0564..e120e5cf9 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -19,6 +19,7 @@ import { FinalOperationStatus, MethodName, OnOperationCompleteAttributes, + StreamingState, } from '../../common/client-side-metrics-attributes'; /** @@ -80,7 +81,7 @@ interface OnAttemptCompleteInfo { /** * Whether the operation is a streaming operation or not. */ - streamingOperation: boolean; + streamingOperation: StreamingState; /** * The attempt status of the operation. 
*/ @@ -94,7 +95,7 @@ interface OnAttemptCompleteInfo { */ interface AttemptOnlyAttributes { attemptStatus: AttemptStatus; - streamingOperation: boolean; + streamingOperation: StreamingState; } /** @@ -102,7 +103,7 @@ interface AttemptOnlyAttributes { */ interface OperationOnlyAttributes { finalOperationStatus: FinalOperationStatus; - streamingOperation: boolean; + streamingOperation: StreamingState; } const packageJSON = fs.readFileSync('package.json'); diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 0d7901698..7ba8895ea 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -18,7 +18,7 @@ import * as assert from 'assert'; import * as fs from 'fs'; import {TestMetricsHandler} from '../../common/test-metrics-handler'; import {OperationMetricsCollector} from '../../src/client-side-metrics/operation-metrics-collector'; -import {MethodName} from '../../common/client-side-metrics-attributes'; +import {MethodName, StreamingState} from '../../common/client-side-metrics-attributes'; import {grpc} from 'google-gax'; /** @@ -131,7 +131,7 @@ describe('Bigtable/MetricsCollector', () => { metricsCollector.onResponse(); logger.log('8. A transient error occurs.'); metricsCollector.onAttemptComplete({ - streamingOperation: true, + streamingOperation: StreamingState.STREAMING, attemptStatus: grpc.status.DEADLINE_EXCEEDED, connectivityErrorCount: 1, }); @@ -151,12 +151,12 @@ describe('Bigtable/MetricsCollector', () => { logger.log('16. Stream ends, operation completes'); metricsCollector.onAttemptComplete({ attemptStatus: grpc.status.OK, - streamingOperation: true, + streamingOperation: StreamingState.STREAMING, connectivityErrorCount: 1, }); metricsCollector.onOperationComplete({ finalOperationStatus: grpc.status.OK, - streamingOperation: true, + streamingOperation: StreamingState.STREAMING, }); resolve(); }); diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index d08628efa..921270cf4 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -12,7 +12,7 @@ getDate call returns 3000 ms getDate call returns 4000 ms Recording parameters for onAttemptComplete: metrics: {"attemptLatency":2000,"serverLatency":101,"connectivityErrorCount":1} -attributes: {"streamingOperation":true,"attemptStatus":4,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} +attributes: {"streamingOperation":"streaming","attemptStatus":4,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} 9. After a timeout, the second attempt is made. getDate call returns 5000 ms 10. Client receives status information. 
@@ -25,8 +25,8 @@ getDate call returns 5000 ms getDate call returns 6000 ms Recording parameters for onAttemptComplete: metrics: {"attemptLatency":1000,"serverLatency":103,"connectivityErrorCount":1} -attributes: {"attemptStatus":0,"streamingOperation":true,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} +attributes: {"attemptStatus":0,"streamingOperation":"streaming","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} getDate call returns 7000 ms Recording parameters for onOperationComplete: metrics: {"operationLatency":6000,"retryCount":1,"firstResponseLatency":2000} -attributes: {"finalOperationStatus":0,"streamingOperation":true,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} +attributes: {"finalOperationStatus":0,"streamingOperation":"streaming","projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} From 2910408d4012859d65e4788d257f34ff75c60268 Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Fri, 31 Jan 2025 19:40:51 +0000 Subject: [PATCH 133/448] =?UTF-8?q?=F0=9F=A6=89=20Updates=20from=20OwlBot?= =?UTF-8?q?=20post-processor?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --- test/metrics-collector/metrics-collector.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 7ba8895ea..7983d99bb 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -18,7 +18,10 @@ import * as assert from 'assert'; import * as fs from 'fs'; import {TestMetricsHandler} from '../../common/test-metrics-handler'; import {OperationMetricsCollector} from '../../src/client-side-metrics/operation-metrics-collector'; -import {MethodName, StreamingState} from '../../common/client-side-metrics-attributes'; +import { + MethodName, + StreamingState, +} from '../../common/client-side-metrics-attributes'; import {grpc} from 'google-gax'; /** From 2781561f3791868855e0daf76d90d4f80736beca Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 31 Jan 2025 16:21:23 -0500 Subject: [PATCH 134/448] Change metrics handler interface to support each metric --- common/client-side-metrics-attributes.ts | 28 +++--- common/test-metrics-handler.ts | 79 +++++++++++------ .../gcp-metrics-handler.ts | 86 ++++++++++--------- src/client-side-metrics/metrics-handler.ts | 51 +++++++---- .../operation-metrics-collector.ts | 42 +++++---- 5 files changed, 170 insertions(+), 116 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index 5f2adf7e2..cb7430906 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -38,7 +38,7 @@ export enum StreamingState { * Attributes associated with operation latency metrics for Bigtable client operations. 
* These attributes provide context about the Bigtable environment and the completed operation. */ -interface OperationLatencyAttributes extends StandardAttributes { +export interface OperationLatencyAttributes extends StandardAttributes { finalOperationStatus: FinalOperationStatus; streamingOperation: StreamingState; } @@ -47,7 +47,7 @@ interface OperationLatencyAttributes extends StandardAttributes { * Attributes associated with attempt latency metrics for Bigtable client operations. * These attributes provide context about the Bigtable environment, the specific attempt, and whether the operation was streaming. */ -interface AttemptLatencyAttributes extends StandardAttributes { +export interface AttemptLatencyAttributes extends StandardAttributes { attemptStatus: AttemptStatus; streamingOperation: StreamingState; } @@ -56,7 +56,7 @@ interface AttemptLatencyAttributes extends StandardAttributes { * Attributes associated with retry count metrics for Bigtable client operations. These attributes * provide context about the Bigtable environment and the final status of the operation. */ -interface RetryCountAttributes extends StandardAttributes { +export interface RetryCountAttributes extends StandardAttributes { finalOperationStatus: FinalOperationStatus; } @@ -70,7 +70,7 @@ type ApplicationBlockingLatenciesAttributes = StandardAttributes; * Attributes associated with first response latency metrics for Bigtable client operations. * These attributes provide context about the Bigtable environment and the final status of the operation. */ -interface FirstResponseLatencyAttributes extends StandardAttributes { +export interface FirstResponseLatencyAttributes extends StandardAttributes { finalOperationStatus: FinalOperationStatus; } @@ -78,7 +78,7 @@ interface FirstResponseLatencyAttributes extends StandardAttributes { * Attributes associated with server latency metrics for Bigtable client operations. * These attributes provide context about the Bigtable environment, the specific attempt, and whether the operation was streaming. */ -interface ServerLatenciesAttributes extends StandardAttributes { +export interface ServerLatenciesAttributes extends StandardAttributes { attemptStatus: AttemptStatus; streamingOperation: StreamingState; } @@ -87,7 +87,7 @@ interface ServerLatenciesAttributes extends StandardAttributes { * Attributes associated with connectivity error count metrics for Bigtable client operations. * These attributes provide context about the Bigtable environment and the status of the attempt. */ -interface ConnectivityErrorCountAttributes extends StandardAttributes { +export interface ConnectivityErrorCountAttributes extends StandardAttributes { attemptStatus: AttemptStatus; } @@ -118,10 +118,9 @@ export type AttemptStatus = grpc.status; * operation, and its final status. They are used for recording metrics such as * operation latency, first response latency, and retry count. */ -export type OnOperationCompleteAttributes = - | OperationLatencyAttributes - | FirstResponseLatencyAttributes - | RetryCountAttributes; +export type OnOperationCompleteAttributes = OperationLatencyAttributes & + FirstResponseLatencyAttributes & + RetryCountAttributes; /** * Attributes associated with the completion of a single attempt of a Bigtable @@ -130,11 +129,10 @@ export type OnOperationCompleteAttributes = * are used for recording metrics such as attempt latency, server latency, and * connectivity errors. 
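The union-to-intersection change below matters because a single attributes object is handed to several recorders at once; an intersection guarantees every per-metric field is present, while a union only exposes the shared ones. A pared-down sketch with hypothetical two-field shapes:

interface AttemptLatencySketch {
  attemptStatus: number;
  streamingOperation: string;
}
interface ConnectivityErrorCountSketch {
  attemptStatus: number;
}

type AsUnion = AttemptLatencySketch | ConnectivityErrorCountSketch;
type AsIntersection = AttemptLatencySketch & ConnectivityErrorCountSketch;

declare const fromUnion: AsUnion;
const shared = fromUnion.attemptStatus; // OK: present on every member
// fromUnion.streamingOperation; // compile error: missing from one member

// One object satisfies every per-metric recorder once the type is an intersection:
const attrs: AsIntersection = {attemptStatus: 0, streamingOperation: 'streaming'};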
*/ -export type OnAttemptCompleteAttributes = - | AttemptLatencyAttributes - | ConnectivityErrorCountAttributes - | ServerLatenciesAttributes - | ClientBlockingLatenciesAttributes; +export type OnAttemptCompleteAttributes = AttemptLatencyAttributes & + ConnectivityErrorCountAttributes & + ServerLatenciesAttributes & + ClientBlockingLatenciesAttributes; /** * Represents the names of Bigtable methods. These are used as attributes for diff --git a/common/test-metrics-handler.ts b/common/test-metrics-handler.ts index 9ffabd7a0..07c8c1661 100644 --- a/common/test-metrics-handler.ts +++ b/common/test-metrics-handler.ts @@ -14,12 +14,12 @@ import {WithLogger} from './logger'; import { - OnAttemptCompleteMetrics, - OnOperationCompleteMetrics, -} from '../src/client-side-metrics/metrics-handler'; -import { - OnAttemptCompleteAttributes, - OnOperationCompleteAttributes, + AttemptLatencyAttributes, + ConnectivityErrorCountAttributes, + FirstResponseLatencyAttributes, + OperationLatencyAttributes, + RetryCountAttributes, + ServerLatenciesAttributes, } from './client-side-metrics-attributes'; /** @@ -27,33 +27,56 @@ import { * It logs the metrics and attributes received by the onOperationComplete and onAttemptComplete methods. */ export class TestMetricsHandler extends WithLogger { - /** - * Logs the metrics and attributes received for an operation completion. - * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. - * @param {Attributes} attributes Attributes associated with the completed operation. - */ - onOperationComplete( - metrics: OnOperationCompleteMetrics, - attributes: OnOperationCompleteAttributes + onRecordAttemptLatency( + attemptLatency: number, + attributes: AttemptLatencyAttributes + ) { + this.logger.log( + `Recording parameters for AttemptLatency: ${attemptLatency}:` + ); + this.logger.log(`attributes: ${JSON.stringify(attributes)}`); + } + + onRecordConnectivityErrorCount( + connectivityErrorCount: number, + attributes: ConnectivityErrorCountAttributes + ) { + this.logger.log( + `Recording parameters for ConnectivityErrorCount: ${connectivityErrorCount}:` + ); + this.logger.log(`attributes: ${JSON.stringify(attributes)}`); + } + + onRecordServerLatency( + serverLatency: number, + attributes: ServerLatenciesAttributes + ) { + this.logger.log(`Recording parameters for ServerLatency: ${serverLatency}`); + this.logger.log(`attributes: ${JSON.stringify(attributes)}`); + } + + onRecordOperationLatency( + operationLatency: number, + attributes: OperationLatencyAttributes ) { - attributes.clientName = 'nodejs-bigtable'; - this.logger.log('Recording parameters for onOperationComplete:'); - this.logger.log(`metrics: ${JSON.stringify(metrics)}`); + this.logger.log( + `Recording parameters for OperationLatency: ${operationLatency}` + ); + this.logger.log(`attributes: ${JSON.stringify(attributes)}`); + } + + onRecordRetryCount(retryCount: number, attributes: RetryCountAttributes) { + this.logger.log(`Recording parameters for RetryCount: ${retryCount}`); this.logger.log(`attributes: ${JSON.stringify(attributes)}`); } - /** - * Logs the metrics and attributes received for an attempt completion. - * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. - * @param {Attributes} attributes Attributes associated with the completed attempt. 
- */ - onAttemptComplete( - metrics: OnAttemptCompleteMetrics, - attributes: OnAttemptCompleteAttributes + onRecordFirstResponseLatency( + firstResponseLatency: number, + attributes: FirstResponseLatencyAttributes ) { - attributes.clientName = 'nodejs-bigtable'; - this.logger.log('Recording parameters for onAttemptComplete:'); - this.logger.log(`metrics: ${JSON.stringify(metrics)}`); + this.logger.log( + `Recording parameters for FirstResponseLatency: ${firstResponseLatency}` + ); this.logger.log(`attributes: ${JSON.stringify(attributes)}`); } } diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index e1a462bb9..1192e221d 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -12,17 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -import { - IMetricsHandler, - OnAttemptCompleteMetrics, - OnOperationCompleteMetrics, -} from './metrics-handler'; +import {IMetricsHandler} from './metrics-handler'; import * as Resources from '@opentelemetry/resources'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; import { - OnAttemptCompleteAttributes, - OnOperationCompleteAttributes, + AttemptLatencyAttributes, + ConnectivityErrorCountAttributes, + FirstResponseLatencyAttributes, + OperationLatencyAttributes, + RetryCountAttributes, + ServerLatenciesAttributes, } from '../../common/client-side-metrics-attributes'; import {View} from '@opentelemetry/sdk-metrics'; const { @@ -161,48 +161,54 @@ export class GCPMetricsHandler implements IMetricsHandler { } } - /** - * Records metrics for a completed Bigtable operation. - * This method records the operation latency and retry count, associating them with provided attributes. - * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. - * @param {OnOperationCompleteAttributes} attributes Attributes associated with the completed operation. - */ - onOperationComplete( - metrics: OnOperationCompleteMetrics, - attributes: OnOperationCompleteAttributes + onRecordAttemptLatency( + attemptLatency: number, + attributes: AttemptLatencyAttributes ) { this.initialize(); - this.otelMetrics?.operationLatencies.record( - metrics.operationLatency, - attributes - ); - this.otelMetrics?.retryCount.add(metrics.retryCount, attributes); - this.otelMetrics?.firstResponseLatencies.record( - metrics.firstResponseLatency, - attributes - ); + this.otelMetrics?.attemptLatencies.record(attemptLatency, attributes); } - /** - * Records metrics for a completed attempt of a Bigtable operation. - * This method records attempt latency, connectivity error count, server latency, and first response latency, - * along with the provided attributes. - * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. - * @param {OnAttemptCompleteAttributes} attributes Attributes associated with the completed attempt. 
- */ - onAttemptComplete( - metrics: OnAttemptCompleteMetrics, - attributes: OnAttemptCompleteAttributes + onRecordConnectivityErrorCount( + connectivityErrorCount: number, + attributes: ConnectivityErrorCountAttributes ) { this.initialize(); - this.otelMetrics?.attemptLatencies.record( - metrics.attemptLatency, + this.otelMetrics?.connectivityErrorCount.record( + connectivityErrorCount, attributes ); - this.otelMetrics?.connectivityErrorCount.record( - metrics.connectivityErrorCount, + } + + onRecordServerLatency( + serverLatency: number, + attributes: ServerLatenciesAttributes + ) { + this.initialize(); + this.otelMetrics?.serverLatencies.record(serverLatency, attributes); + } + + onRecordOperationLatency( + operationLatency: number, + attributes: OperationLatencyAttributes + ) { + this.initialize(); + this.otelMetrics?.operationLatencies.record(operationLatency, attributes); + } + + onRecordRetryCount(retryCount: number, attributes: RetryCountAttributes) { + this.initialize(); + this.otelMetrics?.retryCount.add(retryCount, attributes); + } + + onRecordFirstResponseLatency( + firstResponseLatency: number, + attributes: FirstResponseLatencyAttributes + ) { + this.initialize(); + this.otelMetrics?.firstResponseLatencies.record( + firstResponseLatency, attributes ); - this.otelMetrics?.serverLatencies.record(metrics.serverLatency, attributes); } } diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index 051b65394..fa3cfda72 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -13,8 +13,12 @@ // limitations under the License. import { - OnAttemptCompleteAttributes, - OnOperationCompleteAttributes, + AttemptLatencyAttributes, + ConnectivityErrorCountAttributes, + FirstResponseLatencyAttributes, + OperationLatencyAttributes, + RetryCountAttributes, + ServerLatenciesAttributes, } from '../../common/client-side-metrics-attributes'; /** @@ -46,22 +50,33 @@ export interface OnAttemptCompleteMetrics { * Implementations of this interface can define how metrics are recorded and processed. */ export interface IMetricsHandler { - /** - * Called when an operation completes (successfully or unsuccessfully). - * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. - * @param {OnOperationCompleteAttributes} attributes Attributes associated with the completed operation. - */ - onOperationComplete?( - metrics: OnOperationCompleteMetrics, - attributes: OnOperationCompleteAttributes + onRecordAttemptLatency?( + attemptLatency: number, + attributes: AttemptLatencyAttributes ): void; - /** - * Called when an attempt (e.g., an RPC attempt) completes. - * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. - * @param {OnAttemptCompleteAttributes} attributes Attributes associated with the completed attempt. 
- */ - onAttemptComplete?( - metrics: OnAttemptCompleteMetrics, - attributes: OnAttemptCompleteAttributes + + onRecordConnectivityErrorCount?( + connectivityErrorCount: number, + attributes: ConnectivityErrorCountAttributes + ): void; + + onRecordServerLatency?( + serverLatency: number, + attributes: ServerLatenciesAttributes + ): void; + + onRecordOperationLatency?( + operationLatency: number, + attributes: OperationLatencyAttributes + ): void; + + onRecordRetryCount?( + retryCount: number, + attributes: RetryCountAttributes + ): void; + + onRecordFirstResponseLatency?( + firstResponseLatency: number, + attributes: FirstResponseLatencyAttributes ): void; } diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index e120e5cf9..59c859e35 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -273,13 +273,15 @@ export class OperationMetricsCollector { const attributes = this.getAttemptAttributes(projectId, info); const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); this.metricsHandlers.forEach(metricsHandler => { - if (metricsHandler.onAttemptComplete) { - metricsHandler.onAttemptComplete( - { - attemptLatency: totalTime, - serverLatency: this.serverTime ?? undefined, - connectivityErrorCount: info.connectivityErrorCount, - }, + if (metricsHandler.onRecordAttemptLatency) { + metricsHandler.onRecordAttemptLatency(totalTime, attributes); + } + if (metricsHandler.onRecordServerLatency && this.serverTime) { + metricsHandler.onRecordServerLatency(this.serverTime, attributes); + } + if (metricsHandler.onRecordConnectivityErrorCount) { + metricsHandler.onRecordConnectivityErrorCount( + info.connectivityErrorCount, attributes ); } @@ -343,15 +345,25 @@ export class OperationMetricsCollector { projectId, info ); - const metrics = { - operationLatency: totalTime, - retryCount: this.attemptCount - 1, - firstResponseLatency: this.firstResponseLatency ?? undefined, - }; this.metricsHandlers.forEach(metricsHandler => { - if (metricsHandler.onOperationComplete) { - metricsHandler.onOperationComplete( - metrics, + if (metricsHandler.onRecordOperationLatency) { + metricsHandler.onRecordOperationLatency( + totalTime, + operationLatencyAttributes + ); + } + if (metricsHandler.onRecordRetryCount) { + metricsHandler.onRecordRetryCount( + this.attemptCount - 1, + operationLatencyAttributes + ); + } + if ( + metricsHandler.onRecordFirstResponseLatency && + this.firstResponseLatency + ) { + metricsHandler.onRecordFirstResponseLatency( + this.firstResponseLatency ?? undefined, operationLatencyAttributes ); } From 0b4d93edaaa6298b4fa3797867d897b71baadc92 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 31 Jan 2025 16:22:36 -0500 Subject: [PATCH 135/448] Revert "Change metrics handler interface to support each metric" This reverts commit 2781561f3791868855e0daf76d90d4f80736beca. 
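For quick comparison, the two handler shapes that the previous commit and this revert toggle between, condensed to a couple of members each (names follow the diffs; payload fields are trimmed for brevity):

// Per-metric shape from the reverted commit: one optional hook per instrument.
interface PerMetricHandlerSketch {
  onRecordAttemptLatency?(attemptLatency: number): void;
  onRecordRetryCount?(retryCount: number): void;
}

// Grouped shape this revert restores: one hook per lifecycle event, with the
// related metrics bundled into a single object.
interface GroupedHandlerSketch {
  onAttemptComplete?(metrics: {attemptLatency: number; serverLatency?: number}): void;
  onOperationComplete?(metrics: {operationLatency: number; retryCount: number}): void;
}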
--- common/client-side-metrics-attributes.ts | 28 +++--- common/test-metrics-handler.ts | 79 ++++++----------- .../gcp-metrics-handler.ts | 86 +++++++++---------- src/client-side-metrics/metrics-handler.ts | 51 ++++------- .../operation-metrics-collector.ts | 42 ++++----- 5 files changed, 116 insertions(+), 170 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index cb7430906..5f2adf7e2 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -38,7 +38,7 @@ export enum StreamingState { * Attributes associated with operation latency metrics for Bigtable client operations. * These attributes provide context about the Bigtable environment and the completed operation. */ -export interface OperationLatencyAttributes extends StandardAttributes { +interface OperationLatencyAttributes extends StandardAttributes { finalOperationStatus: FinalOperationStatus; streamingOperation: StreamingState; } @@ -47,7 +47,7 @@ export interface OperationLatencyAttributes extends StandardAttributes { * Attributes associated with attempt latency metrics for Bigtable client operations. * These attributes provide context about the Bigtable environment, the specific attempt, and whether the operation was streaming. */ -export interface AttemptLatencyAttributes extends StandardAttributes { +interface AttemptLatencyAttributes extends StandardAttributes { attemptStatus: AttemptStatus; streamingOperation: StreamingState; } @@ -56,7 +56,7 @@ export interface AttemptLatencyAttributes extends StandardAttributes { * Attributes associated with retry count metrics for Bigtable client operations. These attributes * provide context about the Bigtable environment and the final status of the operation. */ -export interface RetryCountAttributes extends StandardAttributes { +interface RetryCountAttributes extends StandardAttributes { finalOperationStatus: FinalOperationStatus; } @@ -70,7 +70,7 @@ type ApplicationBlockingLatenciesAttributes = StandardAttributes; * Attributes associated with first response latency metrics for Bigtable client operations. * These attributes provide context about the Bigtable environment and the final status of the operation. */ -export interface FirstResponseLatencyAttributes extends StandardAttributes { +interface FirstResponseLatencyAttributes extends StandardAttributes { finalOperationStatus: FinalOperationStatus; } @@ -78,7 +78,7 @@ export interface FirstResponseLatencyAttributes extends StandardAttributes { * Attributes associated with server latency metrics for Bigtable client operations. * These attributes provide context about the Bigtable environment, the specific attempt, and whether the operation was streaming. */ -export interface ServerLatenciesAttributes extends StandardAttributes { +interface ServerLatenciesAttributes extends StandardAttributes { attemptStatus: AttemptStatus; streamingOperation: StreamingState; } @@ -87,7 +87,7 @@ export interface ServerLatenciesAttributes extends StandardAttributes { * Attributes associated with connectivity error count metrics for Bigtable client operations. * These attributes provide context about the Bigtable environment and the status of the attempt. */ -export interface ConnectivityErrorCountAttributes extends StandardAttributes { +interface ConnectivityErrorCountAttributes extends StandardAttributes { attemptStatus: AttemptStatus; } @@ -118,9 +118,10 @@ export type AttemptStatus = grpc.status; * operation, and its final status. 
They are used for recording metrics such as * operation latency, first response latency, and retry count. */ -export type OnOperationCompleteAttributes = OperationLatencyAttributes & - FirstResponseLatencyAttributes & - RetryCountAttributes; +export type OnOperationCompleteAttributes = + | OperationLatencyAttributes + | FirstResponseLatencyAttributes + | RetryCountAttributes; /** * Attributes associated with the completion of a single attempt of a Bigtable @@ -129,10 +130,11 @@ export type OnOperationCompleteAttributes = OperationLatencyAttributes & * are used for recording metrics such as attempt latency, server latency, and * connectivity errors. */ -export type OnAttemptCompleteAttributes = AttemptLatencyAttributes & - ConnectivityErrorCountAttributes & - ServerLatenciesAttributes & - ClientBlockingLatenciesAttributes; +export type OnAttemptCompleteAttributes = + | AttemptLatencyAttributes + | ConnectivityErrorCountAttributes + | ServerLatenciesAttributes + | ClientBlockingLatenciesAttributes; /** * Represents the names of Bigtable methods. These are used as attributes for diff --git a/common/test-metrics-handler.ts b/common/test-metrics-handler.ts index 07c8c1661..9ffabd7a0 100644 --- a/common/test-metrics-handler.ts +++ b/common/test-metrics-handler.ts @@ -14,12 +14,12 @@ import {WithLogger} from './logger'; import { - AttemptLatencyAttributes, - ConnectivityErrorCountAttributes, - FirstResponseLatencyAttributes, - OperationLatencyAttributes, - RetryCountAttributes, - ServerLatenciesAttributes, + OnAttemptCompleteMetrics, + OnOperationCompleteMetrics, +} from '../src/client-side-metrics/metrics-handler'; +import { + OnAttemptCompleteAttributes, + OnOperationCompleteAttributes, } from './client-side-metrics-attributes'; /** @@ -27,56 +27,33 @@ import { * It logs the metrics and attributes received by the onOperationComplete and onAttemptComplete methods. */ export class TestMetricsHandler extends WithLogger { - onRecordAttemptLatency( - attemptLatency: number, - attributes: AttemptLatencyAttributes - ) { - this.logger.log( - `Recording parameters for AttemptLatency: ${attemptLatency}:` - ); - this.logger.log(`attributes: ${JSON.stringify(attributes)}`); - } - - onRecordConnectivityErrorCount( - connectivityErrorCount: number, - attributes: ConnectivityErrorCountAttributes - ) { - this.logger.log( - `Recording parameters for ConnectivityErrorCount: ${connectivityErrorCount}:` - ); - this.logger.log(`attributes: ${JSON.stringify(attributes)}`); - } - - onRecordServerLatency( - serverLatency: number, - attributes: ServerLatenciesAttributes - ) { - this.logger.log(`Recording parameters for ServerLatency: ${serverLatency}`); - this.logger.log(`attributes: ${JSON.stringify(attributes)}`); - } - - onRecordOperationLatency( - operationLatency: number, - attributes: OperationLatencyAttributes + /** + * Logs the metrics and attributes received for an operation completion. + * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. + * @param {Attributes} attributes Attributes associated with the completed operation. 
+ */ + onOperationComplete( + metrics: OnOperationCompleteMetrics, + attributes: OnOperationCompleteAttributes ) { - this.logger.log( - `Recording parameters for OperationLatency: ${operationLatency}` - ); - this.logger.log(`attributes: ${JSON.stringify(attributes)}`); - } - - onRecordRetryCount(retryCount: number, attributes: RetryCountAttributes) { - this.logger.log(`Recording parameters for RetryCount: ${retryCount}`); + attributes.clientName = 'nodejs-bigtable'; + this.logger.log('Recording parameters for onOperationComplete:'); + this.logger.log(`metrics: ${JSON.stringify(metrics)}`); this.logger.log(`attributes: ${JSON.stringify(attributes)}`); } - onRecordFirstResponseLatency( - firstResponseLatency: number, - attributes: FirstResponseLatencyAttributes + /** + * Logs the metrics and attributes received for an attempt completion. + * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. + * @param {Attributes} attributes Attributes associated with the completed attempt. + */ + onAttemptComplete( + metrics: OnAttemptCompleteMetrics, + attributes: OnAttemptCompleteAttributes ) { - this.logger.log( - `Recording parameters for FirstResponseLatency: ${firstResponseLatency}` - ); + attributes.clientName = 'nodejs-bigtable'; + this.logger.log('Recording parameters for onAttemptComplete:'); + this.logger.log(`metrics: ${JSON.stringify(metrics)}`); this.logger.log(`attributes: ${JSON.stringify(attributes)}`); } } diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 1192e221d..e1a462bb9 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -12,17 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -import {IMetricsHandler} from './metrics-handler'; +import { + IMetricsHandler, + OnAttemptCompleteMetrics, + OnOperationCompleteMetrics, +} from './metrics-handler'; import * as Resources from '@opentelemetry/resources'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; import { - AttemptLatencyAttributes, - ConnectivityErrorCountAttributes, - FirstResponseLatencyAttributes, - OperationLatencyAttributes, - RetryCountAttributes, - ServerLatenciesAttributes, + OnAttemptCompleteAttributes, + OnOperationCompleteAttributes, } from '../../common/client-side-metrics-attributes'; import {View} from '@opentelemetry/sdk-metrics'; const { @@ -161,54 +161,48 @@ export class GCPMetricsHandler implements IMetricsHandler { } } - onRecordAttemptLatency( - attemptLatency: number, - attributes: AttemptLatencyAttributes - ) { - this.initialize(); - this.otelMetrics?.attemptLatencies.record(attemptLatency, attributes); - } - - onRecordConnectivityErrorCount( - connectivityErrorCount: number, - attributes: ConnectivityErrorCountAttributes + /** + * Records metrics for a completed Bigtable operation. + * This method records the operation latency and retry count, associating them with provided attributes. + * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. + * @param {OnOperationCompleteAttributes} attributes Attributes associated with the completed operation. 
+ */ + onOperationComplete( + metrics: OnOperationCompleteMetrics, + attributes: OnOperationCompleteAttributes ) { this.initialize(); - this.otelMetrics?.connectivityErrorCount.record( - connectivityErrorCount, + this.otelMetrics?.operationLatencies.record( + metrics.operationLatency, + attributes + ); + this.otelMetrics?.retryCount.add(metrics.retryCount, attributes); + this.otelMetrics?.firstResponseLatencies.record( + metrics.firstResponseLatency, attributes ); } - onRecordServerLatency( - serverLatency: number, - attributes: ServerLatenciesAttributes - ) { - this.initialize(); - this.otelMetrics?.serverLatencies.record(serverLatency, attributes); - } - - onRecordOperationLatency( - operationLatency: number, - attributes: OperationLatencyAttributes - ) { - this.initialize(); - this.otelMetrics?.operationLatencies.record(operationLatency, attributes); - } - - onRecordRetryCount(retryCount: number, attributes: RetryCountAttributes) { - this.initialize(); - this.otelMetrics?.retryCount.add(retryCount, attributes); - } - - onRecordFirstResponseLatency( - firstResponseLatency: number, - attributes: FirstResponseLatencyAttributes + /** + * Records metrics for a completed attempt of a Bigtable operation. + * This method records attempt latency, connectivity error count, server latency, and first response latency, + * along with the provided attributes. + * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. + * @param {OnAttemptCompleteAttributes} attributes Attributes associated with the completed attempt. + */ + onAttemptComplete( + metrics: OnAttemptCompleteMetrics, + attributes: OnAttemptCompleteAttributes ) { this.initialize(); - this.otelMetrics?.firstResponseLatencies.record( - firstResponseLatency, + this.otelMetrics?.attemptLatencies.record( + metrics.attemptLatency, + attributes + ); + this.otelMetrics?.connectivityErrorCount.record( + metrics.connectivityErrorCount, attributes ); + this.otelMetrics?.serverLatencies.record(metrics.serverLatency, attributes); } } diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index fa3cfda72..051b65394 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -13,12 +13,8 @@ // limitations under the License. import { - AttemptLatencyAttributes, - ConnectivityErrorCountAttributes, - FirstResponseLatencyAttributes, - OperationLatencyAttributes, - RetryCountAttributes, - ServerLatenciesAttributes, + OnAttemptCompleteAttributes, + OnOperationCompleteAttributes, } from '../../common/client-side-metrics-attributes'; /** @@ -50,33 +46,22 @@ export interface OnAttemptCompleteMetrics { * Implementations of this interface can define how metrics are recorded and processed. */ export interface IMetricsHandler { - onRecordAttemptLatency?( - attemptLatency: number, - attributes: AttemptLatencyAttributes + /** + * Called when an operation completes (successfully or unsuccessfully). + * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. + * @param {OnOperationCompleteAttributes} attributes Attributes associated with the completed operation. 
+ */ + onOperationComplete?( + metrics: OnOperationCompleteMetrics, + attributes: OnOperationCompleteAttributes ): void; - - onRecordConnectivityErrorCount?( - connectivityErrorCount: number, - attributes: ConnectivityErrorCountAttributes - ): void; - - onRecordServerLatency?( - serverLatency: number, - attributes: ServerLatenciesAttributes - ): void; - - onRecordOperationLatency?( - operationLatency: number, - attributes: OperationLatencyAttributes - ): void; - - onRecordRetryCount?( - retryCount: number, - attributes: RetryCountAttributes - ): void; - - onRecordFirstResponseLatency?( - firstResponseLatency: number, - attributes: FirstResponseLatencyAttributes + /** + * Called when an attempt (e.g., an RPC attempt) completes. + * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. + * @param {OnAttemptCompleteAttributes} attributes Attributes associated with the completed attempt. + */ + onAttemptComplete?( + metrics: OnAttemptCompleteMetrics, + attributes: OnAttemptCompleteAttributes ): void; } diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 59c859e35..e120e5cf9 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -273,15 +273,13 @@ export class OperationMetricsCollector { const attributes = this.getAttemptAttributes(projectId, info); const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); this.metricsHandlers.forEach(metricsHandler => { - if (metricsHandler.onRecordAttemptLatency) { - metricsHandler.onRecordAttemptLatency(totalTime, attributes); - } - if (metricsHandler.onRecordServerLatency && this.serverTime) { - metricsHandler.onRecordServerLatency(this.serverTime, attributes); - } - if (metricsHandler.onRecordConnectivityErrorCount) { - metricsHandler.onRecordConnectivityErrorCount( - info.connectivityErrorCount, + if (metricsHandler.onAttemptComplete) { + metricsHandler.onAttemptComplete( + { + attemptLatency: totalTime, + serverLatency: this.serverTime ?? undefined, + connectivityErrorCount: info.connectivityErrorCount, + }, attributes ); } @@ -345,25 +343,15 @@ export class OperationMetricsCollector { projectId, info ); + const metrics = { + operationLatency: totalTime, + retryCount: this.attemptCount - 1, + firstResponseLatency: this.firstResponseLatency ?? undefined, + }; this.metricsHandlers.forEach(metricsHandler => { - if (metricsHandler.onRecordOperationLatency) { - metricsHandler.onRecordOperationLatency( - totalTime, - operationLatencyAttributes - ); - } - if (metricsHandler.onRecordRetryCount) { - metricsHandler.onRecordRetryCount( - this.attemptCount - 1, - operationLatencyAttributes - ); - } - if ( - metricsHandler.onRecordFirstResponseLatency && - this.firstResponseLatency - ) { - metricsHandler.onRecordFirstResponseLatency( - this.firstResponseLatency ?? 
undefined, + if (metricsHandler.onOperationComplete) { + metricsHandler.onOperationComplete( + metrics, operationLatencyAttributes ); } From 1b6681b7aabde340a1fad04208c1cb52c23440c9 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 5 Feb 2025 15:23:24 -0500 Subject: [PATCH 136/448] Supply the projectId later in the client side metrics lifecycle --- .../operation-metrics-collector.ts | 28 ++- test/metrics-collector/metrics-collector.ts | 175 +++++++++--------- 2 files changed, 99 insertions(+), 104 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index e120e5cf9..e2893de5f 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -138,7 +138,6 @@ export class OperationMetricsCollector { private cluster: string | undefined; private tabularApiSurface: ITabularApiSurface; private methodName: MethodName; - private projectId?: string; private attemptCount = 0; private receivedFirstResponse: boolean; private metricsHandlers: IMetricsHandler[]; @@ -151,14 +150,12 @@ export class OperationMetricsCollector { * @param {ITabularApiSurface} tabularApiSurface Information about the Bigtable table being accessed. * @param {IMetricsHandler[]} metricsHandlers The metrics handlers used for recording metrics. * @param {MethodName} methodName The name of the method being traced. - * @param {string} projectId The id of the project. * @param {DateProvider} dateProvider A provider for date/time information (for testing). */ constructor( tabularApiSurface: ITabularApiSurface, metricsHandlers: IMetricsHandler[], methodName: MethodName, - projectId?: string, dateProvider?: DateProvider ) { this.state = MetricsCollectorState.OPERATION_NOT_STARTED; @@ -173,7 +170,6 @@ export class OperationMetricsCollector { this.firstResponseLatency = null; this.serverTimeRead = false; this.serverTime = null; - this.projectId = projectId; if (dateProvider) { this.dateProvider = dateProvider; } else { @@ -258,9 +254,10 @@ export class OperationMetricsCollector { /** * Called when an attempt (e.g., an RPC attempt) completes. Records attempt latencies. + * @param {string} projectId The id of the project. * @param {OnAttemptCompleteInfo} info Information about the completed attempt. */ - onAttemptComplete(info: OnAttemptCompleteInfo) { + onAttemptComplete(projectId: string, info: OnAttemptCompleteInfo) { if ( this.state === MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS ) { @@ -268,7 +265,6 @@ export class OperationMetricsCollector { MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS; this.attemptCount++; const endTime = this.dateProvider.getDate(); - const projectId = this.projectId; if (projectId && this.attemptStartTime) { const attributes = this.getAttemptAttributes(projectId, info); const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); @@ -310,11 +306,10 @@ export class OperationMetricsCollector { /** * Called when the first response is received. Records first response latencies. 
*/ - onResponse() { + onResponse(projectId: string) { if (!this.receivedFirstResponse) { this.receivedFirstResponse = true; const endTime = this.dateProvider.getDate(); - const projectId = this.projectId; if (projectId && this.operationStartTime) { this.firstResponseLatency = endTime.getTime() - this.operationStartTime.getTime(); @@ -325,16 +320,16 @@ export class OperationMetricsCollector { /** * Called when an operation completes (successfully or unsuccessfully). * Records operation latencies, retry counts, and connectivity error counts. + * @param {string} projectId The id of the project. * @param {OperationOnlyAttributes} info Information about the completed operation. */ - onOperationComplete(info: OperationOnlyAttributes) { + onOperationComplete(projectId: string, info: OperationOnlyAttributes) { if ( this.state === MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS ) { this.state = MetricsCollectorState.OPERATION_COMPLETE; const endTime = this.dateProvider.getDate(); - const projectId = this.projectId; if (projectId && this.operationStartTime) { const totalTime = endTime.getTime() - this.operationStartTime.getTime(); { @@ -365,12 +360,16 @@ export class OperationMetricsCollector { /** * Called when metadata is received. Extracts server timing information if available. + * @param {string} projectId The id of the project. * @param {object} metadata The received metadata. */ - onMetadataReceived(metadata: { - internalRepr: Map; - options: {}; - }) { + onMetadataReceived( + projectId: string, + metadata: { + internalRepr: Map; + options: {}; + } + ) { const mappedEntries = new Map( Array.from(metadata.internalRepr.entries(), ([key, value]) => [ key, @@ -382,7 +381,6 @@ export class OperationMetricsCollector { if (!this.serverTimeRead) { this.serverTimeRead = true; const serverTime = parseInt(durationValues[1]); - const projectId = this.projectId; if (projectId) { this.serverTime = serverTime; } diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 7983d99bb..dd8130389 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -49,21 +49,11 @@ class Logger { /** * A fake implementation of the Bigtable client for testing purposes. Provides a - * metricsTracerFactory and a stubbed getProjectId_ method. + * metricsTracerFactory and a stubbed projectId method. */ class FakeBigtable { appProfileId?: string; - - /** - * A stubbed method that simulates retrieving the project ID. Always returns - * 'my-project'. - * @param {function} callback A callback function that receives the project ID (or an error). 
- */ - getProjectId_( - callback: (err: Error | null, projectId?: string) => void - ): void { - callback(null, 'my-project'); - } + projectId = 'my-project'; } /** @@ -86,84 +76,91 @@ describe('Bigtable/MetricsCollector', () => { bigtable = new FakeBigtable(); async fakeMethod(): Promise { - return new Promise(resolve => { - this.bigtable.getProjectId_((err, projectId) => { - function createMetadata(duration: string) { - return { - internalRepr: new Map([ - ['server-timing', Buffer.from(`gfet4t7; dur=${duration}`)], - ]), - options: {}, - }; - } - - const status = { - metadata: { - internalRepr: new Map([ - [ - 'x-goog-ext-425905942-bin', - Buffer.from('\n\nus-west1-c \rfake-cluster3'), - ], - ]), - options: {}, - }, - }; - const metricsCollector = new OperationMetricsCollector( - this, - metricsHandlers, - MethodName.READ_ROWS, - projectId, - new TestDateProvider(logger) - ); - // In this method we simulate a series of events that might happen - // when a user calls one of the Table methods. - // Here is an example of what might happen in a method call: - logger.log('1. The operation starts'); - metricsCollector.onOperationStart(); - logger.log('2. The attempt starts.'); - metricsCollector.onAttemptStart(); - logger.log('3. Client receives status information.'); - metricsCollector.onStatusReceived(status); - logger.log('4. Client receives metadata.'); - metricsCollector.onMetadataReceived(createMetadata('101')); - logger.log('5. Client receives first row.'); - metricsCollector.onResponse(); - logger.log('6. Client receives metadata.'); - metricsCollector.onMetadataReceived(createMetadata('102')); - logger.log('7. Client receives second row.'); - metricsCollector.onResponse(); - logger.log('8. A transient error occurs.'); - metricsCollector.onAttemptComplete({ - streamingOperation: StreamingState.STREAMING, - attemptStatus: grpc.status.DEADLINE_EXCEEDED, - connectivityErrorCount: 1, - }); - logger.log('9. After a timeout, the second attempt is made.'); - metricsCollector.onAttemptStart(); - logger.log('10. Client receives status information.'); - metricsCollector.onStatusReceived(status); - logger.log('11. Client receives metadata.'); - metricsCollector.onMetadataReceived(createMetadata('103')); - logger.log('12. Client receives third row.'); - metricsCollector.onResponse(); - logger.log('13. Client receives metadata.'); - metricsCollector.onMetadataReceived(createMetadata('104')); - logger.log('14. Client receives fourth row.'); - metricsCollector.onResponse(); - logger.log('15. User reads row 1'); - logger.log('16. Stream ends, operation completes'); - metricsCollector.onAttemptComplete({ - attemptStatus: grpc.status.OK, - streamingOperation: StreamingState.STREAMING, - connectivityErrorCount: 1, - }); - metricsCollector.onOperationComplete({ - finalOperationStatus: grpc.status.OK, - streamingOperation: StreamingState.STREAMING, - }); - resolve(); + function createMetadata(duration: string) { + return { + internalRepr: new Map([ + ['server-timing', Buffer.from(`gfet4t7; dur=${duration}`)], + ]), + options: {}, + }; + } + if (this.bigtable.projectId) { + const status = { + metadata: { + internalRepr: new Map([ + [ + 'x-goog-ext-425905942-bin', + Buffer.from('\n\nus-west1-c \rfake-cluster3'), + ], + ]), + options: {}, + }, + }; + const metricsCollector = new OperationMetricsCollector( + this, + metricsHandlers, + MethodName.READ_ROWS, + new TestDateProvider(logger) + ); + // In this method we simulate a series of events that might happen + // when a user calls one of the Table methods. 
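Sketched below, the calling pattern this commit moves to: the collector is constructed without a projectId, and the id resolved by the client is threaded into each lifecycle callback. The collector surface is condensed from the diff; the caller is hypothetical:

// Condensed collector surface, following the new signatures in this commit.
declare const metricsCollector: {
  onOperationStart(): void;
  onAttemptStart(): void;
  onResponse(projectId: string): void;
  onAttemptComplete(projectId: string, info: object): void;
  onOperationComplete(projectId: string, info: object): void;
};

// Hypothetical caller: the id is resolved once by the client, then passed in.
function simulateOperation(resolvedProjectId: string) {
  metricsCollector.onOperationStart();
  metricsCollector.onAttemptStart();
  metricsCollector.onResponse(resolvedProjectId);
  metricsCollector.onAttemptComplete(resolvedProjectId, {connectivityErrorCount: 0});
  metricsCollector.onOperationComplete(resolvedProjectId, {});
}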
+ // Here is an example of what might happen in a method call: + logger.log('1. The operation starts'); + metricsCollector.onOperationStart(); + logger.log('2. The attempt starts.'); + metricsCollector.onAttemptStart(); + logger.log('3. Client receives status information.'); + metricsCollector.onStatusReceived(status); + logger.log('4. Client receives metadata.'); + metricsCollector.onMetadataReceived( + this.bigtable.projectId, + createMetadata('101') + ); + logger.log('5. Client receives first row.'); + metricsCollector.onResponse(this.bigtable.projectId); + logger.log('6. Client receives metadata.'); + metricsCollector.onMetadataReceived( + this.bigtable.projectId, + createMetadata('102') + ); + logger.log('7. Client receives second row.'); + metricsCollector.onResponse(this.bigtable.projectId); + logger.log('8. A transient error occurs.'); + metricsCollector.onAttemptComplete(this.bigtable.projectId, { + streamingOperation: StreamingState.STREAMING, + attemptStatus: grpc.status.DEADLINE_EXCEEDED, + connectivityErrorCount: 1, + }); + logger.log('9. After a timeout, the second attempt is made.'); + metricsCollector.onAttemptStart(); + logger.log('10. Client receives status information.'); + metricsCollector.onStatusReceived(status); + logger.log('11. Client receives metadata.'); + metricsCollector.onMetadataReceived( + this.bigtable.projectId, + createMetadata('103') + ); + logger.log('12. Client receives third row.'); + metricsCollector.onResponse(this.bigtable.projectId); + logger.log('13. Client receives metadata.'); + metricsCollector.onMetadataReceived( + this.bigtable.projectId, + createMetadata('104') + ); + logger.log('14. Client receives fourth row.'); + metricsCollector.onResponse(this.bigtable.projectId); + logger.log('15. User reads row 1'); + logger.log('16. Stream ends, operation completes'); + metricsCollector.onAttemptComplete(this.bigtable.projectId, { + attemptStatus: grpc.status.OK, + streamingOperation: StreamingState.STREAMING, + connectivityErrorCount: 1, + }); + metricsCollector.onOperationComplete(this.bigtable.projectId, { + finalOperationStatus: grpc.status.OK, + streamingOperation: StreamingState.STREAMING, }); - }); + } } } const table = new FakeTable(); From b6f130258101446a7fc65c02a36039184ad9d5ed Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 5 Feb 2025 16:44:05 -0500 Subject: [PATCH 137/448] Remove the GCPMetricsHandler file --- .../gcp-metrics-handler.ts | 208 ------------------ 1 file changed, 208 deletions(-) delete mode 100644 src/client-side-metrics/gcp-metrics-handler.ts diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts deleted file mode 100644 index e1a462bb9..000000000 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import { - IMetricsHandler, - OnAttemptCompleteMetrics, - OnOperationCompleteMetrics, -} from './metrics-handler'; -import * as Resources from '@opentelemetry/resources'; -import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; -import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; -import { - OnAttemptCompleteAttributes, - OnOperationCompleteAttributes, -} from '../../common/client-side-metrics-attributes'; -import {View} from '@opentelemetry/sdk-metrics'; -const { - Aggregation, - ExplicitBucketHistogramAggregation, - MeterProvider, - Histogram, - Counter, - PeriodicExportingMetricReader, -} = require('@opentelemetry/sdk-metrics'); - -/** - * A collection of OpenTelemetry metric instruments used to record - * Bigtable client-side metrics. - */ -interface Metrics { - operationLatencies: typeof Histogram; - attemptLatencies: typeof Histogram; - retryCount: typeof Counter; - applicationBlockingLatencies: typeof Histogram; - firstResponseLatencies: typeof Histogram; - serverLatencies: typeof Histogram; - connectivityErrorCount: typeof Histogram; - clientBlockingLatencies: typeof Histogram; -} - -/** - * A metrics handler implementation that uses OpenTelemetry to export metrics to Google Cloud Monitoring. - * This handler records metrics such as operation latency, attempt latency, retry count, and more, - * associating them with relevant attributes for detailed analysis in Cloud Monitoring. - */ -export class GCPMetricsHandler implements IMetricsHandler { - private initialized = false; - private otelMetrics?: Metrics; - - /** - * Initializes the OpenTelemetry metrics instruments if they haven't been already. - * Creates and registers metric instruments (histograms and counters) for various Bigtable client metrics. - * Sets up a MeterProvider and configures a PeriodicExportingMetricReader for exporting metrics to Cloud Monitoring. - * @param {string} [projectId] The Google Cloud project ID. Used for metric export. If not provided, it will attempt to detect it from the environment. - */ - private initialize(projectId?: string) { - if (!this.initialized) { - this.initialized = true; - const sumAggregation = Aggregation.Sum(); - const histogramAggregation = new ExplicitBucketHistogramAggregation([ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, - 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, - 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, - ]); - const viewList = [ - 'operation_latencies', - 'first_response_latencies', - 'attempt_latencies', - 'retry_count', - 'server_latencies', - 'connectivity_error_count', - 'application_latencies', - 'throttling_latencies', - ].map( - name => - new View({ - instrumentName: name, - name, - aggregation: name.slice(-9) ? sumAggregation : histogramAggregation, - }) - ); - const meterProvider = new MeterProvider({ - views: viewList, - resource: new Resources.Resource({ - 'service.name': 'bigtable-metrics', - }).merge(new ResourceUtil.GcpDetectorSync().detect()), - readers: [ - // Register the exporter - new PeriodicExportingMetricReader({ - // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by - // Cloud Monitoring. 
- exportIntervalMillis: 100_000, - exporter: new MetricExporter({ - projectId, - }), - }), - ], - }); - const meter = meterProvider.getMeter('bigtable.googleapis.com'); - this.otelMetrics = { - operationLatencies: meter.createHistogram('operation_latencies', { - description: - "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", - }), - attemptLatencies: meter.createHistogram('attempt_latencies', { - description: - 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', - unit: 'ms', - }), - retryCount: meter.createCounter('retry_count', { - description: - 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', - }), - applicationBlockingLatencies: meter.createHistogram( - 'application_blocking_latencies', - { - description: - 'The time from when the client receives the response to a request until the application reads the response. This metric is most relevant for ReadRows requests. The start and stop times for this metric depend on the way that you send the read request; see Application blocking latencies timer examples for details.', - unit: 'ms', - } - ), - firstResponseLatencies: meter.createHistogram( - 'first_response_latencies', - { - description: - 'Latencies from when a client sends a request and receives the first row of the response.', - unit: 'ms', - } - ), - serverLatencies: meter.createHistogram('server_latencies', { - description: - 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', - }), - connectivityErrorCount: meter.createHistogram( - 'connectivity_error_count', - { - description: - "The number of requests that failed to reach Google's network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.", - } - ), - clientBlockingLatencies: meter.createHistogram( - 'client_blocking_latencies', - { - description: - 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', - unit: 'ms', - } - ), - }; - } - } - - /** - * Records metrics for a completed Bigtable operation. - * This method records the operation latency and retry count, associating them with provided attributes. - * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. - * @param {OnOperationCompleteAttributes} attributes Attributes associated with the completed operation. - */ - onOperationComplete( - metrics: OnOperationCompleteMetrics, - attributes: OnOperationCompleteAttributes - ) { - this.initialize(); - this.otelMetrics?.operationLatencies.record( - metrics.operationLatency, - attributes - ); - this.otelMetrics?.retryCount.add(metrics.retryCount, attributes); - this.otelMetrics?.firstResponseLatencies.record( - metrics.firstResponseLatency, - attributes - ); - } - - /** - * Records metrics for a completed attempt of a Bigtable operation. - * This method records attempt latency, connectivity error count, server latency, and first response latency, - * along with the provided attributes. 
- * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. - * @param {OnAttemptCompleteAttributes} attributes Attributes associated with the completed attempt. - */ - onAttemptComplete( - metrics: OnAttemptCompleteMetrics, - attributes: OnAttemptCompleteAttributes - ) { - this.initialize(); - this.otelMetrics?.attemptLatencies.record( - metrics.attemptLatency, - attributes - ); - this.otelMetrics?.connectivityErrorCount.record( - metrics.connectivityErrorCount, - attributes - ); - this.otelMetrics?.serverLatencies.record(metrics.serverLatency, attributes); - } -} From 1ae82ff00e12af0b306175bd7133ffa4d08fe871 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Feb 2025 09:41:14 -0500 Subject: [PATCH 138/448] Change location of the client-side-metrics-attributes file --- common/test-metrics-handler.ts | 2 +- .../client-side-metrics}/client-side-metrics-attributes.ts | 0 src/client-side-metrics/metrics-handler.ts | 2 +- src/client-side-metrics/operation-metrics-collector.ts | 2 +- test/metrics-collector/metrics-collector.ts | 2 +- 5 files changed, 4 insertions(+), 4 deletions(-) rename {common => src/client-side-metrics}/client-side-metrics-attributes.ts (100%) diff --git a/common/test-metrics-handler.ts b/common/test-metrics-handler.ts index 9ffabd7a0..c9c5507df 100644 --- a/common/test-metrics-handler.ts +++ b/common/test-metrics-handler.ts @@ -20,7 +20,7 @@ import { import { OnAttemptCompleteAttributes, OnOperationCompleteAttributes, -} from './client-side-metrics-attributes'; +} from '../src/client-side-metrics/client-side-metrics-attributes'; /** * A test implementation of the IMetricsHandler interface. Used for testing purposes. diff --git a/common/client-side-metrics-attributes.ts b/src/client-side-metrics/client-side-metrics-attributes.ts similarity index 100% rename from common/client-side-metrics-attributes.ts rename to src/client-side-metrics/client-side-metrics-attributes.ts diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index 051b65394..38a98ae59 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -15,7 +15,7 @@ import { OnAttemptCompleteAttributes, OnOperationCompleteAttributes, -} from '../../common/client-side-metrics-attributes'; +} from './client-side-metrics-attributes'; /** * The interfaces below use undefined instead of null to indicate a metric is diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index e2893de5f..502a11ad0 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -20,7 +20,7 @@ import { MethodName, OnOperationCompleteAttributes, StreamingState, -} from '../../common/client-side-metrics-attributes'; +} from './client-side-metrics-attributes'; /** * An interface representing a Date-like object.
Provides a `getTime` method diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index dd8130389..335a288cb 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -21,7 +21,7 @@ import {OperationMetricsCollector} from '../../src/client-side-metrics/operation import { MethodName, StreamingState, -} from '../../common/client-side-metrics-attributes'; +} from '../../src/client-side-metrics/client-side-metrics-attributes'; import {grpc} from 'google-gax'; /** From 3ee5604464d250060bf44541ecfe86f54a430b90 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Feb 2025 09:42:57 -0500 Subject: [PATCH 139/448] Change common test utilities folder name --- {common => test-common}/logger.ts | 0 {common => test-common}/test-date-provider.ts | 0 {common => test-common}/test-metrics-handler.ts | 0 test/metrics-collector/metrics-collector.ts | 4 ++-- 4 files changed, 2 insertions(+), 2 deletions(-) rename {common => test-common}/logger.ts (100%) rename {common => test-common}/test-date-provider.ts (100%) rename {common => test-common}/test-metrics-handler.ts (100%) diff --git a/common/logger.ts b/test-common/logger.ts similarity index 100% rename from common/logger.ts rename to test-common/logger.ts diff --git a/common/test-date-provider.ts b/test-common/test-date-provider.ts similarity index 100% rename from common/test-date-provider.ts rename to test-common/test-date-provider.ts diff --git a/common/test-metrics-handler.ts b/test-common/test-metrics-handler.ts similarity index 100% rename from common/test-metrics-handler.ts rename to test-common/test-metrics-handler.ts diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 335a288cb..48f1327fd 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -13,10 +13,10 @@ // limitations under the License. import {describe} from 'mocha'; -import {TestDateProvider} from '../../common/test-date-provider'; +import {TestDateProvider} from '../../test-common/test-date-provider'; import * as assert from 'assert'; import * as fs from 'fs'; -import {TestMetricsHandler} from '../../common/test-metrics-handler'; +import {TestMetricsHandler} from '../../test-common/test-metrics-handler'; import {OperationMetricsCollector} from '../../src/client-side-metrics/operation-metrics-collector'; import { MethodName, From 124ed30cbb79768bc293d55210362fed0c618531 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Feb 2025 10:04:05 -0500 Subject: [PATCH 140/448] Remove aliases for grpc status --- .../client-side-metrics-attributes.ts | 27 +++++-------------- .../operation-metrics-collector.ts | 9 +++---- 2 files changed, 10 insertions(+), 26 deletions(-) diff --git a/src/client-side-metrics/client-side-metrics-attributes.ts b/src/client-side-metrics/client-side-metrics-attributes.ts index 5f2adf7e2..1e5c04119 100644 --- a/src/client-side-metrics/client-side-metrics-attributes.ts +++ b/src/client-side-metrics/client-side-metrics-attributes.ts @@ -39,7 +39,7 @@ export enum StreamingState { * These attributes provide context about the Bigtable environment and the completed operation. 
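In miniature, the effect of removing the aliases: attribute fields are typed against grpc.status directly, and call sites keep passing the same enum values with one less indirection. A single-field sketch:

import {grpc} from 'google-gax';

interface AttemptAttributesSketch {
  attemptStatus: grpc.status; // previously the AttemptStatus alias
}

const ok: AttemptAttributesSketch = {attemptStatus: grpc.status.OK};
const timedOut: AttemptAttributesSketch = {
  attemptStatus: grpc.status.DEADLINE_EXCEEDED,
};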
*/ interface OperationLatencyAttributes extends StandardAttributes { - finalOperationStatus: FinalOperationStatus; + finalOperationStatus: grpc.status; streamingOperation: StreamingState; } @@ -48,7 +48,7 @@ interface OperationLatencyAttributes extends StandardAttributes { * These attributes provide context about the Bigtable environment, the specific attempt, and whether the operation was streaming. */ interface AttemptLatencyAttributes extends StandardAttributes { - attemptStatus: AttemptStatus; + attemptStatus: grpc.status; streamingOperation: StreamingState; } @@ -57,7 +57,7 @@ interface AttemptLatencyAttributes extends StandardAttributes { * provide context about the Bigtable environment and the final status of the operation. */ interface RetryCountAttributes extends StandardAttributes { - finalOperationStatus: FinalOperationStatus; + finalOperationStatus: grpc.status; } /** @@ -71,7 +71,7 @@ type ApplicationBlockingLatenciesAttributes = StandardAttributes; * These attributes provide context about the Bigtable environment and the final status of the operation. */ interface FirstResponseLatencyAttributes extends StandardAttributes { - finalOperationStatus: FinalOperationStatus; + finalOperationStatus: grpc.status; } /** @@ -79,7 +79,7 @@ interface FirstResponseLatencyAttributes extends StandardAttributes { * These attributes provide context about the Bigtable environment, the specific attempt, and whether the operation was streaming. */ interface ServerLatenciesAttributes extends StandardAttributes { - attemptStatus: AttemptStatus; + attemptStatus: grpc.status; streamingOperation: StreamingState; } @@ -88,7 +88,7 @@ interface ServerLatenciesAttributes extends StandardAttributes { * These attributes provide context about the Bigtable environment and the status of the attempt. */ interface ConnectivityErrorCountAttributes extends StandardAttributes { - attemptStatus: AttemptStatus; + attemptStatus: grpc.status; } /** @@ -97,21 +97,6 @@ interface ConnectivityErrorCountAttributes extends StandardAttributes { */ type ClientBlockingLatenciesAttributes = StandardAttributes; -/** - * The final status of a Bigtable operation. This represents the ultimate result - * of the operation, regardless of individual attempt statuses. It's represented - * as a gRPC status code. See the `google-gax` library's documentation on - * gRPC status codes for more information on specific values. - */ -export type FinalOperationStatus = grpc.status; - -/** - * The status of a single attempt of a Bigtable operation. This is represented as a - * gRPC status code. See the `google-gax` library's documentation on gRPC status - * codes for more information on specific values. - */ -export type AttemptStatus = grpc.status; - /** * Attributes associated with the completion of a Bigtable operation. These * attributes provide context about the Bigtable environment, the completed diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 502a11ad0..1034c826b 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -15,12 +15,11 @@ import * as fs from 'fs'; import {IMetricsHandler} from './metrics-handler'; import { - AttemptStatus, - FinalOperationStatus, MethodName, OnOperationCompleteAttributes, StreamingState, } from './client-side-metrics-attributes'; +import {grpc} from 'google-gax'; /** * An interface representing a Date-like object. 
Provides a `getTime` method @@ -85,7 +84,7 @@ interface OnAttemptCompleteInfo { /** * The attempt status of the operation. */ - attemptStatus: AttemptStatus; + attemptStatus: grpc.status; } /** @@ -94,7 +93,7 @@ interface OnAttemptCompleteInfo { * part of a streaming operation. */ interface AttemptOnlyAttributes { - attemptStatus: AttemptStatus; + attemptStatus: grpc.status; streamingOperation: StreamingState; } @@ -102,7 +101,7 @@ interface AttemptOnlyAttributes { * Information about a Bigtable operation to be recorded in client side metrics. */ interface OperationOnlyAttributes { - finalOperationStatus: FinalOperationStatus; + finalOperationStatus: grpc.status; streamingOperation: StreamingState; } From ef36a6fa056be739ebfadee67c52f22dcb01b9a5 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Feb 2025 10:14:17 -0500 Subject: [PATCH 141/448] Should be MethodName type --- src/client-side-metrics/client-side-metrics-attributes.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client-side-metrics/client-side-metrics-attributes.ts b/src/client-side-metrics/client-side-metrics-attributes.ts index 1e5c04119..0672f6f1c 100644 --- a/src/client-side-metrics/client-side-metrics-attributes.ts +++ b/src/client-side-metrics/client-side-metrics-attributes.ts @@ -25,7 +25,7 @@ interface StandardAttributes { cluster?: string; zone?: string; appProfileId?: string; - methodName: string; + methodName: MethodName; clientName: string; } From 68292248bbf9549f9a843eed3cb45a84ecc07a2d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Feb 2025 10:38:38 -0500 Subject: [PATCH 142/448] Rename variable as it expands beyond latency --- src/client-side-metrics/operation-metrics-collector.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 1034c826b..cedb67c2e 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -333,7 +333,7 @@ export class OperationMetricsCollector { const totalTime = endTime.getTime() - this.operationStartTime.getTime(); { // This block records operation latency metrics. - const operationLatencyAttributes = this.getOperationAttributes( + const operationAttributes = this.getOperationAttributes( projectId, info ); @@ -346,7 +346,7 @@ export class OperationMetricsCollector { if (metricsHandler.onOperationComplete) { metricsHandler.onOperationComplete( metrics, - operationLatencyAttributes + operationAttributes ); } }); From dd603f180e013ebd45f842277166257365351d2e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Feb 2025 10:59:42 -0500 Subject: [PATCH 143/448] Remove private methods for building attributes --- .../operation-metrics-collector.ts | 94 +++++-------------- .../metrics-collector/typical-method-call.txt | 2 +- 2 files changed, 26 insertions(+), 70 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index cedb67c2e..488748329 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -176,66 +176,6 @@ export class OperationMetricsCollector { } } - /** - * Assembles the basic attributes for metrics. These attributes provide - * context about the Bigtable environment and the operation being performed. - * @param {string} projectId The Google Cloud project ID. 
- * @returns {Attributes} An object containing the basic attributes. - */ - private getBasicAttributes(projectId: string) { - return { - projectId, - instanceId: this.tabularApiSurface.instance.id, - table: this.tabularApiSurface.id, - cluster: this.cluster, - zone: this.zone, - appProfileId: this.tabularApiSurface.bigtable.appProfileId, - methodName: this.methodName, - clientName: `nodejs-bigtable/${version}`, - }; - } - - /** - * Assembles the attributes for an entire operation. These attributes - * provide context about the Bigtable environment, the operation being - * performed, and the final status of the operation. Includes whether the - * operation was a streaming operation or not. - * - * @param {string} projectId The Google Cloud project ID. - * @param {OperationOnlyAttributes} operationOnlyAttributes The attributes of the operation. - * @returns {OnOperationCompleteAttributes} An object containing the attributes - * for operation latency metrics. - */ - private getOperationAttributes( - projectId: string, - operationOnlyAttributes: OperationOnlyAttributes - ): OnOperationCompleteAttributes { - return Object.assign( - operationOnlyAttributes, - this.getBasicAttributes(projectId) - ); - } - - /** - * Assembles the attributes for attempt metrics. These attributes provide context - * about the Bigtable environment, the operation being performed, the status - * of the attempt and whether the operation was a streaming operation or not. - * - * @param {string} projectId The Google Cloud project ID. - * @param {AttemptOnlyAttributes} attemptOnlyAttributes The attributes of the attempt. - * @returns {OnAttemptCompleteAttributes} The attributes all metrics recorded - * in the onAttemptComplete handler. - */ - private getAttemptAttributes( - projectId: string, - attemptOnlyAttributes: AttemptOnlyAttributes - ) { - return Object.assign( - attemptOnlyAttributes, - this.getBasicAttributes(projectId) - ); - } - /** * Called when the operation starts. Records the start time. */ @@ -265,7 +205,19 @@ export class OperationMetricsCollector { this.attemptCount++; const endTime = this.dateProvider.getDate(); if (projectId && this.attemptStartTime) { - const attributes = this.getAttemptAttributes(projectId, info); + const attributes = { + streamingOperation: info.streamingOperation, + attemptStatus: info.attemptStatus, + connectivityErrorCount: info.connectivityErrorCount, + projectId, + instanceId: this.tabularApiSurface.instance.id, + table: this.tabularApiSurface.id, + cluster: this.cluster, + zone: this.zone, + appProfileId: this.tabularApiSurface.bigtable.appProfileId, + methodName: this.methodName, + clientName: `nodejs-bigtable/${version}`, + }; const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); this.metricsHandlers.forEach(metricsHandler => { if (metricsHandler.onAttemptComplete) { @@ -332,11 +284,18 @@ export class OperationMetricsCollector { if (projectId && this.operationStartTime) { const totalTime = endTime.getTime() - this.operationStartTime.getTime(); { - // This block records operation latency metrics. 
- const operationAttributes = this.getOperationAttributes( + const operationAttributes = { + finalOperationStatus: info.finalOperationStatus, + streamingOperation: info.streamingOperation, projectId, - info - ); + instanceId: this.tabularApiSurface.instance.id, + table: this.tabularApiSurface.id, + cluster: this.cluster, + zone: this.zone, + appProfileId: this.tabularApiSurface.bigtable.appProfileId, + methodName: this.methodName, + clientName: `nodejs-bigtable/${version}`, + }; const metrics = { operationLatency: totalTime, retryCount: this.attemptCount - 1, @@ -344,10 +303,7 @@ export class OperationMetricsCollector { }; this.metricsHandlers.forEach(metricsHandler => { if (metricsHandler.onOperationComplete) { - metricsHandler.onOperationComplete( - metrics, - operationAttributes - ); + metricsHandler.onOperationComplete(metrics, operationAttributes); } }); } diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index 921270cf4..28bde2266 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -25,7 +25,7 @@ getDate call returns 5000 ms getDate call returns 6000 ms Recording parameters for onAttemptComplete: metrics: {"attemptLatency":1000,"serverLatency":103,"connectivityErrorCount":1} -attributes: {"attemptStatus":0,"streamingOperation":"streaming","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} +attributes: {"streamingOperation":"streaming","attemptStatus":0,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} getDate call returns 7000 ms Recording parameters for onOperationComplete: metrics: {"operationLatency":6000,"retryCount":1,"firstResponseLatency":2000} From b493c0defd70fdede128d3ed10f1a8679bc60382 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Feb 2025 11:33:20 -0500 Subject: [PATCH 144/448] Replace the logger class with a simple object --- test-common/test-date-provider.ts | 9 ++++- test-common/test-metrics-handler.ts | 19 ++++++---- test/metrics-collector/metrics-collector.ts | 41 ++++++++++----------- 3 files changed, 38 insertions(+), 31 deletions(-) diff --git a/test-common/test-date-provider.ts b/test-common/test-date-provider.ts index 71ef66aee..533b6b148 100644 --- a/test-common/test-date-provider.ts +++ b/test-common/test-date-provider.ts @@ -41,8 +41,13 @@ class TestDateLike { * a deterministic series of fake dates, with each call to getDate() returning a date 1000ms later than the last. * Logs each date value returned for verification purposes. */ -export class TestDateProvider extends WithLogger { +export class TestDateProvider { private dateCounter = 0; + private messages: {value: string}; + + constructor(messages: {value: string}) { + this.messages = messages; + } /** * Returns a new fake date 1000ms later than the last. Logs the date for test verification. * @returns {TestDateLike} A fake date object. @@ -50,7 +55,7 @@ export class TestDateProvider extends WithLogger { getDate() { // The test assumes exactly 1s passes between each getDate call. 
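    // A hedged usage sketch (test wiring assumed purely for illustration):
    //   const messages = {value: ''};
    //   const provider = new TestDateProvider(messages);
    //   provider.getDate().getTime(); // 1000
    //   provider.getDate().getTime(); // 2000, and messages.value now holds
    //   both "getDate call returns ... ms" log lines.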
this.dateCounter = this.dateCounter + 1000; - this.logger.log(`getDate call returns ${this.dateCounter.toString()} ms`); + this.messages.value += `getDate call returns ${this.dateCounter.toString()} ms\n`; return new TestDateLike(this.dateCounter); } } diff --git a/test-common/test-metrics-handler.ts b/test-common/test-metrics-handler.ts index c9c5507df..970459807 100644 --- a/test-common/test-metrics-handler.ts +++ b/test-common/test-metrics-handler.ts @@ -26,7 +26,12 @@ import { * A test implementation of the IMetricsHandler interface. Used for testing purposes. * It logs the metrics and attributes received by the onOperationComplete and onAttemptComplete methods. */ -export class TestMetricsHandler extends WithLogger { +export class TestMetricsHandler { + private messages: {value: string}; + + constructor(messages: {value: string}) { + this.messages = messages; + } /** * Logs the metrics and attributes received for an operation completion. * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. @@ -37,9 +42,9 @@ export class TestMetricsHandler extends WithLogger { attributes: OnOperationCompleteAttributes ) { attributes.clientName = 'nodejs-bigtable'; - this.logger.log('Recording parameters for onOperationComplete:'); - this.logger.log(`metrics: ${JSON.stringify(metrics)}`); - this.logger.log(`attributes: ${JSON.stringify(attributes)}`); + this.messages.value += 'Recording parameters for onOperationComplete:\n'; + this.messages.value += `metrics: ${JSON.stringify(metrics)}\n`; + this.messages.value += `attributes: ${JSON.stringify(attributes)}\n`; } /** @@ -52,8 +57,8 @@ export class TestMetricsHandler extends WithLogger { attributes: OnAttemptCompleteAttributes ) { attributes.clientName = 'nodejs-bigtable'; - this.logger.log('Recording parameters for onAttemptComplete:'); - this.logger.log(`metrics: ${JSON.stringify(metrics)}`); - this.logger.log(`attributes: ${JSON.stringify(attributes)}`); + this.messages.value += 'Recording parameters for onAttemptComplete:\n'; + this.messages.value += `metrics: ${JSON.stringify(metrics)}\n`; + this.messages.value += `attributes: ${JSON.stringify(attributes)}\n`; } } diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 48f1327fd..4f933d007 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -66,9 +66,9 @@ class FakeInstance { id = 'fakeInstanceId'; } -describe('Bigtable/MetricsCollector', () => { +describe.only('Bigtable/MetricsCollector', () => { it('should record the right metrics with a typical method call', async () => { - const logger = new Logger(); + const logger = {value: ''}; const metricsHandlers = [new TestMetricsHandler(logger)]; class FakeTable { id = 'fakeTableId'; @@ -105,52 +105,52 @@ describe('Bigtable/MetricsCollector', () => { // In this method we simulate a series of events that might happen // when a user calls one of the Table methods. // Here is an example of what might happen in a method call: - logger.log('1. The operation starts'); + logger.value += '1. The operation starts\n'; metricsCollector.onOperationStart(); - logger.log('2. The attempt starts.'); + logger.value += '2. The attempt starts.\n'; metricsCollector.onAttemptStart(); - logger.log('3. Client receives status information.'); + logger.value += '3. Client receives status information.\n'; metricsCollector.onStatusReceived(status); - logger.log('4. Client receives metadata.'); + logger.value += '4. 
Client receives metadata.\n'; metricsCollector.onMetadataReceived( this.bigtable.projectId, createMetadata('101') ); - logger.log('5. Client receives first row.'); + logger.value += '5. Client receives first row.\n'; metricsCollector.onResponse(this.bigtable.projectId); - logger.log('6. Client receives metadata.'); + logger.value += '6. Client receives metadata.\n'; metricsCollector.onMetadataReceived( this.bigtable.projectId, createMetadata('102') ); - logger.log('7. Client receives second row.'); + logger.value += '7. Client receives second row.\n'; metricsCollector.onResponse(this.bigtable.projectId); - logger.log('8. A transient error occurs.'); + logger.value += '8. A transient error occurs.\n'; metricsCollector.onAttemptComplete(this.bigtable.projectId, { streamingOperation: StreamingState.STREAMING, attemptStatus: grpc.status.DEADLINE_EXCEEDED, connectivityErrorCount: 1, }); - logger.log('9. After a timeout, the second attempt is made.'); + logger.value += '9. After a timeout, the second attempt is made.\n'; metricsCollector.onAttemptStart(); - logger.log('10. Client receives status information.'); + logger.value += '10. Client receives status information.\n'; metricsCollector.onStatusReceived(status); - logger.log('11. Client receives metadata.'); + logger.value += '11. Client receives metadata.\n'; metricsCollector.onMetadataReceived( this.bigtable.projectId, createMetadata('103') ); - logger.log('12. Client receives third row.'); + logger.value += '12. Client receives third row.\n'; metricsCollector.onResponse(this.bigtable.projectId); - logger.log('13. Client receives metadata.'); + logger.value += '13. Client receives metadata.\n'; metricsCollector.onMetadataReceived( this.bigtable.projectId, createMetadata('104') ); - logger.log('14. Client receives fourth row.'); + logger.value += '14. Client receives fourth row.\n'; metricsCollector.onResponse(this.bigtable.projectId); - logger.log('15. User reads row 1'); - logger.log('16. Stream ends, operation completes'); + logger.value += '15. User reads row 1\n'; + logger.value += '16. 
Stream ends, operation completes\n'; metricsCollector.onAttemptComplete(this.bigtable.projectId, { attemptStatus: grpc.status.OK, streamingOperation: StreamingState.STREAMING, @@ -170,9 +170,6 @@ describe('Bigtable/MetricsCollector', () => { 'utf8' ); // Ensure events occurred in the right order here: - assert.strictEqual( - logger.getMessages().join('\n') + '\n', - expectedOutput.replace(/\r/g, '') - ); + assert.strictEqual(logger.value, expectedOutput.replace(/\r/g, '')); }); }); From 2f19f31b08ed250ac587491f6108e91d670125d8 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Feb 2025 11:33:52 -0500 Subject: [PATCH 145/448] Remove only --- test/metrics-collector/metrics-collector.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 4f933d007..0ce90962e 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -66,7 +66,7 @@ class FakeInstance { id = 'fakeInstanceId'; } -describe.only('Bigtable/MetricsCollector', () => { +describe('Bigtable/MetricsCollector', () => { it('should record the right metrics with a typical method call', async () => { const logger = {value: ''}; const metricsHandlers = [new TestMetricsHandler(logger)]; From dfe7d579e582fbc7f785c549fa1df76cc3f4e821 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Feb 2025 11:39:20 -0500 Subject: [PATCH 146/448] Remove the logger classes Replace them with a simpler object --- test-common/logger.ts | 35 --------------------- test-common/test-date-provider.ts | 2 -- test-common/test-metrics-handler.ts | 1 - test/metrics-collector/metrics-collector.ts | 23 -------------- 4 files changed, 61 deletions(-) delete mode 100644 test-common/logger.ts diff --git a/test-common/logger.ts b/test-common/logger.ts deleted file mode 100644 index 284005350..000000000 --- a/test-common/logger.ts +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * A simple logger interface for logging messages. Implementations of this interface - * can provide various logging mechanisms (e.g., console logging, file logging, etc.). - */ -interface ILogger { - log(message: string): void; -} - -/** - * An abstract base class that provides a logger instance. Subclasses can use this logger - * for logging messages. - */ -export abstract class WithLogger { - protected logger: ILogger; - /** - * @param {ILogger} logger The logger instance to be used by this object. - */ - constructor(logger: ILogger) { - this.logger = logger; - } -} diff --git a/test-common/test-date-provider.ts b/test-common/test-date-provider.ts index 533b6b148..8eaa7b38c 100644 --- a/test-common/test-date-provider.ts +++ b/test-common/test-date-provider.ts @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-import {WithLogger} from './logger'; - /** * A test implementation of a Date-like object. Used for testing purposes. It provides a * getTime method that returns a pre-determined fake date value, allowing for diff --git a/test-common/test-metrics-handler.ts b/test-common/test-metrics-handler.ts index 970459807..8166155b9 100644 --- a/test-common/test-metrics-handler.ts +++ b/test-common/test-metrics-handler.ts @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -import {WithLogger} from './logger'; import { OnAttemptCompleteMetrics, OnOperationCompleteMetrics, diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 0ce90962e..5c158d28c 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -24,29 +24,6 @@ import { } from '../../src/client-side-metrics/client-side-metrics-attributes'; import {grpc} from 'google-gax'; -/** - * A basic logger class that stores log messages in an array. Useful for testing. - */ -class Logger { - private messages: string[] = []; - - /** - * Logs a message by adding it to the internal message array. - * @param {string} message The message to be logged. - */ - log(message: string) { - this.messages.push(message); - } - - /** - * Retrieves all logged messages. - * @returns {string[]} An array of logged messages. - */ - getMessages() { - return this.messages; - } -} - /** * A fake implementation of the Bigtable client for testing purposes. Provides a * metricsTracerFactory and a stubbed projectId method. From 02d752ab4a27d72f821e9f5184f77e5d3c2e1765 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Feb 2025 16:25:04 -0500 Subject: [PATCH 147/448] Add stubs --- src/client-side-metrics/exporter.ts | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 src/client-side-metrics/exporter.ts diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts new file mode 100644 index 000000000..107a1d700 --- /dev/null +++ b/src/client-side-metrics/exporter.ts @@ -0,0 +1,9 @@ +export function transformInExport(args: {}) { + +} + +export class CloudMonitoringExporter { + export() { + + } +} From 19d1d81b94d4eb0c571005d5652a495c35bcbafa Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Feb 2025 16:26:13 -0500 Subject: [PATCH 148/448] Revert "Remove the GCPMetricsHandler file" This reverts commit b6f130258101446a7fc65c02a36039184ad9d5ed. --- .../gcp-metrics-handler.ts | 208 ++++++++++++++++++ 1 file changed, 208 insertions(+) create mode 100644 src/client-side-metrics/gcp-metrics-handler.ts diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts new file mode 100644 index 000000000..e1a462bb9 --- /dev/null +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -0,0 +1,208 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
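+// For orientation, a minimal no-op implementation of the IMetricsHandler
+// surface used below might look like this (a sketch, not part of the library):
+//
+//   class NoopMetricsHandler implements IMetricsHandler {
+//     onOperationComplete(
+//       metrics: OnOperationCompleteMetrics,
+//       attributes: OnOperationCompleteAttributes
+//     ) {}
+//     onAttemptComplete(
+//       metrics: OnAttemptCompleteMetrics,
+//       attributes: OnAttemptCompleteAttributes
+//     ) {}
+//   }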
+
+import {
+  IMetricsHandler,
+  OnAttemptCompleteMetrics,
+  OnOperationCompleteMetrics,
+} from './metrics-handler';
+import * as Resources from '@opentelemetry/resources';
+import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util';
+import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter';
+import {
+  OnAttemptCompleteAttributes,
+  OnOperationCompleteAttributes,
+} from '../../common/client-side-metrics-attributes';
+import {View} from '@opentelemetry/sdk-metrics';
+const {
+  Aggregation,
+  ExplicitBucketHistogramAggregation,
+  MeterProvider,
+  Histogram,
+  Counter,
+  PeriodicExportingMetricReader,
+} = require('@opentelemetry/sdk-metrics');
+
+/**
+ * A collection of OpenTelemetry metric instruments used to record
+ * Bigtable client-side metrics.
+ */
+interface Metrics {
+  operationLatencies: typeof Histogram;
+  attemptLatencies: typeof Histogram;
+  retryCount: typeof Counter;
+  applicationBlockingLatencies: typeof Histogram;
+  firstResponseLatencies: typeof Histogram;
+  serverLatencies: typeof Histogram;
+  connectivityErrorCount: typeof Histogram;
+  clientBlockingLatencies: typeof Histogram;
+}
+
+/**
+ * A metrics handler implementation that uses OpenTelemetry to export metrics to Google Cloud Monitoring.
+ * This handler records metrics such as operation latency, attempt latency, retry count, and more,
+ * associating them with relevant attributes for detailed analysis in Cloud Monitoring.
+ */
+export class GCPMetricsHandler implements IMetricsHandler {
+  private initialized = false;
+  private otelMetrics?: Metrics;
+
+  /**
+   * Initializes the OpenTelemetry metrics instruments if they haven't been already.
+   * Creates and registers metric instruments (histograms and counters) for various Bigtable client metrics.
+   * Sets up a MeterProvider and configures a PeriodicExportingMetricReader for exporting metrics to Cloud Monitoring.
+   * @param {string} [projectId] The Google Cloud project ID. Used for metric export. If not provided, it will attempt to detect it from the environment.
+   */
+  private initialize(projectId?: string) {
+    if (!this.initialized) {
+      this.initialized = true;
+      const sumAggregation = Aggregation.Sum();
+      const histogramAggregation = new ExplicitBucketHistogramAggregation([
+        0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20,
+        25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650,
+        800, 1000, 2000, 5000, 10000, 20000, 50000, 100000,
+      ]);
+      const viewList = [
+        'operation_latencies',
+        'first_response_latencies',
+        'attempt_latencies',
+        'retry_count',
+        'server_latencies',
+        'connectivity_error_count',
+        'application_latencies',
+        'throttling_latencies',
+      ].map(
+        name =>
+          new View({
+            instrumentName: name,
+            name,
+            // Count-style metrics are exported as sums; latency metrics use
+            // the explicit histogram buckets above.
+            aggregation: name.endsWith('_count')
+              ? sumAggregation
+              : histogramAggregation,
+          })
+      );
+      const meterProvider = new MeterProvider({
+        views: viewList,
+        resource: new Resources.Resource({
+          'service.name': 'bigtable-metrics',
+        }).merge(new ResourceUtil.GcpDetectorSync().detect()),
+        readers: [
+          // Register the exporter
+          new PeriodicExportingMetricReader({
+            // Export metrics every 100 seconds. 5 seconds is the smallest sample period allowed by
+            // Cloud Monitoring.
+ exportIntervalMillis: 100_000, + exporter: new MetricExporter({ + projectId, + }), + }), + ], + }); + const meter = meterProvider.getMeter('bigtable.googleapis.com'); + this.otelMetrics = { + operationLatencies: meter.createHistogram('operation_latencies', { + description: + "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + }), + attemptLatencies: meter.createHistogram('attempt_latencies', { + description: + 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', + unit: 'ms', + }), + retryCount: meter.createCounter('retry_count', { + description: + 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', + }), + applicationBlockingLatencies: meter.createHistogram( + 'application_blocking_latencies', + { + description: + 'The time from when the client receives the response to a request until the application reads the response. This metric is most relevant for ReadRows requests. The start and stop times for this metric depend on the way that you send the read request; see Application blocking latencies timer examples for details.', + unit: 'ms', + } + ), + firstResponseLatencies: meter.createHistogram( + 'first_response_latencies', + { + description: + 'Latencies from when a client sends a request and receives the first row of the response.', + unit: 'ms', + } + ), + serverLatencies: meter.createHistogram('server_latencies', { + description: + 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', + }), + connectivityErrorCount: meter.createHistogram( + 'connectivity_error_count', + { + description: + "The number of requests that failed to reach Google's network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.", + } + ), + clientBlockingLatencies: meter.createHistogram( + 'client_blocking_latencies', + { + description: + 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', + unit: 'ms', + } + ), + }; + } + } + + /** + * Records metrics for a completed Bigtable operation. + * This method records the operation latency and retry count, associating them with provided attributes. + * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. + * @param {OnOperationCompleteAttributes} attributes Attributes associated with the completed operation. + */ + onOperationComplete( + metrics: OnOperationCompleteMetrics, + attributes: OnOperationCompleteAttributes + ) { + this.initialize(); + this.otelMetrics?.operationLatencies.record( + metrics.operationLatency, + attributes + ); + this.otelMetrics?.retryCount.add(metrics.retryCount, attributes); + this.otelMetrics?.firstResponseLatencies.record( + metrics.firstResponseLatency, + attributes + ); + } + + /** + * Records metrics for a completed attempt of a Bigtable operation. + * This method records attempt latency, connectivity error count, server latency, and first response latency, + * along with the provided attributes. 
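+   * For illustration (literal values assumed, not prescribed): a caller might invoke
+   * `handler.onAttemptComplete({attemptLatency: 120, serverLatency: 45, connectivityErrorCount: 0}, attributes)`.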
+ * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. + * @param {OnAttemptCompleteAttributes} attributes Attributes associated with the completed attempt. + */ + onAttemptComplete( + metrics: OnAttemptCompleteMetrics, + attributes: OnAttemptCompleteAttributes + ) { + this.initialize(); + this.otelMetrics?.attemptLatencies.record( + metrics.attemptLatency, + attributes + ); + this.otelMetrics?.connectivityErrorCount.record( + metrics.connectivityErrorCount, + attributes + ); + this.otelMetrics?.serverLatencies.record(metrics.serverLatency, attributes); + } +} From 48ff70603ad241e864c1296254052385e471bb5e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Feb 2025 09:51:30 -0500 Subject: [PATCH 149/448] Add exporter and helper function --- package.json | 1 + src/client-side-metrics/exporter.ts | 30 ++- .../gcp-metrics-handler.ts | 2 +- test/metrics-collector/metricsToRequest.ts | 209 ++++++++++++++++++ 4 files changed, 238 insertions(+), 4 deletions(-) create mode 100644 test/metrics-collector/metricsToRequest.ts diff --git a/package.json b/package.json index 2dc24800e..903a4b89c 100644 --- a/package.json +++ b/package.json @@ -47,6 +47,7 @@ "precompile": "gts clean" }, "dependencies": { + "@google-cloud/monitoring": "^4.1.0", "@google-cloud/opentelemetry-cloud-monitoring-exporter": "^0.20.0", "@google-cloud/opentelemetry-resource-util": "^2.4.0", "@google-cloud/precise-date": "^4.0.0", diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 107a1d700..784c9c078 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -1,9 +1,33 @@ -export function transformInExport(args: {}) { +import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; +import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; +import {ServiceError} from 'google-gax'; +import {MetricServiceClient} from '@google-cloud/monitoring'; +interface ExportResult { + code: number; } -export class CloudMonitoringExporter { - export() { +export function metricsToRequest(metrics: ResourceMetrics) { + return {}; +} + +export class CloudMonitoringExporter extends MetricExporter { + private monitoringClient = new MetricServiceClient(); + export( + metrics: ResourceMetrics, + resultCallback: (result: ExportResult) => void + ): void { + (async () => { + try { + const request = metricsToRequest(metrics); + await this.monitoringClient.createTimeSeries(request); + const exportResult = {code: 0}; + resultCallback(exportResult); + } catch (error) { + const exportResult = {code: (error as ServiceError).code as number}; + resultCallback(exportResult); + } + })(); } } diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index e1a462bb9..b947d8f88 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -23,7 +23,7 @@ import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-expor import { OnAttemptCompleteAttributes, OnOperationCompleteAttributes, -} from '../../common/client-side-metrics-attributes'; +} from './client-side-metrics-attributes'; import {View} from '@opentelemetry/sdk-metrics'; const { Aggregation, diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts new file mode 100644 index 000000000..97857a704 --- /dev/null +++ b/test/metrics-collector/metricsToRequest.ts @@ -0,0 +1,209 @@ +import {describe} from 'mocha'; + +// 
TODO: Generate the export code +describe('Bigtable/metricsToRequest', () => { + it('Converts a counter and a histogram to the cloud monitoring format', () => { + const exportArgs = { + resource: { + _attributes: { + 'service.name': 'Cloud Bigtable Table', + 'telemetry.sdk.language': 'nodejs', + 'telemetry.sdk.name': 'opentelemetry', + 'telemetry.sdk.version': '1.30.0', + 'cloud.provider': 'gcp', + 'cloud.platform': 'gce_instance', + 'cloud.resource_manager.project_id': 'cloud-native-db-dpes-shared', + 'monitored_resource.type': 'bigtable_client_raw', + 'monitored_resource.labels.project_id': 'cloud-native-db-dpes-shared', + 'monitored_resource.labels.instance_id': 'dan-bigtable-instance', + 'monitored_resource.labels.table_id': 'events-table', + }, + asyncAttributesPending: false, + _syncAttributes: { + 'service.name': 'Cloud Bigtable Table', + 'telemetry.sdk.language': 'nodejs', + 'telemetry.sdk.name': 'opentelemetry', + 'telemetry.sdk.version': '1.30.0', + 'cloud.provider': 'gcp', + 'cloud.platform': 'gce_instance', + 'cloud.resource_manager.project_id': 'cloud-native-db-dpes-shared', + 'monitored_resource.type': 'bigtable_client_raw', + 'monitored_resource.labels.project_id': 'cloud-native-db-dpes-shared', + 'monitored_resource.labels.instance_id': 'dan-bigtable-instance', + 'monitored_resource.labels.table_id': 'events-table', + }, + }, + scopeMetrics: [ + { + scope: { + name: 'sample_metric', + version: '', + }, + metrics: [ + { + descriptor: { + name: 'bigtable.googleapis.com/internal/client/metric91', + type: 'COUNTER', + description: '', + unit: '', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 3, + dataPoints: [ + { + attributes: { + key: 'value', + }, + startTime: [1738789130, 855000000], + endTime: [1738789140, 857000000], + value: 15, + }, + ], + isMonotonic: true, + }, + { + descriptor: { + name: 'bigtable.googleapis.com/internal/client/metric92', + type: 'HISTOGRAM', + description: + 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', + unit: 'ms', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 0, + dataPoints: [ + { + attributes: { + key: 'value', + }, + startTime: [1738789130, 855000000], + endTime: [1738789140, 857000000], + value: { + min: 7, + max: 7, + sum: 7, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, + 5000, 7500, 10000, + ], + counts: [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + }, + count: 1, + }, + }, + ], + }, + ], + }, + ], + }; + const expectedRequest = { + name: 'projects/cloud-native-db-dpes-shared', + timeSeries: [ + { + metric: { + type: 'bigtable.googleapis.com/internal/client/operation_latencies', + labels: { + app_profile: '', + client_name: 'go-bigtable/1.35.0', + client_uid: + 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', + method: 'Bigtable.MutateRows', + status: 'OK', + streaming: 'true', + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 'bahaaiman-instance-01-c1', + instance: 'bahaaiman-instance-01', + project_id: 'cloud-native-db-dpes-shared', + table: 'profile-b5e6f29d-2122-4d2c-8c12-cfb8e90ca05f', + zone: 'us-central1-f', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: Math.floor(Date.now() / 1000), + }, + startTime: { + seconds: Math.floor(Date.now() / 1000) - 1000, + }, + }, + value: { + distributionValue: { 
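+                  // A Cloud Monitoring distribution point: a total count, a
+                  // mean, explicit bucket boundaries, and one count per bucket.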
+ count: '1', + mean: 376.177845, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, + 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, + 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, + 200000, 400000, 800000, 1600000, 3200000, + ], + }, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + ], + }; + }); +}); From 883ea1a04536e87537113b4079041ae8a37cea2e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Feb 2025 10:58:40 -0500 Subject: [PATCH 150/448] Add headers and fixture --- .../export-input-fixture.ts | 202 ++++++++++++++++++ src/client-side-metrics/exporter.ts | 14 ++ 2 files changed, 216 insertions(+) create mode 100644 src/client-side-metrics/export-input-fixture.ts diff --git a/src/client-side-metrics/export-input-fixture.ts b/src/client-side-metrics/export-input-fixture.ts new file mode 100644 index 000000000..833f0c36b --- /dev/null +++ b/src/client-side-metrics/export-input-fixture.ts @@ -0,0 +1,202 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +export const exportInput = { + resource: { + _attributes: { + 'service.name': 'bigtable-metrics', + 'telemetry.sdk.language': 'nodejs', + 'telemetry.sdk.name': 'opentelemetry', + 'telemetry.sdk.version': '1.30.0', + }, + asyncAttributesPending: false, + _syncAttributes: { + 'service.name': 'bigtable-metrics', + 'telemetry.sdk.language': 'nodejs', + 'telemetry.sdk.name': 'opentelemetry', + 'telemetry.sdk.version': '1.30.0', + }, + _asyncAttributesPromise: {}, + }, + scopeMetrics: [ + { + scope: { + name: 'bigtable.googleapis.com', + version: '', + }, + metrics: [ + { + descriptor: { + name: 'operation_latencies', + description: + "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + type: 'HISTOGRAM', + unit: '', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 3, + dataPoints: [ + { + attributes: { + finalOperationStatus: 0, + streamingOperation: true, + projectId: 'some-project', + instanceId: 'emulator-test-instance', + table: 'my-table', + cluster: 'fake-cluster3', + zone: 'us-west1-c\u0012', + methodName: 'readRows', + clientName: 'nodejs-bigtable/5.1.2', + }, + startTime: [1738943373, 943000000], + endTime: [1738943383, 940000000], + value: 11956, + }, + ], + isMonotonic: true, + }, + { + descriptor: { + name: 'attempt_latencies', + description: + 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. 
If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', + type: 'HISTOGRAM', + unit: 'ms', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 3, + dataPoints: [ + { + attributes: { + attemptStatus: 0, + streamingOperation: true, + projectId: 'some-project', + instanceId: 'emulator-test-instance', + table: 'my-table', + cluster: 'fake-cluster3', + zone: 'us-west1-c\u0012', + methodName: 'readRows', + clientName: 'nodejs-bigtable/5.1.2', + }, + startTime: [1738943373, 942000000], + endTime: [1738943383, 940000000], + value: 11830, + }, + ], + isMonotonic: true, + }, + { + descriptor: { + name: 'retry_count', + description: + 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', + type: 'HISTOGRAM', + unit: 'ms', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 3, + dataPoints: [ + { + attributes: { + finalOperationStatus: 0, + streamingOperation: true, + projectId: 'some-project', + instanceId: 'emulator-test-instance', + table: 'my-table', + cluster: 'fake-cluster3', + zone: 'us-west1-c\u0012', + methodName: 'readRows', + clientName: 'nodejs-bigtable/5.1.2', + }, + startTime: [1738943373, 943000000], + endTime: [1738943383, 940000000], + value: 0, + }, + ], + isMonotonic: true, + }, + { + descriptor: { + name: 'server_latencies', + description: + 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', + type: 'HISTOGRAM', + unit: '', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 3, + dataPoints: [ + { + attributes: { + attemptStatus: 0, + streamingOperation: true, + projectId: 'some-project', + instanceId: 'emulator-test-instance', + table: 'my-table', + cluster: 'fake-cluster3', + zone: 'us-west1-c\u0012', + methodName: 'readRows', + clientName: 'nodejs-bigtable/5.1.2', + }, + startTime: [1738943373, 943000000], + endTime: [1738943383, 940000000], + value: 7642, + }, + ], + isMonotonic: true, + }, + { + descriptor: { + name: 'connectivity_error_count', + description: + "The number of requests that failed to reach Google's network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.", + type: 'HISTOGRAM', + unit: '', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 3, + dataPoints: [ + { + attributes: { + attemptStatus: 0, + streamingOperation: true, + projectId: 'some-project', + instanceId: 'emulator-test-instance', + table: 'my-table', + cluster: 'fake-cluster3', + zone: 'us-west1-c\u0012', + methodName: 'readRows', + clientName: 'nodejs-bigtable/5.1.2', + }, + startTime: [1738943373, 943000000], + endTime: [1738943383, 940000000], + value: 0, + }, + ], + isMonotonic: true, + }, + ], + }, + ], +}; diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 784c9c078..88bf4d771 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import {ServiceError} from 'google-gax'; From ad9c85bde5215c4b373445aa9fa0f8932f8d685e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Feb 2025 11:06:31 -0500 Subject: [PATCH 151/448] Add milliseconds unit --- src/client-side-metrics/gcp-metrics-handler.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index b947d8f88..ea7a0ab72 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -112,6 +112,7 @@ export class GCPMetricsHandler implements IMetricsHandler { operationLatencies: meter.createHistogram('operation_latencies', { description: "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + unit: 'ms', }), attemptLatencies: meter.createHistogram('attempt_latencies', { description: @@ -141,6 +142,7 @@ export class GCPMetricsHandler implements IMetricsHandler { serverLatencies: meter.createHistogram('server_latencies', { description: 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', + unit: 'ms', }), connectivityErrorCount: meter.createHistogram( 'connectivity_error_count', From 5a3bac2f94454d4e69647831f6ce5732f929c3e0 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Feb 2025 11:08:43 -0500 Subject: [PATCH 152/448] Record to a histogram --- src/client-side-metrics/gcp-metrics-handler.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index ea7a0ab72..6b97a52c9 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -41,7 +41,7 @@ const { interface Metrics { operationLatencies: typeof Histogram; attemptLatencies: typeof Histogram; - retryCount: typeof Counter; + retryCount: typeof Histogram; applicationBlockingLatencies: typeof Histogram; firstResponseLatencies: typeof Histogram; serverLatencies: typeof Histogram; @@ -119,7 +119,7 @@ export class GCPMetricsHandler implements IMetricsHandler { 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', unit: 'ms', }), - retryCount: meter.createCounter('retry_count', { + retryCount: meter.createHistogram('retry_count', { description: 'A counter that records the number of attempts that an operation required to complete. 
Under normal circumstances, this value is empty.', }), @@ -178,7 +178,7 @@ export class GCPMetricsHandler implements IMetricsHandler { metrics.operationLatency, attributes ); - this.otelMetrics?.retryCount.add(metrics.retryCount, attributes); + this.otelMetrics?.retryCount.record(metrics.retryCount, attributes); this.otelMetrics?.firstResponseLatencies.record( metrics.firstResponseLatency, attributes From 4740c624565e938d1a604b95ae4d31fabbebef06 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Feb 2025 11:24:44 -0500 Subject: [PATCH 153/448] Add necessary elements to the GCPMetricsHandler # Conflicts: # src/client-side-metrics/gcp-metrics-handler.ts --- .../gcp-metrics-handler.ts | 68 +++++++++++-------- 1 file changed, 41 insertions(+), 27 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 6b97a52c9..0afa4b64d 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -30,7 +30,6 @@ const { ExplicitBucketHistogramAggregation, MeterProvider, Histogram, - Counter, PeriodicExportingMetricReader, } = require('@opentelemetry/sdk-metrics'); @@ -93,7 +92,11 @@ export class GCPMetricsHandler implements IMetricsHandler { const meterProvider = new MeterProvider({ views: viewList, resource: new Resources.Resource({ - 'service.name': 'bigtable-metrics', + 'service.name': 'Cloud Bigtable Table', + 'cloud.provider': 'gcp', + 'cloud.platform': 'gce_instance', + 'cloud.resource_manager.project_id': projectId, + 'monitored_resource.type': 'bigtable_client_raw', }).merge(new ResourceUtil.GcpDetectorSync().detect()), readers: [ // Register the exporter @@ -109,22 +112,30 @@ export class GCPMetricsHandler implements IMetricsHandler { }); const meter = meterProvider.getMeter('bigtable.googleapis.com'); this.otelMetrics = { - operationLatencies: meter.createHistogram('operation_latencies', { - description: - "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", - unit: 'ms', - }), - attemptLatencies: meter.createHistogram('attempt_latencies', { - description: - 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', - unit: 'ms', - }), - retryCount: meter.createHistogram('retry_count', { - description: - 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', - }), + operationLatencies: meter.createHistogram( + 'bigtable.googleapis.com/internal/client/operation_latencies', + { + description: + "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + } + ), + attemptLatencies: meter.createHistogram( + 'bigtable.googleapis.com/internal/client/attempt_latencies', + { + description: + 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. 
If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', + unit: 'ms', + } + ), + retryCount: meter.createHistogram( + 'bigtable.googleapis.com/internal/client/retry_count', + { + description: + 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', + } + ), applicationBlockingLatencies: meter.createHistogram( - 'application_blocking_latencies', + 'bigtable.googleapis.com/internal/client/application_blocking_latencies', { description: 'The time from when the client receives the response to a request until the application reads the response. This metric is most relevant for ReadRows requests. The start and stop times for this metric depend on the way that you send the read request; see Application blocking latencies timer examples for details.', @@ -132,27 +143,29 @@ export class GCPMetricsHandler implements IMetricsHandler { } ), firstResponseLatencies: meter.createHistogram( - 'first_response_latencies', + 'bigtable.googleapis.com/internal/client/first_response_latencies', { description: 'Latencies from when a client sends a request and receives the first row of the response.', unit: 'ms', } ), - serverLatencies: meter.createHistogram('server_latencies', { - description: - 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', - unit: 'ms', - }), + serverLatencies: meter.createHistogram( + 'bigtable.googleapis.com/internal/client/server_latencies', + { + description: + 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', + } + ), connectivityErrorCount: meter.createHistogram( - 'connectivity_error_count', + 'bigtable.googleapis.com/internal/client/connectivity_error_count', { description: "The number of requests that failed to reach Google's network. In normal cases, this number is 0. 
When the number is not 0, it can indicate connectivity issues between the application and the Google network.", } ), clientBlockingLatencies: meter.createHistogram( - 'client_blocking_latencies', + 'bigtable.googleapis.com/internal/client/client_blocking_latencies', { description: 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', @@ -173,7 +186,7 @@ export class GCPMetricsHandler implements IMetricsHandler { metrics: OnOperationCompleteMetrics, attributes: OnOperationCompleteAttributes ) { - this.initialize(); + this.initialize(attributes.projectId); this.otelMetrics?.operationLatencies.record( metrics.operationLatency, attributes @@ -196,7 +209,8 @@ export class GCPMetricsHandler implements IMetricsHandler { metrics: OnAttemptCompleteMetrics, attributes: OnAttemptCompleteAttributes ) { - this.initialize(); + console.log('onAttemptComplete handler'); + this.initialize(attributes.projectId); this.otelMetrics?.attemptLatencies.record( metrics.attemptLatency, attributes From ee404f1fe57191ba93818de8c36cca8b9aa534c2 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Feb 2025 11:47:45 -0500 Subject: [PATCH 154/448] Pass an exporter into the GCPMetricsHandler --- src/client-side-metrics/gcp-metrics-handler.ts | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 0afa4b64d..afd2e12b4 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -56,6 +56,11 @@ interface Metrics { export class GCPMetricsHandler implements IMetricsHandler { private initialized = false; private otelMetrics?: Metrics; + private exporter: typeof MetricExporter; + + constructor(exporter: typeof MetricExporter) { + this.exporter = exporter; + } /** * Initializes the OpenTelemetry metrics instruments if they haven't been already. @@ -104,9 +109,7 @@ export class GCPMetricsHandler implements IMetricsHandler { // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by // Cloud Monitoring. 
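             // (exportIntervalMillis is expressed in milliseconds: 100_000 ms
             // is 100 seconds between exports.)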
exportIntervalMillis: 100_000, - exporter: new MetricExporter({ - projectId, - }), + exporter: this.exporter, }), ], }); From c997f0fd22a36b2db3ed8dbab79190a386baf8d8 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Feb 2025 11:51:38 -0500 Subject: [PATCH 155/448] Move file to tests --- .../metrics-collector}/export-input-fixture.ts | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename {src/client-side-metrics => test/metrics-collector}/export-input-fixture.ts (100%) diff --git a/src/client-side-metrics/export-input-fixture.ts b/test/metrics-collector/export-input-fixture.ts similarity index 100% rename from src/client-side-metrics/export-input-fixture.ts rename to test/metrics-collector/export-input-fixture.ts From 3719257988a9a999af1e6bc7a2a6c65841daf5c7 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Feb 2025 11:52:00 -0500 Subject: [PATCH 156/448] Remove unused import --- src/client-side-metrics/operation-metrics-collector.ts | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 488748329..b9c3e90a2 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -14,11 +14,7 @@ import * as fs from 'fs'; import {IMetricsHandler} from './metrics-handler'; -import { - MethodName, - OnOperationCompleteAttributes, - StreamingState, -} from './client-side-metrics-attributes'; +import {MethodName, StreamingState} from './client-side-metrics-attributes'; import {grpc} from 'google-gax'; /** From a6498f2891add40f75682ff68c5c62521b17f4bc Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Feb 2025 11:57:03 -0500 Subject: [PATCH 157/448] Adapt the fixture to include the projectId --- .../metrics-collector/export-input-fixture.ts | 132 ++++++++++++------ 1 file changed, 92 insertions(+), 40 deletions(-) diff --git a/test/metrics-collector/export-input-fixture.ts b/test/metrics-collector/export-input-fixture.ts index 833f0c36b..bf3cb2106 100644 --- a/test/metrics-collector/export-input-fixture.ts +++ b/test/metrics-collector/export-input-fixture.ts @@ -15,17 +15,25 @@ export const exportInput = { resource: { _attributes: { - 'service.name': 'bigtable-metrics', + 'service.name': 'Cloud Bigtable Table', 'telemetry.sdk.language': 'nodejs', 'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.version': '1.30.0', + 'cloud.provider': 'gcp', + 'cloud.platform': 'gce_instance', + 'cloud.resource_manager.project_id': 'some-project', + 'monitored_resource.type': 'bigtable_client_raw', }, asyncAttributesPending: false, _syncAttributes: { - 'service.name': 'bigtable-metrics', + 'service.name': 'Cloud Bigtable Table', 'telemetry.sdk.language': 'nodejs', 'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.version': '1.30.0', + 'cloud.provider': 'gcp', + 'cloud.platform': 'gce_instance', + 'cloud.resource_manager.project_id': 'some-project', + 'monitored_resource.type': 'bigtable_client_raw', }, _asyncAttributesPromise: {}, }, @@ -38,11 +46,11 @@ export const exportInput = { metrics: [ { descriptor: { - name: 'operation_latencies', + name: 'attempt_latencies', description: - "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + 'The latencies of a client RPC attempt. 
Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', type: 'HISTOGRAM', - unit: '', + unit: 'ms', valueType: 1, advice: {}, }, @@ -51,7 +59,7 @@ export const exportInput = { dataPoints: [ { attributes: { - finalOperationStatus: 0, + attemptStatus: 0, streamingOperation: true, projectId: 'some-project', instanceId: 'emulator-test-instance', @@ -61,29 +69,29 @@ export const exportInput = { methodName: 'readRows', clientName: 'nodejs-bigtable/5.1.2', }, - startTime: [1738943373, 943000000], - endTime: [1738943383, 940000000], - value: 11956, + startTime: [1738946024, 950000000], + endTime: [1738946034, 948000000], + value: 10944, }, ], isMonotonic: true, }, { descriptor: { - name: 'attempt_latencies', - description: - 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', + name: 'bigtable.googleapis.com/internal/client/operation_latencies', type: 'HISTOGRAM', - unit: 'ms', + description: + "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + unit: '', valueType: 1, advice: {}, }, aggregationTemporality: 1, - dataPointType: 3, + dataPointType: 0, dataPoints: [ { attributes: { - attemptStatus: 0, + finalOperationStatus: 0, streamingOperation: true, projectId: 'some-project', instanceId: 'emulator-test-instance', @@ -93,25 +101,36 @@ export const exportInput = { methodName: 'readRows', clientName: 'nodejs-bigtable/5.1.2', }, - startTime: [1738943373, 942000000], - endTime: [1738943383, 940000000], - value: 11830, + startTime: [1738946024, 950000000], + endTime: [1738946034, 948000000], + value: { + min: 76, + max: 1337, + sum: 11027, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [0, 0, 0, 0, 0, 0, 93, 0, 5, 0, 0, 1, 0, 0, 0, 0], + }, + count: 99, + }, }, ], - isMonotonic: true, }, { descriptor: { - name: 'retry_count', + name: 'bigtable.googleapis.com/internal/client/retry_count', + type: 'HISTOGRAM', description: 'A counter that records the number of attempts that an operation required to complete. 
Under normal circumstances, this value is empty.', - type: 'HISTOGRAM', unit: 'ms', valueType: 1, advice: {}, }, aggregationTemporality: 1, - dataPointType: 3, + dataPointType: 0, dataPoints: [ { attributes: { @@ -125,25 +144,36 @@ export const exportInput = { methodName: 'readRows', clientName: 'nodejs-bigtable/5.1.2', }, - startTime: [1738943373, 943000000], - endTime: [1738943383, 940000000], - value: 0, + startTime: [1738946024, 951000000], + endTime: [1738946034, 948000000], + value: { + min: 0, + max: 0, + sum: 0, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + }, + count: 99, + }, }, ], - isMonotonic: true, }, { descriptor: { - name: 'server_latencies', + name: 'bigtable.googleapis.com/internal/client/server_latencies', + type: 'HISTOGRAM', description: 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', - type: 'HISTOGRAM', unit: '', valueType: 1, advice: {}, }, aggregationTemporality: 1, - dataPointType: 3, + dataPointType: 0, dataPoints: [ { attributes: { @@ -157,25 +187,36 @@ export const exportInput = { methodName: 'readRows', clientName: 'nodejs-bigtable/5.1.2', }, - startTime: [1738943373, 943000000], - endTime: [1738943383, 940000000], - value: 7642, + startTime: [1738946024, 950000000], + endTime: [1738946034, 948000000], + value: { + min: 57, + max: 379, + sum: 7271, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [0, 0, 0, 0, 0, 94, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0], + }, + count: 99, + }, }, ], - isMonotonic: true, }, { descriptor: { - name: 'connectivity_error_count', + name: 'bigtable.googleapis.com/internal/client/connectivity_error_count', + type: 'HISTOGRAM', description: "The number of requests that failed to reach Google's network. In normal cases, this number is 0. 
When the number is not 0, it can indicate connectivity issues between the application and the Google network.", - type: 'HISTOGRAM', unit: '', valueType: 1, advice: {}, }, aggregationTemporality: 1, - dataPointType: 3, + dataPointType: 0, dataPoints: [ { attributes: { @@ -189,12 +230,23 @@ export const exportInput = { methodName: 'readRows', clientName: 'nodejs-bigtable/5.1.2', }, - startTime: [1738943373, 943000000], - endTime: [1738943383, 940000000], - value: 0, + startTime: [1738946024, 950000000], + endTime: [1738946034, 948000000], + value: { + min: 0, + max: 0, + sum: 0, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + }, + count: 99, + }, }, ], - isMonotonic: true, }, ], }, From e7d631d74d07794ea76123f1afa11024d712fb20 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Feb 2025 14:51:55 -0500 Subject: [PATCH 158/448] More exporter code fixes --- src/client-side-metrics/exporter.ts | 128 ++++++- .../metrics-collector/export-input-fixture.ts | 127 +------ test/metrics-collector/metricsToRequest.ts | 321 ++++++++---------- 3 files changed, 263 insertions(+), 313 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 88bf4d771..964cb3780 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -21,8 +21,129 @@ interface ExportResult { code: number; } -export function metricsToRequest(metrics: ResourceMetrics) { - return {}; +// TODO: Only involves the values that we care about +interface ExportInput { + resource: { + _attributes: { + 'cloud.resource_manager.project_id': string; + }; + _syncAttributes: { + 'monitored_resource.type': string; + }; + }; + scopeMetrics: [ + { + metrics: [ + { + descriptor: { + name: string; + unit: string; + }; + dataPoints: [ + { + attributes: { + appProfileId: string; + finalOperationStatus: number; + streamingOperation: string; + projectId: string; + instanceId: string; + table: string; + cluster: string; + zone: string; + methodName: string; + clientName: string; + }; + startTime: [number, number]; + endTime: [number, number]; + value: { + sum: number; + count: number; + buckets: { + boundaries: number[]; + counts: number[]; + }; + }; + }, + ]; + }, + ]; + }, + ]; +} + +export function metricsToRequest(exportArgs: ExportInput) { + const request = { + name: `projects/${exportArgs.resource._attributes['cloud.resource_manager.project_id']}`, + timeSeries: [], + }; + + for (const scopeMetrics of exportArgs.scopeMetrics) { + for (const metric of scopeMetrics.metrics) { + const metricName = metric.descriptor.name; + + for (const dataPoint of metric.dataPoints) { + // Extract attributes to labels based on their intended target (resource or metric) + const allAttributes = dataPoint.attributes; + const metricLabels = { + app_profile: allAttributes.appProfileId, + client_name: allAttributes.clientName, + method: allAttributes.methodName, + finalOperationStatus: allAttributes.finalOperationStatus, + streaming: allAttributes.streamingOperation, + }; + const resourceLabels = { + cluster: allAttributes.cluster, + instance: allAttributes.instanceId, + project_id: allAttributes.projectId, + table: allAttributes.table, + zone: allAttributes.zone, + }; + const timeSeries = { + metric: { + type: metricName, + labels: metricLabels, + }, + resource: { + type: exportArgs.resource._syncAttributes[ + 'monitored_resource.type' + ], + labels: resourceLabels, + }, 
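+          // Every series produced here is assumed to carry histogram data:
+          // the point below is encoded as a cumulative distribution built
+          // from the data point's bucket boundaries and counts.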
+ metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: dataPoint.endTime[0], + nanos: dataPoint.endTime[1], + }, + startTime: { + seconds: dataPoint.startTime[0], + nanos: dataPoint.startTime[1], + }, + }, + value: { + distributionValue: { + count: String(dataPoint.value.count), + mean: dataPoint.value.sum / dataPoint.value.count, + bucketOptions: { + explicitBuckets: { + bounds: dataPoint.value.buckets.boundaries, + }, + }, + bucketCounts: dataPoint.value.buckets.counts.map(String), + }, + }, + }, + ], + unit: metric.descriptor.unit || 'ms', // Default to 'ms' if no unit is specified + }; + request.timeSeries.push(timeSeries); + } + } + } + return request; } export class CloudMonitoringExporter extends MetricExporter { @@ -34,7 +155,8 @@ export class CloudMonitoringExporter extends MetricExporter { ): void { (async () => { try { - const request = metricsToRequest(metrics); + // TODO: Remove casting. + const request = metricsToRequest(metrics as unknown as ExportInput); await this.monitoringClient.createTimeSeries(request); const exportResult = {code: 0}; resultCallback(exportResult); diff --git a/test/metrics-collector/export-input-fixture.ts b/test/metrics-collector/export-input-fixture.ts index bf3cb2106..bd5b290c7 100644 --- a/test/metrics-collector/export-input-fixture.ts +++ b/test/metrics-collector/export-input-fixture.ts @@ -44,38 +44,6 @@ export const exportInput = { version: '', }, metrics: [ - { - descriptor: { - name: 'attempt_latencies', - description: - 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', - type: 'HISTOGRAM', - unit: 'ms', - valueType: 1, - advice: {}, - }, - aggregationTemporality: 1, - dataPointType: 3, - dataPoints: [ - { - attributes: { - attemptStatus: 0, - streamingOperation: true, - projectId: 'some-project', - instanceId: 'emulator-test-instance', - table: 'my-table', - cluster: 'fake-cluster3', - zone: 'us-west1-c\u0012', - methodName: 'readRows', - clientName: 'nodejs-bigtable/5.1.2', - }, - startTime: [1738946024, 950000000], - endTime: [1738946034, 948000000], - value: 10944, - }, - ], - isMonotonic: true, - }, { descriptor: { name: 'bigtable.googleapis.com/internal/client/operation_latencies', @@ -91,8 +59,9 @@ export const exportInput = { dataPoints: [ { attributes: { + appProfileId: 'fake-app-profile-id', finalOperationStatus: 0, - streamingOperation: true, + streamingOperation: 'STREAMING', projectId: 'some-project', instanceId: 'emulator-test-instance', table: 'my-table', @@ -106,7 +75,7 @@ export const exportInput = { value: { min: 76, max: 1337, - sum: 11027, + sum: 11979, buckets: { boundaries: [ 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, @@ -114,7 +83,7 @@ export const exportInput = { ], counts: [0, 0, 0, 0, 0, 0, 93, 0, 5, 0, 0, 1, 0, 0, 0, 0], }, - count: 99, + count: 100, }, }, ], @@ -135,7 +104,7 @@ export const exportInput = { { attributes: { finalOperationStatus: 0, - streamingOperation: true, + streamingOperation: 'STREAMING', projectId: 'some-project', instanceId: 'emulator-test-instance', table: 'my-table', @@ -162,92 +131,6 @@ export const exportInput = { }, ], }, - { - descriptor: { - name: 'bigtable.googleapis.com/internal/client/server_latencies', - type: 'HISTOGRAM', - description: - 'Latencies between the time when the Google frontend receives an RPC 
and when it sends the first byte of the response.', - unit: '', - valueType: 1, - advice: {}, - }, - aggregationTemporality: 1, - dataPointType: 0, - dataPoints: [ - { - attributes: { - attemptStatus: 0, - streamingOperation: true, - projectId: 'some-project', - instanceId: 'emulator-test-instance', - table: 'my-table', - cluster: 'fake-cluster3', - zone: 'us-west1-c\u0012', - methodName: 'readRows', - clientName: 'nodejs-bigtable/5.1.2', - }, - startTime: [1738946024, 950000000], - endTime: [1738946034, 948000000], - value: { - min: 57, - max: 379, - sum: 7271, - buckets: { - boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, - ], - counts: [0, 0, 0, 0, 0, 94, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0], - }, - count: 99, - }, - }, - ], - }, - { - descriptor: { - name: 'bigtable.googleapis.com/internal/client/connectivity_error_count', - type: 'HISTOGRAM', - description: - "The number of requests that failed to reach Google's network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.", - unit: '', - valueType: 1, - advice: {}, - }, - aggregationTemporality: 1, - dataPointType: 0, - dataPoints: [ - { - attributes: { - attemptStatus: 0, - streamingOperation: true, - projectId: 'some-project', - instanceId: 'emulator-test-instance', - table: 'my-table', - cluster: 'fake-cluster3', - zone: 'us-west1-c\u0012', - methodName: 'readRows', - clientName: 'nodejs-bigtable/5.1.2', - }, - startTime: [1738946024, 950000000], - endTime: [1738946034, 948000000], - value: { - min: 0, - max: 0, - sum: 0, - buckets: { - boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, - ], - counts: [99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - }, - count: 99, - }, - }, - ], - }, ], }, ], diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index 97857a704..fd5804820 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -1,209 +1,154 @@ import {describe} from 'mocha'; +import {exportInput} from './export-input-fixture'; // TODO: Generate the export code describe('Bigtable/metricsToRequest', () => { it('Converts a counter and a histogram to the cloud monitoring format', () => { - const exportArgs = { - resource: { - _attributes: { - 'service.name': 'Cloud Bigtable Table', - 'telemetry.sdk.language': 'nodejs', - 'telemetry.sdk.name': 'opentelemetry', - 'telemetry.sdk.version': '1.30.0', - 'cloud.provider': 'gcp', - 'cloud.platform': 'gce_instance', - 'cloud.resource_manager.project_id': 'cloud-native-db-dpes-shared', - 'monitored_resource.type': 'bigtable_client_raw', - 'monitored_resource.labels.project_id': 'cloud-native-db-dpes-shared', - 'monitored_resource.labels.instance_id': 'dan-bigtable-instance', - 'monitored_resource.labels.table_id': 'events-table', - }, - asyncAttributesPending: false, - _syncAttributes: { - 'service.name': 'Cloud Bigtable Table', - 'telemetry.sdk.language': 'nodejs', - 'telemetry.sdk.name': 'opentelemetry', - 'telemetry.sdk.version': '1.30.0', - 'cloud.provider': 'gcp', - 'cloud.platform': 'gce_instance', - 'cloud.resource_manager.project_id': 'cloud-native-db-dpes-shared', - 'monitored_resource.type': 'bigtable_client_raw', - 'monitored_resource.labels.project_id': 'cloud-native-db-dpes-shared', - 'monitored_resource.labels.instance_id': 'dan-bigtable-instance', - 'monitored_resource.labels.table_id': 'events-table', - }, - }, 
- scopeMetrics: [ - { - scope: { - name: 'sample_metric', - version: '', - }, - metrics: [ - { - descriptor: { - name: 'bigtable.googleapis.com/internal/client/metric91', - type: 'COUNTER', - description: '', - unit: '', - valueType: 1, - advice: {}, + const exportArgs = exportInput; + const expectedRequests = [ + { + name: 'projects/some-project', + timeSeries: [ + { + metric: { + type: 'bigtable.googleapis.com/internal/client/operation_latencies', + labels: { + app_profile: 'fake-app-profile-id', + client_name: 'nodejs-bigtable/5.1.2', + method: 'readRows', + finalOperationStatus: 0, + streaming: 'STREAMING', }, - aggregationTemporality: 1, - dataPointType: 3, - dataPoints: [ - { - attributes: { - key: 'value', - }, - startTime: [1738789130, 855000000], - endTime: [1738789140, 857000000], - value: 15, - }, - ], - isMonotonic: true, }, - { - descriptor: { - name: 'bigtable.googleapis.com/internal/client/metric92', - type: 'HISTOGRAM', - description: - 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', - unit: 'ms', - valueType: 1, - advice: {}, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 'fake-cluster3', + instance: 'emulator-test-instance', + project_id: 'some-project', + table: 'my-table', + zone: 'us-west1-c\u0012', }, - aggregationTemporality: 1, - dataPointType: 0, - dataPoints: [ - { - attributes: { - key: 'value', + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 1738946034, + }, + startTime: { + seconds: 1738946024, }, - startTime: [1738789130, 855000000], - endTime: [1738789140, 857000000], - value: { - min: 7, - max: 7, - sum: 7, - buckets: { - boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, - 5000, 7500, 10000, - ], - counts: [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + }, + value: { + distributionValue: { + count: '99', + mean: 121, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, + 7500, 10000, + ], + }, }, - count: 1, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '93', + '0', + '5', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + ], }, }, - ], - }, - ], - }, - ], - }; - const expectedRequest = { - name: 'projects/cloud-native-db-dpes-shared', - timeSeries: [ - { - metric: { - type: 'bigtable.googleapis.com/internal/client/operation_latencies', - labels: { - app_profile: '', - client_name: 'go-bigtable/1.35.0', - client_uid: - 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', - method: 'Bigtable.MutateRows', - status: 'OK', - streaming: 'true', - }, + }, + ], + unit: 'ms', }, - resource: { - type: 'bigtable_client_raw', - labels: { - cluster: 'bahaaiman-instance-01-c1', - instance: 'bahaaiman-instance-01', - project_id: 'cloud-native-db-dpes-shared', - table: 'profile-b5e6f29d-2122-4d2c-8c12-cfb8e90ca05f', - zone: 'us-central1-f', + { + metric: { + type: 'bigtable.googleapis.com/internal/client/retry_count', + labels: { + app_profile: 'fake-app-profile-id', + client_name: 'nodejs-bigtable/5.1.2', + method: 'readRows', + finalOperationStatus: 0, + streaming: 'STREAMING', + }, }, - }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: Math.floor(Date.now() / 1000), - }, - startTime: { - seconds: Math.floor(Date.now() / 1000) - 1000, - }, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 
'fake-cluster3', + instance: 'emulator-test-instance', + project_id: 'some-project', + table: 'my-table', + zone: 'us-west1-c\u0012', }, - value: { - distributionValue: { - count: '1', - mean: 376.177845, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, - 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, - 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, - 200000, 400000, 800000, 1600000, 3200000, - ], + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 1738946034, + }, + startTime: { + seconds: 1738946024, + }, + }, + value: { + distributionValue: { + count: '100', + mean: 110.27, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, + 7500, 10000, + ], + }, }, + bucketCounts: [ + '99', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + ], }, - bucketCounts: [ - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '1', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - ], }, }, - }, - ], - unit: 'ms', - }, - ], - }; + ], + unit: 'ms', + }, + ], + }, + ]; }); }); From 382ebef14c0604a93b6bc39a3fe228a04a1ad2b6 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Feb 2025 15:40:03 -0500 Subject: [PATCH 159/448] Add the fixture. We are going to use it later --- .../metrics-handler-fixture.ts | 75 +++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 test/metrics-collector/metrics-handler-fixture.ts diff --git a/test/metrics-collector/metrics-handler-fixture.ts b/test/metrics-collector/metrics-handler-fixture.ts new file mode 100644 index 000000000..b9280bfa0 --- /dev/null +++ b/test/metrics-collector/metrics-handler-fixture.ts @@ -0,0 +1,75 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
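+
+// The sequence of requests a metrics handler is expected to receive for a
+// readRows operation that takes two attempts: a failed attempt (attemptStatus
+// 4 is the gRPC DEADLINE_EXCEEDED code), a successful retry, and then the
+// completed operation.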
+ +export const expectedRequestsHandled = [ + { + metrics: { + attemptLatency: 2000, + serverLatency: 101, + connectivityErrorCount: 1, + }, + attributes: { + appProfileId: undefined, + streamingOperation: 'streaming', + attemptStatus: 4, + connectivityErrorCount: 1, + projectId: 'my-project', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c ', + methodName: 'readRows', + clientName: 'nodejs-bigtable', + }, + }, + { + metrics: { + attemptLatency: 1000, + serverLatency: 103, + connectivityErrorCount: 1, + }, + attributes: { + appProfileId: undefined, + streamingOperation: 'streaming', + attemptStatus: 0, + connectivityErrorCount: 1, + projectId: 'my-project', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c ', + methodName: 'readRows', + clientName: 'nodejs-bigtable', + }, + }, + { + metrics: { + operationLatency: 6000, + retryCount: 1, + firstResponseLatency: 2000, + }, + attributes: { + appProfileId: undefined, + finalOperationStatus: 0, + streamingOperation: 'streaming', + projectId: 'my-project', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c ', + methodName: 'readRows', + clientName: 'nodejs-bigtable', + }, + }, +]; From 8ba3347c41bb743126c70abece16b0a0c8cc6b53 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Feb 2025 16:50:23 -0500 Subject: [PATCH 160/448] Solved compiler errors. Test almost passes --- src/client-side-metrics/exporter.ts | 103 ++++---- test/metrics-collector/metricsToRequest.ts | 270 +++++++++++---------- 2 files changed, 193 insertions(+), 180 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 964cb3780..b1fd41a92 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -16,13 +16,15 @@ import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-expor import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import {ServiceError} from 'google-gax'; import {MetricServiceClient} from '@google-cloud/monitoring'; +import {google} from '@google-cloud/monitoring/build/protos/protos'; +import ICreateTimeSeriesRequest = google.monitoring.v3.ICreateTimeSeriesRequest; interface ExportResult { code: number; } // TODO: Only involves the values that we care about -interface ExportInput { +export interface ExportInput { resource: { _attributes: { 'cloud.resource_manager.project_id': string; @@ -31,52 +33,54 @@ interface ExportInput { 'monitored_resource.type': string; }; }; - scopeMetrics: [ - { - metrics: [ - { - descriptor: { - name: string; - unit: string; + scopeMetrics: { + scope: { + name: string; + version: string; + }; + metrics: { + descriptor: { + name: string; + unit: string; + description?: string; + type?: string; + valueType?: number; + advice?: {}; + }; + aggregationTemporality?: number; + dataPointType?: number; + dataPoints: { + attributes: { + appProfileId?: string; + finalOperationStatus: number; + streamingOperation: string; + projectId: string; + instanceId: string; + table: string; + cluster: string; + zone: string; + methodName: string; + clientName: string; + }; + startTime: number[]; + endTime: number[]; + value: { + min?: number; + max?: number; + sum: number; + count: number; + buckets: { + boundaries: number[]; + counts: number[]; }; - dataPoints: [ - { - attributes: { - appProfileId: string; - finalOperationStatus: number; - streamingOperation: string; - projectId: string; 
- instanceId: string; - table: string; - cluster: string; - zone: string; - methodName: string; - clientName: string; - }; - startTime: [number, number]; - endTime: [number, number]; - value: { - sum: number; - count: number; - buckets: { - boundaries: number[]; - counts: number[]; - }; - }; - }, - ]; - }, - ]; - }, - ]; + }; + }[]; + }[]; + }[]; } export function metricsToRequest(exportArgs: ExportInput) { - const request = { - name: `projects/${exportArgs.resource._attributes['cloud.resource_manager.project_id']}`, - timeSeries: [], - }; - + const timeSeriesArray = []; for (const scopeMetrics of exportArgs.scopeMetrics) { for (const metric of scopeMetrics.metrics) { const metricName = metric.descriptor.name; @@ -88,7 +92,7 @@ export function metricsToRequest(exportArgs: ExportInput) { app_profile: allAttributes.appProfileId, client_name: allAttributes.clientName, method: allAttributes.methodName, - finalOperationStatus: allAttributes.finalOperationStatus, + finalOperationStatus: allAttributes.finalOperationStatus.toString(), streaming: allAttributes.streamingOperation, }; const resourceLabels = { @@ -139,11 +143,14 @@ export function metricsToRequest(exportArgs: ExportInput) { ], unit: metric.descriptor.unit || 'ms', // Default to 'ms' if no unit is specified }; - request.timeSeries.push(timeSeries); + timeSeriesArray.push(timeSeries); } } } - return request; + return { + name: `projects/${exportArgs.resource._attributes['cloud.resource_manager.project_id']}`, + timeSeries: timeSeriesArray, + }; } export class CloudMonitoringExporter extends MetricExporter { @@ -157,7 +164,9 @@ export class CloudMonitoringExporter extends MetricExporter { try { // TODO: Remove casting. const request = metricsToRequest(metrics as unknown as ExportInput); - await this.monitoringClient.createTimeSeries(request); + await this.monitoringClient.createTimeSeries( + request as ICreateTimeSeriesRequest + ); const exportResult = {code: 0}; resultCallback(exportResult); } catch (error) { diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index fd5804820..3adbd6369 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -1,154 +1,158 @@ +import * as assert from 'assert'; import {describe} from 'mocha'; import {exportInput} from './export-input-fixture'; +import { + ExportInput, + metricsToRequest, +} from '../../src/client-side-metrics/exporter'; // TODO: Generate the export code -describe('Bigtable/metricsToRequest', () => { +describe.only('Bigtable/metricsToRequest', () => { it('Converts a counter and a histogram to the cloud monitoring format', () => { - const exportArgs = exportInput; - const expectedRequests = [ - { - name: 'projects/some-project', - timeSeries: [ - { - metric: { - type: 'bigtable.googleapis.com/internal/client/operation_latencies', - labels: { - app_profile: 'fake-app-profile-id', - client_name: 'nodejs-bigtable/5.1.2', - method: 'readRows', - finalOperationStatus: 0, - streaming: 'STREAMING', - }, + const expectedRequest = { + name: 'projects/some-project', + timeSeries: [ + { + metric: { + type: 'bigtable.googleapis.com/internal/client/operation_latencies', + labels: { + app_profile: 'fake-app-profile-id', + client_name: 'nodejs-bigtable/5.1.2', + method: 'readRows', + finalOperationStatus: 0, + streaming: 'STREAMING', }, - resource: { - type: 'bigtable_client_raw', - labels: { - cluster: 'fake-cluster3', - instance: 'emulator-test-instance', - project_id: 'some-project', - table: 
'my-table', - zone: 'us-west1-c\u0012', - }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 'fake-cluster3', + instance: 'emulator-test-instance', + project_id: 'some-project', + table: 'my-table', + zone: 'us-west1-c\u0012', }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: 1738946034, - }, - startTime: { - seconds: 1738946024, - }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 1738946034, + }, + startTime: { + seconds: 1738946024, }, - value: { - distributionValue: { - count: '99', - mean: 121, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, - 7500, 10000, - ], - }, + }, + value: { + distributionValue: { + count: '99', + mean: 121, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, + 7500, 10000, + ], }, - bucketCounts: [ - '0', - '0', - '0', - '0', - '0', - '0', - '93', - '0', - '5', - '0', - '0', - '1', - '0', - '0', - '0', - '0', - ], }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '93', + '0', + '5', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + ], }, }, - ], - unit: 'ms', - }, - { - metric: { - type: 'bigtable.googleapis.com/internal/client/retry_count', - labels: { - app_profile: 'fake-app-profile-id', - client_name: 'nodejs-bigtable/5.1.2', - method: 'readRows', - finalOperationStatus: 0, - streaming: 'STREAMING', - }, }, - resource: { - type: 'bigtable_client_raw', - labels: { - cluster: 'fake-cluster3', - instance: 'emulator-test-instance', - project_id: 'some-project', - table: 'my-table', - zone: 'us-west1-c\u0012', - }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/retry_count', + labels: { + app_profile: 'fake-app-profile-id', + client_name: 'nodejs-bigtable/5.1.2', + method: 'readRows', + finalOperationStatus: 0, + streaming: 'STREAMING', }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: 1738946034, - }, - startTime: { - seconds: 1738946024, - }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 'fake-cluster3', + instance: 'emulator-test-instance', + project_id: 'some-project', + table: 'my-table', + zone: 'us-west1-c\u0012', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 1738946034, + }, + startTime: { + seconds: 1738946024, }, - value: { - distributionValue: { - count: '100', - mean: 110.27, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, - 7500, 10000, - ], - }, + }, + value: { + distributionValue: { + count: '100', + mean: 110.27, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, + 7500, 10000, + ], }, - bucketCounts: [ - '99', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - ], }, + bucketCounts: [ + '99', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + ], }, }, - ], - unit: 'ms', - }, - ], - }, - ]; + }, + ], + unit: 'ms', + }, + ], + }; + const actualRequest = metricsToRequest(exportInput); + assert.deepStrictEqual(actualRequest, expectedRequest); }); }); From 13382d2383e29459326f5267079ea63f5b7886f4 Mon Sep 17 00:00:00 2001 From: Daniel 
Bruce Date: Fri, 7 Feb 2025 17:18:47 -0500 Subject: [PATCH 161/448] Address the failing export test --- test/metrics-collector/export-input-fixture.ts | 5 +++-- test/metrics-collector/metricsToRequest.ts | 17 ++++++++++------- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/test/metrics-collector/export-input-fixture.ts b/test/metrics-collector/export-input-fixture.ts index bd5b290c7..6017029ef 100644 --- a/test/metrics-collector/export-input-fixture.ts +++ b/test/metrics-collector/export-input-fixture.ts @@ -70,7 +70,7 @@ export const exportInput = { methodName: 'readRows', clientName: 'nodejs-bigtable/5.1.2', }, - startTime: [1738946024, 950000000], + startTime: [1738946024, 951000000], endTime: [1738946034, 948000000], value: { min: 76, @@ -83,7 +83,7 @@ export const exportInput = { ], counts: [0, 0, 0, 0, 0, 0, 93, 0, 5, 0, 0, 1, 0, 0, 0, 0], }, - count: 100, + count: 99, }, }, ], @@ -103,6 +103,7 @@ export const exportInput = { dataPoints: [ { attributes: { + appProfileId: 'fake-app-profile-id', finalOperationStatus: 0, streamingOperation: 'STREAMING', projectId: 'some-project', diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index 3adbd6369..dedb47f50 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -2,7 +2,6 @@ import * as assert from 'assert'; import {describe} from 'mocha'; import {exportInput} from './export-input-fixture'; import { - ExportInput, metricsToRequest, } from '../../src/client-side-metrics/exporter'; @@ -19,7 +18,7 @@ describe.only('Bigtable/metricsToRequest', () => { app_profile: 'fake-app-profile-id', client_name: 'nodejs-bigtable/5.1.2', method: 'readRows', - finalOperationStatus: 0, + finalOperationStatus: '0', streaming: 'STREAMING', }, }, @@ -40,9 +39,11 @@ describe.only('Bigtable/metricsToRequest', () => { interval: { endTime: { seconds: 1738946034, + nanos: 948000000, }, startTime: { seconds: 1738946024, + nanos: 951000000, }, }, value: { @@ -53,7 +54,7 @@ describe.only('Bigtable/metricsToRequest', () => { explicitBuckets: { bounds: [ 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, - 7500, 10000, + 5000, 7500, 10000, ], }, }, @@ -88,7 +89,7 @@ describe.only('Bigtable/metricsToRequest', () => { app_profile: 'fake-app-profile-id', client_name: 'nodejs-bigtable/5.1.2', method: 'readRows', - finalOperationStatus: 0, + finalOperationStatus: '0', streaming: 'STREAMING', }, }, @@ -108,21 +109,23 @@ describe.only('Bigtable/metricsToRequest', () => { { interval: { endTime: { + nanos: 948000000, seconds: 1738946034, }, startTime: { + nanos: 951000000, seconds: 1738946024, }, }, value: { distributionValue: { - count: '100', - mean: 110.27, + count: '99', + mean: 0, bucketOptions: { explicitBuckets: { bounds: [ 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, - 7500, 10000, + 5000, 7500, 10000, ], }, }, From 948a3a341549c73dcf476920b5de665a4d7b3257 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 10 Feb 2025 13:32:53 -0500 Subject: [PATCH 162/448] Fixed the exporter. It now writes data properly. 
--- src/client-side-metrics/exporter.ts | 2 +- system-test/cloud-monitoring-exporter.ts | 33 ++ test-common/export-input-fixture.ts | 495 ++++++++++++++++++ .../metrics-collector/export-input-fixture.ts | 138 ----- test/metrics-collector/metricsToRequest.ts | 91 +--- 5 files changed, 540 insertions(+), 219 deletions(-) create mode 100644 system-test/cloud-monitoring-exporter.ts create mode 100644 test-common/export-input-fixture.ts delete mode 100644 test/metrics-collector/export-input-fixture.ts diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index b1fd41a92..fc2e480d4 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -92,7 +92,7 @@ export function metricsToRequest(exportArgs: ExportInput) { app_profile: allAttributes.appProfileId, client_name: allAttributes.clientName, method: allAttributes.methodName, - finalOperationStatus: allAttributes.finalOperationStatus.toString(), + status: allAttributes.finalOperationStatus.toString(), streaming: allAttributes.streamingOperation, }; const resourceLabels = { diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts new file mode 100644 index 000000000..08bc9bbbc --- /dev/null +++ b/system-test/cloud-monitoring-exporter.ts @@ -0,0 +1,33 @@ +import {describe} from 'mocha'; +import {CloudMonitoringExporter} from '../src/client-side-metrics/exporter'; +import {exportInput} from '../test-common/export-input-fixture'; +import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; +import {Bigtable} from '../src'; + +describe.only('Bigtable/CloudMonitoringExporter', () => { + it('exports client side metrics to cloud monitoring', done => { + // When this test is run, metrics should be visible at the following link: + // https://pantheon.corp.google.com/monitoring/metrics-explorer;duration=PT1H?inv=1&invt=Abo9_A&project={projectId} + // This test will add metrics so that they are available in Pantheon + (async () => { + const bigtable = new Bigtable(); + const projectId: string = await new Promise((resolve, reject) => { + bigtable.getProjectId_((err, projectId) => { + if (err) { + reject(err); + } else { + resolve(projectId as string); + } + }); + }); + const transformedExportInput = JSON.parse( + JSON.stringify(exportInput).replace(/some-project/g, projectId) + ); + const exporter = new CloudMonitoringExporter(); + exporter.export( + transformedExportInput as unknown as ResourceMetrics, + done + ); + })(); + }); +}); diff --git a/test-common/export-input-fixture.ts b/test-common/export-input-fixture.ts new file mode 100644 index 000000000..6147565e2 --- /dev/null +++ b/test-common/export-input-fixture.ts @@ -0,0 +1,495 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
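+
+// The fixture's interval is anchored to the current clock (rather than a
+// fixed timestamp), presumably so it stays recent when the system test
+// replays it against live Cloud Monitoring, which rejects points written
+// too far in the past.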
+ +export const fakeStartTime = Math.floor(Date.now() / 1000) - 2000; +export const fakeEndTime = fakeStartTime + 1000; + +export const exportInput = { + resource: { + _attributes: { + 'service.name': 'Cloud Bigtable Table', + 'telemetry.sdk.language': 'nodejs', + 'telemetry.sdk.name': 'opentelemetry', + 'telemetry.sdk.version': '1.30.0', + 'cloud.provider': 'gcp', + 'cloud.platform': 'gce_instance', + 'cloud.resource_manager.project_id': 'some-project', + 'monitored_resource.type': 'bigtable_client_raw', + }, + asyncAttributesPending: false, + _syncAttributes: { + 'service.name': 'Cloud Bigtable Table', + 'telemetry.sdk.language': 'nodejs', + 'telemetry.sdk.name': 'opentelemetry', + 'telemetry.sdk.version': '1.30.0', + 'cloud.provider': 'gcp', + 'cloud.platform': 'gce_instance', + 'cloud.resource_manager.project_id': 'some-project', + 'monitored_resource.type': 'bigtable_client_raw', + }, + _asyncAttributesPromise: {}, + }, + scopeMetrics: [ + { + scope: { + name: 'bigtable.googleapis.com', + version: '', + }, + metrics: [ + { + descriptor: { + name: 'bigtable.googleapis.com/internal/client/operation_latencies', + type: 'HISTOGRAM', + description: + "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + unit: '', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 0, + dataPoints: [ + { + attributes: { + appProfileId: 'fake-app-profile-id', + finalOperationStatus: 0, + streamingOperation: 'true', + projectId: 'some-project', + instanceId: 'emulator-test-instance', + table: 'my-table', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientName: 'nodejs-bigtable/5.1.2', + }, + startTime: [fakeStartTime, 951000000], + endTime: [fakeEndTime, 948000000], + value: { + min: 76, + max: 1337, + sum: 11979, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [0, 0, 0, 0, 0, 0, 93, 0, 5, 0, 0, 1, 0, 0, 0, 0], + }, + count: 99, + }, + }, + ], + }, + ], + }, + ], +}; + +const serverLatencyExportOutput = { + name: 'projects/cloud-native-db-dpes-shared', + timeSeries: [ + { + metric: { + type: 'bigtable.googleapis.com/internal/client/server_latencies', + labels: { + app_profile: '', + client_name: 'go-bigtable/1.35.0', + client_uid: + 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', + method: 'Bigtable.MutateRows', + status: 'OK', + // "streaming": "true" + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 'bahaaiman-instance-01-c1', + instance: 'bahaaiman-instance-01', + project_id: 'cloud-native-db-dpes-shared', + table: 'profile-b5e6f29d-2122-4d2c-8c12-cfb8e90ca05f', + zone: 'us-central1-f', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 2000, + }, + startTime: { + seconds: 1000, + }, + }, + value: { + distributionValue: { + count: '1', + mean: 376.103605, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, + ], + }, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', 
+ '0', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/attempt_latencies', + labels: { + app_profile: '', + client_name: 'go-bigtable/1.35.0', + client_uid: + 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', + method: 'Bigtable.ReadRows', + status: 'OK', + // "streaming": "true" + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 'bahaaiman-instance-01-c1', + instance: 'bahaaiman-instance-01', + project_id: 'cloud-native-db-dpes-shared', + table: 'profile-b5e6f29d-2122-4d2c-8c12-cfb8e90ca05f', + zone: 'us-central1-f', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 2000, + }, + startTime: { + seconds: 1000, + }, + }, + value: { + distributionValue: { + count: '5', + mean: 272.559932, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, + ], + }, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '1', + '0', + '0', + '0', + '2', + '2', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/server_latencies', + labels: { + app_profile: '', + client_name: 'go-bigtable/1.35.0', + client_uid: + 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', + method: 'Bigtable.MutateRows', + status: 'OK', + // "streaming": "true" + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 'bahaaiman-instance-01-c1', + instance: 'bahaaiman-instance-01', + project_id: 'cloud-native-db-dpes-shared', + table: 'profile-b5e6f29d-2122-4d2c-8c12-cfb8e90ca05f', + zone: 'us-central1-f', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 2000, + }, + startTime: { + seconds: 1000, + }, + }, + value: { + distributionValue: { + count: '1', + mean: 331, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, + ], + }, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/server_latencies', + labels: { + app_profile: '', + client_name: 'go-bigtable/1.35.0', + client_uid: + 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', + method: 'Bigtable.ReadRows', + status: 'OK', + // "streaming": "true" + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 'bahaaiman-instance-01-c1', + instance: 'bahaaiman-instance-01', + 
project_id: 'cloud-native-db-dpes-shared', + table: 'profile-b5e6f29d-2122-4d2c-8c12-cfb8e90ca05f', + zone: 'us-central1-f', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 2000, + }, + startTime: { + seconds: 1000, + }, + }, + value: { + distributionValue: { + count: '5', + mean: 230, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, + ], + }, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '1', + '0', + '0', + '2', + '2', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + ], +}; diff --git a/test/metrics-collector/export-input-fixture.ts b/test/metrics-collector/export-input-fixture.ts deleted file mode 100644 index 6017029ef..000000000 --- a/test/metrics-collector/export-input-fixture.ts +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -export const exportInput = { - resource: { - _attributes: { - 'service.name': 'Cloud Bigtable Table', - 'telemetry.sdk.language': 'nodejs', - 'telemetry.sdk.name': 'opentelemetry', - 'telemetry.sdk.version': '1.30.0', - 'cloud.provider': 'gcp', - 'cloud.platform': 'gce_instance', - 'cloud.resource_manager.project_id': 'some-project', - 'monitored_resource.type': 'bigtable_client_raw', - }, - asyncAttributesPending: false, - _syncAttributes: { - 'service.name': 'Cloud Bigtable Table', - 'telemetry.sdk.language': 'nodejs', - 'telemetry.sdk.name': 'opentelemetry', - 'telemetry.sdk.version': '1.30.0', - 'cloud.provider': 'gcp', - 'cloud.platform': 'gce_instance', - 'cloud.resource_manager.project_id': 'some-project', - 'monitored_resource.type': 'bigtable_client_raw', - }, - _asyncAttributesPromise: {}, - }, - scopeMetrics: [ - { - scope: { - name: 'bigtable.googleapis.com', - version: '', - }, - metrics: [ - { - descriptor: { - name: 'bigtable.googleapis.com/internal/client/operation_latencies', - type: 'HISTOGRAM', - description: - "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. 
This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", - unit: '', - valueType: 1, - advice: {}, - }, - aggregationTemporality: 1, - dataPointType: 0, - dataPoints: [ - { - attributes: { - appProfileId: 'fake-app-profile-id', - finalOperationStatus: 0, - streamingOperation: 'STREAMING', - projectId: 'some-project', - instanceId: 'emulator-test-instance', - table: 'my-table', - cluster: 'fake-cluster3', - zone: 'us-west1-c\u0012', - methodName: 'readRows', - clientName: 'nodejs-bigtable/5.1.2', - }, - startTime: [1738946024, 951000000], - endTime: [1738946034, 948000000], - value: { - min: 76, - max: 1337, - sum: 11979, - buckets: { - boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, - ], - counts: [0, 0, 0, 0, 0, 0, 93, 0, 5, 0, 0, 1, 0, 0, 0, 0], - }, - count: 99, - }, - }, - ], - }, - { - descriptor: { - name: 'bigtable.googleapis.com/internal/client/retry_count', - type: 'HISTOGRAM', - description: - 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', - unit: 'ms', - valueType: 1, - advice: {}, - }, - aggregationTemporality: 1, - dataPointType: 0, - dataPoints: [ - { - attributes: { - appProfileId: 'fake-app-profile-id', - finalOperationStatus: 0, - streamingOperation: 'STREAMING', - projectId: 'some-project', - instanceId: 'emulator-test-instance', - table: 'my-table', - cluster: 'fake-cluster3', - zone: 'us-west1-c\u0012', - methodName: 'readRows', - clientName: 'nodejs-bigtable/5.1.2', - }, - startTime: [1738946024, 951000000], - endTime: [1738946034, 948000000], - value: { - min: 0, - max: 0, - sum: 0, - buckets: { - boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, - ], - counts: [99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - }, - count: 99, - }, - }, - ], - }, - ], - }, - ], -}; diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index dedb47f50..6a0bc5919 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -1,12 +1,14 @@ import * as assert from 'assert'; import {describe} from 'mocha'; -import {exportInput} from './export-input-fixture'; import { - metricsToRequest, -} from '../../src/client-side-metrics/exporter'; + exportInput, + fakeEndTime, + fakeStartTime, +} from '../../test-common/export-input-fixture'; +import {metricsToRequest} from '../../src/client-side-metrics/exporter'; // TODO: Generate the export code -describe.only('Bigtable/metricsToRequest', () => { +describe('Bigtable/metricsToRequest', () => { it('Converts a counter and a histogram to the cloud monitoring format', () => { const expectedRequest = { name: 'projects/some-project', @@ -18,8 +20,8 @@ describe.only('Bigtable/metricsToRequest', () => { app_profile: 'fake-app-profile-id', client_name: 'nodejs-bigtable/5.1.2', method: 'readRows', - finalOperationStatus: '0', - streaming: 'STREAMING', + status: '0', + streaming: 'true', }, }, resource: { @@ -29,7 +31,7 @@ describe.only('Bigtable/metricsToRequest', () => { instance: 'emulator-test-instance', project_id: 'some-project', table: 'my-table', - zone: 'us-west1-c\u0012', + zone: 'us-west1-c', }, }, metricKind: 'CUMULATIVE', @@ -38,11 +40,11 @@ describe.only('Bigtable/metricsToRequest', () => { { interval: { endTime: { - seconds: 1738946034, + seconds: fakeEndTime, nanos: 948000000, }, startTime: { - seconds: 
1738946024, + seconds: fakeStartTime, nanos: 951000000, }, }, @@ -82,77 +84,6 @@ describe.only('Bigtable/metricsToRequest', () => { ], unit: 'ms', }, - { - metric: { - type: 'bigtable.googleapis.com/internal/client/retry_count', - labels: { - app_profile: 'fake-app-profile-id', - client_name: 'nodejs-bigtable/5.1.2', - method: 'readRows', - finalOperationStatus: '0', - streaming: 'STREAMING', - }, - }, - resource: { - type: 'bigtable_client_raw', - labels: { - cluster: 'fake-cluster3', - instance: 'emulator-test-instance', - project_id: 'some-project', - table: 'my-table', - zone: 'us-west1-c\u0012', - }, - }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - nanos: 948000000, - seconds: 1738946034, - }, - startTime: { - nanos: 951000000, - seconds: 1738946024, - }, - }, - value: { - distributionValue: { - count: '99', - mean: 0, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, - 5000, 7500, 10000, - ], - }, - }, - bucketCounts: [ - '99', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - ], - }, - }, - }, - ], - unit: 'ms', - }, ], }; const actualRequest = metricsToRequest(exportInput); From c8bb0a8b4ae135a821b95d3f20cf1970210da086 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 10 Feb 2025 13:39:51 -0500 Subject: [PATCH 163/448] Test should complete when the export doesnt error --- src/client-side-metrics/exporter.ts | 1 + system-test/cloud-monitoring-exporter.ts | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index fc2e480d4..4c697d9fb 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -153,6 +153,7 @@ export function metricsToRequest(exportArgs: ExportInput) { }; } +// TODO: Add test for when the export fails export class CloudMonitoringExporter extends MetricExporter { private monitoringClient = new MetricServiceClient(); diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts index 08bc9bbbc..3e78c77c6 100644 --- a/system-test/cloud-monitoring-exporter.ts +++ b/system-test/cloud-monitoring-exporter.ts @@ -26,7 +26,13 @@ describe.only('Bigtable/CloudMonitoringExporter', () => { const exporter = new CloudMonitoringExporter(); exporter.export( transformedExportInput as unknown as ResourceMetrics, - done + (result: {code: number}) => { + if (result.code === 0) { + done(); + } else { + done(result.code); + } + } ); })(); }); From 78ec2e89cd32469024829bdfd8c122397bc5efb2 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 10 Feb 2025 15:52:31 -0500 Subject: [PATCH 164/448] Add the fixture to the shared folder --- test-common/metrics-handler-fixture.ts | 75 ++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 test-common/metrics-handler-fixture.ts diff --git a/test-common/metrics-handler-fixture.ts b/test-common/metrics-handler-fixture.ts new file mode 100644 index 000000000..f5899ce43 --- /dev/null +++ b/test-common/metrics-handler-fixture.ts @@ -0,0 +1,75 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
From 78ec2e89cd32469024829bdfd8c122397bc5efb2 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Mon, 10 Feb 2025 15:52:31 -0500
Subject: [PATCH 164/448] Add the fixture to the shared folder

---
 test-common/metrics-handler-fixture.ts | 75 ++++++++++++++++++++++++++
 1 file changed, 75 insertions(+)
 create mode 100644 test-common/metrics-handler-fixture.ts

diff --git a/test-common/metrics-handler-fixture.ts b/test-common/metrics-handler-fixture.ts
new file mode 100644
index 000000000..f5899ce43
--- /dev/null
+++ b/test-common/metrics-handler-fixture.ts
@@ -0,0 +1,75 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+export const expectedRequestsHandled = [
+  {
+    metrics: {
+      attemptLatency: 2000,
+      serverLatency: 101,
+      connectivityErrorCount: 1,
+    },
+    attributes: {
+      appProfileId: undefined,
+      streamingOperation: 'true',
+      attemptStatus: 4,
+      connectivityErrorCount: 1,
+      projectId: 'my-project',
+      instanceId: 'fakeInstanceId',
+      table: 'fakeTableId',
+      cluster: 'fake-cluster3',
+      zone: 'us-west1-c',
+      methodName: 'readRows',
+      clientName: 'nodejs-bigtable',
+    },
+  },
+  {
+    metrics: {
+      attemptLatency: 1000,
+      serverLatency: 103,
+      connectivityErrorCount: 1,
+    },
+    attributes: {
+      appProfileId: undefined,
+      streamingOperation: 'true',
+      attemptStatus: 0,
+      connectivityErrorCount: 1,
+      projectId: 'my-project',
+      instanceId: 'fakeInstanceId',
+      table: 'fakeTableId',
+      cluster: 'fake-cluster3',
+      zone: 'us-west1-c',
+      methodName: 'readRows',
+      clientName: 'nodejs-bigtable',
+    },
+  },
+  {
+    metrics: {
+      operationLatency: 6000,
+      retryCount: 1,
+      firstResponseLatency: 2000,
+    },
+    attributes: {
+      appProfileId: undefined,
+      finalOperationStatus: 0,
+      streamingOperation: 'true',
+      projectId: 'my-project',
+      instanceId: 'fakeInstanceId',
+      table: 'fakeTableId',
+      cluster: 'fake-cluster3',
+      zone: 'us-west1-c',
+      methodName: 'readRows',
+      clientName: 'nodejs-bigtable',
+    },
+  },
+];

From e5ec89f522b1ae39c63268a4f40e5e72705a4584 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Mon, 10 Feb 2025 16:55:48 -0500
Subject: [PATCH 165/448] Remove two files not in this PR

---
 .../operation-metrics-collector.ts            | 366 ------------------
 src/something.js                              |   0
 test/metrics-collector/metrics-collector.ts   | 152 --------
 3 files changed, 518 deletions(-)
 delete mode 100644 src/client-side-metrics/operation-metrics-collector.ts
 create mode 100644 src/something.js
 delete mode 100644 test/metrics-collector/metrics-collector.ts

diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts
deleted file mode 100644
index b9c3e90a2..000000000
--- a/src/client-side-metrics/operation-metrics-collector.ts
+++ /dev/null
@@ -1,366 +0,0 @@
-// Copyright 2025 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-import * as fs from 'fs';
-import {IMetricsHandler} from './metrics-handler';
-import {MethodName, StreamingState} from './client-side-metrics-attributes';
-import {grpc} from 'google-gax';
-
-/**
- * An interface representing a Date-like object. Provides a `getTime` method
- * for retrieving the time value in milliseconds. Used for abstracting time
- * in tests.
- */ -interface DateLike { - /** - * Returns the time value in milliseconds. - * @returns The time value in milliseconds. - */ - getTime(): number; -} - -/** - * Interface for a provider that returns DateLike objects. Used for mocking dates in tests. - */ -interface DateProvider { - /** - * Returns a DateLike object. - * @returns A DateLike object representing the current time or a fake time value. - */ - getDate(): DateLike; -} - -/** - * The default DateProvider implementation. Returns the current date and time. - */ -class DefaultDateProvider { - /** - * Returns a new Date object representing the current time. - * @returns {Date} The current date and time. - */ - getDate() { - return new Date(); - } -} - -/** - * An interface representing a tabular API surface, such as a Bigtable table. - */ -export interface ITabularApiSurface { - instance: { - id: string; - }; - id: string; - bigtable: { - appProfileId?: string; - }; -} - -/** - * Information about the completion of a single attempt of a Bigtable operation. - * This information is used for recording metrics. - */ -interface OnAttemptCompleteInfo { - connectivityErrorCount: number; - /** - * Whether the operation is a streaming operation or not. - */ - streamingOperation: StreamingState; - /** - * The attempt status of the operation. - */ - attemptStatus: grpc.status; -} - -/** - * Attributes specific to a single attempt of a Bigtable operation. These - * attributes provide information about the attempt's status and whether it was - * part of a streaming operation. - */ -interface AttemptOnlyAttributes { - attemptStatus: grpc.status; - streamingOperation: StreamingState; -} - -/** - * Information about a Bigtable operation to be recorded in client side metrics. - */ -interface OperationOnlyAttributes { - finalOperationStatus: grpc.status; - streamingOperation: StreamingState; -} - -const packageJSON = fs.readFileSync('package.json'); -const version = JSON.parse(packageJSON.toString()).version; - -// MetricsCollectorState is a list of states that the metrics collector can be in. -// Tracking the OperationMetricsCollector state is done so that the -// OperationMetricsCollector methods are not called in the wrong order. If the -// methods are called in the wrong order they will not execute and they will -// throw warnings. -// -// The following state transitions are allowed: -// OPERATION_NOT_STARTED -> OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS -// OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS -> OPERATION_STARTED_ATTEMPT_IN_PROGRESS -// OPERATION_STARTED_ATTEMPT_IN_PROGRESS -> OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS -// OPERATION_STARTED_ATTEMPT_IN_PROGRESS -> OPERATION_COMPLETE -enum MetricsCollectorState { - OPERATION_NOT_STARTED, - OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS, - OPERATION_STARTED_ATTEMPT_IN_PROGRESS, - OPERATION_COMPLETE, -} - -/** - * A class for tracing and recording client-side metrics related to Bigtable operations. 
- */ -export class OperationMetricsCollector { - private state: MetricsCollectorState; - private operationStartTime: DateLike | null; - private attemptStartTime: DateLike | null; - private zone: string | undefined; - private cluster: string | undefined; - private tabularApiSurface: ITabularApiSurface; - private methodName: MethodName; - private attemptCount = 0; - private receivedFirstResponse: boolean; - private metricsHandlers: IMetricsHandler[]; - private firstResponseLatency: number | null; - private serverTimeRead: boolean; - private serverTime: number | null; - private dateProvider: DateProvider; - - /** - * @param {ITabularApiSurface} tabularApiSurface Information about the Bigtable table being accessed. - * @param {IMetricsHandler[]} metricsHandlers The metrics handlers used for recording metrics. - * @param {MethodName} methodName The name of the method being traced. - * @param {DateProvider} dateProvider A provider for date/time information (for testing). - */ - constructor( - tabularApiSurface: ITabularApiSurface, - metricsHandlers: IMetricsHandler[], - methodName: MethodName, - dateProvider?: DateProvider - ) { - this.state = MetricsCollectorState.OPERATION_NOT_STARTED; - this.zone = undefined; - this.cluster = undefined; - this.tabularApiSurface = tabularApiSurface; - this.methodName = methodName; - this.operationStartTime = null; - this.attemptStartTime = null; - this.receivedFirstResponse = false; - this.metricsHandlers = metricsHandlers; - this.firstResponseLatency = null; - this.serverTimeRead = false; - this.serverTime = null; - if (dateProvider) { - this.dateProvider = dateProvider; - } else { - this.dateProvider = new DefaultDateProvider(); - } - } - - /** - * Called when the operation starts. Records the start time. - */ - onOperationStart() { - if (this.state === MetricsCollectorState.OPERATION_NOT_STARTED) { - this.operationStartTime = this.dateProvider.getDate(); - this.firstResponseLatency = null; - this.receivedFirstResponse = false; - this.state = - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS; - } else { - console.warn('Invalid state transition'); - } - } - - /** - * Called when an attempt (e.g., an RPC attempt) completes. Records attempt latencies. - * @param {string} projectId The id of the project. - * @param {OnAttemptCompleteInfo} info Information about the completed attempt. - */ - onAttemptComplete(projectId: string, info: OnAttemptCompleteInfo) { - if ( - this.state === MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS - ) { - this.state = - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS; - this.attemptCount++; - const endTime = this.dateProvider.getDate(); - if (projectId && this.attemptStartTime) { - const attributes = { - streamingOperation: info.streamingOperation, - attemptStatus: info.attemptStatus, - connectivityErrorCount: info.connectivityErrorCount, - projectId, - instanceId: this.tabularApiSurface.instance.id, - table: this.tabularApiSurface.id, - cluster: this.cluster, - zone: this.zone, - appProfileId: this.tabularApiSurface.bigtable.appProfileId, - methodName: this.methodName, - clientName: `nodejs-bigtable/${version}`, - }; - const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); - this.metricsHandlers.forEach(metricsHandler => { - if (metricsHandler.onAttemptComplete) { - metricsHandler.onAttemptComplete( - { - attemptLatency: totalTime, - serverLatency: this.serverTime ?? 
undefined, - connectivityErrorCount: info.connectivityErrorCount, - }, - attributes - ); - } - }); - } - } else { - console.warn('Invalid state transition attempted'); - } - } - - /** - * Called when a new attempt starts. Records the start time of the attempt. - */ - onAttemptStart() { - if ( - this.state === - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS - ) { - this.state = MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS; - this.attemptStartTime = this.dateProvider.getDate(); - this.serverTime = null; - this.serverTimeRead = false; - } else { - console.warn('Invalid state transition attempted'); - } - } - - /** - * Called when the first response is received. Records first response latencies. - */ - onResponse(projectId: string) { - if (!this.receivedFirstResponse) { - this.receivedFirstResponse = true; - const endTime = this.dateProvider.getDate(); - if (projectId && this.operationStartTime) { - this.firstResponseLatency = - endTime.getTime() - this.operationStartTime.getTime(); - } - } - } - - /** - * Called when an operation completes (successfully or unsuccessfully). - * Records operation latencies, retry counts, and connectivity error counts. - * @param {string} projectId The id of the project. - * @param {OperationOnlyAttributes} info Information about the completed operation. - */ - onOperationComplete(projectId: string, info: OperationOnlyAttributes) { - if ( - this.state === - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS - ) { - this.state = MetricsCollectorState.OPERATION_COMPLETE; - const endTime = this.dateProvider.getDate(); - if (projectId && this.operationStartTime) { - const totalTime = endTime.getTime() - this.operationStartTime.getTime(); - { - const operationAttributes = { - finalOperationStatus: info.finalOperationStatus, - streamingOperation: info.streamingOperation, - projectId, - instanceId: this.tabularApiSurface.instance.id, - table: this.tabularApiSurface.id, - cluster: this.cluster, - zone: this.zone, - appProfileId: this.tabularApiSurface.bigtable.appProfileId, - methodName: this.methodName, - clientName: `nodejs-bigtable/${version}`, - }; - const metrics = { - operationLatency: totalTime, - retryCount: this.attemptCount - 1, - firstResponseLatency: this.firstResponseLatency ?? undefined, - }; - this.metricsHandlers.forEach(metricsHandler => { - if (metricsHandler.onOperationComplete) { - metricsHandler.onOperationComplete(metrics, operationAttributes); - } - }); - } - } - } else { - console.warn('Invalid state transition attempted'); - } - } - - /** - * Called when metadata is received. Extracts server timing information if available. - * @param {string} projectId The id of the project. - * @param {object} metadata The received metadata. - */ - onMetadataReceived( - projectId: string, - metadata: { - internalRepr: Map; - options: {}; - } - ) { - const mappedEntries = new Map( - Array.from(metadata.internalRepr.entries(), ([key, value]) => [ - key, - value.toString(), - ]) - ); - const durationValues = mappedEntries.get('server-timing')?.split('dur='); - if (durationValues && durationValues[1]) { - if (!this.serverTimeRead) { - this.serverTimeRead = true; - const serverTime = parseInt(durationValues[1]); - if (projectId) { - this.serverTime = serverTime; - } - } - } - } - - /** - * Called when status information is received. Extracts zone and cluster information. - * @param {object} status The received status information. 
- */ - onStatusReceived(status: { - metadata: {internalRepr: Map; options: {}}; - }) { - const mappedEntries = new Map( - Array.from(status.metadata.internalRepr.entries(), ([key, value]) => [ - key, - value.toString(), - ]) - ); - const instanceInformation = mappedEntries - .get('x-goog-ext-425905942-bin') - ?.replace(new RegExp('\\n', 'g'), '') - .split('\r'); - if (instanceInformation && instanceInformation[0]) { - this.zone = instanceInformation[0]; - } - if (instanceInformation && instanceInformation[1]) { - this.cluster = instanceInformation[1]; - } - } -} diff --git a/src/something.js b/src/something.js new file mode 100644 index 000000000..e69de29bb diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts deleted file mode 100644 index 5c158d28c..000000000 --- a/test/metrics-collector/metrics-collector.ts +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import {describe} from 'mocha'; -import {TestDateProvider} from '../../test-common/test-date-provider'; -import * as assert from 'assert'; -import * as fs from 'fs'; -import {TestMetricsHandler} from '../../test-common/test-metrics-handler'; -import {OperationMetricsCollector} from '../../src/client-side-metrics/operation-metrics-collector'; -import { - MethodName, - StreamingState, -} from '../../src/client-side-metrics/client-side-metrics-attributes'; -import {grpc} from 'google-gax'; - -/** - * A fake implementation of the Bigtable client for testing purposes. Provides a - * metricsTracerFactory and a stubbed projectId method. - */ -class FakeBigtable { - appProfileId?: string; - projectId = 'my-project'; -} - -/** - * A fake implementation of a Bigtable instance for testing purposes. Provides only an ID. - */ -class FakeInstance { - /** - * The ID of the fake instance. - */ - id = 'fakeInstanceId'; -} - -describe('Bigtable/MetricsCollector', () => { - it('should record the right metrics with a typical method call', async () => { - const logger = {value: ''}; - const metricsHandlers = [new TestMetricsHandler(logger)]; - class FakeTable { - id = 'fakeTableId'; - instance = new FakeInstance(); - bigtable = new FakeBigtable(); - - async fakeMethod(): Promise { - function createMetadata(duration: string) { - return { - internalRepr: new Map([ - ['server-timing', Buffer.from(`gfet4t7; dur=${duration}`)], - ]), - options: {}, - }; - } - if (this.bigtable.projectId) { - const status = { - metadata: { - internalRepr: new Map([ - [ - 'x-goog-ext-425905942-bin', - Buffer.from('\n\nus-west1-c \rfake-cluster3'), - ], - ]), - options: {}, - }, - }; - const metricsCollector = new OperationMetricsCollector( - this, - metricsHandlers, - MethodName.READ_ROWS, - new TestDateProvider(logger) - ); - // In this method we simulate a series of events that might happen - // when a user calls one of the Table methods. - // Here is an example of what might happen in a method call: - logger.value += '1. 
The operation starts\n'; - metricsCollector.onOperationStart(); - logger.value += '2. The attempt starts.\n'; - metricsCollector.onAttemptStart(); - logger.value += '3. Client receives status information.\n'; - metricsCollector.onStatusReceived(status); - logger.value += '4. Client receives metadata.\n'; - metricsCollector.onMetadataReceived( - this.bigtable.projectId, - createMetadata('101') - ); - logger.value += '5. Client receives first row.\n'; - metricsCollector.onResponse(this.bigtable.projectId); - logger.value += '6. Client receives metadata.\n'; - metricsCollector.onMetadataReceived( - this.bigtable.projectId, - createMetadata('102') - ); - logger.value += '7. Client receives second row.\n'; - metricsCollector.onResponse(this.bigtable.projectId); - logger.value += '8. A transient error occurs.\n'; - metricsCollector.onAttemptComplete(this.bigtable.projectId, { - streamingOperation: StreamingState.STREAMING, - attemptStatus: grpc.status.DEADLINE_EXCEEDED, - connectivityErrorCount: 1, - }); - logger.value += '9. After a timeout, the second attempt is made.\n'; - metricsCollector.onAttemptStart(); - logger.value += '10. Client receives status information.\n'; - metricsCollector.onStatusReceived(status); - logger.value += '11. Client receives metadata.\n'; - metricsCollector.onMetadataReceived( - this.bigtable.projectId, - createMetadata('103') - ); - logger.value += '12. Client receives third row.\n'; - metricsCollector.onResponse(this.bigtable.projectId); - logger.value += '13. Client receives metadata.\n'; - metricsCollector.onMetadataReceived( - this.bigtable.projectId, - createMetadata('104') - ); - logger.value += '14. Client receives fourth row.\n'; - metricsCollector.onResponse(this.bigtable.projectId); - logger.value += '15. User reads row 1\n'; - logger.value += '16. 
Stream ends, operation completes\n'; - metricsCollector.onAttemptComplete(this.bigtable.projectId, { - attemptStatus: grpc.status.OK, - streamingOperation: StreamingState.STREAMING, - connectivityErrorCount: 1, - }); - metricsCollector.onOperationComplete(this.bigtable.projectId, { - finalOperationStatus: grpc.status.OK, - streamingOperation: StreamingState.STREAMING, - }); - } - } - } - const table = new FakeTable(); - await table.fakeMethod(); - const expectedOutput = fs.readFileSync( - './test/metrics-collector/typical-method-call.txt', - 'utf8' - ); - // Ensure events occurred in the right order here: - assert.strictEqual(logger.value, expectedOutput.replace(/\r/g, '')); - }); -}); From 5403a1b9c34e62d864079e2f76780baed3f70466 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 10 Feb 2025 16:57:15 -0500 Subject: [PATCH 166/448] delete empty file --- src/something.js | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 src/something.js diff --git a/src/something.js b/src/something.js deleted file mode 100644 index e69de29bb..000000000 From 176ed0278459a3a050a96b55d341b46240baee80 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 10 Feb 2025 16:58:51 -0500 Subject: [PATCH 167/448] Remove files that are already in the other PR --- test-common/test-date-provider.ts | 59 --------------------------- test-common/test-metrics-handler.ts | 63 ----------------------------- 2 files changed, 122 deletions(-) delete mode 100644 test-common/test-date-provider.ts delete mode 100644 test-common/test-metrics-handler.ts diff --git a/test-common/test-date-provider.ts b/test-common/test-date-provider.ts deleted file mode 100644 index 8eaa7b38c..000000000 --- a/test-common/test-date-provider.ts +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * A test implementation of a Date-like object. Used for testing purposes. It provides a - * getTime method that returns a pre-determined fake date value, allowing for - * deterministic testing of time-dependent functionality. - */ -class TestDateLike { - private fakeDate; - /** - * @param {number} fakeDate The fake date value to be returned by getTime(), in milliseconds. - */ - constructor(fakeDate: number) { - this.fakeDate = fakeDate; - } - /** - * Returns the fake date value that this object was created with. - * @returns {number} The fake date, in milliseconds. - */ - getTime() { - return this.fakeDate; - } -} - -/** - * A test implementation of a DateProvider. Used for testing purposes. Provides - * a deterministic series of fake dates, with each call to getDate() returning a date 1000ms later than the last. - * Logs each date value returned for verification purposes. - */ -export class TestDateProvider { - private dateCounter = 0; - private messages: {value: string}; - - constructor(messages: {value: string}) { - this.messages = messages; - } - /** - * Returns a new fake date 1000ms later than the last. Logs the date for test verification. 
- * @returns {TestDateLike} A fake date object. - */ - getDate() { - // The test assumes exactly 1s passes between each getDate call. - this.dateCounter = this.dateCounter + 1000; - this.messages.value += `getDate call returns ${this.dateCounter.toString()} ms\n`; - return new TestDateLike(this.dateCounter); - } -} diff --git a/test-common/test-metrics-handler.ts b/test-common/test-metrics-handler.ts deleted file mode 100644 index 8166155b9..000000000 --- a/test-common/test-metrics-handler.ts +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import { - OnAttemptCompleteMetrics, - OnOperationCompleteMetrics, -} from '../src/client-side-metrics/metrics-handler'; -import { - OnAttemptCompleteAttributes, - OnOperationCompleteAttributes, -} from '../src/client-side-metrics/client-side-metrics-attributes'; - -/** - * A test implementation of the IMetricsHandler interface. Used for testing purposes. - * It logs the metrics and attributes received by the onOperationComplete and onAttemptComplete methods. - */ -export class TestMetricsHandler { - private messages: {value: string}; - - constructor(messages: {value: string}) { - this.messages = messages; - } - /** - * Logs the metrics and attributes received for an operation completion. - * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. - * @param {Attributes} attributes Attributes associated with the completed operation. - */ - onOperationComplete( - metrics: OnOperationCompleteMetrics, - attributes: OnOperationCompleteAttributes - ) { - attributes.clientName = 'nodejs-bigtable'; - this.messages.value += 'Recording parameters for onOperationComplete:\n'; - this.messages.value += `metrics: ${JSON.stringify(metrics)}\n`; - this.messages.value += `attributes: ${JSON.stringify(attributes)}\n`; - } - - /** - * Logs the metrics and attributes received for an attempt completion. - * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. - * @param {Attributes} attributes Attributes associated with the completed attempt. 
- */ - onAttemptComplete( - metrics: OnAttemptCompleteMetrics, - attributes: OnAttemptCompleteAttributes - ) { - attributes.clientName = 'nodejs-bigtable'; - this.messages.value += 'Recording parameters for onAttemptComplete:\n'; - this.messages.value += `metrics: ${JSON.stringify(metrics)}\n`; - this.messages.value += `attributes: ${JSON.stringify(attributes)}\n`; - } -} From 08c1c1ba349e04cd012684ca475ac5b6d4429650 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 10 Feb 2025 17:02:17 -0500 Subject: [PATCH 168/448] Remove the metrics handler fixture --- .../metrics-handler-fixture.ts | 75 ------------------- 1 file changed, 75 deletions(-) delete mode 100644 test/metrics-collector/metrics-handler-fixture.ts diff --git a/test/metrics-collector/metrics-handler-fixture.ts b/test/metrics-collector/metrics-handler-fixture.ts deleted file mode 100644 index b9280bfa0..000000000 --- a/test/metrics-collector/metrics-handler-fixture.ts +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -export const expectedRequestsHandled = [ - { - metrics: { - attemptLatency: 2000, - serverLatency: 101, - connectivityErrorCount: 1, - }, - attributes: { - appProfileId: undefined, - streamingOperation: 'streaming', - attemptStatus: 4, - connectivityErrorCount: 1, - projectId: 'my-project', - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c ', - methodName: 'readRows', - clientName: 'nodejs-bigtable', - }, - }, - { - metrics: { - attemptLatency: 1000, - serverLatency: 103, - connectivityErrorCount: 1, - }, - attributes: { - appProfileId: undefined, - streamingOperation: 'streaming', - attemptStatus: 0, - connectivityErrorCount: 1, - projectId: 'my-project', - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c ', - methodName: 'readRows', - clientName: 'nodejs-bigtable', - }, - }, - { - metrics: { - operationLatency: 6000, - retryCount: 1, - firstResponseLatency: 2000, - }, - attributes: { - appProfileId: undefined, - finalOperationStatus: 0, - streamingOperation: 'streaming', - projectId: 'my-project', - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c ', - methodName: 'readRows', - clientName: 'nodejs-bigtable', - }, - }, -]; From be7673fad26622fe16e0f771efa253dc48d95b30 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 11 Feb 2025 10:30:08 -0500 Subject: [PATCH 169/448] Use 3 metrics instead of 1 --- test-common/export-input-fixture.ts | 88 +++++++ test/metrics-collector/metricsToRequest.ts | 275 ++++++++++++++++----- 2 files changed, 297 insertions(+), 66 deletions(-) diff --git a/test-common/export-input-fixture.ts b/test-common/export-input-fixture.ts index 6147565e2..2d29306b0 100644 --- a/test-common/export-input-fixture.ts +++ b/test-common/export-input-fixture.ts @@ -91,6 +91,94 @@ export const exportInput = { }, ], }, + { + descriptor: { + name: 
'bigtable.googleapis.com/internal/client/operation_latencies', + type: 'HISTOGRAM', + description: + "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + unit: '', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 0, + dataPoints: [ + { + attributes: { + appProfileId: 'fake-app-profile-id', + finalOperationStatus: 0, + streamingOperation: 'true', + projectId: 'some-project', + instanceId: 'emulator-test-instance', + table: 'my-table', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'mutateRows', + clientName: 'nodejs-bigtable/5.1.2', + }, + startTime: [fakeStartTime, 951000000], + endTime: [fakeEndTime, 948000000], + value: { + min: 76, + max: 1337, + sum: 11979, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [0, 0, 0, 0, 0, 0, 93, 0, 5, 0, 0, 1, 0, 0, 0, 0], + }, + count: 99, + }, + }, + ], + }, + { + descriptor: { + name: 'bigtable.googleapis.com/internal/client/operation_latencies', + type: 'HISTOGRAM', + description: + "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + unit: '', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 0, + dataPoints: [ + { + attributes: { + appProfileId: 'fake-app-profile-id', + finalOperationStatus: 0, + streamingOperation: 'true', + projectId: 'some-project', + instanceId: 'emulator-test-instance', + table: 'my-table', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'sampleRowKeys', + clientName: 'nodejs-bigtable/5.1.2', + }, + startTime: [fakeStartTime, 951000000], + endTime: [fakeEndTime, 948000000], + value: { + min: 76, + max: 1337, + sum: 11979, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [0, 0, 0, 0, 0, 0, 93, 0, 5, 0, 0, 1, 0, 0, 0, 0], + }, + count: 99, + }, + }, + ], + }, ], }, ], diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index 6a0bc5919..368f62f48 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -7,85 +7,228 @@ import { } from '../../test-common/export-input-fixture'; import {metricsToRequest} from '../../src/client-side-metrics/exporter'; -// TODO: Generate the export code -describe('Bigtable/metricsToRequest', () => { - it('Converts a counter and a histogram to the cloud monitoring format', () => { - const expectedRequest = { - name: 'projects/some-project', - timeSeries: [ +export const expectedRequest = { + name: 'projects/some-project', + timeSeries: [ + { + metric: { + type: 'bigtable.googleapis.com/internal/client/operation_latencies', + labels: { + app_profile: 'fake-app-profile-id', + client_name: 'nodejs-bigtable/5.1.2', + method: 'readRows', + status: '0', + streaming: 'true', + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 'fake-cluster3', + instance: 'emulator-test-instance', + project_id: 'some-project', + table: 'my-table', + zone: 'us-west1-c', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ { - metric: { - type: 'bigtable.googleapis.com/internal/client/operation_latencies', - labels: 
{ - app_profile: 'fake-app-profile-id', - client_name: 'nodejs-bigtable/5.1.2', - method: 'readRows', - status: '0', - streaming: 'true', + interval: { + endTime: { + seconds: fakeEndTime, + nanos: 948000000, }, - }, - resource: { - type: 'bigtable_client_raw', - labels: { - cluster: 'fake-cluster3', - instance: 'emulator-test-instance', - project_id: 'some-project', - table: 'my-table', - zone: 'us-west1-c', + startTime: { + seconds: fakeStartTime, + nanos: 951000000, }, }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: fakeEndTime, - nanos: 948000000, + value: { + distributionValue: { + count: '99', + mean: 121, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], }, - startTime: { - seconds: fakeStartTime, - nanos: 951000000, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '93', + '0', + '5', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/operation_latencies', + labels: { + app_profile: 'fake-app-profile-id', + client_name: 'nodejs-bigtable/5.1.2', + method: 'mutateRows', + status: '0', + streaming: 'true', + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 'fake-cluster3', + instance: 'emulator-test-instance', + project_id: 'some-project', + table: 'my-table', + zone: 'us-west1-c', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: fakeEndTime, + nanos: 948000000, + }, + startTime: { + seconds: fakeStartTime, + nanos: 951000000, + }, + }, + value: { + distributionValue: { + count: '99', + mean: 121, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], }, }, - value: { - distributionValue: { - count: '99', - mean: 121, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, - 5000, 7500, 10000, - ], - }, - }, - bucketCounts: [ - '0', - '0', - '0', - '0', - '0', - '0', - '93', - '0', - '5', - '0', - '0', - '1', - '0', - '0', - '0', - '0', + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '93', + '0', + '5', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/operation_latencies', + labels: { + app_profile: 'fake-app-profile-id', + client_name: 'nodejs-bigtable/5.1.2', + method: 'sampleRowKeys', + status: '0', + streaming: 'true', + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 'fake-cluster3', + instance: 'emulator-test-instance', + project_id: 'some-project', + table: 'my-table', + zone: 'us-west1-c', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: fakeEndTime, + nanos: 948000000, + }, + startTime: { + seconds: fakeStartTime, + nanos: 951000000, + }, + }, + value: { + distributionValue: { + count: '99', + mean: 121, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, ], }, }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '93', + '0', + '5', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + ], }, - ], - unit: 'ms', + }, }, ], - }; + unit: 'ms', + }, + ], +}; + +// TODO: Generate the 
export code +describe.only('Bigtable/metricsToRequest', () => { + it('Converts a counter and a histogram to the cloud monitoring format', () => { const actualRequest = metricsToRequest(exportInput); assert.deepStrictEqual(actualRequest, expectedRequest); }); From e6d66064d8e6520b2597c3eaa05290f50606d5f7 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 11 Feb 2025 15:33:41 -0500 Subject: [PATCH 170/448] Replace with proper buckets --- src/client-side-metrics/exporter.ts | 124 +++++++++- .../gcp-metrics-handler.ts | 13 +- test/metrics-collector/metricsToRequest.ts | 225 ++++-------------- 3 files changed, 186 insertions(+), 176 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 4c697d9fb..598a38926 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -18,6 +18,8 @@ import {ServiceError} from 'google-gax'; import {MetricServiceClient} from '@google-cloud/monitoring'; import {google} from '@google-cloud/monitoring/build/protos/protos'; import ICreateTimeSeriesRequest = google.monitoring.v3.ICreateTimeSeriesRequest; +import {expectedRequest} from '../../test/metrics-collector/metricsToRequest'; +import {exportInput} from '../../test-common/export-input-fixture'; interface ExportResult { code: number; @@ -153,6 +155,110 @@ export function metricsToRequest(exportArgs: ExportInput) { }; } +const goRequestToExporter = { + name: 'projects/cloud-native-db-dpes-shared', + timeSeries: [ + { + metric: { + type: 'bigtable.googleapis.com/internal/client/operation_latencies', + labels: { + app_profile: '', + client_name: 'go-bigtable/1.35.0', + client_uid: + 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', + method: 'Bigtable.MutateRows', + status: 'OK', + // "streaming": "true" + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 'bahaaiman-instance-01-c1', + instance: 'interim-instance3', + project_id: 'cloud-native-db-dpes-shared', + table: 'profile-b5e6f29d-2122-4d2c-8c12-cfb8e90ca05f', + zone: 'us-central1-f', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: Math.floor(Date.now() / 1000), + }, + startTime: { + seconds: Math.floor(Date.now() / 1000) - 1000, + }, + }, + value: { + distributionValue: { + count: '1', + mean: 331, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, + ], + }, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + ], +}; + // TODO: Add test for when the export fails export class CloudMonitoringExporter extends MetricExporter { private monitoringClient = new MetricServiceClient(); @@ -165,9 +271,25 @@ export class CloudMonitoringExporter extends MetricExporter { try { // TODO: Remove casting. 
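      // A hedged sketch of what the conversion below produces (label names
      // are illustrative; the authoritative mapping is metricsToRequest,
      // defined earlier in this file): each OpenTelemetry dataPoint becomes
      // one Cloud Monitoring TimeSeries, with metric.labels drawn from the
      // method/status/streaming attributes, resource.labels drawn from the
      // project, instance, table, cluster and zone attributes, and
      // points[0].value.distributionValue carrying the histogram buckets.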
      const request = metricsToRequest(metrics as unknown as ExportInput);
-      await this.monitoringClient.createTimeSeries(
+      /*
+      const result = await this.monitoringClient.createTimeSeries(
         request as ICreateTimeSeriesRequest
       );
+      */
+      const usedRequest = JSON.parse(
+        JSON.stringify(expectedRequest).replace(
+          /some-project/g,
+          'cloud-native-db-dpes-shared'
+        )
+      );
+      await this.monitoringClient.createTimeSeries(
+        usedRequest as ICreateTimeSeriesRequest
+      );
+      /*
+      await this.monitoringClient.createTimeSeries(
+        goRequestToExporter as ICreateTimeSeriesRequest
+      );
+      */
       const exportResult = {code: 0};
       resultCallback(exportResult);
     } catch (error) {
diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts
index afd2e12b4..2487d0c70 100644
--- a/src/client-side-metrics/gcp-metrics-handler.ts
+++ b/src/client-side-metrics/gcp-metrics-handler.ts
@@ -72,11 +72,12 @@ export class GCPMetricsHandler implements IMetricsHandler {
     if (!this.initialized) {
       this.initialized = true;
       const sumAggregation = Aggregation.Sum();
-      const histogramAggregation = new ExplicitBucketHistogramAggregation([
+      const buckets = [
         0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16,
         20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500,
         650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000,
-      ]);
+      ];
+      const histogramAggregation = new ExplicitBucketHistogramAggregation(buckets);
       const viewList = [
         'operation_latencies',
         'first_response_latencies',
@@ -120,6 +121,10 @@ export class GCPMetricsHandler implements IMetricsHandler {
           {
             description:
               "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.",
+            unit: 'ms',
+            advice: {
+              explicitBucketBoundaries: buckets,
+            },
           }
         ),
         attemptLatencies: meter.createHistogram(
           'attempt_latencies',
           {
             description:
               'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies.
If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', unit: 'ms', + advice: { + explicitBucketBoundaries: buckets, + }, } ), retryCount: meter.createHistogram( @@ -158,6 +166,7 @@ export class GCPMetricsHandler implements IMetricsHandler { { description: 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', + unit: 'ms', } ), connectivityErrorCount: meter.createHistogram( diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index 368f62f48..cfa9cfe98 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -15,20 +15,21 @@ export const expectedRequest = { type: 'bigtable.googleapis.com/internal/client/operation_latencies', labels: { app_profile: 'fake-app-profile-id', - client_name: 'nodejs-bigtable/5.1.2', - method: 'readRows', - status: '0', - streaming: 'true', + client_name: 'go-bigtable/1.35.0', + client_uid: + 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', + method: 'Bigtable.ReadRows', + status: 'OK', }, }, resource: { type: 'bigtable_client_raw', labels: { cluster: 'fake-cluster3', - instance: 'emulator-test-instance', + instance: 'emulator-test-instance2', project_id: 'some-project', table: 'my-table', - zone: 'us-west1-c', + zone: 'us-central1-f', }, }, metricKind: 'CUMULATIVE', @@ -37,185 +38,63 @@ export const expectedRequest = { { interval: { endTime: { - seconds: fakeEndTime, - nanos: 948000000, + seconds: Math.floor(Date.now() / 1000), }, startTime: { - seconds: fakeStartTime, - nanos: 951000000, + seconds: Math.floor(Date.now() / 1000) - 1000, }, }, value: { distributionValue: { - count: '99', - mean: 121, + count: '1', + mean: 376.103605, bucketOptions: { explicitBuckets: { bounds: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, + 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, ], }, }, bucketCounts: [ - '0', - '0', - '0', - '0', - '0', - '0', - '93', - '0', - '5', - '0', - '0', - '1', - '0', - '0', - '0', - '0', - ], - }, - }, - }, - ], - unit: 'ms', - }, - { - metric: { - type: 'bigtable.googleapis.com/internal/client/operation_latencies', - labels: { - app_profile: 'fake-app-profile-id', - client_name: 'nodejs-bigtable/5.1.2', - method: 'mutateRows', - status: '0', - streaming: 'true', - }, - }, - resource: { - type: 'bigtable_client_raw', - labels: { - cluster: 'fake-cluster3', - instance: 'emulator-test-instance', - project_id: 'some-project', - table: 'my-table', - zone: 'us-west1-c', - }, - }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: fakeEndTime, - nanos: 948000000, - }, - startTime: { - seconds: fakeStartTime, - nanos: 951000000, - }, - }, - value: { - distributionValue: { - count: '99', - mean: 121, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, - ], - }, - }, - bucketCounts: [ - '0', - '0', - '0', - '0', - '0', - '0', - '93', - '0', - '5', - '0', - '0', - '1', - '0', - '0', - '0', - '0', - ], - }, - }, - }, - ], - unit: 'ms', - }, - { - metric: { - type: 'bigtable.googleapis.com/internal/client/operation_latencies', - labels: { - app_profile: 'fake-app-profile-id', - client_name: 
'nodejs-bigtable/5.1.2', - method: 'sampleRowKeys', - status: '0', - streaming: 'true', - }, - }, - resource: { - type: 'bigtable_client_raw', - labels: { - cluster: 'fake-cluster3', - instance: 'emulator-test-instance', - project_id: 'some-project', - table: 'my-table', - zone: 'us-west1-c', - }, - }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: fakeEndTime, - nanos: 948000000, - }, - startTime: { - seconds: fakeStartTime, - nanos: 951000000, - }, - }, - value: { - distributionValue: { - count: '99', - mean: 121, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, - ], - }, - }, - bucketCounts: [ - '0', - '0', - '0', - '0', - '0', - '0', - '93', - '0', - '5', - '0', - '0', - '1', - '0', - '0', - '0', - '0', + '0', //1 + '0', //2 + '0', //3 + '0', //4 + '0', //5 + '0', //6 + '0', //7 + '0', //8 + '0', //9 + '0', //10 + '0', //11 + '0', //12 + '0', //13 + '0', //14 + '0', //15 + '0', //16 + '0', //17 + '0', //18 + '0', //19 + '0', //20 + '0', //21 + '0', //22 + '0', //23 + '0', //24 + '1', //25 + '0', //26 + '0', //27 + '0', //28 + '0', //29 + '0', //30 + '0', //31 + '0', //32 + '0', //33 + '0', //34 + '0', //35 + '0', //36 + '0', //37 ], }, }, @@ -227,7 +106,7 @@ export const expectedRequest = { }; // TODO: Generate the export code -describe.only('Bigtable/metricsToRequest', () => { +describe('Bigtable/metricsToRequest', () => { it('Converts a counter and a histogram to the cloud monitoring format', () => { const actualRequest = metricsToRequest(exportInput); assert.deepStrictEqual(actualRequest, expectedRequest); From 285454f748b2ce1f1cf6bfe0de1f9dad6b83feb1 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 12 Feb 2025 17:20:35 -0500 Subject: [PATCH 171/448] Change the metrics handler fixture --- test-common/metrics-handler-fixture.ts | 63 ++++++++++++++------------ 1 file changed, 35 insertions(+), 28 deletions(-) diff --git a/test-common/metrics-handler-fixture.ts b/test-common/metrics-handler-fixture.ts index f5899ce43..f008f86fd 100644 --- a/test-common/metrics-handler-fixture.ts +++ b/test-common/metrics-handler-fixture.ts @@ -17,59 +17,66 @@ export const expectedRequestsHandled = [ metrics: { attemptLatency: 2000, serverLatency: 101, - connectivityErrorCount: 1, + connectivityErrorCount: 0, }, attributes: { - appProfileId: undefined, streamingOperation: 'true', attemptStatus: 4, - connectivityErrorCount: 1, - projectId: 'my-project', - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', clientName: 'nodejs-bigtable', + metricsCollectorData: { + appProfileId: undefined, + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientUid: 'fake-uuid', + }, + projectId: 'my-project', }, }, { metrics: { - attemptLatency: 1000, + attemptLatency: 2000, serverLatency: 103, - connectivityErrorCount: 1, + connectivityErrorCount: 0, }, attributes: { - appProfileId: undefined, streamingOperation: 'true', attemptStatus: 0, - connectivityErrorCount: 1, - projectId: 'my-project', - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', clientName: 'nodejs-bigtable', + metricsCollectorData: { + appProfileId: undefined, + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + 
methodName: 'readRows', + clientUid: 'fake-uuid', + }, + projectId: 'my-project', }, }, { metrics: { - operationLatency: 6000, + operationLatency: 7000, retryCount: 1, - firstResponseLatency: 2000, + firstResponseLatency: 5000, }, attributes: { - appProfileId: undefined, finalOperationStatus: 0, streamingOperation: 'true', - projectId: 'my-project', - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', + metricsCollectorData: { + appProfileId: undefined, + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientUid: 'fake-uuid', + }, clientName: 'nodejs-bigtable', + projectId: 'my-project', }, }, ]; From 12a5cc72ee7d4754bbf83067949108a0d031bffa Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Feb 2025 12:53:38 -0500 Subject: [PATCH 172/448] Stop using stub in exporter --- src/client-side-metrics/exporter.ts | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 598a38926..ef9016fec 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -271,25 +271,9 @@ export class CloudMonitoringExporter extends MetricExporter { try { // TODO: Remove casting. const request = metricsToRequest(metrics as unknown as ExportInput); - /* - const result = await this.monitoringClient.createTimeSeries( - request as ICreateTimeSeriesRequest - ); - */ - const usedRequest = JSON.parse( - JSON.stringify(expectedRequest).replace( - /some-project/g, - 'cloud-native-db-dpes-shared' - ) - ); await this.monitoringClient.createTimeSeries( - usedRequest as ICreateTimeSeriesRequest - ); - /* - await this.monitoringClient.createTimeSeries( - goRequestToExporter as ICreateTimeSeriesRequest + request as ICreateTimeSeriesRequest ); - */ const exportResult = {code: 0}; resultCallback(exportResult); } catch (error) { From 54239f3039a4a6357200d0c3f8ba0f418f726c4c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Feb 2025 13:42:53 -0500 Subject: [PATCH 173/448] Use more realistic buckets --- src/client-side-metrics/exporter.ts | 2 + test-common/export-input-fixture.ts | 487 +-------------------- test/metrics-collector/metricsToRequest.ts | 54 ++- 3 files changed, 51 insertions(+), 492 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index ef9016fec..7b8897acc 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -63,6 +63,7 @@ export interface ExportInput { zone: string; methodName: string; clientName: string; + clientUid: string; }; startTime: number[]; endTime: number[]; @@ -96,6 +97,7 @@ export function metricsToRequest(exportArgs: ExportInput) { method: allAttributes.methodName, status: allAttributes.finalOperationStatus.toString(), streaming: allAttributes.streamingOperation, + client_uid: allAttributes.clientUid, }; const resourceLabels = { cluster: allAttributes.cluster, diff --git a/test-common/export-input-fixture.ts b/test-common/export-input-fixture.ts index 2d29306b0..028e9e7c0 100644 --- a/test-common/export-input-fixture.ts +++ b/test-common/export-input-fixture.ts @@ -72,6 +72,7 @@ export const exportInput = { zone: 'us-west1-c', methodName: 'readRows', clientName: 'nodejs-bigtable/5.1.2', + clientUid: 'fake-uuid', }, startTime: [fakeStartTime, 951000000], endTime: [fakeEndTime, 948000000], @@ -91,493 +92,7 
@@ export const exportInput = { }, ], }, - { - descriptor: { - name: 'bigtable.googleapis.com/internal/client/operation_latencies', - type: 'HISTOGRAM', - description: - "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", - unit: '', - valueType: 1, - advice: {}, - }, - aggregationTemporality: 1, - dataPointType: 0, - dataPoints: [ - { - attributes: { - appProfileId: 'fake-app-profile-id', - finalOperationStatus: 0, - streamingOperation: 'true', - projectId: 'some-project', - instanceId: 'emulator-test-instance', - table: 'my-table', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'mutateRows', - clientName: 'nodejs-bigtable/5.1.2', - }, - startTime: [fakeStartTime, 951000000], - endTime: [fakeEndTime, 948000000], - value: { - min: 76, - max: 1337, - sum: 11979, - buckets: { - boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, - ], - counts: [0, 0, 0, 0, 0, 0, 93, 0, 5, 0, 0, 1, 0, 0, 0, 0], - }, - count: 99, - }, - }, - ], - }, - { - descriptor: { - name: 'bigtable.googleapis.com/internal/client/operation_latencies', - type: 'HISTOGRAM', - description: - "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", - unit: '', - valueType: 1, - advice: {}, - }, - aggregationTemporality: 1, - dataPointType: 0, - dataPoints: [ - { - attributes: { - appProfileId: 'fake-app-profile-id', - finalOperationStatus: 0, - streamingOperation: 'true', - projectId: 'some-project', - instanceId: 'emulator-test-instance', - table: 'my-table', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'sampleRowKeys', - clientName: 'nodejs-bigtable/5.1.2', - }, - startTime: [fakeStartTime, 951000000], - endTime: [fakeEndTime, 948000000], - value: { - min: 76, - max: 1337, - sum: 11979, - buckets: { - boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, - ], - counts: [0, 0, 0, 0, 0, 0, 93, 0, 5, 0, 0, 1, 0, 0, 0, 0], - }, - count: 99, - }, - }, - ], - }, - ], - }, - ], -}; - -const serverLatencyExportOutput = { - name: 'projects/cloud-native-db-dpes-shared', - timeSeries: [ - { - metric: { - type: 'bigtable.googleapis.com/internal/client/server_latencies', - labels: { - app_profile: '', - client_name: 'go-bigtable/1.35.0', - client_uid: - 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', - method: 'Bigtable.MutateRows', - status: 'OK', - // "streaming": "true" - }, - }, - resource: { - type: 'bigtable_client_raw', - labels: { - cluster: 'bahaaiman-instance-01-c1', - instance: 'bahaaiman-instance-01', - project_id: 'cloud-native-db-dpes-shared', - table: 'profile-b5e6f29d-2122-4d2c-8c12-cfb8e90ca05f', - zone: 'us-central1-f', - }, - }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: 2000, - }, - startTime: { - seconds: 1000, - }, - }, - value: { - distributionValue: { - count: '1', - mean: 376.103605, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, - 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, - 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, - 800000, 1600000, 3200000, - ], - }, - }, - bucketCounts: [ - '0', - '0', - 
'0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '1', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - ], - }, - }, - }, - ], - unit: 'ms', - }, - { - metric: { - type: 'bigtable.googleapis.com/internal/client/attempt_latencies', - labels: { - app_profile: '', - client_name: 'go-bigtable/1.35.0', - client_uid: - 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', - method: 'Bigtable.ReadRows', - status: 'OK', - // "streaming": "true" - }, - }, - resource: { - type: 'bigtable_client_raw', - labels: { - cluster: 'bahaaiman-instance-01-c1', - instance: 'bahaaiman-instance-01', - project_id: 'cloud-native-db-dpes-shared', - table: 'profile-b5e6f29d-2122-4d2c-8c12-cfb8e90ca05f', - zone: 'us-central1-f', - }, - }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: 2000, - }, - startTime: { - seconds: 1000, - }, - }, - value: { - distributionValue: { - count: '5', - mean: 272.559932, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, - 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, - 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, - 800000, 1600000, 3200000, - ], - }, - }, - bucketCounts: [ - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '1', - '0', - '0', - '0', - '2', - '2', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - ], - }, - }, - }, - ], - unit: 'ms', - }, - { - metric: { - type: 'bigtable.googleapis.com/internal/client/server_latencies', - labels: { - app_profile: '', - client_name: 'go-bigtable/1.35.0', - client_uid: - 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', - method: 'Bigtable.MutateRows', - status: 'OK', - // "streaming": "true" - }, - }, - resource: { - type: 'bigtable_client_raw', - labels: { - cluster: 'bahaaiman-instance-01-c1', - instance: 'bahaaiman-instance-01', - project_id: 'cloud-native-db-dpes-shared', - table: 'profile-b5e6f29d-2122-4d2c-8c12-cfb8e90ca05f', - zone: 'us-central1-f', - }, - }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: 2000, - }, - startTime: { - seconds: 1000, - }, - }, - value: { - distributionValue: { - count: '1', - mean: 331, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, - 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, - 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, - 800000, 1600000, 3200000, - ], - }, - }, - bucketCounts: [ - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '1', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - ], - }, - }, - }, - ], - unit: 'ms', - }, - { - metric: { - type: 'bigtable.googleapis.com/internal/client/server_latencies', - labels: { - app_profile: '', - client_name: 'go-bigtable/1.35.0', - client_uid: - 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', - method: 'Bigtable.ReadRows', - status: 'OK', - // "streaming": "true" - }, - }, - resource: { 
- type: 'bigtable_client_raw', - labels: { - cluster: 'bahaaiman-instance-01-c1', - instance: 'bahaaiman-instance-01', - project_id: 'cloud-native-db-dpes-shared', - table: 'profile-b5e6f29d-2122-4d2c-8c12-cfb8e90ca05f', - zone: 'us-central1-f', - }, - }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: 2000, - }, - startTime: { - seconds: 1000, - }, - }, - value: { - distributionValue: { - count: '5', - mean: 230, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, - 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, - 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, - 800000, 1600000, 3200000, - ], - }, - }, - bucketCounts: [ - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '1', - '0', - '0', - '2', - '2', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - ], - }, - }, - }, ], - unit: 'ms', }, ], }; diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index cfa9cfe98..ce04b8ad8 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -16,8 +16,7 @@ export const expectedRequest = { labels: { app_profile: 'fake-app-profile-id', client_name: 'go-bigtable/1.35.0', - client_uid: - 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', + client_uid: 'fake-client-uid', method: 'Bigtable.ReadRows', status: 'OK', }, @@ -51,9 +50,47 @@ export const expectedRequest = { bucketOptions: { explicitBuckets: { bounds: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, - 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, + 0, // 1 + 0.01, // 2 + 0.05, // 3 + 0.1, // 4 + 0.3, // 5 + 0.6, // 6 + 0.8, // 7 + 1, // 8 + 2, // 9 + 3, // 10 + 4, // 11 + 5, // 12 + 6, // 13 + 8, // 14 + 10, // 15 + 13, // 16 + 16, // 17 + 20, // 18 + 25, // 19 + 30, // 20 + 40, // 21 + 50, // 22 + 65, // 23 + 80, // 24 + 100, // 25 + 130, // 26 + 160, // 27 + 200, // 28 + 250, // 29 + 300, // 30 + 400, // 31 + 500, // 32 + 650, // 33 + 800, // 34 + 1000, // 35 + 2000, // 36 + 5000, // 37 + 10000, // 38 + 20000, // 39 + 50000, // 40 + 100000, // 41 ], }, }, @@ -95,6 +132,11 @@ export const expectedRequest = { '0', //35 '0', //36 '0', //37 + '0', //38 + '0', //39 + '0', //40 + '0', //41 + '0', //42 ], }, }, @@ -106,7 +148,7 @@ export const expectedRequest = { }; // TODO: Generate the export code -describe('Bigtable/metricsToRequest', () => { +describe.only('Bigtable/metricsToRequest', () => { it('Converts a counter and a histogram to the cloud monitoring format', () => { const actualRequest = metricsToRequest(exportInput); assert.deepStrictEqual(actualRequest, expectedRequest); From 86890aa20c5deab9634f49f02fe1e8bdfd2b9fd5 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Feb 2025 13:43:49 -0500 Subject: [PATCH 174/448] Remove the go request to export --- src/client-side-metrics/exporter.ts | 106 ---------------------------- 1 file changed, 106 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 7b8897acc..09c9e0ec1 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -18,8 +18,6 @@ import {ServiceError} from 'google-gax'; import 
{MetricServiceClient} from '@google-cloud/monitoring'; import {google} from '@google-cloud/monitoring/build/protos/protos'; import ICreateTimeSeriesRequest = google.monitoring.v3.ICreateTimeSeriesRequest; -import {expectedRequest} from '../../test/metrics-collector/metricsToRequest'; -import {exportInput} from '../../test-common/export-input-fixture'; interface ExportResult { code: number; @@ -157,110 +155,6 @@ export function metricsToRequest(exportArgs: ExportInput) { }; } -const goRequestToExporter = { - name: 'projects/cloud-native-db-dpes-shared', - timeSeries: [ - { - metric: { - type: 'bigtable.googleapis.com/internal/client/operation_latencies', - labels: { - app_profile: '', - client_name: 'go-bigtable/1.35.0', - client_uid: - 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', - method: 'Bigtable.MutateRows', - status: 'OK', - // "streaming": "true" - }, - }, - resource: { - type: 'bigtable_client_raw', - labels: { - cluster: 'bahaaiman-instance-01-c1', - instance: 'interim-instance3', - project_id: 'cloud-native-db-dpes-shared', - table: 'profile-b5e6f29d-2122-4d2c-8c12-cfb8e90ca05f', - zone: 'us-central1-f', - }, - }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: Math.floor(Date.now() / 1000), - }, - startTime: { - seconds: Math.floor(Date.now() / 1000) - 1000, - }, - }, - value: { - distributionValue: { - count: '1', - mean: 331, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, - 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, - 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, - 800000, 1600000, 3200000, - ], - }, - }, - bucketCounts: [ - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '1', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - ], - }, - }, - }, - ], - unit: 'ms', - }, - ], -}; - // TODO: Add test for when the export fails export class CloudMonitoringExporter extends MetricExporter { private monitoringClient = new MetricServiceClient(); From a0fa7e42e951cf96455f413073b47bf52bed209b Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Feb 2025 14:08:31 -0500 Subject: [PATCH 175/448] Modify the fixtures to be more realistic --- src/client-side-metrics/exporter.ts | 2 - test-common/export-input-fixture.ts | 94 ++++++++++++++++++++-- test/metrics-collector/metricsToRequest.ts | 15 ++-- 3 files changed, 96 insertions(+), 15 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 09c9e0ec1..462f80df0 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -122,11 +122,9 @@ export function metricsToRequest(exportArgs: ExportInput) { interval: { endTime: { seconds: dataPoint.endTime[0], - nanos: dataPoint.endTime[1], }, startTime: { seconds: dataPoint.startTime[0], - nanos: dataPoint.startTime[1], }, }, value: { diff --git a/test-common/export-input-fixture.ts b/test-common/export-input-fixture.ts index 028e9e7c0..3a8495fe3 100644 --- a/test-common/export-input-fixture.ts +++ b/test-common/export-input-fixture.ts @@ -69,8 +69,8 @@ export const exportInput = { instanceId: 'emulator-test-instance', table: 'my-table', cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', + zone: 'us-central1-f', + methodName: 
'Bigtable.ReadRows', clientName: 'nodejs-bigtable/5.1.2', clientUid: 'fake-uuid', }, @@ -82,12 +82,94 @@ export const exportInput = { sum: 11979, buckets: { boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, + 0, // 1 + 0.01, // 2 + 0.05, // 3 + 0.1, // 4 + 0.3, // 5 + 0.6, // 6 + 0.8, // 7 + 1, // 8 + 2, // 9 + 3, // 10 + 4, // 11 + 5, // 12 + 6, // 13 + 8, // 14 + 10, // 15 + 13, // 16 + 16, // 17 + 20, // 18 + 25, // 19 + 30, // 20 + 40, // 21 + 50, // 22 + 65, // 23 + 80, // 24 + 100, // 25 + 130, // 26 + 160, // 27 + 200, // 28 + 250, // 29 + 300, // 30 + 400, // 31 + 500, // 32 + 650, // 33 + 800, // 34 + 1000, // 35 + 2000, // 36 + 5000, // 37 + 10000, // 38 + 20000, // 39 + 50000, // 40 + 100000, // 41 + ], + counts: [ + 0, //1 + 0, //2 + 0, //3 + 0, //4 + 0, //5 + 0, //6 + 0, //7 + 0, //8 + 0, //9 + 0, //10 + 0, //11 + 0, //12 + 0, //13 + 0, //14 + 0, //15 + 0, //16 + 0, //17 + 0, //18 + 0, //19 + 0, //20 + 0, //21 + 0, //22 + 0, //23 + 0, //24 + 1, //25 + 0, //26 + 0, //27 + 0, //28 + 0, //29 + 0, //30 + 0, //31 + 0, //32 + 0, //33 + 0, //34 + 0, //35 + 0, //36 + 0, //37 + 0, //38 + 0, //39 + 0, //40 + 0, //41 + 0, //42 ], - counts: [0, 0, 0, 0, 0, 0, 93, 0, 5, 0, 0, 1, 0, 0, 0, 0], }, - count: 99, + count: 1, }, }, ], diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index ce04b8ad8..97cf61124 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -15,17 +15,18 @@ export const expectedRequest = { type: 'bigtable.googleapis.com/internal/client/operation_latencies', labels: { app_profile: 'fake-app-profile-id', - client_name: 'go-bigtable/1.35.0', - client_uid: 'fake-client-uid', + client_name: 'nodejs-bigtable/5.1.2', + client_uid: 'fake-uuid', method: 'Bigtable.ReadRows', - status: 'OK', + status: '0', + streaming: 'true', }, }, resource: { type: 'bigtable_client_raw', labels: { cluster: 'fake-cluster3', - instance: 'emulator-test-instance2', + instance: 'emulator-test-instance', project_id: 'some-project', table: 'my-table', zone: 'us-central1-f', @@ -37,16 +38,16 @@ export const expectedRequest = { { interval: { endTime: { - seconds: Math.floor(Date.now() / 1000), + seconds: fakeEndTime, }, startTime: { - seconds: Math.floor(Date.now() / 1000) - 1000, + seconds: fakeStartTime, }, }, value: { distributionValue: { count: '1', - mean: 376.103605, + mean: 121, bucketOptions: { explicitBuckets: { bounds: [ From f5267c1745717585c55efdaab43f3a12ceab7b3d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Feb 2025 14:15:23 -0500 Subject: [PATCH 176/448] Change the mean --- test/metrics-collector/metricsToRequest.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index 97cf61124..c8162c766 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -47,7 +47,7 @@ export const expectedRequest = { value: { distributionValue: { count: '1', - mean: 121, + mean: 11979, bucketOptions: { explicitBuckets: { bounds: [ From 887d98887f432ff63cdff13d5f376921dd2e414b Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Feb 2025 14:17:50 -0500 Subject: [PATCH 177/448] Remove only --- test/metrics-collector/metricsToRequest.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index 
c8162c766..15b839969 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -149,7 +149,7 @@ export const expectedRequest = { }; // TODO: Generate the export code -describe.only('Bigtable/metricsToRequest', () => { +describe('Bigtable/metricsToRequest', () => { it('Converts a counter and a histogram to the cloud monitoring format', () => { const actualRequest = metricsToRequest(exportInput); assert.deepStrictEqual(actualRequest, expectedRequest); From 075bf9b89d9cd8c83b1e962362e4f7753362ac09 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Feb 2025 16:26:29 -0500 Subject: [PATCH 178/448] Add the export input fixture --- .../client-side-metrics-attributes.ts | 122 ++---- src/client-side-metrics/exporter.ts | 2 +- .../gcp-metrics-handler.ts | 25 +- test-common/expected-otel-export-input.ts | 405 ++++++++++++++++++ test-common/metrics-handler-fixture.ts | 17 +- test/metrics-collector/gcp-metrics-handler.ts | 89 ++++ 6 files changed, 550 insertions(+), 110 deletions(-) create mode 100644 test-common/expected-otel-export-input.ts create mode 100644 test/metrics-collector/gcp-metrics-handler.ts diff --git a/src/client-side-metrics/client-side-metrics-attributes.ts b/src/client-side-metrics/client-side-metrics-attributes.ts index 0672f6f1c..ffc6dfa44 100644 --- a/src/client-side-metrics/client-side-metrics-attributes.ts +++ b/src/client-side-metrics/client-side-metrics-attributes.ts @@ -14,88 +14,23 @@ import {grpc} from 'google-gax'; -/** - * Standard attributes common to various Bigtable client-side metrics. These attributes provide - * contextual information about the Bigtable environment and operation. - */ -interface StandardAttributes { - projectId: string; +// The backend is expecting true/false and will fail if other values are provided. +// export in open telemetry is expecting string value attributes so we don't use boolean +// true/false. +export enum StreamingState { + STREAMING = 'true', + UNARY = 'false', +} + +type IMetricsCollectorData = { instanceId: string; table: string; cluster?: string; zone?: string; appProfileId?: string; methodName: MethodName; - clientName: string; -} - -export enum StreamingState { - STREAMING = 'streaming', - UNARY = 'unary', -} - -/** - * Attributes associated with operation latency metrics for Bigtable client operations. - * These attributes provide context about the Bigtable environment and the completed operation. - */ -interface OperationLatencyAttributes extends StandardAttributes { - finalOperationStatus: grpc.status; - streamingOperation: StreamingState; -} - -/** - * Attributes associated with attempt latency metrics for Bigtable client operations. - * These attributes provide context about the Bigtable environment, the specific attempt, and whether the operation was streaming. - */ -interface AttemptLatencyAttributes extends StandardAttributes { - attemptStatus: grpc.status; - streamingOperation: StreamingState; -} - -/** - * Attributes associated with retry count metrics for Bigtable client operations. These attributes - * provide context about the Bigtable environment and the final status of the operation. - */ -interface RetryCountAttributes extends StandardAttributes { - finalOperationStatus: grpc.status; -} - -/** - * Attributes associated with application blocking latencies for Bigtable client operations. - * These attributes provide context about the Bigtable environment and the operation being performed. 
- */ -type ApplicationBlockingLatenciesAttributes = StandardAttributes; - -/** - * Attributes associated with first response latency metrics for Bigtable client operations. - * These attributes provide context about the Bigtable environment and the final status of the operation. - */ -interface FirstResponseLatencyAttributes extends StandardAttributes { - finalOperationStatus: grpc.status; -} - -/** - * Attributes associated with server latency metrics for Bigtable client operations. - * These attributes provide context about the Bigtable environment, the specific attempt, and whether the operation was streaming. - */ -interface ServerLatenciesAttributes extends StandardAttributes { - attemptStatus: grpc.status; - streamingOperation: StreamingState; -} - -/** - * Attributes associated with connectivity error count metrics for Bigtable client operations. - * These attributes provide context about the Bigtable environment and the status of the attempt. - */ -interface ConnectivityErrorCountAttributes extends StandardAttributes { - attemptStatus: grpc.status; -} - -/** - * Attributes associated with client blocking latencies for Bigtable client operations. - * These attributes provide context about the Bigtable environment and the operation being performed. - */ -type ClientBlockingLatenciesAttributes = StandardAttributes; + clientUid: string; +}; /** * Attributes associated with the completion of a Bigtable operation. These @@ -103,10 +38,13 @@ type ClientBlockingLatenciesAttributes = StandardAttributes; * operation, and its final status. They are used for recording metrics such as * operation latency, first response latency, and retry count. */ -export type OnOperationCompleteAttributes = - | OperationLatencyAttributes - | FirstResponseLatencyAttributes - | RetryCountAttributes; +export type OnOperationCompleteAttributes = { + projectId: string; + metricsCollectorData: IMetricsCollectorData; + clientName: string; + finalOperationStatus: grpc.status; + streamingOperation: StreamingState; +}; /** * Attributes associated with the completion of a single attempt of a Bigtable @@ -115,21 +53,23 @@ export type OnOperationCompleteAttributes = * are used for recording metrics such as attempt latency, server latency, and * connectivity errors. */ -export type OnAttemptCompleteAttributes = - | AttemptLatencyAttributes - | ConnectivityErrorCountAttributes - | ServerLatenciesAttributes - | ClientBlockingLatenciesAttributes; +export type OnAttemptCompleteAttributes = { + projectId: string; + metricsCollectorData: IMetricsCollectorData; + clientName: string; + attemptStatus: grpc.status; + streamingOperation: StreamingState; +}; /** * Represents the names of Bigtable methods. These are used as attributes for * metrics, allowing for differentiation of performance by method. 
*/ export enum MethodName { - READ_ROWS = 'readRows', - MUTATE_ROW = 'mutateRow', - CHECK_AND_MUTATE_ROW = 'checkAndMutateRow', - READ_MODIFY_WRITE_ROW = 'readModifyWriteRow', - SAMPLE_ROW_KEYS = 'sampleRowKeys', - MUTATE_ROWS = 'mutateRows', + READ_ROWS = 'Bigtable.ReadRows', + MUTATE_ROW = 'Bigtable.MutateRow', + CHECK_AND_MUTATE_ROW = 'Bigtable.CheckAndMutateRow', + READ_MODIFY_WRITE_ROW = 'Bigtable.ReadModifyWriteRow', + SAMPLE_ROW_KEYS = 'Bigtable.SampleRowKeys', + MUTATE_ROWS = 'Bigtable.MutateRows', } diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 462f80df0..905003a57 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -19,7 +19,7 @@ import {MetricServiceClient} from '@google-cloud/monitoring'; import {google} from '@google-cloud/monitoring/build/protos/protos'; import ICreateTimeSeriesRequest = google.monitoring.v3.ICreateTimeSeriesRequest; -interface ExportResult { +export interface ExportResult { code: number; } diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 2487d0c70..46026ed8d 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -53,12 +53,15 @@ interface Metrics { * This handler records metrics such as operation latency, attempt latency, retry count, and more, * associating them with relevant attributes for detailed analysis in Cloud Monitoring. */ -export class GCPMetricsHandler implements IMetricsHandler { +export class GCPMetricsHandler + implements IMetricsHandler +{ private initialized = false; private otelMetrics?: Metrics; - private exporter: typeof MetricExporter; + private exporter: T; - constructor(exporter: typeof MetricExporter) { + constructor(exporter: T) { + console.log('Passing in exporter'); this.exporter = exporter; } @@ -77,6 +80,7 @@ export class GCPMetricsHandler implements IMetricsHandler { 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, ]; + /* const histogramAggregation = new ExplicitBucketHistogramAggregation(); const viewList = [ 'operation_latencies', @@ -95,8 +99,9 @@ export class GCPMetricsHandler implements IMetricsHandler { aggregation: name.slice(-9) ? sumAggregation : histogramAggregation, }) ); + */ const meterProvider = new MeterProvider({ - views: viewList, + // views: viewList, resource: new Resources.Resource({ 'service.name': 'Cloud Bigtable Table', 'cloud.provider': 'gcp', @@ -109,7 +114,7 @@ export class GCPMetricsHandler implements IMetricsHandler { new PeriodicExportingMetricReader({ // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by // Cloud Monitoring. - exportIntervalMillis: 100_000, + exportIntervalMillis: 10_000, exporter: this.exporter, }), ], @@ -122,9 +127,6 @@ export class GCPMetricsHandler implements IMetricsHandler { description: "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", unit: 'ms', - advice: { - explicitBucketBoundaries: buckets, - }, } ), attemptLatencies: meter.createHistogram( @@ -133,9 +135,6 @@ export class GCPMetricsHandler implements IMetricsHandler { description: 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. 
If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', unit: 'ms', - advice: { - explicitBucketBoundaries: buckets, - }, } ), retryCount: meter.createHistogram( @@ -185,6 +184,7 @@ export class GCPMetricsHandler implements IMetricsHandler { } ), }; + console.log('Done initializing'); } } @@ -198,6 +198,7 @@ export class GCPMetricsHandler implements IMetricsHandler { metrics: OnOperationCompleteMetrics, attributes: OnOperationCompleteAttributes ) { + console.log('onOperationComplete'); this.initialize(attributes.projectId); this.otelMetrics?.operationLatencies.record( metrics.operationLatency, @@ -221,7 +222,7 @@ export class GCPMetricsHandler implements IMetricsHandler { metrics: OnAttemptCompleteMetrics, attributes: OnAttemptCompleteAttributes ) { - console.log('onAttemptComplete handler'); + console.log('onAttemptComplete'); this.initialize(attributes.projectId); this.otelMetrics?.attemptLatencies.record( metrics.attemptLatency, diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts new file mode 100644 index 000000000..9cd3e30ad --- /dev/null +++ b/test-common/expected-otel-export-input.ts @@ -0,0 +1,405 @@ +export const expectedOtelExportInput = { + resource: { + _attributes: { + 'service.name': 'Cloud Bigtable Table', + 'telemetry.sdk.language': 'nodejs', + 'telemetry.sdk.name': 'opentelemetry', + 'telemetry.sdk.version': '1.30.0', + 'cloud.provider': 'gcp', + 'cloud.platform': 'gce_instance', + 'cloud.resource_manager.project_id': 'my-project', + 'monitored_resource.type': 'bigtable_client_raw', + }, + asyncAttributesPending: false, + _syncAttributes: { + 'service.name': 'Cloud Bigtable Table', + 'telemetry.sdk.language': 'nodejs', + 'telemetry.sdk.name': 'opentelemetry', + 'telemetry.sdk.version': '1.30.0', + 'cloud.provider': 'gcp', + 'cloud.platform': 'gce_instance', + 'cloud.resource_manager.project_id': 'my-project', + 'monitored_resource.type': 'bigtable_client_raw', + }, + _asyncAttributesPromise: {}, + }, + scopeMetrics: [ + { + scope: { + name: 'bigtable.googleapis.com', + version: '', + }, + metrics: [ + { + descriptor: { + name: 'bigtable.googleapis.com/internal/client/operation_latencies', + type: 'HISTOGRAM', + description: + "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + unit: 'ms', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 0, + dataPoints: [ + { + attributes: { + finalOperationStatus: 0, + streamingOperation: 'true', + metricsCollectorData: { + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientUid: 'fake-uuid', + }, + clientName: 'nodejs-bigtable', + projectId: 'my-project', + }, + startTime: [123, 789], + endTime: [456, 789], + value: { + min: 7000, + max: 7000, + sum: 7000, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], + }, + count: 1, + }, + }, + ], + }, + { + descriptor: { + name: 'bigtable.googleapis.com/internal/client/attempt_latencies', + type: 'HISTOGRAM', + description: + 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. 
If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', + unit: 'ms', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 0, + dataPoints: [ + { + attributes: { + streamingOperation: 'true', + attemptStatus: 4, + clientName: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientUid: 'fake-uuid', + }, + projectId: 'my-project', + }, + startTime: [123, 789], + endTime: [456, 789], + value: { + min: 2000, + max: 2000, + sum: 2000, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], + }, + count: 1, + }, + }, + { + attributes: { + streamingOperation: 'true', + attemptStatus: 0, + clientName: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientUid: 'fake-uuid', + }, + projectId: 'my-project', + }, + startTime: [123, 789], + endTime: [456, 789], + value: { + min: 2000, + max: 2000, + sum: 2000, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], + }, + count: 1, + }, + }, + ], + }, + { + descriptor: { + name: 'bigtable.googleapis.com/internal/client/retry_count', + type: 'HISTOGRAM', + description: + 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', + unit: '', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 0, + dataPoints: [ + { + attributes: { + finalOperationStatus: 0, + streamingOperation: 'true', + metricsCollectorData: { + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientUid: 'fake-uuid', + }, + clientName: 'nodejs-bigtable', + projectId: 'my-project', + }, + startTime: [123, 789], + endTime: [456, 789], + value: { + min: 1, + max: 1, + sum: 1, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + }, + count: 1, + }, + }, + ], + }, + { + descriptor: { + name: 'bigtable.googleapis.com/internal/client/first_response_latencies', + type: 'HISTOGRAM', + description: + 'Latencies from when a client sends a request and receives the first row of the response.', + unit: 'ms', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 0, + dataPoints: [ + { + attributes: { + finalOperationStatus: 0, + streamingOperation: 'true', + metricsCollectorData: { + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientUid: 'fake-uuid', + }, + clientName: 'nodejs-bigtable', + projectId: 'my-project', + }, + startTime: [123, 789], + endTime: [456, 789], + value: { + min: 5000, + max: 5000, + sum: 5000, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + }, + count: 1, + }, + }, + ], + }, + { + descriptor: { + name: 
'bigtable.googleapis.com/internal/client/server_latencies', + type: 'HISTOGRAM', + description: + 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', + unit: 'ms', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 0, + dataPoints: [ + { + attributes: { + streamingOperation: 'true', + attemptStatus: 4, + clientName: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientUid: 'fake-uuid', + }, + projectId: 'my-project', + }, + startTime: [123, 789], + endTime: [456, 789], + value: { + min: 101, + max: 101, + sum: 101, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + }, + count: 1, + }, + }, + { + attributes: { + streamingOperation: 'true', + attemptStatus: 0, + clientName: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientUid: 'fake-uuid', + }, + projectId: 'my-project', + }, + startTime: [123, 789], + endTime: [456, 789], + value: { + min: 103, + max: 103, + sum: 103, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + }, + count: 1, + }, + }, + ], + }, + { + descriptor: { + name: 'bigtable.googleapis.com/internal/client/connectivity_error_count', + type: 'HISTOGRAM', + description: + "The number of requests that failed to reach Google's network. In normal cases, this number is 0. 
When the number is not 0, it can indicate connectivity issues between the application and the Google network.", + unit: '', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 0, + dataPoints: [ + { + attributes: { + streamingOperation: 'true', + attemptStatus: 4, + clientName: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientUid: 'fake-uuid', + }, + projectId: 'my-project', + }, + startTime: [123, 789], + endTime: [456, 789], + value: { + min: 0, + max: 0, + sum: 0, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + }, + count: 1, + }, + }, + { + attributes: { + streamingOperation: 'true', + attemptStatus: 0, + clientName: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientUid: 'fake-uuid', + }, + projectId: 'my-project', + }, + startTime: [123, 789], + endTime: [456, 789], + value: { + min: 0, + max: 0, + sum: 0, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + }, + count: 1, + }, + }, + ], + }, + ], + }, + ], +}; diff --git a/test-common/metrics-handler-fixture.ts b/test-common/metrics-handler-fixture.ts index f008f86fd..8238d3150 100644 --- a/test-common/metrics-handler-fixture.ts +++ b/test-common/metrics-handler-fixture.ts @@ -12,6 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. 
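+// Use the shared enums from client-side-metrics-attributes below so the
+// fixture values stay aligned with the attribute types the metrics handler
+// emits.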
+import {
+  MethodName,
+  StreamingState,
+} from '../src/client-side-metrics/client-side-metrics-attributes';
+
 export const expectedRequestsHandled = [
   {
     metrics: {
@@ -20,7 +25,7 @@ export const expectedRequestsHandled = [
       connectivityErrorCount: 0,
     },
     attributes: {
-      streamingOperation: 'true',
+      streamingOperation: StreamingState.STREAMING,
       attemptStatus: 4,
       clientName: 'nodejs-bigtable',
       metricsCollectorData: {
@@ -29,7 +34,7 @@ export const expectedRequestsHandled = [
       table: 'fakeTableId',
       cluster: 'fake-cluster3',
       zone: 'us-west1-c',
-      methodName: 'readRows',
+      methodName: MethodName.READ_ROWS,
       clientUid: 'fake-uuid',
     },
     projectId: 'my-project',
@@ -42,7 +47,7 @@ export const expectedRequestsHandled = [
       connectivityErrorCount: 0,
     },
     attributes: {
-      streamingOperation: 'true',
+      streamingOperation: StreamingState.STREAMING,
       attemptStatus: 0,
       clientName: 'nodejs-bigtable',
       metricsCollectorData: {
@@ -51,7 +56,7 @@ export const expectedRequestsHandled = [
       table: 'fakeTableId',
       cluster: 'fake-cluster3',
       zone: 'us-west1-c',
-      methodName: 'readRows',
+      methodName: MethodName.READ_ROWS,
       clientUid: 'fake-uuid',
     },
     projectId: 'my-project',
@@ -65,14 +70,14 @@ export const expectedRequestsHandled = [
     },
     attributes: {
       finalOperationStatus: 0,
-      streamingOperation: 'true',
+      streamingOperation: StreamingState.STREAMING,
       metricsCollectorData: {
         appProfileId: undefined,
         instanceId: 'fakeInstanceId',
         table: 'fakeTableId',
         cluster: 'fake-cluster3',
         zone: 'us-west1-c',
-        methodName: 'readRows',
+        methodName: MethodName.READ_ROWS,
         clientUid: 'fake-uuid',
       },
       clientName: 'nodejs-bigtable',
diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts
new file mode 100644
index 000000000..7e24b41c1
--- /dev/null
+++ b/test/metrics-collector/gcp-metrics-handler.ts
@@ -0,0 +1,89 @@
+import {describe} from 'mocha';
+import {ResourceMetrics} from '@opentelemetry/sdk-metrics';
+import {ExportResult} from '../../src/client-side-metrics/exporter';
+import {GCPMetricsHandler} from '../../src/client-side-metrics/gcp-metrics-handler';
+import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter';
+import {expectedRequestsHandled} from '../../test-common/metrics-handler-fixture';
+import {
+  OnAttemptCompleteAttributes,
+  OnOperationCompleteAttributes,
+} from '../../src/client-side-metrics/client-side-metrics-attributes';
+import {OnOperationCompleteMetrics} from '../../src/client-side-metrics/metrics-handler';
+import {expectedOtelExportInput} from '../../test-common/expected-otel-export-input';
+
+function replaceTimestamps(
+  request: typeof expectedOtelExportInput,
+  newStartTime: [number, number],
+  newEndTime: [number, number]
+) {
+  request.scopeMetrics.forEach(scopeMetric => {
+    scopeMetric.metrics.forEach(metric => {
+      metric.dataPoints.forEach(dataPoint => {
+        dataPoint.startTime = newStartTime;
+        dataPoint.endTime = newEndTime;
+      });
+    });
+  });
+}
+
+// Example usage:
+replaceTimestamps(expectedOtelExportInput, [123, 789], [456, 789]);
+
+// You can now use updatedInput with metricsToRequest, and it will have the new timestamps.
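+// (A note on the helper above, based on its implementation: replaceTimestamps
+// mutates expectedOtelExportInput in place rather than returning a copy, so
+// the "updatedInput" mentioned above is the same object; after the call every
+// dataPoint carries startTime [123, 789] and endTime [456, 789].)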
+
+describe.only('Bigtable/GCPMetricsHandler', () => {
+  it('Should export a value ready for sending to the CloudMonitoringExporter', done => {
+    let testDone = false;
+    let resolvePlaceholder: (arg: string) => void;
+    class TestExporter extends MetricExporter {
+      export(
+        metrics: ResourceMetrics,
+        resultCallback: (result: ExportResult) => void
+      ) {
+        replaceTimestamps(
+          metrics as unknown as typeof expectedOtelExportInput,
+          [123, 789],
+          [456, 789]
+        );
+        // super.export(metrics, resultCallback);
+        console.log('in export');
+        try {
+          // Add assert statement here.
+          if (!testDone) {
+            testDone = true;
+            resultCallback({code: 0});
+            resolvePlaceholder('done');
+          }
+        } catch (e) {
+          resolvePlaceholder('error');
+        }
+      }
+    }
+    const handler = new GCPMetricsHandler(
+      new TestExporter({projectId: 'cloud-native-db-dpes-shared'})
+    );
+    for (let i = 0; i < expectedRequestsHandled.length; i++) {
+      const request = expectedRequestsHandled[i];
+      if (request.metrics.attemptLatency) {
+        handler.onAttemptComplete(
+          request.metrics,
+          request.attributes as OnAttemptCompleteAttributes
+        );
+      } else {
+        // TODO: Use a type guard here instead of casting.
+        handler.onOperationComplete(
+          request.metrics as OnOperationCompleteMetrics,
+          request.attributes as OnOperationCompleteAttributes
+        );
+      }
+    }
+    // Wait for the metric to be exported
+    console.log('waiting');
+    // This promise is needed because the test completes prematurely otherwise
+    // before the metric is exported.
+    // TODO: Try removing this promise
+    new Promise(resolve => {
+      resolvePlaceholder = resolve;
+    });
+  });
+});

From 94f422c06c3bdc989e12772f1a699b8301729f96 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Thu, 13 Feb 2025 16:41:58 -0500
Subject: [PATCH 179/448] Reducing export interval makes the test complete

---
 src/client-side-metrics/gcp-metrics-handler.ts |  2 +-
 test-common/metrics-handler-fixture.ts         | 12 ++++++------
 test/metrics-collector/gcp-metrics-handler.ts  |  7 +++++--
 3 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts
index 46026ed8d..24e5c3b6c 100644
--- a/src/client-side-metrics/gcp-metrics-handler.ts
+++ b/src/client-side-metrics/gcp-metrics-handler.ts
@@ -114,7 +114,7 @@ export class GCPMetricsHandler
         new PeriodicExportingMetricReader({
           // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by
           // Cloud Monitoring.
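           // Note: the shorter interval used below is under that documented
           // minimum; it is presumably only safe while the tests stub out the
           // exporter, and it lets a test observe an export without a long wait.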
-          exportIntervalMillis: 10_000,
+          exportIntervalMillis: 1_000,
           exporter: this.exporter,
         }),
       ],
diff --git a/test-common/metrics-handler-fixture.ts b/test-common/metrics-handler-fixture.ts
index 8238d3150..3304fb13b 100644
--- a/test-common/metrics-handler-fixture.ts
+++ b/test-common/metrics-handler-fixture.ts
@@ -25,7 +25,7 @@ export const expectedRequestsHandled = [
       connectivityErrorCount: 0,
     },
     attributes: {
-      streamingOperation: StreamingState.STREAMING,
+      streamingOperation: 'true',
       attemptStatus: 4,
       clientName: 'nodejs-bigtable',
       metricsCollectorData: {
@@ -34,7 +34,7 @@ export const expectedRequestsHandled = [
       table: 'fakeTableId',
       cluster: 'fake-cluster3',
       zone: 'us-west1-c',
-      methodName: MethodName.READ_ROWS,
+      methodName: 'readRows',
       clientUid: 'fake-uuid',
     },
     projectId: 'my-project',
@@ -47,7 +47,7 @@ export const expectedRequestsHandled = [
       connectivityErrorCount: 0,
     },
     attributes: {
-      streamingOperation: StreamingState.STREAMING,
+      streamingOperation: 'true',
       attemptStatus: 0,
       clientName: 'nodejs-bigtable',
       metricsCollectorData: {
@@ -56,7 +56,7 @@ export const expectedRequestsHandled = [
       table: 'fakeTableId',
       cluster: 'fake-cluster3',
       zone: 'us-west1-c',
-      methodName: MethodName.READ_ROWS,
+      methodName: 'readRows',
       clientUid: 'fake-uuid',
     },
     projectId: 'my-project',
@@ -70,14 +70,14 @@ export const expectedRequestsHandled = [
     },
     attributes: {
       finalOperationStatus: 0,
-      streamingOperation: StreamingState.STREAMING,
+      streamingOperation: 'true',
       metricsCollectorData: {
         appProfileId: undefined,
         instanceId: 'fakeInstanceId',
         table: 'fakeTableId',
         cluster: 'fake-cluster3',
         zone: 'us-west1-c',
-        methodName: MethodName.READ_ROWS,
+        methodName: 'readRows',
         clientUid: 'fake-uuid',
       },
       clientName: 'nodejs-bigtable',
diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts
index 7e24b41c1..9a3534221 100644
--- a/test/metrics-collector/gcp-metrics-handler.ts
+++ b/test/metrics-collector/gcp-metrics-handler.ts
@@ -32,7 +32,7 @@ replaceTimestamps(expectedOtelExportInput, [123, 789], [456, 789]);
 // You can now use updatedInput with metricsToRequest, and it will have the new timestamps.
 
 describe.only('Bigtable/GCPMetricsHandler', () => {
-  it('Should export a value ready for sending to the CloudMonitoringExporter', done => {
+  it('Should export a value ready for sending to the CloudMonitoringExporter', async () => {
     let testDone = false;
     let resolvePlaceholder: (arg: string) => void;
     class TestExporter extends MetricExporter {
@@ -40,11 +40,13 @@ describe.only('Bigtable/GCPMetricsHandler', () => {
         metrics: ResourceMetrics,
         resultCallback: (result: ExportResult) => void
       ) {
+        /*
         replaceTimestamps(
           metrics as unknown as typeof expectedOtelExportInput,
           [123, 789],
           [456, 789]
         );
+        */
         // super.export(metrics, resultCallback);
         console.log('in export');
         try {
@@ -82,8 +84,9 @@ describe.only('Bigtable/GCPMetricsHandler', () => {
     // Wait for the metric to be exported
     console.log('waiting');
     // This promise is needed because the test completes prematurely otherwise
     // before the metric is exported.
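     // Awaiting it keeps the test alive until the PeriodicExportingMetricReader
     // fires and the exporter resolves the placeholder.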
     // TODO: Try removing this promise
-    new Promise(resolve => {
+    await new Promise(resolve => {
       resolvePlaceholder = resolve;
     });
+    console.log('done waiting');
   });
 });

From 1c3c290df90e0b6d87d0ec940da82b9ed6cfae7f Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Thu, 13 Feb 2025 17:26:07 -0500
Subject: [PATCH 180/448] Add the GCPMetricsHandler test

---
 .../gcp-metrics-handler.ts                    |  2 +-
 test/metrics-collector/gcp-metrics-handler.ts | 78 +++++++++++++++++--
 2 files changed, 71 insertions(+), 9 deletions(-)

diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts
index 24e5c3b6c..46026ed8d 100644
--- a/src/client-side-metrics/gcp-metrics-handler.ts
+++ b/src/client-side-metrics/gcp-metrics-handler.ts
@@ -114,7 +114,7 @@ export class GCPMetricsHandler
         new PeriodicExportingMetricReader({
           // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by
           // Cloud Monitoring.
-          exportIntervalMillis: 1_000,
+          exportIntervalMillis: 10_000,
           exporter: this.exporter,
         }),
       ],
diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts
index 9a3534221..7c57abef9 100644
--- a/test/metrics-collector/gcp-metrics-handler.ts
+++ b/test/metrics-collector/gcp-metrics-handler.ts
@@ -27,10 +27,70 @@ function replaceTimestamps(
 }
 
 // Example usage:
-replaceTimestamps(expectedOtelExportInput, [123, 789], [456, 789]);
+// replaceTimestamps(expectedOtelExportInput, [123, 789], [456, 789]);
 
 // You can now use updatedInput with metricsToRequest, and it will have the new timestamps.
+
+describe.only('Bigtable/GCPMetricsHandler', () => {
+  it('Should export a value ready for sending to the CloudMonitoringExporter', done => {
+    (async () => {
+      // let exportPromiseResolve: (value: unknown) => void;
+      /*
+      const exportPromise = new Promise(resolve => {
+        setTimeout(() => {
+          resolve(undefined);
+        }, 30000);
+      });
+      */
+
+      /*
+      We need to create a timeout here because if we don't then mocha shuts down
+      the test as it is sleeping before the GCPMetricsHandler has a chance to
+      export the data.
+      */
+      const timeout = setTimeout(() => {}, 30000);
+
+      class TestExporter extends MetricExporter {
+        async export(
+          metrics: ResourceMetrics,
+          resultCallback: (result: ExportResult) => void
+        ): Promise<void> {
+          // Make export async
+          console.log('in export');
+          // Perform your assertions here on the 'metrics' object
+          // ...
(your assertion logic) + clearTimeout(timeout); + resultCallback({code: 0}); + done(); + // exportPromiseResolve(undefined); // Resolve the promise after export + } + } + + const handler = new GCPMetricsHandler( + new TestExporter({projectId: 'cloud-native-db-dpes-shared'}) + ); + + for (const request of expectedRequestsHandled) { + if (request.metrics.attemptLatency) { + handler.onAttemptComplete( + request.metrics, + request.attributes as OnAttemptCompleteAttributes + ); + } else { + handler.onOperationComplete( + request.metrics as OnOperationCompleteMetrics, + request.attributes as OnOperationCompleteAttributes + ); + } + } + + // await exportPromise; // Wait for the export to complete + + console.log('done waiting'); // This will now be reached + })(); + }); +}); +/* describe.only('Bigtable/GCPMetricsHandler', () => { it('Should export a value ready for sending to the CloudMonitoringExporter', async () => { let testDone = false; @@ -40,13 +100,6 @@ describe.only('Bigtable/GCPMetricsHandler', () => { metrics: ResourceMetrics, resultCallback: (result: ExportResult) => void ) { - /* - replaceTimestamps( - metrics as unknown as typeof expectedOtelExportInput, - [123, 789], - [456, 789] - ); - */ // super.export(metrics, resultCallback); console.log('in export'); try { @@ -90,3 +143,12 @@ describe.only('Bigtable/GCPMetricsHandler', () => { console.log('done waiting'); }); }); +*/ + +/* +replaceTimestamps( + metrics as unknown as typeof expectedOtelExportInput, + [123, 789], + [456, 789] +); + */ From b118bc17543f63328b297bc28f42dbf684b5ebd6 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Feb 2025 15:10:36 -0500 Subject: [PATCH 181/448] Add a stub for the otel export --- .../gcp-metrics-handler.ts | 27 ++- test-common/expected-otel-export-input.ts | 160 +++++++++++++----- test/metrics-collector/gcp-metrics-handler.ts | 7 + 3 files changed, 147 insertions(+), 47 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 46026ed8d..c259290d7 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -75,7 +75,7 @@ export class GCPMetricsHandler if (!this.initialized) { this.initialized = true; const sumAggregation = Aggregation.Sum(); - const buckets = [ + const latencyBuckets = [ 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, @@ -127,6 +127,9 @@ export class GCPMetricsHandler description: "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", unit: 'ms', + advice: { + explicitBucketBoundaries: latencyBuckets, + }, } ), attemptLatencies: meter.createHistogram( @@ -135,9 +138,12 @@ export class GCPMetricsHandler description: 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. 
If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', unit: 'ms', + advice: { + explicitBucketBoundaries: latencyBuckets, + }, } ), - retryCount: meter.createHistogram( + retryCount: meter.createCounter( 'bigtable.googleapis.com/internal/client/retry_count', { description: @@ -150,6 +156,9 @@ export class GCPMetricsHandler description: 'The time from when the client receives the response to a request until the application reads the response. This metric is most relevant for ReadRows requests. The start and stop times for this metric depend on the way that you send the read request; see Application blocking latencies timer examples for details.', unit: 'ms', + advice: { + explicitBucketBoundaries: latencyBuckets, + }, } ), firstResponseLatencies: meter.createHistogram( @@ -158,6 +167,9 @@ export class GCPMetricsHandler description: 'Latencies from when a client sends a request and receives the first row of the response.', unit: 'ms', + advice: { + explicitBucketBoundaries: latencyBuckets, + }, } ), serverLatencies: meter.createHistogram( @@ -166,6 +178,9 @@ export class GCPMetricsHandler description: 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', unit: 'ms', + advice: { + explicitBucketBoundaries: latencyBuckets, + }, } ), connectivityErrorCount: meter.createHistogram( @@ -173,6 +188,9 @@ export class GCPMetricsHandler { description: "The number of requests that failed to reach Google's network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.", + advice: { + explicitBucketBoundaries: latencyBuckets, + }, } ), clientBlockingLatencies: meter.createHistogram( @@ -181,6 +199,9 @@ export class GCPMetricsHandler description: 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', unit: 'ms', + advice: { + explicitBucketBoundaries: latencyBuckets, + }, } ), }; @@ -204,7 +225,7 @@ export class GCPMetricsHandler metrics.operationLatency, attributes ); - this.otelMetrics?.retryCount.record(metrics.retryCount, attributes); + this.otelMetrics?.retryCount.add(metrics.retryCount, attributes); this.otelMetrics?.firstResponseLatencies.record( metrics.firstResponseLatency, attributes diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 9cd3e30ad..0941ab51d 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -38,7 +38,14 @@ export const expectedOtelExportInput = { "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. 
This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", unit: 'ms', valueType: 1, - advice: {}, + advice: { + explicitBucketBoundaries: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, + 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, + 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, + 100000, + ], + }, }, aggregationTemporality: 1, dataPointType: 0, @@ -66,10 +73,16 @@ export const expectedOtelExportInput = { sum: 7000, buckets: { boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + counts: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, + 0, 0, ], - counts: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], }, count: 1, }, @@ -84,7 +97,14 @@ export const expectedOtelExportInput = { 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', unit: 'ms', valueType: 1, - advice: {}, + advice: { + explicitBucketBoundaries: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, + 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, + 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, + 100000, + ], + }, }, aggregationTemporality: 1, dataPointType: 0, @@ -112,10 +132,16 @@ export const expectedOtelExportInput = { sum: 2000, buckets: { boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + counts: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, ], - counts: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], }, count: 1, }, @@ -143,10 +169,16 @@ export const expectedOtelExportInput = { sum: 2000, buckets: { boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + counts: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, ], - counts: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], }, count: 1, }, @@ -156,7 +188,7 @@ export const expectedOtelExportInput = { { descriptor: { name: 'bigtable.googleapis.com/internal/client/retry_count', - type: 'HISTOGRAM', + type: 'COUNTER', description: 'A counter that records the number of attempts that an operation required to complete. 
Under normal circumstances, this value is empty.', unit: '', @@ -164,7 +196,7 @@ export const expectedOtelExportInput = { advice: {}, }, aggregationTemporality: 1, - dataPointType: 0, + dataPointType: 3, dataPoints: [ { attributes: { @@ -183,21 +215,10 @@ export const expectedOtelExportInput = { }, startTime: [123, 789], endTime: [456, 789], - value: { - min: 1, - max: 1, - sum: 1, - buckets: { - boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, - ], - counts: [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - }, - count: 1, - }, + value: 1, }, ], + isMonotonic: true, }, { descriptor: { @@ -207,7 +228,14 @@ export const expectedOtelExportInput = { 'Latencies from when a client sends a request and receives the first row of the response.', unit: 'ms', valueType: 1, - advice: {}, + advice: { + explicitBucketBoundaries: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, + 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, + 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, + 100000, + ], + }, }, aggregationTemporality: 1, dataPointType: 0, @@ -235,10 +263,16 @@ export const expectedOtelExportInput = { sum: 5000, buckets: { boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + counts: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, + 0, 0, ], - counts: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], }, count: 1, }, @@ -253,7 +287,14 @@ export const expectedOtelExportInput = { 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', unit: 'ms', valueType: 1, - advice: {}, + advice: { + explicitBucketBoundaries: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, + 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, + 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, + 100000, + ], + }, }, aggregationTemporality: 1, dataPointType: 0, @@ -281,10 +322,16 @@ export const expectedOtelExportInput = { sum: 101, buckets: { boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + counts: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, ], - counts: [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], }, count: 1, }, @@ -312,10 +359,16 @@ export const expectedOtelExportInput = { sum: 103, buckets: { boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + counts: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, ], - counts: [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], }, count: 1, }, @@ -330,7 +383,14 @@ export const expectedOtelExportInput = { "The number of requests 
that failed to reach Google's network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.", unit: '', valueType: 1, - advice: {}, + advice: { + explicitBucketBoundaries: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, + 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, + 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, + 100000, + ], + }, }, aggregationTemporality: 1, dataPointType: 0, @@ -358,10 +418,16 @@ export const expectedOtelExportInput = { sum: 0, buckets: { boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + counts: [ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, ], - counts: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], }, count: 1, }, @@ -389,10 +455,16 @@ export const expectedOtelExportInput = { sum: 0, buckets: { boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + counts: [ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, ], - counts: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], }, count: 1, }, diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 7c57abef9..b46c52d42 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -10,6 +10,7 @@ import { } from '../../src/client-side-metrics/client-side-metrics-attributes'; import {OnOperationCompleteMetrics} from '../../src/client-side-metrics/metrics-handler'; import {expectedOtelExportInput} from '../../test-common/expected-otel-export-input'; +import * as assert from 'assert'; function replaceTimestamps( request: typeof expectedOtelExportInput, @@ -56,6 +57,12 @@ describe.only('Bigtable/GCPMetricsHandler', () => { resultCallback: (result: ExportResult) => void ): Promise { // Make export async + replaceTimestamps( + metrics as unknown as typeof expectedOtelExportInput, + [123, 789], + [456, 789] + ); + assert.deepStrictEqual(metrics, expectedOtelExportInput); console.log('in export'); // Perform your assertions here on the 'metrics' object // ... 
(your assertion logic) From 8aefe114636b1a35fade6367f1045806f2c7a5f4 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Feb 2025 16:14:09 -0500 Subject: [PATCH 182/448] Test calling export is now working --- src/client-side-metrics/exporter.ts | 101 ++- .../gcp-metrics-handler.ts | 1 + test-common/expected-otel-export-input.ts | 763 ++++++++++++++++++ test/metrics-collector/gcp-metrics-handler.ts | 46 +- 4 files changed, 862 insertions(+), 49 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 905003a57..2074929b5 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -18,6 +18,7 @@ import {ServiceError} from 'google-gax'; import {MetricServiceClient} from '@google-cloud/monitoring'; import {google} from '@google-cloud/monitoring/build/protos/protos'; import ICreateTimeSeriesRequest = google.monitoring.v3.ICreateTimeSeriesRequest; +import {RETRY_COUNT_NAME} from '../../test-common/expected-otel-export-input'; export interface ExportResult { code: number; @@ -93,7 +94,7 @@ export function metricsToRequest(exportArgs: ExportInput) { app_profile: allAttributes.appProfileId, client_name: allAttributes.clientName, method: allAttributes.methodName, - status: allAttributes.finalOperationStatus.toString(), + status: allAttributes.finalOperationStatus?.toString(), streaming: allAttributes.streamingOperation, client_uid: allAttributes.clientUid, }; @@ -104,46 +105,78 @@ export function metricsToRequest(exportArgs: ExportInput) { table: allAttributes.table, zone: allAttributes.zone, }; - const timeSeries = { - metric: { - type: metricName, - labels: metricLabels, - }, - resource: { - type: exportArgs.resource._syncAttributes[ - 'monitored_resource.type' - ], - labels: resourceLabels, - }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: dataPoint.endTime[0], + if (metricName === RETRY_COUNT_NAME) { + const timeSeries = { + metric: { + type: metricName, + labels: metricLabels, + }, + resource: { + type: exportArgs.resource._syncAttributes[ + 'monitored_resource.type' + ], + labels: resourceLabels, + }, + valueType: 'INT64', + points: [ + { + interval: { + endTime: { + seconds: dataPoint.endTime[0], + }, + startTime: { + seconds: dataPoint.startTime[0], + }, }, - startTime: { - seconds: dataPoint.startTime[0], + value: { + int64Value: dataPoint.value, }, }, - value: { - distributionValue: { - count: String(dataPoint.value.count), - mean: dataPoint.value.sum / dataPoint.value.count, - bucketOptions: { - explicitBuckets: { - bounds: dataPoint.value.buckets.boundaries, + ], + }; + timeSeriesArray.push(timeSeries); + } else { + const timeSeries = { + metric: { + type: metricName, + labels: metricLabels, + }, + resource: { + type: exportArgs.resource._syncAttributes[ + 'monitored_resource.type' + ], + labels: resourceLabels, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: dataPoint.endTime[0], + }, + startTime: { + seconds: dataPoint.startTime[0], + }, + }, + value: { + distributionValue: { + count: String(dataPoint.value.count), + mean: dataPoint.value.sum / dataPoint.value.count, + bucketOptions: { + explicitBuckets: { + bounds: dataPoint.value.buckets.boundaries, + }, }, + bucketCounts: dataPoint.value.buckets.counts.map(String), }, - bucketCounts: dataPoint.value.buckets.counts.map(String), }, }, - }, - ], - unit: metric.descriptor.unit || 'ms', // Default to 'ms' if no unit is 
specified - }; - timeSeriesArray.push(timeSeries); + ], + unit: metric.descriptor.unit || 'ms', // Default to 'ms' if no unit is specified + }; + timeSeriesArray.push(timeSeries); + } } } } diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index c259290d7..e5b2f3bec 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -178,6 +178,7 @@ export class GCPMetricsHandler description: 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', unit: 'ms', + advice: { explicitBucketBoundaries: latencyBuckets, }, diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 0941ab51d..3159f35d8 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -1,3 +1,766 @@ +export const RETRY_COUNT_NAME = + 'bigtable.googleapis.com/internal/client/retry_count'; + +const expectedOtelExportConvertedValue = { + name: 'projects/my-project', + timeSeries: [ + { + metric: { + type: 'bigtable.googleapis.com/internal/client/operation_latencies', + labels: { + client_name: 'nodejs-bigtable', + status: '0', + streaming: 'true', + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + project_id: 'my-project', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 456, + }, + startTime: { + seconds: 123, + }, + }, + value: { + distributionValue: { + count: '1', + mean: 7000, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + }, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/attempt_latencies', + labels: { + client_name: 'nodejs-bigtable', + streaming: 'true', + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + project_id: 'my-project', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 456, + }, + startTime: { + seconds: 123, + }, + }, + value: { + distributionValue: { + count: '1', + mean: 2000, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + }, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/attempt_latencies', + labels: { + client_name: 'nodejs-bigtable', + streaming: 'true', + }, + }, + resource: { + type: 
'bigtable_client_raw', + labels: { + project_id: 'my-project', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 456, + }, + startTime: { + seconds: 123, + }, + }, + value: { + distributionValue: { + count: '1', + mean: 2000, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + }, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/retry_count', + labels: { + client_name: 'nodejs-bigtable', + status: '0', + streaming: 'true', + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + project_id: 'my-project', + }, + }, + valueType: 'INT64', + points: [ + { + interval: { + endTime: { + seconds: 456, + }, + startTime: { + seconds: 123, + }, + }, + value: { + int64Value: 1, + }, + }, + ], + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/first_response_latencies', + labels: { + client_name: 'nodejs-bigtable', + status: '0', + streaming: 'true', + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + project_id: 'my-project', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 456, + }, + startTime: { + seconds: 123, + }, + }, + value: { + distributionValue: { + count: '1', + mean: 5000, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + }, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/server_latencies', + labels: { + client_name: 'nodejs-bigtable', + streaming: 'true', + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + project_id: 'my-project', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 456, + }, + startTime: { + seconds: 123, + }, + }, + value: { + distributionValue: { + count: '1', + mean: 101, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + }, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + ], + 
}, + }, + }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/server_latencies', + labels: { + client_name: 'nodejs-bigtable', + streaming: 'true', + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + project_id: 'my-project', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 456, + }, + startTime: { + seconds: 123, + }, + }, + value: { + distributionValue: { + count: '1', + mean: 103, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + }, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/connectivity_error_count', + labels: { + client_name: 'nodejs-bigtable', + streaming: 'true', + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + project_id: 'my-project', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 456, + }, + startTime: { + seconds: 123, + }, + }, + value: { + distributionValue: { + count: '1', + mean: 0, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + }, + }, + bucketCounts: [ + '1', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/connectivity_error_count', + labels: { + client_name: 'nodejs-bigtable', + streaming: 'true', + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + project_id: 'my-project', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 456, + }, + startTime: { + seconds: 123, + }, + }, + value: { + distributionValue: { + count: '1', + mean: 0, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + }, + }, + bucketCounts: [ + '1', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + ], +}; + export const expectedOtelExportInput = { resource: { _attributes: { diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 
b46c52d42..231b75b83 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -1,6 +1,10 @@ import {describe} from 'mocha'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; -import {ExportResult} from '../../src/client-side-metrics/exporter'; +import { + ExportInput, + ExportResult, + metricsToRequest, +} from '../../src/client-side-metrics/exporter'; import {GCPMetricsHandler} from '../../src/client-side-metrics/gcp-metrics-handler'; import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; import {expectedRequestsHandled} from '../../test-common/metrics-handler-fixture'; @@ -11,6 +15,7 @@ import { import {OnOperationCompleteMetrics} from '../../src/client-side-metrics/metrics-handler'; import {expectedOtelExportInput} from '../../test-common/expected-otel-export-input'; import * as assert from 'assert'; +import {exportInput} from '../../test-common/export-input-fixture'; function replaceTimestamps( request: typeof expectedOtelExportInput, @@ -56,20 +61,31 @@ describe.only('Bigtable/GCPMetricsHandler', () => { metrics: ResourceMetrics, resultCallback: (result: ExportResult) => void ): Promise { - // Make export async - replaceTimestamps( - metrics as unknown as typeof expectedOtelExportInput, - [123, 789], - [456, 789] - ); - assert.deepStrictEqual(metrics, expectedOtelExportInput); - console.log('in export'); - // Perform your assertions here on the 'metrics' object - // ... (your assertion logic) - clearTimeout(timeout); - resultCallback({code: 0}); - done(); - // exportPromiseResolve(undefined); // Resolve the promise after export + try { + console.log('in exporter'); + // Make export async + replaceTimestamps( + metrics as unknown as typeof expectedOtelExportInput, + [123, 789], + [456, 789] + ); + assert.deepStrictEqual( + JSON.parse(JSON.stringify(metrics)), + expectedOtelExportInput + ); + const convertedRequest = metricsToRequest( + expectedOtelExportInput as unknown as ExportInput + ); + console.log('in export'); + // Perform your assertions here on the 'metrics' object + // ... (your assertion logic) + clearTimeout(timeout); + resultCallback({code: 0}); + done(); + // exportPromiseResolve(undefined); // Resolve the promise after export + } catch (e) { + done(e); + } } } From a30d3ec457bbbd2e7e1487922ab8340b45a9bb1b Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Feb 2025 16:15:06 -0500 Subject: [PATCH 183/448] Remove old unused code --- test/metrics-collector/gcp-metrics-handler.ts | 62 ------------------- 1 file changed, 62 deletions(-) diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 231b75b83..72e5a2e24 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -113,65 +113,3 @@ describe.only('Bigtable/GCPMetricsHandler', () => { })(); }); }); -/* -describe.only('Bigtable/GCPMetricsHandler', () => { - it('Should export a value ready for sending to the CloudMonitoringExporter', async () => { - let testDone = false; - let resolvePlaceholder: (arg: string) => void; - class TestExporter extends MetricExporter { - export( - metrics: ResourceMetrics, - resultCallback: (result: ExportResult) => void - ) { - // super.export(metrics, resultCallback); - console.log('in export'); - try { - // Add assert statement here. 
- if (!testDone) { - testDone = true; - resultCallback({code: 0}); - resolvePlaceholder('done'); - } - } catch (e) { - resolvePlaceholder('error'); - } - } - } - const handler = new GCPMetricsHandler( - new TestExporter({projectId: 'cloud-native-db-dpes-shared'}) - ); - for (let i = 0; i < expectedRequestsHandled.length; i++) { - const request = expectedRequestsHandled[i]; - if (request.metrics.attemptLatency) { - handler.onAttemptComplete( - request.metrics, - request.attributes as OnAttemptCompleteAttributes - ); - } else { - // TODO: Use a type guard here instead of casting. - handler.onOperationComplete( - request.metrics as OnOperationCompleteMetrics, - request.attributes as OnOperationCompleteAttributes - ); - } - } - // Wait for the metric to be exported - console.log('waiting'); - // This promise is needed because the test completes prematurely otherwise - // before the metric is exported. - // TODO: Try removing this promise - await new Promise(resolve => { - resolvePlaceholder = resolve; - }); - console.log('done waiting'); - }); -}); -*/ - -/* -replaceTimestamps( - metrics as unknown as typeof expectedOtelExportInput, - [123, 789], - [456, 789] -); - */ From c3f296300112602466ae85b6bcd5e6bfc98b2d46 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Feb 2025 17:05:08 -0500 Subject: [PATCH 184/448] Change tests and json structure to work with metricsCollectorData structure --- src/client-side-metrics/exporter.ts | 26 +++++++------- .../gcp-metrics-handler.ts | 1 - system-test/cloud-monitoring-exporter.ts | 2 +- test-common/expected-otel-export-input.ts | 2 +- test-common/export-input-fixture.ts | 16 +++++---- test/metrics-collector/gcp-metrics-handler.ts | 36 +++++-------------- test/metrics-collector/metricsToRequest.ts | 8 ++--- 7 files changed, 38 insertions(+), 53 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 2074929b5..eb960dfcf 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -56,13 +56,15 @@ export interface ExportInput { finalOperationStatus: number; streamingOperation: string; projectId: string; - instanceId: string; - table: string; - cluster: string; - zone: string; - methodName: string; clientName: string; - clientUid: string; + metricsCollectorData: { + instanceId: string; + table: string; + cluster: string; + zone: string; + methodName: string; + clientUid: string; + }; }; startTime: number[]; endTime: number[]; @@ -93,17 +95,17 @@ export function metricsToRequest(exportArgs: ExportInput) { const metricLabels = { app_profile: allAttributes.appProfileId, client_name: allAttributes.clientName, - method: allAttributes.methodName, + method: allAttributes.metricsCollectorData.methodName, status: allAttributes.finalOperationStatus?.toString(), streaming: allAttributes.streamingOperation, - client_uid: allAttributes.clientUid, + client_uid: allAttributes.metricsCollectorData.clientUid, }; const resourceLabels = { - cluster: allAttributes.cluster, - instance: allAttributes.instanceId, + cluster: allAttributes.metricsCollectorData.cluster, + instance: allAttributes.metricsCollectorData.instanceId, project_id: allAttributes.projectId, - table: allAttributes.table, - zone: allAttributes.zone, + table: allAttributes.metricsCollectorData.table, + zone: allAttributes.metricsCollectorData.zone, }; if (metricName === RETRY_COUNT_NAME) { const timeSeries = { diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 
e5b2f3bec..48d8286b1 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -206,7 +206,6 @@ export class GCPMetricsHandler } ), }; - console.log('Done initializing'); } } diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts index 3e78c77c6..75381025f 100644 --- a/system-test/cloud-monitoring-exporter.ts +++ b/system-test/cloud-monitoring-exporter.ts @@ -4,7 +4,7 @@ import {exportInput} from '../test-common/export-input-fixture'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import {Bigtable} from '../src'; -describe.only('Bigtable/CloudMonitoringExporter', () => { +describe('Bigtable/CloudMonitoringExporter', () => { it('exports client side metrics to cloud monitoring', done => { // When this test is run, metrics should be visible at the following link: // https://pantheon.corp.google.com/monitoring/metrics-explorer;duration=PT1H?inv=1&invt=Abo9_A&project={projectId} diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 3159f35d8..5212349e8 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -1,7 +1,7 @@ export const RETRY_COUNT_NAME = 'bigtable.googleapis.com/internal/client/retry_count'; -const expectedOtelExportConvertedValue = { +export const expectedOtelExportConvertedValue = { name: 'projects/my-project', timeSeries: [ { diff --git a/test-common/export-input-fixture.ts b/test-common/export-input-fixture.ts index 3a8495fe3..17717aea1 100644 --- a/test-common/export-input-fixture.ts +++ b/test-common/export-input-fixture.ts @@ -65,14 +65,16 @@ export const exportInput = { appProfileId: 'fake-app-profile-id', finalOperationStatus: 0, streamingOperation: 'true', - projectId: 'some-project', - instanceId: 'emulator-test-instance', - table: 'my-table', - cluster: 'fake-cluster3', - zone: 'us-central1-f', - methodName: 'Bigtable.ReadRows', clientName: 'nodejs-bigtable/5.1.2', - clientUid: 'fake-uuid', + projectId: 'some-project', + metricsCollectorData: { + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', + }, }, startTime: [fakeStartTime, 951000000], endTime: [fakeEndTime, 948000000], diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 72e5a2e24..4aabba8be 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -13,9 +13,11 @@ import { OnOperationCompleteAttributes, } from '../../src/client-side-metrics/client-side-metrics-attributes'; import {OnOperationCompleteMetrics} from '../../src/client-side-metrics/metrics-handler'; -import {expectedOtelExportInput} from '../../test-common/expected-otel-export-input'; +import { + expectedOtelExportConvertedValue, + expectedOtelExportInput, +} from '../../test-common/expected-otel-export-input'; import * as assert from 'assert'; -import {exportInput} from '../../test-common/export-input-fixture'; function replaceTimestamps( request: typeof expectedOtelExportInput, @@ -32,23 +34,9 @@ function replaceTimestamps( }); } -// Example usage: -// replaceTimestamps(expectedOtelExportInput, [123, 789], [456, 789]); - -// You can now use updatedInput with metricsToRequest, and it will have the new timestamps. 
- -describe.only('Bigtable/GCPMetricsHandler', () => { +describe('Bigtable/GCPMetricsHandler', () => { it('Should export a value ready for sending to the CloudMonitoringExporter', done => { (async () => { - // let exportPromiseResolve: (value: unknown) => void; - /* - const exportPromise = new Promise(resolve => { - setTimeout(() => { - resolve(undefined); - }, 30000); - }); - */ - /* We need to create a timeout here because if we don't then mocha shuts down the test as it is sleeping before the GCPMetricsHandler has a chance to @@ -62,8 +50,6 @@ describe.only('Bigtable/GCPMetricsHandler', () => { resultCallback: (result: ExportResult) => void ): Promise { try { - console.log('in exporter'); - // Make export async replaceTimestamps( metrics as unknown as typeof expectedOtelExportInput, [123, 789], @@ -76,13 +62,13 @@ describe.only('Bigtable/GCPMetricsHandler', () => { const convertedRequest = metricsToRequest( expectedOtelExportInput as unknown as ExportInput ); - console.log('in export'); - // Perform your assertions here on the 'metrics' object - // ... (your assertion logic) + assert.deepStrictEqual( + JSON.parse(JSON.stringify(convertedRequest)), + expectedOtelExportConvertedValue + ); clearTimeout(timeout); resultCallback({code: 0}); done(); - // exportPromiseResolve(undefined); // Resolve the promise after export } catch (e) { done(e); } @@ -106,10 +92,6 @@ describe.only('Bigtable/GCPMetricsHandler', () => { ); } } - - // await exportPromise; // Wait for the export to complete - - console.log('done waiting'); // This will now be reached })(); }); }); diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index 15b839969..2042d895d 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -26,10 +26,10 @@ export const expectedRequest = { type: 'bigtable_client_raw', labels: { cluster: 'fake-cluster3', - instance: 'emulator-test-instance', + instance: 'fakeInstanceId', project_id: 'some-project', - table: 'my-table', - zone: 'us-central1-f', + table: 'fakeTableId', + zone: 'us-west1-c', }, }, metricKind: 'CUMULATIVE', @@ -149,7 +149,7 @@ export const expectedRequest = { }; // TODO: Generate the export code -describe('Bigtable/metricsToRequest', () => { +describe.only('Bigtable/metricsToRequest', () => { it('Converts a counter and a histogram to the cloud monitoring format', () => { const actualRequest = metricsToRequest(exportInput); assert.deepStrictEqual(actualRequest, expectedRequest); From 67b647852b1f9408d126bbee58c2a6b75f9fa637 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Feb 2025 17:15:17 -0500 Subject: [PATCH 185/448] Update all the fixtures --- .../gcp-metrics-handler.ts | 3 -- test-common/expected-otel-export-input.ts | 54 +++++++++++++++++++ test/metrics-collector/gcp-metrics-handler.ts | 2 +- test/metrics-collector/metricsToRequest.ts | 2 +- 4 files changed, 56 insertions(+), 5 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 48d8286b1..21d7704fb 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -61,7 +61,6 @@ export class GCPMetricsHandler private exporter: T; constructor(exporter: T) { - console.log('Passing in exporter'); this.exporter = exporter; } @@ -219,7 +218,6 @@ export class GCPMetricsHandler metrics: OnOperationCompleteMetrics, attributes: OnOperationCompleteAttributes ) { - console.log('onOperationComplete'); 
this.initialize(attributes.projectId); this.otelMetrics?.operationLatencies.record( metrics.operationLatency, @@ -243,7 +241,6 @@ export class GCPMetricsHandler metrics: OnAttemptCompleteMetrics, attributes: OnAttemptCompleteAttributes ) { - console.log('onAttemptComplete'); this.initialize(attributes.projectId); this.otelMetrics?.attemptLatencies.record( metrics.attemptLatency, diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 5212349e8..3f35b9c0a 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -9,14 +9,20 @@ export const expectedOtelExportConvertedValue = { type: 'bigtable.googleapis.com/internal/client/operation_latencies', labels: { client_name: 'nodejs-bigtable', + method: 'readRows', status: '0', streaming: 'true', + client_uid: 'fake-uuid', }, }, resource: { type: 'bigtable_client_raw', labels: { + cluster: 'fake-cluster3', + instance: 'fakeInstanceId', project_id: 'my-project', + table: 'fakeTableId', + zone: 'us-west1-c', }, }, metricKind: 'CUMULATIVE', @@ -100,13 +106,19 @@ export const expectedOtelExportConvertedValue = { type: 'bigtable.googleapis.com/internal/client/attempt_latencies', labels: { client_name: 'nodejs-bigtable', + method: 'readRows', streaming: 'true', + client_uid: 'fake-uuid', }, }, resource: { type: 'bigtable_client_raw', labels: { + cluster: 'fake-cluster3', + instance: 'fakeInstanceId', project_id: 'my-project', + table: 'fakeTableId', + zone: 'us-west1-c', }, }, metricKind: 'CUMULATIVE', @@ -190,13 +202,19 @@ export const expectedOtelExportConvertedValue = { type: 'bigtable.googleapis.com/internal/client/attempt_latencies', labels: { client_name: 'nodejs-bigtable', + method: 'readRows', streaming: 'true', + client_uid: 'fake-uuid', }, }, resource: { type: 'bigtable_client_raw', labels: { + cluster: 'fake-cluster3', + instance: 'fakeInstanceId', project_id: 'my-project', + table: 'fakeTableId', + zone: 'us-west1-c', }, }, metricKind: 'CUMULATIVE', @@ -280,14 +298,20 @@ export const expectedOtelExportConvertedValue = { type: 'bigtable.googleapis.com/internal/client/retry_count', labels: { client_name: 'nodejs-bigtable', + method: 'readRows', status: '0', streaming: 'true', + client_uid: 'fake-uuid', }, }, resource: { type: 'bigtable_client_raw', labels: { + cluster: 'fake-cluster3', + instance: 'fakeInstanceId', project_id: 'my-project', + table: 'fakeTableId', + zone: 'us-west1-c', }, }, valueType: 'INT64', @@ -312,14 +336,20 @@ export const expectedOtelExportConvertedValue = { type: 'bigtable.googleapis.com/internal/client/first_response_latencies', labels: { client_name: 'nodejs-bigtable', + method: 'readRows', status: '0', streaming: 'true', + client_uid: 'fake-uuid', }, }, resource: { type: 'bigtable_client_raw', labels: { + cluster: 'fake-cluster3', + instance: 'fakeInstanceId', project_id: 'my-project', + table: 'fakeTableId', + zone: 'us-west1-c', }, }, metricKind: 'CUMULATIVE', @@ -403,13 +433,19 @@ export const expectedOtelExportConvertedValue = { type: 'bigtable.googleapis.com/internal/client/server_latencies', labels: { client_name: 'nodejs-bigtable', + method: 'readRows', streaming: 'true', + client_uid: 'fake-uuid', }, }, resource: { type: 'bigtable_client_raw', labels: { + cluster: 'fake-cluster3', + instance: 'fakeInstanceId', project_id: 'my-project', + table: 'fakeTableId', + zone: 'us-west1-c', }, }, metricKind: 'CUMULATIVE', @@ -493,13 +529,19 @@ export const expectedOtelExportConvertedValue = { type: 
'bigtable.googleapis.com/internal/client/server_latencies', labels: { client_name: 'nodejs-bigtable', + method: 'readRows', streaming: 'true', + client_uid: 'fake-uuid', }, }, resource: { type: 'bigtable_client_raw', labels: { + cluster: 'fake-cluster3', + instance: 'fakeInstanceId', project_id: 'my-project', + table: 'fakeTableId', + zone: 'us-west1-c', }, }, metricKind: 'CUMULATIVE', @@ -583,13 +625,19 @@ export const expectedOtelExportConvertedValue = { type: 'bigtable.googleapis.com/internal/client/connectivity_error_count', labels: { client_name: 'nodejs-bigtable', + method: 'readRows', streaming: 'true', + client_uid: 'fake-uuid', }, }, resource: { type: 'bigtable_client_raw', labels: { + cluster: 'fake-cluster3', + instance: 'fakeInstanceId', project_id: 'my-project', + table: 'fakeTableId', + zone: 'us-west1-c', }, }, metricKind: 'CUMULATIVE', @@ -673,13 +721,19 @@ export const expectedOtelExportConvertedValue = { type: 'bigtable.googleapis.com/internal/client/connectivity_error_count', labels: { client_name: 'nodejs-bigtable', + method: 'readRows', streaming: 'true', + client_uid: 'fake-uuid', }, }, resource: { type: 'bigtable_client_raw', labels: { + cluster: 'fake-cluster3', + instance: 'fakeInstanceId', project_id: 'my-project', + table: 'fakeTableId', + zone: 'us-west1-c', }, }, metricKind: 'CUMULATIVE', diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 4aabba8be..1ed11d63e 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -34,7 +34,7 @@ function replaceTimestamps( }); } -describe('Bigtable/GCPMetricsHandler', () => { +describe.only('Bigtable/GCPMetricsHandler', () => { it('Should export a value ready for sending to the CloudMonitoringExporter', done => { (async () => { /* diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index 2042d895d..2123384d1 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -149,7 +149,7 @@ export const expectedRequest = { }; // TODO: Generate the export code -describe.only('Bigtable/metricsToRequest', () => { +describe('Bigtable/metricsToRequest', () => { it('Converts a counter and a histogram to the cloud monitoring format', () => { const actualRequest = metricsToRequest(exportInput); assert.deepStrictEqual(actualRequest, expectedRequest); From 8544410a1fb2845606b3d51c632ec3c4367e64eb Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 19 Feb 2025 11:28:31 -0500 Subject: [PATCH 186/448] Fix the view creation code Pass in latency buckets --- src/client-side-metrics/gcp-metrics-handler.ts | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 21d7704fb..fb4fd9406 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -73,14 +73,11 @@ export class GCPMetricsHandler private initialize(projectId?: string) { if (!this.initialized) { this.initialized = true; - const sumAggregation = Aggregation.Sum(); const latencyBuckets = [ 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, ]; - /* - const histogramAggregation = new ExplicitBucketHistogramAggregation(); const viewList = [ 'operation_latencies', 
'first_response_latencies', @@ -95,12 +92,14 @@ export class GCPMetricsHandler new View({ instrumentName: name, name, - aggregation: name.slice(-9) ? sumAggregation : histogramAggregation, + aggregation: + name === 'retry_count' + ? Aggregation.Sum() + : new ExplicitBucketHistogramAggregation(latencyBuckets), }) ); - */ const meterProvider = new MeterProvider({ - // views: viewList, + views: viewList, resource: new Resources.Resource({ 'service.name': 'Cloud Bigtable Table', 'cloud.provider': 'gcp', From 96dbc1c524c5b49d7684d5228c469a1542b44ec8 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 19 Feb 2025 12:58:16 -0500 Subject: [PATCH 187/448] Starting test for gcp-metrics-handler --- .../gcp-metrics-handler.ts | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 system-test/client-side-metrics/gcp-metrics-handler.ts diff --git a/system-test/client-side-metrics/gcp-metrics-handler.ts b/system-test/client-side-metrics/gcp-metrics-handler.ts new file mode 100644 index 000000000..18d3513cf --- /dev/null +++ b/system-test/client-side-metrics/gcp-metrics-handler.ts @@ -0,0 +1,36 @@ +import {describe} from 'mocha'; +import {GCPMetricsHandler} from '../../src/client-side-metrics/gcp-metrics-handler'; +import {expectedRequestsHandled} from '../../test-common/metrics-handler-fixture'; +import {OnAttemptCompleteAttributes, OnOperationCompleteAttributes} from '../../src/client-side-metrics/client-side-metrics-attributes'; +import {OnOperationCompleteMetrics} from '../../src/client-side-metrics/metrics-handler'; +import {CloudMonitoringExporter} from '../../src/client-side-metrics/exporter'; + +// TODO: Test that calls export. +// TODO: Test whole process. +describe.only('Bigtable/GCPMetricsHandler', () => { + it('Should export a value to the CloudMonitoringExporter', done => { + /* + We need to create a timeout here because if we don't then mocha shuts down + the test as it is sleeping before the GCPMetricsHandler has a chance to + export the data. 
+ */ + const timeout = setTimeout(() => {}, 30000); + const handler = new GCPMetricsHandler( + new CloudMonitoringExporter({projectId: 'cloud-native-db-dpes-shared'}) + ); + + for (const request of expectedRequestsHandled) { + if (request.metrics.attemptLatency) { + handler.onAttemptComplete( + request.metrics, + request.attributes as OnAttemptCompleteAttributes + ); + } else { + handler.onOperationComplete( + request.metrics as OnOperationCompleteMetrics, + request.attributes as OnOperationCompleteAttributes + ); + } + } + }); +}); From 416e18cd9cf1d3250978b1624fc14b520b42b3b3 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 19 Feb 2025 15:30:11 -0500 Subject: [PATCH 188/448] Put tests in the proper places --- .../gcp-metrics-handler.ts | 36 --------------- system-test/cloud-monitoring-exporter.ts | 42 ++++++++++++++++- system-test/gcp-metrics-handler.ts | 45 +++++++++++++++++++ 3 files changed, 86 insertions(+), 37 deletions(-) delete mode 100644 system-test/client-side-metrics/gcp-metrics-handler.ts create mode 100644 system-test/gcp-metrics-handler.ts diff --git a/system-test/client-side-metrics/gcp-metrics-handler.ts b/system-test/client-side-metrics/gcp-metrics-handler.ts deleted file mode 100644 index 18d3513cf..000000000 --- a/system-test/client-side-metrics/gcp-metrics-handler.ts +++ /dev/null @@ -1,36 +0,0 @@ -import {describe} from 'mocha'; -import {GCPMetricsHandler} from '../../src/client-side-metrics/gcp-metrics-handler'; -import {expectedRequestsHandled} from '../../test-common/metrics-handler-fixture'; -import {OnAttemptCompleteAttributes, OnOperationCompleteAttributes} from '../../src/client-side-metrics/client-side-metrics-attributes'; -import {OnOperationCompleteMetrics} from '../../src/client-side-metrics/metrics-handler'; -import {CloudMonitoringExporter} from '../../src/client-side-metrics/exporter'; - -// TODO: Test that calls export. -// TODO: Test whole process. -describe.only('Bigtable/GCPMetricsHandler', () => { - it('Should export a value to the CloudMonitoringExporter', done => { - /* - We need to create a timeout here because if we don't then mocha shuts down - the test as it is sleeping before the GCPMetricsHandler has a chance to - export the data. 
-     */
-    const timeout = setTimeout(() => {}, 30000);
-    const handler = new GCPMetricsHandler(
-      new CloudMonitoringExporter({projectId: 'cloud-native-db-dpes-shared'})
-    );
-
-    for (const request of expectedRequestsHandled) {
-      if (request.metrics.attemptLatency) {
-        handler.onAttemptComplete(
-          request.metrics,
-          request.attributes as OnAttemptCompleteAttributes
-        );
-      } else {
-        handler.onOperationComplete(
-          request.metrics as OnOperationCompleteMetrics,
-          request.attributes as OnOperationCompleteAttributes
-        );
-      }
-    }
-  });
-});
diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts
index 75381025f..48c3f537c 100644
--- a/system-test/cloud-monitoring-exporter.ts
+++ b/system-test/cloud-monitoring-exporter.ts
@@ -1,8 +1,13 @@
 import {describe} from 'mocha';
-import {CloudMonitoringExporter} from '../src/client-side-metrics/exporter';
+import {
+  CloudMonitoringExporter,
+  ExportResult,
+} from '../src/client-side-metrics/exporter';
 import {exportInput} from '../test-common/export-input-fixture';
 import {ResourceMetrics} from '@opentelemetry/sdk-metrics';
 import {Bigtable} from '../src';
+import * as assert from 'assert';
+import {expectedOtelExportInput} from '../test-common/expected-otel-export-input';
 
 describe('Bigtable/CloudMonitoringExporter', () => {
   it('exports client side metrics to cloud monitoring', done => {
@@ -36,4 +41,39 @@
       );
     })();
   });
+  it.only('Should send an otel exported value to the CloudMonitoringExporter', done => {
+    (async () => {
+      const resultCallback: (result: ExportResult) => void = (
+        result: ExportResult
+      ) => {
+        try {
+          assert.deepStrictEqual(result, {code: 0});
+          done();
+        } catch (error) {
+          done(error);
+        }
+      };
+      const bigtable = new Bigtable();
+      const projectId: string = await new Promise((resolve, reject) => {
+        bigtable.getProjectId_((err, projectId) => {
+          if (err) {
+            reject(err);
+          } else {
+            resolve(projectId as string);
+          }
+        });
+      });
+      const transformedExportInput = JSON.parse(
+        JSON.stringify(expectedOtelExportInput).replace(
+          /my-project/g,
+          projectId
+        )
+      );
+      const exporter = new CloudMonitoringExporter();
+      exporter.export(
+        transformedExportInput as unknown as ResourceMetrics,
+        resultCallback
+      );
+    })();
+  });
 });
diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts
new file mode 100644
index 000000000..9e7f3cf1b
--- /dev/null
+++ b/system-test/gcp-metrics-handler.ts
@@ -0,0 +1,45 @@
+import {describe} from 'mocha';
+import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler';
+import {expectedRequestsHandled} from '../test-common/metrics-handler-fixture';
+import {
+  OnAttemptCompleteAttributes,
+  OnOperationCompleteAttributes,
+} from '../src/client-side-metrics/client-side-metrics-attributes';
+import {OnOperationCompleteMetrics} from '../src/client-side-metrics/metrics-handler';
+import * as assert from 'assert';
+import {
+  CloudMonitoringExporter,
+  ExportResult,
+} from '../src/client-side-metrics/exporter';
+import {expectedOtelExportInput} from '../test-common/expected-otel-export-input';
+import {ResourceMetrics} from '@opentelemetry/sdk-metrics';
+
+// TODO: Test that calls export.
+// TODO: Test whole process.
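+// Each request recorded in the metrics-handler fixture is replayed into the
+// GCPMetricsHandler below; the handler aggregates the values through
+// OpenTelemetry and hands them to the CloudMonitoringExporter on its next
+// periodic export, and the timeout only keeps mocha alive long enough for
+// that export to fire.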
+describe('Bigtable/GCPMetricsHandler', () => { + it('Should export a value to the CloudMonitoringExporter', done => { + /* + We need to create a timeout here because if we don't then mocha shuts down + the test as it is sleeping before the GCPMetricsHandler has a chance to + export the data. + */ + const timeout = setTimeout(() => {}, 30000); + const handler = new GCPMetricsHandler( + new CloudMonitoringExporter({projectId: 'cloud-native-db-dpes-shared'}) + ); + + for (const request of expectedRequestsHandled) { + if (request.metrics.attemptLatency) { + handler.onAttemptComplete( + request.metrics, + request.attributes as OnAttemptCompleteAttributes + ); + } else { + handler.onOperationComplete( + request.metrics as OnOperationCompleteMetrics, + request.attributes as OnOperationCompleteAttributes + ); + } + } + }); +}); From 991f5c8be2a110f172107ba6218b6fd31ee7e392 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 19 Feb 2025 16:25:36 -0500 Subject: [PATCH 189/448] Replace start and end time with more recent values --- system-test/cloud-monitoring-exporter.ts | 12 +++++++++++- system-test/gcp-metrics-handler.ts | 8 +------- test-common/replace-timestamps.ts | 16 ++++++++++++++++ test/metrics-collector/gcp-metrics-handler.ts | 16 +--------------- 4 files changed, 29 insertions(+), 23 deletions(-) create mode 100644 test-common/replace-timestamps.ts diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts index 48c3f537c..c8464457e 100644 --- a/system-test/cloud-monitoring-exporter.ts +++ b/system-test/cloud-monitoring-exporter.ts @@ -3,11 +3,16 @@ import { CloudMonitoringExporter, ExportResult, } from '../src/client-side-metrics/exporter'; -import {exportInput} from '../test-common/export-input-fixture'; +import { + exportInput, + fakeEndTime, + fakeStartTime, +} from '../test-common/export-input-fixture'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import {Bigtable} from '../src'; import * as assert from 'assert'; import {expectedOtelExportInput} from '../test-common/expected-otel-export-input'; +import {replaceTimestamps} from '../test-common/replace-timestamps'; describe('Bigtable/CloudMonitoringExporter', () => { it('exports client side metrics to cloud monitoring', done => { @@ -69,6 +74,11 @@ describe('Bigtable/CloudMonitoringExporter', () => { projectId ) ); + replaceTimestamps( + transformedExportInput as unknown as typeof expectedOtelExportInput, + [fakeStartTime, 0], + [fakeEndTime, 0] + ); const exporter = new CloudMonitoringExporter(); exporter.export( transformedExportInput as unknown as ResourceMetrics, diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 9e7f3cf1b..b1fe47d83 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -6,13 +6,7 @@ import { OnOperationCompleteAttributes, } from '../src/client-side-metrics/client-side-metrics-attributes'; import {OnOperationCompleteMetrics} from '../src/client-side-metrics/metrics-handler'; -import * as assert from 'assert'; -import { - CloudMonitoringExporter, - ExportResult, -} from '../src/client-side-metrics/exporter'; -import {expectedOtelExportInput} from '../test-common/expected-otel-export-input'; -import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; +import {CloudMonitoringExporter} from '../src/client-side-metrics/exporter'; // TODO: Test that calls export. // TODO: Test whole process. 
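The `fakeStartTime` and `fakeEndTime` fixtures imported above are never defined in the hunks shown here; for Cloud Monitoring to accept the rewritten points they presumably have to be epoch seconds close to the current clock, since the service rejects points whose end time lies too far in the past. A minimal sketch of how such fixture values could be derived, under that assumption; the names match the imports, but the offsets and implementation are illustrative rather than the repository's actual fixture code:

// Hypothetical definitions for the timestamp fixtures in
// test-common/export-input-fixture.ts; the real implementation is not shown
// in this patch series.
const nowInSeconds = Math.floor(Date.now() / 1000);
export const fakeStartTime = nowInSeconds - 1000; // assumed offset
export const fakeEndTime = nowInSeconds;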
diff --git a/test-common/replace-timestamps.ts b/test-common/replace-timestamps.ts new file mode 100644 index 000000000..ea081cd81 --- /dev/null +++ b/test-common/replace-timestamps.ts @@ -0,0 +1,16 @@ +import {expectedOtelExportInput} from './expected-otel-export-input'; + +export function replaceTimestamps( + request: typeof expectedOtelExportInput, + newStartTime: [number, number], + newEndTime: [number, number] +) { + request.scopeMetrics.forEach(scopeMetric => { + scopeMetric.metrics.forEach(metric => { + metric.dataPoints.forEach(dataPoint => { + dataPoint.startTime = newStartTime; + dataPoint.endTime = newEndTime; + }); + }); + }); +} diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 1ed11d63e..7c1f93f0f 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -18,21 +18,7 @@ import { expectedOtelExportInput, } from '../../test-common/expected-otel-export-input'; import * as assert from 'assert'; - -function replaceTimestamps( - request: typeof expectedOtelExportInput, - newStartTime: [number, number], - newEndTime: [number, number] -) { - request.scopeMetrics.forEach(scopeMetric => { - scopeMetric.metrics.forEach(metric => { - metric.dataPoints.forEach(dataPoint => { - dataPoint.startTime = newStartTime; - dataPoint.endTime = newEndTime; - }); - }); - }); -} +import {replaceTimestamps} from '../../test-common/replace-timestamps'; describe.only('Bigtable/GCPMetricsHandler', () => { it('Should export a value ready for sending to the CloudMonitoringExporter', done => { From 09389eeffcbfe85994c75408835a1d80aca06eb1 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 20 Feb 2025 14:12:55 -0500 Subject: [PATCH 190/448] Utilize the new metrics handler interface --- .../gcp-metrics-handler.ts | 115 ++++++++++++------ src/client-side-metrics/metrics-handler.ts | 53 ++++---- test-common/metrics-handler-fixture.ts | 107 +++++++--------- 3 files changed, 155 insertions(+), 120 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index fb4fd9406..cd896f6e9 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -14,8 +14,8 @@ import { IMetricsHandler, - OnAttemptCompleteMetrics, - OnOperationCompleteMetrics, + OnAttemptCompleteData, + OnOperationCompleteData, } from './metrics-handler'; import * as Resources from '@opentelemetry/resources'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; @@ -48,6 +48,14 @@ interface Metrics { clientBlockingLatencies: typeof Histogram; } +interface MonitoredResourceData { + projectId: string; + instanceId: string; + table: string; + cluster?: string; + zone?: string; +} + /** * A metrics handler implementation that uses OpenTelemetry to export metrics to Google Cloud Monitoring. * This handler records metrics such as operation latency, attempt latency, retry count, and more, @@ -70,7 +78,7 @@ export class GCPMetricsHandler * Sets up a MeterProvider and configures a PeriodicExportingMetricReader for exporting metrics to Cloud Monitoring. * @param {string} [projectId] The Google Cloud project ID. Used for metric export. If not provided, it will attempt to detect it from the environment. 
*/ - private initialize(projectId?: string) { + private initialize(data: MonitoredResourceData) { if (!this.initialized) { this.initialized = true; const latencyBuckets = [ @@ -104,8 +112,13 @@ export class GCPMetricsHandler 'service.name': 'Cloud Bigtable Table', 'cloud.provider': 'gcp', 'cloud.platform': 'gce_instance', - 'cloud.resource_manager.project_id': projectId, + 'cloud.resource_manager.project_id': data.projectId, 'monitored_resource.type': 'bigtable_client_raw', + 'monitored_resource.project_id': data.projectId, + 'monitored_resource.instance_id': data.instanceId, + 'monitored_resource.table': data.table, + 'monitored_resource.cluster': data.cluster, + 'monitored_resource.zone': data.zone, }).merge(new ResourceUtil.GcpDetectorSync().detect()), readers: [ // Register the exporter @@ -210,45 +223,79 @@ export class GCPMetricsHandler /** * Records metrics for a completed Bigtable operation. * This method records the operation latency and retry count, associating them with provided attributes. - * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. - * @param {OnOperationCompleteAttributes} attributes Attributes associated with the completed operation. + * @param {OnOperationCompleteData} data Data related to the completed operation. */ - onOperationComplete( - metrics: OnOperationCompleteMetrics, - attributes: OnOperationCompleteAttributes - ) { - this.initialize(attributes.projectId); - this.otelMetrics?.operationLatencies.record( - metrics.operationLatency, - attributes - ); - this.otelMetrics?.retryCount.add(metrics.retryCount, attributes); - this.otelMetrics?.firstResponseLatencies.record( - metrics.firstResponseLatency, - attributes - ); + onOperationComplete(data: OnOperationCompleteData) { + this.initialize({ + projectId: data.projectId, + instanceId: data.metricsCollectorData.instanceId, + table: data.metricsCollectorData.table, + cluster: data.metricsCollectorData.cluster, + zone: data.metricsCollectorData.zone, + }); + this.otelMetrics?.operationLatencies.record(data.operationLatency, { + appProfileId: data.metricsCollectorData.appProfileId, + methodName: data.metricsCollectorData.methodName, + clientUid: data.metricsCollectorData.clientUid, + finalOperationStatus: data.finalOperationStatus, + streamingOperation: data.streamingOperation, + clientName: data.clientName, + }); + this.otelMetrics?.retryCount.add(data.retryCount, { + appProfileId: data.metricsCollectorData.appProfileId, + methodName: data.metricsCollectorData.methodName, + clientUid: data.metricsCollectorData.clientUid, + finalOperationStatus: data.finalOperationStatus, + clientName: data.clientName, + }); + this.otelMetrics?.firstResponseLatencies.record(data.firstResponseLatency, { + appProfileId: data.metricsCollectorData.appProfileId, + methodName: data.metricsCollectorData.methodName, + clientUid: data.metricsCollectorData.clientUid, + finalOperationStatus: data.finalOperationStatus, + clientName: data.clientName, + }); } /** * Records metrics for a completed attempt of a Bigtable operation. * This method records attempt latency, connectivity error count, server latency, and first response latency, * along with the provided attributes. - * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. - * @param {OnAttemptCompleteAttributes} attributes Attributes associated with the completed attempt. + * @param {OnAttemptCompleteData} data Data related to the completed attempt. 
*/ - onAttemptComplete( - metrics: OnAttemptCompleteMetrics, - attributes: OnAttemptCompleteAttributes - ) { - this.initialize(attributes.projectId); - this.otelMetrics?.attemptLatencies.record( - metrics.attemptLatency, - attributes - ); + onAttemptComplete(data: OnAttemptCompleteData) { + this.initialize({ + projectId: data.projectId, + instanceId: data.metricsCollectorData.instanceId, + table: data.metricsCollectorData.table, + cluster: data.metricsCollectorData.cluster, + zone: data.metricsCollectorData.zone, + }); + this.otelMetrics?.attemptLatencies.record(data.attemptLatency, { + appProfileId: data.metricsCollectorData.appProfileId, + methodName: data.metricsCollectorData.methodName, + clientUid: data.metricsCollectorData.clientUid, + attemptStatus: data.attemptStatus, + streamingOperation: data.streamingOperation, + clientName: data.clientName, + }); this.otelMetrics?.connectivityErrorCount.record( - metrics.connectivityErrorCount, - attributes + data.connectivityErrorCount, + { + appProfileId: data.metricsCollectorData.appProfileId, + methodName: data.metricsCollectorData.methodName, + clientUid: data.metricsCollectorData.clientUid, + attemptStatus: data.attemptStatus, + clientName: data.clientName, + } ); - this.otelMetrics?.serverLatencies.record(metrics.serverLatency, attributes); + this.otelMetrics?.serverLatencies.record(data.serverLatency, { + appProfileId: data.metricsCollectorData.appProfileId, + methodName: data.metricsCollectorData.methodName, + clientUid: data.metricsCollectorData.clientUid, + attemptStatus: data.attemptStatus, + streamingOperation: data.streamingOperation, + clientName: data.clientName, + }); } } diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index 38a98ae59..0a701d3a0 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -12,10 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -import { - OnAttemptCompleteAttributes, - OnOperationCompleteAttributes, -} from './client-side-metrics-attributes'; +import {MethodName, StreamingState} from './client-side-metrics-attributes'; +import {grpc} from 'google-gax'; /** * The interfaces below use undefined instead of null to indicate a metric is @@ -23,22 +21,36 @@ import { * without requiring users to change the methods in their metrics handler. */ -/** - * Metrics related to the completion of a Bigtable operation. - */ -export interface OnOperationCompleteMetrics { +type IMetricsCollectorData = { + instanceId: string; + table: string; + cluster?: string; + zone?: string; + appProfileId?: string; + methodName: MethodName; + clientUid: string; +}; + +export interface OnOperationCompleteData { firstResponseLatency?: number; operationLatency: number; retryCount?: number; + projectId: string; + metricsCollectorData: IMetricsCollectorData; + clientName: string; + finalOperationStatus: grpc.status; + streamingOperation: StreamingState; } -/** - * Metrics related to the completion of a single attempt of a Bigtable operation. 
- */
-export interface OnAttemptCompleteMetrics {
+export interface OnAttemptCompleteData {
   attemptLatency: number;
   serverLatency?: number;
   connectivityErrorCount: number;
+  projectId: string;
+  metricsCollectorData: IMetricsCollectorData;
+  clientName: string;
+  attemptStatus: grpc.status;
+  streamingOperation: StreamingState;
 }
 
 /**
@@ -48,20 +60,13 @@ export interface IMetricsHandler {
   /**
    * Called when an operation completes (successfully or unsuccessfully).
-   * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation.
-   * @param {OnOperationCompleteAttributes} attributes Attributes associated with the completed operation.
+   * @param {OnOperationCompleteData} data Metrics and attributes related to the completed operation.
    */
-  onOperationComplete?(
-    metrics: OnOperationCompleteMetrics,
-    attributes: OnOperationCompleteAttributes
-  ): void;
+  onOperationComplete?(data: OnOperationCompleteData): void;
+
   /**
    * Called when an attempt (e.g., an RPC attempt) completes.
-   * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt.
-   * @param {OnAttemptCompleteAttributes} attributes Attributes associated with the completed attempt.
+   * @param {OnAttemptCompleteData} data Metrics and attributes related to the completed attempt.
    */
-  onAttemptComplete?(
-    metrics: OnAttemptCompleteMetrics,
-    attributes: OnAttemptCompleteAttributes
-  ): void;
+  onAttemptComplete?(data: OnAttemptCompleteData): void;
 }
diff --git a/test-common/metrics-handler-fixture.ts b/test-common/metrics-handler-fixture.ts
index 3304fb13b..a49a91158 100644
--- a/test-common/metrics-handler-fixture.ts
+++ b/test-common/metrics-handler-fixture.ts
@@ -12,76 +12,59 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-import { - MethodName, - StreamingState, -} from '../src/client-side-metrics/client-side-metrics-attributes'; - export const expectedRequestsHandled = [ { - metrics: { - attemptLatency: 2000, - serverLatency: 101, - connectivityErrorCount: 0, - }, - attributes: { - streamingOperation: 'true', - attemptStatus: 4, - clientName: 'nodejs-bigtable', - metricsCollectorData: { - appProfileId: undefined, - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', - clientUid: 'fake-uuid', - }, - projectId: 'my-project', + attemptLatency: 2000, + serverLatency: 101, + connectivityErrorCount: 0, + streamingOperation: 'true', + attemptStatus: 4, + clientName: 'nodejs-bigtable', + metricsCollectorData: { + appProfileId: undefined, + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', }, + projectId: 'my-project', }, { - metrics: { - attemptLatency: 2000, - serverLatency: 103, - connectivityErrorCount: 0, - }, - attributes: { - streamingOperation: 'true', - attemptStatus: 0, - clientName: 'nodejs-bigtable', - metricsCollectorData: { - appProfileId: undefined, - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', - clientUid: 'fake-uuid', - }, - projectId: 'my-project', + attemptLatency: 2000, + serverLatency: 103, + connectivityErrorCount: 0, + streamingOperation: 'true', + attemptStatus: 0, + clientName: 'nodejs-bigtable', + metricsCollectorData: { + appProfileId: undefined, + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', }, + projectId: 'my-project', }, { - metrics: { - operationLatency: 7000, - retryCount: 1, - firstResponseLatency: 5000, - }, - attributes: { - finalOperationStatus: 0, - streamingOperation: 'true', - metricsCollectorData: { - appProfileId: undefined, - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', - clientUid: 'fake-uuid', - }, - clientName: 'nodejs-bigtable', - projectId: 'my-project', + finalOperationStatus: 0, + streamingOperation: 'true', + metricsCollectorData: { + appProfileId: undefined, + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', }, + clientName: 'nodejs-bigtable', + projectId: 'my-project', + operationLatency: 7000, + retryCount: 1, + firstResponseLatency: 5000, }, ]; From 87d5592d7c8f18d76afd5384cad0756b21e8e8aa Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 20 Feb 2025 16:10:34 -0500 Subject: [PATCH 191/448] Solve compiler errors resulting from metrics handl --- system-test/gcp-metrics-handler.ts | 19 ++++++------------- test/metrics-collector/gcp-metrics-handler.ts | 19 ++++++------------- 2 files changed, 12 insertions(+), 26 deletions(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index b1fe47d83..07b08e36f 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -2,10 +2,9 @@ import {describe} from 'mocha'; import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler'; import {expectedRequestsHandled} from '../test-common/metrics-handler-fixture'; import { - OnAttemptCompleteAttributes, - 
OnOperationCompleteAttributes, -} from '../src/client-side-metrics/client-side-metrics-attributes'; -import {OnOperationCompleteMetrics} from '../src/client-side-metrics/metrics-handler'; + OnAttemptCompleteData, + OnOperationCompleteData, +} from '../src/client-side-metrics/metrics-handler'; import {CloudMonitoringExporter} from '../src/client-side-metrics/exporter'; // TODO: Test that calls export. @@ -23,16 +22,10 @@ describe('Bigtable/GCPMetricsHandler', () => { ); for (const request of expectedRequestsHandled) { - if (request.metrics.attemptLatency) { - handler.onAttemptComplete( - request.metrics, - request.attributes as OnAttemptCompleteAttributes - ); + if (request.attemptLatency) { + handler.onAttemptComplete(request as OnAttemptCompleteData); } else { - handler.onOperationComplete( - request.metrics as OnOperationCompleteMetrics, - request.attributes as OnOperationCompleteAttributes - ); + handler.onOperationComplete(request as OnOperationCompleteData); } } }); diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 7c1f93f0f..b73082404 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -9,10 +9,9 @@ import {GCPMetricsHandler} from '../../src/client-side-metrics/gcp-metrics-handl import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; import {expectedRequestsHandled} from '../../test-common/metrics-handler-fixture'; import { - OnAttemptCompleteAttributes, - OnOperationCompleteAttributes, -} from '../../src/client-side-metrics/client-side-metrics-attributes'; -import {OnOperationCompleteMetrics} from '../../src/client-side-metrics/metrics-handler'; + OnAttemptCompleteData, + OnOperationCompleteData, +} from '../../src/client-side-metrics/metrics-handler'; import { expectedOtelExportConvertedValue, expectedOtelExportInput, @@ -66,16 +65,10 @@ describe.only('Bigtable/GCPMetricsHandler', () => { ); for (const request of expectedRequestsHandled) { - if (request.metrics.attemptLatency) { - handler.onAttemptComplete( - request.metrics, - request.attributes as OnAttemptCompleteAttributes - ); + if (request.attemptLatency) { + handler.onAttemptComplete(request as OnAttemptCompleteData); } else { - handler.onOperationComplete( - request.metrics as OnOperationCompleteMetrics, - request.attributes as OnOperationCompleteAttributes - ); + handler.onOperationComplete(request as OnOperationCompleteData); } } })(); From 9ad2ef8c2ebe2c8e44011e095b133894248b5a08 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 20 Feb 2025 17:01:01 -0500 Subject: [PATCH 192/448] Update the fixture --- test-common/expected-otel-export-input.ts | 125 ++++++---------------- 1 file changed, 34 insertions(+), 91 deletions(-) diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 3f35b9c0a..c8ff81c87 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -821,22 +821,32 @@ export const expectedOtelExportInput = { 'service.name': 'Cloud Bigtable Table', 'telemetry.sdk.language': 'nodejs', 'telemetry.sdk.name': 'opentelemetry', - 'telemetry.sdk.version': '1.30.0', + 'telemetry.sdk.version': '1.30.1', 'cloud.provider': 'gcp', 'cloud.platform': 'gce_instance', 'cloud.resource_manager.project_id': 'my-project', 'monitored_resource.type': 'bigtable_client_raw', + 'monitored_resource.project_id': 'my-project', + 'monitored_resource.instance_id': 'fakeInstanceId', + 
'monitored_resource.table': 'fakeTableId', + 'monitored_resource.cluster': 'fake-cluster3', + 'monitored_resource.zone': 'us-west1-c', }, asyncAttributesPending: false, _syncAttributes: { 'service.name': 'Cloud Bigtable Table', 'telemetry.sdk.language': 'nodejs', 'telemetry.sdk.name': 'opentelemetry', - 'telemetry.sdk.version': '1.30.0', + 'telemetry.sdk.version': '1.30.1', 'cloud.provider': 'gcp', 'cloud.platform': 'gce_instance', 'cloud.resource_manager.project_id': 'my-project', 'monitored_resource.type': 'bigtable_client_raw', + 'monitored_resource.project_id': 'my-project', + 'monitored_resource.instance_id': 'fakeInstanceId', + 'monitored_resource.table': 'fakeTableId', + 'monitored_resource.cluster': 'fake-cluster3', + 'monitored_resource.zone': 'us-west1-c', }, _asyncAttributesPromise: {}, }, @@ -869,18 +879,11 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', finalOperationStatus: 0, streamingOperation: 'true', - metricsCollectorData: { - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', - clientUid: 'fake-uuid', - }, clientName: 'nodejs-bigtable', - projectId: 'my-project', }, startTime: [123, 789], endTime: [456, 789], @@ -928,18 +931,11 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { - streamingOperation: 'true', + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', attemptStatus: 4, + streamingOperation: 'true', clientName: 'nodejs-bigtable', - metricsCollectorData: { - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', - clientUid: 'fake-uuid', - }, - projectId: 'my-project', }, startTime: [123, 789], endTime: [456, 789], @@ -965,18 +961,11 @@ export const expectedOtelExportInput = { }, { attributes: { - streamingOperation: 'true', + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', attemptStatus: 0, + streamingOperation: 'true', clientName: 'nodejs-bigtable', - metricsCollectorData: { - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', - clientUid: 'fake-uuid', - }, - projectId: 'my-project', }, startTime: [123, 789], endTime: [456, 789], @@ -1017,18 +1006,10 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', finalOperationStatus: 0, - streamingOperation: 'true', - metricsCollectorData: { - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', - clientUid: 'fake-uuid', - }, clientName: 'nodejs-bigtable', - projectId: 'my-project', }, startTime: [123, 789], endTime: [456, 789], @@ -1059,18 +1040,10 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', finalOperationStatus: 0, - streamingOperation: 'true', - metricsCollectorData: { - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', - clientUid: 'fake-uuid', - }, clientName: 'nodejs-bigtable', - projectId: 'my-project', }, startTime: [123, 789], endTime: [456, 789], @@ -1118,18 +1091,11 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { - streamingOperation: 'true', + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', 
attemptStatus: 4, + streamingOperation: 'true', clientName: 'nodejs-bigtable', - metricsCollectorData: { - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', - clientUid: 'fake-uuid', - }, - projectId: 'my-project', }, startTime: [123, 789], endTime: [456, 789], @@ -1155,18 +1121,11 @@ export const expectedOtelExportInput = { }, { attributes: { - streamingOperation: 'true', + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', attemptStatus: 0, + streamingOperation: 'true', clientName: 'nodejs-bigtable', - metricsCollectorData: { - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', - clientUid: 'fake-uuid', - }, - projectId: 'my-project', }, startTime: [123, 789], endTime: [456, 789], @@ -1214,18 +1173,10 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { - streamingOperation: 'true', + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', attemptStatus: 4, clientName: 'nodejs-bigtable', - metricsCollectorData: { - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', - clientUid: 'fake-uuid', - }, - projectId: 'my-project', }, startTime: [123, 789], endTime: [456, 789], @@ -1251,18 +1202,10 @@ export const expectedOtelExportInput = { }, { attributes: { - streamingOperation: 'true', + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', attemptStatus: 0, clientName: 'nodejs-bigtable', - metricsCollectorData: { - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', - clientUid: 'fake-uuid', - }, - projectId: 'my-project', }, startTime: [123, 789], endTime: [456, 789], From c59fcab6c3235887e4d42bc2881131156e33a1fa Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 20 Feb 2025 17:32:51 -0500 Subject: [PATCH 193/448] rewrite the metric to request method --- src/client-side-metrics/exporter.ts | 62 ++++++++++++++-------- test/metrics-collector/metricsToRequest.ts | 12 +++++ 2 files changed, 51 insertions(+), 23 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index eb960dfcf..4f973d0e1 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -32,6 +32,11 @@ export interface ExportInput { }; _syncAttributes: { 'monitored_resource.type': string; + 'monitored_resource.project_id': string; + 'monitored_resource.instance_id': string; + 'monitored_resource.table': string; + 'monitored_resource.cluster': string; + 'monitored_resource.zone': string; }; }; scopeMetrics: { @@ -51,21 +56,23 @@ export interface ExportInput { aggregationTemporality?: number; dataPointType?: number; dataPoints: { - attributes: { - appProfileId?: string; - finalOperationStatus: number; - streamingOperation: string; - projectId: string; - clientName: string; - metricsCollectorData: { - instanceId: string; - table: string; - cluster: string; - zone: string; - methodName: string; - clientUid: string; - }; - }; + attributes: + | { + methodName: string; + clientUid: string; + appProfileId?: string; + finalOperationStatus: number; + streamingOperation?: string; + clientName: string; + } + | { + methodName: string; + clientUid: string; + appProfileId?: string; + attemptStatus: number; + streamingOperation?: string; + clientName: string; + }; startTime: number[]; endTime: number[]; value: { @@ 
-92,20 +99,29 @@ export function metricsToRequest(exportArgs: ExportInput) { for (const dataPoint of metric.dataPoints) { // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; + // TODO: Type guard for final operation status / attempt status const metricLabels = { app_profile: allAttributes.appProfileId, client_name: allAttributes.clientName, - method: allAttributes.metricsCollectorData.methodName, - status: allAttributes.finalOperationStatus?.toString(), + method: allAttributes.methodName, + status: '0', streaming: allAttributes.streamingOperation, - client_uid: allAttributes.metricsCollectorData.clientUid, + client_uid: allAttributes.clientUid, }; const resourceLabels = { - cluster: allAttributes.metricsCollectorData.cluster, - instance: allAttributes.metricsCollectorData.instanceId, - project_id: allAttributes.projectId, - table: allAttributes.metricsCollectorData.table, - zone: allAttributes.metricsCollectorData.zone, + cluster: + exportArgs.resource._syncAttributes['monitored_resource.cluster'], + instance: + exportArgs.resource._syncAttributes[ + 'monitored_resource.instance_id' + ], + project_id: + exportArgs.resource._syncAttributes[ + 'monitored_resource.project_id' + ], + table: + exportArgs.resource._syncAttributes['monitored_resource.table'], + zone: exportArgs.resource._syncAttributes['monitored_resource.zone'], }; if (metricName === RETRY_COUNT_NAME) { const timeSeries = { diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index 2123384d1..2fb8458bb 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -6,6 +6,10 @@ import { fakeStartTime, } from '../../test-common/export-input-fixture'; import {metricsToRequest} from '../../src/client-side-metrics/exporter'; +import { + expectedOtelExportConvertedValue, + expectedOtelExportInput, +} from '../../test-common/expected-otel-export-input'; export const expectedRequest = { name: 'projects/some-project', @@ -150,8 +154,16 @@ export const expectedRequest = { // TODO: Generate the export code describe('Bigtable/metricsToRequest', () => { + /* it('Converts a counter and a histogram to the cloud monitoring format', () => { const actualRequest = metricsToRequest(exportInput); assert.deepStrictEqual(actualRequest, expectedRequest); }); + */ + it('Converts an otel request to a request ready for the metric service client', () => { + assert.deepStrictEqual( + metricsToRequest(expectedOtelExportInput), + expectedOtelExportConvertedValue + ); + }); }); From 5848588743eb43e77eade489c276aee74ca54c46 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 10:18:32 -0500 Subject: [PATCH 194/448] Add interfaces to work with type guards --- src/client-side-metrics/exporter.ts | 189 ++++++++++++++++------------ 1 file changed, 108 insertions(+), 81 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 4f973d0e1..9aa9e5b90 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -24,7 +24,65 @@ export interface ExportResult { code: number; } -// TODO: Only involves the values that we care about +interface OnAttemptAttribute { + methodName: string; + clientUid: string; + appProfileId?: string; + attemptStatus: number; + streamingOperation?: string; + clientName: string; +} + +interface OnOperationAttribute { + methodName: string; + clientUid: string; + appProfileId?: string; + 
finalOperationStatus: number; + streamingOperation?: string; + clientName: string; +} + +interface ScopeMetric<Attributes, Value> { + scope: { + name: string; + version: string; + }; + metrics: { + descriptor: { + name: string; + unit: string; + description?: string; + type?: string; + valueType?: number; + advice?: {}; + }; + aggregationTemporality?: number; + dataPointType?: number; + dataPoints: { + attributes: Attributes; + startTime: number[]; + endTime: number[]; + value: Value; + }[]; + }[]; +} + +type OtherMetric = ScopeMetric< + OnAttemptAttribute | OnOperationAttribute, + { + min?: number; + max?: number; + sum: number; + count: number; + buckets: { + boundaries: number[]; + counts: number[]; + }; + } +>; + +type RetryMetric = ScopeMetric<OnOperationAttribute, number>; + export interface ExportInput { resource: { @@ -39,91 +97,43 @@ export interface ExportInput { 'monitored_resource.zone': string; }; }; - scopeMetrics: { - scope: { - name: string; - version: string; - }; - metrics: { - descriptor: { - name: string; - unit: string; - description?: string; - type?: string; - valueType?: number; - advice?: {}; - }; - aggregationTemporality?: number; - dataPointType?: number; - dataPoints: { - attributes: - | { - methodName: string; - clientUid: string; - appProfileId?: string; - finalOperationStatus: number; - streamingOperation?: string; - clientName: string; - } - | { - methodName: string; - clientUid: string; - appProfileId?: string; - attemptStatus: number; - streamingOperation?: string; - clientName: string; - }; - startTime: number[]; - endTime: number[]; - value: { - min?: number; - max?: number; - sum: number; - count: number; - buckets: { - boundaries: number[]; - counts: number[]; - }; - }; - }[]; - }[]; - }[]; + scopeMetrics: (OtherMetric | RetryMetric)[]; +} + +function isRetryMetric( + scopeMetric: OtherMetric | RetryMetric +): scopeMetric is RetryMetric { + return scopeMetric.scope.name === RETRY_COUNT_NAME; } export function metricsToRequest(exportArgs: ExportInput) { const timeSeriesArray = []; + const resourceLabels = { + cluster: exportArgs.resource._syncAttributes['monitored_resource.cluster'], + instance: + exportArgs.resource._syncAttributes['monitored_resource.instance_id'], + project_id: + exportArgs.resource._syncAttributes['monitored_resource.project_id'], + table: exportArgs.resource._syncAttributes['monitored_resource.table'], + zone: exportArgs.resource._syncAttributes['monitored_resource.zone'], + }; for (const scopeMetrics of exportArgs.scopeMetrics) { - for (const metric of scopeMetrics.metrics) { - const metricName = metric.descriptor.name; - + if (isRetryMetric(scopeMetrics)) { + for (const metric of scopeMetrics.metrics) { + const metricName = metric.descriptor.name; + for (const dataPoint of metric.dataPoints) { // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; // TODO: Type guard for final operation status / attempt status const metricLabels = { app_profile: allAttributes.appProfileId, client_name: allAttributes.clientName, method: allAttributes.methodName, status: '0', streaming: allAttributes.streamingOperation, client_uid: allAttributes.clientUid, }; - const resourceLabels = { - cluster: - exportArgs.resource._syncAttributes['monitored_resource.cluster'], - instance: - exportArgs.resource._syncAttributes[ - 'monitored_resource.instance_id' - ], - project_id: - exportArgs.resource._syncAttributes[ - 'monitored_resource.project_id' - ], - table: -
exportArgs.resource._syncAttributes['monitored_resource.table'], - zone: exportArgs.resource._syncAttributes['monitored_resource.zone'], - }; if (metricName === RETRY_COUNT_NAME) { const timeSeries = { metric: { type: metricName, @@ -153,7 +163,24 @@ export function metricsToRequest(exportArgs: ExportInput) { ], }; timeSeriesArray.push(timeSeries); - } else { + } + } + } else { + for (const metric of scopeMetrics.metrics) { + const metricName = metric.descriptor.name; + + for (const dataPoint of metric.dataPoints) { + // Extract attributes to labels based on their intended target (resource or metric) + const allAttributes = dataPoint.attributes; + // TODO: Type guard for final operation status / attempt status + const metricLabels = { + app_profile: allAttributes.appProfileId, + client_name: allAttributes.clientName, + method: allAttributes.methodName, + status: '0', + streaming: allAttributes.streamingOperation, + client_uid: allAttributes.clientUid, + }; const timeSeries = { metric: { type: metricName, From 9c35dfbd04dacb4d66af6e8e97c596e8012a0b9b Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 10:42:38 -0500 Subject: [PATCH 195/448] Correct the compile errors: different metric types are under metrics, not scope metrics --- src/client-side-metrics/exporter.ts | 65 +++++++++++++---------- 1 file changed, 30 insertions(+), 35 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 9aa9e5b90..886fa9b43 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -42,32 +42,26 @@ interface OnOperationAttribute { clientName: string; } -interface ScopeMetric<Attributes, Value> { - scope: { + +interface Metric<Attributes, Value> { + descriptor: { name: string; - version: string; + unit: string; + description?: string; + type?: string; + valueType?: number; + advice?: {}; }; - metrics: { - descriptor: { - name: string; - unit: string; - description?: string; - type?: string; - valueType?: number; - advice?: {}; - }; - aggregationTemporality?: number; - dataPointType?: number; - dataPoints: { - attributes: Attributes; - startTime: number[]; - endTime: number[]; - value: Value; - }[]; + aggregationTemporality?: number; + dataPointType?: number; + dataPoints: { + attributes: Attributes; + startTime: number[]; + endTime: number[]; + value: Value; }[]; } -type OtherMetric = ScopeMetric< +type OtherMetric = Metric< OnAttemptAttribute | OnOperationAttribute, { min?: number; @@ -81,7 +75,7 @@ type OtherMetric = ScopeMetric< } >; -type RetryMetric = ScopeMetric<OnOperationAttribute, number>; +type RetryMetric = Metric<OnOperationAttribute, number>; export interface ExportInput { @@ -97,13 +91,19 @@ export interface ExportInput { 'monitored_resource.zone': string; }; }; - scopeMetrics: (OtherMetric | RetryMetric)[]; + scopeMetrics: { + scope: { + name: string; + version: string; + }; + metrics: (RetryMetric | OtherMetric)[]; + }[]; } function isRetryMetric( -
metric: OtherMetric | RetryMetric +): metric is RetryMetric { + return metric.descriptor.name === RETRY_COUNT_NAME; } export function metricsToRequest(exportArgs: ExportInput) { @@ -118,10 +118,9 @@ export function metricsToRequest(exportArgs: ExportInput) { zone: exportArgs.resource._syncAttributes['monitored_resource.zone'], }; for (const scopeMetrics of exportArgs.scopeMetrics) { - if (isRetryMetric(scopeMetrics)) { - for (const metric of scopeMetrics.metrics) { - const metricName = metric.descriptor.name; - + for (const metric of scopeMetrics.metrics) { + const metricName = metric.descriptor.name; + if (isRetryMetric(metric)) { for (const dataPoint of metric.dataPoints) { // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; @@ -164,11 +163,7 @@ export function metricsToRequest(exportArgs: ExportInput) { }; timeSeriesArray.push(timeSeries); } - } - } else { - for (const metric of scopeMetrics.metrics) { - const metricName = metric.descriptor.name; - + } else { for (const dataPoint of metric.dataPoints) { // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; From 07ec90dea7242f10b0f394d46820f8d4d7172354 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 11:39:36 -0500 Subject: [PATCH 196/448] Fix expected OTEL output test --- src/client-side-metrics/exporter.ts | 44 ++++++++++++------- test-common/expected-otel-export-input.ts | 37 ++++++++++------ test/metrics-collector/gcp-metrics-handler.ts | 2 +- test/metrics-collector/metricsToRequest.ts | 14 ++++-- 4 files changed, 64 insertions(+), 33 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 886fa9b43..227536f65 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -125,14 +125,17 @@ export function metricsToRequest(exportArgs: ExportInput) { // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; // TODO: Type guard for final operation status / attempt status - const metricLabels = { - app_profile: allAttributes.appProfileId, - client_name: allAttributes.clientName, - method: allAttributes.methodName, - status: '0', - streaming: allAttributes.streamingOperation, - client_uid: allAttributes.clientUid, - }; + const streaming = allAttributes.streamingOperation; + const metricLabels = Object.assign( + { + app_profile: allAttributes.appProfileId, + client_name: allAttributes.clientName, + method: allAttributes.methodName, + status: allAttributes.finalOperationStatus.toString(), + client_uid: allAttributes.clientUid, + }, + streaming ? 
{streaming} : null + ); const timeSeries = { metric: { type: metricName, @@ -168,14 +171,23 @@ export function metricsToRequest(exportArgs: ExportInput) { // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; // TODO: Type guard for final operation status / attempt status - const metricLabels = { - app_profile: allAttributes.appProfileId, - client_name: allAttributes.clientName, - method: allAttributes.methodName, - status: '0', - streaming: allAttributes.streamingOperation, - client_uid: allAttributes.clientUid, - }; + const streaming = allAttributes.streamingOperation; + const metricLabels = Object.assign( + { + app_profile: allAttributes.appProfileId, + client_name: allAttributes.clientName, + method: allAttributes.methodName, + status: + ( + allAttributes as OnAttemptAttribute + ).attemptStatus?.toString() ?? + ( + allAttributes as OnOperationAttribute + ).finalOperationStatus?.toString(), + client_uid: allAttributes.clientUid, + }, + streaming ? {streaming} : null + ); const timeSeries = { metric: { type: metricName, diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index c8ff81c87..ea5ff41e9 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -8,8 +8,9 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/operation_latencies', labels: { + app_profile: undefined, client_name: 'nodejs-bigtable', - method: 'readRows', + method: 'Bigtable.ReadRows', status: '0', streaming: 'true', client_uid: 'fake-uuid', @@ -105,8 +106,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/attempt_latencies', labels: { + app_profile: undefined, client_name: 'nodejs-bigtable', - method: 'readRows', + method: 'Bigtable.ReadRows', + status: '4', streaming: 'true', client_uid: 'fake-uuid', }, @@ -201,8 +204,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/attempt_latencies', labels: { + app_profile: undefined, client_name: 'nodejs-bigtable', - method: 'readRows', + method: 'Bigtable.ReadRows', + status: '0', streaming: 'true', client_uid: 'fake-uuid', }, @@ -297,10 +302,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/retry_count', labels: { + app_profile: undefined, client_name: 'nodejs-bigtable', - method: 'readRows', + method: 'Bigtable.ReadRows', status: '0', - streaming: 'true', client_uid: 'fake-uuid', }, }, @@ -335,10 +340,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/first_response_latencies', labels: { + app_profile: undefined, client_name: 'nodejs-bigtable', - method: 'readRows', + method: 'Bigtable.ReadRows', status: '0', - streaming: 'true', client_uid: 'fake-uuid', }, }, @@ -432,8 +437,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/server_latencies', labels: { + app_profile: undefined, client_name: 'nodejs-bigtable', - method: 'readRows', + method: 'Bigtable.ReadRows', + status: '4', streaming: 'true', client_uid: 'fake-uuid', }, @@ -528,8 +535,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/server_latencies', labels: { + app_profile: undefined, client_name: 'nodejs-bigtable', - method: 
'readRows', + method: 'Bigtable.ReadRows', + status: '0', streaming: 'true', client_uid: 'fake-uuid', }, @@ -624,9 +633,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/connectivity_error_count', labels: { + app_profile: undefined, client_name: 'nodejs-bigtable', - method: 'readRows', - streaming: 'true', + method: 'Bigtable.ReadRows', + status: '4', client_uid: 'fake-uuid', }, }, @@ -720,9 +730,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/connectivity_error_count', labels: { + app_profile: undefined, client_name: 'nodejs-bigtable', - method: 'readRows', - streaming: 'true', + method: 'Bigtable.ReadRows', + status: '0', client_uid: 'fake-uuid', }, }, diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index b73082404..91eabd82e 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -19,7 +19,7 @@ import { import * as assert from 'assert'; import {replaceTimestamps} from '../../test-common/replace-timestamps'; -describe.only('Bigtable/GCPMetricsHandler', () => { +describe('Bigtable/GCPMetricsHandler', () => { it('Should export a value ready for sending to the CloudMonitoringExporter', done => { (async () => { /* diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index 2fb8458bb..c26ad0085 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -160,10 +160,18 @@ describe('Bigtable/metricsToRequest', () => { assert.deepStrictEqual(actualRequest, expectedRequest); }); */ - it('Converts an otel request to a request ready for the metric service client', () => { + it.only('Converts an otel request to a request ready for the metric service client', () => { + const convertedValue = metricsToRequest(expectedOtelExportInput); assert.deepStrictEqual( - metricsToRequest(expectedOtelExportInput), - expectedOtelExportConvertedValue + convertedValue.timeSeries.length, + expectedOtelExportConvertedValue.timeSeries.length ); + for (let index = 0; index < convertedValue.timeSeries.length; index++) { + // We need to compare pointwise because mocha truncates to an 8192 character limit. 
+ assert.deepStrictEqual( + convertedValue.timeSeries[index], + expectedOtelExportConvertedValue.timeSeries[index] + ); + } }); }); From bf54c8c2090c927ba7c1d16b607c08cfb14c7000 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 11:41:20 -0500 Subject: [PATCH 197/448] Remove TODOs --- src/client-side-metrics/exporter.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 227536f65..2b17dee6d 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -124,7 +124,6 @@ export function metricsToRequest(exportArgs: ExportInput) { for (const dataPoint of metric.dataPoints) { // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; - // TODO: Type guard for final operation status / attempt status const streaming = allAttributes.streamingOperation; const metricLabels = Object.assign( { @@ -170,7 +169,6 @@ export function metricsToRequest(exportArgs: ExportInput) { for (const dataPoint of metric.dataPoints) { // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; - // TODO: Type guard for final operation status / attempt status const streaming = allAttributes.streamingOperation; const metricLabels = Object.assign( { From f226b5fdda88d7a69cab4d8dc4a7bce93f52f3b0 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 11:50:37 -0500 Subject: [PATCH 198/448] Fix test to compare pointwise --- test/metrics-collector/gcp-metrics-handler.ts | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 91eabd82e..bf2ddb8d8 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -19,7 +19,7 @@ import { import * as assert from 'assert'; import {replaceTimestamps} from '../../test-common/replace-timestamps'; -describe('Bigtable/GCPMetricsHandler', () => { +describe.only('Bigtable/GCPMetricsHandler', () => { it('Should export a value ready for sending to the CloudMonitoringExporter', done => { (async () => { /* @@ -48,9 +48,20 @@ describe('Bigtable/GCPMetricsHandler', () => { expectedOtelExportInput as unknown as ExportInput ); assert.deepStrictEqual( - JSON.parse(JSON.stringify(convertedRequest)), - expectedOtelExportConvertedValue + convertedRequest.timeSeries.length, + expectedOtelExportConvertedValue.timeSeries.length ); + for ( + let index = 0; + index < convertedRequest.timeSeries.length; + index++ + ) { + // We need to compare pointwise because mocha truncates to an 8192 character limit. 
+ assert.deepStrictEqual( + convertedRequest.timeSeries[index], + expectedOtelExportConvertedValue.timeSeries[index] + ); + } clearTimeout(timeout); resultCallback({code: 0}); done(); From b42b4f4e912045d0008b1c95cc5a2f7935d44d7e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 13:46:20 -0500 Subject: [PATCH 199/448] connectivity error count corrections --- src/client-side-metrics/exporter.ts | 21 ++- .../gcp-metrics-handler.ts | 22 +-- test-common/expected-otel-export-input.ts | 168 +----------------- 3 files changed, 31 insertions(+), 180 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 2b17dee6d..7a76366a4 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -18,7 +18,7 @@ import {ServiceError} from 'google-gax'; import {MetricServiceClient} from '@google-cloud/monitoring'; import {google} from '@google-cloud/monitoring/build/protos/protos'; import ICreateTimeSeriesRequest = google.monitoring.v3.ICreateTimeSeriesRequest; -import {RETRY_COUNT_NAME} from '../../test-common/expected-otel-export-input'; +import {CONNECTIIVTY_ERROR_COUNT, RETRY_COUNT_NAME} from '../../test-common/expected-otel-export-input'; export interface ExportResult { code: number; @@ -75,7 +75,7 @@ type OtherMetric = Metric< } >; -type RetryMetric = Metric; +type RetryMetric = Metric; export interface ExportInput { resource: { @@ -100,10 +100,13 @@ export interface ExportInput { }[]; } -function isRetryMetric( +function isIntegerMetric( metric: OtherMetric | RetryMetric ): metric is RetryMetric { - return metric.descriptor.name === RETRY_COUNT_NAME; + return ( + metric.descriptor.name === RETRY_COUNT_NAME || + metric.descriptor.name === CONNECTIIVTY_ERROR_COUNT + ); } export function metricsToRequest(exportArgs: ExportInput) { @@ -120,7 +123,7 @@ export function metricsToRequest(exportArgs: ExportInput) { for (const scopeMetrics of exportArgs.scopeMetrics) { for (const metric of scopeMetrics.metrics) { const metricName = metric.descriptor.name; - if (isRetryMetric(metric)) { + if (isIntegerMetric(metric)) { for (const dataPoint of metric.dataPoints) { // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; @@ -130,7 +133,13 @@ export function metricsToRequest(exportArgs: ExportInput) { app_profile: allAttributes.appProfileId, client_name: allAttributes.clientName, method: allAttributes.methodName, - status: allAttributes.finalOperationStatus.toString(), + status: + ( + allAttributes as OnAttemptAttribute + ).attemptStatus?.toString() ?? + ( + allAttributes as OnOperationAttribute + ).finalOperationStatus?.toString(), client_uid: allAttributes.clientUid, }, streaming ? {streaming} : null diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index cd896f6e9..70d8b1d35 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -195,14 +195,11 @@ export class GCPMetricsHandler }, } ), - connectivityErrorCount: meter.createHistogram( + connectivityErrorCount: meter.createCounter( 'bigtable.googleapis.com/internal/client/connectivity_error_count', { description: "The number of requests that failed to reach Google's network. In normal cases, this number is 0. 
When the number is not 0, it can indicate connectivity issues between the application and the Google network.", - advice: { - explicitBucketBoundaries: latencyBuckets, - }, } ), clientBlockingLatencies: meter.createHistogram( @@ -279,16 +276,13 @@ export class GCPMetricsHandler streamingOperation: data.streamingOperation, clientName: data.clientName, }); - this.otelMetrics?.connectivityErrorCount.record( - data.connectivityErrorCount, - { - appProfileId: data.metricsCollectorData.appProfileId, - methodName: data.metricsCollectorData.methodName, - clientUid: data.metricsCollectorData.clientUid, - attemptStatus: data.attemptStatus, - clientName: data.clientName, - } - ); + this.otelMetrics?.connectivityErrorCount.add(data.connectivityErrorCount, { + appProfileId: data.metricsCollectorData.appProfileId, + methodName: data.metricsCollectorData.methodName, + clientUid: data.metricsCollectorData.clientUid, + attemptStatus: data.attemptStatus, + clientName: data.clientName, + }); this.otelMetrics?.serverLatencies.record(data.serverLatency, { appProfileId: data.metricsCollectorData.appProfileId, methodName: data.metricsCollectorData.methodName, diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index ea5ff41e9..25ff2ac51 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -1,5 +1,7 @@ export const RETRY_COUNT_NAME = 'bigtable.googleapis.com/internal/client/retry_count'; +export const CONNECTIIVTY_ERROR_COUNT = + 'bigtable.googleapis.com/internal/client/connectivity_error_count'; export const expectedOtelExportConvertedValue = { name: 'projects/my-project', @@ -650,8 +652,7 @@ export const expectedOtelExportConvertedValue = { zone: 'us-west1-c', }, }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', + valueType: 'INT64', points: [ { interval: { @@ -663,68 +664,10 @@ export const expectedOtelExportConvertedValue = { }, }, value: { - distributionValue: { - count: '1', - mean: 0, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, - ], - }, - }, - bucketCounts: [ - '1', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - ], - }, + int64Value: 1, }, }, ], - unit: 'ms', }, { metric: { @@ -747,8 +690,7 @@ export const expectedOtelExportConvertedValue = { zone: 'us-west1-c', }, }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', + valueType: 'INT64', points: [ { interval: { @@ -760,68 +702,10 @@ export const expectedOtelExportConvertedValue = { }, }, value: { - distributionValue: { - count: '1', - mean: 0, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, - ], - }, - }, - bucketCounts: [ - '1', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - 
'0', - '0', - '0', - ], - }, + int64Value: 1, }, }, ], - unit: 'ms', }, ], }; @@ -1191,25 +1075,7 @@ export const expectedOtelExportInput = { }, startTime: [123, 789], endTime: [456, 789], - value: { - min: 0, - max: 0, - sum: 0, - buckets: { - boundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, - ], - counts: [ - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, - ], - }, - count: 1, - }, + value: 1, }, { attributes: { @@ -1220,25 +1086,7 @@ export const expectedOtelExportInput = { }, startTime: [123, 789], endTime: [456, 789], - value: { - min: 0, - max: 0, - sum: 0, - buckets: { - boundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, - ], - counts: [ - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, - ], - }, - count: 1, - }, + value: 1, }, ], }, From 54ac764485c31e3de68bcb4b660f9713f7c4dd64 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 14:21:28 -0500 Subject: [PATCH 200/448] Correct the fixtures --- test-common/expected-otel-export-input.ts | 22 +++++++----------- test/metrics-collector/gcp-metrics-handler.ts | 23 +++++++++++++++++-- test/metrics-collector/metricsToRequest.ts | 2 +- 3 files changed, 30 insertions(+), 17 deletions(-) diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 25ff2ac51..6dda2ca70 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -664,7 +664,7 @@ export const expectedOtelExportConvertedValue = { }, }, value: { - int64Value: 1, + int64Value: 0, }, }, ], @@ -702,7 +702,7 @@ export const expectedOtelExportConvertedValue = { }, }, value: { - int64Value: 1, + int64Value: 0, }, }, ], @@ -1049,22 +1049,16 @@ export const expectedOtelExportInput = { { descriptor: { name: 'bigtable.googleapis.com/internal/client/connectivity_error_count', - type: 'HISTOGRAM', + type: 'COUNTER', description: "The number of requests that failed to reach Google's network. In normal cases, this number is 0. 
When the number is not 0, it can indicate connectivity issues between the application and the Google network.", unit: '', valueType: 1, - advice: { - explicitBucketBoundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, - 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, - 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, - 100000, - ], - }, + advice: {}, }, aggregationTemporality: 1, - dataPointType: 0, + dataPointType: 3, + isMonotonic: true, dataPoints: [ { attributes: { @@ -1075,7 +1069,7 @@ export const expectedOtelExportInput = { }, startTime: [123, 789], endTime: [456, 789], - value: 1, + value: 0, }, { attributes: { @@ -1086,7 +1080,7 @@ export const expectedOtelExportInput = { }, startTime: [123, 789], endTime: [456, 789], - value: 1, + value: 0, }, ], }, diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index bf2ddb8d8..0fab8c718 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -19,8 +19,8 @@ import { import * as assert from 'assert'; import {replaceTimestamps} from '../../test-common/replace-timestamps'; -describe.only('Bigtable/GCPMetricsHandler', () => { - it('Should export a value ready for sending to the CloudMonitoringExporter', done => { +describe('Bigtable/GCPMetricsHandler', () => { + it.only('Should export a value ready for sending to the CloudMonitoringExporter', done => { (async () => { /* We need to create a timeout here because if we don't then mocha shuts down @@ -40,6 +40,25 @@ describe.only('Bigtable/GCPMetricsHandler', () => { [123, 789], [456, 789] ); + const parsedExportInput = JSON.parse(JSON.stringify(metrics)); + assert.deepStrictEqual( + (parsedExportInput as ExportInput).scopeMetrics[0].metrics.length, + expectedOtelExportInput.scopeMetrics[0].metrics.length + ); + for ( + let index = 0; + index < + (parsedExportInput as ExportInput).scopeMetrics[0].metrics.length; + index++ + ) { + // We need to compare pointwise because mocha truncates to an 8192 character limit. 
+ assert.deepStrictEqual( + (parsedExportInput as ExportInput).scopeMetrics[0].metrics[ + index + ], + expectedOtelExportInput.scopeMetrics[0].metrics[index] + ); + } assert.deepStrictEqual( JSON.parse(JSON.stringify(metrics)), expectedOtelExportInput diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index c26ad0085..0fed23dc1 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -160,7 +160,7 @@ describe('Bigtable/metricsToRequest', () => { assert.deepStrictEqual(actualRequest, expectedRequest); }); */ - it.only('Converts an otel request to a request ready for the metric service client', () => { + it('Converts an otel request to a request ready for the metric service client', () => { const convertedValue = metricsToRequest(expectedOtelExportInput); assert.deepStrictEqual( convertedValue.timeSeries.length, From eb8f14b95cf7aee72cee8b20fbc5f2d2145f321e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 14:33:11 -0500 Subject: [PATCH 201/448] Eliminate tests using the old export input fixture --- system-test/cloud-monitoring-exporter.ts | 36 +---- test-common/export-input-fixture.ts | 166 --------------------- test/metrics-collector/metricsToRequest.ts | 153 ------------------- 3 files changed, 2 insertions(+), 353 deletions(-) diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts index c8464457e..0023898a7 100644 --- a/system-test/cloud-monitoring-exporter.ts +++ b/system-test/cloud-monitoring-exporter.ts @@ -3,11 +3,7 @@ import { CloudMonitoringExporter, ExportResult, } from '../src/client-side-metrics/exporter'; -import { - exportInput, - fakeEndTime, - fakeStartTime, -} from '../test-common/export-input-fixture'; +import {fakeEndTime, fakeStartTime} from '../test-common/export-input-fixture'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import {Bigtable} from '../src'; import * as assert from 'assert'; @@ -15,38 +11,10 @@ import {expectedOtelExportInput} from '../test-common/expected-otel-export-input import {replaceTimestamps} from '../test-common/replace-timestamps'; describe('Bigtable/CloudMonitoringExporter', () => { - it('exports client side metrics to cloud monitoring', done => { + it('Should send an otel exported value to the CloudMonitoringExporter', done => { // When this test is run, metrics should be visible at the following link: // https://pantheon.corp.google.com/monitoring/metrics-explorer;duration=PT1H?inv=1&invt=Abo9_A&project={projectId} // This test will add metrics so that they are available in Pantheon - (async () => { - const bigtable = new Bigtable(); - const projectId: string = await new Promise((resolve, reject) => { - bigtable.getProjectId_((err, projectId) => { - if (err) { - reject(err); - } else { - resolve(projectId as string); - } - }); - }); - const transformedExportInput = JSON.parse( - JSON.stringify(exportInput).replace(/some-project/g, projectId) - ); - const exporter = new CloudMonitoringExporter(); - exporter.export( - transformedExportInput as unknown as ResourceMetrics, - (result: {code: number}) => { - if (result.code === 0) { - done(); - } else { - done(result.code); - } - } - ); - })(); - }); - it.only('Should send an otel exported value to the CloudMonitoringExporter', done => { (async () => { const resultCallback: (result: ExportResult) => void = ( result: ExportResult diff --git a/test-common/export-input-fixture.ts b/test-common/export-input-fixture.ts index 
17717aea1..43b73980b 100644 --- a/test-common/export-input-fixture.ts +++ b/test-common/export-input-fixture.ts @@ -14,169 +14,3 @@ export const fakeStartTime = Math.floor(Date.now() / 1000) - 2000; export const fakeEndTime = fakeStartTime + 1000; - -export const exportInput = { - resource: { - _attributes: { - 'service.name': 'Cloud Bigtable Table', - 'telemetry.sdk.language': 'nodejs', - 'telemetry.sdk.name': 'opentelemetry', - 'telemetry.sdk.version': '1.30.0', - 'cloud.provider': 'gcp', - 'cloud.platform': 'gce_instance', - 'cloud.resource_manager.project_id': 'some-project', - 'monitored_resource.type': 'bigtable_client_raw', - }, - asyncAttributesPending: false, - _syncAttributes: { - 'service.name': 'Cloud Bigtable Table', - 'telemetry.sdk.language': 'nodejs', - 'telemetry.sdk.name': 'opentelemetry', - 'telemetry.sdk.version': '1.30.0', - 'cloud.provider': 'gcp', - 'cloud.platform': 'gce_instance', - 'cloud.resource_manager.project_id': 'some-project', - 'monitored_resource.type': 'bigtable_client_raw', - }, - _asyncAttributesPromise: {}, - }, - scopeMetrics: [ - { - scope: { - name: 'bigtable.googleapis.com', - version: '', - }, - metrics: [ - { - descriptor: { - name: 'bigtable.googleapis.com/internal/client/operation_latencies', - type: 'HISTOGRAM', - description: - "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", - unit: '', - valueType: 1, - advice: {}, - }, - aggregationTemporality: 1, - dataPointType: 0, - dataPoints: [ - { - attributes: { - appProfileId: 'fake-app-profile-id', - finalOperationStatus: 0, - streamingOperation: 'true', - clientName: 'nodejs-bigtable/5.1.2', - projectId: 'some-project', - metricsCollectorData: { - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', - }, - }, - startTime: [fakeStartTime, 951000000], - endTime: [fakeEndTime, 948000000], - value: { - min: 76, - max: 1337, - sum: 11979, - buckets: { - boundaries: [ - 0, // 1 - 0.01, // 2 - 0.05, // 3 - 0.1, // 4 - 0.3, // 5 - 0.6, // 6 - 0.8, // 7 - 1, // 8 - 2, // 9 - 3, // 10 - 4, // 11 - 5, // 12 - 6, // 13 - 8, // 14 - 10, // 15 - 13, // 16 - 16, // 17 - 20, // 18 - 25, // 19 - 30, // 20 - 40, // 21 - 50, // 22 - 65, // 23 - 80, // 24 - 100, // 25 - 130, // 26 - 160, // 27 - 200, // 28 - 250, // 29 - 300, // 30 - 400, // 31 - 500, // 32 - 650, // 33 - 800, // 34 - 1000, // 35 - 2000, // 36 - 5000, // 37 - 10000, // 38 - 20000, // 39 - 50000, // 40 - 100000, // 41 - ], - counts: [ - 0, //1 - 0, //2 - 0, //3 - 0, //4 - 0, //5 - 0, //6 - 0, //7 - 0, //8 - 0, //9 - 0, //10 - 0, //11 - 0, //12 - 0, //13 - 0, //14 - 0, //15 - 0, //16 - 0, //17 - 0, //18 - 0, //19 - 0, //20 - 0, //21 - 0, //22 - 0, //23 - 0, //24 - 1, //25 - 0, //26 - 0, //27 - 0, //28 - 0, //29 - 0, //30 - 0, //31 - 0, //32 - 0, //33 - 0, //34 - 0, //35 - 0, //36 - 0, //37 - 0, //38 - 0, //39 - 0, //40 - 0, //41 - 0, //42 - ], - }, - count: 1, - }, - }, - ], - }, - ], - }, - ], -}; diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index 0fed23dc1..9832cf0e4 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -1,165 +1,12 @@ import * as assert from 'assert'; import {describe} from 'mocha'; -import { - exportInput, - fakeEndTime, - fakeStartTime, -} from 
'../../test-common/export-input-fixture'; import {metricsToRequest} from '../../src/client-side-metrics/exporter'; import { expectedOtelExportConvertedValue, expectedOtelExportInput, } from '../../test-common/expected-otel-export-input'; -export const expectedRequest = { - name: 'projects/some-project', - timeSeries: [ - { - metric: { - type: 'bigtable.googleapis.com/internal/client/operation_latencies', - labels: { - app_profile: 'fake-app-profile-id', - client_name: 'nodejs-bigtable/5.1.2', - client_uid: 'fake-uuid', - method: 'Bigtable.ReadRows', - status: '0', - streaming: 'true', - }, - }, - resource: { - type: 'bigtable_client_raw', - labels: { - cluster: 'fake-cluster3', - instance: 'fakeInstanceId', - project_id: 'some-project', - table: 'fakeTableId', - zone: 'us-west1-c', - }, - }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: fakeEndTime, - }, - startTime: { - seconds: fakeStartTime, - }, - }, - value: { - distributionValue: { - count: '1', - mean: 11979, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, // 1 - 0.01, // 2 - 0.05, // 3 - 0.1, // 4 - 0.3, // 5 - 0.6, // 6 - 0.8, // 7 - 1, // 8 - 2, // 9 - 3, // 10 - 4, // 11 - 5, // 12 - 6, // 13 - 8, // 14 - 10, // 15 - 13, // 16 - 16, // 17 - 20, // 18 - 25, // 19 - 30, // 20 - 40, // 21 - 50, // 22 - 65, // 23 - 80, // 24 - 100, // 25 - 130, // 26 - 160, // 27 - 200, // 28 - 250, // 29 - 300, // 30 - 400, // 31 - 500, // 32 - 650, // 33 - 800, // 34 - 1000, // 35 - 2000, // 36 - 5000, // 37 - 10000, // 38 - 20000, // 39 - 50000, // 40 - 100000, // 41 - ], - }, - }, - bucketCounts: [ - '0', //1 - '0', //2 - '0', //3 - '0', //4 - '0', //5 - '0', //6 - '0', //7 - '0', //8 - '0', //9 - '0', //10 - '0', //11 - '0', //12 - '0', //13 - '0', //14 - '0', //15 - '0', //16 - '0', //17 - '0', //18 - '0', //19 - '0', //20 - '0', //21 - '0', //22 - '0', //23 - '0', //24 - '1', //25 - '0', //26 - '0', //27 - '0', //28 - '0', //29 - '0', //30 - '0', //31 - '0', //32 - '0', //33 - '0', //34 - '0', //35 - '0', //36 - '0', //37 - '0', //38 - '0', //39 - '0', //40 - '0', //41 - '0', //42 - ], - }, - }, - }, - ], - unit: 'ms', - }, - ], -}; - -// TODO: Generate the export code describe('Bigtable/metricsToRequest', () => { - /* - it('Converts a counter and a histogram to the cloud monitoring format', () => { - const actualRequest = metricsToRequest(exportInput); - assert.deepStrictEqual(actualRequest, expectedRequest); - }); - */ it('Converts an otel request to a request ready for the metric service client', () => { const convertedValue = metricsToRequest(expectedOtelExportInput); assert.deepStrictEqual( From 6ecb1a6bb5757d00bce489e7ecc09547d3f08579 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 14:36:01 -0500 Subject: [PATCH 202/448] Add headers --- system-test/cloud-monitoring-exporter.ts | 14 ++++++++++++++ system-test/gcp-metrics-handler.ts | 14 ++++++++++++++ test-common/expected-otel-export-input.ts | 14 ++++++++++++++ test-common/replace-timestamps.ts | 14 ++++++++++++++ test/metrics-collector/gcp-metrics-handler.ts | 14 ++++++++++++++ test/metrics-collector/metricsToRequest.ts | 14 ++++++++++++++ 6 files changed, 84 insertions(+) diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts index 0023898a7..5116e6b8f 100644 --- a/system-test/cloud-monitoring-exporter.ts +++ b/system-test/cloud-monitoring-exporter.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 
(the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import {describe} from 'mocha'; import { CloudMonitoringExporter, diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 07b08e36f..abbc55820 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import {describe} from 'mocha'; import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler'; import {expectedRequestsHandled} from '../test-common/metrics-handler-fixture'; diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 6dda2ca70..6f3953e62 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + export const RETRY_COUNT_NAME = 'bigtable.googleapis.com/internal/client/retry_count'; export const CONNECTIIVTY_ERROR_COUNT = diff --git a/test-common/replace-timestamps.ts b/test-common/replace-timestamps.ts index ea081cd81..43e5c6c77 100644 --- a/test-common/replace-timestamps.ts +++ b/test-common/replace-timestamps.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ import {expectedOtelExportInput} from './expected-otel-export-input'; export function replaceTimestamps( diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 0fab8c718..e2d3abd75 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import {describe} from 'mocha'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import { diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index 9832cf0e4..e77917d79 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
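+
+// The conversion exercised in this file follows the shape asserted earlier in
+// this series; a minimal sketch using the test-common fixtures:
+//
+//   const converted = metricsToRequest(expectedOtelExportInput);
+//   assert.deepStrictEqual(converted, expectedOtelExportConvertedValue);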
+
 import * as assert from 'assert';
 import {describe} from 'mocha';
 import {metricsToRequest} from '../../src/client-side-metrics/exporter';

From fa0a56e34edb65e502cc19b836db1804d2b1175c Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Fri, 21 Feb 2025 14:37:51 -0500
Subject: [PATCH 203/448] run linter

---
 src/client-side-metrics/exporter.ts | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts
index 7a76366a4..b224afd4d 100644
--- a/src/client-side-metrics/exporter.ts
+++ b/src/client-side-metrics/exporter.ts
@@ -18,7 +18,10 @@ import {ServiceError} from 'google-gax';
 import {MetricServiceClient} from '@google-cloud/monitoring';
 import {google} from '@google-cloud/monitoring/build/protos/protos';
 import ICreateTimeSeriesRequest = google.monitoring.v3.ICreateTimeSeriesRequest;
-import {CONNECTIIVTY_ERROR_COUNT, RETRY_COUNT_NAME} from '../../test-common/expected-otel-export-input';
+import {
+  CONNECTIIVTY_ERROR_COUNT,
+  RETRY_COUNT_NAME,
+} from '../../test-common/expected-otel-export-input';
 
 export interface ExportResult {
   code: number;

From fcef83d5dd5504aea02f7099eb3a14799732c59d Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Fri, 21 Feb 2025 14:55:45 -0500
Subject: [PATCH 204/448] Modify the gcp-metrics-handler and test the process
 end to end

---
 src/client-side-metrics/exporter.ts |  1 +
 system-test/gcp-metrics-handler.ts  | 77 ++++++++++++++++++++++-------
 2 files changed, 59 insertions(+), 19 deletions(-)

diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts
index b224afd4d..dd1104276 100644
--- a/src/client-side-metrics/exporter.ts
+++ b/src/client-side-metrics/exporter.ts
@@ -263,6 +263,7 @@ export class CloudMonitoringExporter extends MetricExporter {
         await this.monitoringClient.createTimeSeries(
           request as ICreateTimeSeriesRequest
         );
+        // {code: 0} is typically the format the callback expects in the super class.
         const exportResult = {code: 0};
         resultCallback(exportResult);
       } catch (error) {
diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts
index abbc55820..9d04d4258 100644
--- a/system-test/gcp-metrics-handler.ts
+++ b/system-test/gcp-metrics-handler.ts
@@ -19,28 +19,67 @@ import {
   OnAttemptCompleteData,
   OnOperationCompleteData,
 } from '../src/client-side-metrics/metrics-handler';
-import {CloudMonitoringExporter} from '../src/client-side-metrics/exporter';
+import {
+  CloudMonitoringExporter,
+  ExportResult,
+} from '../src/client-side-metrics/exporter';
+import {Bigtable} from '../src';
+import {ResourceMetrics} from '@opentelemetry/sdk-metrics';
+import * as assert from 'assert';
 
-// TODO: Test that calls export.
-// TODO: Test whole process.
-describe('Bigtable/GCPMetricsHandler', () => {
+describe.only('Bigtable/GCPMetricsHandler', () => {
   it('Should export a value to the CloudMonitoringExporter', done => {
-    /*
-    We need to create a timeout here because if we don't then mocha shuts down
-    the test as it is sleeping before the GCPMetricsHandler has a chance to
-    export the data.
-    */
-    const timeout = setTimeout(() => {}, 30000);
-    const handler = new GCPMetricsHandler(
-      new CloudMonitoringExporter({projectId: 'cloud-native-db-dpes-shared'})
-    );
+    (async () => {
+      /*
+      We need to create a timeout here because if we don't then mocha shuts down
+      the test as it is sleeping before the GCPMetricsHandler has a chance to
+      export the data.
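+      (A no-op setTimeout like the one below keeps the Node.js event loop
+      alive, so the process stays up long enough for the periodic exporter
+      to get a chance to fire.)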
+ */ + const timeout = setTimeout(() => {}, 30000); + const testResultCallback: (result: ExportResult) => void = ( + result: ExportResult + ) => { + try { + clearTimeout(timeout); + assert.deepStrictEqual(result, {code: 0}); + done(); + } catch (error) { + done(error); + } + }; + class MockExporter extends CloudMonitoringExporter { + export( + metrics: ResourceMetrics, + resultCallback: (result: ExportResult) => void + ): void { + super.export(metrics, testResultCallback); + } + } - for (const request of expectedRequestsHandled) { - if (request.attemptLatency) { - handler.onAttemptComplete(request as OnAttemptCompleteData); - } else { - handler.onOperationComplete(request as OnOperationCompleteData); + const bigtable = new Bigtable(); + const projectId: string = await new Promise((resolve, reject) => { + bigtable.getProjectId_((err, projectId) => { + if (err) { + reject(err); + } else { + resolve(projectId as string); + } + }); + }); + const handler = new GCPMetricsHandler(new MockExporter({projectId})); + const transformedRequestsHandled = JSON.parse( + JSON.stringify(expectedRequestsHandled).replace( + /my-project/g, + projectId + ) + ); + for (const request of transformedRequestsHandled) { + if (request.attemptLatency) { + handler.onAttemptComplete(request as OnAttemptCompleteData); + } else { + handler.onOperationComplete(request as OnOperationCompleteData); + } } - } + })(); }); }); From cd2efacc08a65666bcd6853b7921668a35a61c2d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 14:57:07 -0500 Subject: [PATCH 205/448] Remove only --- system-test/gcp-metrics-handler.ts | 2 +- test/metrics-collector/gcp-metrics-handler.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 9d04d4258..1e1df78c1 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -27,7 +27,7 @@ import {Bigtable} from '../src'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import * as assert from 'assert'; -describe.only('Bigtable/GCPMetricsHandler', () => { +describe('Bigtable/GCPMetricsHandler', () => { it('Should export a value to the CloudMonitoringExporter', done => { (async () => { /* diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index e2d3abd75..5803ef119 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -34,7 +34,7 @@ import * as assert from 'assert'; import {replaceTimestamps} from '../../test-common/replace-timestamps'; describe('Bigtable/GCPMetricsHandler', () => { - it.only('Should export a value ready for sending to the CloudMonitoringExporter', done => { + it('Should export a value ready for sending to the CloudMonitoringExporter', done => { (async () => { /* We need to create a timeout here because if we don't then mocha shuts down From eba027c21997a559dbc07b50ff64b97521672304 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 15:02:51 -0500 Subject: [PATCH 206/448] Use a fake projectId --- test/metrics-collector/gcp-metrics-handler.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 5803ef119..fd24d33ad 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -105,7 +105,7 @@ describe('Bigtable/GCPMetricsHandler', () => { } const handler 
= new GCPMetricsHandler( - new TestExporter({projectId: 'cloud-native-db-dpes-shared'}) + new TestExporter({projectId: 'some-project'}) ); for (const request of expectedRequestsHandled) { From 5929a9dda86afd894bf60e867eb3a923c2229f1f Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 15:31:53 -0500 Subject: [PATCH 207/448] Only call export once --- .../gcp-metrics-handler.ts | 4 - test/metrics-collector/gcp-metrics-handler.ts | 122 ++++++++++-------- 2 files changed, 68 insertions(+), 58 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 70d8b1d35..c11286d32 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -20,10 +20,6 @@ import { import * as Resources from '@opentelemetry/resources'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; -import { - OnAttemptCompleteAttributes, - OnOperationCompleteAttributes, -} from './client-side-metrics-attributes'; import {View} from '@opentelemetry/sdk-metrics'; const { Aggregation, diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index fd24d33ad..714a2dd17 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -42,64 +42,78 @@ describe('Bigtable/GCPMetricsHandler', () => { export the data. */ const timeout = setTimeout(() => {}, 30000); + /* + The exporter is called every x seconds, but we only want to test the value + it receives once. Since done cannot be called multiple times in mocha, + exporter ensures we only test the value export receives one time. + */ + let exported = false; class TestExporter extends MetricExporter { - async export( + export( metrics: ResourceMetrics, resultCallback: (result: ExportResult) => void - ): Promise { - try { - replaceTimestamps( - metrics as unknown as typeof expectedOtelExportInput, - [123, 789], - [456, 789] - ); - const parsedExportInput = JSON.parse(JSON.stringify(metrics)); - assert.deepStrictEqual( - (parsedExportInput as ExportInput).scopeMetrics[0].metrics.length, - expectedOtelExportInput.scopeMetrics[0].metrics.length - ); - for ( - let index = 0; - index < - (parsedExportInput as ExportInput).scopeMetrics[0].metrics.length; - index++ - ) { - // We need to compare pointwise because mocha truncates to an 8192 character limit. - assert.deepStrictEqual( - (parsedExportInput as ExportInput).scopeMetrics[0].metrics[ - index - ], - expectedOtelExportInput.scopeMetrics[0].metrics[index] - ); - } - assert.deepStrictEqual( - JSON.parse(JSON.stringify(metrics)), - expectedOtelExportInput - ); - const convertedRequest = metricsToRequest( - expectedOtelExportInput as unknown as ExportInput - ); - assert.deepStrictEqual( - convertedRequest.timeSeries.length, - expectedOtelExportConvertedValue.timeSeries.length - ); - for ( - let index = 0; - index < convertedRequest.timeSeries.length; - index++ - ) { - // We need to compare pointwise because mocha truncates to an 8192 character limit. 
- assert.deepStrictEqual( - convertedRequest.timeSeries[index], - expectedOtelExportConvertedValue.timeSeries[index] - ); - } - clearTimeout(timeout); - resultCallback({code: 0}); - done(); - } catch (e) { - done(e); + ): void { + if (!exported) { + exported = true; + (async () => { + try { + replaceTimestamps( + metrics as unknown as typeof expectedOtelExportInput, + [123, 789], + [456, 789] + ); + const parsedExportInput = JSON.parse(JSON.stringify(metrics)); + assert.deepStrictEqual( + (parsedExportInput as ExportInput).scopeMetrics[0].metrics + .length, + expectedOtelExportInput.scopeMetrics[0].metrics.length + ); + for ( + let index = 0; + index < + (parsedExportInput as ExportInput).scopeMetrics[0].metrics + .length; + index++ + ) { + // We need to compare pointwise because mocha truncates to an 8192 character limit. + assert.deepStrictEqual( + (parsedExportInput as ExportInput).scopeMetrics[0].metrics[ + index + ], + expectedOtelExportInput.scopeMetrics[0].metrics[index] + ); + } + assert.deepStrictEqual( + JSON.parse(JSON.stringify(metrics)), + expectedOtelExportInput + ); + const convertedRequest = metricsToRequest( + expectedOtelExportInput as unknown as ExportInput + ); + assert.deepStrictEqual( + convertedRequest.timeSeries.length, + expectedOtelExportConvertedValue.timeSeries.length + ); + for ( + let index = 0; + index < convertedRequest.timeSeries.length; + index++ + ) { + // We need to compare pointwise because mocha truncates to an 8192 character limit. + assert.deepStrictEqual( + convertedRequest.timeSeries[index], + expectedOtelExportConvertedValue.timeSeries[index] + ); + } + clearTimeout(timeout); + await this.shutdown(); + resultCallback({code: 0}); + done(); + } catch (e) { + done(e); + } + })(); } } } From 3b48c8e353b91002a2bbc8e3784c35ca34f2fa6c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 16:01:55 -0500 Subject: [PATCH 208/448] Ensure test suite completes --- test/metrics-collector/gcp-metrics-handler.ts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 714a2dd17..6c4dbada9 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -114,6 +114,10 @@ describe('Bigtable/GCPMetricsHandler', () => { done(e); } })(); + } else { + // The test suite will not complete if unanswered callbacks + // remain on subsequent export calls. 
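+        // Replying with {code: 0} acknowledges the export as successful so
+        // the periodic reader is not left waiting on an unanswered callback.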
+ resultCallback({code: 0}); } } } From 8edc4ab8ce82028daba71981fdd0016255f1ace8 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 16:04:42 -0500 Subject: [PATCH 209/448] Remove shutdown --- test/metrics-collector/gcp-metrics-handler.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 6c4dbada9..633134d7e 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -107,7 +107,6 @@ describe('Bigtable/GCPMetricsHandler', () => { ); } clearTimeout(timeout); - await this.shutdown(); resultCallback({code: 0}); done(); } catch (e) { From 8c9d23f700e024709762a2d8c9e98368a436c0ba Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 16:07:45 -0500 Subject: [PATCH 210/448] remove async --- test/metrics-collector/gcp-metrics-handler.ts | 102 +++++++++--------- 1 file changed, 50 insertions(+), 52 deletions(-) diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 633134d7e..ccad1b1a4 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -56,63 +56,61 @@ describe('Bigtable/GCPMetricsHandler', () => { ): void { if (!exported) { exported = true; - (async () => { - try { - replaceTimestamps( - metrics as unknown as typeof expectedOtelExportInput, - [123, 789], - [456, 789] - ); - const parsedExportInput = JSON.parse(JSON.stringify(metrics)); - assert.deepStrictEqual( - (parsedExportInput as ExportInput).scopeMetrics[0].metrics - .length, - expectedOtelExportInput.scopeMetrics[0].metrics.length - ); - for ( - let index = 0; - index < - (parsedExportInput as ExportInput).scopeMetrics[0].metrics - .length; - index++ - ) { - // We need to compare pointwise because mocha truncates to an 8192 character limit. - assert.deepStrictEqual( - (parsedExportInput as ExportInput).scopeMetrics[0].metrics[ - index - ], - expectedOtelExportInput.scopeMetrics[0].metrics[index] - ); - } + try { + replaceTimestamps( + metrics as unknown as typeof expectedOtelExportInput, + [123, 789], + [456, 789] + ); + const parsedExportInput = JSON.parse(JSON.stringify(metrics)); + assert.deepStrictEqual( + (parsedExportInput as ExportInput).scopeMetrics[0].metrics + .length, + expectedOtelExportInput.scopeMetrics[0].metrics.length + ); + for ( + let index = 0; + index < + (parsedExportInput as ExportInput).scopeMetrics[0].metrics + .length; + index++ + ) { + // We need to compare pointwise because mocha truncates to an 8192 character limit. assert.deepStrictEqual( - JSON.parse(JSON.stringify(metrics)), - expectedOtelExportInput - ); - const convertedRequest = metricsToRequest( - expectedOtelExportInput as unknown as ExportInput + (parsedExportInput as ExportInput).scopeMetrics[0].metrics[ + index + ], + expectedOtelExportInput.scopeMetrics[0].metrics[index] ); + } + assert.deepStrictEqual( + JSON.parse(JSON.stringify(metrics)), + expectedOtelExportInput + ); + const convertedRequest = metricsToRequest( + expectedOtelExportInput as unknown as ExportInput + ); + assert.deepStrictEqual( + convertedRequest.timeSeries.length, + expectedOtelExportConvertedValue.timeSeries.length + ); + for ( + let index = 0; + index < convertedRequest.timeSeries.length; + index++ + ) { + // We need to compare pointwise because mocha truncates to an 8192 character limit. 
assert.deepStrictEqual( - convertedRequest.timeSeries.length, - expectedOtelExportConvertedValue.timeSeries.length + convertedRequest.timeSeries[index], + expectedOtelExportConvertedValue.timeSeries[index] ); - for ( - let index = 0; - index < convertedRequest.timeSeries.length; - index++ - ) { - // We need to compare pointwise because mocha truncates to an 8192 character limit. - assert.deepStrictEqual( - convertedRequest.timeSeries[index], - expectedOtelExportConvertedValue.timeSeries[index] - ); - } - clearTimeout(timeout); - resultCallback({code: 0}); - done(); - } catch (e) { - done(e); } - })(); + clearTimeout(timeout); + resultCallback({code: 0}); + done(); + } catch (e) { + done(e); + } } else { // The test suite will not complete if unanswered callbacks // remain on subsequent export calls. From 7b49f012cf62a127f57ae0b17820a83e13a49fef Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 16:48:28 -0500 Subject: [PATCH 211/448] =?UTF-8?q?Don=E2=80=99t=20export=20the=20data=20t?= =?UTF-8?q?wice?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- system-test/gcp-metrics-handler.ts | 37 +++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 11 deletions(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 1e1df78c1..94cb6afdd 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -36,22 +36,37 @@ describe('Bigtable/GCPMetricsHandler', () => { export the data. */ const timeout = setTimeout(() => {}, 30000); - const testResultCallback: (result: ExportResult) => void = ( - result: ExportResult - ) => { - try { - clearTimeout(timeout); - assert.deepStrictEqual(result, {code: 0}); - done(); - } catch (error) { - done(error); - } - }; + /* + The exporter is called every x seconds, but we only want to test the value + it receives once. Since done cannot be called multiple times in mocha, + exporter ensures we only test the value export receives one time. 
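+      (Without this guard, each later periodic export would invoke done again
+      and mocha would fail with 'done() called multiple times'.)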
+ */ + let exported = false; + function getTestResultCallback( + resultCallback: (result: ExportResult) => void + ) { + return (result: ExportResult) => { + if (!exported) { + exported = true; + try { + clearTimeout(timeout); + assert.deepStrictEqual(result, {code: 0}); + done(); + resultCallback({code: 0}); + } catch (error) { + done(error); + } + } else { + resultCallback({code: 0}); + } + }; + } class MockExporter extends CloudMonitoringExporter { export( metrics: ResourceMetrics, resultCallback: (result: ExportResult) => void ): void { + const testResultCallback = getTestResultCallback(resultCallback); super.export(metrics, testResultCallback); } } From b4f7705769efb8d16f99b8d0cdf7c1d8f8a4a25a Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 24 Feb 2025 13:48:05 -0500 Subject: [PATCH 212/448] Increase the timeout --- test/metrics-collector/gcp-metrics-handler.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index ccad1b1a4..81d595281 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -34,7 +34,8 @@ import * as assert from 'assert'; import {replaceTimestamps} from '../../test-common/replace-timestamps'; describe('Bigtable/GCPMetricsHandler', () => { - it('Should export a value ready for sending to the CloudMonitoringExporter', done => { + it('Should export a value ready for sending to the CloudMonitoringExporter', function (done) { + this.timeout(600000); (async () => { /* We need to create a timeout here because if we don't then mocha shuts down From 2a3245957e10bfaedfb0fb287726d477a183d6c3 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 24 Feb 2025 15:26:00 -0500 Subject: [PATCH 213/448] Use the PushMetricExporter interface --- src/client-side-metrics/gcp-metrics-handler.ts | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index c11286d32..bef66a304 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -19,8 +19,7 @@ import { } from './metrics-handler'; import * as Resources from '@opentelemetry/resources'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; -import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; -import {View} from '@opentelemetry/sdk-metrics'; +import {PushMetricExporter, View} from '@opentelemetry/sdk-metrics'; const { Aggregation, ExplicitBucketHistogramAggregation, @@ -57,14 +56,12 @@ interface MonitoredResourceData { * This handler records metrics such as operation latency, attempt latency, retry count, and more, * associating them with relevant attributes for detailed analysis in Cloud Monitoring. 
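 *
 * A minimal wiring sketch (constructor shape as declared below; any
 * PushMetricExporter can stand in for the Cloud Monitoring exporter):
 *
 *   const handler = new GCPMetricsHandler(new CloudMonitoringExporter());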
*/ -export class GCPMetricsHandler - implements IMetricsHandler -{ +export class GCPMetricsHandler implements IMetricsHandler { private initialized = false; private otelMetrics?: Metrics; - private exporter: T; + private exporter: PushMetricExporter; - constructor(exporter: T) { + constructor(exporter: PushMetricExporter) { this.exporter = exporter; } From e5caa9e9e3c48d755a47248e4060b9c10893454e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 24 Feb 2025 15:31:34 -0500 Subject: [PATCH 214/448] Removed some interfaces that are not used anymore --- .../client-side-metrics-attributes.ts | 41 ------------------- 1 file changed, 41 deletions(-) diff --git a/src/client-side-metrics/client-side-metrics-attributes.ts b/src/client-side-metrics/client-side-metrics-attributes.ts index ffc6dfa44..f5fbf911d 100644 --- a/src/client-side-metrics/client-side-metrics-attributes.ts +++ b/src/client-side-metrics/client-side-metrics-attributes.ts @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -import {grpc} from 'google-gax'; - // The backend is expecting true/false and will fail if other values are provided. // export in open telemetry is expecting string value attributes so we don't use boolean // true/false. @@ -22,45 +20,6 @@ export enum StreamingState { UNARY = 'false', } -type IMetricsCollectorData = { - instanceId: string; - table: string; - cluster?: string; - zone?: string; - appProfileId?: string; - methodName: MethodName; - clientUid: string; -}; - -/** - * Attributes associated with the completion of a Bigtable operation. These - * attributes provide context about the Bigtable environment, the completed - * operation, and its final status. They are used for recording metrics such as - * operation latency, first response latency, and retry count. - */ -export type OnOperationCompleteAttributes = { - projectId: string; - metricsCollectorData: IMetricsCollectorData; - clientName: string; - finalOperationStatus: grpc.status; - streamingOperation: StreamingState; -}; - -/** - * Attributes associated with the completion of a single attempt of a Bigtable - * operation. These attributes provide context about the Bigtable environment, - * the specific attempt, its status, and whether the operation was streaming. They - * are used for recording metrics such as attempt latency, server latency, and - * connectivity errors. - */ -export type OnAttemptCompleteAttributes = { - projectId: string; - metricsCollectorData: IMetricsCollectorData; - clientName: string; - attemptStatus: grpc.status; - streamingOperation: StreamingState; -}; - /** * Represents the names of Bigtable methods. These are used as attributes for * metrics, allowing for differentiation of performance by method. From fc114ffa4ab6d012c17393c0285395cf3a8ca123 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 24 Feb 2025 15:38:30 -0500 Subject: [PATCH 215/448] Update JSdoc --- src/client-side-metrics/gcp-metrics-handler.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index bef66a304..479d7fbc6 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -69,7 +69,10 @@ export class GCPMetricsHandler implements IMetricsHandler { * Initializes the OpenTelemetry metrics instruments if they haven't been already. 
* Creates and registers metric instruments (histograms and counters) for various Bigtable client metrics. * Sets up a MeterProvider and configures a PeriodicExportingMetricReader for exporting metrics to Cloud Monitoring. - * @param {string} [projectId] The Google Cloud project ID. Used for metric export. If not provided, it will attempt to detect it from the environment. + * + * @param {MonitoredResourceData} [data] The data that will be used to set up the monitored resource + * which will be provided to the exporter in every export call. + * */ private initialize(data: MonitoredResourceData) { if (!this.initialized) { From 6fb59441ed1c70ad8e094487d30f5288522b298c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 24 Feb 2025 15:44:43 -0500 Subject: [PATCH 216/448] Move fake start time and fake end time --- system-test/cloud-monitoring-exporter.ts | 5 ++++- test-common/expected-otel-export-input.ts | 3 +++ test-common/export-input-fixture.ts | 16 ---------------- 3 files changed, 7 insertions(+), 17 deletions(-) delete mode 100644 test-common/export-input-fixture.ts diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts index 5116e6b8f..96f0ffe53 100644 --- a/system-test/cloud-monitoring-exporter.ts +++ b/system-test/cloud-monitoring-exporter.ts @@ -17,7 +17,10 @@ import { CloudMonitoringExporter, ExportResult, } from '../src/client-side-metrics/exporter'; -import {fakeEndTime, fakeStartTime} from '../test-common/export-input-fixture'; +import { + fakeEndTime, + fakeStartTime, +} from '../test-common/expected-otel-export-input'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import {Bigtable} from '../src'; import * as assert from 'assert'; diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 6f3953e62..c5cf9ef60 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -17,6 +17,9 @@ export const RETRY_COUNT_NAME = export const CONNECTIIVTY_ERROR_COUNT = 'bigtable.googleapis.com/internal/client/connectivity_error_count'; +export const fakeStartTime = Math.floor(Date.now() / 1000) - 2000; +export const fakeEndTime = fakeStartTime + 1000; + export const expectedOtelExportConvertedValue = { name: 'projects/my-project', timeSeries: [ diff --git a/test-common/export-input-fixture.ts b/test-common/export-input-fixture.ts deleted file mode 100644 index 43b73980b..000000000 --- a/test-common/export-input-fixture.ts +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -export const fakeStartTime = Math.floor(Date.now() / 1000) - 2000; -export const fakeEndTime = fakeStartTime + 1000; From ca6f05e4793a93aae463e8d61a1f71ad9b78bee5 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 24 Feb 2025 15:53:05 -0500 Subject: [PATCH 217/448] Remove the TODO --- src/client-side-metrics/exporter.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index dd1104276..c9d359987 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -258,7 +258,6 @@ export class CloudMonitoringExporter extends MetricExporter { ): void { (async () => { try { - // TODO: Remove casting. const request = metricsToRequest(metrics as unknown as ExportInput); await this.monitoringClient.createTimeSeries( request as ICreateTimeSeriesRequest From 4bec216709f79a60fba1bf4e3ae949e857ce90b6 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 24 Feb 2025 16:16:35 -0500 Subject: [PATCH 218/448] Update documentation --- src/client-side-metrics/gcp-metrics-handler.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 479d7fbc6..d1ddae973 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -252,7 +252,7 @@ export class GCPMetricsHandler implements IMetricsHandler { /** * Records metrics for a completed attempt of a Bigtable operation. - * This method records attempt latency, connectivity error count, server latency, and first response latency, + * This method records attempt latency, connectivity error count, server latency, * along with the provided attributes. * @param {OnAttemptCompleteData} data Data related to the completed attempt. */ From bd4b0acf24c03728c61aa1fe4a084bfaede26145 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 25 Feb 2025 10:06:59 -0500 Subject: [PATCH 219/448] Add additional information to the error reported --- src/client-side-metrics/exporter.ts | 3 +-- src/client-side-metrics/gcp-metrics-handler.ts | 2 +- system-test/cloud-monitoring-exporter.ts | 4 +++- system-test/gcp-metrics-handler.ts | 4 +++- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index c9d359987..73dea8e8e 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -266,8 +266,7 @@ export class CloudMonitoringExporter extends MetricExporter { const exportResult = {code: 0}; resultCallback(exportResult); } catch (error) { - const exportResult = {code: (error as ServiceError).code as number}; - resultCallback(exportResult); + resultCallback(error as ServiceError); } })(); } diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index d1ddae973..95a0863df 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -70,7 +70,7 @@ export class GCPMetricsHandler implements IMetricsHandler { * Creates and registers metric instruments (histograms and counters) for various Bigtable client metrics. * Sets up a MeterProvider and configures a PeriodicExportingMetricReader for exporting metrics to Cloud Monitoring. 
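 *
 * In outline, the wiring amounts to something like the following (the
 * interval value is illustrative, not the handler's exact configuration):
 *
 *   new PeriodicExportingMetricReader({
 *     exporter: this.exporter,
 *     exportIntervalMillis: 60_000,
 *   })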
* - * @param {MonitoredResourceData} [data] The data that will be used to set up the monitored resource + * @param {MonitoredResourceData} data The data that will be used to set up the monitored resource * which will be provided to the exporter in every export call. * */ diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts index 96f0ffe53..4b8298d9d 100644 --- a/system-test/cloud-monitoring-exporter.ts +++ b/system-test/cloud-monitoring-exporter.ts @@ -37,9 +37,11 @@ describe('Bigtable/CloudMonitoringExporter', () => { result: ExportResult ) => { try { - assert.deepStrictEqual(result, {code: 0}); + assert.strictEqual(result.code, 0); done(); } catch (error) { + // Code isn't 0 so report the original error. + done(result); done(error); } }; diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 94cb6afdd..5a5c83f8f 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -50,10 +50,12 @@ describe('Bigtable/GCPMetricsHandler', () => { exported = true; try { clearTimeout(timeout); - assert.deepStrictEqual(result, {code: 0}); + assert.strictEqual(result.code, 0); done(); resultCallback({code: 0}); } catch (error) { + // Code isn't 0 so report the original error. + done(result); done(error); } } else { From c1916149c0ecef99d907ae9dcb942490a92efdf5 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 25 Feb 2025 11:36:59 -0500 Subject: [PATCH 220/448] Move start time and end time --- system-test/cloud-monitoring-exporter.ts | 9 ++++----- test-common/expected-otel-export-input.ts | 3 --- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts index 4b8298d9d..a99f4c2a1 100644 --- a/system-test/cloud-monitoring-exporter.ts +++ b/system-test/cloud-monitoring-exporter.ts @@ -17,18 +17,17 @@ import { CloudMonitoringExporter, ExportResult, } from '../src/client-side-metrics/exporter'; -import { - fakeEndTime, - fakeStartTime, -} from '../test-common/expected-otel-export-input'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import {Bigtable} from '../src'; import * as assert from 'assert'; import {expectedOtelExportInput} from '../test-common/expected-otel-export-input'; import {replaceTimestamps} from '../test-common/replace-timestamps'; -describe('Bigtable/CloudMonitoringExporter', () => { +describe.only('Bigtable/CloudMonitoringExporter', () => { it('Should send an otel exported value to the CloudMonitoringExporter', done => { + // TODO: In this test make sure the start time and end time are increasing? 
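+    // (Cloud Monitoring rejects points written out of chronological order for
+    // a given time series, which appears to be the failure mode the TODO
+    // above is about; the fixed offsets below keep startTime before endTime.)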
+ const fakeStartTime = Math.floor(Date.now() / 1000) - 2000; + const fakeEndTime = fakeStartTime + 1000; // When this test is run, metrics should be visible at the following link: // https://pantheon.corp.google.com/monitoring/metrics-explorer;duration=PT1H?inv=1&invt=Abo9_A&project={projectId} // This test will add metrics so that they are available in Pantheon diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index c5cf9ef60..6f3953e62 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -17,9 +17,6 @@ export const RETRY_COUNT_NAME = export const CONNECTIIVTY_ERROR_COUNT = 'bigtable.googleapis.com/internal/client/connectivity_error_count'; -export const fakeStartTime = Math.floor(Date.now() / 1000) - 2000; -export const fakeEndTime = fakeStartTime + 1000; - export const expectedOtelExportConvertedValue = { name: 'projects/my-project', timeSeries: [ From 86be1ea6c64cf9dd86fa66173bdf9d32d4628f8e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 25 Feb 2025 11:58:22 -0500 Subject: [PATCH 221/448] Try to use timestamps in order --- system-test/cloud-monitoring-exporter.ts | 8 +++----- test-common/replace-timestamps.ts | 19 +++++++++++++++++++ 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts index a99f4c2a1..b1ca3ecfd 100644 --- a/system-test/cloud-monitoring-exporter.ts +++ b/system-test/cloud-monitoring-exporter.ts @@ -21,7 +21,7 @@ import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import {Bigtable} from '../src'; import * as assert from 'assert'; import {expectedOtelExportInput} from '../test-common/expected-otel-export-input'; -import {replaceTimestamps} from '../test-common/replace-timestamps'; +import {addFakeRecentTimestamps} from '../test-common/replace-timestamps'; describe.only('Bigtable/CloudMonitoringExporter', () => { it('Should send an otel exported value to the CloudMonitoringExporter', done => { @@ -60,10 +60,8 @@ describe.only('Bigtable/CloudMonitoringExporter', () => { projectId ) ); - replaceTimestamps( - transformedExportInput as unknown as typeof expectedOtelExportInput, - [fakeStartTime, 0], - [fakeEndTime, 0] + addFakeRecentTimestamps( + transformedExportInput as unknown as typeof expectedOtelExportInput ); const exporter = new CloudMonitoringExporter(); exporter.export( diff --git a/test-common/replace-timestamps.ts b/test-common/replace-timestamps.ts index 43e5c6c77..eedba6148 100644 --- a/test-common/replace-timestamps.ts +++ b/test-common/replace-timestamps.ts @@ -14,6 +14,8 @@ import {expectedOtelExportInput} from './expected-otel-export-input'; +// TODO: Move these methods into their respective modules or inline. + export function replaceTimestamps( request: typeof expectedOtelExportInput, newStartTime: [number, number], @@ -28,3 +30,20 @@ export function replaceTimestamps( }); }); } + +export function addFakeRecentTimestamps( + request: typeof expectedOtelExportInput +) { + // TODO: Reference the error here. 
+ let latestTime = Math.floor(Date.now() / 1000) - 2000; + [...request.scopeMetrics].reverse().forEach(scopeMetric => { + [...scopeMetric.metrics].reverse().forEach(metric => { + [...metric.dataPoints].reverse().forEach(dataPoint => { + dataPoint.endTime = [latestTime, 0]; + latestTime -= 1000; + dataPoint.startTime = [latestTime, 0]; + latestTime -= 1000; + }); + }); + }); +} From 3b0f0812f197ed3c5bf025ba9352fdc70d6b07fa Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 25 Feb 2025 12:57:35 -0500 Subject: [PATCH 222/448] Reduce timestamp delay --- test-common/replace-timestamps.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test-common/replace-timestamps.ts b/test-common/replace-timestamps.ts index eedba6148..4652a34bb 100644 --- a/test-common/replace-timestamps.ts +++ b/test-common/replace-timestamps.ts @@ -35,14 +35,14 @@ export function addFakeRecentTimestamps( request: typeof expectedOtelExportInput ) { // TODO: Reference the error here. - let latestTime = Math.floor(Date.now() / 1000) - 2000; + let latestTime = Math.floor(Date.now() / 1000) - 5; [...request.scopeMetrics].reverse().forEach(scopeMetric => { [...scopeMetric.metrics].reverse().forEach(metric => { [...metric.dataPoints].reverse().forEach(dataPoint => { dataPoint.endTime = [latestTime, 0]; - latestTime -= 1000; + latestTime -= 5; dataPoint.startTime = [latestTime, 0]; - latestTime -= 1000; + latestTime -= 5; }); }); }); From 3ebb9ff2171fba37bea60d962cbc12550126553a Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 25 Feb 2025 12:58:12 -0500 Subject: [PATCH 223/448] Remove only --- system-test/cloud-monitoring-exporter.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts index b1ca3ecfd..29ede051b 100644 --- a/system-test/cloud-monitoring-exporter.ts +++ b/system-test/cloud-monitoring-exporter.ts @@ -23,7 +23,7 @@ import * as assert from 'assert'; import {expectedOtelExportInput} from '../test-common/expected-otel-export-input'; import {addFakeRecentTimestamps} from '../test-common/replace-timestamps'; -describe.only('Bigtable/CloudMonitoringExporter', () => { +describe('Bigtable/CloudMonitoringExporter', () => { it('Should send an otel exported value to the CloudMonitoringExporter', done => { // TODO: In this test make sure the start time and end time are increasing? 
const fakeStartTime = Math.floor(Date.now() / 1000) - 2000; From cf321314ef4944a86b01035b8c011c4a724cfc84 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 25 Feb 2025 13:12:34 -0500 Subject: [PATCH 224/448] Inline addFakeRecentTimestamps --- system-test/cloud-monitoring-exporter.ts | 23 +++++++++++++++-------- test-common/replace-timestamps.ts | 17 ----------------- 2 files changed, 15 insertions(+), 25 deletions(-) diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts index 29ede051b..268ae44a6 100644 --- a/system-test/cloud-monitoring-exporter.ts +++ b/system-test/cloud-monitoring-exporter.ts @@ -21,13 +21,9 @@ import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import {Bigtable} from '../src'; import * as assert from 'assert'; import {expectedOtelExportInput} from '../test-common/expected-otel-export-input'; -import {addFakeRecentTimestamps} from '../test-common/replace-timestamps'; describe('Bigtable/CloudMonitoringExporter', () => { it('Should send an otel exported value to the CloudMonitoringExporter', done => { - // TODO: In this test make sure the start time and end time are increasing? - const fakeStartTime = Math.floor(Date.now() / 1000) - 2000; - const fakeEndTime = fakeStartTime + 1000; // When this test is run, metrics should be visible at the following link: // https://pantheon.corp.google.com/monitoring/metrics-explorer;duration=PT1H?inv=1&invt=Abo9_A&project={projectId} // This test will add metrics so that they are available in Pantheon @@ -59,10 +55,21 @@ describe('Bigtable/CloudMonitoringExporter', () => { /my-project/g, projectId ) - ); - addFakeRecentTimestamps( - transformedExportInput as unknown as typeof expectedOtelExportInput - ); + ) as unknown as typeof expectedOtelExportInput; + { + // This replaces the fake dates in time series with recent dates in the right order. + let latestTime = Math.floor(Date.now() / 1000) - 5; + transformedExportInput.scopeMetrics.reverse().forEach(scopeMetric => { + scopeMetric.metrics.reverse().forEach(metric => { + metric.dataPoints.reverse().forEach(dataPoint => { + dataPoint.endTime = [latestTime, 0]; + latestTime -= 5; + dataPoint.startTime = [latestTime, 0]; + latestTime -= 5; + }); + }); + }); + } const exporter = new CloudMonitoringExporter(); exporter.export( transformedExportInput as unknown as ResourceMetrics, diff --git a/test-common/replace-timestamps.ts b/test-common/replace-timestamps.ts index 4652a34bb..8b54b1aa5 100644 --- a/test-common/replace-timestamps.ts +++ b/test-common/replace-timestamps.ts @@ -30,20 +30,3 @@ export function replaceTimestamps( }); }); } - -export function addFakeRecentTimestamps( - request: typeof expectedOtelExportInput -) { - // TODO: Reference the error here. 
- let latestTime = Math.floor(Date.now() / 1000) - 5; - [...request.scopeMetrics].reverse().forEach(scopeMetric => { - [...scopeMetric.metrics].reverse().forEach(metric => { - [...metric.dataPoints].reverse().forEach(dataPoint => { - dataPoint.endTime = [latestTime, 0]; - latestTime -= 5; - dataPoint.startTime = [latestTime, 0]; - latestTime -= 5; - }); - }); - }); -} From 78a20d4694983a24e4c83aedfcc2b9d50bf95842 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 25 Feb 2025 13:22:26 -0500 Subject: [PATCH 225/448] Move replace timestamps into the only file it is used --- test-common/replace-timestamps.ts | 32 ------------------- test/metrics-collector/gcp-metrics-handler.ts | 16 +++++++++- 2 files changed, 15 insertions(+), 33 deletions(-) delete mode 100644 test-common/replace-timestamps.ts diff --git a/test-common/replace-timestamps.ts b/test-common/replace-timestamps.ts deleted file mode 100644 index 8b54b1aa5..000000000 --- a/test-common/replace-timestamps.ts +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import {expectedOtelExportInput} from './expected-otel-export-input'; - -// TODO: Move these methods into their respective modules or inline. - -export function replaceTimestamps( - request: typeof expectedOtelExportInput, - newStartTime: [number, number], - newEndTime: [number, number] -) { - request.scopeMetrics.forEach(scopeMetric => { - scopeMetric.metrics.forEach(metric => { - metric.dataPoints.forEach(dataPoint => { - dataPoint.startTime = newStartTime; - dataPoint.endTime = newEndTime; - }); - }); - }); -} diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 81d595281..300419992 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -31,7 +31,21 @@ import { expectedOtelExportInput, } from '../../test-common/expected-otel-export-input'; import * as assert from 'assert'; -import {replaceTimestamps} from '../../test-common/replace-timestamps'; + +function replaceTimestamps( + request: typeof expectedOtelExportInput, + newStartTime: [number, number], + newEndTime: [number, number] +) { + request.scopeMetrics.forEach(scopeMetric => { + scopeMetric.metrics.forEach(metric => { + metric.dataPoints.forEach(dataPoint => { + dataPoint.startTime = newStartTime; + dataPoint.endTime = newEndTime; + }); + }); + }); +} describe('Bigtable/GCPMetricsHandler', () => { it('Should export a value ready for sending to the CloudMonitoringExporter', function (done) { From 105b58ba6cbdb36b97bc41d338f4db2704b351a8 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 25 Feb 2025 13:26:00 -0500 Subject: [PATCH 226/448] Fix comment --- system-test/gcp-metrics-handler.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 5a5c83f8f..7ab9951aa 100644 --- a/system-test/gcp-metrics-handler.ts +++ 
b/system-test/gcp-metrics-handler.ts @@ -39,7 +39,7 @@ describe('Bigtable/GCPMetricsHandler', () => { /* The exporter is called every x seconds, but we only want to test the value it receives once. Since done cannot be called multiple times in mocha, - exporter ensures we only test the value export receives one time. + exported variable ensures we only test the value export receives one time. */ let exported = false; function getTestResultCallback( From d4022fd1589f6e1eeb9822fab8c9fc5060aa67c5 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 26 Feb 2025 10:08:33 -0500 Subject: [PATCH 227/448] Rename the metric types --- src/client-side-metrics/exporter.ts | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 73dea8e8e..13d5cd8ed 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -64,7 +64,7 @@ interface Metric { }[]; } -type OtherMetric = Metric< +type DistributionMetric = Metric< OnAttemptAttribute | OnOperationAttribute, { min?: number; @@ -78,7 +78,7 @@ type OtherMetric = Metric< } >; -type RetryMetric = Metric; +type CounterMetric = Metric; export interface ExportInput { resource: { @@ -99,13 +99,13 @@ export interface ExportInput { name: string; version: string; }; - metrics: (RetryMetric | OtherMetric)[]; + metrics: (CounterMetric | DistributionMetric)[]; }[]; } -function isIntegerMetric( - metric: OtherMetric | RetryMetric -): metric is RetryMetric { +function isCounterMetric( + metric: DistributionMetric | CounterMetric +): metric is CounterMetric { return ( metric.descriptor.name === RETRY_COUNT_NAME || metric.descriptor.name === CONNECTIIVTY_ERROR_COUNT @@ -126,7 +126,7 @@ export function metricsToRequest(exportArgs: ExportInput) { for (const scopeMetrics of exportArgs.scopeMetrics) { for (const metric of scopeMetrics.metrics) { const metricName = metric.descriptor.name; - if (isIntegerMetric(metric)) { + if (isCounterMetric(metric)) { for (const dataPoint of metric.dataPoints) { // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; From 7ea28d2e9355a61333d8a5fbcffa2c10a44e1a17 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 26 Feb 2025 11:30:44 -0500 Subject: [PATCH 228/448] Generate documentation for the new classes --- src/client-side-metrics/exporter.ts | 139 +++++++++++++++++++++- test-common/expected-otel-export-input.ts | 16 +++ 2 files changed, 154 insertions(+), 1 deletion(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 13d5cd8ed..466b53a1e 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -27,6 +27,26 @@ export interface ExportResult { code: number; } +/** + * Attributes associated with the completion of a single attempt of a Bigtable + * operation. These attributes provide context about the specific attempt, + * its status, and the method involved. They are used for recording metrics + * such as attempt latency and connectivity errors. + * + * @property methodName - The name of the Bigtable method that was attempted (e.g., + * 'Bigtable.ReadRows', 'Bigtable.MutateRows'). + * @property clientUid - A unique identifier for the client that initiated the + * attempt. + * @property appProfileId - (Optional) The ID of the application profile used for + * the attempt. + * @property attemptStatus - The status code of the attempt. 
A value of `0` + * typically indicates success (grpc.status.OK), while other values indicate + * different types of errors. + * @property streamingOperation - (Optional) Indicates if the operation is a streaming operation. + * Will be "true" or "false" if present. + * @property clientName - The name of the client library making the attempt + * (e.g., 'nodejs-bigtable', 'go-bigtable/1.35.0'). + */ interface OnAttemptAttribute { methodName: string; clientUid: string; @@ -36,6 +56,26 @@ interface OnAttemptAttribute { clientName: string; } +/** + * Attributes associated with the completion of a Bigtable operation. These + * attributes provide context about the operation, its final status, and the + * method involved. They are used for recording metrics such as operation + * latency. + * + * @property methodName - The name of the Bigtable method that was performed + * (e.g., 'Bigtable.ReadRows', 'Bigtable.MutateRows'). + * @property clientUid - A unique identifier for the client that initiated the + * operation. + * @property appProfileId - (Optional) The ID of the application profile used for + * the operation. + * @property finalOperationStatus - The final status code of the operation. A + * value of `0` typically indicates success (grpc.status.OK), while other + * values indicate different types of errors. + * @property streamingOperation - (Optional) Indicates if the operation is a streaming operation. + * Will be "true" or "false" if present. + * @property clientName - The name of the client library performing the operation + * (e.g., 'nodejs-bigtable', 'go-bigtable/1.35.0'). + */ interface OnOperationAttribute { methodName: string; clientUid: string; @@ -45,6 +85,14 @@ interface OnOperationAttribute { clientName: string; } +/** + * Represents a generic metric in the OpenTelemetry format. + * + * This interface describes the structure of a metric, which can represent + * either a counter or a distribution (histogram). It includes the metric's + * descriptor, the type of data it collects, and the actual data points. + * + */ interface Metric { descriptor: { name: string; @@ -64,6 +112,16 @@ interface Metric { }[]; } +/** + * Represents a metric that measures the distribution of values. + * + * Distribution metrics, also known as histograms, are used to track the + * statistical distribution of a set of measurements. They allow you to capture + * not only the count and sum of the measurements but also how they are spread + * across different ranges (buckets). This makes them suitable for tracking + * latencies, sizes, or other metrics where the distribution is important. + * + */ type DistributionMetric = Metric< OnAttemptAttribute | OnOperationAttribute, { @@ -112,6 +170,41 @@ function isCounterMetric( ); } +/** + * Converts OpenTelemetry metrics data into a format suitable for the Google Cloud + * Monitoring API's `createTimeSeries` method. + * + * This function transforms the structured metrics data, including resource and + * metric attributes, data points, and aggregation information, into an object + * that conforms to the expected request format of the Cloud Monitoring API. + * + * @param {ExportInput} exportArgs - The OpenTelemetry metrics data to be converted. This + * object contains resource attributes, scope information, and a list of + * metrics with their associated data points. + * + * @returns An object representing a `CreateTimeSeriesRequest`, ready for sending + * to the Google Cloud Monitoring API. 
This object contains the project name + * and an array of time series data points, formatted for ingestion by + * Cloud Monitoring. + * + * @throws Will throw an error if there are issues converting the data. + * + * @remarks + * The output format is specific to the Cloud Monitoring API and involves + * mapping OpenTelemetry concepts to Cloud Monitoring's data model, including: + * - Mapping resource attributes to resource labels. + * - Mapping metric attributes to metric labels. + * - Handling different metric types (counter, distribution). + * - Converting data points to the correct structure, including start and end + * times, values, and bucket information for distributions. + * + * @example + * const exportInput: ExportInput = { ... }; // Example ExportInput object + * const monitoringRequest = metricsToRequest(exportInput); + * // monitoringRequest can now be used in monitoringClient.createTimeSeries(monitoringRequest) + * + * + */ export function metricsToRequest(exportArgs: ExportInput) { const timeSeriesArray = []; const resourceLabels = { @@ -248,7 +341,51 @@ export function metricsToRequest(exportArgs: ExportInput) { }; } -// TODO: Add test for when the export fails +/** + * A custom OpenTelemetry `MetricExporter` that sends metrics data to Google Cloud + * Monitoring. + * + * This class extends the base `MetricExporter` from `@google-cloud/opentelemetry-cloud-monitoring-exporter` + * and handles the process of converting OpenTelemetry metrics data into the + * format required by the Google Cloud Monitoring API. It uses the + * `MetricServiceClient` to send the data to Google Cloud Monitoring's + * `createTimeSeries` method. + * + * @remarks + * This exporter relies on the `metricsToRequest` function to perform the + * necessary transformation of OpenTelemetry metrics into Cloud Monitoring + * `TimeSeries` data. + * + * The exporter is asynchronous and will not block the calling thread while + * sending metrics. It manages the Google Cloud Monitoring client and handles + * potential errors during the export process. + * + * The class expects the `ResourceMetrics` to have been correctly configured + * and populated with the required resource attributes to correctly identify + * the monitored resource in Cloud Monitoring. + * + * @example + * // Create an instance of the CloudMonitoringExporter + * const exporter = new CloudMonitoringExporter(); + * + * // Use the exporter with a MeterProvider + * const meterProvider = new MeterProvider({ + * resource: new Resource({ + * 'service.name': 'my-service', + * // ... other resource attributes + * }), + * readers: [new PeriodicExportingMetricReader({ + * exporter: exporter, + * exportIntervalMillis: 10000 // Export every 10 seconds + * })] + * }); + * + * // Now start instrumenting your application using the meter + * const meter = meterProvider.getMeter('my-meter'); + * // ... create counters, histograms, etc. + * + * @beta + */ export class CloudMonitoringExporter extends MetricExporter { private monitoringClient = new MetricServiceClient(); diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 6f3953e62..39ea091bb 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -724,6 +724,22 @@ export const expectedOtelExportConvertedValue = { ], }; +/** + * An example of an `ExportInput` object, representing OpenTelemetry metrics + * data in the format expected by the `metricsToRequest` function. 
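+ * (Its converted counterpart, `expectedOtelExportConvertedValue` above,
+ * shows the Cloud Monitoring request that `metricsToRequest` produces from
+ * this fixture.)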
+ * + * This object demonstrates the structure of the input data, including + * resource attributes, scope information, and a collection of metrics + * (both counter and distribution types) with their associated data points. + * + * @remarks + * This structure is designed to be converted into a Google Cloud Monitoring + * `CreateTimeSeriesRequest` using the `metricsToRequest` function. It + * includes various types of metrics that are sent by the Bigtable client + * library, such as operation latencies, attempt latencies, retry counts, + * and server latencies. + * + */ export const expectedOtelExportInput = { resource: { _attributes: { From 7848643509d3bfef4a55f8f3957af01b8e95d775 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 26 Feb 2025 11:45:18 -0500 Subject: [PATCH 229/448] Add documentation for monitored resource --- src/client-side-metrics/gcp-metrics-handler.ts | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 95a0863df..b4681b3f8 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -43,6 +43,18 @@ interface Metrics { clientBlockingLatencies: typeof Histogram; } +/** + * Represents the data associated with a monitored resource in Google Cloud Monitoring. + * + * This interface defines the structure of data that is used to identify and + * describe a specific resource being monitored, such as a Bigtable instance, + * cluster, or table. It is used to construct the `resource` part of a + * `TimeSeries` object in the Cloud Monitoring API. + * + * When an open telemetry instrument is created in the GCPMetricsHandler, all + * recordings to that instrument are expected to have the same + * MonitoredResourceData properties. + */ interface MonitoredResourceData { projectId: string; instanceId: string; From 722917407e6ae255af0017976592e817a5e31b68 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 26 Feb 2025 11:55:03 -0500 Subject: [PATCH 230/448] Generate documentation for the other metrics --- src/client-side-metrics/exporter.ts | 50 +++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 466b53a1e..600c0c8d1 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -136,8 +136,40 @@ type DistributionMetric = Metric< } >; +/** + * Represents a metric that counts the number of occurrences of an event or + * the cumulative value of a quantity over time. + * + * Counter metrics are used to track quantities that increase over time, such + * as the number of requests, errors, or retries. They are always + * non-negative and can only increase or remain constant. + * + */ type CounterMetric = Metric; +/** + * Represents the input data structure for exporting OpenTelemetry metrics. + * + * This interface defines the structure of the object that is passed to the + * `metricsToRequest` function to convert OpenTelemetry metrics into a format + * suitable for the Google Cloud Monitoring API. + * + * It contains information about the monitored resource and an array of + * scope metrics, which include various types of metrics (counters and + * distributions) and their associated data points. + * + * @remarks + * This structure is specifically designed to hold OpenTelemetry metrics data + * as it is exported from the Bigtable client library. 
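+ * (See `expectedOtelExportInput` in test-common/expected-otel-export-input.ts
+ * for a complete fixture of this shape.)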
It represents the data + * before it is transformed into the Cloud Monitoring API's `TimeSeries` + * format. + * + * Each `CounterMetric` and `DistributionMetric` within the `scopeMetrics` + * array represents a different type of measurement, such as retry counts, + * operation latencies, attempt latencies etc. Each metric contains an array of dataPoints + * Each `dataPoint` contains the `attributes`, `startTime`, `endTime` and `value`. + * `value` will be a number for a counter metric and an object for a distribution metric. + */ export interface ExportInput { resource: { _attributes: { @@ -161,6 +193,24 @@ export interface ExportInput { }[]; } +/** + * Type guard function to determine if a given metric is a CounterMetric. + * + * This function checks if a metric is a CounterMetric by inspecting its + * `descriptor.name` property and comparing it against known counter metric + * names. + * + * @param metric - The metric to check. This can be either a + * `DistributionMetric` or a `CounterMetric`. + * @returns `true` if the metric is a `CounterMetric`, `false` otherwise. + * + * @remarks + * This function uses a type guard to narrow down the type of the `metric` + * parameter to `CounterMetric` if it returns `true`. This allows TypeScript + * to perform more precise type checking and provides better code + * completion when working with metrics. + * + */ function isCounterMetric( metric: DistributionMetric | CounterMetric ): metric is CounterMetric { From 7f4e167239356904019778619d72ce6577ab1375 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 26 Feb 2025 12:00:22 -0500 Subject: [PATCH 231/448] Generate documentation for the constructor --- src/client-side-metrics/gcp-metrics-handler.ts | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index b4681b3f8..4a53c2bf4 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -73,6 +73,17 @@ export class GCPMetricsHandler implements IMetricsHandler { private otelMetrics?: Metrics; private exporter: PushMetricExporter; + /** + * The `GCPMetricsHandler` is responsible for managing and recording + * client-side metrics for Google Cloud Bigtable using OpenTelemetry. It + * handles the creation and configuration of various metric instruments + * (histograms and counters) and exports them to Google Cloud Monitoring + * through the provided `PushMetricExporter`. + * + * @param exporter - The `PushMetricExporter` instance to use for exporting + * metrics to Google Cloud Monitoring. This exporter is responsible for + * sending the collected metrics data to the monitoring backend. The provided exporter must be fully configured, for example the projectId must have been set. 
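+   *
+   * @example
+   * // A minimal usage sketch; it assumes credentials and a projectId are
+   * // already configured for the exporter (`CloudMonitoringExporter` is the
+   * // exporter defined in ./exporter):
+   * const handler = new GCPMetricsHandler(new CloudMonitoringExporter());
+   * // The handler can then be passed wherever an IMetricsHandler is
+   * // expected, e.g. in a metricsHandlers list given to the collector.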
+ */ constructor(exporter: PushMetricExporter) { this.exporter = exporter; } From c86196aeab47ab69656f57d5cf33f32f96e768b3 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 26 Feb 2025 13:24:15 -0500 Subject: [PATCH 232/448] Get documentation for replaceTimestamps, fixtures --- test-common/expected-otel-export-input.ts | 11 +++++++++++ test/metrics-collector/gcp-metrics-handler.ts | 9 +++++++++ 2 files changed, 20 insertions(+) diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 39ea091bb..10fc26139 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -17,6 +17,17 @@ export const RETRY_COUNT_NAME = export const CONNECTIIVTY_ERROR_COUNT = 'bigtable.googleapis.com/internal/client/connectivity_error_count'; +/** + * An example of the expected output format after converting + * `expectedOtelExportInput` using the `metricsToRequest` function. + * + * This object represents the data structure that is ready to be sent to the + * Google Cloud Monitoring API's `createTimeSeries` method. It demonstrates + * how OpenTelemetry metrics are transformed into the Cloud Monitoring format, + * including the structure of time series data, metric types, resource labels, + * and data point values. + * + */ export const expectedOtelExportConvertedValue = { name: 'projects/my-project', timeSeries: [ diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 300419992..425851e86 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -32,6 +32,15 @@ import { } from '../../test-common/expected-otel-export-input'; import * as assert from 'assert'; +/** + * Replaces the timestamp values within an `ExportInput` object with + * standardized test values. + * + * This function is designed for testing purposes to make timestamp comparisons + * in tests more predictable and reliable. It recursively traverses the + * `ExportInput` object, finds all `startTime` and `endTime` properties, and + * replaces their numeric values with standardized test values. + */ function replaceTimestamps( request: typeof expectedOtelExportInput, newStartTime: [number, number], From d76fa14e3b1f854d0047d1785667a81e1a6448dd Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 27 Feb 2025 14:41:30 -0500 Subject: [PATCH 233/448] Reduce the interval time --- src/client-side-metrics/gcp-metrics-handler.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 4a53c2bf4..8a418b8aa 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -144,7 +144,7 @@ export class GCPMetricsHandler implements IMetricsHandler { new PeriodicExportingMetricReader({ // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by // Cloud Monitoring. 
- exportIntervalMillis: 10_000, + exportIntervalMillis: 1_000, exporter: this.exporter, }), ], From 2f3b4e5f43cffc8c51d1798bd3a11502f88d2df8 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 27 Feb 2025 14:47:02 -0500 Subject: [PATCH 234/448] Make view list based on latencies --- src/client-side-metrics/gcp-metrics-handler.ts | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 8a418b8aa..c9a4a1235 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -119,10 +119,9 @@ export class GCPMetricsHandler implements IMetricsHandler { new View({ instrumentName: name, name, - aggregation: - name === 'retry_count' - ? Aggregation.Sum() - : new ExplicitBucketHistogramAggregation(latencyBuckets), + aggregation: name.endsWith('latencies') + ? Aggregation.Sum() + : new ExplicitBucketHistogramAggregation(latencyBuckets), }) ); const meterProvider = new MeterProvider({ From 7a4b33ec32e7dafb65c9d2184c4ba46e30b5e750 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 28 Feb 2025 09:58:35 -0500 Subject: [PATCH 235/448] Add a guard for count --- src/client-side-metrics/exporter.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 600c0c8d1..7bdcf8119 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -367,7 +367,9 @@ export function metricsToRequest(exportArgs: ExportInput) { value: { distributionValue: { count: String(dataPoint.value.count), - mean: dataPoint.value.sum / dataPoint.value.count, + mean: dataPoint.value.count + ? dataPoint.value.sum / dataPoint.value.count + : 0, bucketOptions: { explicitBuckets: { bounds: dataPoint.value.buckets.boundaries, From eaf88794ea08558a0aef1523ee206d58a1fe32e5 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 3 Mar 2025 15:54:10 -0500 Subject: [PATCH 236/448] Add a CSM test for Bigtable --- .../client-side-metrics.ts | 143 ++++++++++++++++++ 1 file changed, 143 insertions(+) create mode 100644 system-test/client-side-metrics/client-side-metrics.ts diff --git a/system-test/client-side-metrics/client-side-metrics.ts b/system-test/client-side-metrics/client-side-metrics.ts new file mode 100644 index 000000000..984fe4962 --- /dev/null +++ b/system-test/client-side-metrics/client-side-metrics.ts @@ -0,0 +1,143 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
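+
+// This system test issues repeated getRows calls against a test instance and
+// writes anything recorded on the local Logger below to metricsCollected.txt
+// after a 30 second wait (the TestMetricsCollector wiring is still commented
+// out at this point in the series).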
+ +import {describe, it, before, after} from 'mocha'; +import * as fs from 'node:fs'; +import {Bigtable} from '../../src/index'; +import { + ITabularApiSurface, + OperationMetricsCollector, +} from '../../src/client-side-metrics/operation-metrics-collector'; +import {IMetricsHandler} from '../../src/client-side-metrics/metrics-handler'; +import * as proxyquire from 'proxyquire'; +import {TabularApiSurface} from '../../src/tabular-api-surface'; +import {google} from '../../protos/protos'; + +class Logger { + private messages = ''; + + log(message: string) { + console.log(message); + this.messages = this.messages + message + '\n'; + } + + getMessages() { + return this.messages; + } +} + +const logger = new Logger(); + +/* +class TestMetricsCollector extends OperationMetricsCollector { + constructor( + tabularApiSurface: ITabularApiSurface, + metricsHandlers: IMetricsHandler[], + methodName: MethodName, + projectId?: string + ) { + super( + tabularApiSurface, + metricsHandlers, + methodName, + projectId, + new TestDateProvider(logger) + ); + } +} + */ + +describe.only('Bigtable/MetricsCollector', () => { + /* + const FakeTabularApiSurface = proxyquire('../src/tabular-api-surface.js', { + './client-side-metrics/operation-metrics-collector': { + MetricsCollector: TestMetricsCollector, + }, + }).TabularApiSurface; + const FakeTable: TabularApiSurface = proxyquire('../src/table.js', { + './tabular-api-surface.js': {Table: FakeTabularApiSurface}, + }).Table; + const FakeInstance = proxyquire('../src/instance.js', { + './table.js': {Table: FakeTable}, + }).Instance; + const FakeBigtable = proxyquire('../src/index.js', { + './instance.js': {Table: FakeInstance}, + }).Bigtable; + */ + const bigtable = new Bigtable(); + const instanceId = 'emulator-test-instance'; + const tableId = 'my-table'; + const columnFamilyId = 'cf1'; + + before(async () => { + // TODO: Change `any` + const instance = bigtable.instance(instanceId); + try { + const [instanceInfo] = await instance.exists(); + if (!instanceInfo) { + const [, operation] = await instance.create({ + clusters: { + id: 'fake-cluster3', + location: 'us-west1-c', + nodes: 1, + }, + }); + await operation.promise(); + } + + const table = instance.table(tableId); + const [tableExists] = await table.exists(); + if (!tableExists) { + await table.create({families: [columnFamilyId]}); // Create column family + } else { + // Check if column family exists and create it if not. + const [families] = await table.getFamilies(); + + if ( + !families.some((family: {id: string}) => family.id === columnFamilyId) + ) { + await table.createFamily(columnFamilyId); + } + } + } catch (error) { + console.error('Error during setup:', error); + // Consider re-throwing error, to actually stop tests. 
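+      // e.g. `throw error;` here would fail the suite immediately instead of
+      // letting the tests run against a half-initialized instance.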
+ } + }); + + after(async () => { + const instance = bigtable.instance(instanceId); + await instance.delete({}); + }); + + it('should read rows after inserting data', async () => { + const instance = bigtable.instance(instanceId); + const table = instance.table(tableId); + for (let i = 0; i < 100; i++) { + await table.getRows(); + } + const myString = logger.getMessages(); // 'This is the string I want to write to the file.'; + const filename = 'metricsCollected.txt'; + console.log('waiting'); + await new Promise(resolve => { + setTimeout(async () => { + resolve('value'); + }, 30_000); + }); + console.log('stop waiting'); + + // Write the string to the file + fs.writeFileSync(filename, myString); + }); +}); From 22d25fa36dc072e223a6cb6c14636998c22b373c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 3 Mar 2025 16:12:03 -0500 Subject: [PATCH 237/448] Add handlers to the middleware layer --- src/index.ts | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/index.ts b/src/index.ts index c4c77e822..188254f03 100644 --- a/src/index.ts +++ b/src/index.ts @@ -35,6 +35,8 @@ import * as v2 from './v2'; import {PassThrough, Duplex} from 'stream'; import grpcGcpModule = require('grpc-gcp'); import {ClusterUtils} from './utils/cluster'; +import {IMetricsHandler} from './client-side-metrics/metrics-handler'; +import {GCPMetricsHandler} from './client-side-metrics/gcp-metrics-handler'; // eslint-disable-next-line @typescript-eslint/no-var-requires const streamEvents = require('stream-events'); @@ -100,6 +102,8 @@ export interface BigtableOptions extends gax.GoogleAuthOptions { * Internal only. */ BigtableTableAdminClient?: gax.ClientOptions; + + metricsHandlers?: IMetricsHandler[]; } /** @@ -904,6 +908,8 @@ export class Bigtable { gaxStream .on('error', stream.destroy.bind(stream)) .on('metadata', stream.emit.bind(stream, 'metadata')) + // TODO: Uncomment the next line after client-side metrics are well tested. + .on('status', stream.emit.bind(stream, 'status')) .on('request', stream.emit.bind(stream, 'request')) .pipe(stream); }); From a3068bf461899387140237f9a69f934643943a6e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 3 Mar 2025 16:34:07 -0500 Subject: [PATCH 238/448] Add the metrics to the tabular api surface --- src/tabular-api-surface.ts | 113 ++++++++++++++++++++++++++----------- 1 file changed, 81 insertions(+), 32 deletions(-) diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index a7f86e0a2..e32c493c3 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -12,26 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. 
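+
+// OperationMetricsCollector tracks the lifecycle of each read below:
+// operation start/complete, attempt start/complete, responses received, and
+// the metadata/status events used to derive server-side latencies.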
+import {DefaultDateProvider, OperationMetricsCollector,} from './client-side-metrics/operation-metrics-collector'; import {promisifyAll} from '@google-cloud/promisify'; -import arrify = require('arrify'); import {Instance} from './instance'; import {Mutation} from './mutation'; -import { - AbortableDuplex, - Bigtable, - Entry, - MutateOptions, - SampleRowKeysCallback, - SampleRowsKeysResponse, -} from './index'; -import {Filter, BoundData, RawFilter} from './filter'; +import {AbortableDuplex, Bigtable, Entry, MutateOptions, SampleRowKeysCallback, SampleRowsKeysResponse,} from './index'; +import {BoundData, Filter, RawFilter} from './filter'; import {Row} from './row'; -import { - ChunkPushData, - ChunkPushLastScannedRowData, - ChunkTransformer, - DataEvent, -} from './chunktransformer'; +import {ChunkPushData, ChunkPushLastScannedRowData, ChunkTransformer, DataEvent,} from './chunktransformer'; import {BackoffSettings} from 'google-gax/build/src/gax'; import {google} from '../protos/protos'; import {CallOptions, grpc, ServiceError} from 'google-gax'; @@ -39,6 +27,13 @@ import {Duplex, PassThrough, Transform} from 'stream'; import * as is from 'is'; import {GoogleInnerError} from './table'; import {TableUtils} from './utils/table'; +import {IMetricsHandler} from './client-side-metrics/metrics-handler'; +import {MethodName} from '../common/client-side-metrics-attributes'; +import {StreamingState} from './client-side-metrics/client-side-metrics-attributes'; + +let attemptCounter = 0; + +import arrify = require('arrify'); // See protos/google/rpc/code.proto // (4=DEADLINE_EXCEEDED, 8=RESOURCE_EXHAUSTED, 10=ABORTED, 14=UNAVAILABLE) @@ -210,6 +205,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); * region_tag:bigtable_api_table_readstream */ createReadStream(opts?: GetRowsOptions) { + attemptCounter++; const options = opts || {}; const maxRetries = is.number(this.maxRetries) ? this.maxRetries! : 10; let activeRequestStream: AbortableDuplex | null; @@ -225,11 +221,11 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); rowKeys = options.keys || []; /* - The following line of code sets the timeout if it was provided while - creating the client. This will be used to determine if the client should - retry on DEADLINE_EXCEEDED errors. Eventually, this will be handled - downstream in google-gax. - */ + The following line of code sets the timeout if it was provided while + creating the client. This will be used to determine if the client should + retry on DEADLINE_EXCEEDED errors. Eventually, this will be handled + downstream in google-gax. + */ const timeout = opts?.gaxOptions?.timeout || (this?.bigtable?.options?.BigtableClient?.clientConfig?.interfaces && @@ -286,14 +282,14 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); const row = event; if (TableUtils.lessThanOrEqualTo(row.id, lastRowKey)) { /* - Sometimes duplicate rows reach this point. To avoid delivering - duplicate rows to the user, rows are thrown away if they don't exceed - the last row key. We can expect each row to reach this point and rows - are delivered in order so if the last row key equals or exceeds the - row id then we know data for this row has already reached this point - and been delivered to the user. In this case we want to throw the row - away and we do not want to deliver this row to the user again. - */ + Sometimes duplicate rows reach this point. 
To avoid delivering + duplicate rows to the user, rows are thrown away if they don't exceed + the last row key. We can expect each row to reach this point and rows + are delivered in order so if the last row key equals or exceeds the + row id then we know data for this row has already reached this point + and been delivered to the user. In this case we want to throw the row + away and we do not want to deliver this row to the user again. + */ callback(); return; } @@ -332,14 +328,25 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); } return originalEnd(chunk, encoding, cb); }; - + // this.bigtable.getProjectId_((err, projectId) => { + const metricsCollector = new OperationMetricsCollector( + this, + this.bigtable.options.metricsHandlers as IMetricsHandler[], + MethodName.READ_ROWS, + StreamingState.STREAMING + ); + metricsCollector.onOperationStart(); const makeNewRequest = () => { + metricsCollector.onAttemptStart(); + // Avoid cancelling an expired timer if user // cancelled the stream in the middle of a retry retryTimer = null; // eslint-disable-next-line @typescript-eslint/no-explicit-any - chunkTransformer = new ChunkTransformer({decode: options.decode} as any); + chunkTransformer = new ChunkTransformer({ + decode: options.decode, + } as any); // If the viewName is provided then request will be made for an // authorized view. Otherwise, the request is made for a table. @@ -507,8 +514,26 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); return false; }; + requestStream + .on( + 'metadata', + (metadata: {internalRepr: Map; options: {}}) => { + console.log(`event metadata: ${this.bigtable.projectId}`); + metricsCollector.onMetadataReceived(metadata); + } + ) + .on( + 'status', + (status: { + metadata: {internalRepr: Map; options: {}}; + }) => { + console.log(`event status: ${this.bigtable.projectId}`); + metricsCollector.onStatusReceived(status); + } + ); rowStream .on('error', (error: ServiceError) => { + console.log(`event error: ${this.bigtable.projectId}`); rowStreamUnpipe(rowStream, userStream); activeRequestStream = null; if (IGNORED_STATUS_CODES.has(error.code)) { @@ -532,6 +557,10 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); numConsecutiveErrors, backOffSettings ); + metricsCollector.onAttemptComplete({ + attemptStatus: error.code, + streamingOperation: true, + }); retryTimer = setTimeout(makeNewRequest, nextRetryDelay); } else { if ( @@ -547,21 +576,41 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // error.code = grpc.status.CANCELLED; } + metricsCollector.onAttemptComplete({ + attemptStatus: error.code, + streamingOperation: true, + }); + metricsCollector.onOperationComplete({ + finalOperationStatus: error.code, + streamingOperation: true, + }); userStream.emit('error', error); } }) .on('data', _ => { + console.log(`event data: ${this.bigtable.projectId}`); // Reset error count after a successful read so the backoff // time won't keep increasing when as stream had multiple errors numConsecutiveErrors = 0; + metricsCollector.onResponse(); }) .on('end', () => { + console.log(`event end: ${this.bigtable.projectId}`); + numRequestsMade++; activeRequestStream = null; + metricsCollector.onAttemptComplete({ + attemptStatus: 0, // Grpc OK status + streamingOperation: true, + }); + metricsCollector.onOperationComplete({ + finalOperationStatus: 0, // Grpc OK status + streamingOperation: true, + }); }); rowStreamPipe(rowStream, userStream); }; - makeNewRequest(); + // }); return 
userStream; } From 61187ea76adcca99bca004c49ec86d5642e3580c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 3 Mar 2025 17:29:54 -0500 Subject: [PATCH 239/448] Move the tests --- ...trics.ts => client-side-metrics-to-gcm.ts} | 12 +- system-test/client-side-metrics2.ts | 143 ++++++++++++++++++ 2 files changed, 150 insertions(+), 5 deletions(-) rename system-test/{client-side-metrics/client-side-metrics.ts => client-side-metrics-to-gcm.ts} (92%) create mode 100644 system-test/client-side-metrics2.ts diff --git a/system-test/client-side-metrics/client-side-metrics.ts b/system-test/client-side-metrics-to-gcm.ts similarity index 92% rename from system-test/client-side-metrics/client-side-metrics.ts rename to system-test/client-side-metrics-to-gcm.ts index 984fe4962..624d2fbf4 100644 --- a/system-test/client-side-metrics/client-side-metrics.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -12,17 +12,19 @@ // See the License for the specific language governing permissions and // limitations under the License. +// TODO: Must be put in root folder or will not run + import {describe, it, before, after} from 'mocha'; import * as fs from 'node:fs'; -import {Bigtable} from '../../src/index'; +import {Bigtable} from '../src'; import { ITabularApiSurface, OperationMetricsCollector, -} from '../../src/client-side-metrics/operation-metrics-collector'; -import {IMetricsHandler} from '../../src/client-side-metrics/metrics-handler'; +} from '../src/client-side-metrics/operation-metrics-collector'; +import {IMetricsHandler} from '../src/client-side-metrics/metrics-handler'; import * as proxyquire from 'proxyquire'; -import {TabularApiSurface} from '../../src/tabular-api-surface'; -import {google} from '../../protos/protos'; +import {TabularApiSurface} from '../src/tabular-api-surface'; +import {google} from '../protos/protos'; class Logger { private messages = ''; diff --git a/system-test/client-side-metrics2.ts b/system-test/client-side-metrics2.ts new file mode 100644 index 000000000..b613abd9b --- /dev/null +++ b/system-test/client-side-metrics2.ts @@ -0,0 +1,143 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
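+
+// Kept as a near-verbatim copy of the original logging-based test; the
+// renamed client-side-metrics-to-gcm.ts above is presumably the variant that
+// will exercise the Cloud Monitoring export path.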
+ +import {describe, it, before, after} from 'mocha'; +import * as fs from 'node:fs'; +import {Bigtable} from '../src'; +import { + ITabularApiSurface, + OperationMetricsCollector, +} from '../src/client-side-metrics/operation-metrics-collector'; +import {IMetricsHandler} from '../src/client-side-metrics/metrics-handler'; +import * as proxyquire from 'proxyquire'; +import {TabularApiSurface} from '../src/tabular-api-surface'; +import {google} from '../protos/protos'; + +class Logger { + private messages = ''; + + log(message: string) { + console.log(message); + this.messages = this.messages + message + '\n'; + } + + getMessages() { + return this.messages; + } +} + +const logger = new Logger(); + +/* +class TestMetricsCollector extends OperationMetricsCollector { + constructor( + tabularApiSurface: ITabularApiSurface, + metricsHandlers: IMetricsHandler[], + methodName: MethodName, + projectId?: string + ) { + super( + tabularApiSurface, + metricsHandlers, + methodName, + projectId, + new TestDateProvider(logger) + ); + } +} + */ + +describe('Bigtable/MetricsCollector', () => { + /* + const FakeTabularApiSurface = proxyquire('../src/tabular-api-surface.js', { + './client-side-metrics/operation-metrics-collector': { + MetricsCollector: TestMetricsCollector, + }, + }).TabularApiSurface; + const FakeTable: TabularApiSurface = proxyquire('../src/table.js', { + './tabular-api-surface.js': {Table: FakeTabularApiSurface}, + }).Table; + const FakeInstance = proxyquire('../src/instance.js', { + './table.js': {Table: FakeTable}, + }).Instance; + const FakeBigtable = proxyquire('../src/index.js', { + './instance.js': {Table: FakeInstance}, + }).Bigtable; + */ + const bigtable = new Bigtable(); + const instanceId = 'emulator-test-instance'; + const tableId = 'my-table'; + const columnFamilyId = 'cf1'; + + before(async () => { + // TODO: Change `any` + const instance = bigtable.instance(instanceId); + try { + const [instanceInfo] = await instance.exists(); + if (!instanceInfo) { + const [, operation] = await instance.create({ + clusters: { + id: 'fake-cluster3', + location: 'us-west1-c', + nodes: 1, + }, + }); + await operation.promise(); + } + + const table = instance.table(tableId); + const [tableExists] = await table.exists(); + if (!tableExists) { + await table.create({families: [columnFamilyId]}); // Create column family + } else { + // Check if column family exists and create it if not. + const [families] = await table.getFamilies(); + + if ( + !families.some((family: {id: string}) => family.id === columnFamilyId) + ) { + await table.createFamily(columnFamilyId); + } + } + } catch (error) { + console.error('Error during setup:', error); + // Consider re-throwing error, to actually stop tests. 
+ } + }); + + after(async () => { + const instance = bigtable.instance(instanceId); + await instance.delete({}); + }); + + it('should read rows after inserting data', async () => { + const instance = bigtable.instance(instanceId); + const table = instance.table(tableId); + for (let i = 0; i < 100; i++) { + await table.getRows(); + } + const myString = logger.getMessages(); // 'This is the string I want to write to the file.'; + const filename = 'metricsCollected.txt'; + console.log('waiting'); + await new Promise(resolve => { + setTimeout(async () => { + resolve('value'); + }, 30_000); + }); + console.log('stop waiting'); + + // Write the string to the file + fs.writeFileSync(filename, myString); + }); +}); From a1276b4d4e9a607c7a4694d389709ac441728923 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 3 Mar 2025 17:31:08 -0500 Subject: [PATCH 240/448] Use the GCPMetricsHandler in the tabular api surface --- metricsCollected.txt | 0 src/index.ts | 4 +- src/tabular-api-surface.ts | 76 +++++++++++++++++++++++--------------- 3 files changed, 49 insertions(+), 31 deletions(-) create mode 100644 metricsCollected.txt diff --git a/metricsCollected.txt b/metricsCollected.txt new file mode 100644 index 000000000..e69de29bb diff --git a/src/index.ts b/src/index.ts index 188254f03..d822787ba 100644 --- a/src/index.ts +++ b/src/index.ts @@ -36,11 +36,12 @@ import {PassThrough, Duplex} from 'stream'; import grpcGcpModule = require('grpc-gcp'); import {ClusterUtils} from './utils/cluster'; import {IMetricsHandler} from './client-side-metrics/metrics-handler'; -import {GCPMetricsHandler} from './client-side-metrics/gcp-metrics-handler'; // eslint-disable-next-line @typescript-eslint/no-var-requires const streamEvents = require('stream-events'); +const crypto = require('crypto'); + // eslint-disable-next-line @typescript-eslint/no-var-requires const PKG = require('../../package.json'); @@ -421,6 +422,7 @@ export class Bigtable { appProfileId?: string; projectName: string; shouldReplaceProjectIdToken: boolean; + clientUid = crypto.randomUUID(); static AppProfile: AppProfile; static Instance: Instance; static Cluster: Cluster; diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index e32c493c3..f705cb703 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -12,14 +12,26 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-import {DefaultDateProvider, OperationMetricsCollector,} from './client-side-metrics/operation-metrics-collector'; +import {OperationMetricsCollector} from './client-side-metrics/operation-metrics-collector'; import {promisifyAll} from '@google-cloud/promisify'; import {Instance} from './instance'; import {Mutation} from './mutation'; -import {AbortableDuplex, Bigtable, Entry, MutateOptions, SampleRowKeysCallback, SampleRowsKeysResponse,} from './index'; +import { + AbortableDuplex, + Bigtable, + Entry, + MutateOptions, + SampleRowKeysCallback, + SampleRowsKeysResponse, +} from './index'; import {BoundData, Filter, RawFilter} from './filter'; import {Row} from './row'; -import {ChunkPushData, ChunkPushLastScannedRowData, ChunkTransformer, DataEvent,} from './chunktransformer'; +import { + ChunkPushData, + ChunkPushLastScannedRowData, + ChunkTransformer, + DataEvent, +} from './chunktransformer'; import {BackoffSettings} from 'google-gax/build/src/gax'; import {google} from '../protos/protos'; import {CallOptions, grpc, ServiceError} from 'google-gax'; @@ -28,12 +40,16 @@ import * as is from 'is'; import {GoogleInnerError} from './table'; import {TableUtils} from './utils/table'; import {IMetricsHandler} from './client-side-metrics/metrics-handler'; -import {MethodName} from '../common/client-side-metrics-attributes'; -import {StreamingState} from './client-side-metrics/client-side-metrics-attributes'; +import { + MethodName, + StreamingState, +} from './client-side-metrics/client-side-metrics-attributes'; let attemptCounter = 0; import arrify = require('arrify'); +import {GCPMetricsHandler} from './client-side-metrics/gcp-metrics-handler'; +import {CloudMonitoringExporter} from './client-side-metrics/exporter'; // See protos/google/rpc/code.proto // (4=DEADLINE_EXCEEDED, 8=RESOURCE_EXHAUSTED, 10=ABORTED, 14=UNAVAILABLE) @@ -331,7 +347,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // this.bigtable.getProjectId_((err, projectId) => { const metricsCollector = new OperationMetricsCollector( this, - this.bigtable.options.metricsHandlers as IMetricsHandler[], + [new GCPMetricsHandler(new CloudMonitoringExporter())], MethodName.READ_ROWS, StreamingState.STREAMING ); @@ -517,7 +533,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); requestStream .on( 'metadata', - (metadata: {internalRepr: Map; options: {}}) => { + (metadata: {internalRepr: Map; options: {}}) => { console.log(`event metadata: ${this.bigtable.projectId}`); metricsCollector.onMetadataReceived(metadata); } @@ -525,10 +541,10 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); .on( 'status', (status: { - metadata: {internalRepr: Map; options: {}}; + metadata: {internalRepr: Map; options: {}}; }) => { console.log(`event status: ${this.bigtable.projectId}`); - metricsCollector.onStatusReceived(status); + metricsCollector.onStatusMetadataReceived(status); } ); rowStream @@ -557,10 +573,10 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); numConsecutiveErrors, backOffSettings ); - metricsCollector.onAttemptComplete({ - attemptStatus: error.code, - streamingOperation: true, - }); + metricsCollector.onAttemptComplete( + this.bigtable.projectId, + error.code + ); retryTimer = setTimeout(makeNewRequest, nextRetryDelay); } else { if ( @@ -576,14 +592,14 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // error.code = grpc.status.CANCELLED; } - metricsCollector.onAttemptComplete({ - attemptStatus: error.code, - 
streamingOperation: true, - }); - metricsCollector.onOperationComplete({ - finalOperationStatus: error.code, - streamingOperation: true, - }); + metricsCollector.onAttemptComplete( + this.bigtable.projectId, + error.code + ); + metricsCollector.onOperationComplete( + this.bigtable.projectId, + error.code + ); userStream.emit('error', error); } }) @@ -592,20 +608,20 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // Reset error count after a successful read so the backoff // time won't keep increasing when as stream had multiple errors numConsecutiveErrors = 0; - metricsCollector.onResponse(); + metricsCollector.onResponse(this.bigtable.projectId); }) .on('end', () => { console.log(`event end: ${this.bigtable.projectId}`); numRequestsMade++; activeRequestStream = null; - metricsCollector.onAttemptComplete({ - attemptStatus: 0, // Grpc OK status - streamingOperation: true, - }); - metricsCollector.onOperationComplete({ - finalOperationStatus: 0, // Grpc OK status - streamingOperation: true, - }); + metricsCollector.onAttemptComplete( + this.bigtable.projectId, + grpc.status.OK + ); + metricsCollector.onOperationComplete( + this.bigtable.projectId, + grpc.status.OK + ); }); rowStreamPipe(rowStream, userStream); }; From d16d3ec9138d731f5ec2b7e62b1b14c3a36e1335 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 5 Mar 2025 10:31:54 -0500 Subject: [PATCH 241/448] Add a guard in the metrics collector --- .../operation-metrics-collector.ts | 33 ++++++++++++------- 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 4fd24b4d9..0ab4da320 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -22,6 +22,8 @@ const root = gax.protobuf.loadSync( ); const ResponseParams = root.lookupType('ResponseParams'); +// TODO: Add guards in the metrics collector. + /** * An interface representing a tabular API surface, such as a Bigtable table. 
*/ @@ -283,18 +285,25 @@ export class OperationMetricsCollector { const mappedValue = status.metadata.internalRepr.get( INSTANCE_INFORMATION_KEY ) as Buffer[]; - const decodedValue = ResponseParams.decode( - mappedValue[0], - mappedValue[0].length - ); - if (decodedValue && (decodedValue as unknown as {zoneId: string}).zoneId) { - this.zone = (decodedValue as unknown as {zoneId: string}).zoneId; - } - if ( - decodedValue && - (decodedValue as unknown as {clusterId: string}).clusterId - ) { - this.cluster = (decodedValue as unknown as {clusterId: string}).clusterId; + if (mappedValue && mappedValue[0]) { + const decodedValue = ResponseParams.decode( + mappedValue[0], + mappedValue[0].length + ); + if ( + decodedValue && + (decodedValue as unknown as {zoneId: string}).zoneId + ) { + this.zone = (decodedValue as unknown as {zoneId: string}).zoneId; + } + if ( + decodedValue && + (decodedValue as unknown as {clusterId: string}).clusterId + ) { + this.cluster = ( + decodedValue as unknown as {clusterId: string} + ).clusterId; + } } } } From 874af797b612e3b0213bca902d6a49bc13fd3022 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 5 Mar 2025 10:33:41 -0500 Subject: [PATCH 242/448] Fix the proxyquire issue --- system-test/client-side-metrics-to-gcm.ts | 188 ++++++++++------------ 1 file changed, 85 insertions(+), 103 deletions(-) diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts index 624d2fbf4..61399086a 100644 --- a/system-test/client-side-metrics-to-gcm.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -15,107 +15,100 @@ // TODO: Must be put in root folder or will not run import {describe, it, before, after} from 'mocha'; -import * as fs from 'node:fs'; import {Bigtable} from '../src'; -import { - ITabularApiSurface, - OperationMetricsCollector, -} from '../src/client-side-metrics/operation-metrics-collector'; -import {IMetricsHandler} from '../src/client-side-metrics/metrics-handler'; import * as proxyquire from 'proxyquire'; import {TabularApiSurface} from '../src/tabular-api-surface'; -import {google} from '../protos/protos'; +import {PushMetricExporter, ResourceMetrics} from '@opentelemetry/sdk-metrics'; +import { + CloudMonitoringExporter, + ExportResult, +} from '../src/client-side-metrics/exporter'; +import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler'; +import * as mocha from 'mocha'; -class Logger { - private messages = ''; +describe.only('Bigtable/MetricsCollector', () => { + async function mockBigtable(done: mocha.Done) { + /* + We need to create a timeout here because if we don't then mocha shuts down + the test as it is sleeping before the GCPMetricsHandler has a chance to + export the data. + */ + const timeout = setTimeout(() => {}, 120000); + class TestExporter extends CloudMonitoringExporter { + export( + metrics: ResourceMetrics, + resultCallback: (result: ExportResult) => void + ): void { + super.export(metrics, (result: ExportResult) => { + if (result.code === 0) { + clearTimeout(timeout); + done(); + } else { + done(result); // Report the error to the test runner. 
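+            // (`result.code` is non-zero on this branch, so passing the
+            // result object to done marks the test as failed.)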
+ } + }); + } + } - log(message: string) { - console.log(message); - this.messages = this.messages + message + '\n'; - } + class TestGCPMetricsHandler extends GCPMetricsHandler { + constructor(exporter: PushMetricExporter) { + super(new TestExporter()); + } + } + const FakeTabularApiSurface = proxyquire('../src/tabular-api-surface.js', { + './client-side-metrics/gcp-metrics-handler': { + GCPMetricsHandler: TestGCPMetricsHandler, + }, + }).TabularApiSurface; + const FakeTable: TabularApiSurface = proxyquire('../src/table.js', { + './tabular-api-surface.js': {TabularApiSurface: FakeTabularApiSurface}, + }).Table; + const FakeInstance = proxyquire('../src/instance.js', { + './table.js': {Table: FakeTable}, + }).Instance; + const FakeBigtable = proxyquire('../src/index.js', { + './instance.js': {Instance: FakeInstance}, + }).Bigtable; + bigtable = new FakeBigtable(); - getMessages() { - return this.messages; - } -} + const instance = bigtable.instance(instanceId); + const [instanceInfo] = await instance.exists(); + if (!instanceInfo) { + const [, operation] = await instance.create({ + clusters: { + id: 'fake-cluster3', + location: 'us-west1-c', + nodes: 1, + }, + }); + await operation.promise(); + } -const logger = new Logger(); + const table = instance.table(tableId); + const [tableExists] = await table.exists(); + if (!tableExists) { + await table.create({families: [columnFamilyId]}); // Create column family + } else { + // Check if column family exists and create it if not. + const [families] = await table.getFamilies(); -/* -class TestMetricsCollector extends OperationMetricsCollector { - constructor( - tabularApiSurface: ITabularApiSurface, - metricsHandlers: IMetricsHandler[], - methodName: MethodName, - projectId?: string - ) { - super( - tabularApiSurface, - metricsHandlers, - methodName, - projectId, - new TestDateProvider(logger) - ); + if ( + !families.some((family: {id: string}) => family.id === columnFamilyId) + ) { + await table.createFamily(columnFamilyId); + } + } } -} - */ -describe.only('Bigtable/MetricsCollector', () => { - /* - const FakeTabularApiSurface = proxyquire('../src/tabular-api-surface.js', { - './client-side-metrics/operation-metrics-collector': { - MetricsCollector: TestMetricsCollector, - }, - }).TabularApiSurface; - const FakeTable: TabularApiSurface = proxyquire('../src/table.js', { - './tabular-api-surface.js': {Table: FakeTabularApiSurface}, - }).Table; - const FakeInstance = proxyquire('../src/instance.js', { - './table.js': {Table: FakeTable}, - }).Instance; - const FakeBigtable = proxyquire('../src/index.js', { - './instance.js': {Table: FakeInstance}, - }).Bigtable; - */ - const bigtable = new Bigtable(); const instanceId = 'emulator-test-instance'; const tableId = 'my-table'; const columnFamilyId = 'cf1'; + let bigtable: Bigtable; before(async () => { - // TODO: Change `any` - const instance = bigtable.instance(instanceId); - try { - const [instanceInfo] = await instance.exists(); - if (!instanceInfo) { - const [, operation] = await instance.create({ - clusters: { - id: 'fake-cluster3', - location: 'us-west1-c', - nodes: 1, - }, - }); - await operation.promise(); - } - - const table = instance.table(tableId); - const [tableExists] = await table.exists(); - if (!tableExists) { - await table.create({families: [columnFamilyId]}); // Create column family - } else { - // Check if column family exists and create it if not. 
- const [families] = await table.getFamilies(); - - if ( - !families.some((family: {id: string}) => family.id === columnFamilyId) - ) { - await table.createFamily(columnFamilyId); - } - } - } catch (error) { - console.error('Error during setup:', error); - // Consider re-throwing error, to actually stop tests. - } + // This line is added just to make sure the bigtable variable is assigned. + // It is needed to solve a compile time error in the after hook. + bigtable = new Bigtable(); }); after(async () => { @@ -123,23 +116,12 @@ describe.only('Bigtable/MetricsCollector', () => { await instance.delete({}); }); - it('should read rows after inserting data', async () => { - const instance = bigtable.instance(instanceId); - const table = instance.table(tableId); - for (let i = 0; i < 100; i++) { + it('should read rows after inserting data', done => { + (async () => { + mockBigtable(done); + const instance = bigtable.instance(instanceId); + const table = instance.table(tableId); await table.getRows(); - } - const myString = logger.getMessages(); // 'This is the string I want to write to the file.'; - const filename = 'metricsCollected.txt'; - console.log('waiting'); - await new Promise(resolve => { - setTimeout(async () => { - resolve('value'); - }, 30_000); - }); - console.log('stop waiting'); - - // Write the string to the file - fs.writeFileSync(filename, myString); + })(); }); }); From c000a1cd19176f364adbc234eae883f9bdb524d8 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 5 Mar 2025 10:34:06 -0500 Subject: [PATCH 243/448] Export every 60 seconds instead --- src/client-side-metrics/gcp-metrics-handler.ts | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index c9a4a1235..6c5eccd67 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -141,9 +141,8 @@ export class GCPMetricsHandler implements IMetricsHandler { readers: [ // Register the exporter new PeriodicExportingMetricReader({ - // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by - // Cloud Monitoring. - exportIntervalMillis: 1_000, + // Export metrics every 60 seconds. + exportIntervalMillis: 60_000, exporter: this.exporter, }), ], From 9f28d3e70ea81a39058cc1f1461d98cda93461be Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 5 Mar 2025 10:49:29 -0500 Subject: [PATCH 244/448] Add a guard to the metadata collector --- .../operation-metrics-collector.ts | 20 ++++++++++--------- src/tabular-api-surface.ts | 1 - 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 0ab4da320..57fbea00f 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -261,16 +261,18 @@ export class OperationMetricsCollector { const SERVER_TIMING_REGEX = /.*gfet4t7;\s*dur=(\d+\.?\d*).*/; const SERVER_TIMING_KEY = 'server-timing'; const durationValues = mappedEntries.get(SERVER_TIMING_KEY); - const matchedDuration = durationValues?.match(SERVER_TIMING_REGEX); - if (matchedDuration && matchedDuration[1]) { - if (!this.serverTimeRead) { - this.serverTimeRead = true; - this.serverTime = isNaN(parseInt(matchedDuration[1])) - ? 
null - : parseInt(matchedDuration[1]); + if (durationValues) { + const matchedDuration = durationValues?.match(SERVER_TIMING_REGEX); + if (matchedDuration && matchedDuration[1]) { + if (!this.serverTimeRead) { + this.serverTimeRead = true; + this.serverTime = isNaN(parseInt(matchedDuration[1])) + ? null + : parseInt(matchedDuration[1]); + } + } else { + this.connectivityErrorCount++; } - } else { - this.connectivityErrorCount++; } } diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index f705cb703..0e7d60b8f 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -39,7 +39,6 @@ import {Duplex, PassThrough, Transform} from 'stream'; import * as is from 'is'; import {GoogleInnerError} from './table'; import {TableUtils} from './utils/table'; -import {IMetricsHandler} from './client-side-metrics/metrics-handler'; import { MethodName, StreamingState, From 8cc0757959a26f574af5d6c8e2244fe0726841d7 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 5 Mar 2025 11:16:22 -0500 Subject: [PATCH 245/448] Add a guard to the export function --- system-test/client-side-metrics-to-gcm.ts | 28 +++++++++++++++++------ 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts index 61399086a..a23722d8a 100644 --- a/system-test/client-side-metrics-to-gcm.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -15,10 +15,11 @@ // TODO: Must be put in root folder or will not run import {describe, it, before, after} from 'mocha'; +import * as assert from 'assert'; import {Bigtable} from '../src'; import * as proxyquire from 'proxyquire'; import {TabularApiSurface} from '../src/tabular-api-surface'; -import {PushMetricExporter, ResourceMetrics} from '@opentelemetry/sdk-metrics'; +import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import { CloudMonitoringExporter, ExportResult, @@ -34,24 +35,37 @@ describe.only('Bigtable/MetricsCollector', () => { export the data. */ const timeout = setTimeout(() => {}, 120000); + /* + The exporter is called every x seconds, but we only want to test the value + it receives once. Since done cannot be called multiple times in mocha, + exported variable ensures we only test the value export receives one time. + */ + let exported = false; class TestExporter extends CloudMonitoringExporter { export( metrics: ResourceMetrics, resultCallback: (result: ExportResult) => void ): void { super.export(metrics, (result: ExportResult) => { - if (result.code === 0) { - clearTimeout(timeout); - done(); - } else { - done(result); // Report the error to the test runner. + if (!exported) { + exported = true; + try { + clearTimeout(timeout); + assert.strictEqual(result.code, 0); + done(); + resultCallback({code: 0}); + } catch (error) { + // Code isn't 0 so report the original error. 
+ done(result); + done(error); + } } }); } } class TestGCPMetricsHandler extends GCPMetricsHandler { - constructor(exporter: PushMetricExporter) { + constructor() { super(new TestExporter()); } } From 88e05270b42062533ab4d3d9727941b60a642ef6 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 5 Mar 2025 16:57:09 -0500 Subject: [PATCH 246/448] Make sure to await to avoid a race condition --- system-test/client-side-metrics-to-gcm.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts index a23722d8a..058cafb64 100644 --- a/system-test/client-side-metrics-to-gcm.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -41,6 +41,7 @@ describe.only('Bigtable/MetricsCollector', () => { exported variable ensures we only test the value export receives one time. */ let exported = false; + class TestExporter extends CloudMonitoringExporter { export( metrics: ResourceMetrics, @@ -69,6 +70,7 @@ describe.only('Bigtable/MetricsCollector', () => { super(new TestExporter()); } } + const FakeTabularApiSurface = proxyquire('../src/tabular-api-surface.js', { './client-side-metrics/gcp-metrics-handler': { GCPMetricsHandler: TestGCPMetricsHandler, @@ -132,7 +134,7 @@ describe.only('Bigtable/MetricsCollector', () => { it('should read rows after inserting data', done => { (async () => { - mockBigtable(done); + await mockBigtable(done); const instance = bigtable.instance(instanceId); const table = instance.table(tableId); await table.getRows(); From d6d409ed57a9ea60b5f81e2b27b5710b2d012628 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 5 Mar 2025 17:03:45 -0500 Subject: [PATCH 247/448] Adjust headers --- system-test/client-side-metrics-to-gcm.ts | 2 +- .../client-side-metrics-to-metrics-handler.ts | 143 ++++++++++++++++++ 2 files changed, 144 insertions(+), 1 deletion(-) create mode 100644 system-test/client-side-metrics-to-metrics-handler.ts diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts index 058cafb64..e2c517b7d 100644 --- a/system-test/client-side-metrics-to-gcm.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/system-test/client-side-metrics-to-metrics-handler.ts b/system-test/client-side-metrics-to-metrics-handler.ts new file mode 100644 index 000000000..e2c517b7d --- /dev/null +++ b/system-test/client-side-metrics-to-metrics-handler.ts @@ -0,0 +1,143 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
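+
+// This variant is being set up to route metrics through the in-process
+// TestMetricsHandler imported below, rather than the Cloud Monitoring
+// exporter, so tests can assert directly on the recorded metric data.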
+ +// TODO: Must be put in root folder or will not run + +import {describe, it, before, after} from 'mocha'; +import * as assert from 'assert'; +import {Bigtable} from '../src'; +import * as proxyquire from 'proxyquire'; +import {TabularApiSurface} from '../src/tabular-api-surface'; +import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; +import { + CloudMonitoringExporter, + ExportResult, +} from '../src/client-side-metrics/exporter'; +import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler'; +import * as mocha from 'mocha'; + +describe.only('Bigtable/MetricsCollector', () => { + async function mockBigtable(done: mocha.Done) { + /* + We need to create a timeout here because if we don't then mocha shuts down + the test as it is sleeping before the GCPMetricsHandler has a chance to + export the data. + */ + const timeout = setTimeout(() => {}, 120000); + /* + The exporter is called every x seconds, but we only want to test the value + it receives once. Since done cannot be called multiple times in mocha, + exported variable ensures we only test the value export receives one time. + */ + let exported = false; + + class TestExporter extends CloudMonitoringExporter { + export( + metrics: ResourceMetrics, + resultCallback: (result: ExportResult) => void + ): void { + super.export(metrics, (result: ExportResult) => { + if (!exported) { + exported = true; + try { + clearTimeout(timeout); + assert.strictEqual(result.code, 0); + done(); + resultCallback({code: 0}); + } catch (error) { + // Code isn't 0 so report the original error. + done(result); + done(error); + } + } + }); + } + } + + class TestGCPMetricsHandler extends GCPMetricsHandler { + constructor() { + super(new TestExporter()); + } + } + + const FakeTabularApiSurface = proxyquire('../src/tabular-api-surface.js', { + './client-side-metrics/gcp-metrics-handler': { + GCPMetricsHandler: TestGCPMetricsHandler, + }, + }).TabularApiSurface; + const FakeTable: TabularApiSurface = proxyquire('../src/table.js', { + './tabular-api-surface.js': {TabularApiSurface: FakeTabularApiSurface}, + }).Table; + const FakeInstance = proxyquire('../src/instance.js', { + './table.js': {Table: FakeTable}, + }).Instance; + const FakeBigtable = proxyquire('../src/index.js', { + './instance.js': {Instance: FakeInstance}, + }).Bigtable; + bigtable = new FakeBigtable(); + + const instance = bigtable.instance(instanceId); + const [instanceInfo] = await instance.exists(); + if (!instanceInfo) { + const [, operation] = await instance.create({ + clusters: { + id: 'fake-cluster3', + location: 'us-west1-c', + nodes: 1, + }, + }); + await operation.promise(); + } + + const table = instance.table(tableId); + const [tableExists] = await table.exists(); + if (!tableExists) { + await table.create({families: [columnFamilyId]}); // Create column family + } else { + // Check if column family exists and create it if not. + const [families] = await table.getFamilies(); + + if ( + !families.some((family: {id: string}) => family.id === columnFamilyId) + ) { + await table.createFamily(columnFamilyId); + } + } + } + + const instanceId = 'emulator-test-instance'; + const tableId = 'my-table'; + const columnFamilyId = 'cf1'; + let bigtable: Bigtable; + + before(async () => { + // This line is added just to make sure the bigtable variable is assigned. + // It is needed to solve a compile time error in the after hook. 
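
The setup above is deliberately idempotent so repeated runs against the same project reuse existing resources. Condensed into one helper (a sketch; it assumes Instance is re-exported from '../src' alongside Bigtable, and it mirrors the promise-returning calls used above):

import {Instance} from '../src';

// Creates the table and column family only when they are missing, so the
// fixture survives repeated test runs without failing on "already exists".
async function ensureFixture(
  instance: Instance,
  tableId: string,
  familyId: string
) {
  const table = instance.table(tableId);
  const [tableExists] = await table.exists();
  if (!tableExists) {
    await table.create({families: [familyId]});
    return;
  }
  const [families] = await table.getFamilies();
  if (!families.some((family: {id: string}) => family.id === familyId)) {
    await table.createFamily(familyId);
  }
}
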
+ bigtable = new Bigtable(); + }); + + after(async () => { + const instance = bigtable.instance(instanceId); + await instance.delete({}); + }); + + it('should read rows after inserting data', done => { + (async () => { + await mockBigtable(done); + const instance = bigtable.instance(instanceId); + const table = instance.table(tableId); + await table.getRows(); + })(); + }); +}); From 6b99e0942a5db63a7ff5a375435f948bbf41db1c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 10:33:51 -0500 Subject: [PATCH 248/448] Add the right assertions for the TestMetricsHandle --- system-test/client-side-metrics-to-gcm.ts | 2 +- .../client-side-metrics-to-metrics-handler.ts | 95 ++++++++++--------- 2 files changed, 51 insertions(+), 46 deletions(-) diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts index e2c517b7d..22c02c1ce 100644 --- a/system-test/client-side-metrics-to-gcm.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -27,7 +27,7 @@ import { import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler'; import * as mocha from 'mocha'; -describe.only('Bigtable/MetricsCollector', () => { +describe('Bigtable/MetricsCollector', () => { async function mockBigtable(done: mocha.Done) { /* We need to create a timeout here because if we don't then mocha shuts down diff --git a/system-test/client-side-metrics-to-metrics-handler.ts b/system-test/client-side-metrics-to-metrics-handler.ts index e2c517b7d..922be51a0 100644 --- a/system-test/client-side-metrics-to-metrics-handler.ts +++ b/system-test/client-side-metrics-to-metrics-handler.ts @@ -15,59 +15,64 @@ // TODO: Must be put in root folder or will not run import {describe, it, before, after} from 'mocha'; -import * as assert from 'assert'; import {Bigtable} from '../src'; import * as proxyquire from 'proxyquire'; import {TabularApiSurface} from '../src/tabular-api-surface'; -import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; -import { - CloudMonitoringExporter, - ExportResult, -} from '../src/client-side-metrics/exporter'; -import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler'; import * as mocha from 'mocha'; +import * as assert from 'assert'; +import {TestMetricsHandler} from '../test-common/test-metrics-handler'; +import { + OnOperationCompleteData, +} from '../src/client-side-metrics/metrics-handler'; describe.only('Bigtable/MetricsCollector', () => { async function mockBigtable(done: mocha.Done) { - /* - We need to create a timeout here because if we don't then mocha shuts down - the test as it is sleeping before the GCPMetricsHandler has a chance to - export the data. - */ - const timeout = setTimeout(() => {}, 120000); - /* - The exporter is called every x seconds, but we only want to test the value - it receives once. Since done cannot be called multiple times in mocha, - exported variable ensures we only test the value export receives one time. - */ - let exported = false; - - class TestExporter extends CloudMonitoringExporter { - export( - metrics: ResourceMetrics, - resultCallback: (result: ExportResult) => void - ): void { - super.export(metrics, (result: ExportResult) => { - if (!exported) { - exported = true; - try { - clearTimeout(timeout); - assert.strictEqual(result.code, 0); - done(); - resultCallback({code: 0}); - } catch (error) { - // Code isn't 0 so report the original error. 
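
The comparison technique above (delete the fields whose values vary from run to run, then deepStrictEqual the remainder) generalizes to a small helper. A sketch; assertMatchesIgnoring is hypothetical and not part of the test utilities:

import * as assert from 'assert';

// Deep-compares `actual` to `expected` after removing the listed
// dot-separated volatile keys from a JSON copy of `actual`.
function assertMatchesIgnoring(
  actual: object,
  expected: object,
  volatileKeys: string[]
) {
  const scrubbed = JSON.parse(JSON.stringify(actual));
  for (const volatileKey of volatileKeys) {
    const path = volatileKey.split('.');
    const leaf = path.pop() as string;
    let node = scrubbed;
    for (const segment of path) {
      node = node?.[segment];
    }
    if (node) {
      delete node[leaf];
    }
  }
  assert.deepStrictEqual(scrubbed, expected);
}

Against the first request above it would be called as assertMatchesIgnoring(firstRequest, expected, ['attemptLatency', 'serverLatency', 'metricsCollectorData.clientUid', 'metricsCollectorData.appProfileId']).
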
- done(result); - done(error); - } - } + class TestGCPMetricsHandler extends TestMetricsHandler { + onOperationComplete(data: OnOperationCompleteData) { + super.onOperationComplete(data); + assert.strictEqual(this.requestsHandled.length, 2); + const firstRequest = this.requestsHandled[0] as any; + // We would expect these parameters to be different every time so delete + // them from the comparison. + delete firstRequest.attemptLatency; + delete firstRequest.serverLatency; + delete firstRequest.metricsCollectorData.clientUid; + delete firstRequest.metricsCollectorData.appProfileId; + assert.deepStrictEqual(firstRequest, { + connectivityErrorCount: 0, + streamingOperation: 'true', + attemptStatus: 0, + clientName: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'emulator-test-instance', + table: 'my-table', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'Bigtable.ReadRows', + }, + projectId: 'cloud-native-db-dpes-shared', }); - } - } - - class TestGCPMetricsHandler extends GCPMetricsHandler { - constructor() { - super(new TestExporter()); + const secondRequest = this.requestsHandled[1] as any; + delete secondRequest.operationLatency; + delete secondRequest.firstResponseLatency; + delete secondRequest.metricsCollectorData.clientUid; + delete secondRequest.metricsCollectorData.appProfileId; + assert.deepStrictEqual(secondRequest, { + finalOperationStatus: 0, + streamingOperation: 'true', + clientName: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'emulator-test-instance', + table: 'my-table', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'Bigtable.ReadRows', + }, + projectId: 'cloud-native-db-dpes-shared', + retryCount: 0, + }); + // Do assertion checks here to + done(); } } From f4cb545fafb5acfff063d3949f51ad9ba821825f Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 10:51:34 -0500 Subject: [PATCH 249/448] Send the project id to the mock --- system-test/client-side-metrics-to-gcm.ts | 2 +- .../client-side-metrics-to-metrics-handler.ts | 23 ++++++++++++------- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts index 22c02c1ce..316aac4b3 100644 --- a/system-test/client-side-metrics-to-gcm.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -132,7 +132,7 @@ describe('Bigtable/MetricsCollector', () => { await instance.delete({}); }); - it('should read rows after inserting data', done => { + it('should send the metrics to Google Cloud Monitoring for a ReadRows call', done => { (async () => { await mockBigtable(done); const instance = bigtable.instance(instanceId); diff --git a/system-test/client-side-metrics-to-metrics-handler.ts b/system-test/client-side-metrics-to-metrics-handler.ts index 922be51a0..19e296aa7 100644 --- a/system-test/client-side-metrics-to-metrics-handler.ts +++ b/system-test/client-side-metrics-to-metrics-handler.ts @@ -21,12 +21,10 @@ import {TabularApiSurface} from '../src/tabular-api-surface'; import * as mocha from 'mocha'; import * as assert from 'assert'; import {TestMetricsHandler} from '../test-common/test-metrics-handler'; -import { - OnOperationCompleteData, -} from '../src/client-side-metrics/metrics-handler'; +import {OnOperationCompleteData} from '../src/client-side-metrics/metrics-handler'; describe.only('Bigtable/MetricsCollector', () => { - async function mockBigtable(done: mocha.Done) { + async function mockBigtable(projectId: string, done: mocha.Done) { class TestGCPMetricsHandler 
extends TestMetricsHandler { onOperationComplete(data: OnOperationCompleteData) { super.onOperationComplete(data); @@ -50,7 +48,7 @@ describe.only('Bigtable/MetricsCollector', () => { zone: 'us-west1-c', methodName: 'Bigtable.ReadRows', }, - projectId: 'cloud-native-db-dpes-shared', + projectId, }); const secondRequest = this.requestsHandled[1] as any; delete secondRequest.operationLatency; @@ -68,7 +66,7 @@ describe.only('Bigtable/MetricsCollector', () => { zone: 'us-west1-c', methodName: 'Bigtable.ReadRows', }, - projectId: 'cloud-native-db-dpes-shared', + projectId, retryCount: 0, }); // Do assertion checks here to @@ -137,9 +135,18 @@ describe.only('Bigtable/MetricsCollector', () => { await instance.delete({}); }); - it('should read rows after inserting data', done => { + it('should send the metrics to the metrics handler for a ReadRows call', done => { (async () => { - await mockBigtable(done); + const projectId: string = await new Promise((resolve, reject) => { + bigtable.getProjectId_((err, projectId) => { + if (err) { + reject(err); + } else { + resolve(projectId as string); + } + }); + }); + await mockBigtable(projectId, done); const instance = bigtable.instance(instanceId); const table = instance.table(tableId); await table.getRows(); From 5a09c337491695a45818442d97e178ad102f4d93 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 13:34:08 -0500 Subject: [PATCH 250/448] Invert the for and if --- src/client-side-metrics/exporter.ts | 89 +++++++++++------------------ 1 file changed, 32 insertions(+), 57 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 7bdcf8119..2857cf2a7 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -93,7 +93,7 @@ interface OnOperationAttribute { * descriptor, the type of data it collects, and the actual data points. * */ -interface Metric { +interface Metric { descriptor: { name: string; unit: string; @@ -104,12 +104,25 @@ interface Metric { }; aggregationTemporality?: number; dataPointType?: number; - dataPoints: { - attributes: Attributes; - startTime: number[]; - endTime: number[]; - value: Value; - }[]; + dataPoints: DataPoint[]; +} + +interface DataPoint { + attributes: OnAttemptAttribute | OnOperationAttribute; + startTime: number[]; + endTime: number[]; + value: Value; +} + +interface DistributionValue { + min?: number; + max?: number; + sum: number; + count: number; + buckets: { + boundaries: number[]; + counts: number[]; + }; } /** @@ -122,19 +135,7 @@ interface Metric { * latencies, sizes, or other metrics where the distribution is important. * */ -type DistributionMetric = Metric< - OnAttemptAttribute | OnOperationAttribute, - { - min?: number; - max?: number; - sum: number; - count: number; - buckets: { - boundaries: number[]; - counts: number[]; - }; - } ->; +type DistributionMetric = Metric; /** * Represents a metric that counts the number of occurrences of an event or @@ -145,7 +146,7 @@ type DistributionMetric = Metric< * non-negative and can only increase or remain constant. * */ -type CounterMetric = Metric; +type CounterMetric = Metric; /** * Represents the input data structure for exporting OpenTelemetry metrics. @@ -193,31 +194,8 @@ export interface ExportInput { }[]; } -/** - * Type guard function to determine if a given metric is a CounterMetric. - * - * This function checks if a metric is a CounterMetric by inspecting its - * `descriptor.name` property and comparing it against known counter metric - * names. 
- * - * @param metric - The metric to check. This can be either a - * `DistributionMetric` or a `CounterMetric`. - * @returns `true` if the metric is a `CounterMetric`, `false` otherwise. - * - * @remarks - * This function uses a type guard to narrow down the type of the `metric` - * parameter to `CounterMetric` if it returns `true`. This allows TypeScript - * to perform more precise type checking and provides better code - * completion when working with metrics. - * - */ -function isCounterMetric( - metric: DistributionMetric | CounterMetric -): metric is CounterMetric { - return ( - metric.descriptor.name === RETRY_COUNT_NAME || - metric.descriptor.name === CONNECTIIVTY_ERROR_COUNT - ); +function isCounterValue(value: DistributionValue | number): value is number { + return typeof value === 'number'; } /** @@ -269,8 +247,9 @@ export function metricsToRequest(exportArgs: ExportInput) { for (const scopeMetrics of exportArgs.scopeMetrics) { for (const metric of scopeMetrics.metrics) { const metricName = metric.descriptor.name; - if (isCounterMetric(metric)) { - for (const dataPoint of metric.dataPoints) { + for (const dataPoint of metric.dataPoints) { + const value = dataPoint.value; + if (isCounterValue(value)) { // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; const streaming = allAttributes.streamingOperation; @@ -319,9 +298,7 @@ export function metricsToRequest(exportArgs: ExportInput) { ], }; timeSeriesArray.push(timeSeries); - } - } else { - for (const dataPoint of metric.dataPoints) { + } else { // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; const streaming = allAttributes.streamingOperation; @@ -366,16 +343,14 @@ export function metricsToRequest(exportArgs: ExportInput) { }, value: { distributionValue: { - count: String(dataPoint.value.count), - mean: dataPoint.value.count - ? dataPoint.value.sum / dataPoint.value.count - : 0, + count: String(value.count), + mean: value.count ? value.sum / value.count : 0, bucketOptions: { explicitBuckets: { - bounds: dataPoint.value.buckets.boundaries, + bounds: value.buckets.boundaries, }, }, - bucketCounts: dataPoint.value.buckets.counts.map(String), + bucketCounts: value.buckets.counts.map(String), }, }, }, From aa18c1ed9428e14b15854e1895355bd79fc65af9 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 13:48:02 -0500 Subject: [PATCH 251/448] Pull all attributes out --- src/client-side-metrics/exporter.ts | 115 ++++++++++------------------ 1 file changed, 41 insertions(+), 74 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 2857cf2a7..8d676f731 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -249,48 +249,47 @@ export function metricsToRequest(exportArgs: ExportInput) { const metricName = metric.descriptor.name; for (const dataPoint of metric.dataPoints) { const value = dataPoint.value; + // Extract attributes to labels based on their intended target (resource or metric) + const allAttributes = dataPoint.attributes; + const streaming = allAttributes.streamingOperation; + const metricLabels = Object.assign( + { + app_profile: allAttributes.appProfileId, + client_name: allAttributes.clientName, + method: allAttributes.methodName, + status: + (allAttributes as OnAttemptAttribute).attemptStatus?.toString() ?? 
+ ( + allAttributes as OnOperationAttribute + ).finalOperationStatus?.toString(), + client_uid: allAttributes.clientUid, + }, + streaming ? {streaming} : null + ); + const metric = { + type: metricName, + labels: metricLabels, + }; + const resource = { + type: exportArgs.resource._syncAttributes['monitored_resource.type'], + labels: resourceLabels, + }; + const interval = { + endTime: { + seconds: dataPoint.endTime[0], + }, + startTime: { + seconds: dataPoint.startTime[0], + }, + }; if (isCounterValue(value)) { - // Extract attributes to labels based on their intended target (resource or metric) - const allAttributes = dataPoint.attributes; - const streaming = allAttributes.streamingOperation; - const metricLabels = Object.assign( - { - app_profile: allAttributes.appProfileId, - client_name: allAttributes.clientName, - method: allAttributes.methodName, - status: - ( - allAttributes as OnAttemptAttribute - ).attemptStatus?.toString() ?? - ( - allAttributes as OnOperationAttribute - ).finalOperationStatus?.toString(), - client_uid: allAttributes.clientUid, - }, - streaming ? {streaming} : null - ); const timeSeries = { - metric: { - type: metricName, - labels: metricLabels, - }, - resource: { - type: exportArgs.resource._syncAttributes[ - 'monitored_resource.type' - ], - labels: resourceLabels, - }, + metric, + resource, valueType: 'INT64', points: [ { - interval: { - endTime: { - seconds: dataPoint.endTime[0], - }, - startTime: { - seconds: dataPoint.startTime[0], - }, - }, + interval, value: { int64Value: dataPoint.value, }, @@ -300,47 +299,14 @@ export function metricsToRequest(exportArgs: ExportInput) { timeSeriesArray.push(timeSeries); } else { // Extract attributes to labels based on their intended target (resource or metric) - const allAttributes = dataPoint.attributes; - const streaming = allAttributes.streamingOperation; - const metricLabels = Object.assign( - { - app_profile: allAttributes.appProfileId, - client_name: allAttributes.clientName, - method: allAttributes.methodName, - status: - ( - allAttributes as OnAttemptAttribute - ).attemptStatus?.toString() ?? - ( - allAttributes as OnOperationAttribute - ).finalOperationStatus?.toString(), - client_uid: allAttributes.clientUid, - }, - streaming ? 
{streaming} : null - ); const timeSeries = { - metric: { - type: metricName, - labels: metricLabels, - }, - resource: { - type: exportArgs.resource._syncAttributes[ - 'monitored_resource.type' - ], - labels: resourceLabels, - }, + metric, + resource, metricKind: 'CUMULATIVE', valueType: 'DISTRIBUTION', points: [ { - interval: { - endTime: { - seconds: dataPoint.endTime[0], - }, - startTime: { - seconds: dataPoint.startTime[0], - }, - }, + interval, value: { distributionValue: { count: String(value.count), @@ -355,7 +321,8 @@ export function metricsToRequest(exportArgs: ExportInput) { }, }, ], - unit: metric.descriptor.unit || 'ms', // Default to 'ms' if no unit is specified + unit: + (metric as unknown as DistributionMetric).descriptor.unit || 'ms', // Default to 'ms' if no unit is specified }; timeSeriesArray.push(timeSeries); } From 791e70dcb71cbf067eac351da6f62ef438be3d5c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 13:51:24 -0500 Subject: [PATCH 252/448] Eliminate the need for the type guard --- src/client-side-metrics/exporter.ts | 75 ++++++++++++++--------------- 1 file changed, 36 insertions(+), 39 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 8d676f731..03ea30ea3 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -282,50 +282,47 @@ export function metricsToRequest(exportArgs: ExportInput) { seconds: dataPoint.startTime[0], }, }; - if (isCounterValue(value)) { - const timeSeries = { - metric, - resource, - valueType: 'INT64', - points: [ - { - interval, - value: { - int64Value: dataPoint.value, + const timeSeries = isCounterValue(value) + ? { + metric, + resource, + valueType: 'INT64', + points: [ + { + interval, + value: { + int64Value: dataPoint.value, + }, }, - }, - ], - }; - timeSeriesArray.push(timeSeries); - } else { - // Extract attributes to labels based on their intended target (resource or metric) - const timeSeries = { - metric, - resource, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval, - value: { - distributionValue: { - count: String(value.count), - mean: value.count ? value.sum / value.count : 0, - bucketOptions: { - explicitBuckets: { - bounds: value.buckets.boundaries, + ], + } + : { + metric, + resource, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval, + value: { + distributionValue: { + count: String(value.count), + mean: value.count ? 
value.sum / value.count : 0, + bucketOptions: { + explicitBuckets: { + bounds: value.buckets.boundaries, + }, }, + bucketCounts: value.buckets.counts.map(String), }, - bucketCounts: value.buckets.counts.map(String), }, }, - }, - ], - unit: - (metric as unknown as DistributionMetric).descriptor.unit || 'ms', // Default to 'ms' if no unit is specified - }; - timeSeriesArray.push(timeSeries); - } + ], + unit: + (metric as unknown as DistributionMetric).descriptor.unit || + 'ms', // Default to 'ms' if no unit is specified + }; + timeSeriesArray.push(timeSeries); } } } From 2e215032afaebf40dcaaa3cebb8b0bbd7a160fdc Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 14:09:57 -0500 Subject: [PATCH 253/448] Eliminate the data points interface --- src/client-side-metrics/exporter.ts | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 03ea30ea3..686516a23 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -104,14 +104,12 @@ interface Metric { }; aggregationTemporality?: number; dataPointType?: number; - dataPoints: DataPoint[]; -} - -interface DataPoint { - attributes: OnAttemptAttribute | OnOperationAttribute; - startTime: number[]; - endTime: number[]; - value: Value; + dataPoints: { + attributes: OnAttemptAttribute | OnOperationAttribute; + startTime: number[]; + endTime: number[]; + value: Value; + }[]; } interface DistributionValue { @@ -194,6 +192,15 @@ export interface ExportInput { }[]; } +/** + * Type guard function to determine if a given value is a counter value (a number). + * + * This function checks if a value, which could be either a `DistributionValue` + * object or a `number`, is specifically a `number`. This is used to differentiate + * between counter metrics (which have numeric values) and distribution metrics + * (which have more complex, object-based values). + * + */ function isCounterValue(value: DistributionValue | number): value is number { return typeof value === 'number'; } From e936aaa671740b5f8654ca286e5fee61e31ffc98 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 14:11:30 -0500 Subject: [PATCH 254/448] only --- test/metrics-collector/metricsToRequest.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index e77917d79..a752ce47f 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -21,7 +21,7 @@ import { } from '../../test-common/expected-otel-export-input'; describe('Bigtable/metricsToRequest', () => { - it('Converts an otel request to a request ready for the metric service client', () => { + it.only('Converts an otel request to a request ready for the metric service client', () => { const convertedValue = metricsToRequest(expectedOtelExportInput); assert.deepStrictEqual( convertedValue.timeSeries.length, From ab07a580ba71227695dfbf2867a6c9b4652843cb Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 14:12:39 -0500 Subject: [PATCH 255/448] Revert "Eliminate the need for the type guard" This reverts commit 791e70dcb71cbf067eac351da6f62ef438be3d5c. 
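
Whether the branch is written as an if/else or a ternary, the conversion it implements is fixed: a counter data point becomes an INT64 time series, and a histogram data point becomes a CUMULATIVE/DISTRIBUTION time series. Just that branch in isolation (a sketch following the Cloud Monitoring TimeSeries JSON shape, with label and resource handling omitted):

interface DistributionValue {
  sum: number;
  count: number;
  buckets: {boundaries: number[]; counts: number[]};
}

function isCounterValue(value: DistributionValue | number): value is number {
  return typeof value === 'number';
}

// Builds only the value part of a Cloud Monitoring point; the type guard
// narrows `value` so each encoding gets the right fields.
function toPointValue(value: DistributionValue | number) {
  if (isCounterValue(value)) {
    return {int64Value: value};
  }
  return {
    distributionValue: {
      count: String(value.count),
      mean: value.count ? value.sum / value.count : 0,
      bucketOptions: {explicitBuckets: {bounds: value.buckets.boundaries}},
      bucketCounts: value.buckets.counts.map(String),
    },
  };
}
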
--- src/client-side-metrics/exporter.ts | 75 +++++++++++++++-------------- 1 file changed, 39 insertions(+), 36 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 686516a23..6face5806 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -289,47 +289,50 @@ export function metricsToRequest(exportArgs: ExportInput) { seconds: dataPoint.startTime[0], }, }; - const timeSeries = isCounterValue(value) - ? { - metric, - resource, - valueType: 'INT64', - points: [ - { - interval, - value: { - int64Value: dataPoint.value, - }, + if (isCounterValue(value)) { + const timeSeries = { + metric, + resource, + valueType: 'INT64', + points: [ + { + interval, + value: { + int64Value: dataPoint.value, }, - ], - } - : { - metric, - resource, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval, - value: { - distributionValue: { - count: String(value.count), - mean: value.count ? value.sum / value.count : 0, - bucketOptions: { - explicitBuckets: { - bounds: value.buckets.boundaries, - }, + }, + ], + }; + timeSeriesArray.push(timeSeries); + } else { + // Extract attributes to labels based on their intended target (resource or metric) + const timeSeries = { + metric, + resource, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval, + value: { + distributionValue: { + count: String(value.count), + mean: value.count ? value.sum / value.count : 0, + bucketOptions: { + explicitBuckets: { + bounds: value.buckets.boundaries, }, - bucketCounts: value.buckets.counts.map(String), }, + bucketCounts: value.buckets.counts.map(String), }, }, - ], - unit: - (metric as unknown as DistributionMetric).descriptor.unit || - 'ms', // Default to 'ms' if no unit is specified - }; - timeSeriesArray.push(timeSeries); + }, + ], + unit: + (metric as unknown as DistributionMetric).descriptor.unit || 'ms', // Default to 'ms' if no unit is specified + }; + timeSeriesArray.push(timeSeries); + } } } } From 7fe9a4679f57794e8663c743f39dc95ae5fa8367 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 14:27:35 -0500 Subject: [PATCH 256/448] Pull timeseries into one variable Solve the descriptor issue --- src/client-side-metrics/exporter.ts | 79 ++++++++++++++--------------- 1 file changed, 37 insertions(+), 42 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 6face5806..3b4886ee9 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -252,9 +252,9 @@ export function metricsToRequest(exportArgs: ExportInput) { zone: exportArgs.resource._syncAttributes['monitored_resource.zone'], }; for (const scopeMetrics of exportArgs.scopeMetrics) { - for (const metric of scopeMetrics.metrics) { - const metricName = metric.descriptor.name; - for (const dataPoint of metric.dataPoints) { + for (const scopeMetric of scopeMetrics.metrics) { + const metricName = scopeMetric.descriptor.name; + for (const dataPoint of scopeMetric.dataPoints) { const value = dataPoint.value; // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; @@ -289,50 +289,45 @@ export function metricsToRequest(exportArgs: ExportInput) { seconds: dataPoint.startTime[0], }, }; - if (isCounterValue(value)) { - const timeSeries = { - metric, - resource, - valueType: 'INT64', - points: [ - { - interval, - value: { - int64Value: dataPoint.value, + const timeSeries = 
isCounterValue(value)
+          ? {
+              metric,
+              resource,
+              valueType: 'INT64',
+              points: [
+                {
+                  interval,
+                  value: {
+                    int64Value: dataPoint.value,
+                  },
+                },
+              ],
+            }
+          : {
+              metric,
+              resource,
+              metricKind: 'CUMULATIVE',
+              valueType: 'DISTRIBUTION',
+              points: [
+                {
+                  interval,
+                  value: {
+                    distributionValue: {
+                      count: String(value.count),
+                      mean: value.count ? value.sum / value.count : 0,
+                      bucketOptions: {
+                        explicitBuckets: {
+                          bounds: value.buckets.boundaries,
+                        },
+                      },
+                      bucketCounts: value.buckets.counts.map(String),
+                    },
+                  },
+                },
+              ],
+              unit: scopeMetric.descriptor.unit || 'ms', // Default to 'ms' if no unit is specified
+            };
+        timeSeriesArray.push(timeSeries);
       }
     }
   }

From 26b9ca7e9c1580ea542749571c5dd2a9b7271c96 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Thu, 6 Mar 2025 14:35:51 -0500
Subject: [PATCH 257/448] Eliminate an unused import

---
 src/client-side-metrics/exporter.ts | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts
index 3b4886ee9..8fe4e95be 100644
--- a/src/client-side-metrics/exporter.ts
+++ b/src/client-side-metrics/exporter.ts
@@ -18,10 +18,6 @@ import {ServiceError} from 'google-gax';
 import {MetricServiceClient} from '@google-cloud/monitoring';
 import {google} from '@google-cloud/monitoring/build/protos/protos';
 import ICreateTimeSeriesRequest = google.monitoring.v3.ICreateTimeSeriesRequest;
-import {
-  CONNECTIIVTY_ERROR_COUNT,
-  RETRY_COUNT_NAME,
-} from '../../test-common/expected-otel-export-input';

 export interface ExportResult {
   code: number;

From 29ef6b0ccd977a6e83a6b57f760769bb369ce19a Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Thu, 6 Mar 2025 14:52:18 -0500
Subject: [PATCH 258/448] Add a comment that explains the usefulness of each
 metric attribute category

---
 src/client-side-metrics/exporter.ts | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts
index 8fe4e95be..62774c237 100644
--- a/src/client-side-metrics/exporter.ts
+++ b/src/client-side-metrics/exporter.ts
@@ -252,9 +252,14 @@ export function metricsToRequest(exportArgs: ExportInput) {
       const metricName = scopeMetric.descriptor.name;
       for (const dataPoint of scopeMetric.dataPoints) {
         const value = dataPoint.value;
-        // Extract attributes to labels based on their intended target (resource or metric)
         const allAttributes = dataPoint.attributes;
         const streaming = allAttributes.streamingOperation;
+        /*
+        metricLabels are built from the OpenTelemetry attributes that are set
+        when a data point is recorded. This means that for one metric there may
+        be multiple time series with different attributes, but the resource
+        labels will always be the same for a particular export call.
+ */ const metricLabels = Object.assign( { app_profile: allAttributes.appProfileId, From 8fd59f446d13ae765de1344713d449d1a58f086c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 16:34:59 -0500 Subject: [PATCH 259/448] Remove the gce instance setting --- src/client-side-metrics/gcp-metrics-handler.ts | 1 - test-common/expected-otel-export-input.ts | 2 -- 2 files changed, 3 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index c9a4a1235..edb3b6864 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -129,7 +129,6 @@ export class GCPMetricsHandler implements IMetricsHandler { resource: new Resources.Resource({ 'service.name': 'Cloud Bigtable Table', 'cloud.provider': 'gcp', - 'cloud.platform': 'gce_instance', 'cloud.resource_manager.project_id': data.projectId, 'monitored_resource.type': 'bigtable_client_raw', 'monitored_resource.project_id': data.projectId, diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 10fc26139..bce9d5121 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -759,7 +759,6 @@ export const expectedOtelExportInput = { 'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.version': '1.30.1', 'cloud.provider': 'gcp', - 'cloud.platform': 'gce_instance', 'cloud.resource_manager.project_id': 'my-project', 'monitored_resource.type': 'bigtable_client_raw', 'monitored_resource.project_id': 'my-project', @@ -775,7 +774,6 @@ export const expectedOtelExportInput = { 'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.version': '1.30.1', 'cloud.provider': 'gcp', - 'cloud.platform': 'gce_instance', 'cloud.resource_manager.project_id': 'my-project', 'monitored_resource.type': 'bigtable_client_raw', 'monitored_resource.project_id': 'my-project', From 183bdb7c8a4552246e6cf02cca0149baa2d28e2e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 16:38:17 -0500 Subject: [PATCH 260/448] Eliminate the gcp cloud provider setting --- src/client-side-metrics/gcp-metrics-handler.ts | 1 - test-common/expected-otel-export-input.ts | 2 -- 2 files changed, 3 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index edb3b6864..9ad1c3f78 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -128,7 +128,6 @@ export class GCPMetricsHandler implements IMetricsHandler { views: viewList, resource: new Resources.Resource({ 'service.name': 'Cloud Bigtable Table', - 'cloud.provider': 'gcp', 'cloud.resource_manager.project_id': data.projectId, 'monitored_resource.type': 'bigtable_client_raw', 'monitored_resource.project_id': data.projectId, diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index bce9d5121..0d19445f3 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -758,7 +758,6 @@ export const expectedOtelExportInput = { 'telemetry.sdk.language': 'nodejs', 'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.version': '1.30.1', - 'cloud.provider': 'gcp', 'cloud.resource_manager.project_id': 'my-project', 'monitored_resource.type': 'bigtable_client_raw', 'monitored_resource.project_id': 'my-project', @@ -773,7 +772,6 @@ export const expectedOtelExportInput = { 'telemetry.sdk.language': 'nodejs', 
'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.version': '1.30.1', - 'cloud.provider': 'gcp', 'cloud.resource_manager.project_id': 'my-project', 'monitored_resource.type': 'bigtable_client_raw', 'monitored_resource.project_id': 'my-project', From 92059aa8e034a1bbc4c4b40c9eb2d64d9196fa18 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 16:38:32 -0500 Subject: [PATCH 261/448] run all tests --- test/metrics-collector/metricsToRequest.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index a752ce47f..e77917d79 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -21,7 +21,7 @@ import { } from '../../test-common/expected-otel-export-input'; describe('Bigtable/metricsToRequest', () => { - it.only('Converts an otel request to a request ready for the metric service client', () => { + it('Converts an otel request to a request ready for the metric service client', () => { const convertedValue = metricsToRequest(expectedOtelExportInput); assert.deepStrictEqual( convertedValue.timeSeries.length, From af3aa73f5832f5980f356ab29011a97b4375d877 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 17:17:37 -0500 Subject: [PATCH 262/448] Eliminate duplicate project id from monitored resource --- src/client-side-metrics/exporter.ts | 5 +---- src/client-side-metrics/gcp-metrics-handler.ts | 6 ++---- test-common/expected-otel-export-input.ts | 2 -- 3 files changed, 3 insertions(+), 10 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 62774c237..2aa1f9816 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -167,9 +167,6 @@ type CounterMetric = Metric; */ export interface ExportInput { resource: { - _attributes: { - 'cloud.resource_manager.project_id': string; - }; _syncAttributes: { 'monitored_resource.type': string; 'monitored_resource.project_id': string; @@ -333,7 +330,7 @@ export function metricsToRequest(exportArgs: ExportInput) { } } return { - name: `projects/${exportArgs.resource._attributes['cloud.resource_manager.project_id']}`, + name: `projects/${exportArgs.resource._syncAttributes['monitored_resource.project_id']}`, timeSeries: timeSeriesArray, }; } diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 9ad1c3f78..67dadab55 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -128,7 +128,6 @@ export class GCPMetricsHandler implements IMetricsHandler { views: viewList, resource: new Resources.Resource({ 'service.name': 'Cloud Bigtable Table', - 'cloud.resource_manager.project_id': data.projectId, 'monitored_resource.type': 'bigtable_client_raw', 'monitored_resource.project_id': data.projectId, 'monitored_resource.instance_id': data.instanceId, @@ -139,9 +138,8 @@ export class GCPMetricsHandler implements IMetricsHandler { readers: [ // Register the exporter new PeriodicExportingMetricReader({ - // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by - // Cloud Monitoring. - exportIntervalMillis: 1_000, + // Export metrics every 60 seconds. 
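
Cloud Monitoring enforces a minimum sample period (the comment being removed put it at five seconds), so the earlier 1-second push interval sat below that floor; 60 seconds leaves ample headroom. The reader wiring on its own, assuming any PushMetricExporter implementation:

import {
  PeriodicExportingMetricReader,
  PushMetricExporter,
} from '@opentelemetry/sdk-metrics';

declare const exporter: PushMetricExporter; // assumed: any push exporter

// Pushes whatever the SDK has aggregated every 60 seconds, comfortably
// above Cloud Monitoring's minimum sample period.
const reader = new PeriodicExportingMetricReader({
  exporter,
  exportIntervalMillis: 60_000,
});
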
+ exportIntervalMillis: 60_000, exporter: this.exporter, }), ], diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 0d19445f3..86f4e6ad5 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -758,7 +758,6 @@ export const expectedOtelExportInput = { 'telemetry.sdk.language': 'nodejs', 'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.version': '1.30.1', - 'cloud.resource_manager.project_id': 'my-project', 'monitored_resource.type': 'bigtable_client_raw', 'monitored_resource.project_id': 'my-project', 'monitored_resource.instance_id': 'fakeInstanceId', @@ -772,7 +771,6 @@ export const expectedOtelExportInput = { 'telemetry.sdk.language': 'nodejs', 'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.version': '1.30.1', - 'cloud.resource_manager.project_id': 'my-project', 'monitored_resource.type': 'bigtable_client_raw', 'monitored_resource.project_id': 'my-project', 'monitored_resource.instance_id': 'fakeInstanceId', From 422060e0e02433ae68dee3878d8ed17c3edb34f1 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 17:22:24 -0500 Subject: [PATCH 263/448] Change the two metric names --- src/client-side-metrics/gcp-metrics-handler.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 67dadab55..a21564ecb 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -176,7 +176,7 @@ export class GCPMetricsHandler implements IMetricsHandler { } ), applicationBlockingLatencies: meter.createHistogram( - 'bigtable.googleapis.com/internal/client/application_blocking_latencies', + 'bigtable.googleapis.com/internal/client/application_latencies', { description: 'The time from when the client receives the response to a request until the application reads the response. This metric is most relevant for ReadRows requests. The start and stop times for this metric depend on the way that you send the read request; see Application blocking latencies timer examples for details.', @@ -217,7 +217,7 @@ export class GCPMetricsHandler implements IMetricsHandler { } ), clientBlockingLatencies: meter.createHistogram( - 'bigtable.googleapis.com/internal/client/client_blocking_latencies', + 'bigtable.googleapis.com/internal/client/throttling_latencies', { description: 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', From f93a7211b38362ab685418dac5ff6c5a4fa6600f Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Mar 2025 09:59:48 -0500 Subject: [PATCH 264/448] Extend the timeout so that the exporter has chance to work --- system-test/gcp-metrics-handler.ts | 2 +- test/metrics-collector/gcp-metrics-handler.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 7ab9951aa..e112f7ec9 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -35,7 +35,7 @@ describe('Bigtable/GCPMetricsHandler', () => { the test as it is sleeping before the GCPMetricsHandler has a chance to export the data. */ - const timeout = setTimeout(() => {}, 30000); + const timeout = setTimeout(() => {}, 120000); /* The exporter is called every x seconds, but we only want to test the value it receives once. 
Since done cannot be called multiple times in mocha, diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 425851e86..c22b32ea9 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -65,7 +65,7 @@ describe('Bigtable/GCPMetricsHandler', () => { the test as it is sleeping before the GCPMetricsHandler has a chance to export the data. */ - const timeout = setTimeout(() => {}, 30000); + const timeout = setTimeout(() => {}, 120000); /* The exporter is called every x seconds, but we only want to test the value it receives once. Since done cannot be called multiple times in mocha, From ea230aa1bd5b62443a2be53db11cc285d3de9411 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Mar 2025 11:31:36 -0500 Subject: [PATCH 265/448] Use spread syntax --- .../gcp-metrics-handler.ts | 50 ++++++++----------- 1 file changed, 20 insertions(+), 30 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index a21564ecb..70110fc9a 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -244,28 +244,22 @@ export class GCPMetricsHandler implements IMetricsHandler { cluster: data.metricsCollectorData.cluster, zone: data.metricsCollectorData.zone, }); - this.otelMetrics?.operationLatencies.record(data.operationLatency, { - appProfileId: data.metricsCollectorData.appProfileId, - methodName: data.metricsCollectorData.methodName, - clientUid: data.metricsCollectorData.clientUid, - finalOperationStatus: data.finalOperationStatus, - streamingOperation: data.streamingOperation, - clientName: data.clientName, - }); - this.otelMetrics?.retryCount.add(data.retryCount, { - appProfileId: data.metricsCollectorData.appProfileId, - methodName: data.metricsCollectorData.methodName, - clientUid: data.metricsCollectorData.clientUid, - finalOperationStatus: data.finalOperationStatus, - clientName: data.clientName, - }); - this.otelMetrics?.firstResponseLatencies.record(data.firstResponseLatency, { + const commonAttributes = { appProfileId: data.metricsCollectorData.appProfileId, methodName: data.metricsCollectorData.methodName, clientUid: data.metricsCollectorData.clientUid, finalOperationStatus: data.finalOperationStatus, clientName: data.clientName, + }; + this.otelMetrics?.operationLatencies.record(data.operationLatency, { + streamingOperation: data.streamingOperation, + ...commonAttributes, }); + this.otelMetrics?.retryCount.add(data.retryCount, commonAttributes); + this.otelMetrics?.firstResponseLatencies.record( + data.firstResponseLatency, + commonAttributes + ); } /** @@ -282,28 +276,24 @@ export class GCPMetricsHandler implements IMetricsHandler { cluster: data.metricsCollectorData.cluster, zone: data.metricsCollectorData.zone, }); - this.otelMetrics?.attemptLatencies.record(data.attemptLatency, { - appProfileId: data.metricsCollectorData.appProfileId, - methodName: data.metricsCollectorData.methodName, - clientUid: data.metricsCollectorData.clientUid, - attemptStatus: data.attemptStatus, - streamingOperation: data.streamingOperation, - clientName: data.clientName, - }); - this.otelMetrics?.connectivityErrorCount.add(data.connectivityErrorCount, { + const commonAttributes = { appProfileId: data.metricsCollectorData.appProfileId, methodName: data.metricsCollectorData.methodName, clientUid: data.metricsCollectorData.clientUid, attemptStatus: data.attemptStatus, clientName: 
data.clientName, + }; + this.otelMetrics?.attemptLatencies.record(data.attemptLatency, { + streamingOperation: data.streamingOperation, + ...commonAttributes, }); + this.otelMetrics?.connectivityErrorCount.add( + data.connectivityErrorCount, + commonAttributes + ); this.otelMetrics?.serverLatencies.record(data.serverLatency, { - appProfileId: data.metricsCollectorData.appProfileId, - methodName: data.metricsCollectorData.methodName, - clientUid: data.metricsCollectorData.clientUid, - attemptStatus: data.attemptStatus, streamingOperation: data.streamingOperation, - clientName: data.clientName, + ...commonAttributes, }); } } From 5ecfb707851c453ed39e3e82c8a75a709cb5b518 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Mar 2025 11:33:53 -0500 Subject: [PATCH 266/448] Changed metric to otel instruments --- src/client-side-metrics/gcp-metrics-handler.ts | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 70110fc9a..56f649b1e 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -70,7 +70,7 @@ interface MonitoredResourceData { */ export class GCPMetricsHandler implements IMetricsHandler { private initialized = false; - private otelMetrics?: Metrics; + private otelInstruments?: Metrics; private exporter: PushMetricExporter; /** @@ -145,7 +145,7 @@ export class GCPMetricsHandler implements IMetricsHandler { ], }); const meter = meterProvider.getMeter('bigtable.googleapis.com'); - this.otelMetrics = { + this.otelInstruments = { operationLatencies: meter.createHistogram( 'bigtable.googleapis.com/internal/client/operation_latencies', { @@ -251,12 +251,12 @@ export class GCPMetricsHandler implements IMetricsHandler { finalOperationStatus: data.finalOperationStatus, clientName: data.clientName, }; - this.otelMetrics?.operationLatencies.record(data.operationLatency, { + this.otelInstruments?.operationLatencies.record(data.operationLatency, { streamingOperation: data.streamingOperation, ...commonAttributes, }); - this.otelMetrics?.retryCount.add(data.retryCount, commonAttributes); - this.otelMetrics?.firstResponseLatencies.record( + this.otelInstruments?.retryCount.add(data.retryCount, commonAttributes); + this.otelInstruments?.firstResponseLatencies.record( data.firstResponseLatency, commonAttributes ); @@ -283,15 +283,15 @@ export class GCPMetricsHandler implements IMetricsHandler { attemptStatus: data.attemptStatus, clientName: data.clientName, }; - this.otelMetrics?.attemptLatencies.record(data.attemptLatency, { + this.otelInstruments?.attemptLatencies.record(data.attemptLatency, { streamingOperation: data.streamingOperation, ...commonAttributes, }); - this.otelMetrics?.connectivityErrorCount.add( + this.otelInstruments?.connectivityErrorCount.add( data.connectivityErrorCount, commonAttributes ); - this.otelMetrics?.serverLatencies.record(data.serverLatency, { + this.otelInstruments?.serverLatencies.record(data.serverLatency, { streamingOperation: data.streamingOperation, ...commonAttributes, }); From 303c57c37d273064a79037d9367a59838e89f1bd Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Mar 2025 12:01:22 -0500 Subject: [PATCH 267/448] Remove optional on Otel instruments --- .../gcp-metrics-handler.ts | 23 +++++++++---------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 
56f649b1e..5550fc90d 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -69,7 +69,6 @@ interface MonitoredResourceData { * associating them with relevant attributes for detailed analysis in Cloud Monitoring. */ export class GCPMetricsHandler implements IMetricsHandler { - private initialized = false; private otelInstruments?: Metrics; private exporter: PushMetricExporter; @@ -97,9 +96,8 @@ export class GCPMetricsHandler implements IMetricsHandler { * which will be provided to the exporter in every export call. * */ - private initialize(data: MonitoredResourceData) { - if (!this.initialized) { - this.initialized = true; + private initialize(data: MonitoredResourceData): Metrics { + if (!this.otelInstruments) { const latencyBuckets = [ 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, @@ -229,6 +227,7 @@ export class GCPMetricsHandler implements IMetricsHandler { ), }; } + return this.otelInstruments; } /** @@ -237,7 +236,7 @@ export class GCPMetricsHandler implements IMetricsHandler { * @param {OnOperationCompleteData} data Data related to the completed operation. */ onOperationComplete(data: OnOperationCompleteData) { - this.initialize({ + const otelInstruments = this.initialize({ projectId: data.projectId, instanceId: data.metricsCollectorData.instanceId, table: data.metricsCollectorData.table, @@ -251,12 +250,12 @@ export class GCPMetricsHandler implements IMetricsHandler { finalOperationStatus: data.finalOperationStatus, clientName: data.clientName, }; - this.otelInstruments?.operationLatencies.record(data.operationLatency, { + otelInstruments.operationLatencies.record(data.operationLatency, { streamingOperation: data.streamingOperation, ...commonAttributes, }); - this.otelInstruments?.retryCount.add(data.retryCount, commonAttributes); - this.otelInstruments?.firstResponseLatencies.record( + otelInstruments.retryCount.add(data.retryCount, commonAttributes); + otelInstruments?.firstResponseLatencies.record( data.firstResponseLatency, commonAttributes ); @@ -269,7 +268,7 @@ export class GCPMetricsHandler implements IMetricsHandler { * @param {OnAttemptCompleteData} data Data related to the completed attempt. 
*/ onAttemptComplete(data: OnAttemptCompleteData) { - this.initialize({ + const otelInstruments = this.initialize({ projectId: data.projectId, instanceId: data.metricsCollectorData.instanceId, table: data.metricsCollectorData.table, @@ -283,15 +282,15 @@ export class GCPMetricsHandler implements IMetricsHandler { attemptStatus: data.attemptStatus, clientName: data.clientName, }; - this.otelInstruments?.attemptLatencies.record(data.attemptLatency, { + otelInstruments.attemptLatencies.record(data.attemptLatency, { streamingOperation: data.streamingOperation, ...commonAttributes, }); - this.otelInstruments?.connectivityErrorCount.add( + otelInstruments.connectivityErrorCount.add( data.connectivityErrorCount, commonAttributes ); - this.otelInstruments?.serverLatencies.record(data.serverLatency, { + otelInstruments.serverLatencies.record(data.serverLatency, { streamingOperation: data.streamingOperation, ...commonAttributes, }); From a8229612e28d483c707784790caf998ed4621584 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Mar 2025 12:02:02 -0500 Subject: [PATCH 268/448] Rename initialize to getMetrics --- src/client-side-metrics/gcp-metrics-handler.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 5550fc90d..c27327a3d 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -96,7 +96,7 @@ export class GCPMetricsHandler implements IMetricsHandler { * which will be provided to the exporter in every export call. * */ - private initialize(data: MonitoredResourceData): Metrics { + private getMetrics(data: MonitoredResourceData): Metrics { if (!this.otelInstruments) { const latencyBuckets = [ 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, @@ -236,7 +236,7 @@ export class GCPMetricsHandler implements IMetricsHandler { * @param {OnOperationCompleteData} data Data related to the completed operation. */ onOperationComplete(data: OnOperationCompleteData) { - const otelInstruments = this.initialize({ + const otelInstruments = this.getMetrics({ projectId: data.projectId, instanceId: data.metricsCollectorData.instanceId, table: data.metricsCollectorData.table, @@ -268,7 +268,7 @@ export class GCPMetricsHandler implements IMetricsHandler { * @param {OnAttemptCompleteData} data Data related to the completed attempt. 
*/ onAttemptComplete(data: OnAttemptCompleteData) { - const otelInstruments = this.initialize({ + const otelInstruments = this.getMetrics({ projectId: data.projectId, instanceId: data.metricsCollectorData.instanceId, table: data.metricsCollectorData.table, From 31fe18477abb2cb6f570c451eaa5629a000f4b1c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Mar 2025 15:43:22 -0500 Subject: [PATCH 269/448] Pin promisify to version 4 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 903a4b89c..a200f3687 100644 --- a/package.json +++ b/package.json @@ -52,7 +52,7 @@ "@google-cloud/opentelemetry-resource-util": "^2.4.0", "@google-cloud/precise-date": "^4.0.0", "@google-cloud/projectify": "^4.0.0", - "@google-cloud/promisify": "^4.0.0", + "@google-cloud/promisify": "4.0.0", "@opentelemetry/api": "^1.9.0", "@opentelemetry/resources": "^1.30.0", "@opentelemetry/sdk-metrics": "^1.30.0", From 30152d610949bb6360082a8270062c32ff3306dd Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Mar 2025 16:07:18 -0500 Subject: [PATCH 270/448] cast to string - compiler errors --- src/instance.ts | 2 +- src/table.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/instance.ts b/src/instance.ts index 7d5569bd5..420d9cbf5 100644 --- a/src/instance.ts +++ b/src/instance.ts @@ -1310,7 +1310,7 @@ Please use the format 'my-instance' or '${bigtable.projectName}/instances/my-ins : callback!; if (policy.etag !== null && policy.etag !== undefined) { - (policy.etag as {} as Buffer) = Buffer.from(policy.etag); + (policy.etag as {} as Buffer) = Buffer.from(policy.etag as string); } const reqOpts = { resource: this.name, diff --git a/src/table.ts b/src/table.ts index ea721b3ac..d59eaf0ca 100644 --- a/src/table.ts +++ b/src/table.ts @@ -1042,7 +1042,7 @@ export class Table extends TabularApiSurface { : callback!; if (policy.etag !== null && policy.etag !== undefined) { - (policy.etag as {} as Buffer) = Buffer.from(policy.etag); + (policy.etag as {} as Buffer) = Buffer.from(policy.etag as string); } const reqOpts = { resource: this.name, From ccaaa0731aefa86befd582d195f55bead0599f55 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Mar 2025 10:08:02 -0400 Subject: [PATCH 271/448] Cast to string when building buffer --- src/instance.ts | 2 +- src/table.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/instance.ts b/src/instance.ts index 7d5569bd5..420d9cbf5 100644 --- a/src/instance.ts +++ b/src/instance.ts @@ -1310,7 +1310,7 @@ Please use the format 'my-instance' or '${bigtable.projectName}/instances/my-ins : callback!; if (policy.etag !== null && policy.etag !== undefined) { - (policy.etag as {} as Buffer) = Buffer.from(policy.etag); + (policy.etag as {} as Buffer) = Buffer.from(policy.etag as string); } const reqOpts = { resource: this.name, diff --git a/src/table.ts b/src/table.ts index ea721b3ac..d59eaf0ca 100644 --- a/src/table.ts +++ b/src/table.ts @@ -1042,7 +1042,7 @@ export class Table extends TabularApiSurface { : callback!; if (policy.etag !== null && policy.etag !== undefined) { - (policy.etag as {} as Buffer) = Buffer.from(policy.etag); + (policy.etag as {} as Buffer) = Buffer.from(policy.etag as string); } const reqOpts = { resource: this.name, From 236c42a2db0929c2dde37df57b4614b01752049d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Mar 2025 10:09:38 -0400 Subject: [PATCH 272/448] Remove a TODO - it is done --- src/client-side-metrics/operation-metrics-collector.ts | 2 -- 
1 file changed, 2 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 57fbea00f..9504f1c42 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -22,8 +22,6 @@ const root = gax.protobuf.loadSync( ); const ResponseParams = root.lookupType('ResponseParams'); -// TODO: Add guards in the metrics collector. - /** * An interface representing a tabular API surface, such as a Bigtable table. */ From 6c28f253a82f5378fd2fc9ee364899d9020d4081 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Mar 2025 10:34:04 -0400 Subject: [PATCH 273/448] Delete the test that writes to metrics collected --- metricsCollected.txt | 0 system-test/client-side-metrics2.ts | 143 ---------------------------- 2 files changed, 143 deletions(-) delete mode 100644 metricsCollected.txt delete mode 100644 system-test/client-side-metrics2.ts diff --git a/metricsCollected.txt b/metricsCollected.txt deleted file mode 100644 index e69de29bb..000000000 diff --git a/system-test/client-side-metrics2.ts b/system-test/client-side-metrics2.ts deleted file mode 100644 index b613abd9b..000000000 --- a/system-test/client-side-metrics2.ts +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2024 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import {describe, it, before, after} from 'mocha'; -import * as fs from 'node:fs'; -import {Bigtable} from '../src'; -import { - ITabularApiSurface, - OperationMetricsCollector, -} from '../src/client-side-metrics/operation-metrics-collector'; -import {IMetricsHandler} from '../src/client-side-metrics/metrics-handler'; -import * as proxyquire from 'proxyquire'; -import {TabularApiSurface} from '../src/tabular-api-surface'; -import {google} from '../protos/protos'; - -class Logger { - private messages = ''; - - log(message: string) { - console.log(message); - this.messages = this.messages + message + '\n'; - } - - getMessages() { - return this.messages; - } -} - -const logger = new Logger(); - -/* -class TestMetricsCollector extends OperationMetricsCollector { - constructor( - tabularApiSurface: ITabularApiSurface, - metricsHandlers: IMetricsHandler[], - methodName: MethodName, - projectId?: string - ) { - super( - tabularApiSurface, - metricsHandlers, - methodName, - projectId, - new TestDateProvider(logger) - ); - } -} - */ - -describe('Bigtable/MetricsCollector', () => { - /* - const FakeTabularApiSurface = proxyquire('../src/tabular-api-surface.js', { - './client-side-metrics/operation-metrics-collector': { - MetricsCollector: TestMetricsCollector, - }, - }).TabularApiSurface; - const FakeTable: TabularApiSurface = proxyquire('../src/table.js', { - './tabular-api-surface.js': {Table: FakeTabularApiSurface}, - }).Table; - const FakeInstance = proxyquire('../src/instance.js', { - './table.js': {Table: FakeTable}, - }).Instance; - const FakeBigtable = proxyquire('../src/index.js', { - './instance.js': {Table: FakeInstance}, - }).Bigtable; - */ - const bigtable = new Bigtable(); - const instanceId = 'emulator-test-instance'; - const tableId = 'my-table'; - const columnFamilyId = 'cf1'; - - before(async () => { - // TODO: Change `any` - const instance = bigtable.instance(instanceId); - try { - const [instanceInfo] = await instance.exists(); - if (!instanceInfo) { - const [, operation] = await instance.create({ - clusters: { - id: 'fake-cluster3', - location: 'us-west1-c', - nodes: 1, - }, - }); - await operation.promise(); - } - - const table = instance.table(tableId); - const [tableExists] = await table.exists(); - if (!tableExists) { - await table.create({families: [columnFamilyId]}); // Create column family - } else { - // Check if column family exists and create it if not. - const [families] = await table.getFamilies(); - - if ( - !families.some((family: {id: string}) => family.id === columnFamilyId) - ) { - await table.createFamily(columnFamilyId); - } - } - } catch (error) { - console.error('Error during setup:', error); - // Consider re-throwing error, to actually stop tests. 
- } - }); - - after(async () => { - const instance = bigtable.instance(instanceId); - await instance.delete({}); - }); - - it('should read rows after inserting data', async () => { - const instance = bigtable.instance(instanceId); - const table = instance.table(tableId); - for (let i = 0; i < 100; i++) { - await table.getRows(); - } - const myString = logger.getMessages(); // 'This is the string I want to write to the file.'; - const filename = 'metricsCollected.txt'; - console.log('waiting'); - await new Promise(resolve => { - setTimeout(async () => { - resolve('value'); - }, 30_000); - }); - console.log('stop waiting'); - - // Write the string to the file - fs.writeFileSync(filename, myString); - }); -}); From 59f9c5190de2b7ac0d4eea74b00d2718654072d2 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Mar 2025 10:36:48 -0400 Subject: [PATCH 274/448] eliminate unnecessary spacing --- src/tabular-api-surface.ts | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 0e7d60b8f..94dc5c852 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -236,11 +236,11 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); rowKeys = options.keys || []; /* - The following line of code sets the timeout if it was provided while - creating the client. This will be used to determine if the client should - retry on DEADLINE_EXCEEDED errors. Eventually, this will be handled - downstream in google-gax. - */ + The following line of code sets the timeout if it was provided while + creating the client. This will be used to determine if the client should + retry on DEADLINE_EXCEEDED errors. Eventually, this will be handled + downstream in google-gax. + */ const timeout = opts?.gaxOptions?.timeout || (this?.bigtable?.options?.BigtableClient?.clientConfig?.interfaces && @@ -297,14 +297,14 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); const row = event; if (TableUtils.lessThanOrEqualTo(row.id, lastRowKey)) { /* - Sometimes duplicate rows reach this point. To avoid delivering - duplicate rows to the user, rows are thrown away if they don't exceed - the last row key. We can expect each row to reach this point and rows - are delivered in order so if the last row key equals or exceeds the - row id then we know data for this row has already reached this point - and been delivered to the user. In this case we want to throw the row - away and we do not want to deliver this row to the user again. - */ + Sometimes duplicate rows reach this point. To avoid delivering + duplicate rows to the user, rows are thrown away if they don't exceed + the last row key. We can expect each row to reach this point and rows + are delivered in order so if the last row key equals or exceeds the + row id then we know data for this row has already reached this point + and been delivered to the user. In this case we want to throw the row + away and we do not want to deliver this row to the user again. 
+ */ callback(); return; } From ef322d545d216e5690cb1bcc75f40418d744d906 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Mar 2025 10:41:11 -0400 Subject: [PATCH 275/448] Remove console logs from the code --- src/tabular-api-surface.ts | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 94dc5c852..f77a04552 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -343,7 +343,6 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); } return originalEnd(chunk, encoding, cb); }; - // this.bigtable.getProjectId_((err, projectId) => { const metricsCollector = new OperationMetricsCollector( this, [new GCPMetricsHandler(new CloudMonitoringExporter())], @@ -533,7 +532,6 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); .on( 'metadata', (metadata: {internalRepr: Map; options: {}}) => { - console.log(`event metadata: ${this.bigtable.projectId}`); metricsCollector.onMetadataReceived(metadata); } ) @@ -542,13 +540,11 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); (status: { metadata: {internalRepr: Map; options: {}}; }) => { - console.log(`event status: ${this.bigtable.projectId}`); metricsCollector.onStatusMetadataReceived(status); } ); rowStream .on('error', (error: ServiceError) => { - console.log(`event error: ${this.bigtable.projectId}`); rowStreamUnpipe(rowStream, userStream); activeRequestStream = null; if (IGNORED_STATUS_CODES.has(error.code)) { @@ -625,7 +621,6 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); rowStreamPipe(rowStream, userStream); }; makeNewRequest(); - // }); return userStream; } From b17680c9f6179edd5ecb287c0ee1e792a68ee799 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Mar 2025 10:43:21 -0400 Subject: [PATCH 276/448] Eliminate the TODO in index --- src/index.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/index.ts b/src/index.ts index d822787ba..835b28e92 100644 --- a/src/index.ts +++ b/src/index.ts @@ -910,7 +910,6 @@ export class Bigtable { gaxStream .on('error', stream.destroy.bind(stream)) .on('metadata', stream.emit.bind(stream, 'metadata')) - // TODO: Uncomment the next line after client-side metrics are well tested. .on('status', stream.emit.bind(stream, 'status')) .on('request', stream.emit.bind(stream, 'request')) .pipe(stream); From c5877228548b3af5afe32758a126c743e77e8bf6 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Mar 2025 10:46:53 -0400 Subject: [PATCH 277/448] Add comment to mention mocha --- system-test/client-side-metrics-to-gcm.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts index 316aac4b3..930eac9e9 100644 --- a/system-test/client-side-metrics-to-gcm.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -56,7 +56,7 @@ describe('Bigtable/MetricsCollector', () => { done(); resultCallback({code: 0}); } catch (error) { - // Code isn't 0 so report the original error. + // The code here isn't 0 so we report the original error to the mocha test runner. 
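+            // (Only the first done() call below is honored; mocha reports
+            // any further call as "done() called multiple times".)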
done(result);
            done(error);
          }
        }
From f00993c0c9dfefd2f8be6f220eacd431037e9494 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Thu, 13 Mar 2025 10:49:00 -0400
Subject: [PATCH 278/448] Add another comment for code 0

---
 system-test/client-side-metrics-to-gcm.ts | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts
index 930eac9e9..69e9ada22 100644
--- a/system-test/client-side-metrics-to-gcm.ts
+++ b/system-test/client-side-metrics-to-gcm.ts
@@ -52,6 +52,8 @@ describe('Bigtable/MetricsCollector', () => {
           exported = true;
           try {
             clearTimeout(timeout);
+            // The test passes when the code is 0 because that means the
+            // result from calling export was successful.
             assert.strictEqual(result.code, 0);
             done();
             resultCallback({code: 0});

From 8f61f4b3bcbe5a8f177f01e1feb7b3b28e083f64 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Thu, 13 Mar 2025 10:54:11 -0400
Subject: [PATCH 279/448] Add a comment clarifying what the test does

---
 system-test/client-side-metrics-to-gcm.ts | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts
index 69e9ada22..b6a73c75a 100644
--- a/system-test/client-side-metrics-to-gcm.ts
+++ b/system-test/client-side-metrics-to-gcm.ts
@@ -73,6 +73,12 @@ describe('Bigtable/MetricsCollector', () => {
     }
   }

+  /*
+  Below we mock out the table so that it sends the metrics to a test exporter.
+  The test exporter still sends the metrics to Google Cloud Monitoring, but it
+  also checks that the export succeeded, passing the test when the exporter
+  reports code 0.
+  */
   const FakeTabularApiSurface = proxyquire('../src/tabular-api-surface.js', {
     './client-side-metrics/gcp-metrics-handler': {
       GCPMetricsHandler: TestGCPMetricsHandler,
     },

From 4fb25eb3679540a49a6bd769aa45f9b4e88d61eb Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Thu, 13 Mar 2025 11:56:09 -0400
Subject: [PATCH 280/448] Pin promisify to solve the linting issues

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index 903a4b89c..a200f3687 100644
--- a/package.json
+++ b/package.json
@@ -52,7 +52,7 @@
     "@google-cloud/opentelemetry-resource-util": "^2.4.0",
     "@google-cloud/precise-date": "^4.0.0",
     "@google-cloud/projectify": "^4.0.0",
-    "@google-cloud/promisify": "^4.0.0",
+    "@google-cloud/promisify": "4.0.0",
     "@opentelemetry/api": "^1.9.0",
     "@opentelemetry/resources": "^1.30.0",
     "@opentelemetry/sdk-metrics": "^1.30.0",

From 5fafe96e356ed71ad59dfb1271b5606fa079f23a Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Thu, 13 Mar 2025 11:59:19 -0400
Subject: [PATCH 281/448] Rename test to ClientSideMetricsToMetricsHandler

Remove only
---
 system-test/client-side-metrics-to-metrics-handler.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/system-test/client-side-metrics-to-metrics-handler.ts b/system-test/client-side-metrics-to-metrics-handler.ts
index 19e296aa7..a759e9b1f 100644
--- a/system-test/client-side-metrics-to-metrics-handler.ts
+++ b/system-test/client-side-metrics-to-metrics-handler.ts
@@ -23,7 +23,7 @@ import * as assert from 'assert';
 import {TestMetricsHandler} from '../test-common/test-metrics-handler';
 import {OnOperationCompleteData} from '../src/client-side-metrics/metrics-handler';

-describe.only('Bigtable/MetricsCollector', () => {
+describe('Bigtable/ClientSideMetricsToMetricsHandler', () => {
   async function mockBigtable(projectId: string, done: mocha.Done) {
     class
TestGCPMetricsHandler extends TestMetricsHandler {
      onOperationComplete(data: OnOperationCompleteData) {
From 68c2a1a34e197801c7ee5e452e2d34813e1685c9 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Thu, 13 Mar 2025 12:00:11 -0400
Subject: [PATCH 282/448] Rename another test suite

Client side metrics to GCM
---
 system-test/client-side-metrics-to-gcm.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts
index b6a73c75a..fd5de588e 100644
--- a/system-test/client-side-metrics-to-gcm.ts
+++ b/system-test/client-side-metrics-to-gcm.ts
@@ -27,7 +27,7 @@ import {
 import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler';
 import * as mocha from 'mocha';

-describe('Bigtable/MetricsCollector', () => {
+describe('Bigtable/ClientSideMetricsToGCM', () => {
   async function mockBigtable(done: mocha.Done) {
     /*
     We need to create a timeout here because if we don't then mocha shuts down

From 1a2ace76d96b603576d7af4daf887e60f156bebc Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Thu, 13 Mar 2025 13:49:20 -0400
Subject: [PATCH 283/448] Remove these console logs

---
 src/tabular-api-surface.ts | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts
index f77a04552..d33cf9f4d 100644
--- a/src/tabular-api-surface.ts
+++ b/src/tabular-api-surface.ts
@@ -599,14 +599,12 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
         }
       })
       .on('data', _ => {
-        console.log(`event data: ${this.bigtable.projectId}`);
        // Reset error count after a successful read so the backoff
        // time won't keep increasing when a stream had multiple errors
        numConsecutiveErrors = 0;
        metricsCollector.onResponse(this.bigtable.projectId);
      })
      .on('end', () => {
-        console.log(`event end: ${this.bigtable.projectId}`);
        numRequestsMade++;
        activeRequestStream = null;
        metricsCollector.onAttemptComplete(

From 886f99912307f0dd899e1fc82a9c5e0e905e203e Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Thu, 13 Mar 2025 13:49:50 -0400
Subject: [PATCH 284/448] Add guards to the after hooks on this test

---
 system-test/client-side-metrics-to-gcm.ts     | 12 +++++++++---
 .../client-side-metrics-to-metrics-handler.ts | 10 ++++++++--
 2 files changed, 17 insertions(+), 5 deletions(-)

diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts
index fd5de588e..e9f14ec28 100644
--- a/system-test/client-side-metrics-to-gcm.ts
+++ b/system-test/client-side-metrics-to-gcm.ts
@@ -27,7 +27,7 @@ import {
 import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler';
 import * as mocha from 'mocha';

-describe('Bigtable/ClientSideMetricsToGCM', () => {
+describe.only('Bigtable/ClientSideMetricsToGCM', () => {
   async function mockBigtable(done: mocha.Done) {
     /*
     We need to create a timeout here because if we don't then mocha shuts down
@@ -136,8 +136,14 @@ describe('Bigtable/ClientSideMetricsToGCM', () => {
   });

   after(async () => {
-    const instance = bigtable.instance(instanceId);
-    await instance.delete({});
+    try {
+      // If the instance has been deleted already by another source, we don't
+      // want this after hook to block the continuous integration pipeline.
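+      // A stricter variant of this guard would swallow only NOT_FOUND and
+      // rethrow everything else, e.g. (sketch only; the ServiceError cast is
+      // an assumption about the error shape, and grpc code 5 is NOT_FOUND):
+      //   if ((e as ServiceError).code !== 5) throw e;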
+ const instance = bigtable.instance(instanceId); + await instance.delete({}); + } catch (e) { + console.warn('The instance has been deleted already'); + } }); it('should send the metrics to Google Cloud Monitoring for a ReadRows call', done => { diff --git a/system-test/client-side-metrics-to-metrics-handler.ts b/system-test/client-side-metrics-to-metrics-handler.ts index a759e9b1f..7d40e43ee 100644 --- a/system-test/client-side-metrics-to-metrics-handler.ts +++ b/system-test/client-side-metrics-to-metrics-handler.ts @@ -131,8 +131,14 @@ describe('Bigtable/ClientSideMetricsToMetricsHandler', () => { }); after(async () => { - const instance = bigtable.instance(instanceId); - await instance.delete({}); + try { + // If the instance has been deleted already by another source, we don't + // want this after hook to block the continuous integration pipeline. + const instance = bigtable.instance(instanceId); + await instance.delete({}); + } catch (e) { + console.warn('The instance has been deleted already'); + } }); it('should send the metrics to the metrics handler for a ReadRows call', done => { From bb3b177e560ba109da5e2b32e694a6a0aebf3063 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Mar 2025 13:54:00 -0400 Subject: [PATCH 285/448] Remove only --- system-test/client-side-metrics-to-gcm.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts index e9f14ec28..2f9c94d26 100644 --- a/system-test/client-side-metrics-to-gcm.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -27,7 +27,7 @@ import { import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler'; import * as mocha from 'mocha'; -describe.only('Bigtable/ClientSideMetricsToGCM', () => { +describe('Bigtable/ClientSideMetricsToGCM', () => { async function mockBigtable(done: mocha.Done) { /* We need to create a timeout here because if we don't then mocha shuts down From c3d918c8728cf0e3e1857401c5b228916f94c8f9 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Mar 2025 14:43:29 -0400 Subject: [PATCH 286/448] Increase the GCPMetricsHandler time --- system-test/gcp-metrics-handler.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 7ab9951aa..e112f7ec9 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -35,7 +35,7 @@ describe('Bigtable/GCPMetricsHandler', () => { the test as it is sleeping before the GCPMetricsHandler has a chance to export the data. */ - const timeout = setTimeout(() => {}, 30000); + const timeout = setTimeout(() => {}, 120000); /* The exporter is called every x seconds, but we only want to test the value it receives once. 
Since done cannot be called multiple times in mocha, From 99c62718bb18533ac85e35f8dc72c727505d076f Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Mar 2025 15:20:43 -0400 Subject: [PATCH 287/448] Fix samples test issue --- .../operation-metrics-collector.ts | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 9504f1c42..0a0f92f7a 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -17,10 +17,6 @@ import {IMetricsHandler} from './metrics-handler'; import {MethodName, StreamingState} from './client-side-metrics-attributes'; import {grpc} from 'google-gax'; import * as gax from 'google-gax'; -const root = gax.protobuf.loadSync( - './protos/google/bigtable/v2/response_params.proto' -); -const ResponseParams = root.lookupType('ResponseParams'); /** * An interface representing a tabular API surface, such as a Bigtable table. @@ -76,6 +72,11 @@ export class OperationMetricsCollector { private serverTime: number | null; private connectivityErrorCount: number; private streamingOperation: StreamingState; + // Define response params only when creating an OperationMetricsCollector + // in order to avoid unnecessary loading of modules. + private responseParams = gax.protobuf + .loadSync('./protos/google/bigtable/v2/response_params.proto') + .lookupType('ResponseParams'); /** * @param {ITabularApiSurface} tabularApiSurface Information about the Bigtable table being accessed. @@ -286,7 +287,7 @@ export class OperationMetricsCollector { INSTANCE_INFORMATION_KEY ) as Buffer[]; if (mappedValue && mappedValue[0]) { - const decodedValue = ResponseParams.decode( + const decodedValue = this.responseParams.decode( mappedValue[0], mappedValue[0].length ); From 3843e7bbc721b68f369deddb8f1adc98140671a2 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Mar 2025 15:54:22 -0400 Subject: [PATCH 288/448] Add a comment to test that works with the mock ser --- system-test/read-rows.ts | 38 ++++++++++++++++++++++++++++++++++---- 1 file changed, 34 insertions(+), 4 deletions(-) diff --git a/system-test/read-rows.ts b/system-test/read-rows.ts index 37beab74c..65f9574cf 100644 --- a/system-test/read-rows.ts +++ b/system-test/read-rows.ts @@ -25,6 +25,9 @@ import {EventEmitter} from 'events'; import {Test} from './testTypes'; import {ServiceError, GrpcClient, GoogleError, CallOptions} from 'google-gax'; import {PassThrough} from 'stream'; +import * as proxyquire from 'proxyquire'; +import {TabularApiSurface} from '../src/tabular-api-surface'; +import {Row} from '../src'; const {grpc} = new GrpcClient(); @@ -76,7 +79,32 @@ function rowResponse(rowKey: {}) { } describe('Bigtable/Table', () => { - const bigtable = new Bigtable(); + /** + * We have to mock out the metrics handler because the metrics handler with + * open telemetry causes clock.runAll() to throw an infinite loop error. This + * is most likely because of the periodic reader as it schedules pending + * events on the node event loop which conflicts with the sinon clock. 
+ */ + class TestGCPMetricsHandler { + onOperationComplete() {} + onAttemptComplete() {} + } + const FakeTabularApiSurface = proxyquire('../src/tabular-api-surface.js', { + './client-side-metrics/gcp-metrics-handler': { + GCPMetricsHandler: TestGCPMetricsHandler, + }, + }).TabularApiSurface; + const FakeTable: TabularApiSurface = proxyquire('../src/table.js', { + './tabular-api-surface.js': {TabularApiSurface: FakeTabularApiSurface}, + }).Table; + const FakeInstance = proxyquire('../src/instance.js', { + './table.js': {Table: FakeTable}, + }).Instance; + const FakeBigtable = proxyquire('../src/index.js', { + './instance.js': {Instance: FakeInstance}, + }).Bigtable; + + const bigtable = new FakeBigtable(); const INSTANCE_NAME = 'fake-instance2'; // eslint-disable-next-line @typescript-eslint/no-explicit-any (bigtable as any).grpcCredentials = grpc.credentials.createInsecure(); @@ -151,7 +179,7 @@ describe('Bigtable/Table', () => { rowKeysRead = []; requestedOptions = []; stub = sinon.stub(bigtable, 'request').callsFake(cfg => { - const reqOpts = cfg.reqOpts; + const reqOpts = (cfg as any).reqOpts; const requestOptions = {} as google.bigtable.v2.IRowSet; if (reqOpts.rows && reqOpts.rows.rowRanges) { requestOptions.rowRanges = reqOpts.rows.rowRanges.map( @@ -195,9 +223,11 @@ describe('Bigtable/Table', () => { responses = test.responses; TABLE.maxRetries = test.max_retries; TABLE.createReadStream(test.createReadStream_options) - .on('data', row => rowKeysRead[rowKeysRead.length - 1].push(row.id)) + .on('data', (row: Row) => + rowKeysRead[rowKeysRead.length - 1].push(row.id) + ) .on('end', () => (endCalled = true)) - .on('error', err => (error = err as ServiceError)); + .on('error', (err: ServiceError) => (error = err as ServiceError)); clock.runAll(); if (test.error) { From 05787e5600a45c8d3be4ffab0c857a698514543d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Mar 2025 16:10:41 -0400 Subject: [PATCH 289/448] Revert "Fix samples test issue" This reverts commit 99c62718bb18533ac85e35f8dc72c727505d076f. --- .../operation-metrics-collector.ts | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 0a0f92f7a..9504f1c42 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -17,6 +17,10 @@ import {IMetricsHandler} from './metrics-handler'; import {MethodName, StreamingState} from './client-side-metrics-attributes'; import {grpc} from 'google-gax'; import * as gax from 'google-gax'; +const root = gax.protobuf.loadSync( + './protos/google/bigtable/v2/response_params.proto' +); +const ResponseParams = root.lookupType('ResponseParams'); /** * An interface representing a tabular API surface, such as a Bigtable table. @@ -72,11 +76,6 @@ export class OperationMetricsCollector { private serverTime: number | null; private connectivityErrorCount: number; private streamingOperation: StreamingState; - // Define response params only when creating an OperationMetricsCollector - // in order to avoid unnecessary loading of modules. - private responseParams = gax.protobuf - .loadSync('./protos/google/bigtable/v2/response_params.proto') - .lookupType('ResponseParams'); /** * @param {ITabularApiSurface} tabularApiSurface Information about the Bigtable table being accessed. 
@@ -287,7 +286,7 @@ export class OperationMetricsCollector { INSTANCE_INFORMATION_KEY ) as Buffer[]; if (mappedValue && mappedValue[0]) { - const decodedValue = this.responseParams.decode( + const decodedValue = ResponseParams.decode( mappedValue[0], mappedValue[0].length ); From 78550a8b11106f5849b8ea2a3a75ccf36c67611f Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Mar 2025 16:22:20 -0400 Subject: [PATCH 290/448] Increase the timeout --- test/metrics-collector/gcp-metrics-handler.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 425851e86..c22b32ea9 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -65,7 +65,7 @@ describe('Bigtable/GCPMetricsHandler', () => { the test as it is sleeping before the GCPMetricsHandler has a chance to export the data. */ - const timeout = setTimeout(() => {}, 30000); + const timeout = setTimeout(() => {}, 120000); /* The exporter is called every x seconds, but we only want to test the value it receives once. Since done cannot be called multiple times in mocha, From ee022ce420415fe95234c46e143e193b811facab Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Mar 2025 16:22:42 -0400 Subject: [PATCH 291/448] Catch failures to load ResponseParams --- .../operation-metrics-collector.ts | 23 +++++++++++++++---- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 9504f1c42..7d9283339 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -17,10 +17,23 @@ import {IMetricsHandler} from './metrics-handler'; import {MethodName, StreamingState} from './client-side-metrics-attributes'; import {grpc} from 'google-gax'; import * as gax from 'google-gax'; -const root = gax.protobuf.loadSync( - './protos/google/bigtable/v2/response_params.proto' -); -const ResponseParams = root.lookupType('ResponseParams'); + +let ResponseParams: gax.protobuf.Type | null; +try { + /* + * Likely due to the Node 18 upgrade, the samples tests are failing with the + * error UnhandledPromiseRejectionWarning: Error: ENOENT: no such file or + * directory, open 'protos/google/bigtable/v2/response_params.proto'. Since + * these tests don't use this module we can suppress the error for now to + * unblock the CI pipeline. + */ + const root = gax.protobuf.loadSync( + './protos/google/bigtable/v2/response_params.proto' + ); + ResponseParams = root.lookupType('ResponseParams'); +} catch (e) { + ResponseParams = null; +} /** * An interface representing a tabular API surface, such as a Bigtable table. @@ -285,7 +298,7 @@ export class OperationMetricsCollector { const mappedValue = status.metadata.internalRepr.get( INSTANCE_INFORMATION_KEY ) as Buffer[]; - if (mappedValue && mappedValue[0]) { + if (mappedValue && mappedValue[0] && ResponseParams) { const decodedValue = ResponseParams.decode( mappedValue[0], mappedValue[0].length From 9584c69847316ac21852706ff827203fa852ebe0 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Mar 2025 17:07:42 -0400 Subject: [PATCH 292/448] Change the latency buckets Adjust the test fixtures as well to account for the latency bucket change. 
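
For reference, here is a rough sketch of how boundaries like these plug into
the OpenTelemetry views the handler registers (the makeLatencyView helper is
illustrative only; the actual change below edits the latencyBuckets array
inside GCPMetricsHandler directly):

    import {
      ExplicitBucketHistogramAggregation,
      View,
    } from '@opentelemetry/sdk-metrics';

    // Shared explicit bucket boundaries, in milliseconds.
    const latencyBuckets = [
      0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, 13.0, 16.0, 20.0, 25.0,
      30.0, 40.0, 50.0, 65.0, 80.0, 100.0, 130.0, 160.0, 200.0, 250.0, 300.0,
      400.0, 500.0, 650.0, 800.0, 1000.0, 2000.0, 5000.0, 10000.0, 20000.0,
      50000.0, 100000.0, 200000.0, 400000.0, 800000.0, 1600000.0, 3200000.0,
    ];

    // One view per latency instrument, e.g. 'operation_latencies' or
    // 'attempt_latencies', so every histogram shares the same buckets.
    function makeLatencyView(instrumentName: string): View {
      return new View({
        instrumentName,
        aggregation: new ExplicitBucketHistogramAggregation(latencyBuckets),
      });
    }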
--- .../gcp-metrics-handler.ts | 7 +- test-common/expected-otel-export-input.ts | 192 +++++++++--------- 2 files changed, 97 insertions(+), 102 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index c27327a3d..46aa23873 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -99,9 +99,10 @@ export class GCPMetricsHandler implements IMetricsHandler { private getMetrics(data: MonitoredResourceData): Metrics { if (!this.otelInstruments) { const latencyBuckets = [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, - 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, - 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, + 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, 13.0, 16.0, 20.0, 25.0, + 30.0, 40.0, 50.0, 65.0, 80.0, 100.0, 130.0, 160.0, 200.0, 250.0, 300.0, + 400.0, 500.0, 650.0, 800.0, 1000.0, 2000.0, 5000.0, 10000.0, 20000.0, + 50000.0, 100000.0, 200000.0, 400000.0, 800000.0, 1600000.0, 3200000.0, ]; const viewList = [ 'operation_latencies', diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 86f4e6ad5..2a32d423d 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -39,8 +39,8 @@ export const expectedOtelExportConvertedValue = { client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', - streaming: 'true', client_uid: 'fake-uuid', + streaming: 'true', }, }, resource: { @@ -72,10 +72,10 @@ export const expectedOtelExportConvertedValue = { bucketOptions: { explicitBuckets: { bounds: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, ], }, }, @@ -111,14 +111,13 @@ export const expectedOtelExportConvertedValue = { '0', '0', '0', + '1', '0', '0', '0', '0', '0', '0', - '1', - '0', '0', '0', '0', @@ -137,8 +136,8 @@ export const expectedOtelExportConvertedValue = { client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '4', - streaming: 'true', client_uid: 'fake-uuid', + streaming: 'true', }, }, resource: { @@ -170,10 +169,10 @@ export const expectedOtelExportConvertedValue = { bucketOptions: { explicitBuckets: { bounds: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, ], }, }, @@ -207,14 +206,13 @@ export const expectedOtelExportConvertedValue = { '0', '0', '0', + '1', '0', '0', '0', '0', '0', '0', - '1', - '0', '0', '0', '0', @@ -235,8 +233,8 @@ export const expectedOtelExportConvertedValue = { client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', - streaming: 'true', client_uid: 'fake-uuid', + streaming: 'true', }, }, resource: { @@ -268,10 +266,10 @@ export const expectedOtelExportConvertedValue = { bucketOptions: { explicitBuckets: { bounds: [ - 0, 
0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, ], }, }, @@ -305,14 +303,13 @@ export const expectedOtelExportConvertedValue = { '0', '0', '0', + '1', '0', '0', '0', '0', '0', '0', - '1', - '0', '0', '0', '0', @@ -403,10 +400,10 @@ export const expectedOtelExportConvertedValue = { bucketOptions: { explicitBuckets: { bounds: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, ], }, }, @@ -441,14 +438,13 @@ export const expectedOtelExportConvertedValue = { '0', '0', '0', + '1', '0', '0', '0', '0', '0', '0', - '1', - '0', '0', '0', '0', @@ -468,8 +464,8 @@ export const expectedOtelExportConvertedValue = { client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '4', - streaming: 'true', client_uid: 'fake-uuid', + streaming: 'true', }, }, resource: { @@ -501,10 +497,10 @@ export const expectedOtelExportConvertedValue = { bucketOptions: { explicitBuckets: { bounds: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, ], }, }, @@ -528,14 +524,13 @@ export const expectedOtelExportConvertedValue = { '0', '0', '0', + '1', '0', '0', '0', '0', '0', '0', - '1', - '0', '0', '0', '0', @@ -566,8 +561,8 @@ export const expectedOtelExportConvertedValue = { client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', - streaming: 'true', client_uid: 'fake-uuid', + streaming: 'true', }, }, resource: { @@ -599,10 +594,10 @@ export const expectedOtelExportConvertedValue = { bucketOptions: { explicitBuckets: { bounds: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, ], }, }, @@ -626,14 +621,13 @@ export const expectedOtelExportConvertedValue = { '0', '0', '0', + '1', '0', '0', '0', '0', '0', '0', - '1', - '0', '0', '0', '0', @@ -797,10 +791,10 @@ export const expectedOtelExportInput = { valueType: 1, advice: { explicitBucketBoundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, - 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, - 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, - 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, + 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, + 
5000, 10000, 20000, 50000, 100000, 200000, 400000, 800000, + 1600000, 3200000, ], }, }, @@ -809,10 +803,10 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { + streamingOperation: 'true', methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', finalOperationStatus: 0, - streamingOperation: 'true', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -823,15 +817,15 @@ export const expectedOtelExportInput = { sum: 7000, buckets: { boundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, ], counts: [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, - 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, ], }, count: 1, @@ -849,10 +843,10 @@ export const expectedOtelExportInput = { valueType: 1, advice: { explicitBucketBoundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, - 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, - 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, - 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, + 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, + 5000, 10000, 20000, 50000, 100000, 200000, 400000, 800000, + 1600000, 3200000, ], }, }, @@ -861,10 +855,10 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { + streamingOperation: 'true', methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', attemptStatus: 4, - streamingOperation: 'true', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -875,15 +869,15 @@ export const expectedOtelExportInput = { sum: 2000, buckets: { boundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, ], counts: [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, - 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, ], }, count: 1, @@ -891,10 +885,10 @@ export const expectedOtelExportInput = { }, { attributes: { + streamingOperation: 'true', methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', attemptStatus: 0, - streamingOperation: 'true', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -905,15 +899,15 @@ export const expectedOtelExportInput = { sum: 2000, buckets: { boundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, ], counts: [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, - 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, ], }, count: 1, @@ -958,10 +952,10 @@ export const expectedOtelExportInput = { valueType: 1, advice: { explicitBucketBoundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, - 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, - 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, - 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, + 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, + 5000, 10000, 20000, 50000, 100000, 200000, 400000, 800000, + 1600000, 3200000, ], }, }, @@ -983,15 +977,15 @@ export const expectedOtelExportInput = { sum: 5000, buckets: { boundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, ], counts: [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, - 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, ], }, count: 1, @@ -1009,10 +1003,10 @@ export const expectedOtelExportInput = { valueType: 1, advice: { explicitBucketBoundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, - 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, - 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, - 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, + 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, + 5000, 10000, 20000, 50000, 100000, 200000, 400000, 800000, + 1600000, 3200000, ], }, }, @@ -1021,10 +1015,10 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { + streamingOperation: 'true', methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', attemptStatus: 4, - streamingOperation: 'true', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -1035,15 +1029,15 @@ export const expectedOtelExportInput = { sum: 101, buckets: { boundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, ], counts: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, + 0, ], }, count: 1, @@ -1051,10 +1045,10 @@ export const expectedOtelExportInput = { }, { attributes: { + streamingOperation: 'true', methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', attemptStatus: 0, - streamingOperation: 'true', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -1065,15 +1059,15 @@ export const expectedOtelExportInput = { sum: 103, buckets: { boundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, + 0, 1, 
2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, ], counts: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, + 0, ], }, count: 1, @@ -1093,7 +1087,6 @@ export const expectedOtelExportInput = { }, aggregationTemporality: 1, dataPointType: 3, - isMonotonic: true, dataPoints: [ { attributes: { @@ -1118,6 +1111,7 @@ export const expectedOtelExportInput = { value: 0, }, ], + isMonotonic: true, }, ], }, From 082c049c6ec3b17ba8cbbed40cef23fbc0910a53 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Mar 2025 17:14:38 -0400 Subject: [PATCH 293/448] Updated the resultCallback comment --- src/client-side-metrics/exporter.ts | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 2aa1f9816..1f31fc0f5 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -393,7 +393,12 @@ export class CloudMonitoringExporter extends MetricExporter { await this.monitoringClient.createTimeSeries( request as ICreateTimeSeriesRequest ); - // {code: 0} is typically the format the callback expects in the super class. + // The resultCallback typically accepts a value equal to {code: x} + // for some value x along with other info. When the code is equal to 0 + // then the operation completed successfully. When the code is not equal + // to 0 then the operation failed. Open telemetry logs errors to the + // console when the resultCallback passes in non-zero code values and + // logs nothing when the code is 0. 
const exportResult = {code: 0}; resultCallback(exportResult); } catch (error) { From f2e46a4d1355cbfdc929d4089602e22e7c0761f1 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 09:51:02 -0400 Subject: [PATCH 294/448] Change the test description --- system-test/gcp-metrics-handler.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index e112f7ec9..7571fcf20 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -28,7 +28,7 @@ import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import * as assert from 'assert'; describe('Bigtable/GCPMetricsHandler', () => { - it('Should export a value to the CloudMonitoringExporter', done => { + it('Should export a value to the GCPMetricsHandler', done => { (async () => { /* We need to create a timeout here because if we don't then mocha shuts down From dfcf56eb1839dd19d5ee91276176561ddee09738 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 09:58:21 -0400 Subject: [PATCH 295/448] Add a comment explaining what the test is about --- system-test/gcp-metrics-handler.ts | 75 ++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 7571fcf20..5ba2dbaef 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -99,4 +99,79 @@ describe('Bigtable/GCPMetricsHandler', () => { } })(); }); + it('Should export a value to two GCPMetricsHandlers', done => { + // This test ensures that when we create two GCPMetricsHandlers much like + // what we would be doing when calling readRows on two separate tables that + // the data doesn't store duplicates in the same place and export twice as + // much data as it should. + (async () => { + /* + We need to create a timeout here because if we don't then mocha shuts down + the test as it is sleeping before the GCPMetricsHandler has a chance to + export the data. + */ + const timeout = setTimeout(() => {}, 120000); + /* + The exporter is called every x seconds, but we only want to test the value + it receives once. Since done cannot be called multiple times in mocha, + exported variable ensures we only test the value export receives one time. + */ + let exported = false; + function getTestResultCallback( + resultCallback: (result: ExportResult) => void + ) { + return (result: ExportResult) => { + if (!exported) { + exported = true; + try { + clearTimeout(timeout); + assert.strictEqual(result.code, 0); + done(); + resultCallback({code: 0}); + } catch (error) { + // Code isn't 0 so report the original error. 
+ done(result); + done(error); + } + } else { + resultCallback({code: 0}); + } + }; + } + class MockExporter extends CloudMonitoringExporter { + export( + metrics: ResourceMetrics, + resultCallback: (result: ExportResult) => void + ): void { + const testResultCallback = getTestResultCallback(resultCallback); + super.export(metrics, testResultCallback); + } + } + + const bigtable = new Bigtable(); + const projectId: string = await new Promise((resolve, reject) => { + bigtable.getProjectId_((err, projectId) => { + if (err) { + reject(err); + } else { + resolve(projectId as string); + } + }); + }); + const handler = new GCPMetricsHandler(new MockExporter({projectId})); + const transformedRequestsHandled = JSON.parse( + JSON.stringify(expectedRequestsHandled).replace( + /my-project/g, + projectId + ) + ); + for (const request of transformedRequestsHandled) { + if (request.attemptLatency) { + handler.onAttemptComplete(request as OnAttemptCompleteData); + } else { + handler.onOperationComplete(request as OnOperationCompleteData); + } + } + })(); + }); }); From 35da5c6b27748508211b11b057ec7301fc6abaa4 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 10:32:39 -0400 Subject: [PATCH 296/448] Completed the test for two metrics handlers --- system-test/gcp-metrics-handler.ts | 110 +++++++++++++++++++++++++---- 1 file changed, 96 insertions(+), 14 deletions(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 5ba2dbaef..610dee1c9 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -21,11 +21,37 @@ import { } from '../src/client-side-metrics/metrics-handler'; import { CloudMonitoringExporter, + ExportInput, ExportResult, } from '../src/client-side-metrics/exporter'; import {Bigtable} from '../src'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import * as assert from 'assert'; +import {expectedOtelExportInput} from '../test-common/expected-otel-export-input'; + +/** + * Replaces the timestamp values within an `ExportInput` object with + * standardized test values. + * + * This function is designed for testing purposes to make timestamp comparisons + * in tests more predictable and reliable. It recursively traverses the + * `ExportInput` object, finds all `startTime` and `endTime` properties, and + * replaces their numeric values with standardized test values. + */ +function replaceTimestamps( + request: typeof expectedOtelExportInput, + newStartTime: [number, number], + newEndTime: [number, number] +) { + request.scopeMetrics.forEach(scopeMetric => { + scopeMetric.metrics.forEach(metric => { + metric.dataPoints.forEach(dataPoint => { + dataPoint.startTime = newStartTime; + dataPoint.endTime = newEndTime; + }); + }); + }); +} describe('Bigtable/GCPMetricsHandler', () => { it('Should export a value to the GCPMetricsHandler', done => { @@ -99,7 +125,7 @@ describe('Bigtable/GCPMetricsHandler', () => { } })(); }); - it('Should export a value to two GCPMetricsHandlers', done => { + it.only('Should export a value to two GCPMetricsHandlers', done => { // This test ensures that when we create two GCPMetricsHandlers much like // what we would be doing when calling readRows on two separate tables that // the data doesn't store duplicates in the same place and export twice as @@ -116,33 +142,81 @@ describe('Bigtable/GCPMetricsHandler', () => { it receives once. Since done cannot be called multiple times in mocha, exported variable ensures we only test the value export receives one time. 
*/ - let exported = false; + let exportedCount = 0; function getTestResultCallback( - resultCallback: (result: ExportResult) => void + resultCallback: (result: ExportResult) => void ) { return (result: ExportResult) => { - if (!exported) { - exported = true; + if (exportedCount < 2) { + exportedCount++; try { - clearTimeout(timeout); assert.strictEqual(result.code, 0); - done(); - resultCallback({code: 0}); } catch (error) { // Code isn't 0 so report the original error. done(result); done(error); } + if (exportedCount === 2) { + // We are expecting two calls to an exporter. One for each + // metrics handler. + clearTimeout(timeout); + done(); + } + resultCallback({code: 0}); } else { + // After the test is complete the periodic exporter may still be + // running in which case we don't want to do any checks. We just + // want to call the resultCallback so that there are no hanging + // threads. resultCallback({code: 0}); } }; } class MockExporter extends CloudMonitoringExporter { export( - metrics: ResourceMetrics, - resultCallback: (result: ExportResult) => void + metrics: ResourceMetrics, + resultCallback: (result: ExportResult) => void ): void { + try { + // The code block ensures the metrics are correct. Mainly, the metrics + // shouldn't contain two copies of the data. It should only contain + // one. + + // For this test since we are still writing a time series with + // metrics variable we don't want to modify the metrics variable + // to have artificial times because then sending the data to the + // metric service client will fail. Therefore, we must make a copy + // of the metrics and use that. + const parsedExportInput = JSON.parse(JSON.stringify(metrics)); + replaceTimestamps( + parsedExportInput as unknown as typeof expectedOtelExportInput, + [123, 789], + [456, 789] + ); + assert.deepStrictEqual( + (parsedExportInput as ExportInput).scopeMetrics[0].metrics.length, + expectedOtelExportInput.scopeMetrics[0].metrics.length + ); + for ( + let index = 0; + index < + (parsedExportInput as ExportInput).scopeMetrics[0].metrics.length; + index++ + ) { + // We need to compare pointwise because mocha truncates to an 8192 character limit. + assert.deepStrictEqual( + (parsedExportInput as ExportInput).scopeMetrics[0].metrics[ + index + ], + expectedOtelExportInput.scopeMetrics[0].metrics[index] + ); + } + } catch (e) { + // The error needs to be caught so it can be reported to the mocha + // test runner. + done(e); + } + // The code below uses the test callback to ensure the export was successful. 
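+        // In sketch form the chain is: super.export(metrics, cb) invokes
+        // CloudMonitoringExporter.export, which calls createTimeSeries and
+        // then cb({code: 0}) on success (or a non-zero code on failure), so
+        // asserting on cb's argument verifies the full round trip.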
const testResultCallback = getTestResultCallback(resultCallback); super.export(metrics, testResultCallback); } @@ -159,11 +233,12 @@ describe('Bigtable/GCPMetricsHandler', () => { }); }); const handler = new GCPMetricsHandler(new MockExporter({projectId})); + const handler2 = new GCPMetricsHandler(new MockExporter({projectId})); const transformedRequestsHandled = JSON.parse( - JSON.stringify(expectedRequestsHandled).replace( - /my-project/g, - projectId - ) + JSON.stringify(expectedRequestsHandled).replace( + /my-project/g, + projectId + ) ); for (const request of transformedRequestsHandled) { if (request.attemptLatency) { @@ -172,6 +247,13 @@ describe('Bigtable/GCPMetricsHandler', () => { handler.onOperationComplete(request as OnOperationCompleteData); } } + for (const request of transformedRequestsHandled) { + if (request.attemptLatency) { + handler2.onAttemptComplete(request as OnAttemptCompleteData); + } else { + handler2.onOperationComplete(request as OnOperationCompleteData); + } + } })(); }); }); From b5ae964074158a00111d0edf32a7c79b84c6366b Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 10:33:39 -0400 Subject: [PATCH 297/448] remove only --- system-test/gcp-metrics-handler.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 610dee1c9..11deebece 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -125,7 +125,7 @@ describe('Bigtable/GCPMetricsHandler', () => { } })(); }); - it.only('Should export a value to two GCPMetricsHandlers', done => { + it('Should export a value to two GCPMetricsHandlers', done => { // This test ensures that when we create two GCPMetricsHandlers much like // what we would be doing when calling readRows on two separate tables that // the data doesn't store duplicates in the same place and export twice as From b575010f22532f9d940fec4cf27dd44af91f304c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 11:06:59 -0400 Subject: [PATCH 298/448] Cast as histogram to make compile error go away --- src/client-side-metrics/exporter.ts | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 1f31fc0f5..b65b5a79b 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -13,7 +13,7 @@ // limitations under the License. import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; -import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; +import {ExponentialHistogram, Histogram, ResourceMetrics} from '@opentelemetry/sdk-metrics'; import {ServiceError} from 'google-gax'; import {MetricServiceClient} from '@google-cloud/monitoring'; import {google} from '@google-cloud/monitoring/build/protos/protos'; @@ -312,13 +312,16 @@ export function metricsToRequest(exportArgs: ExportInput) { value: { distributionValue: { count: String(value.count), - mean: value.count ? value.sum / value.count : 0, + mean: + value.count && value.sum ? 
value.sum / value.count : 0, bucketOptions: { explicitBuckets: { - bounds: value.buckets.boundaries, + bounds: (value as Histogram).buckets.boundaries, }, }, - bucketCounts: value.buckets.counts.map(String), + bucketCounts: (value as Histogram).buckets.counts.map( + String + ), }, }, }, From 822c14e57bb003ab298bceb45682f706f5ebfd42 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 11:17:19 -0400 Subject: [PATCH 299/448] Move the duplicate copies of replaceTimestamps into one file --- system-test/gcp-metrics-handler.ts | 27 ++----------------- test-common/replace-timestamps.ts | 25 +++++++++++++++++ test/metrics-collector/gcp-metrics-handler.ts | 25 +---------------- 3 files changed, 28 insertions(+), 49 deletions(-) create mode 100644 test-common/replace-timestamps.ts diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 11deebece..fe040a7b0 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -28,33 +28,10 @@ import {Bigtable} from '../src'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import * as assert from 'assert'; import {expectedOtelExportInput} from '../test-common/expected-otel-export-input'; - -/** - * Replaces the timestamp values within an `ExportInput` object with - * standardized test values. - * - * This function is designed for testing purposes to make timestamp comparisons - * in tests more predictable and reliable. It recursively traverses the - * `ExportInput` object, finds all `startTime` and `endTime` properties, and - * replaces their numeric values with standardized test values. - */ -function replaceTimestamps( - request: typeof expectedOtelExportInput, - newStartTime: [number, number], - newEndTime: [number, number] -) { - request.scopeMetrics.forEach(scopeMetric => { - scopeMetric.metrics.forEach(metric => { - metric.dataPoints.forEach(dataPoint => { - dataPoint.startTime = newStartTime; - dataPoint.endTime = newEndTime; - }); - }); - }); -} +import {replaceTimestamps} from '../test-common/replace-timestamps'; describe('Bigtable/GCPMetricsHandler', () => { - it('Should export a value to the GCPMetricsHandler', done => { + it.only('Should export a value to the GCPMetricsHandler', done => { (async () => { /* We need to create a timeout here because if we don't then mocha shuts down diff --git a/test-common/replace-timestamps.ts b/test-common/replace-timestamps.ts new file mode 100644 index 000000000..616e496ad --- /dev/null +++ b/test-common/replace-timestamps.ts @@ -0,0 +1,25 @@ +import {expectedOtelExportInput} from './expected-otel-export-input'; + +/** + * Replaces the timestamp values within an `ExportInput` object with + * standardized test values. + * + * This function is designed for testing purposes to make timestamp comparisons + * in tests more predictable and reliable. It recursively traverses the + * `ExportInput` object, finds all `startTime` and `endTime` properties, and + * replaces their numeric values with standardized test values. 
+ */ +export function replaceTimestamps( + request: typeof expectedOtelExportInput, + newStartTime: [number, number], + newEndTime: [number, number] +) { + request.scopeMetrics.forEach(scopeMetric => { + scopeMetric.metrics.forEach(metric => { + metric.dataPoints.forEach(dataPoint => { + dataPoint.startTime = newStartTime; + dataPoint.endTime = newEndTime; + }); + }); + }); +} diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index c22b32ea9..1858bf9a8 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -31,30 +31,7 @@ import { expectedOtelExportInput, } from '../../test-common/expected-otel-export-input'; import * as assert from 'assert'; - -/** - * Replaces the timestamp values within an `ExportInput` object with - * standardized test values. - * - * This function is designed for testing purposes to make timestamp comparisons - * in tests more predictable and reliable. It recursively traverses the - * `ExportInput` object, finds all `startTime` and `endTime` properties, and - * replaces their numeric values with standardized test values. - */ -function replaceTimestamps( - request: typeof expectedOtelExportInput, - newStartTime: [number, number], - newEndTime: [number, number] -) { - request.scopeMetrics.forEach(scopeMetric => { - scopeMetric.metrics.forEach(metric => { - metric.dataPoints.forEach(dataPoint => { - dataPoint.startTime = newStartTime; - dataPoint.endTime = newEndTime; - }); - }); - }); -} +import {replaceTimestamps} from '../../test-common/replace-timestamps'; describe('Bigtable/GCPMetricsHandler', () => { it('Should export a value ready for sending to the CloudMonitoringExporter', function (done) { From 561889c99627189c58ca0491a16934e706e76009 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 11:59:11 -0400 Subject: [PATCH 300/448] Take steps to eliminate the ExportInput interface --- src/client-side-metrics/exporter.ts | 49 +++++++++++++------ system-test/gcp-metrics-handler.ts | 14 +++--- test/metrics-collector/gcp-metrics-handler.ts | 20 +++----- test/metrics-collector/metricsToRequest.ts | 5 +- 4 files changed, 52 insertions(+), 36 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index b65b5a79b..0824b40ec 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -13,7 +13,11 @@ // limitations under the License. import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; -import {ExponentialHistogram, Histogram, ResourceMetrics} from '@opentelemetry/sdk-metrics'; +import { + ExponentialHistogram, + Histogram, + ResourceMetrics, +} from '@opentelemetry/sdk-metrics'; import {ServiceError} from 'google-gax'; import {MetricServiceClient} from '@google-cloud/monitoring'; import {google} from '@google-cloud/monitoring/build/protos/protos'; @@ -194,7 +198,9 @@ export interface ExportInput { * (which have more complex, object-based values). * */ -function isCounterValue(value: DistributionValue | number): value is number { +function isCounterValue( + value: number | Histogram | ExponentialHistogram +): value is number { return typeof value === 'number'; } @@ -206,7 +212,7 @@ function isCounterValue(value: DistributionValue | number): value is number { * metric attributes, data points, and aggregation information, into an object * that conforms to the expected request format of the Cloud Monitoring API. 
* - * @param {ExportInput} exportArgs - The OpenTelemetry metrics data to be converted. This + * @param {ResourceMetrics} exportArgs - The OpenTelemetry metrics data to be converted. This * object contains resource attributes, scope information, and a list of * metrics with their associated data points. * @@ -233,16 +239,27 @@ function isCounterValue(value: DistributionValue | number): value is number { * * */ -export function metricsToRequest(exportArgs: ExportInput) { +export function metricsToRequest(exportArgs: ResourceMetrics) { + type WithSyncAttributes = {_syncAttributes: {[index: string]: string}}; + const resourcesWithSyncAttributes = + exportArgs.resource as unknown as WithSyncAttributes; const timeSeriesArray = []; const resourceLabels = { - cluster: exportArgs.resource._syncAttributes['monitored_resource.cluster'], + cluster: + resourcesWithSyncAttributes._syncAttributes['monitored_resource.cluster'], instance: - exportArgs.resource._syncAttributes['monitored_resource.instance_id'], + resourcesWithSyncAttributes._syncAttributes[ + 'monitored_resource.instance_id' + ], project_id: - exportArgs.resource._syncAttributes['monitored_resource.project_id'], - table: exportArgs.resource._syncAttributes['monitored_resource.table'], - zone: exportArgs.resource._syncAttributes['monitored_resource.zone'], + resourcesWithSyncAttributes._syncAttributes[ + 'monitored_resource.project_id' + ], + table: + resourcesWithSyncAttributes._syncAttributes['monitored_resource.table'], + zone: resourcesWithSyncAttributes._syncAttributes[ + 'monitored_resource.zone' + ], }; for (const scopeMetrics of exportArgs.scopeMetrics) { for (const scopeMetric of scopeMetrics.metrics) { @@ -263,9 +280,11 @@ export function metricsToRequest(exportArgs: ExportInput) { client_name: allAttributes.clientName, method: allAttributes.methodName, status: - (allAttributes as OnAttemptAttribute).attemptStatus?.toString() ?? ( - allAttributes as OnOperationAttribute + allAttributes as {attemptStatus: number} + ).attemptStatus?.toString() ?? 
+ ( + allAttributes as {finalOperationStatus: number} ).finalOperationStatus?.toString(), client_uid: allAttributes.clientUid, }, @@ -276,7 +295,9 @@ export function metricsToRequest(exportArgs: ExportInput) { labels: metricLabels, }; const resource = { - type: exportArgs.resource._syncAttributes['monitored_resource.type'], + type: resourcesWithSyncAttributes._syncAttributes[ + 'monitored_resource.type' + ], labels: resourceLabels, }; const interval = { @@ -333,7 +354,7 @@ export function metricsToRequest(exportArgs: ExportInput) { } } return { - name: `projects/${exportArgs.resource._syncAttributes['monitored_resource.project_id']}`, + name: `projects/${resourcesWithSyncAttributes._syncAttributes['monitored_resource.project_id']}`, timeSeries: timeSeriesArray, }; } @@ -392,7 +413,7 @@ export class CloudMonitoringExporter extends MetricExporter { ): void { (async () => { try { - const request = metricsToRequest(metrics as unknown as ExportInput); + const request = metricsToRequest(metrics); await this.monitoringClient.createTimeSeries( request as ICreateTimeSeriesRequest ); diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index fe040a7b0..52ecf28b3 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -21,7 +21,6 @@ import { } from '../src/client-side-metrics/metrics-handler'; import { CloudMonitoringExporter, - ExportInput, ExportResult, } from '../src/client-side-metrics/exporter'; import {Bigtable} from '../src'; @@ -164,27 +163,26 @@ describe('Bigtable/GCPMetricsHandler', () => { // to have artificial times because then sending the data to the // metric service client will fail. Therefore, we must make a copy // of the metrics and use that. - const parsedExportInput = JSON.parse(JSON.stringify(metrics)); + const parsedExportInput: ResourceMetrics = JSON.parse( + JSON.stringify(metrics) + ); replaceTimestamps( parsedExportInput as unknown as typeof expectedOtelExportInput, [123, 789], [456, 789] ); assert.deepStrictEqual( - (parsedExportInput as ExportInput).scopeMetrics[0].metrics.length, + parsedExportInput.scopeMetrics[0].metrics.length, expectedOtelExportInput.scopeMetrics[0].metrics.length ); for ( let index = 0; - index < - (parsedExportInput as ExportInput).scopeMetrics[0].metrics.length; + index < parsedExportInput.scopeMetrics[0].metrics.length; index++ ) { // We need to compare pointwise because mocha truncates to an 8192 character limit. 
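// The pointwise loop above generalizes to a small helper; this is an
// illustrative sketch (assertMetricsMatchPointwise is an assumed name, not
// part of the patch). Asserting element by element keeps each failure
// message under mocha's 8192-character truncation limit, so a mismatch
// points at the one metric that differs instead of a truncated dump.
import * as assert from 'assert';

function assertMetricsMatchPointwise<T>(actual: T[], expected: T[]): void {
  assert.strictEqual(actual.length, expected.length);
  for (let index = 0; index < actual.length; index++) {
    assert.deepStrictEqual(actual[index], expected[index]);
  }
}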
assert.deepStrictEqual( - (parsedExportInput as ExportInput).scopeMetrics[0].metrics[ - index - ], + parsedExportInput.scopeMetrics[0].metrics[index], expectedOtelExportInput.scopeMetrics[0].metrics[index] ); } diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 1858bf9a8..b870a17ac 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -15,7 +15,6 @@ import {describe} from 'mocha'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import { - ExportInput, ExportResult, metricsToRequest, } from '../../src/client-side-metrics/exporter'; @@ -63,24 +62,21 @@ describe('Bigtable/GCPMetricsHandler', () => { [123, 789], [456, 789] ); - const parsedExportInput = JSON.parse(JSON.stringify(metrics)); + const parsedExportInput: ResourceMetrics = JSON.parse( + JSON.stringify(metrics) + ); assert.deepStrictEqual( - (parsedExportInput as ExportInput).scopeMetrics[0].metrics - .length, + parsedExportInput.scopeMetrics[0].metrics.length, expectedOtelExportInput.scopeMetrics[0].metrics.length ); for ( let index = 0; - index < - (parsedExportInput as ExportInput).scopeMetrics[0].metrics - .length; + index < parsedExportInput.scopeMetrics[0].metrics.length; index++ ) { // We need to compare pointwise because mocha truncates to an 8192 character limit. assert.deepStrictEqual( - (parsedExportInput as ExportInput).scopeMetrics[0].metrics[ - index - ], + parsedExportInput.scopeMetrics[0].metrics[index], expectedOtelExportInput.scopeMetrics[0].metrics[index] ); } @@ -88,9 +84,7 @@ describe('Bigtable/GCPMetricsHandler', () => { JSON.parse(JSON.stringify(metrics)), expectedOtelExportInput ); - const convertedRequest = metricsToRequest( - expectedOtelExportInput as unknown as ExportInput - ); + const convertedRequest = metricsToRequest(metrics); assert.deepStrictEqual( convertedRequest.timeSeries.length, expectedOtelExportConvertedValue.timeSeries.length diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index e77917d79..8777bba4f 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -19,10 +19,13 @@ import { expectedOtelExportConvertedValue, expectedOtelExportInput, } from '../../test-common/expected-otel-export-input'; +import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; describe('Bigtable/metricsToRequest', () => { it('Converts an otel request to a request ready for the metric service client', () => { - const convertedValue = metricsToRequest(expectedOtelExportInput); + const convertedValue = metricsToRequest( + expectedOtelExportInput as unknown as ResourceMetrics + ); assert.deepStrictEqual( convertedValue.timeSeries.length, expectedOtelExportConvertedValue.timeSeries.length From f423c578e9b8e812feb8f9630fda5ebc975434d3 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 13:12:08 -0400 Subject: [PATCH 301/448] Add a header --- test-common/replace-timestamps.ts | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/test-common/replace-timestamps.ts b/test-common/replace-timestamps.ts index 616e496ad..07095d906 100644 --- a/test-common/replace-timestamps.ts +++ b/test-common/replace-timestamps.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import {expectedOtelExportInput} from './expected-otel-export-input'; /** From 9076be156cf9e88f76256231cb849011ec955958 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 13:13:35 -0400 Subject: [PATCH 302/448] Remove only --- system-test/gcp-metrics-handler.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 52ecf28b3..a4e0a3160 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -30,7 +30,7 @@ import {expectedOtelExportInput} from '../test-common/expected-otel-export-input import {replaceTimestamps} from '../test-common/replace-timestamps'; describe('Bigtable/GCPMetricsHandler', () => { - it.only('Should export a value to the GCPMetricsHandler', done => { + it('Should export a value to the GCPMetricsHandler', done => { (async () => { /* We need to create a timeout here because if we don't then mocha shuts down From ead3f5e797b4563c998311d7d5aded08a66f83c4 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 13:23:03 -0400 Subject: [PATCH 303/448] Eliminate ExportInput and dependencies --- src/client-side-metrics/exporter.ts | 162 ---------------------------- 1 file changed, 162 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 0824b40ec..1a04bcc18 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -27,168 +27,6 @@ export interface ExportResult { code: number; } -/** - * Attributes associated with the completion of a single attempt of a Bigtable - * operation. These attributes provide context about the specific attempt, - * its status, and the method involved. They are used for recording metrics - * such as attempt latency and connectivity errors. - * - * @property methodName - The name of the Bigtable method that was attempted (e.g., - * 'Bigtable.ReadRows', 'Bigtable.MutateRows'). - * @property clientUid - A unique identifier for the client that initiated the - * attempt. - * @property appProfileId - (Optional) The ID of the application profile used for - * the attempt. - * @property attemptStatus - The status code of the attempt. A value of `0` - * typically indicates success (grpc.status.OK), while other values indicate - * different types of errors. - * @property streamingOperation - (Optional) Indicates if the operation is a streaming operation. - * Will be "true" or "false" if present. - * @property clientName - The name of the client library making the attempt - * (e.g., 'nodejs-bigtable', 'go-bigtable/1.35.0'). - */ -interface OnAttemptAttribute { - methodName: string; - clientUid: string; - appProfileId?: string; - attemptStatus: number; - streamingOperation?: string; - clientName: string; -} - -/** - * Attributes associated with the completion of a Bigtable operation. These - * attributes provide context about the operation, its final status, and the - * method involved. They are used for recording metrics such as operation - * latency. 
- * - * @property methodName - The name of the Bigtable method that was performed - * (e.g., 'Bigtable.ReadRows', 'Bigtable.MutateRows'). - * @property clientUid - A unique identifier for the client that initiated the - * operation. - * @property appProfileId - (Optional) The ID of the application profile used for - * the operation. - * @property finalOperationStatus - The final status code of the operation. A - * value of `0` typically indicates success (grpc.status.OK), while other - * values indicate different types of errors. - * @property streamingOperation - (Optional) Indicates if the operation is a streaming operation. - * Will be "true" or "false" if present. - * @property clientName - The name of the client library performing the operation - * (e.g., 'nodejs-bigtable', 'go-bigtable/1.35.0'). - */ -interface OnOperationAttribute { - methodName: string; - clientUid: string; - appProfileId?: string; - finalOperationStatus: number; - streamingOperation?: string; - clientName: string; -} - -/** - * Represents a generic metric in the OpenTelemetry format. - * - * This interface describes the structure of a metric, which can represent - * either a counter or a distribution (histogram). It includes the metric's - * descriptor, the type of data it collects, and the actual data points. - * - */ -interface Metric { - descriptor: { - name: string; - unit: string; - description?: string; - type?: string; - valueType?: number; - advice?: {}; - }; - aggregationTemporality?: number; - dataPointType?: number; - dataPoints: { - attributes: OnAttemptAttribute | OnOperationAttribute; - startTime: number[]; - endTime: number[]; - value: Value; - }[]; -} - -interface DistributionValue { - min?: number; - max?: number; - sum: number; - count: number; - buckets: { - boundaries: number[]; - counts: number[]; - }; -} - -/** - * Represents a metric that measures the distribution of values. - * - * Distribution metrics, also known as histograms, are used to track the - * statistical distribution of a set of measurements. They allow you to capture - * not only the count and sum of the measurements but also how they are spread - * across different ranges (buckets). This makes them suitable for tracking - * latencies, sizes, or other metrics where the distribution is important. - * - */ -type DistributionMetric = Metric; - -/** - * Represents a metric that counts the number of occurrences of an event or - * the cumulative value of a quantity over time. - * - * Counter metrics are used to track quantities that increase over time, such - * as the number of requests, errors, or retries. They are always - * non-negative and can only increase or remain constant. - * - */ -type CounterMetric = Metric; - -/** - * Represents the input data structure for exporting OpenTelemetry metrics. - * - * This interface defines the structure of the object that is passed to the - * `metricsToRequest` function to convert OpenTelemetry metrics into a format - * suitable for the Google Cloud Monitoring API. - * - * It contains information about the monitored resource and an array of - * scope metrics, which include various types of metrics (counters and - * distributions) and their associated data points. - * - * @remarks - * This structure is specifically designed to hold OpenTelemetry metrics data - * as it is exported from the Bigtable client library. It represents the data - * before it is transformed into the Cloud Monitoring API's `TimeSeries` - * format. 
- * - * Each `CounterMetric` and `DistributionMetric` within the `scopeMetrics` - * array represents a different type of measurement, such as retry counts, - * operation latencies, attempt latencies etc. Each metric contains an array of dataPoints - * Each `dataPoint` contains the `attributes`, `startTime`, `endTime` and `value`. - * `value` will be a number for a counter metric and an object for a distribution metric. - */ -export interface ExportInput { - resource: { - _syncAttributes: { - 'monitored_resource.type': string; - 'monitored_resource.project_id': string; - 'monitored_resource.instance_id': string; - 'monitored_resource.table': string; - 'monitored_resource.cluster': string; - 'monitored_resource.zone': string; - }; - }; - scopeMetrics: { - scope: { - name: string; - version: string; - }; - metrics: (CounterMetric | DistributionMetric)[]; - }[]; -} - /** * Type guard function to determine if a given value is a counter value (a number). * From fa6c3fd877ccc34868842c5c12a3a1ce3f3b74af Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 13:36:08 -0400 Subject: [PATCH 304/448] Eliminate constant --- src/client-side-metrics/exporter.ts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 1a04bcc18..8fe8f1585 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -261,8 +261,7 @@ export class CloudMonitoringExporter extends MetricExporter { // to 0 then the operation failed. Open telemetry logs errors to the // console when the resultCallback passes in non-zero code values and // logs nothing when the code is 0. - const exportResult = {code: 0}; - resultCallback(exportResult); + resultCallback({code: 0}); } catch (error) { resultCallback(error as ServiceError); } From a587d0c97b83b061b20016cf5edb062049284d10 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 14:28:40 -0400 Subject: [PATCH 305/448] Push conversion to status back to metrics collect Also add if statement for timeseries. --- src/client-side-metrics/exporter.ts | 83 +++++++++---------- .../gcp-metrics-handler.ts | 4 +- src/client-side-metrics/metrics-handler.ts | 3 +- .../operation-metrics-collector.ts | 4 +- test-common/expected-otel-export-input.ts | 18 ++-- test-common/metrics-handler-fixture.ts | 6 +- .../metrics-collector/typical-method-call.txt | 6 +- 7 files changed, 59 insertions(+), 65 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 8fe8f1585..c49de82e3 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -117,13 +117,7 @@ export function metricsToRequest(exportArgs: ResourceMetrics) { app_profile: allAttributes.appProfileId, client_name: allAttributes.clientName, method: allAttributes.methodName, - status: - ( - allAttributes as {attemptStatus: number} - ).attemptStatus?.toString() ?? - ( - allAttributes as {finalOperationStatus: number} - ).finalOperationStatus?.toString(), + status: allAttributes.status, client_uid: allAttributes.clientUid, }, streaming ? {streaming} : null @@ -146,48 +140,49 @@ export function metricsToRequest(exportArgs: ResourceMetrics) { seconds: dataPoint.startTime[0], }, }; - const timeSeries = isCounterValue(value) - ? 
{ - metric, - resource, - valueType: 'INT64', - points: [ - { - interval, - value: { - int64Value: dataPoint.value, - }, + if (isCounterValue(value)) { + timeSeriesArray.push({ + metric, + resource, + valueType: 'INT64', + points: [ + { + interval, + value: { + int64Value: dataPoint.value, }, - ], - } - : { - metric, - resource, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval, - value: { - distributionValue: { - count: String(value.count), - mean: - value.count && value.sum ? value.sum / value.count : 0, - bucketOptions: { - explicitBuckets: { - bounds: (value as Histogram).buckets.boundaries, - }, + }, + ], + }); + } else { + timeSeriesArray.push({ + metric, + resource, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval, + value: { + distributionValue: { + count: String(value.count), + mean: + value.count && value.sum ? value.sum / value.count : 0, + bucketOptions: { + explicitBuckets: { + bounds: (value as Histogram).buckets.boundaries, }, - bucketCounts: (value as Histogram).buckets.counts.map( - String - ), }, + bucketCounts: (value as Histogram).buckets.counts.map( + String + ), }, }, - ], - unit: scopeMetric.descriptor.unit || 'ms', // Default to 'ms' if no unit is specified - }; - timeSeriesArray.push(timeSeries); + }, + ], + unit: scopeMetric.descriptor.unit || 'ms', // Default to 'ms' if no unit is specified + }); + } } } } diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 46aa23873..16fdfa9c2 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -248,7 +248,7 @@ export class GCPMetricsHandler implements IMetricsHandler { appProfileId: data.metricsCollectorData.appProfileId, methodName: data.metricsCollectorData.methodName, clientUid: data.metricsCollectorData.clientUid, - finalOperationStatus: data.finalOperationStatus, + status: data.status, clientName: data.clientName, }; otelInstruments.operationLatencies.record(data.operationLatency, { @@ -280,7 +280,7 @@ export class GCPMetricsHandler implements IMetricsHandler { appProfileId: data.metricsCollectorData.appProfileId, methodName: data.metricsCollectorData.methodName, clientUid: data.metricsCollectorData.clientUid, - attemptStatus: data.attemptStatus, + status: data.status, clientName: data.clientName, }; otelInstruments.attemptLatencies.record(data.attemptLatency, { diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index 9b26176fb..7aad1bd7f 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -36,20 +36,19 @@ interface StandardData { metricsCollectorData: IMetricsCollectorData; clientName: string; streamingOperation: StreamingState; + status: string; } export interface OnOperationCompleteData extends StandardData { firstResponseLatency?: number; operationLatency: number; retryCount?: number; - finalOperationStatus: grpc.status; } export interface OnAttemptCompleteData extends StandardData { attemptLatency: number; serverLatency?: number; connectivityErrorCount: number; - attemptStatus: grpc.status; } /** diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 4fd24b4d9..12f405bc1 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -155,7 +155,7 @@ export class 
OperationMetricsCollector { serverLatency: this.serverTime ?? undefined, connectivityErrorCount: this.connectivityErrorCount, streamingOperation: this.streamingOperation, - attemptStatus, + status: attemptStatus.toString(), clientName: `nodejs-bigtable/${version}`, metricsCollectorData: this.getMetricsCollectorData(), projectId, @@ -224,7 +224,7 @@ export class OperationMetricsCollector { this.metricsHandlers.forEach(metricsHandler => { if (metricsHandler.onOperationComplete) { metricsHandler.onOperationComplete({ - finalOperationStatus: finalOperationStatus, + status: finalOperationStatus.toString(), streamingOperation: this.streamingOperation, metricsCollectorData: this.getMetricsCollectorData(), clientName: `nodejs-bigtable/${version}`, diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 2a32d423d..d822001dc 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -806,7 +806,7 @@ export const expectedOtelExportInput = { streamingOperation: 'true', methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', - finalOperationStatus: 0, + status: '0', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -858,7 +858,7 @@ export const expectedOtelExportInput = { streamingOperation: 'true', methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', - attemptStatus: 4, + status: '4', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -888,7 +888,7 @@ export const expectedOtelExportInput = { streamingOperation: 'true', methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', - attemptStatus: 0, + status: '0', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -932,7 +932,7 @@ export const expectedOtelExportInput = { attributes: { methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', - finalOperationStatus: 0, + status: '0', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -966,7 +966,7 @@ export const expectedOtelExportInput = { attributes: { methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', - finalOperationStatus: 0, + status: '0', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -1018,7 +1018,7 @@ export const expectedOtelExportInput = { streamingOperation: 'true', methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', - attemptStatus: 4, + status: '4', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -1048,7 +1048,7 @@ export const expectedOtelExportInput = { streamingOperation: 'true', methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', - attemptStatus: 0, + status: '0', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -1092,7 +1092,7 @@ export const expectedOtelExportInput = { attributes: { methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', - attemptStatus: 4, + status: '4', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -1103,7 +1103,7 @@ export const expectedOtelExportInput = { attributes: { methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', - attemptStatus: 0, + status: '0', clientName: 'nodejs-bigtable', }, startTime: [123, 789], diff --git a/test-common/metrics-handler-fixture.ts b/test-common/metrics-handler-fixture.ts index a49a91158..3bfe4f490 100644 --- a/test-common/metrics-handler-fixture.ts +++ b/test-common/metrics-handler-fixture.ts @@ -18,7 +18,7 @@ export const expectedRequestsHandled = [ serverLatency: 101, connectivityErrorCount: 0, streamingOperation: 'true', - attemptStatus: 4, + status: '4', clientName: 'nodejs-bigtable', metricsCollectorData: { appProfileId: 
undefined, @@ -36,7 +36,7 @@ export const expectedRequestsHandled = [ serverLatency: 103, connectivityErrorCount: 0, streamingOperation: 'true', - attemptStatus: 0, + status: '0', clientName: 'nodejs-bigtable', metricsCollectorData: { appProfileId: undefined, @@ -50,7 +50,7 @@ export const expectedRequestsHandled = [ projectId: 'my-project', }, { - finalOperationStatus: 0, + status: '0', streamingOperation: 'true', metricsCollectorData: { appProfileId: undefined, diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index 576a66102..da43e899b 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -11,7 +11,7 @@ getDate call returns 3000 ms 8. A transient error occurs. getDate call returns 4000 ms Recording parameters for onAttemptComplete: -{"attemptLatency":2000,"serverLatency":101,"connectivityErrorCount":0,"streamingOperation":"true","attemptStatus":4,"clientName":"nodejs-bigtable","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","methodName":"Bigtable.ReadRows","clientUid":"fake-uuid"},"projectId":"my-project"} +{"attemptLatency":2000,"serverLatency":101,"connectivityErrorCount":0,"streamingOperation":"true","status":"4","clientName":"nodejs-bigtable","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","methodName":"Bigtable.ReadRows","clientUid":"fake-uuid"},"projectId":"my-project"} 9. After a timeout, the second attempt is made. getDate call returns 5000 ms 10. Client receives status information. @@ -24,7 +24,7 @@ getDate call returns 6000 ms 16. Stream ends, operation completes getDate call returns 7000 ms Recording parameters for onAttemptComplete: -{"attemptLatency":2000,"serverLatency":103,"connectivityErrorCount":0,"streamingOperation":"true","attemptStatus":0,"clientName":"nodejs-bigtable","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","methodName":"Bigtable.ReadRows","clientUid":"fake-uuid"},"projectId":"my-project"} +{"attemptLatency":2000,"serverLatency":103,"connectivityErrorCount":0,"streamingOperation":"true","status":"0","clientName":"nodejs-bigtable","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","methodName":"Bigtable.ReadRows","clientUid":"fake-uuid"},"projectId":"my-project"} getDate call returns 8000 ms Recording parameters for onOperationComplete: -{"finalOperationStatus":0,"streamingOperation":"true","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","methodName":"Bigtable.ReadRows","clientUid":"fake-uuid"},"clientName":"nodejs-bigtable","projectId":"my-project","operationLatency":7000,"retryCount":1,"firstResponseLatency":5000} +{"status":"0","streamingOperation":"true","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","methodName":"Bigtable.ReadRows","clientUid":"fake-uuid"},"clientName":"nodejs-bigtable","projectId":"my-project","operationLatency":7000,"retryCount":1,"firstResponseLatency":5000} From 311d55581bfd113bccd2b5e18894322810831048 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 15:44:03 -0400 Subject: [PATCH 306/448] Change property names to match MetricServiceClient expectation --- 
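Notes: after this rename the handlers record OpenTelemetry attributes under
the exact label names the Cloud Monitoring MetricServiceClient expects, so
the exporter can forward dataPoint.attributes as metric.labels without a
remapping step. A rough before/after sketch with example values (object
shapes shown for illustration only):

// Before: the exporter remapped camelCase attribute names to label names
// (method: allAttributes.methodName, client_uid: allAttributes.clientUid, ...).
const attributesBefore = {
  methodName: 'Bigtable.ReadRows',
  clientUid: 'fake-uuid',
  status: '4',
  clientName: 'nodejs-bigtable',
  streamingOperation: 'true',
};

// After: attributes already use the final label names, and the exporter
// builds the metric as {type: metricName, labels: dataPoint.attributes}.
const attributesAfter = {
  method: 'Bigtable.ReadRows',
  client_uid: 'fake-uuid',
  status: '4',
  client_name: 'nodejs-bigtable',
  streaming: 'true',
};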
src/client-side-metrics/exporter.ts | 20 +---- .../gcp-metrics-handler.ts | 18 ++--- src/client-side-metrics/metrics-handler.ts | 8 +- .../operation-metrics-collector.ts | 29 ++++--- test-common/expected-otel-export-input.ts | 81 +++++++++---------- test-common/metrics-handler-fixture.ts | 27 +++---- test-common/test-metrics-handler.ts | 4 +- .../metrics-collector/typical-method-call.txt | 6 +- 8 files changed, 87 insertions(+), 106 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index c49de82e3..ad985ec90 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -104,27 +104,9 @@ export function metricsToRequest(exportArgs: ResourceMetrics) { const metricName = scopeMetric.descriptor.name; for (const dataPoint of scopeMetric.dataPoints) { const value = dataPoint.value; - const allAttributes = dataPoint.attributes; - const streaming = allAttributes.streamingOperation; - /* - metricLabels are built from the open telemetry attributes that are set - when a data point is recorded. This means that for one metric there may - be multiple time series' with different attributes, but the resource - labels will always be the same for a particular export call. - */ - const metricLabels = Object.assign( - { - app_profile: allAttributes.appProfileId, - client_name: allAttributes.clientName, - method: allAttributes.methodName, - status: allAttributes.status, - client_uid: allAttributes.clientUid, - }, - streaming ? {streaming} : null - ); const metric = { type: metricName, - labels: metricLabels, + labels: dataPoint.attributes, }; const resource = { type: resourcesWithSyncAttributes._syncAttributes[ diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 16fdfa9c2..69753b7c4 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -246,13 +246,13 @@ export class GCPMetricsHandler implements IMetricsHandler { }); const commonAttributes = { appProfileId: data.metricsCollectorData.appProfileId, - methodName: data.metricsCollectorData.methodName, - clientUid: data.metricsCollectorData.clientUid, + method: data.metricsCollectorData.method, + client_uid: data.metricsCollectorData.client_uid, status: data.status, - clientName: data.clientName, + client_name: data.client_name, }; otelInstruments.operationLatencies.record(data.operationLatency, { - streamingOperation: data.streamingOperation, + streaming: data.streaming, ...commonAttributes, }); otelInstruments.retryCount.add(data.retryCount, commonAttributes); @@ -278,13 +278,13 @@ export class GCPMetricsHandler implements IMetricsHandler { }); const commonAttributes = { appProfileId: data.metricsCollectorData.appProfileId, - methodName: data.metricsCollectorData.methodName, - clientUid: data.metricsCollectorData.clientUid, + method: data.metricsCollectorData.method, + client_uid: data.metricsCollectorData.client_uid, status: data.status, - clientName: data.clientName, + client_name: data.client_name, }; otelInstruments.attemptLatencies.record(data.attemptLatency, { - streamingOperation: data.streamingOperation, + streaming: data.streaming, ...commonAttributes, }); otelInstruments.connectivityErrorCount.add( @@ -292,7 +292,7 @@ export class GCPMetricsHandler implements IMetricsHandler { commonAttributes ); otelInstruments.serverLatencies.record(data.serverLatency, { - streamingOperation: data.streamingOperation, + streaming: data.streaming, 
...commonAttributes, }); } diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index 7aad1bd7f..d7fb0af64 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -27,15 +27,15 @@ type IMetricsCollectorData = { cluster?: string; zone?: string; appProfileId?: string; - methodName: MethodName; - clientUid: string; + method: MethodName; + client_uid: string; }; interface StandardData { projectId: string; metricsCollectorData: IMetricsCollectorData; - clientName: string; - streamingOperation: StreamingState; + client_name: string; + streaming: StreamingState; status: string; } diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 12f405bc1..6c5e86232 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -105,15 +105,18 @@ export class OperationMetricsCollector { } private getMetricsCollectorData() { - return { - instanceId: this.tabularApiSurface.instance.id, - table: this.tabularApiSurface.id, - cluster: this.cluster, - zone: this.zone, - appProfileId: this.tabularApiSurface.bigtable.appProfileId, - methodName: this.methodName, - clientUid: this.tabularApiSurface.bigtable.clientUid, - }; + const appProfileId = this.tabularApiSurface.bigtable.appProfileId; + return Object.assign( + { + instanceId: this.tabularApiSurface.instance.id, + table: this.tabularApiSurface.id, + cluster: this.cluster, + zone: this.zone, + method: this.methodName, + client_uid: this.tabularApiSurface.bigtable.clientUid, + }, + appProfileId ? {app_profile_id: appProfileId} : {} + ); } /** @@ -154,9 +157,9 @@ export class OperationMetricsCollector { attemptLatency: totalTime, serverLatency: this.serverTime ?? 
undefined, connectivityErrorCount: this.connectivityErrorCount, - streamingOperation: this.streamingOperation, + streaming: this.streamingOperation, status: attemptStatus.toString(), - clientName: `nodejs-bigtable/${version}`, + client_name: `nodejs-bigtable/${version}`, metricsCollectorData: this.getMetricsCollectorData(), projectId, }); @@ -225,9 +228,9 @@ export class OperationMetricsCollector { if (metricsHandler.onOperationComplete) { metricsHandler.onOperationComplete({ status: finalOperationStatus.toString(), - streamingOperation: this.streamingOperation, + streaming: this.streamingOperation, metricsCollectorData: this.getMetricsCollectorData(), - clientName: `nodejs-bigtable/${version}`, + client_name: `nodejs-bigtable/${version}`, projectId, operationLatency: totalTime, retryCount: this.attemptCount - 1, diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index d822001dc..e15833a01 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -35,7 +35,6 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/operation_latencies', labels: { - app_profile: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', @@ -132,7 +131,7 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/attempt_latencies', labels: { - app_profile: undefined, + app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '4', @@ -229,7 +228,7 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/attempt_latencies', labels: { - app_profile: undefined, + app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', @@ -326,7 +325,7 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/retry_count', labels: { - app_profile: undefined, + app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', @@ -364,7 +363,7 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/first_response_latencies', labels: { - app_profile: undefined, + app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', @@ -460,7 +459,7 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/server_latencies', labels: { - app_profile: undefined, + app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '4', @@ -557,7 +556,7 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/server_latencies', labels: { - app_profile: undefined, + app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', @@ -654,7 +653,7 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/connectivity_error_count', labels: { - app_profile: undefined, + app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '4', @@ -692,7 +691,7 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/connectivity_error_count', labels: { - app_profile: undefined, + app_profile_id: undefined, client_name: 
'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', @@ -803,11 +802,11 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { - streamingOperation: 'true', - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', + streaming: 'true', + method: 'Bigtable.ReadRows', + client_uid: 'fake-uuid', status: '0', - clientName: 'nodejs-bigtable', + client_name: 'nodejs-bigtable', }, startTime: [123, 789], endTime: [456, 789], @@ -855,11 +854,11 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { - streamingOperation: 'true', - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', + streaming: 'true', + method: 'Bigtable.ReadRows', + client_uid: 'fake-uuid', status: '4', - clientName: 'nodejs-bigtable', + client_name: 'nodejs-bigtable', }, startTime: [123, 789], endTime: [456, 789], @@ -885,11 +884,11 @@ export const expectedOtelExportInput = { }, { attributes: { - streamingOperation: 'true', - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', + streaming: 'true', + method: 'Bigtable.ReadRows', + client_uid: 'fake-uuid', status: '0', - clientName: 'nodejs-bigtable', + client_name: 'nodejs-bigtable', }, startTime: [123, 789], endTime: [456, 789], @@ -930,10 +929,10 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', + method: 'Bigtable.ReadRows', + client_uid: 'fake-uuid', status: '0', - clientName: 'nodejs-bigtable', + client_name: 'nodejs-bigtable', }, startTime: [123, 789], endTime: [456, 789], @@ -964,10 +963,10 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', + method: 'Bigtable.ReadRows', + client_uid: 'fake-uuid', status: '0', - clientName: 'nodejs-bigtable', + client_name: 'nodejs-bigtable', }, startTime: [123, 789], endTime: [456, 789], @@ -1015,11 +1014,11 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { - streamingOperation: 'true', - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', + streaming: 'true', + method: 'Bigtable.ReadRows', + client_uid: 'fake-uuid', status: '4', - clientName: 'nodejs-bigtable', + client_name: 'nodejs-bigtable', }, startTime: [123, 789], endTime: [456, 789], @@ -1045,11 +1044,11 @@ export const expectedOtelExportInput = { }, { attributes: { - streamingOperation: 'true', - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', + streaming: 'true', + method: 'Bigtable.ReadRows', + client_uid: 'fake-uuid', status: '0', - clientName: 'nodejs-bigtable', + client_name: 'nodejs-bigtable', }, startTime: [123, 789], endTime: [456, 789], @@ -1090,10 +1089,10 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', + method: 'Bigtable.ReadRows', + client_uid: 'fake-uuid', status: '4', - clientName: 'nodejs-bigtable', + client_name: 'nodejs-bigtable', }, startTime: [123, 789], endTime: [456, 789], @@ -1101,10 +1100,10 @@ export const expectedOtelExportInput = { }, { attributes: { - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', + method: 'Bigtable.ReadRows', + client_uid: 'fake-uuid', status: '0', - clientName: 'nodejs-bigtable', + client_name: 'nodejs-bigtable', }, startTime: [123, 789], endTime: [456, 789], diff --git a/test-common/metrics-handler-fixture.ts b/test-common/metrics-handler-fixture.ts index 3bfe4f490..5e947677e 100644 --- a/test-common/metrics-handler-fixture.ts +++ 
b/test-common/metrics-handler-fixture.ts @@ -17,17 +17,16 @@ export const expectedRequestsHandled = [ attemptLatency: 2000, serverLatency: 101, connectivityErrorCount: 0, - streamingOperation: 'true', + streaming: 'true', status: '4', - clientName: 'nodejs-bigtable', + client_name: 'nodejs-bigtable', metricsCollectorData: { - appProfileId: undefined, instanceId: 'fakeInstanceId', table: 'fakeTableId', cluster: 'fake-cluster3', zone: 'us-west1-c', - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', + method: 'Bigtable.ReadRows', + client_uid: 'fake-uuid', }, projectId: 'my-project', }, @@ -35,33 +34,31 @@ export const expectedRequestsHandled = [ attemptLatency: 2000, serverLatency: 103, connectivityErrorCount: 0, - streamingOperation: 'true', + streaming: 'true', status: '0', - clientName: 'nodejs-bigtable', + client_name: 'nodejs-bigtable', metricsCollectorData: { - appProfileId: undefined, instanceId: 'fakeInstanceId', table: 'fakeTableId', cluster: 'fake-cluster3', zone: 'us-west1-c', - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', + method: 'Bigtable.ReadRows', + client_uid: 'fake-uuid', }, projectId: 'my-project', }, { status: '0', - streamingOperation: 'true', + streaming: 'true', metricsCollectorData: { - appProfileId: undefined, instanceId: 'fakeInstanceId', table: 'fakeTableId', cluster: 'fake-cluster3', zone: 'us-west1-c', - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', + method: 'Bigtable.ReadRows', + client_uid: 'fake-uuid', }, - clientName: 'nodejs-bigtable', + client_name: 'nodejs-bigtable', projectId: 'my-project', operationLatency: 7000, retryCount: 1, diff --git a/test-common/test-metrics-handler.ts b/test-common/test-metrics-handler.ts index 1734cf18f..61257913f 100644 --- a/test-common/test-metrics-handler.ts +++ b/test-common/test-metrics-handler.ts @@ -35,7 +35,7 @@ export class TestMetricsHandler implements IMetricsHandler { */ onOperationComplete(data: OnOperationCompleteData) { this.requestsHandled.push(data); - data.clientName = 'nodejs-bigtable'; + data.client_name = 'nodejs-bigtable'; this.messages.value += 'Recording parameters for onOperationComplete:\n'; this.messages.value += `${JSON.stringify(data)}\n`; } @@ -46,7 +46,7 @@ export class TestMetricsHandler implements IMetricsHandler { */ onAttemptComplete(data: OnAttemptCompleteData) { this.requestsHandled.push(data); - data.clientName = 'nodejs-bigtable'; + data.client_name = 'nodejs-bigtable'; this.messages.value += 'Recording parameters for onAttemptComplete:\n'; this.messages.value += `${JSON.stringify(data)}\n`; } diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index da43e899b..bc6de7ad7 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -11,7 +11,7 @@ getDate call returns 3000 ms 8. A transient error occurs. 
getDate call returns 4000 ms Recording parameters for onAttemptComplete: -{"attemptLatency":2000,"serverLatency":101,"connectivityErrorCount":0,"streamingOperation":"true","status":"4","clientName":"nodejs-bigtable","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","methodName":"Bigtable.ReadRows","clientUid":"fake-uuid"},"projectId":"my-project"} +{"attemptLatency":2000,"serverLatency":101,"connectivityErrorCount":0,"streaming":"true","status":"4","client_name":"nodejs-bigtable","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","method":"Bigtable.ReadRows","client_uid":"fake-uuid"},"projectId":"my-project"} 9. After a timeout, the second attempt is made. getDate call returns 5000 ms 10. Client receives status information. @@ -24,7 +24,7 @@ getDate call returns 6000 ms 16. Stream ends, operation completes getDate call returns 7000 ms Recording parameters for onAttemptComplete: -{"attemptLatency":2000,"serverLatency":103,"connectivityErrorCount":0,"streamingOperation":"true","status":"0","clientName":"nodejs-bigtable","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","methodName":"Bigtable.ReadRows","clientUid":"fake-uuid"},"projectId":"my-project"} +{"attemptLatency":2000,"serverLatency":103,"connectivityErrorCount":0,"streaming":"true","status":"0","client_name":"nodejs-bigtable","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","method":"Bigtable.ReadRows","client_uid":"fake-uuid"},"projectId":"my-project"} getDate call returns 8000 ms Recording parameters for onOperationComplete: -{"status":"0","streamingOperation":"true","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","methodName":"Bigtable.ReadRows","clientUid":"fake-uuid"},"clientName":"nodejs-bigtable","projectId":"my-project","operationLatency":7000,"retryCount":1,"firstResponseLatency":5000} +{"status":"0","streaming":"true","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","method":"Bigtable.ReadRows","client_uid":"fake-uuid"},"client_name":"nodejs-bigtable","projectId":"my-project","operationLatency":7000,"retryCount":1,"firstResponseLatency":5000} From 117473b2bfb956204f1619b0391dd526134bb368 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 16:37:51 -0400 Subject: [PATCH 307/448] Solve the app_profile_id issue --- src/client-side-metrics/gcp-metrics-handler.ts | 4 ++-- src/client-side-metrics/metrics-handler.ts | 2 +- test-common/expected-otel-export-input.ts | 8 -------- test/metrics-collector/gcp-metrics-handler.ts | 2 +- 4 files changed, 4 insertions(+), 12 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 69753b7c4..d80a8344d 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -245,7 +245,7 @@ export class GCPMetricsHandler implements IMetricsHandler { zone: data.metricsCollectorData.zone, }); const commonAttributes = { - appProfileId: data.metricsCollectorData.appProfileId, + app_profile_id: data.metricsCollectorData.app_profile_id, method: data.metricsCollectorData.method, client_uid: data.metricsCollectorData.client_uid, status: 
data.status, @@ -277,7 +277,7 @@ export class GCPMetricsHandler implements IMetricsHandler { zone: data.metricsCollectorData.zone, }); const commonAttributes = { - appProfileId: data.metricsCollectorData.appProfileId, + app_profile_id: data.metricsCollectorData.app_profile_id, method: data.metricsCollectorData.method, client_uid: data.metricsCollectorData.client_uid, status: data.status, diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index d7fb0af64..e4cb29758 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -26,7 +26,7 @@ type IMetricsCollectorData = { table: string; cluster?: string; zone?: string; - appProfileId?: string; + app_profile_id?: string; method: MethodName; client_uid: string; }; diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index e15833a01..2d7b43401 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -131,7 +131,6 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/attempt_latencies', labels: { - app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '4', @@ -228,7 +227,6 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/attempt_latencies', labels: { - app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', @@ -325,7 +323,6 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/retry_count', labels: { - app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', @@ -363,7 +360,6 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/first_response_latencies', labels: { - app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', @@ -459,7 +455,6 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/server_latencies', labels: { - app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '4', @@ -556,7 +551,6 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/server_latencies', labels: { - app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', @@ -653,7 +647,6 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/connectivity_error_count', labels: { - app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '4', @@ -691,7 +684,6 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/connectivity_error_count', labels: { - app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index b870a17ac..f4f81eacd 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -84,7 +84,7 @@ describe('Bigtable/GCPMetricsHandler', () => { JSON.parse(JSON.stringify(metrics)), expectedOtelExportInput ); - const convertedRequest = 
metricsToRequest(metrics); + const convertedRequest = metricsToRequest(parsedExportInput); assert.deepStrictEqual( convertedRequest.timeSeries.length, expectedOtelExportConvertedValue.timeSeries.length From e4154af705bf7d4a6e6304e17434610c5b26af46 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 17:03:12 -0400 Subject: [PATCH 308/448] It is actually app_profile not app_profile_id --- src/client-side-metrics/gcp-metrics-handler.ts | 4 ++-- src/client-side-metrics/metrics-handler.ts | 2 +- src/client-side-metrics/operation-metrics-collector.ts | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index d80a8344d..1855b88e0 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -245,7 +245,7 @@ export class GCPMetricsHandler implements IMetricsHandler { zone: data.metricsCollectorData.zone, }); const commonAttributes = { - app_profile_id: data.metricsCollectorData.app_profile_id, + app_profile: data.metricsCollectorData.app_profile, method: data.metricsCollectorData.method, client_uid: data.metricsCollectorData.client_uid, status: data.status, @@ -277,7 +277,7 @@ export class GCPMetricsHandler implements IMetricsHandler { zone: data.metricsCollectorData.zone, }); const commonAttributes = { - app_profile_id: data.metricsCollectorData.app_profile_id, + app_profile: data.metricsCollectorData.app_profile, method: data.metricsCollectorData.method, client_uid: data.metricsCollectorData.client_uid, status: data.status, diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index e4cb29758..e69b951b8 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -26,7 +26,7 @@ type IMetricsCollectorData = { table: string; cluster?: string; zone?: string; - app_profile_id?: string; + app_profile?: string; method: MethodName; client_uid: string; }; diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 6c5e86232..d25538a31 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -115,7 +115,7 @@ export class OperationMetricsCollector { method: this.methodName, client_uid: this.tabularApiSurface.bigtable.clientUid, }, - appProfileId ? {app_profile_id: appProfileId} : {} + appProfileId ? 
{app_profile: appProfileId} : {} ); } From 0ccec5b0dea7c74d886bc03185f23541760ffd86 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 17 Mar 2025 10:13:32 -0400 Subject: [PATCH 309/448] Add guards that stop the export call --- system-test/gcp-metrics-handler.ts | 150 +++++++++++++++-------------- 1 file changed, 76 insertions(+), 74 deletions(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index a4e0a3160..65470c706 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -29,7 +29,7 @@ import * as assert from 'assert'; import {expectedOtelExportInput} from '../test-common/expected-otel-export-input'; import {replaceTimestamps} from '../test-common/replace-timestamps'; -describe('Bigtable/GCPMetricsHandler', () => { +describe.only('Bigtable/GCPMetricsHandler', () => { it('Should export a value to the GCPMetricsHandler', done => { (async () => { /* @@ -48,20 +48,16 @@ describe('Bigtable/GCPMetricsHandler', () => { resultCallback: (result: ExportResult) => void ) { return (result: ExportResult) => { - if (!exported) { - exported = true; - try { - clearTimeout(timeout); - assert.strictEqual(result.code, 0); - done(); - resultCallback({code: 0}); - } catch (error) { - // Code isn't 0 so report the original error. - done(result); - done(error); - } - } else { + exported = true; + try { + clearTimeout(timeout); + assert.strictEqual(result.code, 0); + done(); resultCallback({code: 0}); + } catch (error) { + // Code isn't 0 so report the original error. + done(result); + done(error); } }; } @@ -71,7 +67,11 @@ describe('Bigtable/GCPMetricsHandler', () => { resultCallback: (result: ExportResult) => void ): void { const testResultCallback = getTestResultCallback(resultCallback); - super.export(metrics, testResultCallback); + if (!exported) { + super.export(metrics, testResultCallback); + } else { + resultCallback({code: 0}); + } } } @@ -123,29 +123,23 @@ describe('Bigtable/GCPMetricsHandler', () => { resultCallback: (result: ExportResult) => void ) { return (result: ExportResult) => { - if (exportedCount < 2) { - exportedCount++; - try { - assert.strictEqual(result.code, 0); - } catch (error) { - // Code isn't 0 so report the original error. - done(result); - done(error); - } - if (exportedCount === 2) { - // We are expecting two calls to an exporter. One for each - // metrics handler. - clearTimeout(timeout); - done(); - } - resultCallback({code: 0}); - } else { - // After the test is complete the periodic exporter may still be - // running in which case we don't want to do any checks. We just - // want to call the resultCallback so that there are no hanging - // threads. - resultCallback({code: 0}); + exportedCount++; + try { + assert.strictEqual(result.code, 0); + } catch (error) { + // Code isn't 0 so report the original error. + done(result); + done(error); } + if (exportedCount === 2) { + // We are expecting two calls to an exporter. One for each + // metrics handler. + clearTimeout(timeout); + done(); + } + // The resultCallback needs to be called to end the exporter operation + // so that the test shuts down in mocha. + resultCallback({code: 0}); }; } class MockExporter extends CloudMonitoringExporter { @@ -153,47 +147,55 @@ describe('Bigtable/GCPMetricsHandler', () => { metrics: ResourceMetrics, resultCallback: (result: ExportResult) => void ): void { - try { - // The code block ensures the metrics are correct. Mainly, the metrics - // shouldn't contain two copies of the data. It should only contain - // one. 
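// The guard this patch adds is worth spelling out: the
// PeriodicExportingMetricReader keeps firing after the assertions have
// passed, and mocha's done() must run exactly once. A minimal sketch of the
// idea, with the flag and class names assumed rather than copied from the
// test:

let alreadyVerified = false;

class GuardedExporter extends CloudMonitoringExporter {
  export(
    metrics: ResourceMetrics,
    resultCallback: (result: ExportResult) => void
  ): void {
    if (alreadyVerified) {
      // Later periodic exports are acknowledged without re-checking, so no
      // callbacks are left hanging when the suite shuts down.
      resultCallback({code: 0});
      return;
    }
    alreadyVerified = true;
    super.export(metrics, resultCallback);
  }
}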
- - // For this test since we are still writing a time series with - // metrics variable we don't want to modify the metrics variable - // to have artificial times because then sending the data to the - // metric service client will fail. Therefore, we must make a copy - // of the metrics and use that. - const parsedExportInput: ResourceMetrics = JSON.parse( - JSON.stringify(metrics) - ); - replaceTimestamps( - parsedExportInput as unknown as typeof expectedOtelExportInput, - [123, 789], - [456, 789] - ); - assert.deepStrictEqual( - parsedExportInput.scopeMetrics[0].metrics.length, - expectedOtelExportInput.scopeMetrics[0].metrics.length - ); - for ( - let index = 0; - index < parsedExportInput.scopeMetrics[0].metrics.length; - index++ - ) { - // We need to compare pointwise because mocha truncates to an 8192 character limit. + if (exportedCount < 2) { + try { + // This code block ensures the metrics are correct. Mainly, the metrics + // shouldn't contain two copies of the data. It should only contain + // one. + // + // For this test since we are still writing a time series with + // metrics variable we don't want to modify the metrics variable + // to have artificial times because then sending the data to the + // metric service client will fail. Therefore, we must make a copy + // of the metrics and use that. + const parsedExportInput: ResourceMetrics = JSON.parse( + JSON.stringify(metrics) + ); + replaceTimestamps( + parsedExportInput as unknown as typeof expectedOtelExportInput, + [123, 789], + [456, 789] + ); assert.deepStrictEqual( - parsedExportInput.scopeMetrics[0].metrics[index], - expectedOtelExportInput.scopeMetrics[0].metrics[index] + parsedExportInput.scopeMetrics[0].metrics.length, + expectedOtelExportInput.scopeMetrics[0].metrics.length ); + for ( + let index = 0; + index < parsedExportInput.scopeMetrics[0].metrics.length; + index++ + ) { + // We need to compare pointwise because mocha truncates to an 8192 character limit. + assert.deepStrictEqual( + parsedExportInput.scopeMetrics[0].metrics[index], + expectedOtelExportInput.scopeMetrics[0].metrics[index] + ); + } + } catch (e) { + // The error needs to be caught so it can be reported to the mocha + // test runner. + done(e); } - } catch (e) { - // The error needs to be caught so it can be reported to the mocha - // test runner. - done(e); + // The code below uses the test callback to ensure the export was successful. + const testResultCallback = getTestResultCallback(resultCallback); + super.export(metrics, testResultCallback); + } else { + // After the test is complete the periodic exporter may still be + // running in which case we don't want to do any checks. We just + // want to call the resultCallback so that there are no hanging + // threads. + resultCallback({code: 0}); } - // The code below uses the test callback to ensure the export was successful. 
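// The copy-then-normalize step shown here deserves a note: the exporter
// still holds the live metrics object, so rewriting its timestamps in place
// would corrupt the real export that follows. A JSON round trip produces a
// detached copy that can be normalized and compared safely. Roughly, using
// the helper and fixed timestamps from this test:

const detachedCopy: ResourceMetrics = JSON.parse(JSON.stringify(metrics));
replaceTimestamps(
  detachedCopy as unknown as typeof expectedOtelExportInput,
  [123, 789], // normalized start time
  [456, 789]  // normalized end time
);
// Compare entry by entry because mocha truncates long assertion diffs.
for (let i = 0; i < detachedCopy.scopeMetrics[0].metrics.length; i++) {
  assert.deepStrictEqual(
    detachedCopy.scopeMetrics[0].metrics[i],
    expectedOtelExportInput.scopeMetrics[0].metrics[i]
  );
}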
- const testResultCallback = getTestResultCallback(resultCallback); - super.export(metrics, testResultCallback); } } From 9f94b9e25ae749c87c08f71ead3098216914a495 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 17 Mar 2025 10:16:55 -0400 Subject: [PATCH 310/448] Remove only --- system-test/gcp-metrics-handler.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 65470c706..4097d3216 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -29,7 +29,7 @@ import * as assert from 'assert'; import {expectedOtelExportInput} from '../test-common/expected-otel-export-input'; import {replaceTimestamps} from '../test-common/replace-timestamps'; -describe.only('Bigtable/GCPMetricsHandler', () => { +describe('Bigtable/GCPMetricsHandler', () => { it('Should export a value to the GCPMetricsHandler', done => { (async () => { /* From 7e76c39953de2e81460d72a0ec0b03675f1aaafb Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 17 Mar 2025 10:44:21 -0400 Subject: [PATCH 311/448] Add a new test for ten metrics handlers --- system-test/gcp-metrics-handler.ts | 127 +++++++++++++++++++++++++++++ 1 file changed, 127 insertions(+) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 4097d3216..b159537dc 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -233,4 +233,131 @@ describe('Bigtable/GCPMetricsHandler', () => { } })(); }); + it.only('Should export a value to ten GCPMetricsHandlers', done => { + // This test ensures that when we create two GCPMetricsHandlers much like + // what we would be doing when calling readRows on two separate tables that + // the data doesn't store duplicates in the same place and export twice as + // much data as it should. + (async () => { + /* + We need to create a timeout here because if we don't then mocha shuts down + the test as it is sleeping before the GCPMetricsHandler has a chance to + export the data. + */ + const timeout = setTimeout(() => {}, 120000); + /* + The exporter is called every x seconds, but we only want to test the value + it receives once. Since done cannot be called multiple times in mocha, + exported variable ensures we only test the value export receives one time. + */ + let exportedCount = 0; + function getTestResultCallback( + resultCallback: (result: ExportResult) => void + ) { + return (result: ExportResult) => { + exportedCount++; + try { + assert.strictEqual(result.code, 0); + } catch (error) { + // Code isn't 0 so report the original error. + done(result); + done(error); + } + if (exportedCount === 10) { + // We are expecting ten calls to an exporter. One for each + // metrics handler. + clearTimeout(timeout); + done(); + } + // The resultCallback needs to be called to end the exporter operation + // so that the test shuts down in mocha. + resultCallback({code: 0}); + }; + } + class MockExporter extends CloudMonitoringExporter { + export( + metrics: ResourceMetrics, + resultCallback: (result: ExportResult) => void + ): void { + if (exportedCount < 10) { + try { + // This code block ensures the metrics are correct. Mainly, the metrics + // shouldn't contain two copies of the data. It should only contain + // one. 
+ // + // For this test since we are still writing a time series with + // metrics variable we don't want to modify the metrics variable + // to have artificial times because then sending the data to the + // metric service client will fail. Therefore, we must make a copy + // of the metrics and use that. + const parsedExportInput: ResourceMetrics = JSON.parse( + JSON.stringify(metrics) + ); + replaceTimestamps( + parsedExportInput as unknown as typeof expectedOtelExportInput, + [123, 789], + [456, 789] + ); + assert.deepStrictEqual( + parsedExportInput.scopeMetrics[0].metrics.length, + expectedOtelExportInput.scopeMetrics[0].metrics.length + ); + for ( + let index = 0; + index < parsedExportInput.scopeMetrics[0].metrics.length; + index++ + ) { + // We need to compare pointwise because mocha truncates to an 8192 character limit. + assert.deepStrictEqual( + parsedExportInput.scopeMetrics[0].metrics[index], + expectedOtelExportInput.scopeMetrics[0].metrics[index] + ); + } + } catch (e) { + // The error needs to be caught so it can be reported to the mocha + // test runner. + done(e); + } + // The code below uses the test callback to ensure the export was successful. + const testResultCallback = getTestResultCallback(resultCallback); + super.export(metrics, testResultCallback); + } else { + // After the test is complete the periodic exporter may still be + // running in which case we don't want to do any checks. We just + // want to call the resultCallback so that there are no hanging + // threads. + resultCallback({code: 0}); + } + } + } + + const bigtable = new Bigtable(); + const projectId: string = await new Promise((resolve, reject) => { + bigtable.getProjectId_((err, projectId) => { + if (err) { + reject(err); + } else { + resolve(projectId as string); + } + }); + }); + const transformedRequestsHandled = JSON.parse( + JSON.stringify(expectedRequestsHandled).replace( + /my-project/g, + projectId + ) + ); + const handlers = []; + for (let i = 0; i < 10; i++) { + handlers.push(new GCPMetricsHandler(new MockExporter({projectId}))); + for (const request of transformedRequestsHandled) { + if (request.attemptLatency) { + handlers[i].onAttemptComplete(request as OnAttemptCompleteData); + } else { + handlers[i].onOperationComplete(request as OnOperationCompleteData); + } + } + } + })(); + }); }); From bb64fbe7cfc5d9f8d2f1e002ef4c36b65742f0c8 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 17 Mar 2025 10:49:16 -0400 Subject: [PATCH 312/448] Remove only --- system-test/gcp-metrics-handler.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index b159537dc..12cd9ef9c 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -233,7 +233,7 @@ describe('Bigtable/GCPMetricsHandler', () => { } })(); }); - it.only('Should export a value to ten GCPMetricsHandlers', done => { + it('Should export a value to ten GCPMetricsHandlers', done => { // This test ensures that when we create two GCPMetricsHandlers much like // what we would be doing when calling readRows on two separate tables that // the data doesn't store duplicates in the same place and export twice as From 27f5bcdbfe71f00dbd049718957892806371d219 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 17 Mar 2025 13:21:42 -0400 Subject: [PATCH 313/448] Do not pass data through the Resource object anymore --- src/client-side-metrics/exporter.ts | 47 ++++++++++--------- .../gcp-metrics-handler.ts | 37 ++++++--------- 
test-common/expected-otel-export-input.ts | 46 ++++++++++++++---- 3 files changed, 76 insertions(+), 54 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index ad985ec90..a7b351fdb 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -81,37 +81,40 @@ export function metricsToRequest(exportArgs: ResourceMetrics) { type WithSyncAttributes = {_syncAttributes: {[index: string]: string}}; const resourcesWithSyncAttributes = exportArgs.resource as unknown as WithSyncAttributes; + const projectId = + resourcesWithSyncAttributes._syncAttributes[ + 'monitored_resource.project_id' + ]; const timeSeriesArray = []; - const resourceLabels = { - cluster: - resourcesWithSyncAttributes._syncAttributes['monitored_resource.cluster'], - instance: - resourcesWithSyncAttributes._syncAttributes[ - 'monitored_resource.instance_id' - ], - project_id: - resourcesWithSyncAttributes._syncAttributes[ - 'monitored_resource.project_id' - ], - table: - resourcesWithSyncAttributes._syncAttributes['monitored_resource.table'], - zone: resourcesWithSyncAttributes._syncAttributes[ - 'monitored_resource.zone' - ], - }; for (const scopeMetrics of exportArgs.scopeMetrics) { for (const scopeMetric of scopeMetrics.metrics) { const metricName = scopeMetric.descriptor.name; for (const dataPoint of scopeMetric.dataPoints) { const value = dataPoint.value; + const resourceLabels = { + cluster: dataPoint.attributes.cluster, + instance: dataPoint.attributes.instanceId, + project_id: projectId, + table: dataPoint.attributes.table, + zone: dataPoint.attributes.zone, + }; + const streaming = dataPoint.attributes.streaming; + const app_profile = dataPoint.attributes.app_profile; const metric = { type: metricName, - labels: dataPoint.attributes, + labels: Object.assign( + { + method: dataPoint.attributes.method, + client_uid: dataPoint.attributes.client_uid, + status: dataPoint.attributes.status, + client_name: dataPoint.attributes.client_name, + }, + streaming ? {streaming} : null, + app_profile ? {app_profile} : null + ), }; const resource = { - type: resourcesWithSyncAttributes._syncAttributes[ - 'monitored_resource.type' - ], + type: 'bigtable_client_raw', labels: resourceLabels, }; const interval = { @@ -169,7 +172,7 @@ export function metricsToRequest(exportArgs: ResourceMetrics) { } } return { - name: `projects/${resourcesWithSyncAttributes._syncAttributes['monitored_resource.project_id']}`, + name: `projects/${projectId}`, timeSeries: timeSeriesArray, }; } diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 1855b88e0..b73347422 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -92,11 +92,13 @@ export class GCPMetricsHandler implements IMetricsHandler { * Creates and registers metric instruments (histograms and counters) for various Bigtable client metrics. * Sets up a MeterProvider and configures a PeriodicExportingMetricReader for exporting metrics to Cloud Monitoring. * - * @param {MonitoredResourceData} data The data that will be used to set up the monitored resource * which will be provided to the exporter in every export call. * */ - private getMetrics(data: MonitoredResourceData): Metrics { + private getMetrics(projectId: string): Metrics { + // The projectId is needed per metrics handler because when the exporter is + // used it provides the project id for the name of the time series exported. + // ie. 
name: `projects/${....['monitored_resource.project_id']}`, if (!this.otelInstruments) { const latencyBuckets = [ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, 13.0, 16.0, 20.0, 25.0, @@ -127,12 +129,7 @@ export class GCPMetricsHandler implements IMetricsHandler { views: viewList, resource: new Resources.Resource({ 'service.name': 'Cloud Bigtable Table', - 'monitored_resource.type': 'bigtable_client_raw', - 'monitored_resource.project_id': data.projectId, - 'monitored_resource.instance_id': data.instanceId, - 'monitored_resource.table': data.table, - 'monitored_resource.cluster': data.cluster, - 'monitored_resource.zone': data.zone, + 'monitored_resource.project_id': projectId, }).merge(new ResourceUtil.GcpDetectorSync().detect()), readers: [ // Register the exporter @@ -237,19 +234,17 @@ export class GCPMetricsHandler implements IMetricsHandler { * @param {OnOperationCompleteData} data Data related to the completed operation. */ onOperationComplete(data: OnOperationCompleteData) { - const otelInstruments = this.getMetrics({ - projectId: data.projectId, - instanceId: data.metricsCollectorData.instanceId, - table: data.metricsCollectorData.table, - cluster: data.metricsCollectorData.cluster, - zone: data.metricsCollectorData.zone, - }); + const otelInstruments = this.getMetrics(data.projectId); const commonAttributes = { app_profile: data.metricsCollectorData.app_profile, method: data.metricsCollectorData.method, client_uid: data.metricsCollectorData.client_uid, status: data.status, client_name: data.client_name, + instanceId: data.metricsCollectorData.instanceId, + table: data.metricsCollectorData.table, + cluster: data.metricsCollectorData.cluster, + zone: data.metricsCollectorData.zone, }; otelInstruments.operationLatencies.record(data.operationLatency, { streaming: data.streaming, @@ -269,19 +264,17 @@ export class GCPMetricsHandler implements IMetricsHandler { * @param {OnAttemptCompleteData} data Data related to the completed attempt. 
*/ onAttemptComplete(data: OnAttemptCompleteData) { - const otelInstruments = this.getMetrics({ - projectId: data.projectId, - instanceId: data.metricsCollectorData.instanceId, - table: data.metricsCollectorData.table, - cluster: data.metricsCollectorData.cluster, - zone: data.metricsCollectorData.zone, - }); + const otelInstruments = this.getMetrics(data.projectId); const commonAttributes = { app_profile: data.metricsCollectorData.app_profile, method: data.metricsCollectorData.method, client_uid: data.metricsCollectorData.client_uid, status: data.status, client_name: data.client_name, + instanceId: data.metricsCollectorData.instanceId, + table: data.metricsCollectorData.table, + cluster: data.metricsCollectorData.cluster, + zone: data.metricsCollectorData.zone, }; otelInstruments.attemptLatencies.record(data.attemptLatency, { streaming: data.streaming, diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 2d7b43401..d9855ed7f 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -743,12 +743,7 @@ export const expectedOtelExportInput = { 'telemetry.sdk.language': 'nodejs', 'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.version': '1.30.1', - 'monitored_resource.type': 'bigtable_client_raw', 'monitored_resource.project_id': 'my-project', - 'monitored_resource.instance_id': 'fakeInstanceId', - 'monitored_resource.table': 'fakeTableId', - 'monitored_resource.cluster': 'fake-cluster3', - 'monitored_resource.zone': 'us-west1-c', }, asyncAttributesPending: false, _syncAttributes: { @@ -756,12 +751,7 @@ export const expectedOtelExportInput = { 'telemetry.sdk.language': 'nodejs', 'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.version': '1.30.1', - 'monitored_resource.type': 'bigtable_client_raw', 'monitored_resource.project_id': 'my-project', - 'monitored_resource.instance_id': 'fakeInstanceId', - 'monitored_resource.table': 'fakeTableId', - 'monitored_resource.cluster': 'fake-cluster3', - 'monitored_resource.zone': 'us-west1-c', }, _asyncAttributesPromise: {}, }, @@ -799,6 +789,10 @@ export const expectedOtelExportInput = { client_uid: 'fake-uuid', status: '0', client_name: 'nodejs-bigtable', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', }, startTime: [123, 789], endTime: [456, 789], @@ -851,6 +845,10 @@ export const expectedOtelExportInput = { client_uid: 'fake-uuid', status: '4', client_name: 'nodejs-bigtable', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', }, startTime: [123, 789], endTime: [456, 789], @@ -881,6 +879,10 @@ export const expectedOtelExportInput = { client_uid: 'fake-uuid', status: '0', client_name: 'nodejs-bigtable', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', }, startTime: [123, 789], endTime: [456, 789], @@ -925,6 +927,10 @@ export const expectedOtelExportInput = { client_uid: 'fake-uuid', status: '0', client_name: 'nodejs-bigtable', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', }, startTime: [123, 789], endTime: [456, 789], @@ -959,6 +965,10 @@ export const expectedOtelExportInput = { client_uid: 'fake-uuid', status: '0', client_name: 'nodejs-bigtable', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', }, startTime: [123, 789], endTime: [456, 789], @@ -1011,6 +1021,10 @@ 
export const expectedOtelExportInput = { client_uid: 'fake-uuid', status: '4', client_name: 'nodejs-bigtable', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', }, startTime: [123, 789], endTime: [456, 789], @@ -1041,6 +1055,10 @@ export const expectedOtelExportInput = { client_uid: 'fake-uuid', status: '0', client_name: 'nodejs-bigtable', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', }, startTime: [123, 789], endTime: [456, 789], @@ -1085,6 +1103,10 @@ export const expectedOtelExportInput = { client_uid: 'fake-uuid', status: '4', client_name: 'nodejs-bigtable', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', }, startTime: [123, 789], endTime: [456, 789], @@ -1096,6 +1118,10 @@ export const expectedOtelExportInput = { client_uid: 'fake-uuid', status: '0', client_name: 'nodejs-bigtable', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', }, startTime: [123, 789], endTime: [456, 789], From d1a292fd3e149dfcb095ace09261c2f83ce2e6f4 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 17 Mar 2025 13:51:27 -0400 Subject: [PATCH 314/448] Add a test for writing duplicate points to MH --- system-test/gcp-metrics-handler.ts | 73 ++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 12cd9ef9c..f67e5cf39 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -360,4 +360,77 @@ describe('Bigtable/GCPMetricsHandler', () => { } })(); }); + it('Should write two duplicate points inserted into the metrics handler', done => { + (async () => { + /* + We need to create a timeout here because if we don't then mocha shuts down + the test as it is sleeping before the GCPMetricsHandler has a chance to + export the data. + */ + const timeout = setTimeout(() => {}, 120000); + /* + The exporter is called every x seconds, but we only want to test the value + it receives once. Since done cannot be called multiple times in mocha, + exported variable ensures we only test the value export receives one time. + */ + let exported = false; + function getTestResultCallback( + resultCallback: (result: ExportResult) => void + ) { + return (result: ExportResult) => { + exported = true; + try { + clearTimeout(timeout); + assert.strictEqual(result.code, 0); + done(); + resultCallback({code: 0}); + } catch (error) { + // Code isn't 0 so report the original error. 
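// Stepping back to the exporter rewrite in patch 313 above: the
// monitored-resource fields no longer travel on the MeterProvider Resource;
// instanceId, table, cluster and zone now ride on every data point and are
// split back apart at export time into resource labels versus metric
// labels. A condensed sketch of that split, with names taken from the diff:

const resource = {
  type: 'bigtable_client_raw',
  labels: {
    project_id: projectId,
    instance: dataPoint.attributes.instanceId,
    table: dataPoint.attributes.table,
    cluster: dataPoint.attributes.cluster,
    zone: dataPoint.attributes.zone,
  },
};
const metric = {
  type: scopeMetric.descriptor.name,
  labels: Object.assign(
    {
      method: dataPoint.attributes.method,
      client_uid: dataPoint.attributes.client_uid,
      status: dataPoint.attributes.status,
      client_name: dataPoint.attributes.client_name,
    },
    // Optional labels are omitted entirely rather than exported as
    // undefined, which is why the app_profile_id: undefined entries
    // disappeared from the expected fixtures.
    dataPoint.attributes.streaming
      ? {streaming: dataPoint.attributes.streaming}
      : null,
    dataPoint.attributes.app_profile
      ? {app_profile: dataPoint.attributes.app_profile}
      : null
  ),
};
// Each resource + metric pair then becomes one TimeSeries entry.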
+ done(result); + done(error); + } + }; + } + class MockExporter extends CloudMonitoringExporter { + export( + metrics: ResourceMetrics, + resultCallback: (result: ExportResult) => void + ): void { + const testResultCallback = getTestResultCallback(resultCallback); + if (!exported) { + super.export(metrics, testResultCallback); + } else { + resultCallback({code: 0}); + } + } + } + + const bigtable = new Bigtable(); + const projectId: string = await new Promise((resolve, reject) => { + bigtable.getProjectId_((err, projectId) => { + if (err) { + reject(err); + } else { + resolve(projectId as string); + } + }); + }); + const handler = new GCPMetricsHandler(new MockExporter({projectId})); + const transformedRequestsHandled = JSON.parse( + JSON.stringify(expectedRequestsHandled).replace( + /my-project/g, + projectId + ) + ); + for (let i = 0; i < 2; i++) { + for (const request of transformedRequestsHandled) { + if (request.attemptLatency) { + handler.onAttemptComplete(request as OnAttemptCompleteData); + } else { + handler.onOperationComplete(request as OnOperationCompleteData); + } + } + } + })(); + }); }); From 23a4d397a59b8cf512e9c9331a3793cd0fb8227e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 17 Mar 2025 14:02:56 -0400 Subject: [PATCH 315/448] Eliminate interface --- .../gcp-metrics-handler.ts | 20 ------------------- 1 file changed, 20 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index b73347422..2c250aa1a 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -43,26 +43,6 @@ interface Metrics { clientBlockingLatencies: typeof Histogram; } -/** - * Represents the data associated with a monitored resource in Google Cloud Monitoring. - * - * This interface defines the structure of data that is used to identify and - * describe a specific resource being monitored, such as a Bigtable instance, - * cluster, or table. It is used to construct the `resource` part of a - * `TimeSeries` object in the Cloud Monitoring API. - * - * When an open telemetry instrument is created in the GCPMetricsHandler, all - * recordings to that instrument are expected to have the same - * MonitoredResourceData properties. - */ -interface MonitoredResourceData { - projectId: string; - instanceId: string; - table: string; - cluster?: string; - zone?: string; -} - /** * A metrics handler implementation that uses OpenTelemetry to export metrics to Google Cloud Monitoring. 
* This handler records metrics such as operation latency, attempt latency, retry count, and more, From 55dbd8f59aa60945dfc9c9cf41e35360aa27d78e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 17 Mar 2025 14:32:28 -0400 Subject: [PATCH 316/448] Set connectivity error count to 1 (not increment) --- src/client-side-metrics/operation-metrics-collector.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index d25538a31..5c5c2ef3d 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -271,7 +271,7 @@ export class OperationMetricsCollector { : parseInt(matchedDuration[1]); } } else { - this.connectivityErrorCount++; + this.connectivityErrorCount = 1; } } From 71b2d48167515bde49309196ce46317521bb4488 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 17 Mar 2025 17:11:13 -0400 Subject: [PATCH 317/448] Edit the fixtures --- .../client-side-metrics-to-metrics-handler.ts | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/system-test/client-side-metrics-to-metrics-handler.ts b/system-test/client-side-metrics-to-metrics-handler.ts index 7d40e43ee..3148459ab 100644 --- a/system-test/client-side-metrics-to-metrics-handler.ts +++ b/system-test/client-side-metrics-to-metrics-handler.ts @@ -34,37 +34,37 @@ describe('Bigtable/ClientSideMetricsToMetricsHandler', () => { // them from the comparison. delete firstRequest.attemptLatency; delete firstRequest.serverLatency; - delete firstRequest.metricsCollectorData.clientUid; + delete firstRequest.metricsCollectorData.client_uid; delete firstRequest.metricsCollectorData.appProfileId; assert.deepStrictEqual(firstRequest, { connectivityErrorCount: 0, - streamingOperation: 'true', - attemptStatus: 0, - clientName: 'nodejs-bigtable', + streaming: 'true', + status: '0', + client_name: 'nodejs-bigtable', metricsCollectorData: { instanceId: 'emulator-test-instance', table: 'my-table', cluster: 'fake-cluster3', zone: 'us-west1-c', - methodName: 'Bigtable.ReadRows', + method: 'Bigtable.ReadRows', }, projectId, }); const secondRequest = this.requestsHandled[1] as any; delete secondRequest.operationLatency; delete secondRequest.firstResponseLatency; - delete secondRequest.metricsCollectorData.clientUid; + delete secondRequest.metricsCollectorData.client_uid; delete secondRequest.metricsCollectorData.appProfileId; assert.deepStrictEqual(secondRequest, { - finalOperationStatus: 0, - streamingOperation: 'true', - clientName: 'nodejs-bigtable', + status: '0', + streaming: 'true', + client_name: 'nodejs-bigtable', metricsCollectorData: { instanceId: 'emulator-test-instance', table: 'my-table', cluster: 'fake-cluster3', zone: 'us-west1-c', - methodName: 'Bigtable.ReadRows', + method: 'Bigtable.ReadRows', }, projectId, retryCount: 0, From c581129e69eb6e9c2747554d69419837b878868f Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 20 Mar 2025 13:33:46 -0400 Subject: [PATCH 318/448] Remove the guard --- .../operation-metrics-collector.ts | 20 ++++--------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 231b13a9e..1c3e9a2e1 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -18,22 +18,10 @@ import {MethodName, 
StreamingState} from './client-side-metrics-attributes'; import {grpc} from 'google-gax'; import * as gax from 'google-gax'; -let ResponseParams: gax.protobuf.Type | null; -try { - /* - * Likely due to the Node 18 upgrade, the samples tests are failing with the - * error UnhandledPromiseRejectionWarning: Error: ENOENT: no such file or - * directory, open 'protos/google/bigtable/v2/response_params.proto'. Since - * these tests don't use this module we can suppress the error for now to - * unblock the CI pipeline. - */ - const root = gax.protobuf.loadSync( - './protos/google/bigtable/v2/response_params.proto' - ); - ResponseParams = root.lookupType('ResponseParams'); -} catch (e) { - ResponseParams = null; -} +const root = gax.protobuf.loadSync( + './protos/google/bigtable/v2/response_params.proto' +); +const ResponseParams = root.lookupType('ResponseParams'); /** * An interface representing a tabular API surface, such as a Bigtable table. From ba04208f113da13a0e67195a0ed2cf23821c5b95 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 20 Mar 2025 13:38:57 -0400 Subject: [PATCH 319/448] Change attempt status to string --- src/client-side-metrics/operation-metrics-collector.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 1c3e9a2e1..45745c70a 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -159,7 +159,7 @@ export class OperationMetricsCollector { serverLatency: this.serverTime ?? undefined, connectivityErrorCount: this.connectivityErrorCount, streaming: this.streamingOperation, - status: attemptStatus.toString(), + status: attemptStatus?.toString(), client_name: `nodejs-bigtable/${version}`, metricsCollectorData: this.getMetricsCollectorData(), projectId, From 9c3e5b1539e57ffed12378acacdae798a829dae4 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 20 Mar 2025 13:40:03 -0400 Subject: [PATCH 320/448] Add a comment - undefined in conformance tests --- src/client-side-metrics/operation-metrics-collector.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 45745c70a..6e0173fd5 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -159,7 +159,7 @@ export class OperationMetricsCollector { serverLatency: this.serverTime ?? undefined, connectivityErrorCount: this.connectivityErrorCount, streaming: this.streamingOperation, - status: attemptStatus?.toString(), + status: attemptStatus?.toString(), // undefined in conformance tests. 
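// For context on the loadSync change above (patch 318): the collector loads
// ResponseParams so it can recover the cluster and zone labels that
// Bigtable attaches to responses. A hedged sketch of the decode step; the
// clusterId/zoneId field names are assumptions based on the proto, not
// taken from this diff:
//
//   const root = gax.protobuf.loadSync(
//     './protos/google/bigtable/v2/response_params.proto'
//   );
//   const ResponseParams = root.lookupType('ResponseParams');
//   const decoded = ResponseParams.decode(serializedBytes) as unknown as {
//     clusterId?: string;
//     zoneId?: string;
//   };
//   // decoded.clusterId / decoded.zoneId then become the cluster and zone
//   // labels recorded with the attempt metrics.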
client_name: `nodejs-bigtable/${version}`, metricsCollectorData: this.getMetricsCollectorData(), projectId, From ab4545e52251c5525112f2a3fe7d9e293e26d8f3 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Mar 2025 14:48:23 -0400 Subject: [PATCH 321/448] Make metrics handlers per client --- src/index.ts | 11 ++++++++++- src/tabular-api-surface.ts | 2 +- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/src/index.ts b/src/index.ts index 835b28e92..45abb4376 100644 --- a/src/index.ts +++ b/src/index.ts @@ -36,6 +36,8 @@ import {PassThrough, Duplex} from 'stream'; import grpcGcpModule = require('grpc-gcp'); import {ClusterUtils} from './utils/cluster'; import {IMetricsHandler} from './client-side-metrics/metrics-handler'; +import {GCPMetricsHandler} from './client-side-metrics/gcp-metrics-handler'; +import {CloudMonitoringExporter} from './client-side-metrics/exporter'; // eslint-disable-next-line @typescript-eslint/no-var-requires const streamEvents = require('stream-events'); @@ -104,7 +106,7 @@ export interface BigtableOptions extends gax.GoogleAuthOptions { */ BigtableTableAdminClient?: gax.ClientOptions; - metricsHandlers?: IMetricsHandler[]; + collectMetrics?: boolean; } /** @@ -426,6 +428,7 @@ export class Bigtable { static AppProfile: AppProfile; static Instance: Instance; static Cluster: Cluster; + metricsHandlers: IMetricsHandler[]; constructor(options: BigtableOptions = {}) { // Determine what scopes are needed. @@ -523,6 +526,12 @@ export class Bigtable { this.appProfileId = options.appProfileId; this.projectName = `projects/${this.projectId}`; this.shouldReplaceProjectIdToken = this.projectId === '{{projectId}}'; + + if (options.collectMetrics === false) { + this.metricsHandlers = []; + } else { + this.metricsHandlers = [new GCPMetricsHandler(new CloudMonitoringExporter())]; + } } createInstance( diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index d33cf9f4d..fc1bbfa2b 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -345,7 +345,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); }; const metricsCollector = new OperationMetricsCollector( this, - [new GCPMetricsHandler(new CloudMonitoringExporter())], + this.bigtable.metricsHandlers, MethodName.READ_ROWS, StreamingState.STREAMING ); From 2d23a83732c9d7d6624c68fa03ee610ee0ba281d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Mar 2025 15:48:46 -0400 Subject: [PATCH 322/448] Add coment about latency --- src/index.ts | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/index.ts b/src/index.ts index 45abb4376..bb3d4a068 100644 --- a/src/index.ts +++ b/src/index.ts @@ -428,6 +428,9 @@ export class Bigtable { static AppProfile: AppProfile; static Instance: Instance; static Cluster: Cluster; + // Each time a metrics handler is created it introduces significant latency. 
+ // Therefore, metrics handlers should be created at the client level and + // reused throughout the library to reduce latency: metricsHandlers: IMetricsHandler[]; constructor(options: BigtableOptions = {}) { From efa147d58bb2389184bd3127faae2760f14ab7bf Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Mar 2025 15:49:37 -0400 Subject: [PATCH 323/448] Wrap in try --- .../client-side-metrics-to-metrics-handler.ts | 93 ++++++++++--------- 1 file changed, 48 insertions(+), 45 deletions(-) diff --git a/system-test/client-side-metrics-to-metrics-handler.ts b/system-test/client-side-metrics-to-metrics-handler.ts index 3148459ab..84fe6e780 100644 --- a/system-test/client-side-metrics-to-metrics-handler.ts +++ b/system-test/client-side-metrics-to-metrics-handler.ts @@ -23,54 +23,57 @@ import * as assert from 'assert'; import {TestMetricsHandler} from '../test-common/test-metrics-handler'; import {OnOperationCompleteData} from '../src/client-side-metrics/metrics-handler'; -describe('Bigtable/ClientSideMetricsToMetricsHandler', () => { +describe.only('Bigtable/ClientSideMetricsToMetricsHandler', () => { async function mockBigtable(projectId: string, done: mocha.Done) { class TestGCPMetricsHandler extends TestMetricsHandler { onOperationComplete(data: OnOperationCompleteData) { - super.onOperationComplete(data); - assert.strictEqual(this.requestsHandled.length, 2); - const firstRequest = this.requestsHandled[0] as any; - // We would expect these parameters to be different every time so delete - // them from the comparison. - delete firstRequest.attemptLatency; - delete firstRequest.serverLatency; - delete firstRequest.metricsCollectorData.client_uid; - delete firstRequest.metricsCollectorData.appProfileId; - assert.deepStrictEqual(firstRequest, { - connectivityErrorCount: 0, - streaming: 'true', - status: '0', - client_name: 'nodejs-bigtable', - metricsCollectorData: { - instanceId: 'emulator-test-instance', - table: 'my-table', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - method: 'Bigtable.ReadRows', - }, - projectId, - }); - const secondRequest = this.requestsHandled[1] as any; - delete secondRequest.operationLatency; - delete secondRequest.firstResponseLatency; - delete secondRequest.metricsCollectorData.client_uid; - delete secondRequest.metricsCollectorData.appProfileId; - assert.deepStrictEqual(secondRequest, { - status: '0', - streaming: 'true', - client_name: 'nodejs-bigtable', - metricsCollectorData: { - instanceId: 'emulator-test-instance', - table: 'my-table', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - method: 'Bigtable.ReadRows', - }, - projectId, - retryCount: 0, - }); - // Do assertion checks here to - done(); + try { + super.onOperationComplete(data); + assert.strictEqual(this.requestsHandled.length, 2); + const firstRequest = this.requestsHandled[0] as any; + // We would expect these parameters to be different every time so delete + // them from the comparison. 
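// Relating this to patches 321 and 322 above: constructing a
// GCPMetricsHandler spins up a MeterProvider and an exporter, which is
// expensive, so the client now builds the handler list once and every
// OperationMetricsCollector reuses it. A minimal sketch of that wiring,
// simplified from the diffs:

class BigtableClientSketch {
  metricsHandlers: IMetricsHandler[];

  constructor(options: {collectMetrics?: boolean} = {}) {
    this.metricsHandlers =
      options.collectMetrics === false
        ? [] // opting out skips handler construction entirely
        : [new GCPMetricsHandler(new CloudMonitoringExporter())];
  }
}

// Per-operation collectors then receive this.bigtable.metricsHandlers
// instead of newing up a handler per read, which is what removes the
// per-call latency. The deletes that follow below simply strip the
// nondeterministic fields before the fixture comparison.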
+ delete firstRequest.attemptLatency; + delete firstRequest.serverLatency; + delete firstRequest.metricsCollectorData.client_uid; + delete firstRequest.metricsCollectorData.appProfileId; + assert.deepStrictEqual(firstRequest, { + connectivityErrorCount: 0, + streaming: 'true', + status: '0', + client_name: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'emulator-test-instance', + table: 'my-table', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + method: 'Bigtable.ReadRows', + }, + projectId, + }); + const secondRequest = this.requestsHandled[1] as any; + delete secondRequest.operationLatency; + delete secondRequest.firstResponseLatency; + delete secondRequest.metricsCollectorData.client_uid; + delete secondRequest.metricsCollectorData.appProfileId; + assert.deepStrictEqual(secondRequest, { + status: '0', + streaming: 'true', + client_name: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'emulator-test-instance', + table: 'my-table', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + method: 'Bigtable.ReadRows', + }, + projectId, + retryCount: 0, + }); + done(); + } catch (e) { + done(e); + } } } From 5761892d5b8cc59b2c53fc0566c87684c75182cb Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Mar 2025 16:06:32 -0400 Subject: [PATCH 324/448] Adjust the proxyquire mocks to mock handler in right layer --- system-test/client-side-metrics-to-gcm.ts | 11 +---------- .../client-side-metrics-to-metrics-handler.ts | 15 ++------------- 2 files changed, 3 insertions(+), 23 deletions(-) diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts index 2f9c94d26..a42bf1db3 100644 --- a/system-test/client-side-metrics-to-gcm.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -79,19 +79,10 @@ describe('Bigtable/ClientSideMetricsToGCM', () => { ensure the export was successful and pass the test with code 0 if it is successful. 
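One pattern from patch 323 above is worth calling out on its own: assertions
that run inside an async callback must be wrapped so that a failure reaches
mocha instead of becoming an unhandled rejection. The shape, reduced to its
core (a sketch, not the full test body):

  onOperationComplete(data: OnOperationCompleteData) {
    try {
      super.onOperationComplete(data);
      assert.strictEqual(this.requestsHandled.length, 2);
      done(); // report success exactly once
    } catch (e) {
      done(e); // surface the assertion failure to the test runner
    }
  }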
*/ - const FakeTabularApiSurface = proxyquire('../src/tabular-api-surface.js', { + const FakeBigtable = proxyquire('../src/index.js', { './client-side-metrics/gcp-metrics-handler': { GCPMetricsHandler: TestGCPMetricsHandler, }, - }).TabularApiSurface; - const FakeTable: TabularApiSurface = proxyquire('../src/table.js', { - './tabular-api-surface.js': {TabularApiSurface: FakeTabularApiSurface}, - }).Table; - const FakeInstance = proxyquire('../src/instance.js', { - './table.js': {Table: FakeTable}, - }).Instance; - const FakeBigtable = proxyquire('../src/index.js', { - './instance.js': {Instance: FakeInstance}, }).Bigtable; bigtable = new FakeBigtable(); diff --git a/system-test/client-side-metrics-to-metrics-handler.ts b/system-test/client-side-metrics-to-metrics-handler.ts index 84fe6e780..12de763c0 100644 --- a/system-test/client-side-metrics-to-metrics-handler.ts +++ b/system-test/client-side-metrics-to-metrics-handler.ts @@ -17,13 +17,12 @@ import {describe, it, before, after} from 'mocha'; import {Bigtable} from '../src'; import * as proxyquire from 'proxyquire'; -import {TabularApiSurface} from '../src/tabular-api-surface'; import * as mocha from 'mocha'; import * as assert from 'assert'; import {TestMetricsHandler} from '../test-common/test-metrics-handler'; import {OnOperationCompleteData} from '../src/client-side-metrics/metrics-handler'; -describe.only('Bigtable/ClientSideMetricsToMetricsHandler', () => { +describe('Bigtable/ClientSideMetricsToMetricsHandler', () => { async function mockBigtable(projectId: string, done: mocha.Done) { class TestGCPMetricsHandler extends TestMetricsHandler { onOperationComplete(data: OnOperationCompleteData) { @@ -76,20 +75,10 @@ describe.only('Bigtable/ClientSideMetricsToMetricsHandler', () => { } } } - - const FakeTabularApiSurface = proxyquire('../src/tabular-api-surface.js', { + const FakeBigtable = proxyquire('../src/index.js', { './client-side-metrics/gcp-metrics-handler': { GCPMetricsHandler: TestGCPMetricsHandler, }, - }).TabularApiSurface; - const FakeTable: TabularApiSurface = proxyquire('../src/table.js', { - './tabular-api-surface.js': {TabularApiSurface: FakeTabularApiSurface}, - }).Table; - const FakeInstance = proxyquire('../src/instance.js', { - './table.js': {Table: FakeTable}, - }).Instance; - const FakeBigtable = proxyquire('../src/index.js', { - './instance.js': {Instance: FakeInstance}, }).Bigtable; bigtable = new FakeBigtable(); From 333c5b988785990b7f7d90a35dfe489c91f7317d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Mar 2025 16:07:24 -0400 Subject: [PATCH 325/448] Eliminate TODO --- system-test/client-side-metrics-to-metrics-handler.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/system-test/client-side-metrics-to-metrics-handler.ts b/system-test/client-side-metrics-to-metrics-handler.ts index 12de763c0..603daf950 100644 --- a/system-test/client-side-metrics-to-metrics-handler.ts +++ b/system-test/client-side-metrics-to-metrics-handler.ts @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
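// The proxyquire simplification in patch 324 above follows from the
// ownership change: once index.ts constructs the metrics handlers, the stub
// has to be injected where the module is actually required, not several
// layers down in tabular-api-surface. Roughly, with the paths from the
// diff:

const FakeBigtable = proxyquire('../src/index.js', {
  './client-side-metrics/gcp-metrics-handler': {
    GCPMetricsHandler: TestGCPMetricsHandler,
  },
}).Bigtable;

// The old chain that stubbed tabular-api-surface, table, and instance in
// sequence is no longer needed, because nothing below the client creates a
// handler anymore.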
-// TODO: Must be put in root folder or will not run - import {describe, it, before, after} from 'mocha'; import {Bigtable} from '../src'; import * as proxyquire from 'proxyquire'; From 3b71eff2225a63ec2e8a635ddf5e69ec9bb9b9ec Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Mar 2025 16:07:44 -0400 Subject: [PATCH 326/448] Eliminate TODO --- system-test/client-side-metrics-to-gcm.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts index a42bf1db3..9668b55c6 100644 --- a/system-test/client-side-metrics-to-gcm.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// TODO: Must be put in root folder or will not run - import {describe, it, before, after} from 'mocha'; import * as assert from 'assert'; import {Bigtable} from '../src'; From 4ab90842d976103f0e2e741e91fea93c657d5a2c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 27 Mar 2025 11:04:14 -0400 Subject: [PATCH 327/448] Add some code so that first response latencies gets recorded --- system-test/client-side-metrics-to-gcm.ts | 12 ++++++++++++ .../client-side-metrics-to-metrics-handler.ts | 12 ++++++++++++ 2 files changed, 24 insertions(+) diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts index 9668b55c6..7aa7de09d 100644 --- a/system-test/client-side-metrics-to-gcm.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -111,6 +111,18 @@ describe('Bigtable/ClientSideMetricsToGCM', () => { await table.createFamily(columnFamilyId); } } + // Add some data so that a firstResponseLatency is recorded. + await table.insert([ + { + key: 'rowId', + data: { + [columnFamilyId]: { + gwashington: 1, + tjefferson: 1, + }, + }, + }, + ]); } const instanceId = 'emulator-test-instance'; diff --git a/system-test/client-side-metrics-to-metrics-handler.ts b/system-test/client-side-metrics-to-metrics-handler.ts index 603daf950..3e04fcdc9 100644 --- a/system-test/client-side-metrics-to-metrics-handler.ts +++ b/system-test/client-side-metrics-to-metrics-handler.ts @@ -107,6 +107,18 @@ describe('Bigtable/ClientSideMetricsToMetricsHandler', () => { await table.createFamily(columnFamilyId); } } + // Add some data so that a firstResponseLatency is recorded. 
+ await table.insert([ + { + key: 'rowId', + data: { + [columnFamilyId]: { + gwashington: 1, + tjefferson: 1, + }, + }, + }, + ]); } const instanceId = 'emulator-test-instance'; From 7daeb0f606a0002b4c18e86830ec2b4d72d3d661 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 27 Mar 2025 15:13:13 -0400 Subject: [PATCH 328/448] Run the linter --- src/index.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/index.ts b/src/index.ts index bb3d4a068..08370c065 100644 --- a/src/index.ts +++ b/src/index.ts @@ -533,7 +533,9 @@ export class Bigtable { if (options.collectMetrics === false) { this.metricsHandlers = []; } else { - this.metricsHandlers = [new GCPMetricsHandler(new CloudMonitoringExporter())]; + this.metricsHandlers = [ + new GCPMetricsHandler(new CloudMonitoringExporter()), + ]; } } From 25ab7c45047072b681fb6a175ff831bfa4a7261d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 27 Mar 2025 15:31:12 -0400 Subject: [PATCH 329/448] Reduce latency when collect metrics is false --- src/index.ts | 3 ++ src/tabular-api-surface.ts | 63 ++++++++++++++++++++------------------ 2 files changed, 37 insertions(+), 29 deletions(-) diff --git a/src/index.ts b/src/index.ts index 08370c065..b3bca49bc 100644 --- a/src/index.ts +++ b/src/index.ts @@ -432,6 +432,7 @@ export class Bigtable { // Therefore, metrics handlers should be created at the client level and // reused throughout the library to reduce latency: metricsHandlers: IMetricsHandler[]; + collectMetrics: boolean; constructor(options: BigtableOptions = {}) { // Determine what scopes are needed. @@ -531,8 +532,10 @@ export class Bigtable { this.shouldReplaceProjectIdToken = this.projectId === '{{projectId}}'; if (options.collectMetrics === false) { + this.collectMetrics = false; this.metricsHandlers = []; } else { + this.collectMetrics = true; this.metricsHandlers = [ new GCPMetricsHandler(new CloudMonitoringExporter()), ]; diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index fc1bbfa2b..cb1907df1 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -343,15 +343,17 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); } return originalEnd(chunk, encoding, cb); }; - const metricsCollector = new OperationMetricsCollector( - this, - this.bigtable.metricsHandlers, - MethodName.READ_ROWS, - StreamingState.STREAMING - ); - metricsCollector.onOperationStart(); + const metricsCollector = this.bigtable.collectMetrics + ? 
new OperationMetricsCollector( + this, + this.bigtable.metricsHandlers, + MethodName.READ_ROWS, + StreamingState.STREAMING + ) + : null; + metricsCollector?.onOperationStart(); const makeNewRequest = () => { - metricsCollector.onAttemptStart(); + metricsCollector?.onAttemptStart(); // Avoid cancelling an expired timer if user // cancelled the stream in the middle of a retry @@ -528,21 +530,24 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); return false; }; - requestStream - .on( - 'metadata', - (metadata: {internalRepr: Map; options: {}}) => { - metricsCollector.onMetadataReceived(metadata); - } - ) - .on( - 'status', - (status: { - metadata: {internalRepr: Map; options: {}}; - }) => { - metricsCollector.onStatusMetadataReceived(status); - } - ); + if (this.bigtable.collectMetrics) { + requestStream + .on( + 'metadata', + (metadata: {internalRepr: Map; options: {}}) => { + metricsCollector?.onMetadataReceived(metadata); + } + ) + .on( + 'status', + (status: { + metadata: {internalRepr: Map; options: {}}; + }) => { + metricsCollector?.onStatusMetadataReceived(status); + } + ); + } + rowStream .on('error', (error: ServiceError) => { rowStreamUnpipe(rowStream, userStream); @@ -568,7 +573,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); numConsecutiveErrors, backOffSettings ); - metricsCollector.onAttemptComplete( + metricsCollector?.onAttemptComplete( this.bigtable.projectId, error.code ); @@ -587,11 +592,11 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // error.code = grpc.status.CANCELLED; } - metricsCollector.onAttemptComplete( + metricsCollector?.onAttemptComplete( this.bigtable.projectId, error.code ); - metricsCollector.onOperationComplete( + metricsCollector?.onOperationComplete( this.bigtable.projectId, error.code ); @@ -602,16 +607,16 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // Reset error count after a successful read so the backoff // time won't keep increasing when as stream had multiple errors numConsecutiveErrors = 0; - metricsCollector.onResponse(this.bigtable.projectId); + metricsCollector?.onResponse(this.bigtable.projectId); }) .on('end', () => { numRequestsMade++; activeRequestStream = null; - metricsCollector.onAttemptComplete( + metricsCollector?.onAttemptComplete( this.bigtable.projectId, grpc.status.OK ); - metricsCollector.onOperationComplete( + metricsCollector?.onOperationComplete( this.bigtable.projectId, grpc.status.OK ); From fb8251cf04db2ae0c77eb68836880cfd414ba7ff Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 28 Mar 2025 10:49:56 -0400 Subject: [PATCH 330/448] Add a comment for the collect metrics variable --- src/index.ts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/index.ts b/src/index.ts index b3bca49bc..6a15a2cb0 100644 --- a/src/index.ts +++ b/src/index.ts @@ -432,6 +432,10 @@ export class Bigtable { // Therefore, metrics handlers should be created at the client level and // reused throughout the library to reduce latency: metricsHandlers: IMetricsHandler[]; + // collectMetrics is a member variable that is used to ensure that if the + // user provides a `false` value and opts out of metrics collection that + // the metrics collector is ignored altogether to reduce latency in the + // client. 
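// The guard introduced in patch 329 above keeps the disabled path close to
// free: when collectMetrics is false the collector is simply null, and
// every call site degrades to a no-op through optional chaining. In sketch
// form:
//
//   const collector = bigtable.collectMetrics
//     ? new OperationMetricsCollector(
//         table,
//         bigtable.metricsHandlers,
//         MethodName.READ_ROWS,
//         StreamingState.STREAMING
//       )
//     : null;
//
//   collector?.onOperationStart(); // no-op when metrics are disabled
//   collector?.onAttemptStart();
//
// The boolean declared just below exists so this check stays a cheap flag
// test instead of re-reading the options object on every request.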
collectMetrics: boolean; constructor(options: BigtableOptions = {}) { From e2a11bed7431f528b7c8a938a60f0e76de8144a7 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 28 Mar 2025 10:52:21 -0400 Subject: [PATCH 331/448] Remove unnecessary deltas --- src/tabular-api-surface.ts | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index cb1907df1..b2b8f34ef 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -14,6 +14,7 @@ import {OperationMetricsCollector} from './client-side-metrics/operation-metrics-collector'; import {promisifyAll} from '@google-cloud/promisify'; +import arrify = require('arrify'); import {Instance} from './instance'; import {Mutation} from './mutation'; import { @@ -24,7 +25,7 @@ import { SampleRowKeysCallback, SampleRowsKeysResponse, } from './index'; -import {BoundData, Filter, RawFilter} from './filter'; +import {Filter, BoundData, RawFilter} from './filter'; import {Row} from './row'; import { ChunkPushData, @@ -44,12 +45,6 @@ import { StreamingState, } from './client-side-metrics/client-side-metrics-attributes'; -let attemptCounter = 0; - -import arrify = require('arrify'); -import {GCPMetricsHandler} from './client-side-metrics/gcp-metrics-handler'; -import {CloudMonitoringExporter} from './client-side-metrics/exporter'; - // See protos/google/rpc/code.proto // (4=DEADLINE_EXCEEDED, 8=RESOURCE_EXHAUSTED, 10=ABORTED, 14=UNAVAILABLE) export const RETRYABLE_STATUS_CODES = new Set([4, 8, 10, 14]); @@ -220,7 +215,6 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); * region_tag:bigtable_api_table_readstream */ createReadStream(opts?: GetRowsOptions) { - attemptCounter++; const options = opts || {}; const maxRetries = is.number(this.maxRetries) ? this.maxRetries! 
: 10;
 let activeRequestStream: AbortableDuplex | null;

From 3e1b9e00d3fbb61d5eaf1aee872d5cf400476974 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Fri, 28 Mar 2025 10:54:35 -0400
Subject: [PATCH 332/448] Add space back

---
 src/tabular-api-surface.ts | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts
index b2b8f34ef..161aa6943 100644
--- a/src/tabular-api-surface.ts
+++ b/src/tabular-api-surface.ts
@@ -617,6 +617,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
 });
 rowStreamPipe(rowStream, userStream);
 };
+
 makeNewRequest();
 return userStream;
 }

From 30538094097a95a666d590f29c4521afd953a131 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Fri, 28 Mar 2025 10:56:17 -0400
Subject: [PATCH 333/448] Remove numRequestsMade increment

---
 src/tabular-api-surface.ts | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts
index 161aa6943..d8ab3a5d7 100644
--- a/src/tabular-api-surface.ts
+++ b/src/tabular-api-surface.ts
@@ -604,7 +604,6 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
 metricsCollector?.onResponse(this.bigtable.projectId);
 })
 .on('end', () => {
- numRequestsMade++;
 activeRequestStream = null;
 metricsCollector?.onAttemptComplete(
 this.bigtable.projectId,
 grpc.status.OK
 );
@@ -617,7 +617,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
 });
 rowStreamPipe(rowStream, userStream);
 };
-
+ makeNewRequest();
 return userStream;
 }

From 5022a3763c12b60265f3decdd3732fcb3eb4295f Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Fri, 28 Mar 2025 11:07:07 -0400
Subject: [PATCH 334/448] Check for existence of metrics

---
 system-test/client-side-metrics-to-metrics-handler.ts | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/system-test/client-side-metrics-to-metrics-handler.ts b/system-test/client-side-metrics-to-metrics-handler.ts
index 3e04fcdc9..d584252f1 100644
--- a/system-test/client-side-metrics-to-metrics-handler.ts
+++ b/system-test/client-side-metrics-to-metrics-handler.ts
@@ -29,7 +29,10 @@ describe('Bigtable/ClientSideMetricsToMetricsHandler', () => {
 assert.strictEqual(this.requestsHandled.length, 2);
 const firstRequest = this.requestsHandled[0] as any;
 // We would expect these parameters to be different every time so delete
- // them from the comparison.
+ // them from the comparison after checking they exist.
+ assert(firstRequest.attemptLatency);
+ assert(firstRequest.serverLatency);
+ assert(firstRequest.metricsCollectorData.client_uid);
 delete firstRequest.attemptLatency;
 delete firstRequest.serverLatency;
 delete firstRequest.metricsCollectorData.client_uid;
@@ -49,6 +52,11 @@ describe('Bigtable/ClientSideMetricsToMetricsHandler', () => {
 projectId,
 });
 const secondRequest = this.requestsHandled[1] as any;
+ // We would expect these parameters to be different every time so delete
+ // them from the comparison after checking they exist.
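+ // Asserting before deleting means a missing latency or client_uid fails
+ // the test loudly instead of being silently stripped from the comparison.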
+ assert(secondRequest.operationLatency); + assert(secondRequest.firstResponseLatency); + assert(secondRequest.metricsCollectorData.client_uid); delete secondRequest.operationLatency; delete secondRequest.firstResponseLatency; delete secondRequest.metricsCollectorData.client_uid; From 8c9fd297f99c53a44db72d54f8868d9d1eed347f Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 28 Mar 2025 11:44:06 -0400 Subject: [PATCH 335/448] Finalize updates to do the test with 2 tables --- .../client-side-metrics-to-metrics-handler.ts | 203 +++++++++++------- 1 file changed, 130 insertions(+), 73 deletions(-) diff --git a/system-test/client-side-metrics-to-metrics-handler.ts b/system-test/client-side-metrics-to-metrics-handler.ts index d584252f1..3819ec743 100644 --- a/system-test/client-side-metrics-to-metrics-handler.ts +++ b/system-test/client-side-metrics-to-metrics-handler.ts @@ -20,62 +20,114 @@ import * as assert from 'assert'; import {TestMetricsHandler} from '../test-common/test-metrics-handler'; import {OnOperationCompleteData} from '../src/client-side-metrics/metrics-handler'; -describe('Bigtable/ClientSideMetricsToMetricsHandler', () => { +describe.only('Bigtable/ClientSideMetricsToMetricsHandler', () => { async function mockBigtable(projectId: string, done: mocha.Done) { + let handlerRequestCount = 0; class TestGCPMetricsHandler extends TestMetricsHandler { onOperationComplete(data: OnOperationCompleteData) { + handlerRequestCount++; try { super.onOperationComplete(data); - assert.strictEqual(this.requestsHandled.length, 2); - const firstRequest = this.requestsHandled[0] as any; - // We would expect these parameters to be different every time so delete - // them from the comparison after checking they exist. - assert(firstRequest.attemptLatency); - assert(firstRequest.serverLatency); - assert(firstRequest.metricsCollectorData.client_uid); - delete firstRequest.attemptLatency; - delete firstRequest.serverLatency; - delete firstRequest.metricsCollectorData.client_uid; - delete firstRequest.metricsCollectorData.appProfileId; - assert.deepStrictEqual(firstRequest, { - connectivityErrorCount: 0, - streaming: 'true', - status: '0', - client_name: 'nodejs-bigtable', - metricsCollectorData: { - instanceId: 'emulator-test-instance', - table: 'my-table', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - method: 'Bigtable.ReadRows', - }, - projectId, - }); - const secondRequest = this.requestsHandled[1] as any; - // We would expect these parameters to be different every time so delete - // them from the comparison after checking they exist. - assert(secondRequest.operationLatency); - assert(secondRequest.firstResponseLatency); - assert(secondRequest.metricsCollectorData.client_uid); - delete secondRequest.operationLatency; - delete secondRequest.firstResponseLatency; - delete secondRequest.metricsCollectorData.client_uid; - delete secondRequest.metricsCollectorData.appProfileId; - assert.deepStrictEqual(secondRequest, { - status: '0', - streaming: 'true', - client_name: 'nodejs-bigtable', - metricsCollectorData: { - instanceId: 'emulator-test-instance', - table: 'my-table', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - method: 'Bigtable.ReadRows', - }, - projectId, - retryCount: 0, - }); - done(); + if (handlerRequestCount > 1) { + assert.strictEqual(this.requestsHandled.length, 4); + const firstRequest = this.requestsHandled[0] as any; + // We would expect these parameters to be different every time so delete + // them from the comparison after checking they exist. 
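+ // The first record is the attempt-scoped one, so attemptLatency and
+ // serverLatency are checked here; the operation-scoped fields follow
+ // in the second record.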
+ assert(firstRequest.attemptLatency); + assert(firstRequest.serverLatency); + assert(firstRequest.metricsCollectorData.client_uid); + delete firstRequest.attemptLatency; + delete firstRequest.serverLatency; + delete firstRequest.metricsCollectorData.client_uid; + delete firstRequest.metricsCollectorData.appProfileId; + assert.deepStrictEqual(firstRequest, { + connectivityErrorCount: 0, + streaming: 'true', + status: '0', + client_name: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'emulator-test-instance', + table: 'my-table', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + method: 'Bigtable.ReadRows', + }, + projectId, + }); + const secondRequest = this.requestsHandled[1] as any; + // We would expect these parameters to be different every time so delete + // them from the comparison after checking they exist. + assert(secondRequest.operationLatency); + assert(secondRequest.firstResponseLatency); + assert(secondRequest.metricsCollectorData.client_uid); + delete secondRequest.operationLatency; + delete secondRequest.firstResponseLatency; + delete secondRequest.metricsCollectorData.client_uid; + delete secondRequest.metricsCollectorData.appProfileId; + assert.deepStrictEqual(secondRequest, { + status: '0', + streaming: 'true', + client_name: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'emulator-test-instance', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + method: 'Bigtable.ReadRows', + table: 'my-table', + }, + projectId, + retryCount: 0, + }); + // We would expect these parameters to be different every time so delete + // them from the comparison after checking they exist. + const thirdRequest = this.requestsHandled[2] as any; + assert(thirdRequest.attemptLatency); + assert(thirdRequest.serverLatency); + assert(thirdRequest.metricsCollectorData.client_uid); + delete thirdRequest.attemptLatency; + delete thirdRequest.serverLatency; + delete thirdRequest.metricsCollectorData.client_uid; + delete thirdRequest.metricsCollectorData.appProfileId; + assert.deepStrictEqual(thirdRequest, { + connectivityErrorCount: 0, + streaming: 'true', + status: '0', + client_name: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'emulator-test-instance', + table: 'my-table2', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + method: 'Bigtable.ReadRows', + }, + projectId, + }); + const fourthRequest = this.requestsHandled[3] as any; + // We would expect these parameters to be different every time so delete + // them from the comparison after checking they exist. 
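+ // The fourth record mirrors the second: operation-scoped fields for the
+ // read against my-table2 instead of my-table.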
+ assert(fourthRequest.operationLatency); + assert(fourthRequest.firstResponseLatency); + assert(fourthRequest.metricsCollectorData.client_uid); + delete fourthRequest.operationLatency; + delete fourthRequest.firstResponseLatency; + delete fourthRequest.metricsCollectorData.client_uid; + delete fourthRequest.metricsCollectorData.appProfileId; + assert.deepStrictEqual(fourthRequest, { + status: '0', + streaming: 'true', + client_name: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'emulator-test-instance', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + method: 'Bigtable.ReadRows', + table: 'my-table2', + }, + projectId, + retryCount: 0, + }); + done(); + } } catch (e) { done(e); } @@ -100,37 +152,40 @@ describe('Bigtable/ClientSideMetricsToMetricsHandler', () => { }); await operation.promise(); } - const table = instance.table(tableId); - const [tableExists] = await table.exists(); - if (!tableExists) { - await table.create({families: [columnFamilyId]}); // Create column family - } else { - // Check if column family exists and create it if not. - const [families] = await table.getFamilies(); + const table2 = instance.table(tableId2); + for (const currentTable of [table, table2]) { + const [tableExists] = await currentTable.exists(); + if (!tableExists) { + await currentTable.create({families: [columnFamilyId]}); // Create column family + } else { + // Check if column family exists and create it if not. + const [families] = await currentTable.getFamilies(); - if ( - !families.some((family: {id: string}) => family.id === columnFamilyId) - ) { - await table.createFamily(columnFamilyId); + if ( + !families.some((family: {id: string}) => family.id === columnFamilyId) + ) { + await currentTable.createFamily(columnFamilyId); + } } - } - // Add some data so that a firstResponseLatency is recorded. - await table.insert([ - { - key: 'rowId', - data: { - [columnFamilyId]: { - gwashington: 1, - tjefferson: 1, + // Add some data so that a firstResponseLatency is recorded. 
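+ // firstResponseLatency is only measured once at least one row comes
+ // back, so reading an empty table would leave it unset.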
+ await currentTable.insert([ + { + key: 'rowId', + data: { + [columnFamilyId]: { + gwashington: 1, + tjefferson: 1, + }, }, }, - }, - ]); + ]); + } } const instanceId = 'emulator-test-instance'; const tableId = 'my-table'; + const tableId2 = 'my-table2'; const columnFamilyId = 'cf1'; let bigtable: Bigtable; @@ -166,6 +221,8 @@ describe('Bigtable/ClientSideMetricsToMetricsHandler', () => { const instance = bigtable.instance(instanceId); const table = instance.table(tableId); await table.getRows(); + const table2 = instance.table(tableId2); + await table2.getRows(); })(); }); }); From 558b091fcde5aacd599fabfb53368a5873bd834b Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 28 Mar 2025 11:55:44 -0400 Subject: [PATCH 336/448] Refactor the test setup into one file --- .../client-side-metrics-setup-table.ts | 48 +++++++++++++++++++ .../client-side-metrics-to-metrics-handler.ts | 48 +++---------------- 2 files changed, 54 insertions(+), 42 deletions(-) create mode 100644 system-test/client-side-metrics-setup-table.ts diff --git a/system-test/client-side-metrics-setup-table.ts b/system-test/client-side-metrics-setup-table.ts new file mode 100644 index 000000000..36246e517 --- /dev/null +++ b/system-test/client-side-metrics-setup-table.ts @@ -0,0 +1,48 @@ +import {Bigtable} from '../src'; +export async function setupBigtable( + bigtable: Bigtable, + columnFamilyId: string, + instanceId: string, + tableIds: string[] +) { + const instance = bigtable.instance(instanceId); + const [instanceInfo] = await instance.exists(); + if (!instanceInfo) { + const [, operation] = await instance.create({ + clusters: { + id: 'fake-cluster3', + location: 'us-west1-c', + nodes: 1, + }, + }); + await operation.promise(); + } + const tables = tableIds.map(tableId => instance.table(tableId)); + for (const currentTable of tables) { + const [tableExists] = await currentTable.exists(); + if (!tableExists) { + await currentTable.create({families: [columnFamilyId]}); // Create column family + } else { + // Check if column family exists and create it if not. + const [families] = await currentTable.getFamilies(); + + if ( + !families.some((family: {id: string}) => family.id === columnFamilyId) + ) { + await currentTable.createFamily(columnFamilyId); + } + } + // Add some data so that a firstResponseLatency is recorded. 
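+ // A single seeded row per table is enough for every ReadRows call in
+ // these system tests to return data.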
+ await currentTable.insert([ + { + key: 'rowId', + data: { + [columnFamilyId]: { + gwashington: 1, + tjefferson: 1, + }, + }, + }, + ]); + } +} diff --git a/system-test/client-side-metrics-to-metrics-handler.ts b/system-test/client-side-metrics-to-metrics-handler.ts index 3819ec743..cb2bd8119 100644 --- a/system-test/client-side-metrics-to-metrics-handler.ts +++ b/system-test/client-side-metrics-to-metrics-handler.ts @@ -19,6 +19,7 @@ import * as mocha from 'mocha'; import * as assert from 'assert'; import {TestMetricsHandler} from '../test-common/test-metrics-handler'; import {OnOperationCompleteData} from '../src/client-side-metrics/metrics-handler'; +import {setupBigtable} from './client-side-metrics-setup-table'; describe.only('Bigtable/ClientSideMetricsToMetricsHandler', () => { async function mockBigtable(projectId: string, done: mocha.Done) { @@ -133,54 +134,17 @@ describe.only('Bigtable/ClientSideMetricsToMetricsHandler', () => { } } } + const FakeBigtable = proxyquire('../src/index.js', { './client-side-metrics/gcp-metrics-handler': { GCPMetricsHandler: TestGCPMetricsHandler, }, }).Bigtable; bigtable = new FakeBigtable(); - - const instance = bigtable.instance(instanceId); - const [instanceInfo] = await instance.exists(); - if (!instanceInfo) { - const [, operation] = await instance.create({ - clusters: { - id: 'fake-cluster3', - location: 'us-west1-c', - nodes: 1, - }, - }); - await operation.promise(); - } - const table = instance.table(tableId); - const table2 = instance.table(tableId2); - for (const currentTable of [table, table2]) { - const [tableExists] = await currentTable.exists(); - if (!tableExists) { - await currentTable.create({families: [columnFamilyId]}); // Create column family - } else { - // Check if column family exists and create it if not. - const [families] = await currentTable.getFamilies(); - - if ( - !families.some((family: {id: string}) => family.id === columnFamilyId) - ) { - await currentTable.createFamily(columnFamilyId); - } - } - // Add some data so that a firstResponseLatency is recorded. 
- await currentTable.insert([ - { - key: 'rowId', - data: { - [columnFamilyId]: { - gwashington: 1, - tjefferson: 1, - }, - }, - }, - ]); - } + await setupBigtable(bigtable, columnFamilyId, instanceId, [ + tableId, + tableId2, + ]); } const instanceId = 'emulator-test-instance'; From 5b79114a72f154de23fd0e52021b38d541afe2b7 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 28 Mar 2025 12:01:05 -0400 Subject: [PATCH 337/448] Use the new setup table function --- system-test/client-side-metrics-to-gcm.ts | 43 ++--------------------- 1 file changed, 3 insertions(+), 40 deletions(-) diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts index 7aa7de09d..9bd891998 100644 --- a/system-test/client-side-metrics-to-gcm.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -24,8 +24,9 @@ import { } from '../src/client-side-metrics/exporter'; import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler'; import * as mocha from 'mocha'; +import {setupBigtable} from './client-side-metrics-setup-table'; -describe('Bigtable/ClientSideMetricsToGCM', () => { +describe.only('Bigtable/ClientSideMetricsToGCM', () => { async function mockBigtable(done: mocha.Done) { /* We need to create a timeout here because if we don't then mocha shuts down @@ -84,45 +85,7 @@ describe('Bigtable/ClientSideMetricsToGCM', () => { }).Bigtable; bigtable = new FakeBigtable(); - const instance = bigtable.instance(instanceId); - const [instanceInfo] = await instance.exists(); - if (!instanceInfo) { - const [, operation] = await instance.create({ - clusters: { - id: 'fake-cluster3', - location: 'us-west1-c', - nodes: 1, - }, - }); - await operation.promise(); - } - - const table = instance.table(tableId); - const [tableExists] = await table.exists(); - if (!tableExists) { - await table.create({families: [columnFamilyId]}); // Create column family - } else { - // Check if column family exists and create it if not. - const [families] = await table.getFamilies(); - - if ( - !families.some((family: {id: string}) => family.id === columnFamilyId) - ) { - await table.createFamily(columnFamilyId); - } - } - // Add some data so that a firstResponseLatency is recorded. 
- await table.insert([ - { - key: 'rowId', - data: { - [columnFamilyId]: { - gwashington: 1, - tjefferson: 1, - }, - }, - }, - ]); + await setupBigtable(bigtable, columnFamilyId, instanceId, [tableId]); } const instanceId = 'emulator-test-instance'; From 0a5c59851b137f05a300af0fe6945c9fdcf2922c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 28 Mar 2025 12:01:39 -0400 Subject: [PATCH 338/448] Remove only handlers --- system-test/client-side-metrics-to-gcm.ts | 2 +- system-test/client-side-metrics-to-metrics-handler.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts index 9bd891998..b0abb7727 100644 --- a/system-test/client-side-metrics-to-gcm.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -26,7 +26,7 @@ import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler' import * as mocha from 'mocha'; import {setupBigtable} from './client-side-metrics-setup-table'; -describe.only('Bigtable/ClientSideMetricsToGCM', () => { +describe('Bigtable/ClientSideMetricsToGCM', () => { async function mockBigtable(done: mocha.Done) { /* We need to create a timeout here because if we don't then mocha shuts down diff --git a/system-test/client-side-metrics-to-metrics-handler.ts b/system-test/client-side-metrics-to-metrics-handler.ts index cb2bd8119..515891fd7 100644 --- a/system-test/client-side-metrics-to-metrics-handler.ts +++ b/system-test/client-side-metrics-to-metrics-handler.ts @@ -21,7 +21,7 @@ import {TestMetricsHandler} from '../test-common/test-metrics-handler'; import {OnOperationCompleteData} from '../src/client-side-metrics/metrics-handler'; import {setupBigtable} from './client-side-metrics-setup-table'; -describe.only('Bigtable/ClientSideMetricsToMetricsHandler', () => { +describe('Bigtable/ClientSideMetricsToMetricsHandler', () => { async function mockBigtable(projectId: string, done: mocha.Done) { let handlerRequestCount = 0; class TestGCPMetricsHandler extends TestMetricsHandler { From bd3f76c44e220b60277fd762f2efea7f60f80db1 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 28 Mar 2025 13:25:50 -0400 Subject: [PATCH 339/448] Add header --- system-test/client-side-metrics-setup-table.ts | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/system-test/client-side-metrics-setup-table.ts b/system-test/client-side-metrics-setup-table.ts index 36246e517..41921cee0 100644 --- a/system-test/client-side-metrics-setup-table.ts +++ b/system-test/client-side-metrics-setup-table.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
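+//
+// setupBigtable (below) provisions the fixtures shared by the client-side
+// metrics system tests: an emulator instance, the requested tables, a
+// column family, and one seeded row per table.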
+ import {Bigtable} from '../src'; export async function setupBigtable( bigtable: Bigtable, From af44069aa3ff8ca88f1548db7b36a77f996d271a Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 28 Mar 2025 14:42:08 -0400 Subject: [PATCH 340/448] Add guard in the metrics collector --- .../operation-metrics-collector.ts | 36 ++++++++++--------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 920bcb347..a660b12c4 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -293,23 +293,25 @@ export class OperationMetricsCollector { const mappedValue = status.metadata.internalRepr.get( INSTANCE_INFORMATION_KEY ) as Buffer[]; - const decodedValue = ResponseParams.decode( - mappedValue[0], - mappedValue[0].length - ); - if ( - decodedValue && - (decodedValue as unknown as {zoneId: string}).zoneId - ) { - this.zone = (decodedValue as unknown as {zoneId: string}).zoneId; - } - if ( - decodedValue && - (decodedValue as unknown as {clusterId: string}).clusterId - ) { - this.cluster = ( - decodedValue as unknown as {clusterId: string} - ).clusterId; + if (mappedValue) { + const decodedValue = ResponseParams.decode( + mappedValue[0], + mappedValue[0].length + ); + if ( + decodedValue && + (decodedValue as unknown as {zoneId: string}).zoneId + ) { + this.zone = (decodedValue as unknown as {zoneId: string}).zoneId; + } + if ( + decodedValue && + (decodedValue as unknown as {clusterId: string}).clusterId + ) { + this.cluster = ( + decodedValue as unknown as {clusterId: string} + ).clusterId; + } } } } From 85fcca425bb5b1a3e4e0ee2edb6bc2e7d9b92978 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 28 Mar 2025 17:17:33 -0400 Subject: [PATCH 341/448] Revert "Remove the guard" This reverts commit c581129e69eb6e9c2747554d69419837b878868f. # Conflicts: # src/client-side-metrics/operation-metrics-collector.ts --- .../operation-metrics-collector.ts | 111 ++++++++++-------- 1 file changed, 59 insertions(+), 52 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index a660b12c4..231b13a9e 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -17,10 +17,23 @@ import {IMetricsHandler} from './metrics-handler'; import {MethodName, StreamingState} from './client-side-metrics-attributes'; import {grpc} from 'google-gax'; import * as gax from 'google-gax'; -const root = gax.protobuf.loadSync( - './protos/google/bigtable/v2/response_params.proto' -); -const ResponseParams = root.lookupType('ResponseParams'); + +let ResponseParams: gax.protobuf.Type | null; +try { + /* + * Likely due to the Node 18 upgrade, the samples tests are failing with the + * error UnhandledPromiseRejectionWarning: Error: ENOENT: no such file or + * directory, open 'protos/google/bigtable/v2/response_params.proto'. Since + * these tests don't use this module we can suppress the error for now to + * unblock the CI pipeline. + */ + const root = gax.protobuf.loadSync( + './protos/google/bigtable/v2/response_params.proto' + ); + ResponseParams = root.lookupType('ResponseParams'); +} catch (e) { + ResponseParams = null; +} /** * An interface representing a tabular API surface, such as a Bigtable table. 
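 * The metrics collector reads identifiers from it, such as the table and
 * instance ids that label the recorded metrics.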
@@ -194,19 +207,16 @@ export class OperationMetricsCollector { * Called when the first response is received. Records first response latencies. */ onResponse(projectId: string) { - if (!this.firstResponseLatency) { - // Check firstResponseLatency first to improve latency for calls with many rows - if ( - this.state === - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET - ) { - this.state = - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_SOME_ROWS_RECEIVED; - const endTime = new Date(); - if (projectId && this.operationStartTime) { - this.firstResponseLatency = - endTime.getTime() - this.operationStartTime.getTime(); - } + if ( + this.state === + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET + ) { + this.state = + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_SOME_ROWS_RECEIVED; + const endTime = new Date(); + if (projectId && this.operationStartTime) { + this.firstResponseLatency = + endTime.getTime() - this.operationStartTime.getTime(); } } } @@ -256,17 +266,16 @@ export class OperationMetricsCollector { internalRepr: Map; options: {}; }) { - if (!this.serverTimeRead && this.connectivityErrorCount < 1) { - // Check serverTimeRead, connectivityErrorCount here to reduce latency. - const mappedEntries = new Map( - Array.from(metadata.internalRepr.entries(), ([key, value]) => [ - key, - value.toString(), - ]) - ); - const SERVER_TIMING_REGEX = /.*gfet4t7;\s*dur=(\d+\.?\d*).*/; - const SERVER_TIMING_KEY = 'server-timing'; - const durationValues = mappedEntries.get(SERVER_TIMING_KEY); + const mappedEntries = new Map( + Array.from(metadata.internalRepr.entries(), ([key, value]) => [ + key, + value.toString(), + ]) + ); + const SERVER_TIMING_REGEX = /.*gfet4t7;\s*dur=(\d+\.?\d*).*/; + const SERVER_TIMING_KEY = 'server-timing'; + const durationValues = mappedEntries.get(SERVER_TIMING_KEY); + if (durationValues) { const matchedDuration = durationValues?.match(SERVER_TIMING_REGEX); if (matchedDuration && matchedDuration[1]) { if (!this.serverTimeRead) { @@ -288,30 +297,28 @@ export class OperationMetricsCollector { onStatusMetadataReceived(status: { metadata: {internalRepr: Map; options: {}}; }) { - if (!this.zone || !this.cluster) { - const INSTANCE_INFORMATION_KEY = 'x-goog-ext-425905942-bin'; - const mappedValue = status.metadata.internalRepr.get( - INSTANCE_INFORMATION_KEY - ) as Buffer[]; - if (mappedValue) { - const decodedValue = ResponseParams.decode( - mappedValue[0], - mappedValue[0].length - ); - if ( - decodedValue && - (decodedValue as unknown as {zoneId: string}).zoneId - ) { - this.zone = (decodedValue as unknown as {zoneId: string}).zoneId; - } - if ( - decodedValue && - (decodedValue as unknown as {clusterId: string}).clusterId - ) { - this.cluster = ( - decodedValue as unknown as {clusterId: string} - ).clusterId; - } + const INSTANCE_INFORMATION_KEY = 'x-goog-ext-425905942-bin'; + const mappedValue = status.metadata.internalRepr.get( + INSTANCE_INFORMATION_KEY + ) as Buffer[]; + if (mappedValue && mappedValue[0] && ResponseParams) { + const decodedValue = ResponseParams.decode( + mappedValue[0], + mappedValue[0].length + ); + if ( + decodedValue && + (decodedValue as unknown as {zoneId: string}).zoneId + ) { + this.zone = (decodedValue as unknown as {zoneId: string}).zoneId; + } + if ( + decodedValue && + (decodedValue as unknown as {clusterId: string}).clusterId + ) { + this.cluster = ( + decodedValue as unknown as {clusterId: string} + ).clusterId; } } } From f2d16bd3445590f58c73714cd1677156e7e9dbee Mon 
Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 28 Mar 2025 17:27:46 -0400 Subject: [PATCH 342/448] Add the optimizations back again --- .../operation-metrics-collector.ts | 109 ++++++++++-------- 1 file changed, 58 insertions(+), 51 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 231b13a9e..20e3cd4cc 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -207,16 +207,18 @@ export class OperationMetricsCollector { * Called when the first response is received. Records first response latencies. */ onResponse(projectId: string) { - if ( - this.state === - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET - ) { - this.state = - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_SOME_ROWS_RECEIVED; - const endTime = new Date(); - if (projectId && this.operationStartTime) { - this.firstResponseLatency = - endTime.getTime() - this.operationStartTime.getTime(); + if (!this.firstResponseLatency) { + if ( + this.state === + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET + ) { + this.state = + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_SOME_ROWS_RECEIVED; + const endTime = new Date(); + if (projectId && this.operationStartTime) { + this.firstResponseLatency = + endTime.getTime() - this.operationStartTime.getTime(); + } } } } @@ -266,26 +268,29 @@ export class OperationMetricsCollector { internalRepr: Map; options: {}; }) { - const mappedEntries = new Map( - Array.from(metadata.internalRepr.entries(), ([key, value]) => [ - key, - value.toString(), - ]) - ); - const SERVER_TIMING_REGEX = /.*gfet4t7;\s*dur=(\d+\.?\d*).*/; - const SERVER_TIMING_KEY = 'server-timing'; - const durationValues = mappedEntries.get(SERVER_TIMING_KEY); - if (durationValues) { - const matchedDuration = durationValues?.match(SERVER_TIMING_REGEX); - if (matchedDuration && matchedDuration[1]) { - if (!this.serverTimeRead) { - this.serverTimeRead = true; - this.serverTime = isNaN(parseInt(matchedDuration[1])) - ? null - : parseInt(matchedDuration[1]); + if (!this.serverTimeRead && this.connectivityErrorCount < 1) { + // Check serverTimeRead, connectivityErrorCount here to reduce latency. + const mappedEntries = new Map( + Array.from(metadata.internalRepr.entries(), ([key, value]) => [ + key, + value.toString(), + ]) + ); + const SERVER_TIMING_REGEX = /.*gfet4t7;\s*dur=(\d+\.?\d*).*/; + const SERVER_TIMING_KEY = 'server-timing'; + const durationValues = mappedEntries.get(SERVER_TIMING_KEY); + if (durationValues) { + const matchedDuration = durationValues?.match(SERVER_TIMING_REGEX); + if (matchedDuration && matchedDuration[1]) { + if (!this.serverTimeRead) { + this.serverTimeRead = true; + this.serverTime = isNaN(parseInt(matchedDuration[1])) + ? 
null + : parseInt(matchedDuration[1]); + } + } else { + this.connectivityErrorCount = 1; } - } else { - this.connectivityErrorCount = 1; } } } @@ -297,28 +302,30 @@ export class OperationMetricsCollector { onStatusMetadataReceived(status: { metadata: {internalRepr: Map; options: {}}; }) { - const INSTANCE_INFORMATION_KEY = 'x-goog-ext-425905942-bin'; - const mappedValue = status.metadata.internalRepr.get( - INSTANCE_INFORMATION_KEY - ) as Buffer[]; - if (mappedValue && mappedValue[0] && ResponseParams) { - const decodedValue = ResponseParams.decode( - mappedValue[0], - mappedValue[0].length - ); - if ( - decodedValue && - (decodedValue as unknown as {zoneId: string}).zoneId - ) { - this.zone = (decodedValue as unknown as {zoneId: string}).zoneId; - } - if ( - decodedValue && - (decodedValue as unknown as {clusterId: string}).clusterId - ) { - this.cluster = ( - decodedValue as unknown as {clusterId: string} - ).clusterId; + if (!this.zone || !this.cluster) { + const INSTANCE_INFORMATION_KEY = 'x-goog-ext-425905942-bin'; + const mappedValue = status.metadata.internalRepr.get( + INSTANCE_INFORMATION_KEY + ) as Buffer[]; + if (mappedValue && mappedValue[0] && ResponseParams) { + const decodedValue = ResponseParams.decode( + mappedValue[0], + mappedValue[0].length + ); + if ( + decodedValue && + (decodedValue as unknown as {zoneId: string}).zoneId + ) { + this.zone = (decodedValue as unknown as {zoneId: string}).zoneId; + } + if ( + decodedValue && + (decodedValue as unknown as {clusterId: string}).clusterId + ) { + this.cluster = ( + decodedValue as unknown as {clusterId: string} + ).clusterId; + } } } } From 873507aa21c2cfd376c0fd5093ca2e30bf8a3e0e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 31 Mar 2025 10:58:30 -0400 Subject: [PATCH 343/448] cache cluster and zone so a call with 2 tables works --- .../gcp-metrics-handler.ts | 71 +++++++++++++++++-- system-test/client-side-metrics-to-gcm.ts | 6 +- 2 files changed, 69 insertions(+), 8 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 4d7bfc532..548ac7afa 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -51,6 +51,13 @@ interface MetricsInstruments { export class GCPMetricsHandler implements IMetricsHandler { private otelInstruments?: MetricsInstruments; private exporter: PushMetricExporter; + // instanceToZone and instanceToCluster are used to cache clusterId and zone + // values for each instance because the metrics handler will only receive + // zone and cluster values for the first operation. This is because the zone + // and cluster only get returned from the server on the first call for the + // instance so they only get stored on the first metrics collector. + private instanceToZone: {[instanceId: string]: string} = {}; + private instanceToCluster: {[instanceId: string]: string} = {}; /** * The `GCPMetricsHandler` is responsible for managing and recording @@ -208,6 +215,42 @@ export class GCPMetricsHandler implements IMetricsHandler { return this.otelInstruments; } + /** + * This method caches the cluster for an instance and returns the cached + * cluster if the metrics handler doesn't receive a cluster value. This is + * necessary because the server only returns instance information including + * the cluster name on the first call so we need to store the cluster name + * since it will only be available from the first metrics collector used. 
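+ *
+ * For example (values illustrative only): once a first response stores
+ * {'instance-1': 'cluster-a'}, a later getCachedCluster('instance-1',
+ * undefined) still returns 'cluster-a'.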
+ *
+ * @param instanceId The instance identifier
+ * @param cluster The cluster identifier
+ */
+ getCachedCluster(instanceId: string, cluster?: string) {
+ const cachedCluster = cluster ?? this.instanceToCluster[instanceId];
+ if (cachedCluster) {
+ this.instanceToCluster[instanceId] = cachedCluster;
+ }
+ return cachedCluster;
+ }
+
+ /**
+ * This method caches the zone for an instance and returns the cached
+ * zone if the metrics handler doesn't receive a zone value. This is
+ * necessary because the server only returns instance information including
+ * the zone name on the first call so we need to store the zone name
+ * since it will only be available from the first metrics collector used.
+ *
+ * @param instanceId The instance identifier
+ * @param zone The zone identifier
+ */
+ getCachedZone(instanceId: string, zone?: string) {
+ const cachedZone = zone ?? this.instanceToZone[instanceId];
+ if (cachedZone) {
+ this.instanceToZone[instanceId] = cachedZone;
+ }
+ return cachedZone;
+ }
+
 /**
 * Records metrics for a completed Bigtable operation.
 * This method records the operation latency and retry count, associating them with provided attributes.
@@ -215,16 +258,24 @@
 */
 onOperationComplete(data: OnOperationCompleteData) {
 const otelInstruments = this.getInstruments(data.projectId);
+ // If the cluster/zone are not available then use the cached cluster/zone
+ // from the first server response:
+ const instanceId = data.metricsCollectorData.instanceId;
+ const zone = this.getCachedZone(instanceId, data.metricsCollectorData.zone);
+ const cluster = this.getCachedCluster(
+ instanceId,
+ data.metricsCollectorData.cluster
+ );
 const commonAttributes = {
 app_profile: data.metricsCollectorData.app_profile,
 method: data.metricsCollectorData.method,
 client_uid: data.metricsCollectorData.client_uid,
 status: data.status,
 client_name: data.client_name,
- instanceId: data.metricsCollectorData.instanceId,
+ instanceId,
 table: data.metricsCollectorData.table,
- cluster: data.metricsCollectorData.cluster,
- zone: data.metricsCollectorData.zone,
+ cluster,
+ zone,
 };
 otelInstruments.operationLatencies.record(data.operationLatency, {
 streaming: data.streaming,
@@ -245,16 +296,24 @@
 */
 onAttemptComplete(data: OnAttemptCompleteData) {
 const otelInstruments = this.getInstruments(data.projectId);
+ // If the cluster/zone are not available then use the cached cluster/zone
+ // from the first server response:
+ const instanceId = data.metricsCollectorData.instanceId;
+ const zone = this.getCachedZone(instanceId, data.metricsCollectorData.zone);
+ const cluster = this.getCachedCluster(
+ instanceId,
+ data.metricsCollectorData.cluster
+ );
 const commonAttributes = {
 app_profile: data.metricsCollectorData.app_profile,
 method: data.metricsCollectorData.method,
 client_uid: data.metricsCollectorData.client_uid,
 status: data.status,
 client_name: data.client_name,
- instanceId: data.metricsCollectorData.instanceId,
+ instanceId,
 table: data.metricsCollectorData.table,
- cluster: data.metricsCollectorData.cluster,
- zone: data.metricsCollectorData.zone,
+ cluster,
+ zone,
 };
 otelInstruments.attemptLatencies.record(data.attemptLatency, {
 streaming: data.streaming,
diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts
index b0abb7727..e54167052 100644
--- a/system-test/client-side-metrics-to-gcm.ts
+++
b/system-test/client-side-metrics-to-gcm.ts @@ -16,7 +16,6 @@ import {describe, it, before, after} from 'mocha'; import * as assert from 'assert'; import {Bigtable} from '../src'; import * as proxyquire from 'proxyquire'; -import {TabularApiSurface} from '../src/tabular-api-surface'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import { CloudMonitoringExporter, @@ -26,7 +25,7 @@ import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler' import * as mocha from 'mocha'; import {setupBigtable} from './client-side-metrics-setup-table'; -describe('Bigtable/ClientSideMetricsToGCM', () => { +describe.only('Bigtable/ClientSideMetricsToGCM', () => { async function mockBigtable(done: mocha.Done) { /* We need to create a timeout here because if we don't then mocha shuts down @@ -90,6 +89,7 @@ describe('Bigtable/ClientSideMetricsToGCM', () => { const instanceId = 'emulator-test-instance'; const tableId = 'my-table'; + const tableId2 = 'my-table2'; const columnFamilyId = 'cf1'; let bigtable: Bigtable; @@ -116,6 +116,8 @@ describe('Bigtable/ClientSideMetricsToGCM', () => { const instance = bigtable.instance(instanceId); const table = instance.table(tableId); await table.getRows(); + const table2 = instance.table(tableId2); + await table2.getRows(); })(); }); }); From ccbf8b7d90942f0ce96c0cca9f1a519a938d28c3 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 31 Mar 2025 11:13:04 -0400 Subject: [PATCH 344/448] Properly set up the table --- system-test/client-side-metrics-to-gcm.ts | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts index e54167052..76113f506 100644 --- a/system-test/client-side-metrics-to-gcm.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -83,11 +83,14 @@ describe.only('Bigtable/ClientSideMetricsToGCM', () => { }, }).Bigtable; bigtable = new FakeBigtable(); - - await setupBigtable(bigtable, columnFamilyId, instanceId, [tableId]); + await setupBigtable(bigtable, columnFamilyId, instanceId, [ + tableId, + tableId2, + ]); } const instanceId = 'emulator-test-instance'; + // const instanceId2 = 'emulator-test-instance2'; const tableId = 'my-table'; const tableId2 = 'my-table2'; const columnFamilyId = 'cf1'; From 256d1e580abb350900e7eb60fdb9b0eb2f7d485f Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 31 Mar 2025 11:13:23 -0400 Subject: [PATCH 345/448] Revert "cache cluster and zone so a call with 2 tables" This reverts commit 873507aa21c2cfd376c0fd5093ca2e30bf8a3e0e. --- .../gcp-metrics-handler.ts | 71 ++----------------- system-test/client-side-metrics-to-gcm.ts | 6 +- 2 files changed, 8 insertions(+), 69 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 548ac7afa..4d7bfc532 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -51,13 +51,6 @@ interface MetricsInstruments { export class GCPMetricsHandler implements IMetricsHandler { private otelInstruments?: MetricsInstruments; private exporter: PushMetricExporter; - // instanceToZone and instanceToCluster are used to cache clusterId and zone - // values for each instance because the metrics handler will only receive - // zone and cluster values for the first operation. This is because the zone - // and cluster only get returned from the server on the first call for the - // instance so they only get stored on the first metrics collector. 
- private instanceToZone: {[instanceId: string]: string} = {};
- private instanceToCluster: {[instanceId: string]: string} = {};
 /**
 * The `GCPMetricsHandler` is responsible for managing and recording
@@ -215,42 +208,6 @@ export class GCPMetricsHandler implements IMetricsHandler {
 return this.otelInstruments;
 }
- /**
- * This method caches the cluster for an instance and returns the cached
- * cluster if the metrics handler doesn't receive a cluster value. This is
- * necessary because the server only returns instance information including
- * the cluster name on the first call so we need to store the cluster name
- * since it will only be available from the first metrics collector used.
- *
- * @param instanceId The instance identifier
- * @param cluster The cluster identifier
- */
- getCachedCluster(instanceId: string, cluster?: string) {
- const cachedCluster = cluster ?? this.instanceToCluster[instanceId];
- if (cachedCluster) {
- this.instanceToCluster[instanceId] = cachedCluster;
- }
- return cachedCluster;
- }
-
- /**
- * This method caches the zone for an instance and returns the cached
- * zone if the metrics handler doesn't receive a zone value. This is
- * necessary because the server only returns instance information including
- * the zone name on the first call so we need to store the zone name
- * since it will only be available from the first metrics collector used.
- *
- * @param instanceId The instance identifier
- * @param zone The zone identifier
- */
- getCachedZone(instanceId: string, zone?: string) {
- const cachedZone = zone ?? this.instanceToZone[instanceId];
- if (cachedZone) {
- this.instanceToZone[instanceId] = cachedZone;
- }
- return cachedZone;
- }
-
 /**
 * Records metrics for a completed Bigtable operation.
 * This method records the operation latency and retry count, associating them with provided attributes.
@@ -258,24 +215,16 @@ export class GCPMetricsHandler implements IMetricsHandler { */ onOperationComplete(data: OnOperationCompleteData) { const otelInstruments = this.getInstruments(data.projectId); - // If the cluster/zone are not available then use the cached cluster/zone - // from the first server response: - const instanceId = data.metricsCollectorData.instanceId; - const zone = this.getCachedZone(instanceId, data.metricsCollectorData.zone); - const cluster = this.getCachedCluster( - instanceId, - data.metricsCollectorData.cluster - ); const commonAttributes = { app_profile: data.metricsCollectorData.app_profile, method: data.metricsCollectorData.method, client_uid: data.metricsCollectorData.client_uid, status: data.status, client_name: data.client_name, - instanceId, + instanceId: data.metricsCollectorData.instanceId, table: data.metricsCollectorData.table, - cluster, - zone, + cluster: data.metricsCollectorData.cluster, + zone: data.metricsCollectorData.zone, }; otelInstruments.operationLatencies.record(data.operationLatency, { streaming: data.streaming, @@ -296,24 +245,16 @@ export class GCPMetricsHandler implements IMetricsHandler { */ onAttemptComplete(data: OnAttemptCompleteData) { const otelInstruments = this.getInstruments(data.projectId); - // If the cluster/zone are not available then use the cached cluster/zone - // from the first server response: - const instanceId = data.metricsCollectorData.instanceId; - const zone = this.getCachedZone(instanceId, data.metricsCollectorData.zone); - const cluster = this.getCachedCluster( - instanceId, - data.metricsCollectorData.cluster - ); const commonAttributes = { app_profile: data.metricsCollectorData.app_profile, method: data.metricsCollectorData.method, client_uid: data.metricsCollectorData.client_uid, status: data.status, client_name: data.client_name, - instanceId, + instanceId: data.metricsCollectorData.instanceId, table: data.metricsCollectorData.table, - cluster, - zone, + cluster: data.metricsCollectorData.cluster, + zone: data.metricsCollectorData.zone, }; otelInstruments.attemptLatencies.record(data.attemptLatency, { streaming: data.streaming, diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts index 76113f506..a80818f11 100644 --- a/system-test/client-side-metrics-to-gcm.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -16,6 +16,7 @@ import {describe, it, before, after} from 'mocha'; import * as assert from 'assert'; import {Bigtable} from '../src'; import * as proxyquire from 'proxyquire'; +import {TabularApiSurface} from '../src/tabular-api-surface'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import { CloudMonitoringExporter, @@ -25,7 +26,7 @@ import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler' import * as mocha from 'mocha'; import {setupBigtable} from './client-side-metrics-setup-table'; -describe.only('Bigtable/ClientSideMetricsToGCM', () => { +describe('Bigtable/ClientSideMetricsToGCM', () => { async function mockBigtable(done: mocha.Done) { /* We need to create a timeout here because if we don't then mocha shuts down @@ -92,7 +93,6 @@ describe.only('Bigtable/ClientSideMetricsToGCM', () => { const instanceId = 'emulator-test-instance'; // const instanceId2 = 'emulator-test-instance2'; const tableId = 'my-table'; - const tableId2 = 'my-table2'; const columnFamilyId = 'cf1'; let bigtable: Bigtable; @@ -119,8 +119,6 @@ describe.only('Bigtable/ClientSideMetricsToGCM', () => { const instance = bigtable.instance(instanceId); 
const table = instance.table(tableId); await table.getRows(); - const table2 = instance.table(tableId2); - await table2.getRows(); })(); }); }); From 19cb1c45f365ba46632d6ef5fdfcda2f222706aa Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 31 Mar 2025 11:17:43 -0400 Subject: [PATCH 346/448] Update the test setup --- system-test/client-side-metrics-to-gcm.ts | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts index a80818f11..76113f506 100644 --- a/system-test/client-side-metrics-to-gcm.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -16,7 +16,6 @@ import {describe, it, before, after} from 'mocha'; import * as assert from 'assert'; import {Bigtable} from '../src'; import * as proxyquire from 'proxyquire'; -import {TabularApiSurface} from '../src/tabular-api-surface'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import { CloudMonitoringExporter, @@ -26,7 +25,7 @@ import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler' import * as mocha from 'mocha'; import {setupBigtable} from './client-side-metrics-setup-table'; -describe('Bigtable/ClientSideMetricsToGCM', () => { +describe.only('Bigtable/ClientSideMetricsToGCM', () => { async function mockBigtable(done: mocha.Done) { /* We need to create a timeout here because if we don't then mocha shuts down @@ -93,6 +92,7 @@ describe('Bigtable/ClientSideMetricsToGCM', () => { const instanceId = 'emulator-test-instance'; // const instanceId2 = 'emulator-test-instance2'; const tableId = 'my-table'; + const tableId2 = 'my-table2'; const columnFamilyId = 'cf1'; let bigtable: Bigtable; @@ -119,6 +119,8 @@ describe('Bigtable/ClientSideMetricsToGCM', () => { const instance = bigtable.instance(instanceId); const table = instance.table(tableId); await table.getRows(); + const table2 = instance.table(tableId2); + await table2.getRows(); })(); }); }); From 90ec71c301072ebb76eae6d98faecdf8de254c19 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 31 Mar 2025 11:29:05 -0400 Subject: [PATCH 347/448] Test for multiple instances --- system-test/client-side-metrics-to-gcm.ts | 71 +++++++++++++---------- 1 file changed, 41 insertions(+), 30 deletions(-) diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts index 76113f506..9331f7ca5 100644 --- a/system-test/client-side-metrics-to-gcm.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -45,23 +45,27 @@ describe.only('Bigtable/ClientSideMetricsToGCM', () => { metrics: ResourceMetrics, resultCallback: (result: ExportResult) => void ): void { - super.export(metrics, (result: ExportResult) => { - if (!exported) { - exported = true; - try { - clearTimeout(timeout); - // The test passes when the code is 0 because that means the - // result from calling export was successful. - assert.strictEqual(result.code, 0); - done(); - resultCallback({code: 0}); - } catch (error) { - // The code here isn't 0 so we report the original error to the mocha test runner. - done(result); - done(error); + try { + super.export(metrics, (result: ExportResult) => { + if (!exported) { + exported = true; + try { + clearTimeout(timeout); + // The test passes when the code is 0 because that means the + // result from calling export was successful. + assert.strictEqual(result.code, 0); + done(); + resultCallback({code: 0}); + } catch (error) { + // The code here isn't 0 so we report the original error to the mocha test runner. 
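+ // done(result) is called before done(error) so the exporter's
+ // non-zero status code is what the test runner reports first.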
+ done(result); + done(error); + } } - } - }); + }); + } catch (error) { + done(error); + } } } @@ -83,15 +87,11 @@ describe.only('Bigtable/ClientSideMetricsToGCM', () => { }, }).Bigtable; bigtable = new FakeBigtable(); - await setupBigtable(bigtable, columnFamilyId, instanceId, [ - tableId, - tableId2, - ]); } - const instanceId = 'emulator-test-instance'; - // const instanceId2 = 'emulator-test-instance2'; - const tableId = 'my-table'; + const instanceId1 = 'emulator-test-instance'; + const instanceId2 = 'emulator-test-instance2'; + const tableId1 = 'my-table'; const tableId2 = 'my-table2'; const columnFamilyId = 'cf1'; let bigtable: Bigtable; @@ -106,7 +106,7 @@ describe.only('Bigtable/ClientSideMetricsToGCM', () => { try { // If the instance has been deleted already by another source, we don't // want this after hook to block the continuous integration pipeline. - const instance = bigtable.instance(instanceId); + const instance = bigtable.instance(instanceId1); await instance.delete({}); } catch (e) { console.warn('The instance has been deleted already'); @@ -114,13 +114,24 @@ describe.only('Bigtable/ClientSideMetricsToGCM', () => { }); it('should send the metrics to Google Cloud Monitoring for a ReadRows call', done => { + // (async () => { - await mockBigtable(done); - const instance = bigtable.instance(instanceId); - const table = instance.table(tableId); - await table.getRows(); - const table2 = instance.table(tableId2); - await table2.getRows(); + try { + await mockBigtable(done); + for (const instanceId of [instanceId1, instanceId2]) { + await setupBigtable(bigtable, columnFamilyId, instanceId, [ + tableId1, + tableId2, + ]); + const instance = bigtable.instance(instanceId); + const table = instance.table(tableId1); + await table.getRows(); + const table2 = instance.table(tableId2); + await table2.getRows(); + } + } catch (e) { + done(e); + } })(); }); }); From c94244da2f5c9057f667a0f4e2041e13813b157e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 31 Mar 2025 11:44:13 -0400 Subject: [PATCH 348/448] Adapt the test to work with multiple clients --- system-test/client-side-metrics-to-gcm.ts | 45 +++++++++++++---------- 1 file changed, 26 insertions(+), 19 deletions(-) diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts index 9331f7ca5..0bba6a1ab 100644 --- a/system-test/client-side-metrics-to-gcm.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -86,7 +86,7 @@ describe.only('Bigtable/ClientSideMetricsToGCM', () => { GCPMetricsHandler: TestGCPMetricsHandler, }, }).Bigtable; - bigtable = new FakeBigtable(); + return new FakeBigtable(); } const instanceId1 = 'emulator-test-instance'; @@ -94,13 +94,9 @@ describe.only('Bigtable/ClientSideMetricsToGCM', () => { const tableId1 = 'my-table'; const tableId2 = 'my-table2'; const columnFamilyId = 'cf1'; - let bigtable: Bigtable; - - before(async () => { - // This line is added just to make sure the bigtable variable is assigned. - // It is needed to solve a compile time error in the after hook. - bigtable = new Bigtable(); - }); + // This line is added just to make sure the bigtable variable is assigned. + // It is needed to solve a compile time error in the after hook. 
+ const bigtable = new Bigtable(); after(async () => { try { @@ -111,23 +107,34 @@ describe.only('Bigtable/ClientSideMetricsToGCM', () => { } catch (e) { console.warn('The instance has been deleted already'); } + try { + // If the instance has been deleted already by another source, we don't + // want this after hook to block the continuous integration pipeline. + const instance = bigtable.instance(instanceId2); + await instance.delete({}); + } catch (e) { + console.warn('The instance has been deleted already'); + } }); it('should send the metrics to Google Cloud Monitoring for a ReadRows call', done => { // (async () => { try { - await mockBigtable(done); - for (const instanceId of [instanceId1, instanceId2]) { - await setupBigtable(bigtable, columnFamilyId, instanceId, [ - tableId1, - tableId2, - ]); - const instance = bigtable.instance(instanceId); - const table = instance.table(tableId1); - await table.getRows(); - const table2 = instance.table(tableId2); - await table2.getRows(); + const bigtable1 = await mockBigtable(done); + const bigtable2 = await mockBigtable(done); + for (const bigtable of [bigtable1, bigtable2]) { + for (const instanceId of [instanceId1, instanceId2]) { + await setupBigtable(bigtable, columnFamilyId, instanceId, [ + tableId1, + tableId2, + ]); + const instance = bigtable.instance(instanceId); + const table = instance.table(tableId1); + await table.getRows(); + const table2 = instance.table(tableId2); + await table2.getRows(); + } } } catch (e) { done(e); From cb77f938fbde5aa6b7dc07c1c650926b14b54707 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 31 Mar 2025 11:44:45 -0400 Subject: [PATCH 349/448] Remove only --- system-test/client-side-metrics-to-gcm.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts index 0bba6a1ab..939e714d7 100644 --- a/system-test/client-side-metrics-to-gcm.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -25,7 +25,7 @@ import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler' import * as mocha from 'mocha'; import {setupBigtable} from './client-side-metrics-setup-table'; -describe.only('Bigtable/ClientSideMetricsToGCM', () => { +describe('Bigtable/ClientSideMetricsToGCM', () => { async function mockBigtable(done: mocha.Done) { /* We need to create a timeout here because if we don't then mocha shuts down From c58ceb438258d2a914659ad81cbd1836032d9b26 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 31 Mar 2025 11:56:07 -0400 Subject: [PATCH 350/448] Modification to handle no status --- src/client-side-metrics/operation-metrics-collector.ts | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 20e3cd4cc..4f1deb651 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -166,12 +166,14 @@ export class OperationMetricsCollector { const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); this.metricsHandlers.forEach(metricsHandler => { if (metricsHandler.onAttemptComplete) { + // attemptStatus?.toString() is optional because in a test proxy + // test the server does not send back the status. metricsHandler.onAttemptComplete({ attemptLatency: totalTime, serverLatency: this.serverTime ?? 
undefined, connectivityErrorCount: this.connectivityErrorCount, streaming: this.streamingOperation, - status: attemptStatus.toString(), + status: attemptStatus?.toString(), client_name: `nodejs-bigtable/${version}`, metricsCollectorData: this.getMetricsCollectorData(), projectId, @@ -241,8 +243,10 @@ export class OperationMetricsCollector { { this.metricsHandlers.forEach(metricsHandler => { if (metricsHandler.onOperationComplete) { + // finalOperationStatus?.toString() is optional because in a test + // proxy test the server does not send back the status. metricsHandler.onOperationComplete({ - status: finalOperationStatus.toString(), + status: finalOperationStatus?.toString(), streaming: this.streamingOperation, metricsCollectorData: this.getMetricsCollectorData(), client_name: `nodejs-bigtable/${version}`, From b93e5ba672c3806b7377e8cc5f54255cdef9a211 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 31 Mar 2025 13:12:37 -0400 Subject: [PATCH 351/448] Increment number of exports --- system-test/client-side-metrics-to-gcm.ts | 8 ++++++-- system-test/client-side-metrics-to-metrics-handler.ts | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts index 939e714d7..f04e98893 100644 --- a/system-test/client-side-metrics-to-gcm.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -25,7 +25,8 @@ import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler' import * as mocha from 'mocha'; import {setupBigtable} from './client-side-metrics-setup-table'; -describe('Bigtable/ClientSideMetricsToGCM', () => { +describe.only('Bigtable/ClientSideMetricsToGCM', () => { + let numberOfExports = 0; async function mockBigtable(done: mocha.Done) { /* We need to create a timeout here because if we don't then mocha shuts down @@ -54,8 +55,11 @@ describe('Bigtable/ClientSideMetricsToGCM', () => { // The test passes when the code is 0 because that means the // result from calling export was successful. assert.strictEqual(result.code, 0); - done(); resultCallback({code: 0}); + if (numberOfExports > 1) { + done(); + } + numberOfExports++; } catch (error) { // The code here isn't 0 so we report the original error to the mocha test runner. 
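Counting exports before calling `done` is delicate: mocha requires that `done` fire exactly once, while the exporter callback runs on every periodic flush. A minimal standalone sketch of that gating pattern (the helper name `doneAfterExports` and its signature are illustrative, not part of the patch series):

```ts
import * as assert from 'assert';

// Wrap mocha's `done` so it fires once, after `expected` successful exports.
// Any failing export result fails the test immediately.
function doneAfterExports(
  done: (err?: Error) => void,
  expected: number
): (result: {code: number}) => void {
  let successes = 0;
  let finished = false;
  return result => {
    if (finished) {
      return; // `done` already fired; ignore later periodic exports.
    }
    try {
      assert.strictEqual(result.code, 0);
      successes++;
      if (successes >= expected) {
        finished = true;
        done(); // pass only after the expected number of clean exports
      }
    } catch (error) {
      finished = true;
      done(error as Error); // the first failing export fails the test
    }
  };
}
```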
done(result); diff --git a/system-test/client-side-metrics-to-metrics-handler.ts b/system-test/client-side-metrics-to-metrics-handler.ts index 515891fd7..cb2bd8119 100644 --- a/system-test/client-side-metrics-to-metrics-handler.ts +++ b/system-test/client-side-metrics-to-metrics-handler.ts @@ -21,7 +21,7 @@ import {TestMetricsHandler} from '../test-common/test-metrics-handler'; import {OnOperationCompleteData} from '../src/client-side-metrics/metrics-handler'; import {setupBigtable} from './client-side-metrics-setup-table'; -describe('Bigtable/ClientSideMetricsToMetricsHandler', () => { +describe.only('Bigtable/ClientSideMetricsToMetricsHandler', () => { async function mockBigtable(projectId: string, done: mocha.Done) { let handlerRequestCount = 0; class TestGCPMetricsHandler extends TestMetricsHandler { From d05bbbaec5d92845132d78792871cf630f6116ee Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 31 Mar 2025 13:26:42 -0400 Subject: [PATCH 352/448] Add an error for the timeouts --- system-test/client-side-metrics-to-gcm.ts | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts index f04e98893..8bd344eae 100644 --- a/system-test/client-side-metrics-to-gcm.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -28,12 +28,6 @@ import {setupBigtable} from './client-side-metrics-setup-table'; describe.only('Bigtable/ClientSideMetricsToGCM', () => { let numberOfExports = 0; async function mockBigtable(done: mocha.Done) { - /* - We need to create a timeout here because if we don't then mocha shuts down - the test as it is sleeping before the GCPMetricsHandler has a chance to - export the data. - */ - const timeout = setTimeout(() => {}, 120000); /* The exporter is called every x seconds, but we only want to test the value it receives once. Since done cannot be called multiple times in mocha, @@ -122,7 +116,20 @@ describe.only('Bigtable/ClientSideMetricsToGCM', () => { }); it('should send the metrics to Google Cloud Monitoring for a ReadRows call', done => { - // + /* + We need to create a timeout here because if we don't then mocha shuts down + the test as it is sleeping before the GCPMetricsHandler has a chance to + export the data. + */ + const timeout = setTimeout(() => { + if (numberOfExports < 2) { + done( + new Error( + 'The exporters have not completed yet and the timeout is over' + ) + ); + } + }, 240000); (async () => { try { const bigtable1 = await mockBigtable(done); From 01f0515fe09a5777328081ba9e0e9c32eddd9b35 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 31 Mar 2025 13:55:55 -0400 Subject: [PATCH 353/448] Add a test for multiple simultaneous exports --- ...client-side-metrics-to-gcm-timeout-test.ts | 146 ++++++++++++++++++ 1 file changed, 146 insertions(+) create mode 100644 system-test/client-side-metrics-to-gcm-timeout-test.ts diff --git a/system-test/client-side-metrics-to-gcm-timeout-test.ts b/system-test/client-side-metrics-to-gcm-timeout-test.ts new file mode 100644 index 000000000..febf4f059 --- /dev/null +++ b/system-test/client-side-metrics-to-gcm-timeout-test.ts @@ -0,0 +1,146 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import {describe, it, before, after} from 'mocha'; +import * as assert from 'assert'; +import {Bigtable} from '../src'; +import * as proxyquire from 'proxyquire'; +import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; +import { + CloudMonitoringExporter, + ExportResult, +} from '../src/client-side-metrics/exporter'; +import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler'; +import * as mocha from 'mocha'; +import {setupBigtable} from './client-side-metrics-setup-table'; + +describe.only('Bigtable/ClientSideMetricsToGCM', () => { + // This test suite simulates a situation where the user creates multiple + // clients and ensures that the exporter doesn't produce any errors even + // when multiple clients are attempting an export. + async function mockBigtable(done: mocha.Done) { + let testFinished = false; + /* + We need to create a timeout here because if we don't then mocha shuts down + the test as it is sleeping before the GCPMetricsHandler has a chance to + export the data. + */ + const timeout = setTimeout(() => { + testFinished = true; + done(); + }, 120000); + + class TestExporter extends CloudMonitoringExporter { + export( + metrics: ResourceMetrics, + resultCallback: (result: ExportResult) => void + ): void { + try { + super.export(metrics, (result: ExportResult) => { + if (!testFinished) { + try { + // The code is expected to be 0 because the + // result from calling export was successful. + assert.strictEqual(result.code, 0); + resultCallback({code: 0}); + } catch (error) { + // The code here isn't 0 so we report the original error to the + // mocha test runner. + // The test fails here because it means that an export was + // unsuccessful. + done(result); + done(error); + } + } else { + resultCallback({code: 0}); + } + }); + } catch (error) { + done(error); + } + } + } + + class TestGCPMetricsHandler extends GCPMetricsHandler { + constructor() { + super(new TestExporter()); + } + } + + /* + Below we mock out the table so that it sends the metrics to a test exporter + that will still send the metrics to Google Cloud Monitoring, but then also + ensure the export was successful and pass the test with code 0 if it is + successful. + */ + const FakeBigtable = proxyquire('../src/index.js', { + './client-side-metrics/gcp-metrics-handler': { + GCPMetricsHandler: TestGCPMetricsHandler, + }, + }).Bigtable; + return new FakeBigtable(); + } + + const instanceId1 = 'emulator-test-instance'; + const instanceId2 = 'emulator-test-instance2'; + const tableId1 = 'my-table'; + const tableId2 = 'my-table2'; + const columnFamilyId = 'cf1'; + // This line is added just to make sure the bigtable variable is assigned. + // It is needed to solve a compile time error in the after hook. + const bigtable = new Bigtable(); + + after(async () => { + try { + // If the instance has been deleted already by another source, we don't + // want this after hook to block the continuous integration pipeline. 
+ const instance = bigtable.instance(instanceId1); + await instance.delete({}); + } catch (e) { + console.warn('The instance has been deleted already'); + } + try { + // If the instance has been deleted already by another source, we don't + // want this after hook to block the continuous integration pipeline. + const instance = bigtable.instance(instanceId2); + await instance.delete({}); + } catch (e) { + console.warn('The instance has been deleted already'); + } + }); + + it('should send the metrics to Google Cloud Monitoring for a ReadRows call', done => { + (async () => { + try { + const bigtable1 = await mockBigtable(done); + const bigtable2 = await mockBigtable(done); + for (const bigtable of [bigtable1, bigtable2]) { + for (const instanceId of [instanceId1, instanceId2]) { + await setupBigtable(bigtable, columnFamilyId, instanceId, [ + tableId1, + tableId2, + ]); + const instance = bigtable.instance(instanceId); + const table = instance.table(tableId1); + await table.getRows(); + const table2 = instance.table(tableId2); + await table2.getRows(); + } + } + } catch (e) { + done(e); + } + })(); + }); +}); From 00385daec33964ce62e0f4910c746b3b3b3ef17a Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 31 Mar 2025 13:58:14 -0400 Subject: [PATCH 354/448] Just test for one bigtable client right now --- system-test/client-side-metrics-to-gcm.ts | 41 +++++++++++------------ 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts index 8bd344eae..45125babc 100644 --- a/system-test/client-side-metrics-to-gcm.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -26,7 +26,8 @@ import * as mocha from 'mocha'; import {setupBigtable} from './client-side-metrics-setup-table'; describe.only('Bigtable/ClientSideMetricsToGCM', () => { - let numberOfExports = 0; + // This test suite ensures that for each test all the export calls are + // successful even when multiple instances and tables are created. async function mockBigtable(done: mocha.Done) { /* The exporter is called every x seconds, but we only want to test the value @@ -34,6 +35,20 @@ describe.only('Bigtable/ClientSideMetricsToGCM', () => { exported variable ensures we only test the value export receives one time. */ let exported = false; + /* + We need to create a timeout here because if we don't then mocha shuts down + the test as it is sleeping before the GCPMetricsHandler has a chance to + export the data. + */ + const timeout = setTimeout(() => { + if (!exported) { + done( + new Error( + 'The exporters have not completed yet and the timeout is over' + ) + ); + } + }, 120000); class TestExporter extends CloudMonitoringExporter { export( @@ -50,15 +65,14 @@ describe.only('Bigtable/ClientSideMetricsToGCM', () => { // result from calling export was successful. assert.strictEqual(result.code, 0); resultCallback({code: 0}); - if (numberOfExports > 1) { - done(); - } - numberOfExports++; + done(); } catch (error) { // The code here isn't 0 so we report the original error to the mocha test runner. done(result); done(error); } + } else { + resultCallback({code: 0}); } }); } catch (error) { @@ -116,25 +130,10 @@ describe.only('Bigtable/ClientSideMetricsToGCM', () => { }); it('should send the metrics to Google Cloud Monitoring for a ReadRows call', done => { - /* - We need to create a timeout here because if we don't then mocha shuts down - the test as it is sleeping before the GCPMetricsHandler has a chance to - export the data. 
- */
- const timeout = setTimeout(() => {
- if (numberOfExports < 2) {
- done(
- new Error(
- 'The exporters have not completed yet and the timeout is over'
- )
- );
- }
- }, 240000);
 (async () => {
 try {
 const bigtable1 = await mockBigtable(done);
- const bigtable2 = await mockBigtable(done);
- for (const bigtable of [bigtable1, bigtable2]) {
+ for (const bigtable of [bigtable1]) {
 for (const instanceId of [instanceId1, instanceId2]) {
 await setupBigtable(bigtable, columnFamilyId, instanceId, [
 tableId1,

From aad9fc023dc34f230faa70906f75d95a24e3d827 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Mon, 31 Mar 2025 14:02:12 -0400
Subject: [PATCH 355/448] Name test differently so that we can distinguish

---
 system-test/client-side-metrics-to-gcm-timeout-test.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/system-test/client-side-metrics-to-gcm-timeout-test.ts b/system-test/client-side-metrics-to-gcm-timeout-test.ts
index febf4f059..66bca9790 100644
--- a/system-test/client-side-metrics-to-gcm-timeout-test.ts
+++ b/system-test/client-side-metrics-to-gcm-timeout-test.ts
@@ -25,7 +25,7 @@ import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler'
 import * as mocha from 'mocha';
 import {setupBigtable} from './client-side-metrics-setup-table';

-describe.only('Bigtable/ClientSideMetricsToGCM', () => {
+describe.only('Bigtable/ClientSideMetricsToGCMTimeout', () => {
 // This test suite simulates a situation where the user creates multiple
 // clients and ensures that the exporter doesn't produce any errors even
 // when multiple clients are attempting an export.

From 1356ac54d28e661312386c2eeba05a16ca24be2c Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Mon, 31 Mar 2025 14:31:40 -0400
Subject: [PATCH 356/448] Clarify test responsibilities

Fail the timeout test only if there are exporting errors; if none occur
before the timeout fires, the test passes.
---
 ...client-side-metrics-to-gcm-timeout-test.ts | 48 +++++++++----------
 system-test/client-side-metrics-to-gcm.ts | 24 +++++-----
 2 files changed, 33 insertions(+), 39 deletions(-)

diff --git a/system-test/client-side-metrics-to-gcm-timeout-test.ts b/system-test/client-side-metrics-to-gcm-timeout-test.ts
index 66bca9790..5e28bcd8c 100644
--- a/system-test/client-side-metrics-to-gcm-timeout-test.ts
+++ b/system-test/client-side-metrics-to-gcm-timeout-test.ts
@@ -30,17 +30,6 @@ describe.only('Bigtable/ClientSideMetricsToGCMTimeout', () => {
 // clients and ensures that the exporter doesn't produce any errors even
 // when multiple clients are attempting an export.
 async function mockBigtable(done: mocha.Done) {
- let testFinished = false;
- /*
- We need to create a timeout here because if we don't then mocha shuts down
- the test as it is sleeping before the GCPMetricsHandler has a chance to
- export the data.
- */
- const timeout = setTimeout(() => {
- testFinished = true;
- done();
- }, 120000);
-
 class TestExporter extends CloudMonitoringExporter {
 export(
 metrics: ResourceMetrics,
@@ -48,22 +37,18 @@ describe.only('Bigtable/ClientSideMetricsToGCMTimeout', () => {
 ): void {
 try {
 super.export(metrics, (result: ExportResult) => {
- if (!testFinished) {
- try {
- // The code is expected to be 0 because the
- // result from calling export was successful.
- assert.strictEqual(result.code, 0);
- resultCallback({code: 0});
- } catch (error) {
- // The code here isn't 0 so we report the original error to the
- // mocha test runner.
- // The test fails here because it means that an export was
- // unsuccessful.
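Outside the diff, the contract that patch 356 sets up is easier to see: any export error fails the test immediately, and a quiet deadline passes it. A condensed sketch under those assumptions (`startClientsThatExportMetrics` is an illustrative stand-in; the real suite routes errors through the `TestExporter`):

```ts
import {it} from 'mocha';

// Illustrative stand-in for creating the proxyquired clients and wiring
// the export-error callback into them.
declare function startClientsThatExportMetrics(
  onError: (err: Error) => void
): void;

it('passes when no export errors occur before the deadline', function (done) {
  this.timeout(150000); // mocha must outlive the 120s export window
  let finished = false;
  const failFast = (err: Error) => {
    if (!finished) {
      finished = true;
      done(err); // an exporter reported a problem: fail immediately
    }
  };
  setTimeout(() => {
    if (!finished) {
      finished = true;
      done(); // the deadline elapsed with no export errors: pass
    }
  }, 120000);
  startClientsThatExportMetrics(failFast);
});
```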
- done(result); - done(error); - } - } else { + try { + // The code is expected to be 0 because the + // result from calling export was successful. + assert.strictEqual(result.code, 0); resultCallback({code: 0}); + } catch (error) { + // The code here isn't 0 so we report the original error to the + // mocha test runner. + // The test fails here because it means that an export was + // unsuccessful. + done(result); + done(error); } }); } catch (error) { @@ -121,6 +106,17 @@ describe.only('Bigtable/ClientSideMetricsToGCMTimeout', () => { }); it('should send the metrics to Google Cloud Monitoring for a ReadRows call', done => { + let testFinished = false; + /* + We need to create a timeout here because if we don't then mocha shuts down + the test as it is sleeping before the GCPMetricsHandler has a chance to + export the data. When the timeout is finished, if there were no export + errors then the test passes. + */ + const timeout = setTimeout(() => { + testFinished = true; + done(); + }, 120000); (async () => { try { const bigtable1 = await mockBigtable(done); diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts index 45125babc..707a90ae5 100644 --- a/system-test/client-side-metrics-to-gcm.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -132,19 +132,17 @@ describe.only('Bigtable/ClientSideMetricsToGCM', () => { it('should send the metrics to Google Cloud Monitoring for a ReadRows call', done => { (async () => { try { - const bigtable1 = await mockBigtable(done); - for (const bigtable of [bigtable1]) { - for (const instanceId of [instanceId1, instanceId2]) { - await setupBigtable(bigtable, columnFamilyId, instanceId, [ - tableId1, - tableId2, - ]); - const instance = bigtable.instance(instanceId); - const table = instance.table(tableId1); - await table.getRows(); - const table2 = instance.table(tableId2); - await table2.getRows(); - } + const bigtable = await mockBigtable(done); + for (const instanceId of [instanceId1, instanceId2]) { + await setupBigtable(bigtable, columnFamilyId, instanceId, [ + tableId1, + tableId2, + ]); + const instance = bigtable.instance(instanceId); + const table = instance.table(tableId1); + await table.getRows(); + const table2 = instance.table(tableId2); + await table2.getRows(); } } catch (e) { done(e); From df8249ed4a8442389912cf2fb6bfc500af3f4976 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 31 Mar 2025 14:45:14 -0400 Subject: [PATCH 357/448] Eliminate variable assignment --- system-test/client-side-metrics-to-gcm-timeout-test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system-test/client-side-metrics-to-gcm-timeout-test.ts b/system-test/client-side-metrics-to-gcm-timeout-test.ts index 5e28bcd8c..5e155c745 100644 --- a/system-test/client-side-metrics-to-gcm-timeout-test.ts +++ b/system-test/client-side-metrics-to-gcm-timeout-test.ts @@ -113,7 +113,7 @@ describe.only('Bigtable/ClientSideMetricsToGCMTimeout', () => { export the data. When the timeout is finished, if there were no export errors then the test passes. 
*/ - const timeout = setTimeout(() => { + setTimeout(() => { testFinished = true; done(); }, 120000); From 92a396676a344130958086aacebeccca47cc1917 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 31 Mar 2025 14:54:27 -0400 Subject: [PATCH 358/448] Add a waiting script to the instance creation --- system-test/client-side-metrics-setup-table.ts | 11 ++++++++++- .../client-side-metrics-to-gcm-timeout-test.ts | 1 + system-test/client-side-metrics-to-gcm.ts | 1 + 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/system-test/client-side-metrics-setup-table.ts b/system-test/client-side-metrics-setup-table.ts index 41921cee0..c7a21143f 100644 --- a/system-test/client-side-metrics-setup-table.ts +++ b/system-test/client-side-metrics-setup-table.ts @@ -21,7 +21,7 @@ export async function setupBigtable( ) { const instance = bigtable.instance(instanceId); const [instanceInfo] = await instance.exists(); - if (!instanceInfo) { + while (!instanceInfo) { const [, operation] = await instance.create({ clusters: { id: 'fake-cluster3', @@ -30,6 +30,15 @@ export async function setupBigtable( }, }); await operation.promise(); + /** + * For whatever reason, even after waiting for an operation.promise() + * call to complete, the instance still doesn't seem to be ready yet so + * we do another check to ensure the instance is ready. + */ + const [instanceInfoAgain] = await instance.exists(); + if (instanceInfoAgain) { + break; + } } const tables = tableIds.map(tableId => instance.table(tableId)); for (const currentTable of tables) { diff --git a/system-test/client-side-metrics-to-gcm-timeout-test.ts b/system-test/client-side-metrics-to-gcm-timeout-test.ts index 5e155c745..650cde13b 100644 --- a/system-test/client-side-metrics-to-gcm-timeout-test.ts +++ b/system-test/client-side-metrics-to-gcm-timeout-test.ts @@ -135,6 +135,7 @@ describe.only('Bigtable/ClientSideMetricsToGCMTimeout', () => { } } } catch (e) { + done(new Error('An error occurred while running the script')); done(e); } })(); diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts index 707a90ae5..d9b048f88 100644 --- a/system-test/client-side-metrics-to-gcm.ts +++ b/system-test/client-side-metrics-to-gcm.ts @@ -145,6 +145,7 @@ describe.only('Bigtable/ClientSideMetricsToGCM', () => { await table2.getRows(); } } catch (e) { + done(new Error('An error occurred while running the script')); done(e); } })(); From 0b08b0506c0eafdbb018e01c6e8ddc42738f6a4c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 31 Mar 2025 15:25:42 -0400 Subject: [PATCH 359/448] Add the header and combine the tests --- system-test/client-side-metrics.ts | 400 +++++++++++++++++++++++++++++ 1 file changed, 400 insertions(+) create mode 100644 system-test/client-side-metrics.ts diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts new file mode 100644 index 000000000..ff601abbb --- /dev/null +++ b/system-test/client-side-metrics.ts @@ -0,0 +1,400 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +import {after, before, describe, it} from 'mocha'; +import * as mocha from 'mocha'; +import { + CloudMonitoringExporter, + ExportResult, +} from '../src/client-side-metrics/exporter'; +import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; +import * as assert from 'assert'; +import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler'; +import * as proxyquire from 'proxyquire'; +import {Bigtable} from '../src'; +import {setupBigtable} from './client-side-metrics-setup-table'; +import {TestMetricsHandler} from '../test-common/test-metrics-handler'; +import {OnOperationCompleteData} from '../src/client-side-metrics/metrics-handler'; + +describe.only('Bigtable/ClientSideMetrics', () => { + const instanceId1 = 'emulator-test-instance'; + const instanceId2 = 'emulator-test-instance2'; + const tableId1 = 'my-table'; + const tableId2 = 'my-table2'; + const columnFamilyId = 'cf1'; + let bigtable: Bigtable; + + before(async () => { + for (const instanceId of [instanceId1, instanceId2]) { + bigtable = new Bigtable(); + await setupBigtable(bigtable, columnFamilyId, instanceId, [ + tableId1, + tableId2, + ]); + } + }); + + after(async () => { + try { + // If the instance has been deleted already by another source, we don't + // want this after hook to block the continuous integration pipeline. + const instance = bigtable.instance(instanceId1); + await instance.delete({}); + } catch (e) { + console.warn('The instance has been deleted already'); + } + try { + // If the instance has been deleted already by another source, we don't + // want this after hook to block the continuous integration pipeline. + const instance = bigtable.instance(instanceId2); + await instance.delete({}); + } catch (e) { + console.warn('The instance has been deleted already'); + } + }); + + describe('Bigtable/ClientSideMetricsToGCM', () => { + // This test suite ensures that for each test all the export calls are + // successful even when multiple instances and tables are created. + async function mockBigtable(done: mocha.Done) { + /* + The exporter is called every x seconds, but we only want to test the value + it receives once. Since done cannot be called multiple times in mocha, + exported variable ensures we only test the value export receives one time. + */ + let exported = false; + /* + We need to create a timeout here because if we don't then mocha shuts down + the test as it is sleeping before the GCPMetricsHandler has a chance to + export the data. + */ + const timeout = setTimeout(() => { + if (!exported) { + done( + new Error( + 'The exporters have not completed yet and the timeout is over' + ) + ); + } + }, 120000); + + class TestExporter extends CloudMonitoringExporter { + export( + metrics: ResourceMetrics, + resultCallback: (result: ExportResult) => void + ): void { + try { + super.export(metrics, (result: ExportResult) => { + if (!exported) { + exported = true; + try { + clearTimeout(timeout); + // The test passes when the code is 0 because that means the + // result from calling export was successful. + assert.strictEqual(result.code, 0); + resultCallback({code: 0}); + done(); + } catch (error) { + // The code here isn't 0 so we report the original error to the mocha test runner. 
+ done(result); + done(error); + } + } else { + resultCallback({code: 0}); + } + }); + } catch (error) { + done(error); + } + } + } + + class TestGCPMetricsHandler extends GCPMetricsHandler { + constructor() { + super(new TestExporter()); + } + } + + /* + Below we mock out the table so that it sends the metrics to a test exporter + that will still send the metrics to Google Cloud Monitoring, but then also + ensure the export was successful and pass the test with code 0 if it is + successful. + */ + const FakeBigtable = proxyquire('../src/index.js', { + './client-side-metrics/gcp-metrics-handler': { + GCPMetricsHandler: TestGCPMetricsHandler, + }, + }).Bigtable; + return new FakeBigtable(); + } + + it('should send the metrics to Google Cloud Monitoring for a ReadRows call', done => { + (async () => { + try { + const bigtable = await mockBigtable(done); + for (const instanceId of [instanceId1, instanceId2]) { + await setupBigtable(bigtable, columnFamilyId, instanceId, [ + tableId1, + tableId2, + ]); + const instance = bigtable.instance(instanceId); + const table = instance.table(tableId1); + await table.getRows(); + const table2 = instance.table(tableId2); + await table2.getRows(); + } + } catch (e) { + done(new Error('An error occurred while running the script')); + done(e); + } + })(); + }); + }); + describe('Bigtable/ClientSideMetricsToGCMTimeout', () => { + // This test suite simulates a situation where the user creates multiple + // clients and ensures that the exporter doesn't produce any errors even + // when multiple clients are attempting an export. + async function mockBigtable(done: mocha.Done) { + class TestExporter extends CloudMonitoringExporter { + export( + metrics: ResourceMetrics, + resultCallback: (result: ExportResult) => void + ): void { + try { + super.export(metrics, (result: ExportResult) => { + try { + // The code is expected to be 0 because the + // result from calling export was successful. + assert.strictEqual(result.code, 0); + resultCallback({code: 0}); + } catch (error) { + // The code here isn't 0 so we report the original error to the + // mocha test runner. + // The test fails here because it means that an export was + // unsuccessful. + done(result); + done(error); + } + }); + } catch (error) { + done(error); + } + } + } + + class TestGCPMetricsHandler extends GCPMetricsHandler { + constructor() { + super(new TestExporter()); + } + } + + /* + Below we mock out the table so that it sends the metrics to a test exporter + that will still send the metrics to Google Cloud Monitoring, but then also + ensure the export was successful and pass the test with code 0 if it is + successful. + */ + const FakeBigtable = proxyquire('../src/index.js', { + './client-side-metrics/gcp-metrics-handler': { + GCPMetricsHandler: TestGCPMetricsHandler, + }, + }).Bigtable; + return new FakeBigtable(); + } + + it('should send the metrics to Google Cloud Monitoring for a ReadRows call', done => { + let testFinished = false; + /* + We need to create a timeout here because if we don't then mocha shuts down + the test as it is sleeping before the GCPMetricsHandler has a chance to + export the data. When the timeout is finished, if there were no export + errors then the test passes. 
+ */ + setTimeout(() => { + testFinished = true; + done(); + }, 120000); + (async () => { + try { + const bigtable1 = await mockBigtable(done); + const bigtable2 = await mockBigtable(done); + for (const bigtable of [bigtable1, bigtable2]) { + for (const instanceId of [instanceId1, instanceId2]) { + await setupBigtable(bigtable, columnFamilyId, instanceId, [ + tableId1, + tableId2, + ]); + const instance = bigtable.instance(instanceId); + const table = instance.table(tableId1); + await table.getRows(); + const table2 = instance.table(tableId2); + await table2.getRows(); + } + } + } catch (e) { + done(new Error('An error occurred while running the script')); + done(e); + } + })(); + }); + }); + describe('Bigtable/ClientSideMetricsToMetricsHandler', () => { + async function mockBigtable(projectId: string, done: mocha.Done) { + let handlerRequestCount = 0; + class TestGCPMetricsHandler extends TestMetricsHandler { + onOperationComplete(data: OnOperationCompleteData) { + handlerRequestCount++; + try { + super.onOperationComplete(data); + if (handlerRequestCount > 1) { + assert.strictEqual(this.requestsHandled.length, 4); + const firstRequest = this.requestsHandled[0] as any; + // We would expect these parameters to be different every time so delete + // them from the comparison after checking they exist. + assert(firstRequest.attemptLatency); + assert(firstRequest.serverLatency); + assert(firstRequest.metricsCollectorData.client_uid); + delete firstRequest.attemptLatency; + delete firstRequest.serverLatency; + delete firstRequest.metricsCollectorData.client_uid; + delete firstRequest.metricsCollectorData.appProfileId; + assert.deepStrictEqual(firstRequest, { + connectivityErrorCount: 0, + streaming: 'true', + status: '0', + client_name: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'emulator-test-instance', + table: 'my-table', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + method: 'Bigtable.ReadRows', + }, + projectId, + }); + const secondRequest = this.requestsHandled[1] as any; + // We would expect these parameters to be different every time so delete + // them from the comparison after checking they exist. + assert(secondRequest.operationLatency); + assert(secondRequest.firstResponseLatency); + assert(secondRequest.metricsCollectorData.client_uid); + delete secondRequest.operationLatency; + delete secondRequest.firstResponseLatency; + delete secondRequest.metricsCollectorData.client_uid; + delete secondRequest.metricsCollectorData.appProfileId; + assert.deepStrictEqual(secondRequest, { + status: '0', + streaming: 'true', + client_name: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'emulator-test-instance', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + method: 'Bigtable.ReadRows', + table: 'my-table', + }, + projectId, + retryCount: 0, + }); + // We would expect these parameters to be different every time so delete + // them from the comparison after checking they exist. 
+ const thirdRequest = this.requestsHandled[2] as any; + assert(thirdRequest.attemptLatency); + assert(thirdRequest.serverLatency); + assert(thirdRequest.metricsCollectorData.client_uid); + delete thirdRequest.attemptLatency; + delete thirdRequest.serverLatency; + delete thirdRequest.metricsCollectorData.client_uid; + delete thirdRequest.metricsCollectorData.appProfileId; + assert.deepStrictEqual(thirdRequest, { + connectivityErrorCount: 0, + streaming: 'true', + status: '0', + client_name: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'emulator-test-instance', + table: 'my-table2', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + method: 'Bigtable.ReadRows', + }, + projectId, + }); + const fourthRequest = this.requestsHandled[3] as any; + // We would expect these parameters to be different every time so delete + // them from the comparison after checking they exist. + assert(fourthRequest.operationLatency); + assert(fourthRequest.firstResponseLatency); + assert(fourthRequest.metricsCollectorData.client_uid); + delete fourthRequest.operationLatency; + delete fourthRequest.firstResponseLatency; + delete fourthRequest.metricsCollectorData.client_uid; + delete fourthRequest.metricsCollectorData.appProfileId; + assert.deepStrictEqual(fourthRequest, { + status: '0', + streaming: 'true', + client_name: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'emulator-test-instance', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + method: 'Bigtable.ReadRows', + table: 'my-table2', + }, + projectId, + retryCount: 0, + }); + done(); + } + } catch (e) { + done(e); + } + } + } + + const FakeBigtable = proxyquire('../src/index.js', { + './client-side-metrics/gcp-metrics-handler': { + GCPMetricsHandler: TestGCPMetricsHandler, + }, + }).Bigtable; + bigtable = new FakeBigtable(); + await setupBigtable(bigtable, columnFamilyId, instanceId1, [ + tableId1, + tableId2, + ]); + } + + it('should send the metrics to the metrics handler for a ReadRows call', done => { + bigtable = new Bigtable(); + (async () => { + const projectId: string = await new Promise((resolve, reject) => { + bigtable.getProjectId_((err, projectId) => { + if (err) { + reject(err); + } else { + resolve(projectId as string); + } + }); + }); + await mockBigtable(projectId, done); + const instance = bigtable.instance(instanceId1); + const table = instance.table(tableId1); + await table.getRows(); + const table2 = instance.table(tableId2); + await table2.getRows(); + })(); + }); + }); +}); From debede7cd5e196031775836b11392ca1e54b168e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 31 Mar 2025 15:28:38 -0400 Subject: [PATCH 360/448] Remove only --- system-test/client-side-metrics.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index ff601abbb..59c250396 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -27,7 +27,7 @@ import {setupBigtable} from './client-side-metrics-setup-table'; import {TestMetricsHandler} from '../test-common/test-metrics-handler'; import {OnOperationCompleteData} from '../src/client-side-metrics/metrics-handler'; -describe.only('Bigtable/ClientSideMetrics', () => { +describe('Bigtable/ClientSideMetrics', () => { const instanceId1 = 'emulator-test-instance'; const instanceId2 = 'emulator-test-instance2'; const tableId1 = 'my-table'; From 53febe98713e2fe1c59aaac006e89e35a647c3ef Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 31 Mar 2025 15:29:43 -0400 
Subject: [PATCH 361/448] Delete files that have been consolidated --- ...client-side-metrics-to-gcm-timeout-test.ts | 143 ------------- system-test/client-side-metrics-to-gcm.ts | 153 -------------- .../client-side-metrics-to-metrics-handler.ts | 192 ------------------ 3 files changed, 488 deletions(-) delete mode 100644 system-test/client-side-metrics-to-gcm-timeout-test.ts delete mode 100644 system-test/client-side-metrics-to-gcm.ts delete mode 100644 system-test/client-side-metrics-to-metrics-handler.ts diff --git a/system-test/client-side-metrics-to-gcm-timeout-test.ts b/system-test/client-side-metrics-to-gcm-timeout-test.ts deleted file mode 100644 index 650cde13b..000000000 --- a/system-test/client-side-metrics-to-gcm-timeout-test.ts +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import {describe, it, before, after} from 'mocha'; -import * as assert from 'assert'; -import {Bigtable} from '../src'; -import * as proxyquire from 'proxyquire'; -import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; -import { - CloudMonitoringExporter, - ExportResult, -} from '../src/client-side-metrics/exporter'; -import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler'; -import * as mocha from 'mocha'; -import {setupBigtable} from './client-side-metrics-setup-table'; - -describe.only('Bigtable/ClientSideMetricsToGCMTimeout', () => { - // This test suite simulates a situation where the user creates multiple - // clients and ensures that the exporter doesn't produce any errors even - // when multiple clients are attempting an export. - async function mockBigtable(done: mocha.Done) { - class TestExporter extends CloudMonitoringExporter { - export( - metrics: ResourceMetrics, - resultCallback: (result: ExportResult) => void - ): void { - try { - super.export(metrics, (result: ExportResult) => { - try { - // The code is expected to be 0 because the - // result from calling export was successful. - assert.strictEqual(result.code, 0); - resultCallback({code: 0}); - } catch (error) { - // The code here isn't 0 so we report the original error to the - // mocha test runner. - // The test fails here because it means that an export was - // unsuccessful. - done(result); - done(error); - } - }); - } catch (error) { - done(error); - } - } - } - - class TestGCPMetricsHandler extends GCPMetricsHandler { - constructor() { - super(new TestExporter()); - } - } - - /* - Below we mock out the table so that it sends the metrics to a test exporter - that will still send the metrics to Google Cloud Monitoring, but then also - ensure the export was successful and pass the test with code 0 if it is - successful. 
- */ - const FakeBigtable = proxyquire('../src/index.js', { - './client-side-metrics/gcp-metrics-handler': { - GCPMetricsHandler: TestGCPMetricsHandler, - }, - }).Bigtable; - return new FakeBigtable(); - } - - const instanceId1 = 'emulator-test-instance'; - const instanceId2 = 'emulator-test-instance2'; - const tableId1 = 'my-table'; - const tableId2 = 'my-table2'; - const columnFamilyId = 'cf1'; - // This line is added just to make sure the bigtable variable is assigned. - // It is needed to solve a compile time error in the after hook. - const bigtable = new Bigtable(); - - after(async () => { - try { - // If the instance has been deleted already by another source, we don't - // want this after hook to block the continuous integration pipeline. - const instance = bigtable.instance(instanceId1); - await instance.delete({}); - } catch (e) { - console.warn('The instance has been deleted already'); - } - try { - // If the instance has been deleted already by another source, we don't - // want this after hook to block the continuous integration pipeline. - const instance = bigtable.instance(instanceId2); - await instance.delete({}); - } catch (e) { - console.warn('The instance has been deleted already'); - } - }); - - it('should send the metrics to Google Cloud Monitoring for a ReadRows call', done => { - let testFinished = false; - /* - We need to create a timeout here because if we don't then mocha shuts down - the test as it is sleeping before the GCPMetricsHandler has a chance to - export the data. When the timeout is finished, if there were no export - errors then the test passes. - */ - setTimeout(() => { - testFinished = true; - done(); - }, 120000); - (async () => { - try { - const bigtable1 = await mockBigtable(done); - const bigtable2 = await mockBigtable(done); - for (const bigtable of [bigtable1, bigtable2]) { - for (const instanceId of [instanceId1, instanceId2]) { - await setupBigtable(bigtable, columnFamilyId, instanceId, [ - tableId1, - tableId2, - ]); - const instance = bigtable.instance(instanceId); - const table = instance.table(tableId1); - await table.getRows(); - const table2 = instance.table(tableId2); - await table2.getRows(); - } - } - } catch (e) { - done(new Error('An error occurred while running the script')); - done(e); - } - })(); - }); -}); diff --git a/system-test/client-side-metrics-to-gcm.ts b/system-test/client-side-metrics-to-gcm.ts deleted file mode 100644 index d9b048f88..000000000 --- a/system-test/client-side-metrics-to-gcm.ts +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import {describe, it, before, after} from 'mocha'; -import * as assert from 'assert'; -import {Bigtable} from '../src'; -import * as proxyquire from 'proxyquire'; -import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; -import { - CloudMonitoringExporter, - ExportResult, -} from '../src/client-side-metrics/exporter'; -import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler'; -import * as mocha from 'mocha'; -import {setupBigtable} from './client-side-metrics-setup-table'; - -describe.only('Bigtable/ClientSideMetricsToGCM', () => { - // This test suite ensures that for each test all the export calls are - // successful even when multiple instances and tables are created. - async function mockBigtable(done: mocha.Done) { - /* - The exporter is called every x seconds, but we only want to test the value - it receives once. Since done cannot be called multiple times in mocha, - exported variable ensures we only test the value export receives one time. - */ - let exported = false; - /* - We need to create a timeout here because if we don't then mocha shuts down - the test as it is sleeping before the GCPMetricsHandler has a chance to - export the data. - */ - const timeout = setTimeout(() => { - if (!exported) { - done( - new Error( - 'The exporters have not completed yet and the timeout is over' - ) - ); - } - }, 120000); - - class TestExporter extends CloudMonitoringExporter { - export( - metrics: ResourceMetrics, - resultCallback: (result: ExportResult) => void - ): void { - try { - super.export(metrics, (result: ExportResult) => { - if (!exported) { - exported = true; - try { - clearTimeout(timeout); - // The test passes when the code is 0 because that means the - // result from calling export was successful. - assert.strictEqual(result.code, 0); - resultCallback({code: 0}); - done(); - } catch (error) { - // The code here isn't 0 so we report the original error to the mocha test runner. - done(result); - done(error); - } - } else { - resultCallback({code: 0}); - } - }); - } catch (error) { - done(error); - } - } - } - - class TestGCPMetricsHandler extends GCPMetricsHandler { - constructor() { - super(new TestExporter()); - } - } - - /* - Below we mock out the table so that it sends the metrics to a test exporter - that will still send the metrics to Google Cloud Monitoring, but then also - ensure the export was successful and pass the test with code 0 if it is - successful. - */ - const FakeBigtable = proxyquire('../src/index.js', { - './client-side-metrics/gcp-metrics-handler': { - GCPMetricsHandler: TestGCPMetricsHandler, - }, - }).Bigtable; - return new FakeBigtable(); - } - - const instanceId1 = 'emulator-test-instance'; - const instanceId2 = 'emulator-test-instance2'; - const tableId1 = 'my-table'; - const tableId2 = 'my-table2'; - const columnFamilyId = 'cf1'; - // This line is added just to make sure the bigtable variable is assigned. - // It is needed to solve a compile time error in the after hook. - const bigtable = new Bigtable(); - - after(async () => { - try { - // If the instance has been deleted already by another source, we don't - // want this after hook to block the continuous integration pipeline. - const instance = bigtable.instance(instanceId1); - await instance.delete({}); - } catch (e) { - console.warn('The instance has been deleted already'); - } - try { - // If the instance has been deleted already by another source, we don't - // want this after hook to block the continuous integration pipeline. 
- const instance = bigtable.instance(instanceId2); - await instance.delete({}); - } catch (e) { - console.warn('The instance has been deleted already'); - } - }); - - it('should send the metrics to Google Cloud Monitoring for a ReadRows call', done => { - (async () => { - try { - const bigtable = await mockBigtable(done); - for (const instanceId of [instanceId1, instanceId2]) { - await setupBigtable(bigtable, columnFamilyId, instanceId, [ - tableId1, - tableId2, - ]); - const instance = bigtable.instance(instanceId); - const table = instance.table(tableId1); - await table.getRows(); - const table2 = instance.table(tableId2); - await table2.getRows(); - } - } catch (e) { - done(new Error('An error occurred while running the script')); - done(e); - } - })(); - }); -}); diff --git a/system-test/client-side-metrics-to-metrics-handler.ts b/system-test/client-side-metrics-to-metrics-handler.ts deleted file mode 100644 index cb2bd8119..000000000 --- a/system-test/client-side-metrics-to-metrics-handler.ts +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import {describe, it, before, after} from 'mocha'; -import {Bigtable} from '../src'; -import * as proxyquire from 'proxyquire'; -import * as mocha from 'mocha'; -import * as assert from 'assert'; -import {TestMetricsHandler} from '../test-common/test-metrics-handler'; -import {OnOperationCompleteData} from '../src/client-side-metrics/metrics-handler'; -import {setupBigtable} from './client-side-metrics-setup-table'; - -describe.only('Bigtable/ClientSideMetricsToMetricsHandler', () => { - async function mockBigtable(projectId: string, done: mocha.Done) { - let handlerRequestCount = 0; - class TestGCPMetricsHandler extends TestMetricsHandler { - onOperationComplete(data: OnOperationCompleteData) { - handlerRequestCount++; - try { - super.onOperationComplete(data); - if (handlerRequestCount > 1) { - assert.strictEqual(this.requestsHandled.length, 4); - const firstRequest = this.requestsHandled[0] as any; - // We would expect these parameters to be different every time so delete - // them from the comparison after checking they exist. - assert(firstRequest.attemptLatency); - assert(firstRequest.serverLatency); - assert(firstRequest.metricsCollectorData.client_uid); - delete firstRequest.attemptLatency; - delete firstRequest.serverLatency; - delete firstRequest.metricsCollectorData.client_uid; - delete firstRequest.metricsCollectorData.appProfileId; - assert.deepStrictEqual(firstRequest, { - connectivityErrorCount: 0, - streaming: 'true', - status: '0', - client_name: 'nodejs-bigtable', - metricsCollectorData: { - instanceId: 'emulator-test-instance', - table: 'my-table', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - method: 'Bigtable.ReadRows', - }, - projectId, - }); - const secondRequest = this.requestsHandled[1] as any; - // We would expect these parameters to be different every time so delete - // them from the comparison after checking they exist. 
- assert(secondRequest.operationLatency); - assert(secondRequest.firstResponseLatency); - assert(secondRequest.metricsCollectorData.client_uid); - delete secondRequest.operationLatency; - delete secondRequest.firstResponseLatency; - delete secondRequest.metricsCollectorData.client_uid; - delete secondRequest.metricsCollectorData.appProfileId; - assert.deepStrictEqual(secondRequest, { - status: '0', - streaming: 'true', - client_name: 'nodejs-bigtable', - metricsCollectorData: { - instanceId: 'emulator-test-instance', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - method: 'Bigtable.ReadRows', - table: 'my-table', - }, - projectId, - retryCount: 0, - }); - // We would expect these parameters to be different every time so delete - // them from the comparison after checking they exist. - const thirdRequest = this.requestsHandled[2] as any; - assert(thirdRequest.attemptLatency); - assert(thirdRequest.serverLatency); - assert(thirdRequest.metricsCollectorData.client_uid); - delete thirdRequest.attemptLatency; - delete thirdRequest.serverLatency; - delete thirdRequest.metricsCollectorData.client_uid; - delete thirdRequest.metricsCollectorData.appProfileId; - assert.deepStrictEqual(thirdRequest, { - connectivityErrorCount: 0, - streaming: 'true', - status: '0', - client_name: 'nodejs-bigtable', - metricsCollectorData: { - instanceId: 'emulator-test-instance', - table: 'my-table2', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - method: 'Bigtable.ReadRows', - }, - projectId, - }); - const fourthRequest = this.requestsHandled[3] as any; - // We would expect these parameters to be different every time so delete - // them from the comparison after checking they exist. - assert(fourthRequest.operationLatency); - assert(fourthRequest.firstResponseLatency); - assert(fourthRequest.metricsCollectorData.client_uid); - delete fourthRequest.operationLatency; - delete fourthRequest.firstResponseLatency; - delete fourthRequest.metricsCollectorData.client_uid; - delete fourthRequest.metricsCollectorData.appProfileId; - assert.deepStrictEqual(fourthRequest, { - status: '0', - streaming: 'true', - client_name: 'nodejs-bigtable', - metricsCollectorData: { - instanceId: 'emulator-test-instance', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - method: 'Bigtable.ReadRows', - table: 'my-table2', - }, - projectId, - retryCount: 0, - }); - done(); - } - } catch (e) { - done(e); - } - } - } - - const FakeBigtable = proxyquire('../src/index.js', { - './client-side-metrics/gcp-metrics-handler': { - GCPMetricsHandler: TestGCPMetricsHandler, - }, - }).Bigtable; - bigtable = new FakeBigtable(); - await setupBigtable(bigtable, columnFamilyId, instanceId, [ - tableId, - tableId2, - ]); - } - - const instanceId = 'emulator-test-instance'; - const tableId = 'my-table'; - const tableId2 = 'my-table2'; - const columnFamilyId = 'cf1'; - let bigtable: Bigtable; - - before(async () => { - // This line is added just to make sure the bigtable variable is assigned. - // It is needed to solve a compile time error in the after hook. - bigtable = new Bigtable(); - }); - - after(async () => { - try { - // If the instance has been deleted already by another source, we don't - // want this after hook to block the continuous integration pipeline. 
- const instance = bigtable.instance(instanceId); - await instance.delete({}); - } catch (e) { - console.warn('The instance has been deleted already'); - } - }); - - it('should send the metrics to the metrics handler for a ReadRows call', done => { - (async () => { - const projectId: string = await new Promise((resolve, reject) => { - bigtable.getProjectId_((err, projectId) => { - if (err) { - reject(err); - } else { - resolve(projectId as string); - } - }); - }); - await mockBigtable(projectId, done); - const instance = bigtable.instance(instanceId); - const table = instance.table(tableId); - await table.getRows(); - const table2 = instance.table(tableId2); - await table2.getRows(); - })(); - }); -}); From 53e593694ca5140198b8c3556b71365933d8f838 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 31 Mar 2025 16:10:24 -0400 Subject: [PATCH 362/448] Run the linter and add guards --- .../operation-metrics-collector.ts | 291 ++++++++++-------- 1 file changed, 160 insertions(+), 131 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 4f1deb651..fb3d0266c 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -136,13 +136,17 @@ export class OperationMetricsCollector { * Called when the operation starts. Records the start time. */ onOperationStart() { - if (this.state === MetricsCollectorState.OPERATION_NOT_STARTED) { - this.operationStartTime = new Date(); - this.firstResponseLatency = null; - this.state = - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS; - } else { - console.warn('Invalid state transition'); + try { + if (this.state === MetricsCollectorState.OPERATION_NOT_STARTED) { + this.operationStartTime = new Date(); + this.firstResponseLatency = null; + this.state = + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS; + } else { + console.warn('Invalid state transition'); + } + } finally { + // Nothing is required here. We just don't want errors reaching the user. } } @@ -152,37 +156,41 @@ export class OperationMetricsCollector { * @param {grpc.status} attemptStatus The grpc status for the attempt. */ onAttemptComplete(projectId: string, attemptStatus: grpc.status) { - if ( - this.state === - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET || - this.state === - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_SOME_ROWS_RECEIVED - ) { - this.state = - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS; - this.attemptCount++; - const endTime = new Date(); - if (projectId && this.attemptStartTime) { - const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); - this.metricsHandlers.forEach(metricsHandler => { - if (metricsHandler.onAttemptComplete) { - // attemptStatus?.toString() is optional because in a test proxy - // test the server does not send back the status. - metricsHandler.onAttemptComplete({ - attemptLatency: totalTime, - serverLatency: this.serverTime ?? 
undefined, - connectivityErrorCount: this.connectivityErrorCount, - streaming: this.streamingOperation, - status: attemptStatus?.toString(), - client_name: `nodejs-bigtable/${version}`, - metricsCollectorData: this.getMetricsCollectorData(), - projectId, - }); - } - }); + try { + if ( + this.state === + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET || + this.state === + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_SOME_ROWS_RECEIVED + ) { + this.state = + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS; + this.attemptCount++; + const endTime = new Date(); + if (projectId && this.attemptStartTime) { + const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); + this.metricsHandlers.forEach(metricsHandler => { + if (metricsHandler.onAttemptComplete) { + // attemptStatus?.toString() is optional because in a test proxy + // test the server does not send back the status. + metricsHandler.onAttemptComplete({ + attemptLatency: totalTime, + serverLatency: this.serverTime ?? undefined, + connectivityErrorCount: this.connectivityErrorCount, + streaming: this.streamingOperation, + status: attemptStatus?.toString(), + client_name: `nodejs-bigtable/${version}`, + metricsCollectorData: this.getMetricsCollectorData(), + projectId, + }); + } + }); + } + } else { + console.warn('Invalid state transition attempted'); } - } else { - console.warn('Invalid state transition attempted'); + } finally { + // Nothing is required here. We just don't want errors reaching the user. } } @@ -190,18 +198,22 @@ export class OperationMetricsCollector { * Called when a new attempt starts. Records the start time of the attempt. */ onAttemptStart() { - if ( - this.state === - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS - ) { - this.state = - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET; - this.attemptStartTime = new Date(); - this.serverTime = null; - this.serverTimeRead = false; - this.connectivityErrorCount = 0; - } else { - console.warn('Invalid state transition attempted'); + try { + if ( + this.state === + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS + ) { + this.state = + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET; + this.attemptStartTime = new Date(); + this.serverTime = null; + this.serverTimeRead = false; + this.connectivityErrorCount = 0; + } else { + console.warn('Invalid state transition attempted'); + } + } finally { + // Nothing is required here. We just don't want errors reaching the user. } } @@ -209,19 +221,23 @@ export class OperationMetricsCollector { * Called when the first response is received. Records first response latencies. 
*/ onResponse(projectId: string) { - if (!this.firstResponseLatency) { - if ( - this.state === - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET - ) { - this.state = - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_SOME_ROWS_RECEIVED; - const endTime = new Date(); - if (projectId && this.operationStartTime) { - this.firstResponseLatency = - endTime.getTime() - this.operationStartTime.getTime(); + try { + if (!this.firstResponseLatency) { + if ( + this.state === + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET + ) { + this.state = + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_SOME_ROWS_RECEIVED; + const endTime = new Date(); + if (projectId && this.operationStartTime) { + this.firstResponseLatency = + endTime.getTime() - this.operationStartTime.getTime(); + } } } + } finally { + // Nothing is required here. We just don't want errors reaching the user. } } @@ -232,35 +248,40 @@ export class OperationMetricsCollector { * @param {grpc.status} finalOperationStatus Information about the completed operation. */ onOperationComplete(projectId: string, finalOperationStatus: grpc.status) { - if ( - this.state === - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS - ) { - this.state = MetricsCollectorState.OPERATION_COMPLETE; - const endTime = new Date(); - if (projectId && this.operationStartTime) { - const totalTime = endTime.getTime() - this.operationStartTime.getTime(); - { - this.metricsHandlers.forEach(metricsHandler => { - if (metricsHandler.onOperationComplete) { - // finalOperationStatus?.toString() is optional because in a test - // proxy test the server does not send back the status. - metricsHandler.onOperationComplete({ - status: finalOperationStatus?.toString(), - streaming: this.streamingOperation, - metricsCollectorData: this.getMetricsCollectorData(), - client_name: `nodejs-bigtable/${version}`, - projectId, - operationLatency: totalTime, - retryCount: this.attemptCount - 1, - firstResponseLatency: this.firstResponseLatency ?? undefined, - }); - } - }); + try { + if ( + this.state === + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS + ) { + this.state = MetricsCollectorState.OPERATION_COMPLETE; + const endTime = new Date(); + if (projectId && this.operationStartTime) { + const totalTime = + endTime.getTime() - this.operationStartTime.getTime(); + { + this.metricsHandlers.forEach(metricsHandler => { + if (metricsHandler.onOperationComplete) { + // finalOperationStatus?.toString() is optional because in a test + // proxy test the server does not send back the status. + metricsHandler.onOperationComplete({ + status: finalOperationStatus?.toString(), + streaming: this.streamingOperation, + metricsCollectorData: this.getMetricsCollectorData(), + client_name: `nodejs-bigtable/${version}`, + projectId, + operationLatency: totalTime, + retryCount: this.attemptCount - 1, + firstResponseLatency: this.firstResponseLatency ?? undefined, + }); + } + }); + } } + } else { + console.warn('Invalid state transition attempted'); } - } else { - console.warn('Invalid state transition attempted'); + } finally { + // Nothing is required here. We just don't want errors reaching the user. } } @@ -272,30 +293,34 @@ export class OperationMetricsCollector { internalRepr: Map; options: {}; }) { - if (!this.serverTimeRead && this.connectivityErrorCount < 1) { - // Check serverTimeRead, connectivityErrorCount here to reduce latency. 
- const mappedEntries = new Map( - Array.from(metadata.internalRepr.entries(), ([key, value]) => [ - key, - value.toString(), - ]) - ); - const SERVER_TIMING_REGEX = /.*gfet4t7;\s*dur=(\d+\.?\d*).*/; - const SERVER_TIMING_KEY = 'server-timing'; - const durationValues = mappedEntries.get(SERVER_TIMING_KEY); - if (durationValues) { - const matchedDuration = durationValues?.match(SERVER_TIMING_REGEX); - if (matchedDuration && matchedDuration[1]) { - if (!this.serverTimeRead) { - this.serverTimeRead = true; - this.serverTime = isNaN(parseInt(matchedDuration[1])) - ? null - : parseInt(matchedDuration[1]); + try { + if (!this.serverTimeRead && this.connectivityErrorCount < 1) { + // Check serverTimeRead, connectivityErrorCount here to reduce latency. + const mappedEntries = new Map( + Array.from(metadata.internalRepr.entries(), ([key, value]) => [ + key, + value.toString(), + ]) + ); + const SERVER_TIMING_REGEX = /.*gfet4t7;\s*dur=(\d+\.?\d*).*/; + const SERVER_TIMING_KEY = 'server-timing'; + const durationValues = mappedEntries.get(SERVER_TIMING_KEY); + if (durationValues) { + const matchedDuration = durationValues?.match(SERVER_TIMING_REGEX); + if (matchedDuration && matchedDuration[1]) { + if (!this.serverTimeRead) { + this.serverTimeRead = true; + this.serverTime = isNaN(parseInt(matchedDuration[1])) + ? null + : parseInt(matchedDuration[1]); + } + } else { + this.connectivityErrorCount = 1; } - } else { - this.connectivityErrorCount = 1; } } + } finally { + // Nothing is required here. We just don't want errors reaching the user. } } @@ -306,31 +331,35 @@ export class OperationMetricsCollector { onStatusMetadataReceived(status: { metadata: {internalRepr: Map; options: {}}; }) { - if (!this.zone || !this.cluster) { - const INSTANCE_INFORMATION_KEY = 'x-goog-ext-425905942-bin'; - const mappedValue = status.metadata.internalRepr.get( - INSTANCE_INFORMATION_KEY - ) as Buffer[]; - if (mappedValue && mappedValue[0] && ResponseParams) { - const decodedValue = ResponseParams.decode( - mappedValue[0], - mappedValue[0].length - ); - if ( - decodedValue && - (decodedValue as unknown as {zoneId: string}).zoneId - ) { - this.zone = (decodedValue as unknown as {zoneId: string}).zoneId; - } - if ( - decodedValue && - (decodedValue as unknown as {clusterId: string}).clusterId - ) { - this.cluster = ( - decodedValue as unknown as {clusterId: string} - ).clusterId; + try { + if (!this.zone || !this.cluster) { + const INSTANCE_INFORMATION_KEY = 'x-goog-ext-425905942-bin'; + const mappedValue = status.metadata.internalRepr.get( + INSTANCE_INFORMATION_KEY + ) as Buffer[]; + if (mappedValue && mappedValue[0] && ResponseParams) { + const decodedValue = ResponseParams.decode( + mappedValue[0], + mappedValue[0].length + ); + if ( + decodedValue && + (decodedValue as unknown as {zoneId: string}).zoneId + ) { + this.zone = (decodedValue as unknown as {zoneId: string}).zoneId; + } + if ( + decodedValue && + (decodedValue as unknown as {clusterId: string}).clusterId + ) { + this.cluster = ( + decodedValue as unknown as {clusterId: string} + ).clusterId; + } } } + } finally { + // Nothing is required here. We just don't want errors reaching the user. 
} } } From 6d9551cd1d8c4bc3ecb4c7530e51001854643530 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 1 Apr 2025 17:23:37 -0400 Subject: [PATCH 363/448] Remove response params --- .../operation-metrics-collector.ts | 20 ++++--------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index fb3d0266c..0fde6afab 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -18,22 +18,10 @@ import {MethodName, StreamingState} from './client-side-metrics-attributes'; import {grpc} from 'google-gax'; import * as gax from 'google-gax'; -let ResponseParams: gax.protobuf.Type | null; -try { - /* - * Likely due to the Node 18 upgrade, the samples tests are failing with the - * error UnhandledPromiseRejectionWarning: Error: ENOENT: no such file or - * directory, open 'protos/google/bigtable/v2/response_params.proto'. Since - * these tests don't use this module we can suppress the error for now to - * unblock the CI pipeline. - */ - const root = gax.protobuf.loadSync( - './protos/google/bigtable/v2/response_params.proto' - ); - ResponseParams = root.lookupType('ResponseParams'); -} catch (e) { - ResponseParams = null; -} +const root = gax.protobuf.loadSync( + './protos/google/bigtable/v2/response_params.proto' +); +const ResponseParams = root.lookupType('ResponseParams'); /** * An interface representing a tabular API surface, such as a Bigtable table. From e243c115a23ac4740b4726f42b1aecd2c83d9394 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 2 Apr 2025 09:40:04 -0400 Subject: [PATCH 364/448] Revert "Remove response params" This reverts commit 6d9551cd1d8c4bc3ecb4c7530e51001854643530. --- .../operation-metrics-collector.ts | 20 +++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 0fde6afab..fb3d0266c 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -18,10 +18,22 @@ import {MethodName, StreamingState} from './client-side-metrics-attributes'; import {grpc} from 'google-gax'; import * as gax from 'google-gax'; -const root = gax.protobuf.loadSync( - './protos/google/bigtable/v2/response_params.proto' -); -const ResponseParams = root.lookupType('ResponseParams'); +let ResponseParams: gax.protobuf.Type | null; +try { + /* + * Likely due to the Node 18 upgrade, the samples tests are failing with the + * error UnhandledPromiseRejectionWarning: Error: ENOENT: no such file or + * directory, open 'protos/google/bigtable/v2/response_params.proto'. Since + * these tests don't use this module we can suppress the error for now to + * unblock the CI pipeline. + */ + const root = gax.protobuf.loadSync( + './protos/google/bigtable/v2/response_params.proto' + ); + ResponseParams = root.lookupType('ResponseParams'); +} catch (e) { + ResponseParams = null; +} /** * An interface representing a tabular API surface, such as a Bigtable table. 
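As context for the ResponseParams changes that the two commits above toggle, a minimal standalone sketch of the guarded proto-load pattern the revert restores. It assumes the response_params.proto file ships at the path used in the diffs; the zone and cluster values below are hypothetical, since in production this message arrives encoded in the 'x-goog-ext-425905942-bin' trailer that onStatusMetadataReceived decodes.

import * as gax from 'google-gax';

// Load the ResponseParams type defensively: if the proto file is missing
// (as it was in the samples CI), fall back to null instead of throwing at
// module load time.
let ResponseParams: gax.protobuf.Type | null;
try {
  const root = gax.protobuf.loadSync(
    './protos/google/bigtable/v2/response_params.proto'
  );
  ResponseParams = root.lookupType('ResponseParams');
} catch (e) {
  ResponseParams = null;
}

if (ResponseParams) {
  // Round-trip a hypothetical zone/cluster pair the same way the collector
  // decodes the trailer bytes.
  const bytes = ResponseParams.encode(
    ResponseParams.create({zoneId: 'us-central1-b', clusterId: 'fake-cluster'})
  ).finish();
  const decoded = ResponseParams.decode(bytes) as unknown as {
    zoneId: string;
    clusterId: string;
  };
  console.log(decoded.zoneId, decoded.clusterId);
}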
From 0164f557f4e8ae37f2b14868abfcfcaac880c935 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Wed, 9 Apr 2025 10:31:14 -0400
Subject: [PATCH 365/448] Remove the samples test workaround

---
 .../operation-metrics-collector.ts            | 27 ++++++++-----------
 1 file changed, 11 insertions(+), 16 deletions(-)

diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts
index fb3d0266c..c0735b9ea 100644
--- a/src/client-side-metrics/operation-metrics-collector.ts
+++ b/src/client-side-metrics/operation-metrics-collector.ts
@@ -18,22 +18,17 @@ import {MethodName, StreamingState} from './client-side-metrics-attributes';
 import {grpc} from 'google-gax';
 import * as gax from 'google-gax';
 
-let ResponseParams: gax.protobuf.Type | null;
-try {
-  /*
-   * Likely due to the Node 18 upgrade, the samples tests are failing with the
-   * error UnhandledPromiseRejectionWarning: Error: ENOENT: no such file or
-   * directory, open 'protos/google/bigtable/v2/response_params.proto'. Since
-   * these tests don't use this module we can suppress the error for now to
-   * unblock the CI pipeline.
-   */
-  const root = gax.protobuf.loadSync(
-    './protos/google/bigtable/v2/response_params.proto'
-  );
-  ResponseParams = root.lookupType('ResponseParams');
-} catch (e) {
-  ResponseParams = null;
-}
+/*
+ * Likely due to the Node 18 upgrade, the samples tests are failing with the
+ * error UnhandledPromiseRejectionWarning: Error: ENOENT: no such file or
+ * directory, open 'protos/google/bigtable/v2/response_params.proto'. Since
+ * these tests don't use this module we can suppress the error for now to
+ * unblock the CI pipeline.
+ */
+const root = gax.protobuf.loadSync(
+  './protos/google/bigtable/v2/response_params.proto'
+);
+const ResponseParams = root.lookupType('ResponseParams');
 
 /**
  * An interface representing a tabular API surface, such as a Bigtable table.

From 795cccf6c512b38087c80fbc4446f73474096dbb Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Wed, 9 Apr 2025 10:52:19 -0400
Subject: [PATCH 366/448] Add a wrapper for the try/catch logic

---
 .../operation-metrics-collector.ts            | 59 ++++++++++---------
 1 file changed, 31 insertions(+), 28 deletions(-)

diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts
index c0735b9ea..7366c3c02 100644
--- a/src/client-side-metrics/operation-metrics-collector.ts
+++ b/src/client-side-metrics/operation-metrics-collector.ts
@@ -18,6 +18,10 @@ import {MethodName, StreamingState} from './client-side-metrics-attributes';
 import {grpc} from 'google-gax';
 import * as gax from 'google-gax';
 
+// When this environment variable is set then print any errors associated
+// with failures in the metrics collector.
+const METRIC_DEBUG = process.env.METRIC_DEBUG;
+
 /*
  * Likely due to the Node 18 upgrade, the samples tests are failing with the
  * error UnhandledPromiseRejectionWarning: Error: ENOENT: no such file or
@@ -66,6 +70,19 @@ enum MetricsCollectorState {
   OPERATION_COMPLETE,
 }
 
+// This method swallows errors when metrics debugging is not enabled so
+// that errors don't bubble up to the user.
+function withMetricsDebug<T>(fn: () => T): T | undefined {
+  try {
+    return fn();
+  } catch (e) {
+    if (METRIC_DEBUG) {
+      console.warn(e);
+    }
+  }
+  return;
+}
+
 /**
  * A class for tracing and recording client-side metrics related to Bigtable operations.
  */
@@ -131,7 +148,7 @@ export class OperationMetricsCollector {
    * Called when the operation starts. Records the start time.
*/ onOperationStart() { - try { + withMetricsDebug(() => { if (this.state === MetricsCollectorState.OPERATION_NOT_STARTED) { this.operationStartTime = new Date(); this.firstResponseLatency = null; @@ -140,9 +157,7 @@ export class OperationMetricsCollector { } else { console.warn('Invalid state transition'); } - } finally { - // Nothing is required here. We just don't want errors reaching the user. - } + }); } /** @@ -151,7 +166,7 @@ export class OperationMetricsCollector { * @param {grpc.status} attemptStatus The grpc status for the attempt. */ onAttemptComplete(projectId: string, attemptStatus: grpc.status) { - try { + withMetricsDebug(() => { if ( this.state === MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET || @@ -184,16 +199,14 @@ export class OperationMetricsCollector { } else { console.warn('Invalid state transition attempted'); } - } finally { - // Nothing is required here. We just don't want errors reaching the user. - } + }); } /** * Called when a new attempt starts. Records the start time of the attempt. */ onAttemptStart() { - try { + withMetricsDebug(() => { if ( this.state === MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS @@ -207,16 +220,14 @@ export class OperationMetricsCollector { } else { console.warn('Invalid state transition attempted'); } - } finally { - // Nothing is required here. We just don't want errors reaching the user. - } + }); } /** * Called when the first response is received. Records first response latencies. */ onResponse(projectId: string) { - try { + withMetricsDebug(() => { if (!this.firstResponseLatency) { if ( this.state === @@ -231,9 +242,7 @@ export class OperationMetricsCollector { } } } - } finally { - // Nothing is required here. We just don't want errors reaching the user. - } + }); } /** @@ -243,7 +252,7 @@ export class OperationMetricsCollector { * @param {grpc.status} finalOperationStatus Information about the completed operation. */ onOperationComplete(projectId: string, finalOperationStatus: grpc.status) { - try { + withMetricsDebug(() => { if ( this.state === MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS @@ -275,9 +284,7 @@ export class OperationMetricsCollector { } else { console.warn('Invalid state transition attempted'); } - } finally { - // Nothing is required here. We just don't want errors reaching the user. - } + }); } /** @@ -288,7 +295,7 @@ export class OperationMetricsCollector { internalRepr: Map; options: {}; }) { - try { + withMetricsDebug(() => { if (!this.serverTimeRead && this.connectivityErrorCount < 1) { // Check serverTimeRead, connectivityErrorCount here to reduce latency. const mappedEntries = new Map( @@ -314,9 +321,7 @@ export class OperationMetricsCollector { } } } - } finally { - // Nothing is required here. We just don't want errors reaching the user. - } + }); } /** @@ -326,7 +331,7 @@ export class OperationMetricsCollector { onStatusMetadataReceived(status: { metadata: {internalRepr: Map; options: {}}; }) { - try { + withMetricsDebug(() => { if (!this.zone || !this.cluster) { const INSTANCE_INFORMATION_KEY = 'x-goog-ext-425905942-bin'; const mappedValue = status.metadata.internalRepr.get( @@ -353,8 +358,6 @@ export class OperationMetricsCollector { } } } - } finally { - // Nothing is required here. We just don't want errors reaching the user. 
- } + }); } } From 75f5d60bb07e40162f1f4dfc4ffd78b03bfe2861 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 9 Apr 2025 11:10:45 -0400 Subject: [PATCH 367/448] Revert "Remove the samples test workaround" This reverts commit 0164f557f4e8ae37f2b14868abfcfcaac880c935. # Conflicts: # src/client-side-metrics/operation-metrics-collector.ts --- .../operation-metrics-collector.ts | 27 +++++++++++-------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 7366c3c02..b4e08d179 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -22,17 +22,22 @@ import * as gax from 'google-gax'; // with failures in the metrics collector. const METRIC_DEBUG = process.env.METRIC_DEBUG; -/* - * Likely due to the Node 18 upgrade, the samples tests are failing with the - * error UnhandledPromiseRejectionWarning: Error: ENOENT: no such file or - * directory, open 'protos/google/bigtable/v2/response_params.proto'. Since - * these tests don't use this module we can suppress the error for now to - * unblock the CI pipeline. - */ -const root = gax.protobuf.loadSync( - './protos/google/bigtable/v2/response_params.proto' -); -const ResponseParams = root.lookupType('ResponseParams'); +let ResponseParams: gax.protobuf.Type | null; +try { + /* + * Likely due to the Node 18 upgrade, the samples tests are failing with the + * error UnhandledPromiseRejectionWarning: Error: ENOENT: no such file or + * directory, open 'protos/google/bigtable/v2/response_params.proto'. Since + * these tests don't use this module we can suppress the error for now to + * unblock the CI pipeline. + */ + const root = gax.protobuf.loadSync( + './protos/google/bigtable/v2/response_params.proto' + ); + ResponseParams = root.lookupType('ResponseParams'); +} catch (e) { + ResponseParams = null; +} /** * An interface representing a tabular API surface, such as a Bigtable table. From ff63f4b8f15de814d960ca82bf47d67457a09c88 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 9 Apr 2025 11:12:59 -0400 Subject: [PATCH 368/448] Spelling correction --- src/client-side-metrics/operation-metrics-collector.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index b4e08d179..b562bfa67 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -20,7 +20,7 @@ import * as gax from 'google-gax'; // When this environment variable is set then print any errors associated // with failures in the metrics collector. 
-const METRIC_DEBUG = process.env.METRIC_DEBUG;
+const METRICS_DEBUG = process.env.METRICS_DEBUG;
 
 let ResponseParams: gax.protobuf.Type | null;
 try {
@@ -81,7 +81,7 @@ function withMetricsDebug<T>(fn: () => T): T | undefined {
   try {
     return fn();
   } catch (e) {
-    if (METRIC_DEBUG) {
+    if (METRICS_DEBUG) {
       console.warn(e);
     }
   }

From 5a07489a6056b9f51ae17160170dea479f4576da Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Wed, 9 Apr 2025 11:39:13 -0400
Subject: [PATCH 369/448] Add a helper method for checking the state

---
 .../operation-metrics-collector.ts            | 203 ++++++++++--------
 1 file changed, 111 insertions(+), 92 deletions(-)

diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts
index b562bfa67..b43621685 100644
--- a/src/client-side-metrics/operation-metrics-collector.ts
+++ b/src/client-side-metrics/operation-metrics-collector.ts
@@ -88,6 +88,20 @@ function withMetricsDebug<T>(fn: () => T): T | undefined {
   return;
 }
 
+// Checks that the state transition is valid and if not it throws a warning.
+function checkState<T>(
+  currentState: MetricsCollectorState,
+  allowedStates: MetricsCollectorState[],
+  fn: () => T
+): T | undefined {
+  if (allowedStates.includes(currentState)) {
+    return fn();
+  } else {
+    console.warn('Invalid state transition');
+  }
+  return;
+}
+
 /**
  * A class for tracing and recording client-side metrics related to Bigtable operations.
  */
@@ -154,14 +168,16 @@ export class OperationMetricsCollector {
    */
   onOperationStart() {
     withMetricsDebug(() => {
-      if (this.state === MetricsCollectorState.OPERATION_NOT_STARTED) {
-        this.operationStartTime = new Date();
-        this.firstResponseLatency = null;
-        this.state =
-          MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS;
-      } else {
-        console.warn('Invalid state transition');
-      }
+      checkState(
+        this.state,
+        [MetricsCollectorState.OPERATION_NOT_STARTED],
+        () => {
+          this.operationStartTime = new Date();
+          this.firstResponseLatency = null;
+          this.state =
+            MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS;
+        }
+      );
     });
   }
 
@@ -172,38 +188,39 @@ export class OperationMetricsCollector {
    */
   onAttemptComplete(projectId: string, attemptStatus: grpc.status) {
     withMetricsDebug(() => {
-      if (
-        this.state ===
-          MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET ||
-        this.state ===
-          MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_SOME_ROWS_RECEIVED
-      ) {
-        this.state =
-          MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS;
-        this.attemptCount++;
-        const endTime = new Date();
-        if (projectId && this.attemptStartTime) {
-          const totalTime = endTime.getTime() - this.attemptStartTime.getTime();
-          this.metricsHandlers.forEach(metricsHandler => {
-            if (metricsHandler.onAttemptComplete) {
-              // attemptStatus?.toString() is optional because in a test proxy
-              // test the server does not send back the status.
-              metricsHandler.onAttemptComplete({
-                attemptLatency: totalTime,
-                serverLatency: this.serverTime ??
undefined, - connectivityErrorCount: this.connectivityErrorCount, - streaming: this.streamingOperation, - status: attemptStatus?.toString(), - client_name: `nodejs-bigtable/${version}`, - metricsCollectorData: this.getMetricsCollectorData(), - projectId, - }); - } - }); + checkState( + this.state, + [ + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET, + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_SOME_ROWS_RECEIVED, + ], + () => { + this.state = + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS; + this.attemptCount++; + const endTime = new Date(); + if (projectId && this.attemptStartTime) { + const totalTime = + endTime.getTime() - this.attemptStartTime.getTime(); + this.metricsHandlers.forEach(metricsHandler => { + if (metricsHandler.onAttemptComplete) { + // attemptStatus?.toString() is optional because in a test proxy + // test the server does not send back the status. + metricsHandler.onAttemptComplete({ + attemptLatency: totalTime, + serverLatency: this.serverTime ?? undefined, + connectivityErrorCount: this.connectivityErrorCount, + streaming: this.streamingOperation, + status: attemptStatus?.toString(), + client_name: `nodejs-bigtable/${version}`, + metricsCollectorData: this.getMetricsCollectorData(), + projectId, + }); + } + }); + } } - } else { - console.warn('Invalid state transition attempted'); - } + ); }); } @@ -212,19 +229,18 @@ export class OperationMetricsCollector { */ onAttemptStart() { withMetricsDebug(() => { - if ( - this.state === - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS - ) { - this.state = - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET; - this.attemptStartTime = new Date(); - this.serverTime = null; - this.serverTimeRead = false; - this.connectivityErrorCount = 0; - } else { - console.warn('Invalid state transition attempted'); - } + checkState( + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS, + [MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS], + () => { + this.state = + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET; + this.attemptStartTime = new Date(); + this.serverTime = null; + this.serverTimeRead = false; + this.connectivityErrorCount = 0; + } + ); }); } @@ -234,18 +250,21 @@ export class OperationMetricsCollector { onResponse(projectId: string) { withMetricsDebug(() => { if (!this.firstResponseLatency) { - if ( - this.state === - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET - ) { - this.state = - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_SOME_ROWS_RECEIVED; - const endTime = new Date(); - if (projectId && this.operationStartTime) { - this.firstResponseLatency = - endTime.getTime() - this.operationStartTime.getTime(); + checkState( + this.state, + [ + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET, + ], + () => { + this.state = + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_SOME_ROWS_RECEIVED; + const endTime = new Date(); + if (projectId && this.operationStartTime) { + this.firstResponseLatency = + endTime.getTime() - this.operationStartTime.getTime(); + } } - } + ); } }); } @@ -258,37 +277,37 @@ export class OperationMetricsCollector { */ onOperationComplete(projectId: string, finalOperationStatus: grpc.status) { withMetricsDebug(() => { - if ( - this.state === - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS - ) { - this.state = MetricsCollectorState.OPERATION_COMPLETE; - const endTime = new 
Date(); - if (projectId && this.operationStartTime) { - const totalTime = - endTime.getTime() - this.operationStartTime.getTime(); - { - this.metricsHandlers.forEach(metricsHandler => { - if (metricsHandler.onOperationComplete) { - // finalOperationStatus?.toString() is optional because in a test - // proxy test the server does not send back the status. - metricsHandler.onOperationComplete({ - status: finalOperationStatus?.toString(), - streaming: this.streamingOperation, - metricsCollectorData: this.getMetricsCollectorData(), - client_name: `nodejs-bigtable/${version}`, - projectId, - operationLatency: totalTime, - retryCount: this.attemptCount - 1, - firstResponseLatency: this.firstResponseLatency ?? undefined, - }); - } - }); + checkState( + this.state, + [MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS], + () => { + this.state = MetricsCollectorState.OPERATION_COMPLETE; + const endTime = new Date(); + if (projectId && this.operationStartTime) { + const totalTime = + endTime.getTime() - this.operationStartTime.getTime(); + { + this.metricsHandlers.forEach(metricsHandler => { + if (metricsHandler.onOperationComplete) { + // finalOperationStatus?.toString() is optional because in a test + // proxy test the server does not send back the status. + metricsHandler.onOperationComplete({ + status: finalOperationStatus?.toString(), + streaming: this.streamingOperation, + metricsCollectorData: this.getMetricsCollectorData(), + client_name: `nodejs-bigtable/${version}`, + projectId, + operationLatency: totalTime, + retryCount: this.attemptCount - 1, + firstResponseLatency: + this.firstResponseLatency ?? undefined, + }); + } + }); + } } } - } else { - console.warn('Invalid state transition attempted'); - } + ); }); } From d21bef0b03fc0ca861cf09edf6eb47b932dd8f6f Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 9 Apr 2025 11:47:16 -0400 Subject: [PATCH 370/448] this.state --- src/client-side-metrics/operation-metrics-collector.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index b43621685..010b2b18e 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -230,7 +230,7 @@ export class OperationMetricsCollector { onAttemptStart() { withMetricsDebug(() => { checkState( - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS, + this.state, [MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS], () => { this.state = From 9163d7a34ec70d0278c2b106d7d607fa4dd6a2bf Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 9 Apr 2025 14:12:46 -0400 Subject: [PATCH 371/448] Optional statuses --- src/client-side-metrics/operation-metrics-collector.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 010b2b18e..5efe67fda 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -211,7 +211,7 @@ export class OperationMetricsCollector { serverLatency: this.serverTime ?? 
undefined, connectivityErrorCount: this.connectivityErrorCount, streaming: this.streamingOperation, - status: attemptStatus?.toString(), + status: attemptStatus.toString(), client_name: `nodejs-bigtable/${version}`, metricsCollectorData: this.getMetricsCollectorData(), projectId, @@ -289,10 +289,10 @@ export class OperationMetricsCollector { { this.metricsHandlers.forEach(metricsHandler => { if (metricsHandler.onOperationComplete) { - // finalOperationStatus?.toString() is optional because in a test + // finalOperationStatus.toString() is optional because in a test // proxy test the server does not send back the status. metricsHandler.onOperationComplete({ - status: finalOperationStatus?.toString(), + status: finalOperationStatus.toString(), streaming: this.streamingOperation, metricsCollectorData: this.getMetricsCollectorData(), client_name: `nodejs-bigtable/${version}`, From da9eb8e8a6fa30e1c97475a5a15ae087ae21943e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 9 Apr 2025 14:17:24 -0400 Subject: [PATCH 372/448] Eliminate old comments --- src/client-side-metrics/operation-metrics-collector.ts | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 5efe67fda..80eccc5ae 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -204,8 +204,6 @@ export class OperationMetricsCollector { endTime.getTime() - this.attemptStartTime.getTime(); this.metricsHandlers.forEach(metricsHandler => { if (metricsHandler.onAttemptComplete) { - // attemptStatus?.toString() is optional because in a test proxy - // test the server does not send back the status. metricsHandler.onAttemptComplete({ attemptLatency: totalTime, serverLatency: this.serverTime ?? undefined, @@ -289,8 +287,6 @@ export class OperationMetricsCollector { { this.metricsHandlers.forEach(metricsHandler => { if (metricsHandler.onOperationComplete) { - // finalOperationStatus.toString() is optional because in a test - // proxy test the server does not send back the status. 
metricsHandler.onOperationComplete({ status: finalOperationStatus.toString(), streaming: this.streamingOperation, From ae10660c2e76d51e57cd30f746da1d7ef325341c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 9 Apr 2025 15:05:48 -0400 Subject: [PATCH 373/448] Add a generateClientUuid function --- src/index.ts | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/index.ts b/src/index.ts index 6a15a2cb0..5fcf33832 100644 --- a/src/index.ts +++ b/src/index.ts @@ -19,6 +19,7 @@ import * as extend from 'extend'; import {GoogleAuth, CallOptions, grpc as gaxVendoredGrpc} from 'google-gax'; import * as gax from 'google-gax'; import * as protos from '../protos/protos'; +import * as os from 'os'; import {AppProfile} from './app-profile'; import {Cluster} from './cluster'; @@ -134,6 +135,13 @@ function getDomain(prefix: string, opts?: gax.ClientOptions) { }`; } +function generateClientUuid() { + const hostname = os.hostname() || 'localhost'; + const currentPid = process.pid || ''; + const uuid4 = crypto.randomUUID(); + return `node-${uuid4}-${currentPid}${hostname}`; +} + /** * @typedef {object} ClientConfig * @property {string} [apiEndpoint] Override the default API endpoint used @@ -424,7 +432,7 @@ export class Bigtable { appProfileId?: string; projectName: string; shouldReplaceProjectIdToken: boolean; - clientUid = crypto.randomUUID(); + clientUid = generateClientUuid(); static AppProfile: AppProfile; static Instance: Instance; static Cluster: Cluster; From 6318dcc53cbb25b1c507218144779c366b48910b Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 9 Apr 2025 15:11:50 -0400 Subject: [PATCH 374/448] Changed the name to metrics enabled --- src/index.ts | 12 ++++++------ src/tabular-api-surface.ts | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/index.ts b/src/index.ts index 5fcf33832..ea86fda05 100644 --- a/src/index.ts +++ b/src/index.ts @@ -107,7 +107,7 @@ export interface BigtableOptions extends gax.GoogleAuthOptions { */ BigtableTableAdminClient?: gax.ClientOptions; - collectMetrics?: boolean; + metricsEnabled?: boolean; } /** @@ -440,11 +440,11 @@ export class Bigtable { // Therefore, metrics handlers should be created at the client level and // reused throughout the library to reduce latency: metricsHandlers: IMetricsHandler[]; - // collectMetrics is a member variable that is used to ensure that if the + // metricsEnabled is a member variable that is used to ensure that if the // user provides a `false` value and opts out of metrics collection that // the metrics collector is ignored altogether to reduce latency in the // client. - collectMetrics: boolean; + metricsEnabled: boolean; constructor(options: BigtableOptions = {}) { // Determine what scopes are needed. 
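As a usage note on the option handled above, a minimal sketch of how a caller opts out of client-side metrics once this rename lands; the project id is a placeholder and the rest of the client setup is unchanged.

import {Bigtable} from '@google-cloud/bigtable';

// With this patch the opt-out flag is `metricsEnabled` (formerly
// `collectMetrics`); passing false makes the client skip the GCP metrics
// handlers entirely, so no collector work happens on the request path.
const bigtable = new Bigtable({
  projectId: 'my-project', // placeholder
  metricsEnabled: false,
});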
@@ -543,11 +543,11 @@ export class Bigtable { this.projectName = `projects/${this.projectId}`; this.shouldReplaceProjectIdToken = this.projectId === '{{projectId}}'; - if (options.collectMetrics === false) { - this.collectMetrics = false; + if (options.metricsEnabled === false) { + this.metricsEnabled = false; this.metricsHandlers = []; } else { - this.collectMetrics = true; + this.metricsEnabled = true; this.metricsHandlers = [ new GCPMetricsHandler(new CloudMonitoringExporter()), ]; diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index d8ab3a5d7..a2e94da9e 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -337,7 +337,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); } return originalEnd(chunk, encoding, cb); }; - const metricsCollector = this.bigtable.collectMetrics + const metricsCollector = this.bigtable.metricsEnabled ? new OperationMetricsCollector( this, this.bigtable.metricsHandlers, @@ -524,7 +524,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); return false; }; - if (this.bigtable.collectMetrics) { + if (this.bigtable.metricsEnabled) { requestStream .on( 'metadata', From fa8b5021bd969850ddf6af12a7455fcf9a96b138 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 9 Apr 2025 15:20:46 -0400 Subject: [PATCH 375/448] End call on ignored status code --- src/tabular-api-surface.ts | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index a2e94da9e..86ff2147d 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -550,6 +550,14 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // We ignore the `cancelled` "error", since we are the ones who cause // it when the user calls `.abort()`. userStream.end(); + metricsCollector?.onAttemptComplete( + this.bigtable.projectId, + error.code + ); + metricsCollector?.onOperationComplete( + this.bigtable.projectId, + error.code + ); return; } numConsecutiveErrors++; From 484e4b6f460b958afbe454f88e0926ccf6c96e89 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 9 Apr 2025 15:37:59 -0400 Subject: [PATCH 376/448] eliminate mention of gemini --- src/tabular-api-surface.ts | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 86ff2147d..95910f74b 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -587,10 +587,9 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); ) { // // The TestReadRows_Generic_CloseClient conformance test requires - // a grpc code to be present when the client is closed. According - // to Gemini, the appropriate code for a closed client is - // CANCELLED since the user actually cancelled the call by closing - // the client. + // a grpc code to be present when the client is closed. The + // appropriate code for a closed client is CANCELLED since the + // user actually cancelled the call by closing the client. 
// error.code = grpc.status.CANCELLED; } From 716681a0786d4c31e7085a7b76f7901474579b92 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 9 Apr 2025 15:48:03 -0400 Subject: [PATCH 377/448] Add onOperationSucceeded helper --- .../operation-metrics-collector.ts | 12 ++++++++++++ src/tabular-api-surface.ts | 12 ++---------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 80eccc5ae..b4fa2d354 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -307,6 +307,18 @@ export class OperationMetricsCollector { }); } + /** + * This is a useful helper method for all the times we want to record that an + * attempt is complete when an operation is also complete. + * + * @param {string} projectId The id of the project. + * @param {grpc.status} finalOperationStatus Information about the completed operation. + */ + onOperationSucceeded(projectId: string, attemptStatus: grpc.status) { + this.onAttemptComplete(projectId, attemptStatus); + this.onOperationComplete(projectId, attemptStatus); + } + /** * Called when metadata is received. Extracts server timing information if available. * @param {object} metadata The received metadata. diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 95910f74b..3f08682a4 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -550,11 +550,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // We ignore the `cancelled` "error", since we are the ones who cause // it when the user calls `.abort()`. userStream.end(); - metricsCollector?.onAttemptComplete( - this.bigtable.projectId, - error.code - ); - metricsCollector?.onOperationComplete( + metricsCollector?.onOperationSucceeded( this.bigtable.projectId, error.code ); @@ -593,11 +589,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // error.code = grpc.status.CANCELLED; } - metricsCollector?.onAttemptComplete( - this.bigtable.projectId, - error.code - ); - metricsCollector?.onOperationComplete( + metricsCollector?.onOperationSucceeded( this.bigtable.projectId, error.code ); From 9dbeb34428126d6c747488da409b8ea5f10594ab Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 9 Apr 2025 15:49:51 -0400 Subject: [PATCH 378/448] Change name to last attempt completed --- src/client-side-metrics/operation-metrics-collector.ts | 2 +- src/tabular-api-surface.ts | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index b4fa2d354..59aa6f952 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -314,7 +314,7 @@ export class OperationMetricsCollector { * @param {string} projectId The id of the project. * @param {grpc.status} finalOperationStatus Information about the completed operation. 
*/ - onOperationSucceeded(projectId: string, attemptStatus: grpc.status) { + onLastAttemptCompleted(projectId: string, attemptStatus: grpc.status) { this.onAttemptComplete(projectId, attemptStatus); this.onOperationComplete(projectId, attemptStatus); } diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 3f08682a4..478af158c 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -550,7 +550,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // We ignore the `cancelled` "error", since we are the ones who cause // it when the user calls `.abort()`. userStream.end(); - metricsCollector?.onOperationSucceeded( + metricsCollector?.onLastAttemptCompleted( this.bigtable.projectId, error.code ); @@ -589,7 +589,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // error.code = grpc.status.CANCELLED; } - metricsCollector?.onOperationSucceeded( + metricsCollector?.onLastAttemptCompleted( this.bigtable.projectId, error.code ); From 2e342d0b2f4a139f1f2f4349b282ee617e21b583 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 9 Apr 2025 15:50:47 -0400 Subject: [PATCH 379/448] Use onLastAttemptCompleted here --- src/tabular-api-surface.ts | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 478af158c..e1e5ad57c 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -604,14 +604,10 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); }) .on('end', () => { activeRequestStream = null; - metricsCollector?.onAttemptComplete( + metricsCollector?.onLastAttemptCompleted( this.bigtable.projectId, grpc.status.OK - ); - metricsCollector?.onOperationComplete( - this.bigtable.projectId, - grpc.status.OK - ); + ) }); rowStreamPipe(rowStream, userStream); }; From 530c9bdd3ce547aac71ed5440b9d3ac5806c7c32 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 9 Apr 2025 15:52:07 -0400 Subject: [PATCH 380/448] run linter --- src/tabular-api-surface.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index e1e5ad57c..57b36f742 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -607,7 +607,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); metricsCollector?.onLastAttemptCompleted( this.bigtable.projectId, grpc.status.OK - ) + ); }); rowStreamPipe(rowStream, userStream); }; From 81509cd3073701b1ee191a4706b06006b02cb7fb Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 9 Apr 2025 16:27:48 -0400 Subject: [PATCH 381/448] Add handlers no matter what --- src/tabular-api-surface.ts | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 57b36f742..c7742687c 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -524,23 +524,21 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); return false; }; - if (this.bigtable.metricsEnabled) { - requestStream - .on( - 'metadata', - (metadata: {internalRepr: Map; options: {}}) => { - metricsCollector?.onMetadataReceived(metadata); - } - ) - .on( - 'status', - (status: { - metadata: {internalRepr: Map; options: {}}; - }) => { - metricsCollector?.onStatusMetadataReceived(status); - } - ); - } + requestStream + .on( + 'metadata', + (metadata: {internalRepr: Map; options: {}}) => { + 
metricsCollector?.onMetadataReceived(metadata); + } + ) + .on( + 'status', + (status: { + metadata: {internalRepr: Map; options: {}}; + }) => { + metricsCollector?.onStatusMetadataReceived(status); + } + ); rowStream .on('error', (error: ServiceError) => { From f198a4de9d55984e50b7bb61d030ea966ee89818 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 9 Apr 2025 16:30:23 -0400 Subject: [PATCH 382/448] Add warning if check fails --- src/client-side-metrics/operation-metrics-collector.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 59aa6f952..b241dd6a8 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -216,6 +216,8 @@ export class OperationMetricsCollector { }); } }); + } else { + console.warn('ProjectId and start time should always be provided'); } } ); From f204404cc5fbbaa6de778b8d635abef58430538f Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 10 Apr 2025 10:01:32 -0400 Subject: [PATCH 383/448] Add console warns for various checks --- src/client-side-metrics/operation-metrics-collector.ts | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index b241dd6a8..5a1e2ebfd 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -262,6 +262,10 @@ export class OperationMetricsCollector { if (projectId && this.operationStartTime) { this.firstResponseLatency = endTime.getTime() - this.operationStartTime.getTime(); + } else { + console.warn( + 'ProjectId and operationStartTime should always be provided' + ); } } ); @@ -303,6 +307,10 @@ export class OperationMetricsCollector { } }); } + } else { + console.warn( + 'projectId and operation start time should always be available here' + ); } } ); From 7d459a484990d1a06ca265622eff727638138175 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 11 Apr 2025 10:37:22 -0400 Subject: [PATCH 384/448] Remove the try block --- .../operation-metrics-collector.ts | 27 ++++++++----------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 5a1e2ebfd..3eb09a075 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -22,22 +22,17 @@ import * as gax from 'google-gax'; // with failures in the metrics collector. const METRICS_DEBUG = process.env.METRICS_DEBUG; -let ResponseParams: gax.protobuf.Type | null; -try { - /* - * Likely due to the Node 18 upgrade, the samples tests are failing with the - * error UnhandledPromiseRejectionWarning: Error: ENOENT: no such file or - * directory, open 'protos/google/bigtable/v2/response_params.proto'. Since - * these tests don't use this module we can suppress the error for now to - * unblock the CI pipeline. - */ - const root = gax.protobuf.loadSync( - './protos/google/bigtable/v2/response_params.proto' - ); - ResponseParams = root.lookupType('ResponseParams'); -} catch (e) { - ResponseParams = null; -} +/* + * The samples tests are failing with the + * error UnhandledPromiseRejectionWarning: Error: ENOENT: no such file or + * directory, open 'protos/google/bigtable/v2/response_params.proto'. 
Since + * these tests don't use this module we can suppress the error for now to + * unblock the CI pipeline. + */ +const root = gax.protobuf.loadSync( + './protos/google/bigtable/v2/response_params.proto' +); +const ResponseParams = root.lookupType('ResponseParams'); /** * An interface representing a tabular API surface, such as a Bigtable table. From 914a081bb728de5c3ce3d6c94e7aa175342812d4 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 11 Apr 2025 10:39:44 -0400 Subject: [PATCH 385/448] Modified comment --- src/index.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/index.ts b/src/index.ts index ea86fda05..ae5797443 100644 --- a/src/index.ts +++ b/src/index.ts @@ -436,9 +436,9 @@ export class Bigtable { static AppProfile: AppProfile; static Instance: Instance; static Cluster: Cluster; - // Each time a metrics handler is created it introduces significant latency. - // Therefore, metrics handlers should be created at the client level and - // reused throughout the library to reduce latency: + // Metrics handlers should be created at the client level and + // reused throughout the library to reduce latency due to creation of the + // open telemetry instruments: metricsHandlers: IMetricsHandler[]; // metricsEnabled is a member variable that is used to ensure that if the // user provides a `false` value and opts out of metrics collection that From 16a2134dde807015e06da874ee073c7a0ba83f78 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 11 Apr 2025 10:41:03 -0400 Subject: [PATCH 386/448] Modified export comment --- src/index.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/index.ts b/src/index.ts index ae5797443..101826653 100644 --- a/src/index.ts +++ b/src/index.ts @@ -438,7 +438,8 @@ export class Bigtable { static Cluster: Cluster; // Metrics handlers should be created at the client level and // reused throughout the library to reduce latency due to creation of the - // open telemetry instruments: + // open telemetry instruments. We also only want one OTEL stack per project + // so that we don't encounter errors from exporting too frequently: metricsHandlers: IMetricsHandler[]; // metricsEnabled is a member variable that is used to ensure that if the // user provides a `false` value and opts out of metrics collection that From 7a2d5d0e5481064a10d785e81085fa3eeda8f0ba Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 11 Apr 2025 11:52:22 -0400 Subject: [PATCH 387/448] Make the metrics handler static on the MC MC = OperationMetricsCollector --- .../operation-metrics-collector.ts | 68 ++++++++++--------- src/index.ts | 9 --- src/tabular-api-surface.ts | 3 +- test/metrics-collector/metrics-collector.ts | 6 +- 4 files changed, 42 insertions(+), 44 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 3eb09a075..ffed407b8 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -13,10 +13,11 @@ // limitations under the License. 
import * as fs from 'fs'; -import {IMetricsHandler} from './metrics-handler'; import {MethodName, StreamingState} from './client-side-metrics-attributes'; import {grpc} from 'google-gax'; import * as gax from 'google-gax'; +import {GCPMetricsHandler} from './gcp-metrics-handler'; +import {CloudMonitoringExporter} from './exporter'; // When this environment variable is set then print any errors associated // with failures in the metrics collector. @@ -109,22 +110,22 @@ export class OperationMetricsCollector { private tabularApiSurface: ITabularApiSurface; private methodName: MethodName; private attemptCount = 0; - private metricsHandlers: IMetricsHandler[]; private firstResponseLatency: number | null; private serverTimeRead: boolean; private serverTime: number | null; private connectivityErrorCount: number; private streamingOperation: StreamingState; + static metricsHandlers = [ + new GCPMetricsHandler(new CloudMonitoringExporter()), + ]; /** * @param {ITabularApiSurface} tabularApiSurface Information about the Bigtable table being accessed. - * @param {IMetricsHandler[]} metricsHandlers The metrics handlers used for recording metrics. * @param {MethodName} methodName The name of the method being traced. * @param {StreamingState} streamingOperation Whether or not the call is a streaming operation. */ constructor( tabularApiSurface: ITabularApiSurface, - metricsHandlers: IMetricsHandler[], methodName: MethodName, streamingOperation: StreamingState ) { @@ -135,7 +136,6 @@ export class OperationMetricsCollector { this.methodName = methodName; this.operationStartTime = null; this.attemptStartTime = null; - this.metricsHandlers = metricsHandlers; this.firstResponseLatency = null; this.serverTimeRead = false; this.serverTime = null; @@ -197,20 +197,22 @@ export class OperationMetricsCollector { if (projectId && this.attemptStartTime) { const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); - this.metricsHandlers.forEach(metricsHandler => { - if (metricsHandler.onAttemptComplete) { - metricsHandler.onAttemptComplete({ - attemptLatency: totalTime, - serverLatency: this.serverTime ?? undefined, - connectivityErrorCount: this.connectivityErrorCount, - streaming: this.streamingOperation, - status: attemptStatus.toString(), - client_name: `nodejs-bigtable/${version}`, - metricsCollectorData: this.getMetricsCollectorData(), - projectId, - }); + OperationMetricsCollector.metricsHandlers.forEach( + metricsHandler => { + if (metricsHandler.onAttemptComplete) { + metricsHandler.onAttemptComplete({ + attemptLatency: totalTime, + serverLatency: this.serverTime ?? undefined, + connectivityErrorCount: this.connectivityErrorCount, + streaming: this.streamingOperation, + status: attemptStatus.toString(), + client_name: `nodejs-bigtable/${version}`, + metricsCollectorData: this.getMetricsCollectorData(), + projectId, + }); + } } - }); + ); } else { console.warn('ProjectId and start time should always be provided'); } @@ -286,21 +288,23 @@ export class OperationMetricsCollector { const totalTime = endTime.getTime() - this.operationStartTime.getTime(); { - this.metricsHandlers.forEach(metricsHandler => { - if (metricsHandler.onOperationComplete) { - metricsHandler.onOperationComplete({ - status: finalOperationStatus.toString(), - streaming: this.streamingOperation, - metricsCollectorData: this.getMetricsCollectorData(), - client_name: `nodejs-bigtable/${version}`, - projectId, - operationLatency: totalTime, - retryCount: this.attemptCount - 1, - firstResponseLatency: - this.firstResponseLatency ?? 
undefined, - }); + OperationMetricsCollector.metricsHandlers.forEach( + metricsHandler => { + if (metricsHandler.onOperationComplete) { + metricsHandler.onOperationComplete({ + status: finalOperationStatus.toString(), + streaming: this.streamingOperation, + metricsCollectorData: this.getMetricsCollectorData(), + client_name: `nodejs-bigtable/${version}`, + projectId, + operationLatency: totalTime, + retryCount: this.attemptCount - 1, + firstResponseLatency: + this.firstResponseLatency ?? undefined, + }); + } } - }); + ); } } else { console.warn( diff --git a/src/index.ts b/src/index.ts index 101826653..a4210f41f 100644 --- a/src/index.ts +++ b/src/index.ts @@ -436,11 +436,6 @@ export class Bigtable { static AppProfile: AppProfile; static Instance: Instance; static Cluster: Cluster; - // Metrics handlers should be created at the client level and - // reused throughout the library to reduce latency due to creation of the - // open telemetry instruments. We also only want one OTEL stack per project - // so that we don't encounter errors from exporting too frequently: - metricsHandlers: IMetricsHandler[]; // metricsEnabled is a member variable that is used to ensure that if the // user provides a `false` value and opts out of metrics collection that // the metrics collector is ignored altogether to reduce latency in the @@ -546,12 +541,8 @@ export class Bigtable { if (options.metricsEnabled === false) { this.metricsEnabled = false; - this.metricsHandlers = []; } else { this.metricsEnabled = true; - this.metricsHandlers = [ - new GCPMetricsHandler(new CloudMonitoringExporter()), - ]; } } diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index c7742687c..74e1bf43b 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -44,6 +44,8 @@ import { MethodName, StreamingState, } from './client-side-metrics/client-side-metrics-attributes'; +import {GCPMetricsHandler} from './client-side-metrics/gcp-metrics-handler'; +import {CloudMonitoringExporter} from './client-side-metrics/exporter'; // See protos/google/rpc/code.proto // (4=DEADLINE_EXCEEDED, 8=RESOURCE_EXHAUSTED, 10=ABORTED, 14=UNAVAILABLE) @@ -340,7 +342,6 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); const metricsCollector = this.bigtable.metricsEnabled ? 
new OperationMetricsCollector( this, - this.bigtable.metricsHandlers, MethodName.READ_ROWS, StreamingState.STREAMING ) diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 252e58652..1b49eeef6 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -24,6 +24,7 @@ import { import {grpc} from 'google-gax'; import {expectedRequestsHandled} from '../../test-common/metrics-handler-fixture'; import * as gax from 'google-gax'; +import {GCPMetricsHandler} from '../../src/client-side-metrics/gcp-metrics-handler'; const root = gax.protobuf.loadSync( './protos/google/bigtable/v2/response_params.proto' ); @@ -94,7 +95,6 @@ describe('Bigtable/MetricsCollector', () => { it('should record the right metrics with a typical method call', async () => { const testHandler = new TestMetricsHandler(logger); - const metricsHandlers = [testHandler]; class FakeTable { id = 'fakeTableId'; instance = new FakeInstance(); @@ -128,10 +128,12 @@ describe('Bigtable/MetricsCollector', () => { }; const metricsCollector = new OperationMetricsCollector( this, - metricsHandlers, MethodName.READ_ROWS, StreamingState.STREAMING ); + OperationMetricsCollector.metricsHandlers = [ + testHandler as unknown as GCPMetricsHandler, + ]; // In this method we simulate a series of events that might happen // when a user calls one of the Table methods. // Here is an example of what might happen in a method call: From 2ec1694197a6dbd04756b78420e90c93f804947e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 11 Apr 2025 13:23:01 -0400 Subject: [PATCH 388/448] Throw an error for better testing Also. Always gate the error throwing to just tests. --- package.json | 2 +- src/client-side-metrics/operation-metrics-collector.ts | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/package.json b/package.json index a200f3687..7e4a7f94d 100644 --- a/package.json +++ b/package.json @@ -40,7 +40,7 @@ "presystem-test": "npm run compile", "system-test": "mocha build/system-test --timeout 600000", "pretest": "npm run compile", - "test": "c8 mocha build/test", + "test": "METRICS_DEBUG=true c8 mocha build/test", "test:snap": "SNAPSHOT_UPDATE=1 npm test", "testproxy": "npm run compile && node testproxy/index.js", "clean": "gts clean", diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index ffed407b8..164021f7d 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -78,7 +78,7 @@ function withMetricsDebug(fn: () => T): T | undefined { return fn(); } catch (e) { if (METRICS_DEBUG) { - console.warn(e); + throw e; } } return; @@ -93,7 +93,9 @@ function checkState( if (allowedStates.includes(currentState)) { return fn(); } else { - console.warn('Invalid state transition'); + if (METRICS_DEBUG) { + throw Error('Invalid state transition'); + } } return; } From 228edcb72a77f5f601e8aa826f15b84c4e20b02e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 11 Apr 2025 13:33:53 -0400 Subject: [PATCH 389/448] Add a helper for adding handlers --- .../operation-metrics-collector.ts | 25 +++++++++++++++++++ src/tabular-api-surface.ts | 17 +------------ 2 files changed, 26 insertions(+), 16 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 164021f7d..ddf9d0735 100644 --- 
a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -18,6 +18,7 @@ import {grpc} from 'google-gax'; import * as gax from 'google-gax'; import {GCPMetricsHandler} from './gcp-metrics-handler'; import {CloudMonitoringExporter} from './exporter'; +import {AbortableDuplex} from '../index'; // When this environment variable is set then print any errors associated // with failures in the metrics collector. @@ -160,6 +161,30 @@ export class OperationMetricsCollector { ); } + /** + * Called to add handlers to the stream so that we can observe + * header and trailer data for client side metrics. + * + * @param stream + */ + handleStatusAndMetadata(stream: AbortableDuplex) { + stream + .on( + 'metadata', + (metadata: {internalRepr: Map; options: {}}) => { + this.onMetadataReceived(metadata); + } + ) + .on( + 'status', + (status: { + metadata: {internalRepr: Map; options: {}}; + }) => { + this.onStatusMetadataReceived(status); + } + ); + } + /** * Called when the operation starts. Records the start time. */ diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 74e1bf43b..216f4d657 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -525,22 +525,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); return false; }; - requestStream - .on( - 'metadata', - (metadata: {internalRepr: Map; options: {}}) => { - metricsCollector?.onMetadataReceived(metadata); - } - ) - .on( - 'status', - (status: { - metadata: {internalRepr: Map; options: {}}; - }) => { - metricsCollector?.onStatusMetadataReceived(status); - } - ); - + metricsCollector?.handleStatusAndMetadata(requestStream); rowStream .on('error', (error: ServiceError) => { rowStreamUnpipe(rowStream, userStream); From 1ddaa7df1d142abff4fe399a2da6fa8b1cf00aaf Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 11 Apr 2025 15:00:32 -0400 Subject: [PATCH 390/448] Directionally correct proxyquire stuff --- system-test/client-side-metrics.ts | 42 ++++++++++++++++++++++++++---- 1 file changed, 37 insertions(+), 5 deletions(-) diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index 59c250396..c8a73171d 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -64,7 +64,7 @@ describe('Bigtable/ClientSideMetrics', () => { } }); - describe('Bigtable/ClientSideMetricsToGCM', () => { + describe.only('Bigtable/ClientSideMetricsToGCM', () => { // This test suite ensures that for each test all the export calls are // successful even when multiple instances and tables are created. async function mockBigtable(done: mocha.Done) { @@ -94,6 +94,7 @@ describe('Bigtable/ClientSideMetrics', () => { metrics: ResourceMetrics, resultCallback: (result: ExportResult) => void ): void { + console.trace('call exporter'); try { super.export(metrics, (result: ExportResult) => { if (!exported) { @@ -121,7 +122,9 @@ describe('Bigtable/ClientSideMetrics', () => { } class TestGCPMetricsHandler extends GCPMetricsHandler { + static value = 'value'; constructor() { + console.trace('initialize handler'); super(new TestExporter()); } } @@ -132,12 +135,41 @@ describe('Bigtable/ClientSideMetrics', () => { ensure the export was successful and pass the test with code 0 if it is successful. 
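      The stubbing below has to happen at the collector layer because, as of
      the previous patches, metrics handlers live on OperationMetricsCollector
      rather than on the Bigtable client itself, so replacing the handler means
      replacing each module in the chain between the client and the collector.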
*/ + const FakeOperationMetricsCollector = proxyquire( + '../src/client-side-metrics/operation-metrics-collector', + { + './gcp-metrics-handler': {GCPMetricsHandler: TestGCPMetricsHandler}, + } + ).OperationMetricsCollector; + const FakeTabularApiSurface = proxyquire( + '../src/tabular-api-surface.js', + { + './client-side-metrics/operation-metrics-collector': { + OperationMetricsCollector: FakeOperationMetricsCollector, + }, + } + ).TabularApiSurface; + // FakeTabularApiSurface.surfaceName = 'fake-surface'; + const FakeTable = proxyquire('../src/table.js', { + './tabular-api-surface': {TabularApiSurface: FakeTabularApiSurface}, + }).Table; + const FakeInstance = proxyquire('../src/instance.js', { + './table': {Table: FakeTable}, + }).Instance; + FakeInstance.instanceName = 'fakeInstance'; const FakeBigtable = proxyquire('../src/index.js', { - './client-side-metrics/gcp-metrics-handler': { - GCPMetricsHandler: TestGCPMetricsHandler, - }, + './instance': {Instance: FakeInstance}, }).Bigtable; - return new FakeBigtable(); + // FakeBigtable.client_name = 'fakeClient'; + /* + const FakeBigtable = class FakeBigtable { + constructor() { + console.log('creating experimental fake bigtable'); + } + }; + */ + const fakeBigtable = new FakeBigtable(); + return fakeBigtable; } it('should send the metrics to Google Cloud Monitoring for a ReadRows call', done => { From f33f69ff0613093602d639f88c818bb2aaee1f24 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 11 Apr 2025 15:04:43 -0400 Subject: [PATCH 391/448] Remove traces and various comments --- system-test/client-side-metrics.ts | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index c8a73171d..18e158a9f 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -94,7 +94,6 @@ describe('Bigtable/ClientSideMetrics', () => { metrics: ResourceMetrics, resultCallback: (result: ExportResult) => void ): void { - console.trace('call exporter'); try { super.export(metrics, (result: ExportResult) => { if (!exported) { @@ -124,7 +123,6 @@ describe('Bigtable/ClientSideMetrics', () => { class TestGCPMetricsHandler extends GCPMetricsHandler { static value = 'value'; constructor() { - console.trace('initialize handler'); super(new TestExporter()); } } @@ -149,7 +147,6 @@ describe('Bigtable/ClientSideMetrics', () => { }, } ).TabularApiSurface; - // FakeTabularApiSurface.surfaceName = 'fake-surface'; const FakeTable = proxyquire('../src/table.js', { './tabular-api-surface': {TabularApiSurface: FakeTabularApiSurface}, }).Table; @@ -160,14 +157,6 @@ describe('Bigtable/ClientSideMetrics', () => { const FakeBigtable = proxyquire('../src/index.js', { './instance': {Instance: FakeInstance}, }).Bigtable; - // FakeBigtable.client_name = 'fakeClient'; - /* - const FakeBigtable = class FakeBigtable { - constructor() { - console.log('creating experimental fake bigtable'); - } - }; - */ const fakeBigtable = new FakeBigtable(); return fakeBigtable; } From e5eb375dc4eb3e55133ca619f0dc6fab8e8cf2a0 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 11 Apr 2025 15:08:25 -0400 Subject: [PATCH 392/448] Set instance name --- system-test/client-side-metrics.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index 18e158a9f..87bc2b2b9 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -153,7 +153,6 @@ describe('Bigtable/ClientSideMetrics', () => { const 
FakeInstance = proxyquire('../src/instance.js', { './table': {Table: FakeTable}, }).Instance; - FakeInstance.instanceName = 'fakeInstance'; const FakeBigtable = proxyquire('../src/index.js', { './instance': {Instance: FakeInstance}, }).Bigtable; From 1f830a771e86015350dd82a82cb9046354a0a598 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 11 Apr 2025 15:23:59 -0400 Subject: [PATCH 393/448] Factor out the fake bigtable creator --- system-test/client-side-metrics.ts | 82 ++++++++++++++---------------- 1 file changed, 37 insertions(+), 45 deletions(-) diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index 87bc2b2b9..f94713001 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -27,7 +27,39 @@ import {setupBigtable} from './client-side-metrics-setup-table'; import {TestMetricsHandler} from '../test-common/test-metrics-handler'; import {OnOperationCompleteData} from '../src/client-side-metrics/metrics-handler'; -describe('Bigtable/ClientSideMetrics', () => { +function getFakeBigtable( + metricsHandlerClass: typeof GCPMetricsHandler | typeof TestMetricsHandler +) { + /* + Below we mock out the table so that it sends the metrics to a test exporter + that will still send the metrics to Google Cloud Monitoring, but then also + ensure the export was successful and pass the test with code 0 if it is + successful. + */ + const FakeOperationMetricsCollector = proxyquire( + '../src/client-side-metrics/operation-metrics-collector', + { + './gcp-metrics-handler': {GCPMetricsHandler: metricsHandlerClass}, + } + ).OperationMetricsCollector; + const FakeTabularApiSurface = proxyquire('../src/tabular-api-surface.js', { + './client-side-metrics/operation-metrics-collector': { + OperationMetricsCollector: FakeOperationMetricsCollector, + }, + }).TabularApiSurface; + const FakeTable = proxyquire('../src/table.js', { + './tabular-api-surface': {TabularApiSurface: FakeTabularApiSurface}, + }).Table; + const FakeInstance = proxyquire('../src/instance.js', { + './table': {Table: FakeTable}, + }).Instance; + const FakeBigtable = proxyquire('../src/index.js', { + './instance': {Instance: FakeInstance}, + }).Bigtable; + return new FakeBigtable(); +} + +describe.only('Bigtable/ClientSideMetrics', () => { const instanceId1 = 'emulator-test-instance'; const instanceId2 = 'emulator-test-instance2'; const tableId1 = 'my-table'; @@ -64,7 +96,7 @@ describe('Bigtable/ClientSideMetrics', () => { } }); - describe.only('Bigtable/ClientSideMetricsToGCM', () => { + describe('Bigtable/ClientSideMetricsToGCM', () => { // This test suite ensures that for each test all the export calls are // successful even when multiple instances and tables are created. async function mockBigtable(done: mocha.Done) { @@ -127,37 +159,7 @@ describe('Bigtable/ClientSideMetrics', () => { } } - /* - Below we mock out the table so that it sends the metrics to a test exporter - that will still send the metrics to Google Cloud Monitoring, but then also - ensure the export was successful and pass the test with code 0 if it is - successful. 
- */ - const FakeOperationMetricsCollector = proxyquire( - '../src/client-side-metrics/operation-metrics-collector', - { - './gcp-metrics-handler': {GCPMetricsHandler: TestGCPMetricsHandler}, - } - ).OperationMetricsCollector; - const FakeTabularApiSurface = proxyquire( - '../src/tabular-api-surface.js', - { - './client-side-metrics/operation-metrics-collector': { - OperationMetricsCollector: FakeOperationMetricsCollector, - }, - } - ).TabularApiSurface; - const FakeTable = proxyquire('../src/table.js', { - './tabular-api-surface': {TabularApiSurface: FakeTabularApiSurface}, - }).Table; - const FakeInstance = proxyquire('../src/instance.js', { - './table': {Table: FakeTable}, - }).Instance; - const FakeBigtable = proxyquire('../src/index.js', { - './instance': {Instance: FakeInstance}, - }).Bigtable; - const fakeBigtable = new FakeBigtable(); - return fakeBigtable; + return getFakeBigtable(TestGCPMetricsHandler); } it('should send the metrics to Google Cloud Monitoring for a ReadRows call', done => { @@ -226,12 +228,7 @@ describe('Bigtable/ClientSideMetrics', () => { ensure the export was successful and pass the test with code 0 if it is successful. */ - const FakeBigtable = proxyquire('../src/index.js', { - './client-side-metrics/gcp-metrics-handler': { - GCPMetricsHandler: TestGCPMetricsHandler, - }, - }).Bigtable; - return new FakeBigtable(); + return getFakeBigtable(TestGCPMetricsHandler); } it('should send the metrics to Google Cloud Monitoring for a ReadRows call', done => { @@ -384,12 +381,7 @@ describe('Bigtable/ClientSideMetrics', () => { } } - const FakeBigtable = proxyquire('../src/index.js', { - './client-side-metrics/gcp-metrics-handler': { - GCPMetricsHandler: TestGCPMetricsHandler, - }, - }).Bigtable; - bigtable = new FakeBigtable(); + bigtable = getFakeBigtable(TestGCPMetricsHandler); await setupBigtable(bigtable, columnFamilyId, instanceId1, [ tableId1, tableId2, From cc32a4cfef184ab4212d48f0e14cb97acd3b4e74 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 11 Apr 2025 15:40:30 -0400 Subject: [PATCH 394/448] Remove only --- system-test/client-side-metrics.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index f94713001..02528866d 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -59,7 +59,7 @@ function getFakeBigtable( return new FakeBigtable(); } -describe.only('Bigtable/ClientSideMetrics', () => { +describe('Bigtable/ClientSideMetrics', () => { const instanceId1 = 'emulator-test-instance'; const instanceId2 = 'emulator-test-instance2'; const tableId1 = 'my-table'; From 61750d60be8478c07103fd07296caad760006d68 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 11 Apr 2025 16:25:25 -0400 Subject: [PATCH 395/448] Eliminate onLastAttemptCompleted --- .../operation-metrics-collector.ts | 13 +------------ src/tabular-api-surface.ts | 6 +++--- test/metrics-collector/metrics-collector.ts | 4 ---- 3 files changed, 4 insertions(+), 19 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index ddf9d0735..36487f899 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -304,6 +304,7 @@ export class OperationMetricsCollector { * @param {grpc.status} finalOperationStatus Information about the completed operation. 
*/ onOperationComplete(projectId: string, finalOperationStatus: grpc.status) { + this.onAttemptComplete(projectId, finalOperationStatus); withMetricsDebug(() => { checkState( this.state, @@ -343,18 +344,6 @@ export class OperationMetricsCollector { }); } - /** - * This is a useful helper method for all the times we want to record that an - * attempt is complete when an operation is also complete. - * - * @param {string} projectId The id of the project. - * @param {grpc.status} finalOperationStatus Information about the completed operation. - */ - onLastAttemptCompleted(projectId: string, attemptStatus: grpc.status) { - this.onAttemptComplete(projectId, attemptStatus); - this.onOperationComplete(projectId, attemptStatus); - } - /** * Called when metadata is received. Extracts server timing information if available. * @param {object} metadata The received metadata. diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 216f4d657..57ae1b73b 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -534,7 +534,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // We ignore the `cancelled` "error", since we are the ones who cause // it when the user calls `.abort()`. userStream.end(); - metricsCollector?.onLastAttemptCompleted( + metricsCollector?.onOperationComplete( this.bigtable.projectId, error.code ); @@ -573,7 +573,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // error.code = grpc.status.CANCELLED; } - metricsCollector?.onLastAttemptCompleted( + metricsCollector?.onOperationComplete( this.bigtable.projectId, error.code ); @@ -588,7 +588,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); }) .on('end', () => { activeRequestStream = null; - metricsCollector?.onLastAttemptCompleted( + metricsCollector?.onOperationComplete( this.bigtable.projectId, grpc.status.OK ); diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 1b49eeef6..c95cb8aeb 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -170,10 +170,6 @@ describe('Bigtable/MetricsCollector', () => { metricsCollector.onResponse(this.bigtable.projectId); logger.value += '15. User reads row 1\n'; logger.value += '16. 
Stream ends, operation completes\n'; - metricsCollector.onAttemptComplete( - this.bigtable.projectId, - grpc.status.OK - ); metricsCollector.onOperationComplete( this.bigtable.projectId, grpc.status.OK From 67f9050250445bc361e65bdd414fce6761233b76 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 11 Apr 2025 17:00:29 -0400 Subject: [PATCH 396/448] eliminate environment variable setting --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 7e4a7f94d..a200f3687 100644 --- a/package.json +++ b/package.json @@ -40,7 +40,7 @@ "presystem-test": "npm run compile", "system-test": "mocha build/system-test --timeout 600000", "pretest": "npm run compile", - "test": "METRICS_DEBUG=true c8 mocha build/test", + "test": "c8 mocha build/test", "test:snap": "SNAPSHOT_UPDATE=1 npm test", "testproxy": "npm run compile && node testproxy/index.js", "clean": "gts clean", From cc47f995de2984cdf4dc11b75d9a20dc02ec3a9d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 14 Apr 2025 14:52:27 -0400 Subject: [PATCH 397/448] thread projectId through test framework --- system-test/client-side-metrics.ts | 49 ++++++++++++++++++++++++------ 1 file changed, 40 insertions(+), 9 deletions(-) diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index 02528866d..20d4a7d20 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -28,6 +28,7 @@ import {TestMetricsHandler} from '../test-common/test-metrics-handler'; import {OnOperationCompleteData} from '../src/client-side-metrics/metrics-handler'; function getFakeBigtable( + projectId: string, metricsHandlerClass: typeof GCPMetricsHandler | typeof TestMetricsHandler ) { /* @@ -56,7 +57,7 @@ function getFakeBigtable( const FakeBigtable = proxyquire('../src/index.js', { './instance': {Instance: FakeInstance}, }).Bigtable; - return new FakeBigtable(); + return new FakeBigtable({projectId}); } describe('Bigtable/ClientSideMetrics', () => { @@ -99,7 +100,7 @@ describe('Bigtable/ClientSideMetrics', () => { describe('Bigtable/ClientSideMetricsToGCM', () => { // This test suite ensures that for each test all the export calls are // successful even when multiple instances and tables are created. - async function mockBigtable(done: mocha.Done) { + async function mockBigtable(projectId: string, done: mocha.Done) { /* The exporter is called every x seconds, but we only want to test the value it receives once. Since done cannot be called multiple times in mocha, @@ -159,13 +160,22 @@ describe('Bigtable/ClientSideMetrics', () => { } } - return getFakeBigtable(TestGCPMetricsHandler); + return getFakeBigtable(projectId, TestGCPMetricsHandler); } it('should send the metrics to Google Cloud Monitoring for a ReadRows call', done => { (async () => { try { - const bigtable = await mockBigtable(done); + const projectId: string = await new Promise((resolve, reject) => { + bigtable.getProjectId_((err: any, projectId: string) => { + if (err) { + reject(err); + } else { + resolve(projectId as string); + } + }); + }); + const bigtable = await mockBigtable(projectId, done); for (const instanceId of [instanceId1, instanceId2]) { await setupBigtable(bigtable, columnFamilyId, instanceId, [ tableId1, @@ -188,7 +198,7 @@ describe('Bigtable/ClientSideMetrics', () => { // This test suite simulates a situation where the user creates multiple // clients and ensures that the exporter doesn't produce any errors even // when multiple clients are attempting an export. 
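The projectId threading in this patch repeats an inline Promise wrapper around
the callback-style getProjectId_. A minimal sketch of how that wrapper could be
factored into a helper; promisifyProjectId is a hypothetical name, not
something this series adds:

// Hypothetical helper: promisify the callback-style getProjectId_
// that these system tests call before building the mocked client.
function promisifyProjectId(bigtable: {
  getProjectId_: (cb: (err: Error | null, projectId?: string) => void) => void;
}): Promise<string> {
  return new Promise((resolve, reject) => {
    bigtable.getProjectId_((err, projectId) => {
      if (err) {
        reject(err);
      } else {
        // getProjectId_ reports either an error or a project id, so the
        // id is safe to treat as a string on the success path.
        resolve(projectId as string);
      }
    });
  });
}

// Usage, mirroring the tests in this patch:
// const projectId = await promisifyProjectId(bigtable);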
- async function mockBigtable(done: mocha.Done) { + async function mockBigtable(projectId: string, done: mocha.Done) { class TestExporter extends CloudMonitoringExporter { export( metrics: ResourceMetrics, @@ -228,7 +238,7 @@ describe('Bigtable/ClientSideMetrics', () => { ensure the export was successful and pass the test with code 0 if it is successful. */ - return getFakeBigtable(TestGCPMetricsHandler); + return getFakeBigtable(projectId, TestGCPMetricsHandler); } it('should send the metrics to Google Cloud Monitoring for a ReadRows call', done => { @@ -245,8 +255,17 @@ describe('Bigtable/ClientSideMetrics', () => { }, 120000); (async () => { try { - const bigtable1 = await mockBigtable(done); - const bigtable2 = await mockBigtable(done); + const projectId: string = await new Promise((resolve, reject) => { + bigtable.getProjectId_((err, projectId) => { + if (err) { + reject(err); + } else { + resolve(projectId as string); + } + }); + }); + const bigtable1 = await mockBigtable(projectId, done); + const bigtable2 = await mockBigtable(projectId, done); for (const bigtable of [bigtable1, bigtable2]) { for (const instanceId of [instanceId1, instanceId2]) { await setupBigtable(bigtable, columnFamilyId, instanceId, [ @@ -381,7 +400,7 @@ describe('Bigtable/ClientSideMetrics', () => { } } - bigtable = getFakeBigtable(TestGCPMetricsHandler); + bigtable = getFakeBigtable(projectId, TestGCPMetricsHandler); await setupBigtable(bigtable, columnFamilyId, instanceId1, [ tableId1, tableId2, @@ -408,5 +427,17 @@ describe('Bigtable/ClientSideMetrics', () => { await table2.getRows(); })(); }); + it.only('should pass the projectId to the metrics handler properly', done => { + bigtable = new Bigtable({projectId: 'cfdb-sdk-node-tests'}); + (async () => { + const projectId = 'cfdb-sdk-node-tests'; + await mockBigtable(projectId, done); + const instance = bigtable.instance(instanceId1); + const table = instance.table(tableId1); + await table.getRows(); + const table2 = instance.table(tableId2); + await table2.getRows(); + })(); + }); }); }); From 599ededcacaee88736b2205cb1f65efe0d1cb0bd Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 14 Apr 2025 14:53:23 -0400 Subject: [PATCH 398/448] Add error handling --- system-test/client-side-metrics.ts | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index 20d4a7d20..aee33e2b8 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -430,13 +430,17 @@ describe('Bigtable/ClientSideMetrics', () => { it.only('should pass the projectId to the metrics handler properly', done => { bigtable = new Bigtable({projectId: 'cfdb-sdk-node-tests'}); (async () => { - const projectId = 'cfdb-sdk-node-tests'; - await mockBigtable(projectId, done); - const instance = bigtable.instance(instanceId1); - const table = instance.table(tableId1); - await table.getRows(); - const table2 = instance.table(tableId2); - await table2.getRows(); + try { + const projectId = 'cfdb-sdk-node-tests'; + await mockBigtable(projectId, done); + const instance = bigtable.instance(instanceId1); + const table = instance.table(tableId1); + await table.getRows(); + const table2 = instance.table(tableId2); + await table2.getRows(); + } catch (e) { + done(e); + } })(); }); }); From 4e32c5dde2fa00a308fe4c758a865f749f263288 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 14 Apr 2025 15:44:00 -0400 Subject: [PATCH 399/448] Test with different project --- 
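Note: this test exercises the multi-project path that patch 400 ("Localize the
MetricServiceClient") then completes in the exporter: each exported batch must
be written to the project recorded in the metrics' own resource attributes,
not to the exporter's ambient default project. A loose sketch of that routing
idea, assuming the resource-attribute layout that the getProject helper reads
in the next patch:

import {MetricServiceClient} from '@google-cloud/monitoring';
import {ResourceMetrics} from '@opentelemetry/sdk-metrics';

// Sketch only: scope the Cloud Monitoring client to the project that
// owns this batch of metrics, so two Bigtable clients pointed at
// different projects export to the right place.
function clientForBatch(metrics: ResourceMetrics): MetricServiceClient {
  type WithSyncAttributes = {_syncAttributes: {[index: string]: string}};
  const projectId = (metrics.resource as unknown as WithSyncAttributes)
    ._syncAttributes['monitored_resource.project_id'];
  return new MetricServiceClient({projectId});
}

The request built from the batch (metricsToRequest in the next patch) would
then be sent with clientForBatch(metrics).createTimeSeries(request).
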
system-test/client-side-metrics.ts | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index aee33e2b8..7b589161a 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -193,6 +193,28 @@ describe('Bigtable/ClientSideMetrics', () => { } })(); }); + it.only('should send the metrics to Google Cloud Monitoring for a ReadRows call with a second project', done => { + (async () => { + try { + const projectId = 'cfdb-sdk-node-tests'; + const bigtable = await mockBigtable(projectId, done); + for (const instanceId of [instanceId1, instanceId2]) { + await setupBigtable(bigtable, columnFamilyId, instanceId, [ + tableId1, + tableId2, + ]); + const instance = bigtable.instance(instanceId); + const table = instance.table(tableId1); + await table.getRows(); + const table2 = instance.table(tableId2); + await table2.getRows(); + } + } catch (e) { + done(new Error('An error occurred while running the script')); + done(e); + } + })(); + }); }); describe('Bigtable/ClientSideMetricsToGCMTimeout', () => { // This test suite simulates a situation where the user creates multiple @@ -427,7 +449,7 @@ describe('Bigtable/ClientSideMetrics', () => { await table2.getRows(); })(); }); - it.only('should pass the projectId to the metrics handler properly', done => { + it('should pass the projectId to the metrics handler properly', done => { bigtable = new Bigtable({projectId: 'cfdb-sdk-node-tests'}); (async () => { try { From cf1ec7758fb8f36c9b96ae499513e967b0b2db01 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 16 Apr 2025 15:14:15 -0400 Subject: [PATCH 400/448] Localize the MetricServiceClient --- src/client-side-metrics/exporter.ts | 35 ++++++++++++++++++++--------- system-test/client-side-metrics.ts | 11 +++++---- 2 files changed, 32 insertions(+), 14 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 90151ae4e..3acfe45fe 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -115,6 +115,27 @@ function getIntegerPoints(dataPoint: DataPoint) { ]; } +/** + * Extracts the project ID from a `ResourceMetrics` object. + * + * This function retrieves the Google Cloud project ID from the resource + * attributes of a `ResourceMetrics` object, which is the standard data + * structure used by OpenTelemetry for representing metrics data. The project ID + * is typically stored under the `monitored_resource.project_id` key within the + * resource's attributes. 
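* @param exportArgs The ResourceMetrics batch to read the resource
+ * attributes from.
+ * @returns The project id recorded in the batch's resource attributes.
+ 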
+ * + */ +function getProject(exportArgs: ResourceMetrics) { + type WithSyncAttributes = {_syncAttributes: {[index: string]: string}}; + const resourcesWithSyncAttributes = + exportArgs.resource as unknown as WithSyncAttributes; + const projectId = + resourcesWithSyncAttributes._syncAttributes[ + 'monitored_resource.project_id' + ]; + return projectId; +} + /** * getResource gets the resource object which is used for building the timeseries * object that will be sent to Google Cloud Monitoring dashboard @@ -211,13 +232,7 @@ function getMetric( * */ export function metricsToRequest(exportArgs: ResourceMetrics) { - type WithSyncAttributes = {_syncAttributes: {[index: string]: string}}; - const resourcesWithSyncAttributes = - exportArgs.resource as unknown as WithSyncAttributes; - const projectId = - resourcesWithSyncAttributes._syncAttributes[ - 'monitored_resource.project_id' - ]; + const projectId = getProject(exportArgs); const timeSeriesArray = []; for (const scopeMetrics of exportArgs.scopeMetrics) { for (const scopeMetric of scopeMetrics.metrics) { @@ -296,14 +311,13 @@ export function metricsToRequest(exportArgs: ResourceMetrics) { * @beta */ export class CloudMonitoringExporter extends MetricExporter { - private monitoringClient = new MetricServiceClient(); - export( metrics: ResourceMetrics, resultCallback: (result: ExportResult) => void ): void { (async () => { try { + const projectId = getProject(metrics); const request = metricsToRequest(metrics); // In order to manage the "One or more points were written more // frequently than the maximum sampling period configured for the @@ -327,7 +341,8 @@ export class CloudMonitoringExporter extends MetricExporter { maxRetryDelayMillis: 50000, } ); - await this.monitoringClient.createTimeSeries( + const monitoringClient = new MetricServiceClient({projectId}); + await monitoringClient.createTimeSeries( request as ICreateTimeSeriesRequest, { retry, diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index 7b589161a..7afcb70d8 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -27,6 +27,8 @@ import {setupBigtable} from './client-side-metrics-setup-table'; import {TestMetricsHandler} from '../test-common/test-metrics-handler'; import {OnOperationCompleteData} from '../src/client-side-metrics/metrics-handler'; +const SECOND_PROJECT_ID = 'cfdb-sdk-node-tests'; + function getFakeBigtable( projectId: string, metricsHandlerClass: typeof GCPMetricsHandler | typeof TestMetricsHandler @@ -193,10 +195,11 @@ describe('Bigtable/ClientSideMetrics', () => { } })(); }); - it.only('should send the metrics to Google Cloud Monitoring for a ReadRows call with a second project', done => { + it('should send the metrics to Google Cloud Monitoring for a ReadRows call with a second project', done => { (async () => { try { - const projectId = 'cfdb-sdk-node-tests'; + // This is the second project the test is configured to work with: + const projectId = SECOND_PROJECT_ID; const bigtable = await mockBigtable(projectId, done); for (const instanceId of [instanceId1, instanceId2]) { await setupBigtable(bigtable, columnFamilyId, instanceId, [ @@ -450,10 +453,10 @@ describe('Bigtable/ClientSideMetrics', () => { })(); }); it('should pass the projectId to the metrics handler properly', done => { - bigtable = new Bigtable({projectId: 'cfdb-sdk-node-tests'}); + bigtable = new Bigtable({projectId: SECOND_PROJECT_ID}); (async () => { try { - const projectId = 'cfdb-sdk-node-tests'; + const projectId = 
SECOND_PROJECT_ID; await mockBigtable(projectId, done); const instance = bigtable.instance(instanceId1); const table = instance.table(tableId1); From cef3dad90e40cbd2ba9be1994508b031c5553d2f Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Wed, 16 Apr 2025 19:33:31 +0000 Subject: [PATCH 401/448] =?UTF-8?q?=F0=9F=A6=89=20Updates=20from=20OwlBot?= =?UTF-8?q?=20post-processor?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --- protos/protos.d.ts | 15 + protos/protos.js | 1279 +++++++++++++++++++++++++++++++++----------- protos/protos.json | 57 +- 3 files changed, 1017 insertions(+), 334 deletions(-) diff --git a/protos/protos.d.ts b/protos/protos.d.ts index 0eedaa428..4ff4aa310 100644 --- a/protos/protos.d.ts +++ b/protos/protos.d.ts @@ -3420,6 +3420,9 @@ export namespace google { /** Instance satisfiesPzs. */ public satisfiesPzs?: (boolean|null); + /** Instance _satisfiesPzs. */ + public _satisfiesPzs?: "satisfiesPzs"; + /** * Creates a new Instance instance using the specified properties. * @param [properties] Properties to set @@ -4637,6 +4640,9 @@ export namespace google { /** DataBoostIsolationReadOnly computeBillingOwner. */ public computeBillingOwner?: (google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner|keyof typeof google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner|null); + /** DataBoostIsolationReadOnly _computeBillingOwner. */ + public _computeBillingOwner?: "computeBillingOwner"; + /** * Creates a new DataBoostIsolationReadOnly instance using the specified properties. * @param [properties] Properties to set @@ -16366,6 +16372,9 @@ export namespace google { /** MutateRowsResponse rateLimitInfo. */ public rateLimitInfo?: (google.bigtable.v2.IRateLimitInfo|null); + /** MutateRowsResponse _rateLimitInfo. */ + public _rateLimitInfo?: "rateLimitInfo"; + /** * Creates a new MutateRowsResponse instance using the specified properties. * @param [properties] Properties to set @@ -25298,6 +25307,12 @@ export namespace google { /** ResponseParams clusterId. */ public clusterId?: (string|null); + /** ResponseParams _zoneId. */ + public _zoneId?: "zoneId"; + + /** ResponseParams _clusterId. */ + public _clusterId?: "clusterId"; + /** * Creates a new ResponseParams instance using the specified properties. * @param [properties] Properties to set diff --git a/protos/protos.js b/protos/protos.js index bbbed66cb..c73eb7b91 100644 --- a/protos/protos.js +++ b/protos/protos.js @@ -916,12 +916,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CreateInstanceRequest.decode = function decode(reader, length) { + CreateInstanceRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.CreateInstanceRequest(), key, value; while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.parent = reader.string(); @@ -1200,12 +1202,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetInstanceRequest.decode = function decode(reader, length) { + GetInstanceRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.GetInstanceRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -1414,12 +1418,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListInstancesRequest.decode = function decode(reader, length) { + ListInstancesRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.ListInstancesRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.parent = reader.string(); @@ -1656,12 +1662,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListInstancesResponse.decode = function decode(reader, length) { + ListInstancesResponse.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.ListInstancesResponse(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.instances && message.instances.length)) @@ -1929,12 +1937,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PartialUpdateInstanceRequest.decode = function decode(reader, length) { + PartialUpdateInstanceRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.PartialUpdateInstanceRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.instance = $root.google.bigtable.admin.v2.Instance.decode(reader, reader.uint32()); @@ -2155,12 +2165,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteInstanceRequest.decode = function decode(reader, length) { + DeleteInstanceRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.DeleteInstanceRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -2380,12 +2392,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CreateClusterRequest.decode = function decode(reader, length) { + CreateClusterRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.CreateClusterRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.parent = reader.string(); @@ -2613,12 +2627,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetClusterRequest.decode = function decode(reader, length) { + GetClusterRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.GetClusterRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -2827,12 +2843,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListClustersRequest.decode = function decode(reader, length) { + ListClustersRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.ListClustersRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.parent = reader.string(); @@ -3069,12 +3087,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListClustersResponse.decode = function decode(reader, length) { + ListClustersResponse.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.ListClustersResponse(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.clusters && message.clusters.length)) @@ -3331,12 +3351,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteClusterRequest.decode = function decode(reader, length) { + DeleteClusterRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.DeleteClusterRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -3556,12 +3578,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CreateInstanceMetadata.decode = function decode(reader, length) { + CreateInstanceMetadata.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.CreateInstanceMetadata(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.originalRequest = $root.google.bigtable.admin.v2.CreateInstanceRequest.decode(reader, reader.uint32()); @@ -3821,12 +3845,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UpdateInstanceMetadata.decode = function decode(reader, length) { + UpdateInstanceMetadata.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.UpdateInstanceMetadata(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.originalRequest = $root.google.bigtable.admin.v2.PartialUpdateInstanceRequest.decode(reader, reader.uint32()); @@ -4101,12 +4127,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CreateClusterMetadata.decode = function decode(reader, length) { + CreateClusterMetadata.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.CreateClusterMetadata(), key, value; while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.originalRequest = $root.google.bigtable.admin.v2.CreateClusterRequest.decode(reader, reader.uint32()); @@ -4414,12 +4442,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - TableProgress.decode = function decode(reader, length) { + TableProgress.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.CreateClusterMetadata.TableProgress(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 2: { message.estimatedSizeBytes = reader.int64(); @@ -4749,12 +4779,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UpdateClusterMetadata.decode = function decode(reader, length) { + UpdateClusterMetadata.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.UpdateClusterMetadata(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.originalRequest = $root.google.bigtable.admin.v2.Cluster.decode(reader, reader.uint32()); @@ -5014,12 +5046,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PartialUpdateClusterMetadata.decode = function decode(reader, length) { + PartialUpdateClusterMetadata.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.PartialUpdateClusterMetadata(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.requestTime = $root.google.protobuf.Timestamp.decode(reader, reader.uint32()); @@ -5268,12 +5302,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PartialUpdateClusterRequest.decode = function decode(reader, length) { + PartialUpdateClusterRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.PartialUpdateClusterRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.cluster = $root.google.bigtable.admin.v2.Cluster.decode(reader, reader.uint32()); @@ -5527,12 +5563,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CreateAppProfileRequest.decode = function decode(reader, length) { + CreateAppProfileRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.CreateAppProfileRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.parent = reader.string(); @@ -5772,12 +5810,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetAppProfileRequest.decode = function decode(reader, length) { + GetAppProfileRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.GetAppProfileRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -5997,12 +6037,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListAppProfilesRequest.decode = function decode(reader, length) { + ListAppProfilesRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.ListAppProfilesRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.parent = reader.string(); @@ -6251,12 +6293,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListAppProfilesResponse.decode = function decode(reader, length) { + ListAppProfilesResponse.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.ListAppProfilesResponse(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.appProfiles && message.appProfiles.length)) @@ -6535,12 +6579,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UpdateAppProfileRequest.decode = function decode(reader, length) { + UpdateAppProfileRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.UpdateAppProfileRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.appProfile = $root.google.bigtable.admin.v2.AppProfile.decode(reader, reader.uint32()); @@ -6784,12 +6830,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteAppProfileRequest.decode = function decode(reader, length) { + DeleteAppProfileRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.DeleteAppProfileRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -6989,12 +7037,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UpdateAppProfileMetadata.decode = function decode(reader, length) { + UpdateAppProfileMetadata.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.UpdateAppProfileMetadata(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -7219,12 +7269,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListHotTabletsRequest.decode = function decode(reader, length) { + ListHotTabletsRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.ListHotTabletsRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.parent = reader.string(); @@ -7494,12 +7546,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListHotTabletsResponse.decode = function decode(reader, length) { + ListHotTabletsResponse.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.ListHotTabletsResponse(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.hotTablets && message.hotTablets.length)) @@ -7734,7 +7788,12 @@ // OneOf field names bound to virtual getters and setters var $oneOfFields; - // Virtual OneOf for proto3 optional field + /** + * Instance _satisfiesPzs. + * @member {"satisfiesPzs"|undefined} _satisfiesPzs + * @memberof google.bigtable.admin.v2.Instance + * @instance + */ Object.defineProperty(Instance.prototype, "_satisfiesPzs", { get: $util.oneOfGetter($oneOfFields = ["satisfiesPzs"]), set: $util.oneOfSetter($oneOfFields) @@ -7806,12 +7865,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Instance.decode = function decode(reader, length) { + Instance.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Instance(), key, value; while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -8217,12 +8278,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - AutoscalingTargets.decode = function decode(reader, length) { + AutoscalingTargets.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.AutoscalingTargets(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 2: { message.cpuUtilizationPercent = reader.int32(); @@ -8444,12 +8507,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - AutoscalingLimits.decode = function decode(reader, length) { + AutoscalingLimits.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.AutoscalingLimits(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.minServeNodes = reader.int32(); @@ -8740,12 +8805,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Cluster.decode = function decode(reader, length) { + Cluster.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Cluster(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -9118,12 +9185,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ClusterAutoscalingConfig.decode = function decode(reader, length) { + ClusterAutoscalingConfig.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.autoscalingLimits = $root.google.bigtable.admin.v2.AutoscalingLimits.decode(reader, reader.uint32()); @@ -9344,12 +9413,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ClusterConfig.decode = function decode(reader, length) { + ClusterConfig.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Cluster.ClusterConfig(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.clusterAutoscalingConfig = $root.google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig.decode(reader, reader.uint32()); @@ -9552,12 +9623,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - EncryptionConfig.decode = function decode(reader, length) { + EncryptionConfig.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Cluster.EncryptionConfig(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.kmsKeyName = reader.string(); @@ -9860,12 +9933,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - AppProfile.decode = function decode(reader, length) { + AppProfile.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.AppProfile(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -10227,12 +10302,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MultiClusterRoutingUseAny.decode = function decode(reader, length) { + MultiClusterRoutingUseAny.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.clusterIds && message.clusterIds.length)) @@ -10455,12 +10532,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SingleClusterRouting.decode = function decode(reader, length) { + SingleClusterRouting.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.AppProfile.SingleClusterRouting(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.clusterId = reader.string(); @@ -10689,12 +10768,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StandardIsolation.decode = function decode(reader, length) { + StandardIsolation.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.AppProfile.StandardIsolation(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.priority = reader.int32(); @@ -10871,7 +10952,12 @@ // OneOf field names bound to virtual getters and setters var $oneOfFields; - // Virtual OneOf for proto3 optional field + /** + * DataBoostIsolationReadOnly _computeBillingOwner. + * @member {"computeBillingOwner"|undefined} _computeBillingOwner + * @memberof google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly + * @instance + */ Object.defineProperty(DataBoostIsolationReadOnly.prototype, "_computeBillingOwner", { get: $util.oneOfGetter($oneOfFields = ["computeBillingOwner"]), set: $util.oneOfSetter($oneOfFields) @@ -10930,12 +11016,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DataBoostIsolationReadOnly.decode = function decode(reader, length) { + DataBoostIsolationReadOnly.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.computeBillingOwner = reader.int32(); @@ -11239,12 +11327,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - HotTablet.decode = function decode(reader, length) { + HotTablet.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.HotTablet(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -11563,12 +11653,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - OperationProgress.decode = function decode(reader, length) { + OperationProgress.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.OperationProgress(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.progressPercent = reader.int32(); @@ -12862,12 +12954,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RestoreTableRequest.decode = function decode(reader, length) { + RestoreTableRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.RestoreTableRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.parent = reader.string(); @@ -13153,12 +13247,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RestoreTableMetadata.decode = function decode(reader, length) { + RestoreTableMetadata.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.RestoreTableMetadata(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -13451,12 +13547,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - OptimizeRestoredTableMetadata.decode = function decode(reader, length) { + OptimizeRestoredTableMetadata.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.OptimizeRestoredTableMetadata(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -13707,12 +13805,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CreateTableRequest.decode = function decode(reader, length) { + CreateTableRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.CreateTableRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.parent = reader.string(); @@ -13969,12 +14069,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Split.decode = function decode(reader, length) { + Split.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.CreateTableRequest.Split(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.key = reader.bytes(); @@ -14206,12 +14308,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CreateTableFromSnapshotRequest.decode = function decode(reader, length) { + CreateTableFromSnapshotRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.CreateTableFromSnapshotRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.parent = reader.string(); @@ -14470,12 +14574,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DropRowRangeRequest.decode = function decode(reader, length) { + DropRowRangeRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.DropRowRangeRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -14744,12 +14850,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListTablesRequest.decode = function decode(reader, length) { + ListTablesRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.ListTablesRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.parent = reader.string(); @@ -15036,12 +15144,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListTablesResponse.decode = function decode(reader, length) { + ListTablesResponse.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.ListTablesResponse(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.tables && message.tables.length)) @@ -15282,12 +15392,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetTableRequest.decode = function decode(reader, length) { + GetTableRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.GetTableRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -15548,12 +15660,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UpdateTableRequest.decode = function decode(reader, length) { + UpdateTableRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.UpdateTableRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.table = $root.google.bigtable.admin.v2.Table.decode(reader, reader.uint32()); @@ -15796,12 +15910,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UpdateTableMetadata.decode = function decode(reader, length) { + UpdateTableMetadata.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.UpdateTableMetadata(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -16034,12 +16150,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteTableRequest.decode = function decode(reader, length) { + DeleteTableRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.DeleteTableRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -16237,12 +16355,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UndeleteTableRequest.decode = function decode(reader, length) { + UndeleteTableRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.UndeleteTableRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -16462,12 +16582,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UndeleteTableMetadata.decode = function decode(reader, length) { + UndeleteTableMetadata.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.UndeleteTableMetadata(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -16724,12 +16846,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ModifyColumnFamiliesRequest.decode = function decode(reader, length) { + ModifyColumnFamiliesRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.ModifyColumnFamiliesRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -17027,12 +17151,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Modification.decode = function decode(reader, length) { + Modification.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.id = reader.string(); @@ -17316,12 +17442,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GenerateConsistencyTokenRequest.decode = function decode(reader, length) { + GenerateConsistencyTokenRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.GenerateConsistencyTokenRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -17519,12 +17647,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GenerateConsistencyTokenResponse.decode = function decode(reader, length) { + GenerateConsistencyTokenResponse.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.GenerateConsistencyTokenResponse(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.consistencyToken = reader.string(); @@ -17769,12 +17899,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CheckConsistencyRequest.decode = function decode(reader, length) { + CheckConsistencyRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.CheckConsistencyRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -18021,12 +18153,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StandardReadRemoteWrites.decode = function decode(reader, length) { + StandardReadRemoteWrites.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.StandardReadRemoteWrites(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -18196,12 +18330,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DataBoostReadLocalWrites.decode = function decode(reader, length) { + DataBoostReadLocalWrites.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.DataBoostReadLocalWrites(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -18382,12 +18518,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CheckConsistencyResponse.decode = function decode(reader, length) { + CheckConsistencyResponse.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.CheckConsistencyResponse(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.consistent = reader.bool(); @@ -18629,12 +18767,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SnapshotTableRequest.decode = function decode(reader, length) { + SnapshotTableRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.SnapshotTableRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -18886,12 +19026,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetSnapshotRequest.decode = function decode(reader, length) { + GetSnapshotRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.GetSnapshotRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -19111,12 +19253,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListSnapshotsRequest.decode = function decode(reader, length) { + ListSnapshotsRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.ListSnapshotsRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.parent = reader.string(); @@ -19352,12 +19496,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListSnapshotsResponse.decode = function decode(reader, length) { + ListSnapshotsResponse.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.ListSnapshotsResponse(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.snapshots && message.snapshots.length)) @@ -19587,12 +19733,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteSnapshotRequest.decode = function decode(reader, length) { + DeleteSnapshotRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.DeleteSnapshotRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -19812,12 +19960,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SnapshotTableMetadata.decode = function decode(reader, length) { + SnapshotTableMetadata.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.SnapshotTableMetadata(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.originalRequest = $root.google.bigtable.admin.v2.SnapshotTableRequest.decode(reader, reader.uint32()); @@ -20077,12 +20227,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CreateTableFromSnapshotMetadata.decode = function decode(reader, length) { + CreateTableFromSnapshotMetadata.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.CreateTableFromSnapshotMetadata(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.originalRequest = $root.google.bigtable.admin.v2.CreateTableFromSnapshotRequest.decode(reader, reader.uint32()); @@ -20342,12 +20494,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CreateBackupRequest.decode = function decode(reader, length) { + CreateBackupRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.CreateBackupRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.parent = reader.string(); @@ -20608,12 +20762,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CreateBackupMetadata.decode = function decode(reader, length) { + CreateBackupMetadata.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.CreateBackupMetadata(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -20869,12 +21025,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UpdateBackupRequest.decode = function decode(reader, length) { + UpdateBackupRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.UpdateBackupRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.backup = $root.google.bigtable.admin.v2.Backup.decode(reader, reader.uint32()); @@ -21095,12 +21253,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetBackupRequest.decode = function decode(reader, length) { + GetBackupRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.GetBackupRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -21298,12 +21458,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteBackupRequest.decode = function decode(reader, length) { + DeleteBackupRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.DeleteBackupRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -21545,12 +21707,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListBackupsRequest.decode = function decode(reader, length) { + ListBackupsRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.ListBackupsRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.parent = reader.string(); @@ -21810,12 +21974,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListBackupsResponse.decode = function decode(reader, length) { + ListBackupsResponse.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.ListBackupsResponse(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.backups && message.backups.length)) @@ -22078,12 +22244,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CopyBackupRequest.decode = function decode(reader, length) { + CopyBackupRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.CopyBackupRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.parent = reader.string(); @@ -22345,12 +22513,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CopyBackupMetadata.decode = function decode(reader, length) { + CopyBackupMetadata.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.CopyBackupMetadata(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -22605,12 +22775,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CreateAuthorizedViewRequest.decode = function decode(reader, length) { + CreateAuthorizedViewRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.CreateAuthorizedViewRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.parent = reader.string(); @@ -22860,12 +23032,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CreateAuthorizedViewMetadata.decode = function decode(reader, length) { + CreateAuthorizedViewMetadata.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.CreateAuthorizedViewMetadata(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.originalRequest = $root.google.bigtable.admin.v2.CreateAuthorizedViewRequest.decode(reader, reader.uint32()); @@ -23136,12 +23310,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListAuthorizedViewsRequest.decode = function decode(reader, length) { + ListAuthorizedViewsRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.ListAuthorizedViewsRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.parent = reader.string(); @@ -23418,12 +23594,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListAuthorizedViewsResponse.decode = function decode(reader, length) { + ListAuthorizedViewsResponse.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.ListAuthorizedViewsResponse(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.authorizedViews && message.authorizedViews.length)) @@ -23664,12 +23842,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetAuthorizedViewRequest.decode = function decode(reader, length) { + GetAuthorizedViewRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.GetAuthorizedViewRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -23931,12 +24111,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UpdateAuthorizedViewRequest.decode = function decode(reader, length) { + UpdateAuthorizedViewRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.UpdateAuthorizedViewRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.authorizedView = $root.google.bigtable.admin.v2.AuthorizedView.decode(reader, reader.uint32()); @@ -24191,12 +24373,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UpdateAuthorizedViewMetadata.decode = function decode(reader, length) { + UpdateAuthorizedViewMetadata.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.UpdateAuthorizedViewMetadata(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.originalRequest = $root.google.bigtable.admin.v2.UpdateAuthorizedViewRequest.decode(reader, reader.uint32()); @@ -24445,12 +24629,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteAuthorizedViewRequest.decode = function decode(reader, length) { + DeleteAuthorizedViewRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.DeleteAuthorizedViewRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -24686,12 +24872,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RestoreInfo.decode = function decode(reader, length) { + RestoreInfo.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.RestoreInfo(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.sourceType = reader.int32(); @@ -24931,12 +25119,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ChangeStreamConfig.decode = function decode(reader, length) { + ChangeStreamConfig.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.ChangeStreamConfig(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.retentionPeriod = $root.google.protobuf.Duration.decode(reader, reader.uint32()); @@ -25238,12 +25428,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Table.decode = function decode(reader, length) { + Table.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Table(), key, value; while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -25653,12 +25845,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ClusterState.decode = function decode(reader, length) { + ClusterState.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Table.ClusterState(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.replicationState = reader.int32(); @@ -25996,12 +26190,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - AutomatedBackupPolicy.decode = function decode(reader, length) { + AutomatedBackupPolicy.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Table.AutomatedBackupPolicy(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.retentionPeriod = $root.google.protobuf.Duration.decode(reader, reader.uint32()); @@ -26272,12 +26468,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - AuthorizedView.decode = function decode(reader, length) { + AuthorizedView.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.AuthorizedView(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -26535,12 +26733,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - FamilySubsets.decode = function decode(reader, length) { + FamilySubsets.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.AuthorizedView.FamilySubsets(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.qualifiers && message.qualifiers.length)) @@ -26802,12 +27002,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SubsetView.decode = function decode(reader, length) { + SubsetView.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.AuthorizedView.SubsetView(), key, value; while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.rowPrefixes && message.rowPrefixes.length)) @@ -27105,12 +27307,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ColumnFamily.decode = function decode(reader, length) { + ColumnFamily.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.ColumnFamily(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.gcRule = $root.google.bigtable.admin.v2.GcRule.decode(reader, reader.uint32()); @@ -27378,12 +27582,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GcRule.decode = function decode(reader, length) { + GcRule.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.GcRule(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.maxNumVersions = reader.int32(); @@ -27656,12 +27862,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Intersection.decode = function decode(reader, length) { + Intersection.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.GcRule.Intersection(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.rules && message.rules.length)) @@ -27880,12 +28088,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Union.decode = function decode(reader, length) { + Union.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.GcRule.Union(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.rules && message.rules.length)) @@ -28127,12 +28337,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - EncryptionInfo.decode = function decode(reader, length) { + EncryptionInfo.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.EncryptionInfo(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 3: { message.encryptionType = reader.int32(); @@ -28466,12 +28678,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Snapshot.decode = function decode(reader, length) { + Snapshot.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Snapshot(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -28921,12 +29135,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Backup.decode = function decode(reader, length) { + Backup.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Backup(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -29408,12 +29624,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BackupInfo.decode = function decode(reader, length) { + BackupInfo.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.BackupInfo(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.backup = reader.string(); @@ -29819,12 +30037,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Type.decode = function decode(reader, length) { + Type.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.bytesType = $root.google.bigtable.admin.v2.Type.Bytes.decode(reader, reader.uint32()); @@ -30293,12 +30513,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Bytes.decode = function decode(reader, length) { + Bytes.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type.Bytes(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.encoding = $root.google.bigtable.admin.v2.Type.Bytes.Encoding.decode(reader, reader.uint32()); @@ -30512,12 +30734,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Encoding.decode = function decode(reader, length) { + Encoding.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type.Bytes.Encoding(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.raw = $root.google.bigtable.admin.v2.Type.Bytes.Encoding.Raw.decode(reader, reader.uint32()); @@ -30711,12 +30935,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Raw.decode = function decode(reader, length) { + Raw.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type.Bytes.Encoding.Raw(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -30903,12 +31129,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - String.decode = function decode(reader, length) { + String.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type.String(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.encoding = $root.google.bigtable.admin.v2.Type.String.Encoding.decode(reader, reader.uint32()); @@ -31133,12 +31361,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Encoding.decode = function decode(reader, length) { + Encoding.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type.String.Encoding(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.utf8Raw = $root.google.bigtable.admin.v2.Type.String.Encoding.Utf8Raw.decode(reader, reader.uint32()); @@ -31356,12 +31586,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Utf8Raw.decode = function decode(reader, length) { + Utf8Raw.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type.String.Encoding.Utf8Raw(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -31531,12 +31763,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Utf8Bytes.decode = function decode(reader, length) { + Utf8Bytes.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type.String.Encoding.Utf8Bytes(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -31723,12 +31957,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Int64.decode = function decode(reader, length) { + Int64.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type.Int64(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.encoding = $root.google.bigtable.admin.v2.Type.Int64.Encoding.decode(reader, reader.uint32()); @@ -31942,12 +32178,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Encoding.decode = function decode(reader, length) { + Encoding.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type.Int64.Encoding(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.bigEndianBytes = $root.google.bigtable.admin.v2.Type.Int64.Encoding.BigEndianBytes.decode(reader, reader.uint32()); @@ -32152,12 +32390,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BigEndianBytes.decode = function decode(reader, length) { + BigEndianBytes.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type.Int64.Encoding.BigEndianBytes(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.bytesType = $root.google.bigtable.admin.v2.Type.Bytes.decode(reader, reader.uint32()); @@ -32355,12 +32595,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Bool.decode = function decode(reader, length) { + Bool.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type.Bool(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -32530,12 +32772,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Float32.decode = function decode(reader, length) { + Float32.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type.Float32(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -32705,12 +32949,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Float64.decode = function decode(reader, length) { + Float64.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type.Float64(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -32880,12 +33126,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Timestamp.decode = function decode(reader, length) { + Timestamp.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type.Timestamp(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -33055,12 +33303,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Date.decode = function decode(reader, length) { + Date.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type.Date(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -33243,12 +33493,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Struct.decode = function decode(reader, length) { + Struct.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type.Struct(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.fields && message.fields.length)) @@ -33473,12 +33725,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Field.decode = function decode(reader, length) { + Field.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type.Struct.Field(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.fieldName = reader.string(); @@ -33697,12 +33951,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Array.decode = function decode(reader, length) { + Array.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type.Array(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.elementType = $root.google.bigtable.admin.v2.Type.decode(reader, reader.uint32()); @@ -33916,12 +34172,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Map.decode = function decode(reader, length) { + Map.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type.Map(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.keyType = $root.google.bigtable.admin.v2.Type.decode(reader, reader.uint32()); @@ -34211,12 +34469,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Aggregate.decode = function decode(reader, length) { + Aggregate.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type.Aggregate(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.inputType = $root.google.bigtable.admin.v2.Type.decode(reader, reader.uint32()); @@ -34518,12 +34778,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Sum.decode = function decode(reader, length) { + Sum.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type.Aggregate.Sum(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -34693,12 +34955,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Max.decode = function decode(reader, length) { + Max.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type.Aggregate.Max(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -34868,12 +35132,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Min.decode = function decode(reader, length) { + Min.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type.Aggregate.Min(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -35043,12 +35309,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - HyperLogLogPlusPlusUniqueCount.decode = function decode(reader, length) { + HyperLogLogPlusPlusUniqueCount.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.admin.v2.Type.Aggregate.HyperLogLogPlusPlusUniqueCount(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -35692,12 +35960,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReadRowsRequest.decode = function decode(reader, length) { + ReadRowsRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ReadRowsRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.tableName = reader.string(); @@ -36068,12 +36338,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReadRowsResponse.decode = function decode(reader, length) { + ReadRowsResponse.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ReadRowsResponse(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.chunks && message.chunks.length)) @@ -36431,12 +36703,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CellChunk.decode = function decode(reader, length) { + CellChunk.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ReadRowsResponse.CellChunk(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.rowKey = reader.bytes(); @@ -36824,12 +37098,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SampleRowKeysRequest.decode = function decode(reader, length) { + SampleRowKeysRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.SampleRowKeysRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.tableName = reader.string(); @@ -37063,12 +37339,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SampleRowKeysResponse.decode = function decode(reader, length) { + SampleRowKeysResponse.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.v2.SampleRowKeysResponse(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.rowKey = reader.bytes(); @@ -37348,12 +37626,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MutateRowRequest.decode = function decode(reader, length) { + MutateRowRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.MutateRowRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.tableName = reader.string(); @@ -37618,12 +37898,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MutateRowResponse.decode = function decode(reader, length) { + MutateRowResponse.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.MutateRowResponse(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -37839,12 +38121,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MutateRowsRequest.decode = function decode(reader, length) { + MutateRowsRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.MutateRowsRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.tableName = reader.string(); @@ -38109,12 +38393,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Entry.decode = function decode(reader, length) { + Entry.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.MutateRowsRequest.Entry(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.rowKey = reader.bytes(); @@ -38316,7 +38602,12 @@ // OneOf field names bound to virtual getters and setters var $oneOfFields; - // Virtual OneOf for proto3 optional field + /** + * MutateRowsResponse _rateLimitInfo. 
+ * @member {"rateLimitInfo"|undefined} _rateLimitInfo + * @memberof google.bigtable.v2.MutateRowsResponse + * @instance + */ Object.defineProperty(MutateRowsResponse.prototype, "_rateLimitInfo", { get: $util.oneOfGetter($oneOfFields = ["rateLimitInfo"]), set: $util.oneOfSetter($oneOfFields) @@ -38378,12 +38669,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MutateRowsResponse.decode = function decode(reader, length) { + MutateRowsResponse.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.MutateRowsResponse(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.entries && message.entries.length)) @@ -38631,12 +38924,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Entry.decode = function decode(reader, length) { + Entry.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.MutateRowsResponse.Entry(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.index = reader.int64(); @@ -38880,12 +39175,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RateLimitInfo.decode = function decode(reader, length) { + RateLimitInfo.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.RateLimitInfo(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.period = $root.google.protobuf.Duration.decode(reader, reader.uint32()); @@ -39171,12 +39468,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CheckAndMutateRowRequest.decode = function decode(reader, length) { + CheckAndMutateRowRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.CheckAndMutateRowRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.tableName = reader.string(); @@ -39501,12 +39800,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CheckAndMutateRowResponse.decode = function decode(reader, length) { + CheckAndMutateRowResponse.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.v2.CheckAndMutateRowResponse(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.predicateMatched = reader.bool(); @@ -39715,12 +40016,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PingAndWarmRequest.decode = function decode(reader, length) { + PingAndWarmRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.PingAndWarmRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -39920,12 +40223,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PingAndWarmResponse.decode = function decode(reader, length) { + PingAndWarmResponse.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.PingAndWarmResponse(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -40152,12 +40457,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReadModifyWriteRowRequest.decode = function decode(reader, length) { + ReadModifyWriteRowRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ReadModifyWriteRowRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.tableName = reader.string(); @@ -40433,12 +40740,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReadModifyWriteRowResponse.decode = function decode(reader, length) { + ReadModifyWriteRowResponse.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ReadModifyWriteRowResponse(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.row = $root.google.bigtable.v2.Row.decode(reader, reader.uint32()); @@ -40652,12 +40961,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GenerateInitialChangeStreamPartitionsRequest.decode = function decode(reader, length) { + GenerateInitialChangeStreamPartitionsRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.tableName = reader.string(); @@ -40868,12 +41179,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GenerateInitialChangeStreamPartitionsResponse.decode = function decode(reader, length) { + GenerateInitialChangeStreamPartitionsResponse.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.partition = $root.google.bigtable.v2.StreamPartition.decode(reader, reader.uint32()); @@ -41156,12 +41469,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReadChangeStreamRequest.decode = function decode(reader, length) { + ReadChangeStreamRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ReadChangeStreamRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.tableName = reader.string(); @@ -41506,12 +41821,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReadChangeStreamResponse.decode = function decode(reader, length) { + ReadChangeStreamResponse.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ReadChangeStreamResponse(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.dataChange = $root.google.bigtable.v2.ReadChangeStreamResponse.DataChange.decode(reader, reader.uint32()); @@ -41775,12 +42092,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MutationChunk.decode = function decode(reader, length) { + MutationChunk.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.chunkInfo = $root.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo.decode(reader, reader.uint32()); @@ -42020,12 +42339,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ChunkInfo.decode = function decode(reader, length) { + ChunkInfo.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.chunkedValueSize = reader.int32(); @@ -42341,12 +42662,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DataChange.decode = function decode(reader, length) { + DataChange.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ReadChangeStreamResponse.DataChange(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.type = reader.int32(); @@ -42738,12 +43061,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Heartbeat.decode = function decode(reader, length) { + Heartbeat.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.continuationToken = $root.google.bigtable.v2.StreamContinuationToken.decode(reader, reader.uint32()); @@ -42990,12 +43315,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CloseStream.decode = function decode(reader, length) { + CloseStream.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ReadChangeStreamResponse.CloseStream(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.status = $root.google.rpc.Status.decode(reader, reader.uint32()); @@ -43338,12 +43665,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteQueryRequest.decode = function decode(reader, length) { + ExecuteQueryRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ExecuteQueryRequest(), key, value; while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.instanceName = reader.string(); @@ -43686,12 +44015,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteQueryResponse.decode = function decode(reader, length) { + ExecuteQueryResponse.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ExecuteQueryResponse(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.metadata = $root.google.bigtable.v2.ResultSetMetadata.decode(reader, reader.uint32()); @@ -43936,12 +44267,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Row.decode = function decode(reader, length) { + Row.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Row(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.key = reader.bytes(); @@ -44193,12 +44526,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Family.decode = function decode(reader, length) { + Family.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Family(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -44441,12 +44776,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Column.decode = function decode(reader, length) { + Column.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Column(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.qualifier = reader.bytes(); @@ -44709,12 +45046,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Cell.decode = function decode(reader, length) { + Cell.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Cell(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.timestampMicros = reader.int64(); @@ -45099,12 +45438,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Value.decode = function decode(reader, length) { + Value.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Value(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 7: { message.type = $root.google.bigtable.v2.Type.decode(reader, reader.uint32()); @@ -45532,12 +45873,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ArrayValue.decode = function decode(reader, length) { + ArrayValue.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ArrayValue(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.values && message.values.length)) @@ -45812,12 +46155,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RowRange.decode = function decode(reader, length) { + RowRange.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.RowRange(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.startKeyClosed = reader.bytes(); @@ -46098,12 +46443,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RowSet.decode = function decode(reader, length) { + RowSet.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.RowSet(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.rowKeys && message.rowKeys.length)) @@ -46419,12 +46766,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ColumnRange.decode = function decode(reader, length) { + ColumnRange.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ColumnRange(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.familyName = reader.string(); @@ -46714,12 +47063,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - TimestampRange.decode = function decode(reader, length) { + TimestampRange.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.TimestampRange(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.startTimestampMicros = reader.int64(); @@ -47016,12 +47367,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValueRange.decode = function decode(reader, length) { + ValueRange.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ValueRange(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.startValueClosed = reader.bytes(); @@ -47499,12 +47852,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RowFilter.decode = function decode(reader, length) { + RowFilter.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.RowFilter(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.chain = $root.google.bigtable.v2.RowFilter.Chain.decode(reader, reader.uint32()); @@ -48074,12 +48429,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Chain.decode = function decode(reader, length) { + Chain.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.RowFilter.Chain(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.filters && message.filters.length)) @@ -48298,12 +48655,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Interleave.decode = function decode(reader, length) { + Interleave.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.v2.RowFilter.Interleave(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.filters && message.filters.length)) @@ -48542,12 +48901,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Condition.decode = function decode(reader, length) { + Condition.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.RowFilter.Condition(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.predicateFilter = $root.google.bigtable.v2.RowFilter.decode(reader, reader.uint32()); @@ -48857,12 +49218,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Mutation.decode = function decode(reader, length) { + Mutation.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Mutation(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.setCell = $root.google.bigtable.v2.Mutation.SetCell.decode(reader, reader.uint32()); @@ -49220,12 +49583,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetCell.decode = function decode(reader, length) { + SetCell.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Mutation.SetCell(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.familyName = reader.string(); @@ -49525,12 +49890,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - AddToCell.decode = function decode(reader, length) { + AddToCell.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Mutation.AddToCell(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.familyName = reader.string(); @@ -49813,12 +50180,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MergeToCell.decode = function decode(reader, length) { + MergeToCell.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Mutation.MergeToCell(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.familyName = reader.string(); @@ -50090,12 +50459,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteFromColumn.decode = function decode(reader, length) { + DeleteFromColumn.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Mutation.DeleteFromColumn(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.familyName = reader.string(); @@ -50332,12 +50703,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteFromFamily.decode = function decode(reader, length) { + DeleteFromFamily.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Mutation.DeleteFromFamily(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.familyName = reader.string(); @@ -50524,12 +50897,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteFromRow.decode = function decode(reader, length) { + DeleteFromRow.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Mutation.DeleteFromRow(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -50760,12 +51135,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReadModifyWriteRule.decode = function decode(reader, length) { + ReadModifyWriteRule.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ReadModifyWriteRule(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.familyName = reader.string(); @@ -51033,12 +51410,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StreamPartition.decode = function decode(reader, length) { + StreamPartition.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.v2.StreamPartition(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.rowRange = $root.google.bigtable.v2.RowRange.decode(reader, reader.uint32()); @@ -51243,12 +51622,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StreamContinuationTokens.decode = function decode(reader, length) { + StreamContinuationTokens.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.StreamContinuationTokens(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.tokens && message.tokens.length)) @@ -51476,12 +51857,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StreamContinuationToken.decode = function decode(reader, length) { + StreamContinuationToken.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.StreamContinuationToken(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.partition = $root.google.bigtable.v2.StreamPartition.decode(reader, reader.uint32()); @@ -51686,12 +52069,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ProtoFormat.decode = function decode(reader, length) { + ProtoFormat.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ProtoFormat(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -51883,12 +52268,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ColumnMetadata.decode = function decode(reader, length) { + ColumnMetadata.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ColumnMetadata(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -52106,12 +52493,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ProtoSchema.decode = function decode(reader, length) { + ProtoSchema.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ProtoSchema(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.columns && message.columns.length)) @@ -52342,12 +52731,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ResultSetMetadata.decode = function decode(reader, length) { + ResultSetMetadata.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ResultSetMetadata(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.protoSchema = $root.google.bigtable.v2.ProtoSchema.decode(reader, reader.uint32()); @@ -52557,12 +52948,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ProtoRows.decode = function decode(reader, length) { + ProtoRows.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ProtoRows(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 2: { if (!(message.values && message.values.length)) @@ -52779,12 +53172,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ProtoRowsBatch.decode = function decode(reader, length) { + ProtoRowsBatch.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ProtoRowsBatch(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.batchData = reader.bytes(); @@ -53027,12 +53422,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PartialResultSet.decode = function decode(reader, length) { + PartialResultSet.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.PartialResultSet(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 3: { message.protoRowsBatch = $root.google.bigtable.v2.ProtoRowsBatch.decode(reader, reader.uint32()); @@ -53410,12 +53807,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Type.decode = function decode(reader, length) { + Type.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.bytesType = $root.google.bigtable.v2.Type.Bytes.decode(reader, reader.uint32()); @@ -53884,12 +54283,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Bytes.decode = function decode(reader, length) { + Bytes.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type.Bytes(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.encoding = $root.google.bigtable.v2.Type.Bytes.Encoding.decode(reader, reader.uint32()); @@ -54103,12 +54504,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Encoding.decode = function decode(reader, length) { + Encoding.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type.Bytes.Encoding(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.raw = $root.google.bigtable.v2.Type.Bytes.Encoding.Raw.decode(reader, reader.uint32()); @@ -54302,12 +54705,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Raw.decode = function decode(reader, length) { + Raw.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type.Bytes.Encoding.Raw(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -54494,12 +54899,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - String.decode = function decode(reader, length) { + String.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type.String(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.encoding = $root.google.bigtable.v2.Type.String.Encoding.decode(reader, reader.uint32()); @@ -54724,12 +55131,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Encoding.decode = function decode(reader, length) { + Encoding.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type.String.Encoding(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.utf8Raw = $root.google.bigtable.v2.Type.String.Encoding.Utf8Raw.decode(reader, reader.uint32()); @@ -54947,12 +55356,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Utf8Raw.decode = function decode(reader, length) { + Utf8Raw.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type.String.Encoding.Utf8Raw(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -55122,12 +55533,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Utf8Bytes.decode = function decode(reader, length) { + Utf8Bytes.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type.String.Encoding.Utf8Bytes(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -55314,12 +55727,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Int64.decode = function decode(reader, length) { + Int64.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type.Int64(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.encoding = $root.google.bigtable.v2.Type.Int64.Encoding.decode(reader, reader.uint32()); @@ -55533,12 +55948,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Encoding.decode = function decode(reader, length) { + Encoding.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type.Int64.Encoding(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.bigEndianBytes = $root.google.bigtable.v2.Type.Int64.Encoding.BigEndianBytes.decode(reader, reader.uint32()); @@ -55743,12 +56160,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BigEndianBytes.decode = function decode(reader, length) { + BigEndianBytes.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type.Int64.Encoding.BigEndianBytes(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.bytesType = $root.google.bigtable.v2.Type.Bytes.decode(reader, reader.uint32()); @@ -55946,12 +56365,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Bool.decode = function decode(reader, length) { + Bool.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type.Bool(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -56121,12 +56542,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Float32.decode = function decode(reader, length) { + Float32.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type.Float32(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -56296,12 +56719,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Float64.decode = function decode(reader, length) { + Float64.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type.Float64(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -56471,12 +56896,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Timestamp.decode = function decode(reader, length) { + Timestamp.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type.Timestamp(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -56646,12 +57073,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Date.decode = function decode(reader, length) { + Date.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type.Date(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -56834,12 +57263,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Struct.decode = function decode(reader, length) { + Struct.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type.Struct(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.fields && message.fields.length)) @@ -57064,12 +57495,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Field.decode = function decode(reader, length) { + Field.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type.Struct.Field(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.fieldName = reader.string(); @@ -57288,12 +57721,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Array.decode = function decode(reader, length) { + Array.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type.Array(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.elementType = $root.google.bigtable.v2.Type.decode(reader, reader.uint32()); @@ -57507,12 +57942,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Map.decode = function decode(reader, length) { + Map.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type.Map(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.keyType = $root.google.bigtable.v2.Type.decode(reader, reader.uint32()); @@ -57802,12 +58239,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Aggregate.decode = function decode(reader, length) { + Aggregate.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type.Aggregate(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.inputType = $root.google.bigtable.v2.Type.decode(reader, reader.uint32()); @@ -58109,12 +58548,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Sum.decode = function decode(reader, length) { + Sum.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type.Aggregate.Sum(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -58284,12 +58725,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Max.decode = function decode(reader, length) { + Max.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type.Aggregate.Max(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -58459,12 +58902,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Min.decode = function decode(reader, length) { + Min.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type.Aggregate.Min(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -58634,12 +59079,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - HyperLogLogPlusPlusUniqueCount.decode = function decode(reader, length) { + HyperLogLogPlusPlusUniqueCount.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.Type.Aggregate.HyperLogLogPlusPlusUniqueCount(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -58859,12 +59306,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReadIterationStats.decode = function decode(reader, length) { + ReadIterationStats.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ReadIterationStats(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.rowsSeenCount = reader.int64(); @@ -59155,12 +59604,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RequestLatencyStats.decode = function decode(reader, length) { + RequestLatencyStats.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.RequestLatencyStats(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.frontendServerLatency = $root.google.protobuf.Duration.decode(reader, reader.uint32()); @@ -59374,12 +59825,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - FullReadStatsView.decode = function decode(reader, length) { + FullReadStatsView.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.FullReadStatsView(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.readIterationStats = $root.google.bigtable.v2.ReadIterationStats.decode(reader, reader.uint32()); @@ -59614,12 +60067,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RequestStats.decode = function decode(reader, length) { + RequestStats.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.RequestStats(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.fullReadStatsView = $root.google.bigtable.v2.FullReadStatsView.decode(reader, reader.uint32()); @@ -59893,12 +60348,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - FeatureFlags.decode = function decode(reader, length) { + FeatureFlags.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.FeatureFlags(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.reverseScans = reader.bool(); @@ -60128,13 +60585,23 @@ // OneOf field names bound to virtual getters and setters var $oneOfFields; - // Virtual OneOf for proto3 optional field + /** + * ResponseParams _zoneId. + * @member {"zoneId"|undefined} _zoneId + * @memberof google.bigtable.v2.ResponseParams + * @instance + */ Object.defineProperty(ResponseParams.prototype, "_zoneId", { get: $util.oneOfGetter($oneOfFields = ["zoneId"]), set: $util.oneOfSetter($oneOfFields) }); - // Virtual OneOf for proto3 optional field + /** + * ResponseParams _clusterId. 
+ * @member {"clusterId"|undefined} _clusterId + * @memberof google.bigtable.v2.ResponseParams + * @instance + */ Object.defineProperty(ResponseParams.prototype, "_clusterId", { get: $util.oneOfGetter($oneOfFields = ["clusterId"]), set: $util.oneOfSetter($oneOfFields) @@ -60195,12 +60662,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ResponseParams.decode = function decode(reader, length) { + ResponseParams.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.bigtable.v2.ResponseParams(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.zoneId = reader.string(); @@ -60446,12 +60915,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Http.decode = function decode(reader, length) { + Http.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.api.Http(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.rules && message.rules.length)) @@ -60796,12 +61267,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - HttpRule.decode = function decode(reader, length) { + HttpRule.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.api.HttpRule(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.selector = reader.string(); @@ -61180,12 +61653,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CustomHttpPattern.decode = function decode(reader, length) { + CustomHttpPattern.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.api.CustomHttpPattern(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.kind = reader.string(); @@ -61412,12 +61887,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CommonLanguageSettings.decode = function decode(reader, length) { + CommonLanguageSettings.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.api.CommonLanguageSettings(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.referenceDocsUri = reader.string(); @@ -61781,12 +62258,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ClientLibrarySettings.decode = function decode(reader, length) { + ClientLibrarySettings.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.api.ClientLibrarySettings(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.version = reader.string(); @@ -62310,12 +62789,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Publishing.decode = function decode(reader, length) { + Publishing.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.api.Publishing(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 2: { if (!(message.methodSettings && message.methodSettings.length)) @@ -62761,12 +63242,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - JavaSettings.decode = function decode(reader, length) { + JavaSettings.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.api.JavaSettings(), key, value; while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.libraryPackage = reader.string(); @@ -63028,12 +63511,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CppSettings.decode = function decode(reader, length) { + CppSettings.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.api.CppSettings(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.common = $root.google.api.CommonLanguageSettings.decode(reader, reader.uint32()); @@ -63236,12 +63721,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PhpSettings.decode = function decode(reader, length) { + PhpSettings.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.api.PhpSettings(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.common = $root.google.api.CommonLanguageSettings.decode(reader, reader.uint32()); @@ -63444,12 +63931,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PythonSettings.decode = function decode(reader, length) { + PythonSettings.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.api.PythonSettings(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.common = $root.google.api.CommonLanguageSettings.decode(reader, reader.uint32()); @@ -63652,12 +64141,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - NodeSettings.decode = function decode(reader, length) { + NodeSettings.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.api.NodeSettings(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.common = $root.google.api.CommonLanguageSettings.decode(reader, reader.uint32()); @@ -63925,12 +64416,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DotnetSettings.decode = function decode(reader, length) { + DotnetSettings.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.api.DotnetSettings(), key, value; while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.common = $root.google.api.CommonLanguageSettings.decode(reader, reader.uint32()); @@ -64304,12 +64797,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RubySettings.decode = function decode(reader, length) { + RubySettings.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.api.RubySettings(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.common = $root.google.api.CommonLanguageSettings.decode(reader, reader.uint32()); @@ -64512,12 +65007,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GoSettings.decode = function decode(reader, length) { + GoSettings.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.api.GoSettings(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.common = $root.google.api.CommonLanguageSettings.decode(reader, reader.uint32()); @@ -64744,12 +65241,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MethodSettings.decode = function decode(reader, length) { + MethodSettings.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.api.MethodSettings(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.selector = reader.string(); @@ -65022,12 +65521,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - LongRunning.decode = function decode(reader, length) { + LongRunning.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.api.MethodSettings.LongRunning(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.initialPollDelay = $root.google.protobuf.Duration.decode(reader, reader.uint32()); @@ -65449,12 +65950,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ResourceDescriptor.decode = function decode(reader, length) { + ResourceDescriptor.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.api.ResourceDescriptor(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.type = reader.string(); @@ -65844,12 +66347,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ResourceReference.decode = function decode(reader, length) { + ResourceReference.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.api.ResourceReference(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.type = reader.string(); @@ -66062,12 +66567,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RoutingRule.decode = function decode(reader, length) { + RoutingRule.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.api.RoutingRule(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 2: { if (!(message.routingParameters && message.routingParameters.length)) @@ -66295,12 +66802,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RoutingParameter.decode = function decode(reader, length) { + RoutingParameter.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.api.RoutingParameter(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.field = reader.string(); @@ -66525,12 +67034,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - FileDescriptorSet.decode = function decode(reader, length) { + FileDescriptorSet.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.FileDescriptorSet(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.file && message.file.length)) @@ -66925,12 +67436,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - FileDescriptorProto.decode = function decode(reader, length) { + FileDescriptorProto.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.FileDescriptorProto(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -67592,12 +68105,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DescriptorProto.decode = function decode(reader, length) { + DescriptorProto.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.DescriptorProto(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -68077,12 +68592,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExtensionRange.decode = function decode(reader, length) { + ExtensionRange.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.protobuf.DescriptorProto.ExtensionRange(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.start = reader.int32(); @@ -68321,12 +68838,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReservedRange.decode = function decode(reader, length) { + ReservedRange.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.DescriptorProto.ReservedRange(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.start = reader.int32(); @@ -68577,12 +69096,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExtensionRangeOptions.decode = function decode(reader, length) { + ExtensionRangeOptions.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.ExtensionRangeOptions(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 999: { if (!(message.uninterpretedOption && message.uninterpretedOption.length)) @@ -68922,12 +69443,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Declaration.decode = function decode(reader, length) { + Declaration.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.ExtensionRangeOptions.Declaration(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.number = reader.int32(); @@ -69301,12 +69824,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - FieldDescriptorProto.decode = function decode(reader, length) { + FieldDescriptorProto.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.FieldDescriptorProto(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -69826,12 +70351,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - OneofDescriptorProto.decode = function decode(reader, length) { + OneofDescriptorProto.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.protobuf.OneofDescriptorProto(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -70097,12 +70624,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - EnumDescriptorProto.decode = function decode(reader, length) { + EnumDescriptorProto.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.EnumDescriptorProto(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -70416,12 +70945,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - EnumReservedRange.decode = function decode(reader, length) { + EnumReservedRange.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.EnumDescriptorProto.EnumReservedRange(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.start = reader.int32(); @@ -70657,12 +71188,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - EnumValueDescriptorProto.decode = function decode(reader, length) { + EnumValueDescriptorProto.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.EnumValueDescriptorProto(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -70914,12 +71447,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ServiceDescriptorProto.decode = function decode(reader, length) { + ServiceDescriptorProto.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.ServiceDescriptorProto(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -71222,12 +71757,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MethodDescriptorProto.decode = function decode(reader, length) { + MethodDescriptorProto.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.protobuf.MethodDescriptorProto(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -71726,12 +72263,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - FileOptions.decode = function decode(reader, length) { + FileOptions.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.FileOptions(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.javaPackage = reader.string(); @@ -72346,12 +72885,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MessageOptions.decode = function decode(reader, length) { + MessageOptions.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.MessageOptions(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.messageSetWireFormat = reader.bool(); @@ -72826,12 +73367,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - FieldOptions.decode = function decode(reader, length) { + FieldOptions.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.FieldOptions(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.ctype = reader.int32(); @@ -73557,12 +74100,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - EditionDefault.decode = function decode(reader, length) { + EditionDefault.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.FieldOptions.EditionDefault(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 3: { message.edition = reader.int32(); @@ -73853,12 +74398,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - OneofOptions.decode = function decode(reader, length) { + OneofOptions.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.protobuf.OneofOptions(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.features = $root.google.protobuf.FeatureSet.decode(reader, reader.uint32()); @@ -74139,12 +74686,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - EnumOptions.decode = function decode(reader, length) { + EnumOptions.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.EnumOptions(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 2: { message.allowAlias = reader.bool(); @@ -74451,12 +75000,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - EnumValueOptions.decode = function decode(reader, length) { + EnumValueOptions.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.EnumValueOptions(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.deprecated = reader.bool(); @@ -74773,12 +75324,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ServiceOptions.decode = function decode(reader, length) { + ServiceOptions.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.ServiceOptions(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 34: { message.features = $root.google.protobuf.FeatureSet.decode(reader, reader.uint32()); @@ -75143,12 +75696,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MethodOptions.decode = function decode(reader, length) { + MethodOptions.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.MethodOptions(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 33: { message.deprecated = reader.bool(); @@ -75594,12 +76149,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UninterpretedOption.decode = function decode(reader, length) { + UninterpretedOption.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.protobuf.UninterpretedOption(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 2: { if (!(message.name && message.name.length)) @@ -75933,12 +76490,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - NamePart.decode = function decode(reader, length) { + NamePart.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.UninterpretedOption.NamePart(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.namePart = reader.string(); @@ -76209,12 +76768,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - FeatureSet.decode = function decode(reader, length) { + FeatureSet.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.FeatureSet(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.fieldPresence = reader.int32(); @@ -76744,12 +77305,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - FeatureSetDefaults.decode = function decode(reader, length) { + FeatureSetDefaults.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.FeatureSetDefaults(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.defaults && message.defaults.length)) @@ -77128,12 +77691,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - FeatureSetEditionDefault.decode = function decode(reader, length) { + FeatureSetEditionDefault.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 3: { message.edition = reader.int32(); @@ -77418,12 +77983,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SourceCodeInfo.decode = function decode(reader, length) { + SourceCodeInfo.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.protobuf.SourceCodeInfo(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.location && message.location.length)) @@ -77693,12 +78260,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Location.decode = function decode(reader, length) { + Location.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.SourceCodeInfo.Location(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.path && message.path.length)) @@ -78004,12 +78573,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GeneratedCodeInfo.decode = function decode(reader, length) { + GeneratedCodeInfo.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.GeneratedCodeInfo(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.annotation && message.annotation.length)) @@ -78272,12 +78843,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Annotation.decode = function decode(reader, length) { + Annotation.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.GeneratedCodeInfo.Annotation(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.path && message.path.length)) @@ -78598,12 +79171,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Duration.decode = function decode(reader, length) { + Duration.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.Duration(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.seconds = reader.int64(); @@ -78839,12 +79414,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Timestamp.decode = function decode(reader, length) { + Timestamp.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.protobuf.Timestamp(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.seconds = reader.int64(); @@ -79071,12 +79648,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - FieldMask.decode = function decode(reader, length) { + FieldMask.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.FieldMask(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.paths && message.paths.length)) @@ -79299,12 +79878,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Any.decode = function decode(reader, length) { + Any.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.Any(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.type_url = reader.string(); @@ -79513,12 +80094,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Empty.decode = function decode(reader, length) { + Empty.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.Empty(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { default: reader.skipType(tag & 7); @@ -79699,12 +80282,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DoubleValue.decode = function decode(reader, length) { + DoubleValue.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.DoubleValue(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.value = reader.double(); @@ -79902,12 +80487,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - FloatValue.decode = function decode(reader, length) { + FloatValue.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.protobuf.FloatValue(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.value = reader.float(); @@ -80105,12 +80692,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Int64Value.decode = function decode(reader, length) { + Int64Value.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.Int64Value(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.value = reader.int64(); @@ -80322,12 +80911,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UInt64Value.decode = function decode(reader, length) { + UInt64Value.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.UInt64Value(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.value = reader.uint64(); @@ -80539,12 +81130,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Int32Value.decode = function decode(reader, length) { + Int32Value.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.Int32Value(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.value = reader.int32(); @@ -80742,12 +81335,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UInt32Value.decode = function decode(reader, length) { + UInt32Value.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.UInt32Value(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.value = reader.uint32(); @@ -80945,12 +81540,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BoolValue.decode = function decode(reader, length) { + BoolValue.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.protobuf.BoolValue(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.value = reader.bool(); @@ -81148,12 +81745,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StringValue.decode = function decode(reader, length) { + StringValue.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.StringValue(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.value = reader.string(); @@ -81351,12 +81950,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BytesValue.decode = function decode(reader, length) { + BytesValue.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.protobuf.BytesValue(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.value = reader.bytes(); @@ -81740,12 +82341,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetIamPolicyRequest.decode = function decode(reader, length) { + SetIamPolicyRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.iam.v1.SetIamPolicyRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.resource = reader.string(); @@ -81989,12 +82592,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetIamPolicyRequest.decode = function decode(reader, length) { + GetIamPolicyRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.iam.v1.GetIamPolicyRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.resource = reader.string(); @@ -82223,12 +82828,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - TestIamPermissionsRequest.decode = function decode(reader, length) { + TestIamPermissionsRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.iam.v1.TestIamPermissionsRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.resource = reader.string(); @@ -82455,12 +83062,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - TestIamPermissionsResponse.decode = function decode(reader, length) { + TestIamPermissionsResponse.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.iam.v1.TestIamPermissionsResponse(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.permissions && message.permissions.length)) @@ -82672,12 +83281,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetPolicyOptions.decode = function decode(reader, length) { + GetPolicyOptions.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.iam.v1.GetPolicyOptions(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.requestedPolicyVersion = reader.int32(); @@ -82912,12 +83523,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Policy.decode = function decode(reader, length) { + Policy.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.iam.v1.Policy(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.version = reader.int32(); @@ -83225,12 +83838,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Binding.decode = function decode(reader, length) { + Binding.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.iam.v1.Binding(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.role = reader.string(); @@ -83486,12 +84101,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - AuditConfig.decode = function decode(reader, length) { + AuditConfig.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.iam.v1.AuditConfig(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.service = reader.string(); @@ -83734,12 +84351,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - AuditLogConfig.decode = function decode(reader, length) { + AuditLogConfig.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.iam.v1.AuditLogConfig(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.logType = reader.int32(); @@ -84026,12 +84645,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PolicyDelta.decode = function decode(reader, length) { + PolicyDelta.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.iam.v1.PolicyDelta(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.bindingDeltas && message.bindingDeltas.length)) @@ -84313,12 +84934,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BindingDelta.decode = function decode(reader, length) { + BindingDelta.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.iam.v1.BindingDelta(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.action = reader.int32(); @@ -84631,12 +85254,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - AuditConfigDelta.decode = function decode(reader, length) { + AuditConfigDelta.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.iam.v1.AuditConfigDelta(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.action = reader.int32(); @@ -84959,12 +85584,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Expr.decode = function decode(reader, length) { + Expr.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.type.Expr(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.expression = reader.string(); @@ -85221,12 +85848,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Date.decode = function decode(reader, length) { + Date.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.type.Date(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.year = reader.int32(); @@ -85719,12 +86348,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Operation.decode = function decode(reader, length) { + Operation.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.longrunning.Operation(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -85999,12 +86630,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetOperationRequest.decode = function decode(reader, length) { + GetOperationRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.longrunning.GetOperationRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -86235,12 +86868,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListOperationsRequest.decode = function decode(reader, length) { + ListOperationsRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.longrunning.ListOperationsRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 4: { message.name = reader.string(); @@ -86488,12 +87123,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListOperationsResponse.decode = function decode(reader, length) { + ListOperationsResponse.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.longrunning.ListOperationsResponse(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { if (!(message.operations && message.operations.length)) @@ -86723,12 +87360,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CancelOperationRequest.decode = function decode(reader, length) { + CancelOperationRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.longrunning.CancelOperationRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -86926,12 +87565,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteOperationRequest.decode = function decode(reader, length) { + DeleteOperationRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.longrunning.DeleteOperationRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -87140,12 +87781,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - WaitOperationRequest.decode = function decode(reader, length) { + WaitOperationRequest.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.longrunning.WaitOperationRequest(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.name = reader.string(); @@ -87372,12 +88015,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - OperationInfo.decode = function decode(reader, length) { + OperationInfo.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.longrunning.OperationInfo(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.responseType = reader.string(); @@ -87624,12 +88269,14 @@ * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Status.decode = function decode(reader, length) { + Status.decode = function decode(reader, length, error) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.rpc.Status(); while (reader.pos < end) { var tag = reader.uint32(); + if (tag === error) + break; switch (tag >>> 3) { case 1: { message.code = reader.int32(); diff --git a/protos/protos.json b/protos/protos.json index b086d3aa0..38081a126 100644 --- a/protos/protos.json +++ b/protos/protos.json @@ -1,7 +1,4 @@ { - "options": { - "syntax": "proto3" - }, "nested": { "google": { "nested": { @@ -6496,6 +6493,7 @@ }, "nested": { "FileDescriptorSet": { + "edition": "proto2", "fields": { "file": { "rule": "repeated", @@ -6505,6 +6503,7 @@ } }, "Edition": { + "edition": "proto2", "values": { "EDITION_UNKNOWN": 0, "EDITION_PROTO2": 998, @@ -6520,6 +6519,7 @@ } }, "FileDescriptorProto": { + "edition": "proto2", "fields": { "name": { "type": "string", @@ -6537,18 +6537,12 @@ "publicDependency": { "rule": "repeated", "type": "int32", - "id": 10, - "options": { - "packed": false - } + "id": 10 }, "weakDependency": { "rule": "repeated", "type": "int32", - "id": 11, - "options": { - "packed": false - } + "id": 11 }, "messageType": { "rule": "repeated", @@ -6589,6 +6583,7 @@ } }, "DescriptorProto": { + "edition": "proto2", "fields": { "name": { "type": "string", @@ -6671,6 +6666,7 @@ } }, "ExtensionRangeOptions": { + "edition": "proto2", "fields": { "uninterpretedOption": { "rule": "repeated", @@ -6744,6 +6740,7 @@ } }, "FieldDescriptorProto": { + "edition": "proto2", "fields": { "name": { "type": "string", @@ -6823,6 +6820,7 @@ } }, "OneofDescriptorProto": { + "edition": "proto2", "fields": { "name": { "type": "string", @@ -6835,6 +6833,7 @@ } }, "EnumDescriptorProto": { + "edition": "proto2", "fields": { "name": { "type": "string", @@ -6876,6 +6875,7 @@ } }, "EnumValueDescriptorProto": { + "edition": "proto2", "fields": { "name": { "type": "string", @@ -6892,6 +6892,7 @@ } }, "ServiceDescriptorProto": { + "edition": "proto2", "fields": { "name": { "type": "string", @@ -6909,6 +6910,7 @@ } }, "MethodDescriptorProto": { + "edition": "proto2", "fields": { "name": { "type": "string", @@ -6943,6 +6945,7 @@ } }, "FileOptions": { + "edition": "proto2", "fields": { "javaPackage": { "type": "string", @@ -7084,6 +7087,7 @@ } }, "MessageOptions": { + "edition": "proto2", "fields": { "messageSetWireFormat": { "type": "bool", @@ -7157,6 +7161,7 @@ ] }, "FieldOptions": { + "edition": "proto2", "fields": { "ctype": { "type": "CType", @@ -7218,10 +7223,7 @@ "targets": { "rule": "repeated", "type": "OptionTargetType", - "id": 19, - "options": { - "packed": false - } + "id": 19 }, "editionDefaults": { "rule": "repeated", @@ -7305,6 +7307,7 @@ } }, "OneofOptions": { + "edition": "proto2", "fields": { "features": { "type": "FeatureSet", @@ -7324,6 +7327,7 @@ ] }, "EnumOptions": { + "edition": "proto2", "fields": { "allowAlias": { "type": "bool", @@ -7367,6 +7371,7 @@ ] }, "EnumValueOptions": { + "edition": "proto2", "fields": { "deprecated": { "type": "bool", @@ -7400,6 +7405,7 @@ ] }, "ServiceOptions": { + "edition": "proto2", "fields": { "features": { "type": "FeatureSet", @@ -7426,6 +7432,7 @@ ] }, "MethodOptions": { + "edition": "proto2", "fields": { "deprecated": { "type": "bool", @@ -7468,6 +7475,7 @@ } }, "UninterpretedOption": { + "edition": "proto2", "fields": { "name": { "rule": "repeated", @@ -7517,6 +7525,7 @@ } }, "FeatureSet": { + "edition": "proto2", "fields": { "fieldPresence": { "type": "FieldPresence", @@ -7658,6 +7667,7 @@ } }, "FeatureSetDefaults": { + "edition": "proto2", "fields": { "defaults": { "rule": "repeated", @@ -7689,6 +7699,7 @@ } 
}, "SourceCodeInfo": { + "edition": "proto2", "fields": { "location": { "rule": "repeated", @@ -7702,12 +7713,18 @@ "path": { "rule": "repeated", "type": "int32", - "id": 1 + "id": 1, + "options": { + "packed": true + } }, "span": { "rule": "repeated", "type": "int32", - "id": 2 + "id": 2, + "options": { + "packed": true + } }, "leadingComments": { "type": "string", @@ -7727,6 +7744,7 @@ } }, "GeneratedCodeInfo": { + "edition": "proto2", "fields": { "annotation": { "rule": "repeated", @@ -7740,7 +7758,10 @@ "path": { "rule": "repeated", "type": "int32", - "id": 1 + "id": 1, + "options": { + "packed": true + } }, "sourceFile": { "type": "string", From b2c8f51d2d2b28b29b177f4f9dbe80b4c0a95d2c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 16 Apr 2025 15:47:28 -0400 Subject: [PATCH 402/448] remove callback on checkState --- .../operation-metrics-collector.ts | 198 ++++++++---------- 1 file changed, 83 insertions(+), 115 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 36487f899..be00915c6 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -88,17 +88,13 @@ function withMetricsDebug(fn: () => T): T | undefined { // Checks that the state transition is valid and if not it throws a warning. function checkState( currentState: MetricsCollectorState, - allowedStates: MetricsCollectorState[], - fn: () => T + allowedStates: MetricsCollectorState[] ): T | undefined { if (allowedStates.includes(currentState)) { - return fn(); + return; } else { - if (METRICS_DEBUG) { - throw Error('Invalid state transition'); - } + throw Error('Invalid state transition'); } - return; } /** @@ -190,16 +186,11 @@ export class OperationMetricsCollector { */ onOperationStart() { withMetricsDebug(() => { - checkState( - this.state, - [MetricsCollectorState.OPERATION_NOT_STARTED], - () => { - this.operationStartTime = new Date(); - this.firstResponseLatency = null; - this.state = - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS; - } - ); + checkState(this.state, [MetricsCollectorState.OPERATION_NOT_STARTED]); + this.operationStartTime = new Date(); + this.firstResponseLatency = null; + this.state = + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS; }); } @@ -210,41 +201,33 @@ export class OperationMetricsCollector { */ onAttemptComplete(projectId: string, attemptStatus: grpc.status) { withMetricsDebug(() => { - checkState( - this.state, - [ - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET, - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_SOME_ROWS_RECEIVED, - ], - () => { - this.state = - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS; - this.attemptCount++; - const endTime = new Date(); - if (projectId && this.attemptStartTime) { - const totalTime = - endTime.getTime() - this.attemptStartTime.getTime(); - OperationMetricsCollector.metricsHandlers.forEach( - metricsHandler => { - if (metricsHandler.onAttemptComplete) { - metricsHandler.onAttemptComplete({ - attemptLatency: totalTime, - serverLatency: this.serverTime ?? 
undefined, - connectivityErrorCount: this.connectivityErrorCount, - streaming: this.streamingOperation, - status: attemptStatus.toString(), - client_name: `nodejs-bigtable/${version}`, - metricsCollectorData: this.getMetricsCollectorData(), - projectId, - }); - } - } - ); - } else { - console.warn('ProjectId and start time should always be provided'); + checkState(this.state, [ + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET, + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_SOME_ROWS_RECEIVED, + ]); + this.state = + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS; + this.attemptCount++; + const endTime = new Date(); + if (projectId && this.attemptStartTime) { + const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); + OperationMetricsCollector.metricsHandlers.forEach(metricsHandler => { + if (metricsHandler.onAttemptComplete) { + metricsHandler.onAttemptComplete({ + attemptLatency: totalTime, + serverLatency: this.serverTime ?? undefined, + connectivityErrorCount: this.connectivityErrorCount, + streaming: this.streamingOperation, + status: attemptStatus.toString(), + client_name: `nodejs-bigtable/${version}`, + metricsCollectorData: this.getMetricsCollectorData(), + projectId, + }); } - } - ); + }); + } else { + console.warn('ProjectId and start time should always be provided'); + } }); } @@ -253,18 +236,15 @@ export class OperationMetricsCollector { */ onAttemptStart() { withMetricsDebug(() => { - checkState( - this.state, - [MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS], - () => { - this.state = - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET; - this.attemptStartTime = new Date(); - this.serverTime = null; - this.serverTimeRead = false; - this.connectivityErrorCount = 0; - } - ); + checkState(this.state, [ + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS, + ]); + this.state = + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET; + this.attemptStartTime = new Date(); + this.serverTime = null; + this.serverTimeRead = false; + this.connectivityErrorCount = 0; }); } @@ -274,25 +254,20 @@ export class OperationMetricsCollector { onResponse(projectId: string) { withMetricsDebug(() => { if (!this.firstResponseLatency) { - checkState( - this.state, - [ - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET, - ], - () => { - this.state = - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_SOME_ROWS_RECEIVED; - const endTime = new Date(); - if (projectId && this.operationStartTime) { - this.firstResponseLatency = - endTime.getTime() - this.operationStartTime.getTime(); - } else { - console.warn( - 'ProjectId and operationStartTime should always be provided' - ); - } - } - ); + checkState(this.state, [ + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET, + ]); + this.state = + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_SOME_ROWS_RECEIVED; + const endTime = new Date(); + if (projectId && this.operationStartTime) { + this.firstResponseLatency = + endTime.getTime() - this.operationStartTime.getTime(); + } else { + console.warn( + 'ProjectId and operationStartTime should always be provided' + ); + } } }); } @@ -306,41 +281,34 @@ export class OperationMetricsCollector { onOperationComplete(projectId: string, finalOperationStatus: grpc.status) { this.onAttemptComplete(projectId, finalOperationStatus); withMetricsDebug(() => { - checkState( - this.state, - 
[MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS], - () => { - this.state = MetricsCollectorState.OPERATION_COMPLETE; - const endTime = new Date(); - if (projectId && this.operationStartTime) { - const totalTime = - endTime.getTime() - this.operationStartTime.getTime(); - { - OperationMetricsCollector.metricsHandlers.forEach( - metricsHandler => { - if (metricsHandler.onOperationComplete) { - metricsHandler.onOperationComplete({ - status: finalOperationStatus.toString(), - streaming: this.streamingOperation, - metricsCollectorData: this.getMetricsCollectorData(), - client_name: `nodejs-bigtable/${version}`, - projectId, - operationLatency: totalTime, - retryCount: this.attemptCount - 1, - firstResponseLatency: - this.firstResponseLatency ?? undefined, - }); - } - } - ); + checkState(this.state, [ + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS, + ]); + this.state = MetricsCollectorState.OPERATION_COMPLETE; + const endTime = new Date(); + if (projectId && this.operationStartTime) { + const totalTime = endTime.getTime() - this.operationStartTime.getTime(); + { + OperationMetricsCollector.metricsHandlers.forEach(metricsHandler => { + if (metricsHandler.onOperationComplete) { + metricsHandler.onOperationComplete({ + status: finalOperationStatus.toString(), + streaming: this.streamingOperation, + metricsCollectorData: this.getMetricsCollectorData(), + client_name: `nodejs-bigtable/${version}`, + projectId, + operationLatency: totalTime, + retryCount: this.attemptCount - 1, + firstResponseLatency: this.firstResponseLatency ?? undefined, + }); } - } else { - console.warn( - 'projectId and operation start time should always be available here' - ); - } + }); } - ); + } else { + console.warn( + 'projectId and operation start time should always be available here' + ); + } }); } From ce79cb149357a1e04242a03fe335447d22b15d8d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 16 Apr 2025 15:53:00 -0400 Subject: [PATCH 403/448] Throw instead of console.warn --- src/client-side-metrics/operation-metrics-collector.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index be00915c6..bd66028d2 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -226,7 +226,7 @@ export class OperationMetricsCollector { } }); } else { - console.warn('ProjectId and start time should always be provided'); + throw new Error('ProjectId and start time should always be provided'); } }); } @@ -264,7 +264,7 @@ export class OperationMetricsCollector { this.firstResponseLatency = endTime.getTime() - this.operationStartTime.getTime(); } else { - console.warn( + throw new Error( 'ProjectId and operationStartTime should always be provided' ); } From 6ac0dfbd50ad8e7eed4d92f0f9446bf187434f76 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 10 Apr 2025 11:17:17 -0400 Subject: [PATCH 404/448] Use the hrtime module instead of the Date module # Conflicts: # src/client-side-metrics/operation-metrics-collector.ts # test/metrics-collector/metrics-collector.ts --- .../operation-metrics-collector.ts | 28 +++++---- test/metrics-collector/metrics-collector.ts | 60 +++++++------------ 2 files changed, 37 insertions(+), 51 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index bd66028d2..2234b8cb0 100644 --- 
a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -35,6 +35,7 @@ const root = gax.protobuf.loadSync( './protos/google/bigtable/v2/response_params.proto' ); const ResponseParams = root.lookupType('ResponseParams'); +const {hrtime} = require('node:process'); /** * An interface representing a tabular API surface, such as a Bigtable table. @@ -102,8 +103,8 @@ function checkState( */ export class OperationMetricsCollector { private state: MetricsCollectorState; - private operationStartTime: Date | null; - private attemptStartTime: Date | null; + private operationStartTime: bigint | null; + private attemptStartTime: bigint | null; private zone: string | undefined; private cluster: string | undefined; private tabularApiSurface: ITabularApiSurface; @@ -187,7 +188,7 @@ export class OperationMetricsCollector { onOperationStart() { withMetricsDebug(() => { checkState(this.state, [MetricsCollectorState.OPERATION_NOT_STARTED]); - this.operationStartTime = new Date(); + this.operationStartTime = hrtime.bigint(); this.firstResponseLatency = null; this.state = MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS; @@ -208,9 +209,11 @@ export class OperationMetricsCollector { this.state = MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS; this.attemptCount++; - const endTime = new Date(); + const endTime = hrtime.bigint(); if (projectId && this.attemptStartTime) { - const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); + const totalTime = Number( + (endTime - this.attemptStartTime) / BigInt(1000000) + ); OperationMetricsCollector.metricsHandlers.forEach(metricsHandler => { if (metricsHandler.onAttemptComplete) { metricsHandler.onAttemptComplete({ @@ -241,7 +244,7 @@ export class OperationMetricsCollector { ]); this.state = MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET; - this.attemptStartTime = new Date(); + this.attemptStartTime = hrtime.bigint(); this.serverTime = null; this.serverTimeRead = false; this.connectivityErrorCount = 0; @@ -259,10 +262,11 @@ export class OperationMetricsCollector { ]); this.state = MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_SOME_ROWS_RECEIVED; - const endTime = new Date(); + const endTime = hrtime.bigint(); if (projectId && this.operationStartTime) { - this.firstResponseLatency = - endTime.getTime() - this.operationStartTime.getTime(); + this.firstResponseLatency = Number( + (endTime - this.operationStartTime) / BigInt(1000000) + ); } else { throw new Error( 'ProjectId and operationStartTime should always be provided' @@ -285,9 +289,11 @@ export class OperationMetricsCollector { MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS, ]); this.state = MetricsCollectorState.OPERATION_COMPLETE; - const endTime = new Date(); + const endTime = hrtime.bigint(); if (projectId && this.operationStartTime) { - const totalTime = endTime.getTime() - this.operationStartTime.getTime(); + const totalTime = Number( + (endTime - this.operationStartTime) / BigInt(1000000) + ); { OperationMetricsCollector.metricsHandlers.forEach(metricsHandler => { if (metricsHandler.onOperationComplete) { diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index c95cb8aeb..84add8a8f 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -24,6 +24,7 @@ import { import {grpc} from 'google-gax'; import {expectedRequestsHandled} from 
'../../test-common/metrics-handler-fixture'; import * as gax from 'google-gax'; +import * as proxyquire from 'proxyquire'; import {GCPMetricsHandler} from '../../src/client-side-metrics/gcp-metrics-handler'; const root = gax.protobuf.loadSync( './protos/google/bigtable/v2/response_params.proto' @@ -51,47 +52,26 @@ class FakeInstance { } describe('Bigtable/MetricsCollector', () => { - const logger = {value: ''}; - const originalDate = global.Date; - - before(() => { - let mockTime = new Date('1970-01-01T00:00:01.000Z').getTime(); - - (global as any).Date = class extends originalDate { - constructor(...args: any[]) { - // Using a rest parameter - if (args.length === 0) { - super(mockTime); - logger.value += `getDate call returns ${mockTime.toString()} ms\n`; - mockTime += 1000; - } - } - - static now(): number { - return mockTime; - } + class FakeHRTime { + startTime = BigInt(0); + bigint() { + this.startTime += BigInt(1000000000); + logger.value += `getDate call returns ${Number(this.startTime / BigInt(1000000))} ms\n`; + return this.startTime; + } + } - static parse(dateString: string): number { - return originalDate.parse(dateString); - } + const stubs = { + 'node:process': { + hrtime: new FakeHRTime(), + }, + }; + const FakeOperationsMetricsCollector = proxyquire( + '../../src/client-side-metrics/operation-metrics-collector.js', + stubs + ).OperationMetricsCollector; - static UTC( - year: number, - month: number, - date?: number, - hours?: number, - minutes?: number, - seconds?: number, - ms?: number - ): number { - return originalDate.UTC(year, month, date, hours, minutes, seconds, ms); - } - }; - }); - - after(() => { - (global as any).Date = originalDate; - }); + const logger = {value: ''}; it('should record the right metrics with a typical method call', async () => { const testHandler = new TestMetricsHandler(logger); @@ -126,7 +106,7 @@ describe('Bigtable/MetricsCollector', () => { options: {}, }, }; - const metricsCollector = new OperationMetricsCollector( + const metricsCollector = new FakeOperationsMetricsCollector( this, MethodName.READ_ROWS, StreamingState.STREAMING From e1063e6e53548527d07151006daf73ab2c018cce Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 17 Apr 2025 10:45:28 -0400 Subject: [PATCH 405/448] run the linter --- .../operation-metrics-collector.ts | 22 +++++++++---------- src/tabular-api-surface.ts | 10 ++++----- .../client-side-metrics-setup-table.ts | 2 +- system-test/client-side-metrics.ts | 12 +++++----- system-test/read-rows.ts | 2 +- test/metrics-collector/metrics-collector.ts | 2 +- 6 files changed, 25 insertions(+), 25 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 32b5a3dd1..18ffd4e66 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -89,7 +89,7 @@ function withMetricsDebug(fn: () => T): T | undefined { // Checks that the state transition is valid and if not it throws a warning. 
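The hrtime migration above replaces wall-clock Date arithmetic with the monotonic nanosecond counter, and latencies are derived by integer-dividing the bigint delta down to milliseconds. A minimal standalone sketch of that conversion (elapsedMs is a hypothetical helper name, not part of the patch):

import {hrtime} from 'node:process';

// Monotonic elapsed time in whole milliseconds, mirroring the
// Number((end - start) / BigInt(1000000)) conversion in the collector.
function elapsedMs(start: bigint, end: bigint = hrtime.bigint()): number {
  return Number((end - start) / BigInt(1000000));
}

const attemptStartTime = hrtime.bigint();
// ... the RPC attempt runs ...
const attemptLatency = elapsedMs(attemptStartTime);

Unlike Date.now(), hrtime.bigint() is unaffected by system clock adjustments, so a recorded latency cannot go negative when the clock steps backwards.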
function checkState( currentState: MetricsCollectorState, - allowedStates: MetricsCollectorState[] + allowedStates: MetricsCollectorState[], ): T | undefined { if (allowedStates.includes(currentState)) { return; @@ -170,7 +170,7 @@ export class OperationMetricsCollector { 'metadata', (metadata: {internalRepr: Map; options: {}}) => { this.onMetadataReceived(metadata); - } + }, ) .on( 'status', @@ -178,7 +178,7 @@ export class OperationMetricsCollector { metadata: {internalRepr: Map; options: {}}; }) => { this.onStatusMetadataReceived(status); - } + }, ); } @@ -212,7 +212,7 @@ export class OperationMetricsCollector { const endTime = hrtime.bigint(); if (projectId && this.attemptStartTime) { const totalTime = Number( - (endTime - this.attemptStartTime) / BigInt(1000000) + (endTime - this.attemptStartTime) / BigInt(1000000), ); OperationMetricsCollector.metricsHandlers.forEach(metricsHandler => { if (metricsHandler.onAttemptComplete) { @@ -265,11 +265,11 @@ export class OperationMetricsCollector { const endTime = hrtime.bigint(); if (projectId && this.operationStartTime) { this.firstResponseLatency = Number( - (endTime - this.operationStartTime) / BigInt(1000000) + (endTime - this.operationStartTime) / BigInt(1000000), ); } else { throw new Error( - 'ProjectId and operationStartTime should always be provided' + 'ProjectId and operationStartTime should always be provided', ); } } @@ -292,7 +292,7 @@ export class OperationMetricsCollector { const endTime = hrtime.bigint(); if (projectId && this.operationStartTime) { const totalTime = Number( - (endTime - this.operationStartTime) / BigInt(1000000) + (endTime - this.operationStartTime) / BigInt(1000000), ); { OperationMetricsCollector.metricsHandlers.forEach(metricsHandler => { @@ -312,7 +312,7 @@ export class OperationMetricsCollector { } } else { console.warn( - 'projectId and operation start time should always be available here' + 'projectId and operation start time should always be available here', ); } }); @@ -332,7 +332,7 @@ export class OperationMetricsCollector { Array.from(metadata.internalRepr.entries(), ([key, value]) => [ key, value.toString(), - ]) + ]), ); const SERVER_TIMING_REGEX = /.*gfet4t7;\s*dur=(\d+\.?\d*).*/; const SERVER_TIMING_KEY = 'server-timing'; @@ -362,12 +362,12 @@ export class OperationMetricsCollector { if (!this.zone || !this.cluster) { const INSTANCE_INFORMATION_KEY = 'x-goog-ext-425905942-bin'; const mappedValue = status.metadata.internalRepr.get( - INSTANCE_INFORMATION_KEY + INSTANCE_INFORMATION_KEY, ) as Buffer[]; if (mappedValue && mappedValue[0] && ResponseParams) { const decodedValue = ResponseParams.decode( mappedValue[0], - mappedValue[0].length + mappedValue[0].length, ); if ( decodedValue && diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index d0e0e6273..c4440e701 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -343,7 +343,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); ? 
new OperationMetricsCollector( this, MethodName.READ_ROWS, - StreamingState.STREAMING + StreamingState.STREAMING, ) : null; metricsCollector?.onOperationStart(); @@ -536,7 +536,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); userStream.end(); metricsCollector?.onOperationComplete( this.bigtable.projectId, - error.code + error.code, ); return; } @@ -557,7 +557,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); ); metricsCollector?.onAttemptComplete( this.bigtable.projectId, - error.code + error.code, ); retryTimer = setTimeout(makeNewRequest, nextRetryDelay); } else { @@ -575,7 +575,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); } metricsCollector?.onOperationComplete( this.bigtable.projectId, - error.code + error.code, ); userStream.emit('error', error); } @@ -590,7 +590,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); activeRequestStream = null; metricsCollector?.onOperationComplete( this.bigtable.projectId, - grpc.status.OK + grpc.status.OK, ); }); rowStreamPipe(rowStream, userStream); diff --git a/system-test/client-side-metrics-setup-table.ts b/system-test/client-side-metrics-setup-table.ts index c7a21143f..003d9b3bf 100644 --- a/system-test/client-side-metrics-setup-table.ts +++ b/system-test/client-side-metrics-setup-table.ts @@ -17,7 +17,7 @@ export async function setupBigtable( bigtable: Bigtable, columnFamilyId: string, instanceId: string, - tableIds: string[] + tableIds: string[], ) { const instance = bigtable.instance(instanceId); const [instanceInfo] = await instance.exists(); diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index 7afcb70d8..5d45aa094 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -31,7 +31,7 @@ const SECOND_PROJECT_ID = 'cfdb-sdk-node-tests'; function getFakeBigtable( projectId: string, - metricsHandlerClass: typeof GCPMetricsHandler | typeof TestMetricsHandler + metricsHandlerClass: typeof GCPMetricsHandler | typeof TestMetricsHandler, ) { /* Below we mock out the table so that it sends the metrics to a test exporter @@ -43,7 +43,7 @@ function getFakeBigtable( '../src/client-side-metrics/operation-metrics-collector', { './gcp-metrics-handler': {GCPMetricsHandler: metricsHandlerClass}, - } + }, ).OperationMetricsCollector; const FakeTabularApiSurface = proxyquire('../src/tabular-api-surface.js', { './client-side-metrics/operation-metrics-collector': { @@ -118,8 +118,8 @@ describe('Bigtable/ClientSideMetrics', () => { if (!exported) { done( new Error( - 'The exporters have not completed yet and the timeout is over' - ) + 'The exporters have not completed yet and the timeout is over', + ), ); } }, 120000); @@ -127,7 +127,7 @@ describe('Bigtable/ClientSideMetrics', () => { class TestExporter extends CloudMonitoringExporter { export( metrics: ResourceMetrics, - resultCallback: (result: ExportResult) => void + resultCallback: (result: ExportResult) => void, ): void { try { super.export(metrics, (result: ExportResult) => { @@ -227,7 +227,7 @@ describe('Bigtable/ClientSideMetrics', () => { class TestExporter extends CloudMonitoringExporter { export( metrics: ResourceMetrics, - resultCallback: (result: ExportResult) => void + resultCallback: (result: ExportResult) => void, ): void { try { super.export(metrics, (result: ExportResult) => { diff --git a/system-test/read-rows.ts b/system-test/read-rows.ts index e267c393d..db156e31b 100644 --- a/system-test/read-rows.ts +++ 
b/system-test/read-rows.ts @@ -224,7 +224,7 @@ describe('Bigtable/Table', () => { TABLE.maxRetries = test.max_retries; TABLE.createReadStream(test.createReadStream_options) .on('data', (row: Row) => - rowKeysRead[rowKeysRead.length - 1].push(row.id) + rowKeysRead[rowKeysRead.length - 1].push(row.id), ) .on('end', () => (endCalled = true)) .on('error', (err: ServiceError) => (error = err as ServiceError)); diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 40820ff5d..272f10e3f 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -68,7 +68,7 @@ describe('Bigtable/MetricsCollector', () => { }; const FakeOperationsMetricsCollector = proxyquire( '../../src/client-side-metrics/operation-metrics-collector.js', - stubs + stubs, ).OperationMetricsCollector; const logger = {value: ''}; From 07cee6089c062d83e1a9b663f17729043218b76c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 17 Apr 2025 11:05:08 -0400 Subject: [PATCH 406/448] Fix failing tests Have to change the static variable on the mock instead --- test/metrics-collector/metrics-collector.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 272f10e3f..02058b22f 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -111,7 +111,7 @@ describe('Bigtable/MetricsCollector', () => { MethodName.READ_ROWS, StreamingState.STREAMING, ); - OperationMetricsCollector.metricsHandlers = [ + FakeOperationsMetricsCollector.metricsHandlers = [ testHandler as unknown as GCPMetricsHandler, ]; // In this method we simulate a series of events that might happen From 2aff9a25392dc73c6a07a56c0f0bfb6b016d9f2d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 17 Apr 2025 11:30:09 -0400 Subject: [PATCH 407/448] Add promise catching Fixes the linter errors --- system-test/client-side-metrics.ts | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index 5d45aa094..8b889403d 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -193,7 +193,9 @@ describe('Bigtable/ClientSideMetrics', () => { done(new Error('An error occurred while running the script')); done(e); } - })(); + })().catch(err => { + throw err; + }); }); it('should send the metrics to Google Cloud Monitoring for a ReadRows call with a second project', done => { (async () => { @@ -216,7 +218,9 @@ describe('Bigtable/ClientSideMetrics', () => { done(new Error('An error occurred while running the script')); done(e); } - })(); + })().catch(err => { + throw err; + }); }); }); describe('Bigtable/ClientSideMetricsToGCMTimeout', () => { @@ -308,7 +312,9 @@ describe('Bigtable/ClientSideMetrics', () => { done(new Error('An error occurred while running the script')); done(e); } - })(); + })().catch(err => { + throw err; + }); }); }); describe('Bigtable/ClientSideMetricsToMetricsHandler', () => { @@ -450,7 +456,9 @@ describe('Bigtable/ClientSideMetrics', () => { await table.getRows(); const table2 = instance.table(tableId2); await table2.getRows(); - })(); + })().catch(err => { + throw err; + }); }); it('should pass the projectId to the metrics handler properly', done => { bigtable = new Bigtable({projectId: SECOND_PROJECT_ID}); @@ -466,7 +474,9 @@ describe('Bigtable/ClientSideMetrics', () => { 
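The .catch handlers added throughout this patch keep the async IIFEs inside callback-style mocha tests from leaving floating promises, which is what the linter flagged. A minimal sketch of the pattern (doWork is a placeholder for the operation under test):

const doWork = async () => {
  /* hypothetical async operation under test */
};

it('example', done => {
  (async () => {
    try {
      await doWork();
      done();
    } catch (e) {
      done(e);
    }
  })().catch(err => {
    // Rethrow so a rejection that escapes the try/catch is surfaced
    // instead of being silently swallowed.
    throw err;
  });
});

Since the try/catch already routes failures to done(), the trailing .catch mostly exists to satisfy the no-floating-promises rule; rethrowing turns any remaining rejection into an unhandled-rejection crash rather than a silent pass.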
} catch (e) { done(e); } - })(); + })().catch(err => { + throw err; + }); }); }); }); From 6d9772375ccaca04ad07e1528c902cd7b4a7cad9 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 17 Apr 2025 13:25:31 -0400 Subject: [PATCH 408/448] Remove unused imports --- src/index.ts | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/index.ts b/src/index.ts index 590ee2fb0..4940cae0c 100644 --- a/src/index.ts +++ b/src/index.ts @@ -37,9 +37,6 @@ import * as v2 from './v2'; import {PassThrough, Duplex} from 'stream'; import grpcGcpModule = require('grpc-gcp'); import {ClusterUtils} from './utils/cluster'; -import {IMetricsHandler} from './client-side-metrics/metrics-handler'; -import {GCPMetricsHandler} from './client-side-metrics/gcp-metrics-handler'; -import {CloudMonitoringExporter} from './client-side-metrics/exporter'; // eslint-disable-next-line @typescript-eslint/no-var-requires const streamEvents = require('stream-events'); From 5ec6275fa9a247ef9d189800be0798a27d42e2ef Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 17 Apr 2025 13:28:33 -0400 Subject: [PATCH 409/448] Remove unused import --- test/metrics-collector/metrics-collector.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 02058b22f..52025e8b2 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -16,7 +16,6 @@ import {describe} from 'mocha'; import * as assert from 'assert'; import * as fs from 'fs'; import {TestMetricsHandler} from '../../test-common/test-metrics-handler'; -import {OperationMetricsCollector} from '../../src/client-side-metrics/operation-metrics-collector'; import { MethodName, StreamingState, From 1bb9634073efb8fb0441d0c6d34e1de06e158d6e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 17 Apr 2025 13:47:19 -0400 Subject: [PATCH 410/448] Use the path module --- .../operation-metrics-collector.ts | 14 +++++--------- test/metrics-collector/metrics-collector.ts | 7 +++++-- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 18ffd4e66..ff15d71a7 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -19,21 +19,17 @@ import * as gax from 'google-gax'; import {GCPMetricsHandler} from './gcp-metrics-handler'; import {CloudMonitoringExporter} from './exporter'; import {AbortableDuplex} from '../index'; +import * as path from 'path'; // When this environment variable is set then print any errors associated // with failures in the metrics collector. const METRICS_DEBUG = process.env.METRICS_DEBUG; -/* - * The samples tests are failing with the - * error UnhandledPromiseRejectionWarning: Error: ENOENT: no such file or - * directory, open 'protos/google/bigtable/v2/response_params.proto'. Since - * these tests don't use this module we can suppress the error for now to - * unblock the CI pipeline. 
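The hunk just below replaces the cwd-relative proto path, the source of the ENOENT failure described in the comment being deleted here, with one resolved against the compiled module's directory. As a standalone sketch of the pattern:

import * as path from 'path';
import * as gax from 'google-gax';

// Resolving against __dirname means the .proto file is found no matter
// which working directory the process was launched from.
const protoPath = path.join(
  __dirname,
  '../../protos/google/bigtable/v2/response_params.proto',
);
const root = gax.protobuf.loadSync(protoPath);
const ResponseParams = root.lookupType('ResponseParams');

The previous './protos/...' form resolved against process.cwd(), so it only worked when the process was started from the package root.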
- */ -const root = gax.protobuf.loadSync( - './protos/google/bigtable/v2/response_params.proto', +const protoPath = path.join( + __dirname, + '../../protos/google/bigtable/v2/response_params.proto', ); +const root = gax.protobuf.loadSync(protoPath); const ResponseParams = root.lookupType('ResponseParams'); const {hrtime} = require('node:process'); diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 52025e8b2..741519b64 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -22,12 +22,15 @@ import { } from '../../src/client-side-metrics/client-side-metrics-attributes'; import {grpc} from 'google-gax'; import {expectedRequestsHandled} from '../../test-common/metrics-handler-fixture'; +import * as path from 'path'; // Import the 'path' module import * as gax from 'google-gax'; import * as proxyquire from 'proxyquire'; import {GCPMetricsHandler} from '../../src/client-side-metrics/gcp-metrics-handler'; -const root = gax.protobuf.loadSync( - './protos/google/bigtable/v2/response_params.proto', +const protoPath = path.join( + __dirname, + '../../protos/google/bigtable/v2/response_params.proto', ); +const root = gax.protobuf.loadSync(protoPath); const ResponseParams = root.lookupType('ResponseParams'); /** From 03b8d110848b70dc1498847dd305e8945b3ffa04 Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Mon, 28 Apr 2025 15:18:30 +0000 Subject: [PATCH 411/448] =?UTF-8?q?=F0=9F=A6=89=20Updates=20from=20OwlBot?= =?UTF-8?q?=20post-processor?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --- protos/protos.d.ts | 21 -------------------- protos/protos.js | 49 +++++++--------------------------------------- 2 files changed, 7 insertions(+), 63 deletions(-) diff --git a/protos/protos.d.ts b/protos/protos.d.ts index 29918e68f..a39ddc6c7 100644 --- a/protos/protos.d.ts +++ b/protos/protos.d.ts @@ -5320,12 +5320,6 @@ export namespace google { /** Instance satisfiesPzi. */ public satisfiesPzi?: (boolean|null); - /** Instance _satisfiesPzs. */ - public _satisfiesPzs?: "satisfiesPzs"; - - /** Instance _satisfiesPzi. */ - public _satisfiesPzi?: "satisfiesPzi"; - /** * Creates a new Instance instance using the specified properties. * @param [properties] Properties to set @@ -6659,9 +6653,6 @@ export namespace google { /** DataBoostIsolationReadOnly computeBillingOwner. */ public computeBillingOwner?: (google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner|keyof typeof google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner|null); - /** DataBoostIsolationReadOnly _computeBillingOwner. */ - public _computeBillingOwner?: "computeBillingOwner"; - /** * Creates a new DataBoostIsolationReadOnly instance using the specified properties. * @param [properties] Properties to set @@ -19266,9 +19257,6 @@ export namespace google { /** MutateRowsResponse rateLimitInfo. */ public rateLimitInfo?: (google.bigtable.v2.IRateLimitInfo|null); - /** MutateRowsResponse _rateLimitInfo. */ - public _rateLimitInfo?: "rateLimitInfo"; - /** * Creates a new MutateRowsResponse instance using the specified properties. * @param [properties] Properties to set @@ -25292,9 +25280,6 @@ export namespace google { /** PartialResultSet partialRows. */ public partialRows?: "protoRowsBatch"; - /** PartialResultSet _batchChecksum. 
*/ - public _batchChecksum?: "batchChecksum"; - /** * Creates a new PartialResultSet instance using the specified properties. * @param [properties] Properties to set @@ -28467,12 +28452,6 @@ export namespace google { /** ResponseParams clusterId. */ public clusterId?: (string|null); - /** ResponseParams _zoneId. */ - public _zoneId?: "zoneId"; - - /** ResponseParams _clusterId. */ - public _clusterId?: "clusterId"; - /** * Creates a new ResponseParams instance using the specified properties. * @param [properties] Properties to set diff --git a/protos/protos.js b/protos/protos.js index cd82b0a66..b8f776b25 100644 --- a/protos/protos.js +++ b/protos/protos.js @@ -12059,23 +12059,13 @@ // OneOf field names bound to virtual getters and setters var $oneOfFields; - /** - * Instance _satisfiesPzs. - * @member {"satisfiesPzs"|undefined} _satisfiesPzs - * @memberof google.bigtable.admin.v2.Instance - * @instance - */ + // Virtual OneOf for proto3 optional field Object.defineProperty(Instance.prototype, "_satisfiesPzs", { get: $util.oneOfGetter($oneOfFields = ["satisfiesPzs"]), set: $util.oneOfSetter($oneOfFields) }); - /** - * Instance _satisfiesPzi. - * @member {"satisfiesPzi"|undefined} _satisfiesPzi - * @memberof google.bigtable.admin.v2.Instance - * @instance - */ + // Virtual OneOf for proto3 optional field Object.defineProperty(Instance.prototype, "_satisfiesPzi", { get: $util.oneOfGetter($oneOfFields = ["satisfiesPzi"]), set: $util.oneOfSetter($oneOfFields) @@ -15540,12 +15530,7 @@ // OneOf field names bound to virtual getters and setters var $oneOfFields; - /** - * DataBoostIsolationReadOnly _computeBillingOwner. - * @member {"computeBillingOwner"|undefined} _computeBillingOwner - * @memberof google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly - * @instance - */ + // Virtual OneOf for proto3 optional field Object.defineProperty(DataBoostIsolationReadOnly.prototype, "_computeBillingOwner", { get: $util.oneOfGetter($oneOfFields = ["computeBillingOwner"]), set: $util.oneOfSetter($oneOfFields) @@ -45217,12 +45202,7 @@ // OneOf field names bound to virtual getters and setters var $oneOfFields; - /** - * MutateRowsResponse _rateLimitInfo. - * @member {"rateLimitInfo"|undefined} _rateLimitInfo - * @memberof google.bigtable.v2.MutateRowsResponse - * @instance - */ + // Virtual OneOf for proto3 optional field Object.defineProperty(MutateRowsResponse.prototype, "_rateLimitInfo", { get: $util.oneOfGetter($oneOfFields = ["rateLimitInfo"]), set: $util.oneOfSetter($oneOfFields) @@ -60667,12 +60647,7 @@ set: $util.oneOfSetter($oneOfFields) }); - /** - * PartialResultSet _batchChecksum. - * @member {"batchChecksum"|undefined} _batchChecksum - * @memberof google.bigtable.v2.PartialResultSet - * @instance - */ + // Virtual OneOf for proto3 optional field Object.defineProperty(PartialResultSet.prototype, "_batchChecksum", { get: $util.oneOfGetter($oneOfFields = ["batchChecksum"]), set: $util.oneOfSetter($oneOfFields) @@ -67976,23 +67951,13 @@ // OneOf field names bound to virtual getters and setters var $oneOfFields; - /** - * ResponseParams _zoneId. - * @member {"zoneId"|undefined} _zoneId - * @memberof google.bigtable.v2.ResponseParams - * @instance - */ + // Virtual OneOf for proto3 optional field Object.defineProperty(ResponseParams.prototype, "_zoneId", { get: $util.oneOfGetter($oneOfFields = ["zoneId"]), set: $util.oneOfSetter($oneOfFields) }); - /** - * ResponseParams _clusterId. 
- * @member {"clusterId"|undefined} _clusterId - * @memberof google.bigtable.v2.ResponseParams - * @instance - */ + // Virtual OneOf for proto3 optional field Object.defineProperty(ResponseParams.prototype, "_clusterId", { get: $util.oneOfGetter($oneOfFields = ["clusterId"]), set: $util.oneOfSetter($oneOfFields) From 55c068c66bc30543f0ad80290ab22c98c40406e3 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 29 Apr 2025 10:54:59 -0400 Subject: [PATCH 412/448] Delete application latencies from the check --- system-test/client-side-metrics.ts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index 8b889403d..6184c1402 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -356,9 +356,11 @@ describe('Bigtable/ClientSideMetrics', () => { // them from the comparison after checking they exist. assert(secondRequest.operationLatency); assert(secondRequest.firstResponseLatency); + assert(secondRequest.applicationLatencies); assert(secondRequest.metricsCollectorData.client_uid); delete secondRequest.operationLatency; delete secondRequest.firstResponseLatency; + delete secondRequest.applicationLatencies; delete secondRequest.metricsCollectorData.client_uid; delete secondRequest.metricsCollectorData.appProfileId; assert.deepStrictEqual(secondRequest, { @@ -404,9 +406,11 @@ describe('Bigtable/ClientSideMetrics', () => { // them from the comparison after checking they exist. assert(fourthRequest.operationLatency); assert(fourthRequest.firstResponseLatency); + assert(fourthRequest.applicationLatencies); assert(fourthRequest.metricsCollectorData.client_uid); delete fourthRequest.operationLatency; delete fourthRequest.firstResponseLatency; + delete fourthRequest.applicationLatencies; delete fourthRequest.metricsCollectorData.client_uid; delete fourthRequest.metricsCollectorData.appProfileId; assert.deepStrictEqual(fourthRequest, { From 05cfa205275df4f2d44b781ccb9dcdb7209dc98b Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 29 Apr 2025 11:58:42 -0400 Subject: [PATCH 413/448] Get rid of bigtable singleton MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The bigtable singleton makes it really confusing to read the code. Instead, let’s have a unique bigtable client for each test and mock it out each time as necessary so that we know the exact state of the client in each test. 
--- system-test/client-side-metrics.ts | 50 ++++++++++-------------------- 1 file changed, 16 insertions(+), 34 deletions(-) diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index 6184c1402..65694fa06 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -68,19 +68,29 @@ describe('Bigtable/ClientSideMetrics', () => { const tableId1 = 'my-table'; const tableId2 = 'my-table2'; const columnFamilyId = 'cf1'; - let bigtable: Bigtable; + let projectId: string; before(async () => { + const bigtable = new Bigtable(); for (const instanceId of [instanceId1, instanceId2]) { - bigtable = new Bigtable(); await setupBigtable(bigtable, columnFamilyId, instanceId, [ tableId1, tableId2, ]); } + projectId = await new Promise((resolve, reject) => { + bigtable.getProjectId_((err: Error | null, projectId?: string) => { + if (err) { + reject(err); + } else { + resolve(projectId as string); + } + }); + }); }); after(async () => { + const bigtable = new Bigtable(); try { // If the instance has been deleted already by another source, we don't // want this after hook to block the continuous integration pipeline. @@ -168,15 +178,6 @@ describe('Bigtable/ClientSideMetrics', () => { it('should send the metrics to Google Cloud Monitoring for a ReadRows call', done => { (async () => { try { - const projectId: string = await new Promise((resolve, reject) => { - bigtable.getProjectId_((err: any, projectId: string) => { - if (err) { - reject(err); - } else { - resolve(projectId as string); - } - }); - }); const bigtable = await mockBigtable(projectId, done); for (const instanceId of [instanceId1, instanceId2]) { await setupBigtable(bigtable, columnFamilyId, instanceId, [ @@ -284,15 +285,6 @@ describe('Bigtable/ClientSideMetrics', () => { }, 120000); (async () => { try { - const projectId: string = await new Promise((resolve, reject) => { - bigtable.getProjectId_((err, projectId) => { - if (err) { - reject(err); - } else { - resolve(projectId as string); - } - }); - }); const bigtable1 = await mockBigtable(projectId, done); const bigtable2 = await mockBigtable(projectId, done); for (const bigtable of [bigtable1, bigtable2]) { @@ -435,26 +427,17 @@ describe('Bigtable/ClientSideMetrics', () => { } } - bigtable = getFakeBigtable(projectId, TestGCPMetricsHandler); + const bigtable = getFakeBigtable(projectId, TestGCPMetricsHandler); await setupBigtable(bigtable, columnFamilyId, instanceId1, [ tableId1, tableId2, ]); + return bigtable; } it('should send the metrics to the metrics handler for a ReadRows call', done => { - bigtable = new Bigtable(); (async () => { - const projectId: string = await new Promise((resolve, reject) => { - bigtable.getProjectId_((err, projectId) => { - if (err) { - reject(err); - } else { - resolve(projectId as string); - } - }); - }); - await mockBigtable(projectId, done); + const bigtable = await mockBigtable(projectId, done); const instance = bigtable.instance(instanceId1); const table = instance.table(tableId1); await table.getRows(); @@ -465,11 +448,10 @@ describe('Bigtable/ClientSideMetrics', () => { }); }); it('should pass the projectId to the metrics handler properly', done => { - bigtable = new Bigtable({projectId: SECOND_PROJECT_ID}); (async () => { try { const projectId = SECOND_PROJECT_ID; - await mockBigtable(projectId, done); + const bigtable = await mockBigtable(projectId, done); const instance = bigtable.instance(instanceId1); const table = instance.table(tableId1); await table.getRows(); From 
671011bfa2fe0b0e7f3b5bdbd0286f294cef7161 Mon Sep 17 00:00:00 2001 From: danieljbruce Date: Tue, 27 May 2025 17:24:33 -0400 Subject: [PATCH 414/448] fix: Get rid of the global singletons thereby improving client side metrics design (#1605) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * added metrics controller at the client level * renamed class * did some clean up around handler creation * fixed accidental deletion * use options instead of auth * fixed typo * don't pass project on each call * call getProjectId when setting up config manager * pulled out project id from metrics model * Fix: Resolve CSM branch compilation and auth issues This commit addresses compilation errors in the `csm_sanche` branch and ensures that authentication credentials can be passed to the `CloudMonitoringExporter`. The `csm_sanche` branch aimed to remove global singletons for client-side metrics. This change continues that effort by: - Fixing type errors and import issues in `ClientSideMetricsConfigManager`, `GCPMetricsHandler`, and `CloudMonitoringExporter`. - Modifying `CloudMonitoringExporter` and `GCPMetricsHandler` to accept an `options` object in their constructors. This object is passed down to the `MetricServiceClient`, allowing credentials to be provided. - Updating test files to align with these changes. - Adding new unit tests for `ClientSideMetricsConfigManager.getGcpHandlerForProject` to verify: - Correct instantiation of `GCPMetricsHandler`. - Propagation of the `options` object (for credentials). - Caching behavior of `GCPMetricsHandler` instances per project ID. These changes ensure that the client-side metrics functionality compiles correctly and that the exporter can be properly authenticated for sending metrics to Google Cloud Monitoring. * run the linter * Various changes to clarify types * Parameter should be in this position * Pass in project Id first * Name of the project * Go back to fetching OTEL instruments for each call * expect project id in the metrics collector * Make small change moving config manager Fix the tests because the mock has moved too * Debug current test of interest * Thread the projectId through the OTEL instruments again * Run the linter * Use metrics enabled to conditionally use metrics c Turn on metrics enabled Eliminate unnecessary parameter pass thrus * Don’t store options * Remove set options * Separate object for creating metrics collector Also adjust mocking for the system test. * remove the console logs * Only create one exporter per test * Remove console trace * Add fn to get the handler from the exporter * Add a test making sure the exporter gets the opts * Provide the proper mock for the metric service cli client * Each handler has a different instrument stack now So these tests need to change * cosmetic changes Add the check for the projectId again? 
* Fix compiler error for projectId * Address most of the comments Jules added * Remove unnecessary comments * Address header check * Remove unused imports * Removed automatically added comments * Pass in the projectId at constructor time * Fix the tests so they pass * Fix the manager tests * Move the instrument stack cache * Eliminate some proxyquire mocks * More changes required to bring handlers to the client * Resolve the remaining compile time errors * Fix the tests for createReadStream * Fix Should export a value ready for sending to the * Fix test due to metrics handler contract * Remove only * Add header * Change fake bigtable mock * Eliminate the assertion checks inside handler * Fix the projectId test * Remove onlys * Fix the hundred metrics handlers test * Add a test for 100 clients * Add retries to metric service client * Add a timeout to the hundred handlers * Remove console logs * Better test for many clients * No set console log * Eliminate hundred metrics handlers test * Fix issues with system tests due to new design * Remove onlys * separate variable for handlers --------- Co-authored-by: Daniel Sanche Co-authored-by: google-labs-jules[bot] <161369871+google-labs-jules[bot]@users.noreply.github.com> --- src/client-side-metrics/exporter.ts | 62 ++---- .../gcp-metrics-handler.ts | 52 ++--- .../generate-client-uuid.ts | 39 ++++ .../metrics-config-manager.ts | 44 ++++ src/client-side-metrics/metrics-handler.ts | 3 - .../operation-metrics-collector.ts | 45 ++-- src/index.ts | 26 +-- src/tabular-api-surface.ts | 44 ++-- system-test/client-side-metrics.ts | 158 +++++++++----- system-test/cloud-monitoring-exporter.ts | 14 +- system-test/gcp-metrics-handler.ts | 202 ++++-------------- .../metric-service-client-credentials.ts | 63 ++++++ system-test/read-rows-acceptance-tests.ts | 32 +++ system-test/service-path.ts | 6 +- test-common/expected-otel-export-input.ts | 2 - test-common/metrics-handler-fixture.ts | 3 - test-common/test-metrics-handler.ts | 20 +- test/metrics-collector/gcp-metrics-handler.ts | 27 ++- test/metrics-collector/metrics-collector.ts | 30 ++- test/metrics-collector/metricsToRequest.ts | 2 + .../metrics-collector/typical-method-call.txt | 6 +- test/table.ts | 27 ++- 22 files changed, 497 insertions(+), 410 deletions(-) create mode 100644 src/client-side-metrics/generate-client-uuid.ts create mode 100644 src/client-side-metrics/metrics-config-manager.ts create mode 100644 system-test/metric-service-client-credentials.ts diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 854e7be89..3fd9bf521 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -19,7 +19,7 @@ import { Histogram, ResourceMetrics, } from '@opentelemetry/sdk-metrics'; -import {grpc, ServiceError} from 'google-gax'; +import {grpc, ClientOptions, ServiceError} from 'google-gax'; import {MetricServiceClient} from '@google-cloud/monitoring'; import {google} from '@google-cloud/monitoring/build/protos/protos'; import ICreateTimeSeriesRequest = google.monitoring.v3.ICreateTimeSeriesRequest; @@ -115,32 +115,11 @@ function getIntegerPoints(dataPoint: DataPoint) { ]; } -/** - * Extracts the project ID from a `ResourceMetrics` object. - * - * This function retrieves the Google Cloud project ID from the resource - * attributes of a `ResourceMetrics` object, which is the standard data - * structure used by OpenTelemetry for representing metrics data. 
The project ID - * is typically stored under the `monitored_resource.project_id` key within the - * resource's attributes. - * - */ -function getProject(exportArgs: ResourceMetrics) { - type WithSyncAttributes = {_syncAttributes: {[index: string]: string}}; - const resourcesWithSyncAttributes = - exportArgs.resource as unknown as WithSyncAttributes; - const projectId = - resourcesWithSyncAttributes._syncAttributes[ - 'monitored_resource.project_id' - ]; - return projectId; -} - /** * getResource gets the resource object which is used for building the timeseries * object that will be sent to Google Cloud Monitoring dashboard * - * @param {string} metricName The backend name of the metric that we want to record + * @param {string} projectId The name of the project * @param {DataPoint} dataPoint The datapoint containing the data we wish to * send to the Google Cloud Monitoring dashboard */ @@ -205,6 +184,7 @@ function getMetric( * metric attributes, data points, and aggregation information, into an object * that conforms to the expected request format of the Cloud Monitoring API. * + * @param projectId * @param {ResourceMetrics} exportArgs - The OpenTelemetry metrics data to be converted. This * object contains resource attributes, scope information, and a list of * metrics with their associated data points. @@ -232,8 +212,10 @@ function getMetric( * * */ -export function metricsToRequest(exportArgs: ResourceMetrics) { - const projectId = getProject(exportArgs); +export function metricsToRequest( + projectId: string, + exportArgs: ResourceMetrics, +) { const timeSeriesArray = []; for (const scopeMetrics of exportArgs.scopeMetrics) { for (const scopeMetric of scopeMetrics.metrics) { @@ -312,22 +294,23 @@ export function metricsToRequest(exportArgs: ResourceMetrics) { * @beta */ export class CloudMonitoringExporter extends MetricExporter { - export( + private client: MetricServiceClient; + + constructor(options: ClientOptions) { + super(); + this.client = new MetricServiceClient(options); + } + + async export( metrics: ResourceMetrics, resultCallback: (result: ExportResult) => void, - ): void { + ): Promise { (async () => { try { - const projectId = getProject(metrics); - const request = metricsToRequest(metrics); - // In order to manage the "One or more points were written more - // frequently than the maximum sampling period configured for the - // metric." error we should have the metric service client retry a few - // times to ensure the metrics do get written. - // - // We use all the usual retry codes plus INVALID_ARGUMENT (code 3) - // because INVALID ARGUMENT (code 3) corresponds to the maximum - // sampling error. 
+ const projectId = await this.client.getProjectId(); + const request = metricsToRequest(projectId, metrics); + // We need the client to retry or we get errors: + // in addition, done() received error: Error: 4 DEADLINE_EXCEEDED: Deadline exceeded after 12.757s,name resolution: 1.614s,metadata filters: 0.001s,time to current attempt start: 0.029s,Waiting for LB pick const retry = new RetryOptions( [ grpc.status.INVALID_ARGUMENT, @@ -340,10 +323,11 @@ export class CloudMonitoringExporter extends MetricExporter { initialRetryDelayMillis: 5000, retryDelayMultiplier: 2, maxRetryDelayMillis: 50000, + totalTimeoutMillis: 50000, }, ); - const monitoringClient = new MetricServiceClient({projectId}); - await monitoringClient.createTimeSeries( + + await this.client.createTimeSeries( request as ICreateTimeSeriesRequest, { retry, diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 37fa4adea..945fcbe23 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +import {CloudMonitoringExporter} from './exporter'; import { IMetricsHandler, OnAttemptCompleteData, @@ -20,6 +21,8 @@ import { import * as Resources from '@opentelemetry/resources'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; import {PushMetricExporter, View} from '@opentelemetry/sdk-metrics'; +import {ClientOptions} from 'google-gax'; +import {generateClientUuid} from './generate-client-uuid'; const { Aggregation, ExplicitBucketHistogramAggregation, @@ -47,10 +50,9 @@ interface MetricsInstruments { * This method gets the open telemetry instruments that will store GCP metrics * for a particular project. * - * @param projectId The project for which the instruments will be stored. * @param exporter The exporter the metrics will be sent to. */ -function createInstruments(projectId: string, exporter: PushMetricExporter) { +function createInstruments(exporter: PushMetricExporter): MetricsInstruments { const latencyBuckets = [ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, 13.0, 16.0, 20.0, 25.0, 30.0, 40.0, 50.0, 65.0, 80.0, 100.0, 130.0, 160.0, 200.0, 250.0, 300.0, 400.0, @@ -80,7 +82,6 @@ function createInstruments(projectId: string, exporter: PushMetricExporter) { views: viewList, resource: new Resources.Resource({ 'service.name': 'Cloud Bigtable Table', - 'monitored_resource.project_id': projectId, }).merge(new ResourceUtil.GcpDetectorSync().detect()), readers: [ // Register the exporter @@ -183,11 +184,8 @@ function createInstruments(projectId: string, exporter: PushMetricExporter) { * associating them with relevant attributes for detailed analysis in Cloud Monitoring. */ export class GCPMetricsHandler implements IMetricsHandler { - private exporter: PushMetricExporter; - // The variable below is the singleton map from projects to instrument stacks - // which exists so that we only create one instrument stack per project. This - // will eliminate errors due to the maximum sampling period. 
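// One way to read this removal: a Cloud Monitoring time series is identified
// by its labels, and writing the same series twice within the minimum
// sampling period is rejected. Each handler now stamps its own client_uid on
// every recorded point (see clientUid below), so two handlers exporting
// concurrently write distinct series and no longer need to share one
// instrument stack per project. A sketch, with `options` assumed in scope:
//
//   const a = new GCPMetricsHandler(options); // its points carry uid A
//   const b = new GCPMetricsHandler(options); // its points carry uid B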
- static instrumentsForProject: {[projectId: string]: MetricsInstruments} = {}; + private otelInstruments: MetricsInstruments; + private clientUid: string; /** * The `GCPMetricsHandler` is responsible for managing and recording @@ -196,33 +194,11 @@ export class GCPMetricsHandler implements IMetricsHandler { * (histograms and counters) and exports them to Google Cloud Monitoring * through the provided `PushMetricExporter`. * - * @param exporter - The `PushMetricExporter` instance to use for exporting - * metrics to Google Cloud Monitoring. This exporter is responsible for - * sending the collected metrics data to the monitoring backend. The provided exporter must be fully configured, for example the projectId must have been set. */ - constructor(exporter: PushMetricExporter) { - this.exporter = exporter; - } - - /** - * Initializes the OpenTelemetry metrics instruments if they haven't been already. - * Creates and registers metric instruments (histograms and counters) for various Bigtable client metrics. - * Sets up a MeterProvider and configures a PeriodicExportingMetricReader for exporting metrics to Cloud Monitoring. - * - * which will be provided to the exporter in every export call. - * - */ - private getInstruments(projectId: string): MetricsInstruments { - // The projectId is needed per metrics handler because when the exporter is - // used it provides the project id for the name of the time series exported. - // ie. name: `projects/${....['monitored_resource.project_id']}`, - if (!GCPMetricsHandler.instrumentsForProject[projectId]) { - GCPMetricsHandler.instrumentsForProject[projectId] = createInstruments( - projectId, - this.exporter, - ); - } - return GCPMetricsHandler.instrumentsForProject[projectId]; + constructor(options: ClientOptions) { + this.clientUid = generateClientUuid(); + const exporter = new CloudMonitoringExporter(options); + this.otelInstruments = createInstruments(exporter); } /** @@ -231,11 +207,11 @@ export class GCPMetricsHandler implements IMetricsHandler { * @param {OnOperationCompleteData} data Data related to the completed operation. */ onOperationComplete(data: OnOperationCompleteData) { - const otelInstruments = this.getInstruments(data.projectId); + const otelInstruments = this.otelInstruments; const commonAttributes = { app_profile: data.metricsCollectorData.app_profile, method: data.metricsCollectorData.method, - client_uid: data.metricsCollectorData.client_uid, + client_uid: this.clientUid, client_name: data.client_name, instanceId: data.metricsCollectorData.instanceId, table: data.metricsCollectorData.table, @@ -271,11 +247,11 @@ export class GCPMetricsHandler implements IMetricsHandler { * @param {OnAttemptCompleteData} data Data related to the completed attempt. 
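   *
   * @example
   * // A hedged sketch of the expected data shape, using illustrative values
   * // modelled on the fixtures later in this patch; `handler` is assumed:
   * handler.onAttemptComplete({
   *   attemptLatency: 4000,
   *   serverLatency: 101,
   *   connectivityErrorCount: 0,
   *   streaming: StreamingState.STREAMING,
   *   status: '4',
   *   client_name: 'nodejs-bigtable',
   *   metricsCollectorData: {
   *     instanceId: 'fakeInstanceId',
   *     table: 'fakeTableId',
   *     cluster: 'fake-cluster3',
   *     zone: 'us-west1-c',
   *     method: MethodName.READ_ROWS,
   *   },
   * });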
 */
   onAttemptComplete(data: OnAttemptCompleteData) {
-    const otelInstruments = this.getInstruments(data.projectId);
+    const otelInstruments = this.otelInstruments;
     const commonAttributes = {
       app_profile: data.metricsCollectorData.app_profile,
       method: data.metricsCollectorData.method,
-      client_uid: data.metricsCollectorData.client_uid,
+      client_uid: this.clientUid,
       status: data.status,
       client_name: data.client_name,
       instanceId: data.metricsCollectorData.instanceId,
diff --git a/src/client-side-metrics/generate-client-uuid.ts b/src/client-side-metrics/generate-client-uuid.ts
new file mode 100644
index 000000000..066193879
--- /dev/null
+++ b/src/client-side-metrics/generate-client-uuid.ts
@@ -0,0 +1,39 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import * as os from 'os';
+import * as crypto from 'crypto';
+
+/**
+ * Generates a unique client identifier string.
+ *
+ * This function creates a client identifier that incorporates the hostname,
+ * process ID, and a UUID to ensure uniqueness across different client instances
+ * and processes. The identifier follows the pattern:
+ *
+ * `node-<uuid4>-<pid><hostname>`
+ *
+ * where:
+ * - `<uuid4>` is a randomly generated UUID (version 4).
+ * - `<pid>` is the process ID of the current Node.js process.
+ * - `<hostname>` is the hostname of the machine.
+ *
+ * @returns {string} A unique client identifier string.
+ */
+export function generateClientUuid() {
+  const hostname = os.hostname() || 'localhost';
+  const currentPid = process.pid || '';
+  const uuid4 = crypto.randomUUID();
+  return `node-${uuid4}-${currentPid}${hostname}`;
+}
diff --git a/src/client-side-metrics/metrics-config-manager.ts b/src/client-side-metrics/metrics-config-manager.ts
new file mode 100644
index 000000000..a28d7f14f
--- /dev/null
+++ b/src/client-side-metrics/metrics-config-manager.ts
@@ -0,0 +1,44 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import {IMetricsHandler} from './metrics-handler';
+import {
+  ITabularApiSurface,
+  OperationMetricsCollector,
+} from './operation-metrics-collector';
+import {MethodName, StreamingState} from './client-side-metrics-attributes';
+
+/**
+ * A class for tracing and recording client-side metrics related to Bigtable operations.
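+ *
+ * @example
+ * // A minimal usage sketch; `table` and `options` are assumed to be in
+ * // scope (in this patch the Bigtable client assembles the handler list):
+ * const manager = new ClientSideMetricsConfigManager([
+ *   new GCPMetricsHandler(options),
+ * ]);
+ * const collector = manager.createOperation(
+ *   MethodName.READ_ROWS,
+ *   StreamingState.STREAMING,
+ *   table,
+ * );
+ * collector.onOperationStart();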
+ */ +export class ClientSideMetricsConfigManager { + private metricsHandlers: IMetricsHandler[]; + + constructor(handlers: IMetricsHandler[]) { + this.metricsHandlers = handlers; + } + + createOperation( + methodName: MethodName, + streaming: StreamingState, + table: ITabularApiSurface, + ): OperationMetricsCollector { + return new OperationMetricsCollector( + table, + methodName, + streaming, + this.metricsHandlers, + ); + } +} diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index 6b4f0053e..6ce5bce12 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -13,7 +13,6 @@ // limitations under the License. import {MethodName, StreamingState} from './client-side-metrics-attributes'; -import {grpc} from 'google-gax'; /** * The interfaces below use undefined instead of null to indicate a metric is @@ -28,11 +27,9 @@ type IMetricsCollectorData = { zone?: string; app_profile?: string; method: MethodName; - client_uid: string; }; interface StandardData { - projectId: string; metricsCollectorData: IMetricsCollectorData; client_name: string; streaming: StreamingState; diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index b9d84be6c..1ecf8c6d8 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -16,10 +16,9 @@ import * as fs from 'fs'; import {MethodName, StreamingState} from './client-side-metrics-attributes'; import {grpc} from 'google-gax'; import * as gax from 'google-gax'; -import {GCPMetricsHandler} from './gcp-metrics-handler'; -import {CloudMonitoringExporter} from './exporter'; -import {AbortableDuplex} from '../index'; +import {AbortableDuplex, BigtableOptions} from '../index'; import * as path from 'path'; +import {IMetricsHandler} from './metrics-handler'; // When this environment variable is set then print any errors associated // with failures in the metrics collector. @@ -42,8 +41,10 @@ export interface ITabularApiSurface { }; id: string; bigtable: { + metricsEnabled?: boolean; + projectId?: string; appProfileId?: string; - clientUid: string; + options: BigtableOptions; }; } @@ -113,19 +114,19 @@ export class OperationMetricsCollector { private streamingOperation: StreamingState; private applicationLatencies: number[]; private lastRowReceivedTime: bigint | null; - static metricsHandlers = [ - new GCPMetricsHandler(new CloudMonitoringExporter()), - ]; + private handlers: IMetricsHandler[]; /** * @param {ITabularApiSurface} tabularApiSurface Information about the Bigtable table being accessed. * @param {MethodName} methodName The name of the method being traced. * @param {StreamingState} streamingOperation Whether or not the call is a streaming operation. + * @param {IMetricsHandler[]} handlers The metrics handlers used to store the record the metrics. 
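+   *
+   * @example
+   * // A sketch using names from elsewhere in this patch; `table` and
+   * // `options` are assumed to be in scope:
+   * const collector = new OperationMetricsCollector(
+   *   table,
+   *   MethodName.READ_ROWS,
+   *   StreamingState.STREAMING,
+   *   [new GCPMetricsHandler(options)],
+   * );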
*/ constructor( tabularApiSurface: ITabularApiSurface, methodName: MethodName, streamingOperation: StreamingState, + handlers: IMetricsHandler[], ) { this.state = MetricsCollectorState.OPERATION_NOT_STARTED; this.zone = undefined; @@ -141,6 +142,7 @@ export class OperationMetricsCollector { this.streamingOperation = streamingOperation; this.lastRowReceivedTime = null; this.applicationLatencies = []; + this.handlers = handlers; } private getMetricsCollectorData() { @@ -152,7 +154,6 @@ export class OperationMetricsCollector { cluster: this.cluster, zone: this.zone, method: this.methodName, - client_uid: this.tabularApiSurface.bigtable.clientUid, }, appProfileId ? {app_profile: appProfileId} : {}, ); @@ -198,10 +199,9 @@ export class OperationMetricsCollector { /** * Called when an attempt (e.g., an RPC attempt) completes. Records attempt latencies. - * @param {string} projectId The id of the project. * @param {grpc.status} attemptStatus The grpc status for the attempt. */ - onAttemptComplete(projectId: string, attemptStatus: grpc.status) { + onAttemptComplete(attemptStatus: grpc.status) { withMetricsDebug(() => { checkState(this.state, [ MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET, @@ -211,11 +211,11 @@ export class OperationMetricsCollector { MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS; this.attemptCount++; const endTime = hrtime.bigint(); - if (projectId && this.attemptStartTime) { + if (this.attemptStartTime) { const totalMilliseconds = Number( (endTime - this.attemptStartTime) / BigInt(1000000), ); - OperationMetricsCollector.metricsHandlers.forEach(metricsHandler => { + this.handlers.forEach(metricsHandler => { if (metricsHandler.onAttemptComplete) { metricsHandler.onAttemptComplete({ attemptLatency: totalMilliseconds, @@ -225,12 +225,11 @@ export class OperationMetricsCollector { status: attemptStatus.toString(), client_name: `nodejs-bigtable/${version}`, metricsCollectorData: this.getMetricsCollectorData(), - projectId, }); } }); } else { - throw new Error('ProjectId and start time should always be provided'); + throw new Error('Start time should always be provided'); } }); } @@ -256,7 +255,7 @@ export class OperationMetricsCollector { /** * Called when the first response is received. Records first response latencies. */ - onResponse(projectId: string) { + onResponse() { withMetricsDebug(() => { if (!this.firstResponseLatency) { checkState(this.state, [ @@ -265,7 +264,7 @@ export class OperationMetricsCollector { this.state = MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_SOME_ROWS_RECEIVED; const endTime = hrtime.bigint(); - if (projectId && this.operationStartTime) { + if (this.operationStartTime) { this.firstResponseLatency = Number( (endTime - this.operationStartTime) / BigInt(1000000), ); @@ -281,30 +280,28 @@ export class OperationMetricsCollector { /** * Called when an operation completes (successfully or unsuccessfully). * Records operation latencies, retry counts, and connectivity error counts. - * @param {string} projectId The id of the project. * @param {grpc.status} finalOperationStatus Information about the completed operation. 
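   *
   * @example
   * // Happy-path sketch: the collector has already seen onOperationStart(),
   * // onAttemptStart() and onResponse() for this operation.
   * collector.onOperationComplete(grpc.status.OK);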
*/ - onOperationComplete(projectId: string, finalOperationStatus: grpc.status) { - this.onAttemptComplete(projectId, finalOperationStatus); + onOperationComplete(finalOperationStatus: grpc.status) { + this.onAttemptComplete(finalOperationStatus); withMetricsDebug(() => { checkState(this.state, [ MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS, ]); this.state = MetricsCollectorState.OPERATION_COMPLETE; const endTime = hrtime.bigint(); - if (projectId && this.operationStartTime) { + if (this.operationStartTime) { const totalMilliseconds = Number( (endTime - this.operationStartTime) / BigInt(1000000), ); { - OperationMetricsCollector.metricsHandlers.forEach(metricsHandler => { + this.handlers.forEach(metricsHandler => { if (metricsHandler.onOperationComplete) { metricsHandler.onOperationComplete({ status: finalOperationStatus.toString(), streaming: this.streamingOperation, metricsCollectorData: this.getMetricsCollectorData(), client_name: `nodejs-bigtable/${version}`, - projectId, operationLatency: totalMilliseconds, retryCount: this.attemptCount - 1, firstResponseLatency: this.firstResponseLatency ?? undefined, @@ -314,9 +311,7 @@ export class OperationMetricsCollector { }); } } else { - console.warn( - 'projectId and operation start time should always be available here', - ); + console.warn('operation start time should always be available here'); } }); } diff --git a/src/index.ts b/src/index.ts index 4940cae0c..045aa3605 100644 --- a/src/index.ts +++ b/src/index.ts @@ -16,7 +16,12 @@ import {replaceProjectIdToken} from '@google-cloud/projectify'; import {promisifyAll} from '@google-cloud/promisify'; import arrify = require('arrify'); import * as extend from 'extend'; -import {GoogleAuth, CallOptions, grpc as gaxVendoredGrpc} from 'google-gax'; +import { + GoogleAuth, + CallOptions, + grpc as gaxVendoredGrpc, + ClientOptions, +} from 'google-gax'; import * as gax from 'google-gax'; import * as protos from '../protos/protos'; import * as os from 'os'; @@ -37,6 +42,8 @@ import * as v2 from './v2'; import {PassThrough, Duplex} from 'stream'; import grpcGcpModule = require('grpc-gcp'); import {ClusterUtils} from './utils/cluster'; +import {ClientSideMetricsConfigManager} from './client-side-metrics/metrics-config-manager'; +import {GCPMetricsHandler} from './client-side-metrics/gcp-metrics-handler'; // eslint-disable-next-line @typescript-eslint/no-var-requires const streamEvents = require('stream-events'); @@ -430,15 +437,10 @@ export class Bigtable { appProfileId?: string; projectName: string; shouldReplaceProjectIdToken: boolean; - clientUid = generateClientUuid(); static AppProfile: AppProfile; static Instance: Instance; static Cluster: Cluster; - // metricsEnabled is a member variable that is used to ensure that if the - // user provides a `false` value and opts out of metrics collection that - // the metrics collector is ignored altogether to reduce latency in the - // client. - metricsEnabled: boolean; + _metricsConfigManager: ClientSideMetricsConfigManager; constructor(options: BigtableOptions = {}) { // Determine what scopes are needed. @@ -537,11 +539,11 @@ export class Bigtable { this.projectName = `projects/${this.projectId}`; this.shouldReplaceProjectIdToken = this.projectId === '{{projectId}}'; - if (options.metricsEnabled === false) { - this.metricsEnabled = false; - } else { - this.metricsEnabled = true; - } + const handlers = + options.metricsEnabled === true + ? 
[new GCPMetricsHandler(options as ClientOptions)] + : []; + this._metricsConfigManager = new ClientSideMetricsConfigManager(handlers); } createInstance( diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index c4440e701..05dbc3f73 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -import {OperationMetricsCollector} from './client-side-metrics/operation-metrics-collector'; import {promisifyAll} from '@google-cloud/promisify'; import arrify = require('arrify'); import {Instance} from './instance'; @@ -44,8 +43,6 @@ import { MethodName, StreamingState, } from './client-side-metrics/client-side-metrics-attributes'; -import {GCPMetricsHandler} from './client-side-metrics/gcp-metrics-handler'; -import {CloudMonitoringExporter} from './client-side-metrics/exporter'; // See protos/google/rpc/code.proto // (4=DEADLINE_EXCEEDED, 8=RESOURCE_EXHAUSTED, 10=ABORTED, 14=UNAVAILABLE) @@ -339,16 +336,15 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); } return originalEnd(chunk, encoding, cb); }; - const metricsCollector = this.bigtable.metricsEnabled - ? new OperationMetricsCollector( - this, - MethodName.READ_ROWS, - StreamingState.STREAMING, - ) - : null; - metricsCollector?.onOperationStart(); + const metricsCollector = + this.bigtable._metricsConfigManager.createOperation( + MethodName.READ_ROWS, + StreamingState.STREAMING, + this, + ); + metricsCollector.onOperationStart(); const makeNewRequest = () => { - metricsCollector?.onAttemptStart(); + metricsCollector.onAttemptStart(); // Avoid cancelling an expired timer if user // cancelled the stream in the middle of a retry @@ -525,7 +521,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); return false; }; - metricsCollector?.handleStatusAndMetadata(requestStream); + metricsCollector.handleStatusAndMetadata(requestStream); rowStream .on('error', (error: ServiceError) => { rowStreamUnpipe(rowStream, userStream); @@ -534,10 +530,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // We ignore the `cancelled` "error", since we are the ones who cause // it when the user calls `.abort()`. 
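          // For orientation (a sketch, not patch code): whether this
          // collector records anything is fixed at client construction by the
          // handler list assembled in index.ts above. Metrics are opt-in:
          //
          //   const instrumented = new Bigtable({projectId: 'my-project', metricsEnabled: true});
          //   const quiet = new Bigtable({projectId: 'my-project'}); // empty handler list
          //
          // With no handlers registered, every collector callback still runs
          // but records to nothing.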
userStream.end(); - metricsCollector?.onOperationComplete( - this.bigtable.projectId, - error.code, - ); + metricsCollector.onOperationComplete(error.code); return; } numConsecutiveErrors++; @@ -555,10 +548,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); numConsecutiveErrors, backOffSettings, ); - metricsCollector?.onAttemptComplete( - this.bigtable.projectId, - error.code, - ); + metricsCollector.onAttemptComplete(error.code); retryTimer = setTimeout(makeNewRequest, nextRetryDelay); } else { if ( @@ -573,10 +563,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // error.code = grpc.status.CANCELLED; } - metricsCollector?.onOperationComplete( - this.bigtable.projectId, - error.code, - ); + metricsCollector.onOperationComplete(error.code); userStream.emit('error', error); } }) @@ -584,14 +571,11 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // Reset error count after a successful read so the backoff // time won't keep increasing when as stream had multiple errors numConsecutiveErrors = 0; - metricsCollector?.onResponse(this.bigtable.projectId); + metricsCollector.onResponse(); }) .on('end', () => { activeRequestStream = null; - metricsCollector?.onOperationComplete( - this.bigtable.projectId, - grpc.status.OK, - ); + metricsCollector.onOperationComplete(grpc.status.OK); }); rowStreamPipe(rowStream, userStream); }; diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index 65694fa06..255b2368a 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -26,6 +26,8 @@ import {Bigtable} from '../src'; import {setupBigtable} from './client-side-metrics-setup-table'; import {TestMetricsHandler} from '../test-common/test-metrics-handler'; import {OnOperationCompleteData} from '../src/client-side-metrics/metrics-handler'; +import {ClientOptions} from 'google-gax'; +import {ClientSideMetricsConfigManager} from '../src/client-side-metrics/metrics-config-manager'; const SECOND_PROJECT_ID = 'cfdb-sdk-node-tests'; @@ -33,33 +35,22 @@ function getFakeBigtable( projectId: string, metricsHandlerClass: typeof GCPMetricsHandler | typeof TestMetricsHandler, ) { - /* - Below we mock out the table so that it sends the metrics to a test exporter - that will still send the metrics to Google Cloud Monitoring, but then also - ensure the export was successful and pass the test with code 0 if it is - successful. 
- */ - const FakeOperationMetricsCollector = proxyquire( - '../src/client-side-metrics/operation-metrics-collector', - { - './gcp-metrics-handler': {GCPMetricsHandler: metricsHandlerClass}, - }, - ).OperationMetricsCollector; - const FakeTabularApiSurface = proxyquire('../src/tabular-api-surface.js', { - './client-side-metrics/operation-metrics-collector': { - OperationMetricsCollector: FakeOperationMetricsCollector, + const metricHandler = new metricsHandlerClass( + {} as unknown as ClientOptions & {value: string}, + ); + const newClient = new Bigtable({projectId}); + newClient._metricsConfigManager = new ClientSideMetricsConfigManager([ + metricHandler, + ]); + return newClient; +} + +function getHandlerFromExporter(Exporter: typeof CloudMonitoringExporter) { + return proxyquire('../src/client-side-metrics/gcp-metrics-handler.js', { + './exporter': { + CloudMonitoringExporter: Exporter, }, - }).TabularApiSurface; - const FakeTable = proxyquire('../src/table.js', { - './tabular-api-surface': {TabularApiSurface: FakeTabularApiSurface}, - }).Table; - const FakeInstance = proxyquire('../src/instance.js', { - './table': {Table: FakeTable}, - }).Instance; - const FakeBigtable = proxyquire('../src/index.js', { - './instance': {Instance: FakeInstance}, - }).Bigtable; - return new FakeBigtable({projectId}); + }).GCPMetricsHandler; } describe('Bigtable/ClientSideMetrics', () => { @@ -135,12 +126,16 @@ describe('Bigtable/ClientSideMetrics', () => { }, 120000); class TestExporter extends CloudMonitoringExporter { - export( + constructor(options: ClientOptions) { + super(options); + } + + async export( metrics: ResourceMetrics, resultCallback: (result: ExportResult) => void, - ): void { + ): Promise { try { - super.export(metrics, (result: ExportResult) => { + await super.export(metrics, (result: ExportResult) => { if (!exported) { exported = true; try { @@ -165,14 +160,7 @@ describe('Bigtable/ClientSideMetrics', () => { } } - class TestGCPMetricsHandler extends GCPMetricsHandler { - static value = 'value'; - constructor() { - super(new TestExporter()); - } - } - - return getFakeBigtable(projectId, TestGCPMetricsHandler); + return getFakeBigtable(projectId, getHandlerFromExporter(TestExporter)); } it('should send the metrics to Google Cloud Monitoring for a ReadRows call', done => { @@ -228,19 +216,30 @@ describe('Bigtable/ClientSideMetrics', () => { // This test suite simulates a situation where the user creates multiple // clients and ensures that the exporter doesn't produce any errors even // when multiple clients are attempting an export. - async function mockBigtable(projectId: string, done: mocha.Done) { + async function mockBigtable( + projectId: string, + done: mocha.Done, + onExportSuccess?: () => void, + ) { class TestExporter extends CloudMonitoringExporter { - export( + constructor(options: ClientOptions) { + super(options); + } + + async export( metrics: ResourceMetrics, resultCallback: (result: ExportResult) => void, - ): void { + ): Promise { try { - super.export(metrics, (result: ExportResult) => { + await super.export(metrics, (result: ExportResult) => { try { // The code is expected to be 0 because the // result from calling export was successful. assert.strictEqual(result.code, 0); resultCallback({code: 0}); + if (onExportSuccess) { + onExportSuccess(); + } } catch (error) { // The code here isn't 0 so we report the original error to the // mocha test runner. @@ -248,27 +247,23 @@ describe('Bigtable/ClientSideMetrics', () => { // unsuccessful. 
done(result); done(error); + resultCallback({code: 0}); } }); } catch (error) { done(error); + resultCallback({code: 0}); } } } - class TestGCPMetricsHandler extends GCPMetricsHandler { - constructor() { - super(new TestExporter()); - } - } - /* Below we mock out the table so that it sends the metrics to a test exporter that will still send the metrics to Google Cloud Monitoring, but then also ensure the export was successful and pass the test with code 0 if it is successful. */ - return getFakeBigtable(projectId, TestGCPMetricsHandler); + return getFakeBigtable(projectId, getHandlerFromExporter(TestExporter)); } it('should send the metrics to Google Cloud Monitoring for a ReadRows call', done => { @@ -308,11 +303,70 @@ describe('Bigtable/ClientSideMetrics', () => { throw err; }); }); + it('should send the metrics to Google Cloud Monitoring for a ReadRows call with thirty clients', done => { + /* + We need to create a timeout here because if we don't then mocha shuts down + the test as it is sleeping before the GCPMetricsHandler has a chance to + export the data. When the timeout is finished, if there were no export + errors then the test passes. + */ + const testTimeout = setTimeout(() => { + done(new Error('The test timed out')); + }, 480000); + let testComplete = false; + const numClients = 30; + (async () => { + try { + const bigtableList = []; + const completedSet = new Set(); + for ( + let bigtableCount = 0; + bigtableCount < numClients; + bigtableCount++ + ) { + const currentCount = bigtableCount; + const onExportSuccess = () => { + completedSet.add(currentCount); + if (completedSet.size === numClients) { + // If every client has completed the export then pass the test. + clearTimeout(testTimeout); + if (!testComplete) { + testComplete = true; + done(); + } + } + }; + bigtableList.push( + await mockBigtable(projectId, done, onExportSuccess), + ); + } + for (const bigtable of bigtableList) { + for (const instanceId of [instanceId1, instanceId2]) { + await setupBigtable(bigtable, columnFamilyId, instanceId, [ + tableId1, + tableId2, + ]); + const instance = bigtable.instance(instanceId); + const table = instance.table(tableId1); + await table.getRows(); + const table2 = instance.table(tableId2); + await table2.getRows(); + } + } + } catch (e) { + done(e); + done(new Error('An error occurred while running the script')); + } + })().catch(err => { + throw err; + }); + }); }); describe('Bigtable/ClientSideMetricsToMetricsHandler', () => { async function mockBigtable(projectId: string, done: mocha.Done) { let handlerRequestCount = 0; class TestGCPMetricsHandler extends TestMetricsHandler { + projectId = projectId; onOperationComplete(data: OnOperationCompleteData) { handlerRequestCount++; try { @@ -324,10 +378,8 @@ describe('Bigtable/ClientSideMetrics', () => { // them from the comparison after checking they exist. 
assert(firstRequest.attemptLatency); assert(firstRequest.serverLatency); - assert(firstRequest.metricsCollectorData.client_uid); delete firstRequest.attemptLatency; delete firstRequest.serverLatency; - delete firstRequest.metricsCollectorData.client_uid; delete firstRequest.metricsCollectorData.appProfileId; assert.deepStrictEqual(firstRequest, { connectivityErrorCount: 0, @@ -349,11 +401,9 @@ describe('Bigtable/ClientSideMetrics', () => { assert(secondRequest.operationLatency); assert(secondRequest.firstResponseLatency); assert(secondRequest.applicationLatencies); - assert(secondRequest.metricsCollectorData.client_uid); delete secondRequest.operationLatency; delete secondRequest.firstResponseLatency; delete secondRequest.applicationLatencies; - delete secondRequest.metricsCollectorData.client_uid; delete secondRequest.metricsCollectorData.appProfileId; assert.deepStrictEqual(secondRequest, { status: '0', @@ -374,10 +424,8 @@ describe('Bigtable/ClientSideMetrics', () => { const thirdRequest = this.requestsHandled[2] as any; assert(thirdRequest.attemptLatency); assert(thirdRequest.serverLatency); - assert(thirdRequest.metricsCollectorData.client_uid); delete thirdRequest.attemptLatency; delete thirdRequest.serverLatency; - delete thirdRequest.metricsCollectorData.client_uid; delete thirdRequest.metricsCollectorData.appProfileId; assert.deepStrictEqual(thirdRequest, { connectivityErrorCount: 0, @@ -399,11 +447,9 @@ describe('Bigtable/ClientSideMetrics', () => { assert(fourthRequest.operationLatency); assert(fourthRequest.firstResponseLatency); assert(fourthRequest.applicationLatencies); - assert(fourthRequest.metricsCollectorData.client_uid); delete fourthRequest.operationLatency; delete fourthRequest.firstResponseLatency; delete fourthRequest.applicationLatencies; - delete fourthRequest.metricsCollectorData.client_uid; delete fourthRequest.metricsCollectorData.appProfileId; assert.deepStrictEqual(fourthRequest, { status: '0', diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts index cbee08e21..f484c8e05 100644 --- a/system-test/cloud-monitoring-exporter.ts +++ b/system-test/cloud-monitoring-exporter.ts @@ -70,11 +70,15 @@ describe('Bigtable/CloudMonitoringExporter', () => { }); }); } - const exporter = new CloudMonitoringExporter(); - exporter.export( - transformedExportInput as unknown as ResourceMetrics, - resultCallback, - ); + const exporter = new CloudMonitoringExporter({}); // Pass empty object as options + exporter + .export( + transformedExportInput as unknown as ResourceMetrics, + resultCallback, + ) + .catch(err => { + throw err; + }); })().catch(err => { throw err; }); diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 93025d0a8..6c765ea02 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -13,7 +13,6 @@ // limitations under the License. 
import {describe} from 'mocha'; -import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler'; import {expectedRequestsHandled} from '../test-common/metrics-handler-fixture'; import { OnAttemptCompleteData, @@ -26,8 +25,25 @@ import { import {Bigtable} from '../src'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import * as assert from 'assert'; -import {expectedOtelHundredExportInputs} from '../test-common/expected-otel-export-input'; +import { + expectedOtelExportInput, + expectedOtelHundredExportInputs, +} from '../test-common/expected-otel-export-input'; import {replaceTimestamps} from '../test-common/replace-timestamps'; +import {ClientOptions} from 'google-gax'; +import * as proxyquire from 'proxyquire'; + +function getHandler(Exporter: typeof CloudMonitoringExporter) { + const FakeCGPMetricsHandler = proxyquire( + '../src/client-side-metrics/gcp-metrics-handler.js', + { + './exporter': { + CloudMonitoringExporter: Exporter, + }, + }, + ).GCPMetricsHandler; + return new FakeCGPMetricsHandler(); +} describe('Bigtable/GCPMetricsHandler', () => { it('Should export a value to the GCPMetricsHandler', done => { @@ -64,13 +80,17 @@ describe('Bigtable/GCPMetricsHandler', () => { }; } class MockExporter extends CloudMonitoringExporter { - export( + constructor(options: ClientOptions) { + super(options); + } + + async export( metrics: ResourceMetrics, resultCallback: (result: ExportResult) => void, - ): void { + ): Promise { const testResultCallback = getTestResultCallback(resultCallback); if (!exported) { - super.export(metrics, testResultCallback); + await super.export(metrics, testResultCallback); } else { resultCallback({code: 0}); } @@ -87,10 +107,7 @@ describe('Bigtable/GCPMetricsHandler', () => { } }); }); - // projectToInstruments argument is set to {} because we want a fresh - // instrument stack each time this test is run. - GCPMetricsHandler.instrumentsForProject = {}; - const handler = new GCPMetricsHandler(new MockExporter({projectId})); + const handler = getHandler(MockExporter); const transformedRequestsHandled = JSON.parse( JSON.stringify(expectedRequestsHandled).replace( /my-project/g, @@ -152,14 +169,18 @@ describe('Bigtable/GCPMetricsHandler', () => { }; } class MockExporter extends CloudMonitoringExporter { - export( + constructor(options: ClientOptions) { + super(options); + } + + async export( metrics: ResourceMetrics, resultCallback: (result: ExportResult) => void, - ): void { + ): Promise { if (exportedCount < 1) { // The code below uses the test callback to ensure the export was successful. const testResultCallback = getTestResultCallback(resultCallback); - super.export(metrics, testResultCallback); + await super.export(metrics, testResultCallback); } else { // After the test is complete the periodic exporter may still be // running in which case we don't want to do any checks. We just @@ -180,11 +201,8 @@ describe('Bigtable/GCPMetricsHandler', () => { } }); }); - // projectToInstruments argument is set to {} because we want a fresh - // instrument stack each time this test is run. 
- GCPMetricsHandler.instrumentsForProject = {}; - const handler = new GCPMetricsHandler(new MockExporter({projectId})); - const handler2 = new GCPMetricsHandler(new MockExporter({projectId})); + const handler = getHandler(MockExporter); + const handler2 = handler; const transformedRequestsHandled = JSON.parse( JSON.stringify(expectedRequestsHandled).replace( /my-project/g, @@ -209,141 +227,6 @@ describe('Bigtable/GCPMetricsHandler', () => { throw err; }); }); - it('Should export a value to a hundred GCPMetricsHandlers', done => { - // This test ensures that when we create multiple GCPMetricsHandlers much like - // what we would be doing when calling readRows on separate tables that - // the data doesn't store duplicates in the same place and export twice as - // much data as it should. - (async () => { - /* - We need to create a timeout here because if we don't then mocha shuts down - the test as it is sleeping before the GCPMetricsHandler has a chance to - export the data. - */ - const timeout = setTimeout(() => { - done(new Error('The export never happened')); - }, 120000); - /* - The exporter is called every x seconds, but we only want to test the value - it receives once. Since done cannot be called multiple times in mocha, - exported variable ensures we only test the value export receives one time. - */ - let exportedCount = 0; - function getTestResultCallback( - resultCallback: (result: ExportResult) => void, - ) { - return (result: ExportResult) => { - exportedCount++; - try { - assert.strictEqual(result.code, 0); - } catch (error) { - // Code isn't 0 so report the original error. - done(result); - done(error); - } - if (exportedCount === 1) { - // We are expecting one call to an exporter. - clearTimeout(timeout); - done(); - } - // The resultCallback needs to be called to end the exporter operation - // so that the test shuts down in mocha. - resultCallback({code: 0}); - }; - } - class MockExporter extends CloudMonitoringExporter { - export( - metrics: ResourceMetrics, - resultCallback: (result: ExportResult) => void, - ): void { - if (exportedCount < 1) { - try { - // This code block ensures the metrics are correct. Mainly, the metrics - // shouldn't contain two copies of the data. It should only contain - // one. - // - // For this test since we are still writing a time series with - // metrics variable we don't want to modify the metrics variable - // to have artificial times because then sending the data to the - // metric service client will fail. Therefore, we must make a copy - // of the metrics and use that. - const parsedExportInput: ResourceMetrics = JSON.parse( - JSON.stringify(metrics), - ); - replaceTimestamps( - parsedExportInput as unknown as typeof expectedOtelHundredExportInputs, - [123, 789], - [456, 789], - ); - assert.deepStrictEqual( - parsedExportInput.scopeMetrics[0].metrics.length, - expectedOtelHundredExportInputs.scopeMetrics[0].metrics.length, - ); - for ( - let index = 0; - index < parsedExportInput.scopeMetrics[0].metrics.length; - index++ - ) { - // We need to compare pointwise because mocha truncates to an 8192 character limit. - assert.deepStrictEqual( - parsedExportInput.scopeMetrics[0].metrics[index], - expectedOtelHundredExportInputs.scopeMetrics[0].metrics[ - index - ], - ); - } - } catch (e) { - // The error needs to be caught so it can be reported to the mocha - // test runner. - done(e); - } - // The code below uses the test callback to ensure the export was successful. 
- const testResultCallback = getTestResultCallback(resultCallback); - super.export(metrics, testResultCallback); - } else { - // After the test is complete the periodic exporter may still be - // running in which case we don't want to do any checks. We just - // want to call the resultCallback so that there are no hanging - // threads. - resultCallback({code: 0}); - } - } - } - - const bigtable = new Bigtable(); - const projectId: string = await new Promise((resolve, reject) => { - bigtable.getProjectId_((err, projectId) => { - if (err) { - reject(err); - } else { - resolve(projectId as string); - } - }); - }); - const transformedRequestsHandled = JSON.parse( - JSON.stringify(expectedRequestsHandled).replace( - /my-project/g, - projectId, - ), - ); - const handlers = []; - // projectToInstruments argument is set to {} because we want a fresh - // instrument stack each time this test is run. - GCPMetricsHandler.instrumentsForProject = {}; - for (let i = 0; i < 100; i++) { - handlers.push(new GCPMetricsHandler(new MockExporter({projectId}))); - for (const request of transformedRequestsHandled) { - if (request.attemptLatency) { - handlers[i].onAttemptComplete(request as OnAttemptCompleteData); - } else { - handlers[i].onOperationComplete(request as OnOperationCompleteData); - } - } - } - })().catch(err => { - throw err; - }); - }); it('Should write two duplicate points inserted into the metrics handler', done => { (async () => { /* @@ -378,13 +261,17 @@ describe('Bigtable/GCPMetricsHandler', () => { }; } class MockExporter extends CloudMonitoringExporter { - export( + constructor(options: ClientOptions) { + super(options); + } + + async export( metrics: ResourceMetrics, resultCallback: (result: ExportResult) => void, - ): void { + ): Promise { const testResultCallback = getTestResultCallback(resultCallback); if (!exported) { - super.export(metrics, testResultCallback); + await super.export(metrics, testResultCallback); } else { resultCallback({code: 0}); } @@ -401,10 +288,7 @@ describe('Bigtable/GCPMetricsHandler', () => { } }); }); - // projectToInstruments argument is set to {} because we want a fresh - // instrument stack each time this test is run. - GCPMetricsHandler.instrumentsForProject = {}; - const handler = new GCPMetricsHandler(new MockExporter({projectId})); + const handler = getHandler(MockExporter); // Pass options with exporter const transformedRequestsHandled = JSON.parse( JSON.stringify(expectedRequestsHandled).replace( /my-project/g, diff --git a/system-test/metric-service-client-credentials.ts b/system-test/metric-service-client-credentials.ts new file mode 100644 index 000000000..ca918f6ff --- /dev/null +++ b/system-test/metric-service-client-credentials.ts @@ -0,0 +1,63 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import * as proxyquire from 'proxyquire'; +import {ClientOptions} from 'google-gax'; +import * as assert from 'assert'; +import {setupBigtable} from './client-side-metrics-setup-table'; + +describe('Bigtable/MetricServiceClientCredentials', () => { + it('should pass the credentials to the metric service client', done => { + const clientOptions = {metricsEnabled: true}; + class FakeExporter { + constructor(options: ClientOptions) { + try { + assert.strictEqual(options, clientOptions); + done(); + } catch (e) { + done(e); + } + } + } + const FakeCGPMetricsHandler = proxyquire( + '../src/client-side-metrics/gcp-metrics-handler.js', + { + './exporter': { + CloudMonitoringExporter: FakeExporter, + }, + }, + ).GCPMetricsHandler; + const FakeBigtable = proxyquire('../src/index.js', { + './client-side-metrics/gcp-metrics-handler': { + GCPMetricsHandler: FakeCGPMetricsHandler, + }, + }).Bigtable; + const bigtable = new FakeBigtable(clientOptions); + const instanceId = 'emulator-test-instance'; + const columnFamilyId = 'cf1'; + const tableId1 = 'my-table'; + (async () => { + try { + await setupBigtable(bigtable, columnFamilyId, instanceId, [tableId1]); + const instance = bigtable.instance(instanceId); + const table = instance.table(tableId1); + await table.getRows(); + } catch (e) { + done(e); + } + })().catch(err => { + throw err; + }); + }); +}); diff --git a/system-test/read-rows-acceptance-tests.ts b/system-test/read-rows-acceptance-tests.ts index 93b9af01c..8d70db610 100644 --- a/system-test/read-rows-acceptance-tests.ts +++ b/system-test/read-rows-acceptance-tests.ts @@ -25,6 +25,37 @@ import * as fs from 'fs'; import * as path from 'path'; import {Instance} from '../src/instance'; import {Bigtable, AbortableDuplex} from '../src'; +import {ClientSideMetricsConfigManager} from '../src/client-side-metrics/metrics-config-manager'; +import { + ITabularApiSurface, + OperationMetricsCollector, +} from '../src/client-side-metrics/operation-metrics-collector'; +import { + MethodName, + StreamingState, +} from '../src/client-side-metrics/client-side-metrics-attributes'; + +class FakeOperationMetricsCollector extends OperationMetricsCollector { + onOperationComplete() {} + onResponse() {} + onAttemptStart() {} + onAttemptComplete() {} + onOperationStart() {} + handleStatusAndMetadata() {} + onMetadataReceived() {} + onRowReachesUser() {} + onStatusMetadataReceived() {} +} + +class FakeMetricsConfigManager extends ClientSideMetricsConfigManager { + createOperation( + methodName: MethodName, + streaming: StreamingState, + table: ITabularApiSurface, + ): OperationMetricsCollector { + return new FakeOperationMetricsCollector(table, methodName, streaming, []); + } +} const protosJson = path.resolve(__dirname, '../protos/protos.json'); const root = protobuf.Root.fromJSON( @@ -67,6 +98,7 @@ describe('Read Row Acceptance tests', () => { }); table.bigtable = {} as Bigtable; + table.bigtable._metricsConfigManager = new FakeMetricsConfigManager([]); // eslint-disable-next-line @typescript-eslint/no-explicit-any (table.bigtable.request as any) = () => { const stream = new PassThrough({ diff --git a/system-test/service-path.ts b/system-test/service-path.ts index 8c895f9b0..cf4d9758a 100644 --- a/system-test/service-path.ts +++ b/system-test/service-path.ts @@ -44,7 +44,7 @@ describe('Service Path', () => { await bigtable.getInstances({timeout: 1000}); } catch (e) { assert.strictEqual( - (e as ServiceError).message, + (e as ServiceError).message.substring(0, 250), 'Total timeout of API 
google.bigtable.admin.v2.BigtableInstanceAdmin exceeded 1000 milliseconds retrying error Error: 14 UNAVAILABLE: Name resolution failed for target dns:bigtableadmin.someUniverseDomain:443 before any response was received.', ); } finally { @@ -104,7 +104,7 @@ describe('Service Path', () => { await bigtable.getInstances({timeout: 1000}); } catch (e) { assert.strictEqual( - (e as ServiceError).message, + (e as ServiceError).message.substring(0, 250), 'Total timeout of API google.bigtable.admin.v2.BigtableInstanceAdmin exceeded 1000 milliseconds retrying error Error: 14 UNAVAILABLE: Name resolution failed for target dns:someApiEndpoint:443 before any response was received.', ); } finally { @@ -157,7 +157,7 @@ describe('Service Path', () => { await bigtable.getInstances({timeout: 1000}); } catch (e) { assert.strictEqual( - (e as ServiceError).message, + (e as ServiceError).message.substring(0, 250), 'Total timeout of API google.bigtable.admin.v2.BigtableInstanceAdmin exceeded 1000 milliseconds retrying error Error: 14 UNAVAILABLE: Name resolution failed for target dns:bigtableadmin.someUniverseDomain:443 before any response was received.', ); } finally { diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 5a8ac0c82..7561dda38 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -895,7 +895,6 @@ export const expectedOtelExportInput = { 'telemetry.sdk.language': 'nodejs', 'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.version': '1.30.1', - 'monitored_resource.project_id': 'my-project', }, asyncAttributesPending: false, _syncAttributes: { @@ -903,7 +902,6 @@ export const expectedOtelExportInput = { 'telemetry.sdk.language': 'nodejs', 'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.version': '1.30.1', - 'monitored_resource.project_id': 'my-project', }, _asyncAttributesPromise: {}, }, diff --git a/test-common/metrics-handler-fixture.ts b/test-common/metrics-handler-fixture.ts index 69fce0287..39e89cfcd 100644 --- a/test-common/metrics-handler-fixture.ts +++ b/test-common/metrics-handler-fixture.ts @@ -26,7 +26,6 @@ export const expectedRequestsHandled = [ cluster: 'fake-cluster3', zone: 'us-west1-c', method: 'Bigtable.ReadRows', - client_uid: 'fake-uuid', }, projectId: 'my-project', }, @@ -43,7 +42,6 @@ export const expectedRequestsHandled = [ cluster: 'fake-cluster3', zone: 'us-west1-c', method: 'Bigtable.ReadRows', - client_uid: 'fake-uuid', }, projectId: 'my-project', }, @@ -56,7 +54,6 @@ export const expectedRequestsHandled = [ cluster: 'fake-cluster3', zone: 'us-west1-c', method: 'Bigtable.ReadRows', - client_uid: 'fake-uuid', }, client_name: 'nodejs-bigtable', projectId: 'my-project', diff --git a/test-common/test-metrics-handler.ts b/test-common/test-metrics-handler.ts index 61257913f..0ace7b271 100644 --- a/test-common/test-metrics-handler.ts +++ b/test-common/test-metrics-handler.ts @@ -23,21 +23,20 @@ import { * It logs the metrics and attributes received by the onOperationComplete and onAttemptComplete methods. */ export class TestMetricsHandler implements IMetricsHandler { - private messages: {value: string}; + messages = {value: ''}; + projectId = 'projectId'; requestsHandled: (OnOperationCompleteData | OnAttemptCompleteData)[] = []; - constructor(messages: {value: string}) { - this.messages = messages; - } /** * Logs the metrics and attributes received for an operation completion. * @param {OnOperationCompleteData} data Metrics related to the completed operation. 
*/ onOperationComplete(data: OnOperationCompleteData) { - this.requestsHandled.push(data); - data.client_name = 'nodejs-bigtable'; + const dataWithProject = Object.assign({projectId: this.projectId}, data); + dataWithProject.client_name = 'nodejs-bigtable'; + this.requestsHandled.push(dataWithProject); this.messages.value += 'Recording parameters for onOperationComplete:\n'; - this.messages.value += `${JSON.stringify(data)}\n`; + this.messages.value += `${JSON.stringify(dataWithProject)}\n`; } /** @@ -45,9 +44,10 @@ export class TestMetricsHandler implements IMetricsHandler { * @param {OnOperationCompleteData} data Metrics related to the completed attempt. */ onAttemptComplete(data: OnAttemptCompleteData) { - this.requestsHandled.push(data); - data.client_name = 'nodejs-bigtable'; + const dataWithProject = Object.assign({projectId: this.projectId}, data); + dataWithProject.client_name = 'nodejs-bigtable'; + this.requestsHandled.push(dataWithProject); this.messages.value += 'Recording parameters for onAttemptComplete:\n'; - this.messages.value += `${JSON.stringify(data)}\n`; + this.messages.value += `${JSON.stringify(dataWithProject)}\n`; } } diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 655bdd78d..4a78f0388 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -18,7 +18,6 @@ import { ExportResult, metricsToRequest, } from '../../src/client-side-metrics/exporter'; -import {GCPMetricsHandler} from '../../src/client-side-metrics/gcp-metrics-handler'; import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; import {expectedRequestsHandled} from '../../test-common/metrics-handler-fixture'; import { @@ -31,6 +30,7 @@ import { } from '../../test-common/expected-otel-export-input'; import * as assert from 'assert'; import {replaceTimestamps} from '../../test-common/replace-timestamps'; +import * as proxyquire from 'proxyquire'; describe('Bigtable/GCPMetricsHandler', () => { it('Should export a value ready for sending to the CloudMonitoringExporter', function (done) { @@ -50,6 +50,10 @@ describe('Bigtable/GCPMetricsHandler', () => { let exported = false; class TestExporter extends MetricExporter { + constructor() { + super(); + } + export( metrics: ResourceMetrics, resultCallback: (result: ExportResult) => void, @@ -84,7 +88,10 @@ describe('Bigtable/GCPMetricsHandler', () => { JSON.parse(JSON.stringify(metrics)), expectedOtelExportInput, ); - const convertedRequest = metricsToRequest(parsedExportInput); + const convertedRequest = metricsToRequest( + 'my-project', + parsedExportInput, + ); assert.deepStrictEqual( convertedRequest.timeSeries.length, expectedOtelExportConvertedValue.timeSeries.length, @@ -113,10 +120,20 @@ describe('Bigtable/GCPMetricsHandler', () => { } } } + const stubs = { + './exporter': { + CloudMonitoringExporter: TestExporter, + }, + './generate-client-uuid': { + generateClientUuid: () => 'fake-uuid', + }, + }; + const FakeMetricsHandler = proxyquire( + '../../src/client-side-metrics/gcp-metrics-handler.js', + stubs, + ).GCPMetricsHandler; - const handler = new GCPMetricsHandler( - new TestExporter({projectId: 'some-project'}), - ); + const handler = new FakeMetricsHandler('my-project'); for (const request of expectedRequestsHandled) { if (request.attemptLatency) { diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index a0b5bd951..efcef9813 100644 --- 
a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -33,14 +33,15 @@ const protoPath = path.join( const root = gax.protobuf.loadSync(protoPath); const ResponseParams = root.lookupType('ResponseParams'); +const projectId = 'my-project'; + /** * A fake implementation of the Bigtable client for testing purposes. Provides a * metricsTracerFactory and a stubbed projectId method. */ class FakeBigtable { - clientUid = 'fake-uuid'; appProfileId?: string; - projectId = 'my-project'; + projectId = projectId; } /** @@ -53,6 +54,11 @@ class FakeInstance { id = 'fakeInstanceId'; } +const logger = {value: ''}; +const testHandler = new TestMetricsHandler(); +testHandler.projectId = projectId; +testHandler.messages = logger; + describe('Bigtable/MetricsCollector', () => { class FakeHRTime { startTime = BigInt(0); @@ -67,16 +73,16 @@ describe('Bigtable/MetricsCollector', () => { 'node:process': { hrtime: new FakeHRTime(), }, + './gcp-metrics-handler': { + GCPMetricsHandler: testHandler, + }, }; const FakeOperationsMetricsCollector = proxyquire( '../../src/client-side-metrics/operation-metrics-collector.js', stubs, ).OperationMetricsCollector; - const logger = {value: ''}; - it('should record the right metrics with a typical method call', async () => { - const testHandler = new TestMetricsHandler(logger); class FakeTable { id = 'fakeTableId'; instance = new FakeInstance(); @@ -112,10 +118,8 @@ describe('Bigtable/MetricsCollector', () => { this, MethodName.READ_ROWS, StreamingState.STREAMING, + [testHandler as unknown as GCPMetricsHandler], ); - FakeOperationsMetricsCollector.metricsHandlers = [ - testHandler as unknown as GCPMetricsHandler, - ]; // In this method we simulate a series of events that might happen // when a user calls one of the Table methods. // Here is an example of what might happen in a method call: @@ -138,10 +142,7 @@ describe('Bigtable/MetricsCollector', () => { logger.value += '9. User receives second row.\n'; metricsCollector.onRowReachesUser(); logger.value += '10. A transient error occurs.\n'; - metricsCollector.onAttemptComplete( - this.bigtable.projectId, - grpc.status.DEADLINE_EXCEEDED, - ); + metricsCollector.onAttemptComplete(grpc.status.DEADLINE_EXCEEDED); logger.value += '11. After a timeout, the second attempt is made.\n'; metricsCollector.onAttemptStart(); logger.value += '12. Client receives status information.\n'; @@ -160,10 +161,7 @@ describe('Bigtable/MetricsCollector', () => { metricsCollector.onRowReachesUser(); logger.value += '19. User reads row 1\n'; logger.value += '20. 
Stream ends, operation completes\n'; - metricsCollector.onOperationComplete( - this.bigtable.projectId, - grpc.status.OK, - ); + metricsCollector.onOperationComplete(grpc.status.OK); } } } diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index cd71a36fd..fcba50ab2 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -23,7 +23,9 @@ import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; describe('Bigtable/metricsToRequest', () => { it('Converts an otel request to a request ready for the metric service client', () => { + const projectId = 'my-project'; const convertedValue = metricsToRequest( + projectId, expectedOtelExportInput as unknown as ResourceMetrics, ); assert.deepStrictEqual( diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index 917dcd0c6..17a134445 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -15,7 +15,7 @@ getDate call returns 5000 ms 10. A transient error occurs. getDate call returns 6000 ms Recording parameters for onAttemptComplete: -{"attemptLatency":4000,"serverLatency":101,"connectivityErrorCount":0,"streaming":"true","status":"4","client_name":"nodejs-bigtable","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","method":"Bigtable.ReadRows","client_uid":"fake-uuid"},"projectId":"my-project"} +{"projectId":"my-project","attemptLatency":4000,"serverLatency":101,"connectivityErrorCount":0,"streaming":"true","status":"4","client_name":"nodejs-bigtable","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","method":"Bigtable.ReadRows"}} 11. After a timeout, the second attempt is made. getDate call returns 7000 ms 12. Client receives status information. @@ -31,7 +31,7 @@ getDate call returns 9000 ms 20. 
Stream ends, operation completes getDate call returns 10000 ms Recording parameters for onAttemptComplete: -{"attemptLatency":3000,"serverLatency":103,"connectivityErrorCount":0,"streaming":"true","status":"0","client_name":"nodejs-bigtable","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","method":"Bigtable.ReadRows","client_uid":"fake-uuid"},"projectId":"my-project"} +{"projectId":"my-project","attemptLatency":3000,"serverLatency":103,"connectivityErrorCount":0,"streaming":"true","status":"0","client_name":"nodejs-bigtable","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","method":"Bigtable.ReadRows"}} getDate call returns 11000 ms Recording parameters for onOperationComplete: -{"status":"0","streaming":"true","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","method":"Bigtable.ReadRows","client_uid":"fake-uuid"},"client_name":"nodejs-bigtable","projectId":"my-project","operationLatency":10000,"retryCount":1,"firstResponseLatency":2000,"applicationLatencies":[1000,1000]} +{"projectId":"my-project","status":"0","streaming":"true","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","method":"Bigtable.ReadRows"},"client_name":"nodejs-bigtable","operationLatency":10000,"retryCount":1,"firstResponseLatency":2000,"applicationLatencies":[1000,1000]} diff --git a/test/table.ts b/test/table.ts index a1a28282a..960a6509c 100644 --- a/test/table.ts +++ b/test/table.ts @@ -30,6 +30,9 @@ import * as tblTypes from '../src/table'; import {Bigtable, RequestOptions} from '../src'; import {EventEmitter} from 'events'; import {TableUtils} from '../src/utils/table'; +import {ClientSideMetricsConfigManager} from '../src/client-side-metrics/metrics-config-manager'; +import {IMetricsHandler} from '../src/client-side-metrics/metrics-handler'; +import {OperationMetricsCollector} from '../src/client-side-metrics/operation-metrics-collector'; const sandbox = sinon.createSandbox(); const noop = () => {}; @@ -59,6 +62,24 @@ function createFake(klass: any) { }; } +class FakeMetricsCollector { + onOperationStart() {} + onOperationComplete() {} + onResponse() {} + onAttemptStart() {} + onAttemptComplete() {} + onMetadataReceived() {} + handleStatusAndMetadata() {} + onStatusMetadataReceived() {} + onRowReachesUser() {} +} + +class FakeMetricsConfigManager extends ClientSideMetricsConfigManager { + createOperation() { + return new FakeMetricsCollector() as unknown as OperationMetricsCollector; + } +} + const FakeFamily = createFake(Family); FakeFamily.formatRule_ = sinon.spy(rule => rule); @@ -130,7 +151,11 @@ describe('Bigtable/Table', () => { beforeEach(() => { INSTANCE = { - bigtable: {} as Bigtable, + bigtable: { + _metricsConfigManager: new FakeMetricsConfigManager( + [], + ) as ClientSideMetricsConfigManager, + } as Bigtable, name: 'a/b/c/d', } as inst.Instance; TABLE_NAME = INSTANCE.name + '/tables/' + TABLE_ID; From a79dfb6f862e65539789b50219d2164d7471986a Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 28 May 2025 11:43:52 -0400 Subject: [PATCH 415/448] Add warnings --- src/client-side-metrics/operation-metrics-collector.ts | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 
1ecf8c6d8..8a982c18e 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -14,7 +14,7 @@ import * as fs from 'fs'; import {MethodName, StreamingState} from './client-side-metrics-attributes'; -import {grpc} from 'google-gax'; +import {grpc, ServiceError} from 'google-gax'; import * as gax from 'google-gax'; import {AbortableDuplex, BigtableOptions} from '../index'; import * as path from 'path'; @@ -77,7 +77,8 @@ function withMetricsDebug<T>(fn: () => T): T | undefined { return fn(); } catch (e) { if (METRICS_DEBUG) { - throw e; + console.warn('METRICS_DEBUG warning'); + console.warn((e as ServiceError).message); } } return; From cd25919f5a7fe7e804201a4cbe9c99a73d5718eb Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 28 May 2025 11:45:50 -0400 Subject: [PATCH 416/448] private readonly variable --- src/client-side-metrics/operation-metrics-collector.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 8a982c18e..4b6ee7007 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -100,6 +100,7 @@ function checkState( * A class for tracing and recording client-side metrics related to Bigtable operations. */ export class OperationMetricsCollector { + private readonly INSTANCE_INFORMATION_KEY = 'x-goog-ext-425905942-bin'; private state: MetricsCollectorState; private operationStartTime: bigint | null; private attemptStartTime: bigint | null; @@ -391,9 +392,8 @@ export class OperationMetricsCollector { }) { withMetricsDebug(() => { if (!this.zone || !this.cluster) { - const INSTANCE_INFORMATION_KEY = 'x-goog-ext-425905942-bin'; const mappedValue = status.metadata.internalRepr.get( - INSTANCE_INFORMATION_KEY, + this.INSTANCE_INFORMATION_KEY, ) as Buffer[]; if (mappedValue && mappedValue[0] && ResponseParams) { const decodedValue = ResponseParams.decode( From 3b7099e9e0df0a9df2e4fbae11a1a2094f406b63 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 28 May 2025 11:48:03 -0400 Subject: [PATCH 417/448] Add default zone and cluster ids --- src/client-side-metrics/operation-metrics-collector.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 4b6ee7007..599ef0326 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -153,8 +153,8 @@ export class OperationMetricsCollector { { instanceId: this.tabularApiSurface.instance.id, table: this.tabularApiSurface.id, - cluster: this.cluster, - zone: this.zone, + cluster: this.cluster || 'unspecified', + zone: this.zone || 'global', method: this.methodName, }, appProfileId ?
{app_profile: appProfileId} : {}, From 81d55f5ed9dfd9050220e2b2154717a6dcf837ab Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 28 May 2025 13:22:27 -0400 Subject: [PATCH 418/448] Add a test for second project id --- .../metric-service-client-credentials.ts | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/system-test/metric-service-client-credentials.ts b/system-test/metric-service-client-credentials.ts index ca918f6ff..083ddd645 100644 --- a/system-test/metric-service-client-credentials.ts +++ b/system-test/metric-service-client-credentials.ts @@ -16,6 +16,7 @@ import * as proxyquire from 'proxyquire'; import {ClientOptions} from 'google-gax'; import * as assert from 'assert'; import {setupBigtable} from './client-side-metrics-setup-table'; +import {MetricServiceClient} from '@google-cloud/monitoring'; describe('Bigtable/MetricServiceClientCredentials', () => { it('should pass the credentials to the metric service client', done => { @@ -60,4 +61,31 @@ describe('Bigtable/MetricServiceClientCredentials', () => { throw err; }); }); + it('should use a second project for the metric service client', async () => { + const SECOND_PROJECT_ID = 'second-project-id'; + const clientOptions = {metricsEnabled: true, projectId: SECOND_PROJECT_ID}; + let savedOptions: ClientOptions = {}; + class FakeExporter { + constructor(options: ClientOptions) { + savedOptions = options; + } + } + const FakeGCPMetricsHandler = proxyquire( + '../src/client-side-metrics/gcp-metrics-handler.js', + { + './exporter': { + CloudMonitoringExporter: FakeExporter, + }, + }, + ).GCPMetricsHandler; + const FakeBigtable = proxyquire('../src/index.js', { + './client-side-metrics/gcp-metrics-handler': { + GCPMetricsHandler: FakeGCPMetricsHandler, + }, + }).Bigtable; + new FakeBigtable(clientOptions); + const client = new MetricServiceClient(savedOptions); + const projectIdUsed = await client.getProjectId(); + assert.strictEqual(projectIdUsed, SECOND_PROJECT_ID); + }); }); From 8757a65edaf56945fe23cc3f1d8a1e4aa166c58c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 29 May 2025 14:16:50 -0400 Subject: [PATCH 419/448] Better naming conventions for project --- system-test/client-side-metrics.ts | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index 255b2368a..e1e37a8a0 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -53,13 +53,13 @@ function getHandlerFromExporter(Exporter: typeof CloudMonitoringExporter) { }).GCPMetricsHandler; } -describe('Bigtable/ClientSideMetrics', () => { +describe.only('Bigtable/ClientSideMetrics', () => { const instanceId1 = 'emulator-test-instance'; const instanceId2 = 'emulator-test-instance2'; const tableId1 = 'my-table'; const tableId2 = 'my-table2'; const columnFamilyId = 'cf1'; - let projectId: string; + let defaultProjectId: string; before(async () => { const bigtable = new Bigtable(); @@ -69,7 +69,7 @@ describe('Bigtable/ClientSideMetrics', () => { tableId2, ]); } - projectId = await new Promise((resolve, reject) => { + defaultProjectId = await new Promise((resolve, reject) => { bigtable.getProjectId_((err: Error | null, projectId?: string) => { if (err) { reject(err); } @@ -166,7 +166,7 @@ describe('Bigtable/ClientSideMetrics', () => { it('should send the metrics to Google Cloud Monitoring for a ReadRows call', done => { (async () => { try { - const bigtable = await mockBigtable(projectId, done); + const bigtable = await
mockBigtable(defaultProjectId, done); for (const instanceId of [instanceId1, instanceId2]) { await setupBigtable(bigtable, columnFamilyId, instanceId, [ tableId1, @@ -280,8 +280,8 @@ }, 120000); (async () => { try { - const bigtable1 = await mockBigtable(projectId, done); - const bigtable2 = await mockBigtable(projectId, done); + const bigtable1 = await mockBigtable(defaultProjectId, done); + const bigtable2 = await mockBigtable(defaultProjectId, done); for (const bigtable of [bigtable1, bigtable2]) { for (const instanceId of [instanceId1, instanceId2]) { await setupBigtable(bigtable, columnFamilyId, instanceId, [ @@ -337,7 +337,7 @@ } }; bigtableList.push( - await mockBigtable(projectId, done, onExportSuccess), + await mockBigtable(defaultProjectId, done, onExportSuccess), ); } for (const bigtable of bigtableList) { @@ -483,7 +483,7 @@ it('should send the metrics to the metrics handler for a ReadRows call', done => { (async () => { - const bigtable = await mockBigtable(projectId, done); + const bigtable = await mockBigtable(defaultProjectId, done); const instance = bigtable.instance(instanceId1); const table = instance.table(tableId1); await table.getRows(); From 266e65ebe4b0c1f25676d2719ad75c645aa5c390 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 29 May 2025 15:15:32 -0400 Subject: [PATCH 420/448] don’t retry MSC, warning --- src/client-side-metrics/exporter.ts | 25 +------------------ .../operation-metrics-collector.ts | 3 +-- 2 files changed, 2 insertions(+), 26 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 3fd9bf521..ae6aaf1ec 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -309,30 +309,7 @@ export class CloudMonitoringExporter extends MetricExporter { try { const projectId = await this.client.getProjectId(); const request = metricsToRequest(projectId, metrics); - // We need the client to retry or we get errors: - // in addition, done() received error: Error: 4 DEADLINE_EXCEEDED: Deadline exceeded after 12.757s,name resolution: 1.614s,metadata filters: 0.001s,time to current attempt start: 0.029s,Waiting for LB pick - const retry = new RetryOptions( - [ - grpc.status.INVALID_ARGUMENT, - grpc.status.DEADLINE_EXCEEDED, - grpc.status.RESOURCE_EXHAUSTED, - grpc.status.ABORTED, - grpc.status.UNAVAILABLE, - ], - { - initialRetryDelayMillis: 5000, - retryDelayMultiplier: 2, - maxRetryDelayMillis: 50000, - totalTimeoutMillis: 50000, - }, - ); - - await this.client.createTimeSeries( - request as ICreateTimeSeriesRequest, - { - retry, - }, - ); + await this.client.createTimeSeries(request as ICreateTimeSeriesRequest); // The resultCallback typically accepts a value equal to {code: x} // for some value x along with other info. When the code is equal to 0 // then the operation completed successfully.
When the code is not equal diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 599ef0326..0299dbac3 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -70,8 +70,7 @@ enum MetricsCollectorState { OPERATION_COMPLETE, } -// This method swallows errors when metrics debugging is not enabled so -// that errors don't bubble up to the user. +// This method swallows errors so they don't bubble up to the user, logging them as warnings when METRICS_DEBUG is enabled. function withMetricsDebug<T>(fn: () => T): T | undefined { try { return fn(); From b394dcda0d742977c8dfcd5bb4b7a967a6f97756 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 29 May 2025 15:23:01 -0400 Subject: [PATCH 421/448] Change to unspecified --- src/client-side-metrics/operation-metrics-collector.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 0299dbac3..4b29efda0 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -152,7 +152,7 @@ export class OperationMetricsCollector { { instanceId: this.tabularApiSurface.instance.id, table: this.tabularApiSurface.id, - cluster: this.cluster || 'unspecified', + cluster: this.cluster || '', zone: this.zone || 'global', method: this.methodName, }, From 910ea865c9b04b22479273033ac65737899950f4 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 29 May 2025 15:27:31 -0400 Subject: [PATCH 422/448] Remove unused method --- src/index.ts | 9 --------- 1 file changed, 9 deletions(-) diff --git a/src/index.ts b/src/index.ts index 045aa3605..3690d3184 100644 --- a/src/index.ts +++ b/src/index.ts @@ -48,8 +48,6 @@ import {GCPMetricsHandler} from './client-side-metrics/gcp-metrics-handler'; // eslint-disable-next-line @typescript-eslint/no-var-requires const streamEvents = require('stream-events'); -const crypto = require('crypto'); - // eslint-disable-next-line @typescript-eslint/no-var-requires const PKG = require('../../package.json'); @@ -140,13 +138,6 @@ function getDomain(prefix: string, opts?: gax.ClientOptions) { }`; } -function generateClientUuid() { - const hostname = os.hostname() || 'localhost'; - const currentPid = process.pid || ''; - const uuid4 = crypto.randomUUID(); - return `node-${uuid4}-${currentPid}${hostname}`; -} - /** * @typedef {object} ClientConfig * @property {string} [apiEndpoint] Override the default API endpoint used From 5bf194e9a125c5b7f14957ef09068ec4ece1b1e2 Mon Sep 17 00:00:00 2001 From: danieljbruce Date: Wed, 4 Jun 2025 09:45:47 -0400 Subject: [PATCH 423/448] feat: Record READ_ROW metrics for single row calls (#1612) * Delegate createReadStream * Pull out getRows into an internal function * Remove only * We have some duplicate code so add a TODO * Almost fixed all the row tests * Fix last remaining test * Settings for Read_Row vs Read_Rows * Set to false singlerow * Add new READ_ROW test Also pull check into a separate fn * Fix the single row call for the test * Fix test for single row read * Refactor some of the proxyquire code * Remove only --- .../client-side-metrics-attributes.ts | 1 + src/row.ts | 3 +- src/tabular-api-surface.ts | 409 +--------------- src/utils/createReadStreamInternal.ts | 445 ++++++++++++++++++ src/utils/getRowsInternal.ts | 66 +++ system-test/client-side-metrics.ts | 270 +++++++---- test/row.ts | 101 +++- test/table.ts | 100 +++- 8
files changed, 859 insertions(+), 536 deletions(-) create mode 100644 src/utils/createReadStreamInternal.ts create mode 100644 src/utils/getRowsInternal.ts diff --git a/src/client-side-metrics/client-side-metrics-attributes.ts b/src/client-side-metrics/client-side-metrics-attributes.ts index f5fbf911d..1946cacdd 100644 --- a/src/client-side-metrics/client-side-metrics-attributes.ts +++ b/src/client-side-metrics/client-side-metrics-attributes.ts @@ -25,6 +25,7 @@ export enum StreamingState { * metrics, allowing for differentiation of performance by method. */ export enum MethodName { + READ_ROW = 'Bigtable.ReadRow', READ_ROWS = 'Bigtable.ReadRows', MUTATE_ROW = 'Bigtable.MutateRow', CHECK_AND_MUTATE_ROW = 'Bigtable.CheckAndMutateRow', diff --git a/src/row.ts b/src/row.ts index 9383f4703..3ca4029d0 100644 --- a/src/row.ts +++ b/src/row.ts @@ -31,6 +31,7 @@ import {ServiceError} from 'google-gax'; import {google} from '../protos/protos'; import {RowDataUtils, RowProperties} from './row-data-utils'; import {TabularApiSurface} from './tabular-api-surface'; +import {getRowsInternal} from './utils/getRowsInternal'; export interface Rule { column: string; @@ -666,7 +667,7 @@ export class Row { filter, }); - this.table.getRows(getRowsOptions, (err, rows) => { + void getRowsInternal(this.table, true, getRowsOptions, (err, rows) => { if (err) { callback(err); return; diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 05dbc3f73..ef8408b33 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -17,32 +17,22 @@ import arrify = require('arrify'); import {Instance} from './instance'; import {Mutation} from './mutation'; import { - AbortableDuplex, Bigtable, Entry, MutateOptions, SampleRowKeysCallback, SampleRowsKeysResponse, } from './index'; -import {Filter, BoundData, RawFilter} from './filter'; +import {BoundData, RawFilter} from './filter'; import {Row} from './row'; -import { - ChunkPushData, - ChunkPushLastScannedRowData, - ChunkTransformer, - DataEvent, -} from './chunktransformer'; import {BackoffSettings} from 'google-gax/build/src/gax'; import {google} from '../protos/protos'; import {CallOptions, grpc, ServiceError} from 'google-gax'; -import {Duplex, PassThrough, Transform} from 'stream'; +import {Transform} from 'stream'; import * as is from 'is'; import {GoogleInnerError} from './table'; -import {TableUtils} from './utils/table'; -import { - MethodName, - StreamingState, -} from './client-side-metrics/client-side-metrics-attributes'; +import {createReadStreamInternal} from './utils/createReadStreamInternal'; +import {getRowsInternal} from './utils/getRowsInternal'; // See protos/google/rpc/code.proto // (4=DEADLINE_EXCEEDED, 8=RESOURCE_EXHAUSTED, 10=ABORTED, 14=UNAVAILABLE) @@ -163,7 +153,9 @@ export class TabularApiSurface { id: string; metadata?: google.bigtable.admin.v2.ITable; maxRetries?: number; - protected viewName?: string; + // We need viewName to be public because now we need it in Row class + // We need it in Row class because now we use getRowsInternal instead of getRows + viewName?: string; protected constructor(instance: Instance, id: string, viewName?: string) { this.bigtable = instance.bigtable; @@ -191,7 +183,6 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); * Get {@link Row} objects for the rows currently in your table as a * readable object stream. * - * @param {object} [options] Configuration object. 
* @param {boolean} [options.decode=true] If set to `false` it will not decode * Buffer values returned from Bigtable. * @param {boolean} [options.encoding] The encoding to use when converting @@ -212,376 +203,10 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); * * @example include:samples/api-reference-doc-snippets/table.js * region_tag:bigtable_api_table_readstream + * @param opts */ createReadStream(opts?: GetRowsOptions) { - const options = opts || {}; - const maxRetries = is.number(this.maxRetries) ? this.maxRetries! : 10; - let activeRequestStream: AbortableDuplex | null; - let rowKeys: string[]; - let filter: {} | null; - const rowsLimit = options.limit || 0; - const hasLimit = rowsLimit !== 0; - - let numConsecutiveErrors = 0; - let numRequestsMade = 0; - let retryTimer: NodeJS.Timeout | null; - - rowKeys = options.keys || []; - - /* - The following line of code sets the timeout if it was provided while - creating the client. This will be used to determine if the client should - retry on DEADLINE_EXCEEDED errors. Eventually, this will be handled - downstream in google-gax. - */ - const timeout = - opts?.gaxOptions?.timeout || - (this?.bigtable?.options?.BigtableClient?.clientConfig?.interfaces && - this?.bigtable?.options?.BigtableClient?.clientConfig?.interfaces[ - 'google.bigtable.v2.Bigtable' - ]?.methods['ReadRows']?.timeout_millis); - const callTimeMillis = new Date().getTime(); - - const ranges = TableUtils.getRanges(options); - - // If rowKeys and ranges are both empty, the request is a full table scan. - // Add an empty range to simplify the resumption logic. - if (rowKeys.length === 0 && ranges.length === 0) { - ranges.push({}); - } - - if (options.filter) { - filter = Filter.parse(options.filter); - } - - let chunkTransformer: ChunkTransformer; - let rowStream: Duplex; - - let userCanceled = false; - // The key of the last row that was emitted by the per attempt pipeline - // Note: this must be updated from the operation level userStream to avoid referencing buffered rows that will be - // discarded in the per attempt subpipeline (rowStream) - let lastRowKey = ''; - let rowsRead = 0; - const userStream = new PassThrough({ - objectMode: true, - readableHighWaterMark: 0, // We need to disable readside buffering to allow for acceptable behavior when the end user cancels the stream early. - writableHighWaterMark: 0, // We need to disable writeside buffering because in nodejs 14 the call to _transform happens after write buffering. This creates problems for tracking the last seen row key. - transform(event, _encoding, callback) { - if (userCanceled) { - callback(); - return; - } - if (event.eventType === DataEvent.LAST_ROW_KEY_UPDATE) { - /** - * This code will run when receiving an event containing - * lastScannedRowKey data that the chunk transformer sent. When the - * chunk transformer gets lastScannedRowKey data, this code - * updates the lastRowKey to ensure row ids with the lastScannedRowKey - * aren't re-requested in retries. The lastRowKey needs to be updated - * here and not in the chunk transformer to ensure the update is - * queued behind all events that deliver data to the user stream - * first. - */ - lastRowKey = event.lastScannedRowKey; - callback(); - return; - } - const row = event; - if (TableUtils.lessThanOrEqualTo(row.id, lastRowKey)) { - /* - Sometimes duplicate rows reach this point. To avoid delivering - duplicate rows to the user, rows are thrown away if they don't exceed - the last row key. 
We can expect each row to reach this point and rows - are delivered in order so if the last row key equals or exceeds the - row id then we know data for this row has already reached this point - and been delivered to the user. In this case we want to throw the row - away and we do not want to deliver this row to the user again. - */ - callback(); - return; - } - lastRowKey = row.id; - rowsRead++; - callback(null, row); - }, - }); - - // The caller should be able to call userStream.end() to stop receiving - // more rows and cancel the stream prematurely. But also, the 'end' event - // will be emitted if the stream ended normally. To tell these two - // situations apart, we'll save the "original" end() function, and - // will call it on rowStream.on('end'). - const originalEnd = userStream.end.bind(userStream); - - // Taking care of this extra listener when piping and unpiping userStream: - const rowStreamPipe = (rowStream: Duplex, userStream: PassThrough) => { - rowStream.pipe(userStream, {end: false}); - rowStream.on('end', originalEnd); - }; - const rowStreamUnpipe = (rowStream: Duplex, userStream: PassThrough) => { - rowStream?.unpipe(userStream); - rowStream?.removeListener('end', originalEnd); - }; - - // eslint-disable-next-line @typescript-eslint/no-explicit-any - userStream.end = (chunk?: any, encoding?: any, cb?: () => void) => { - rowStreamUnpipe(rowStream, userStream); - userCanceled = true; - if (activeRequestStream) { - activeRequestStream.abort(); - } - if (retryTimer) { - clearTimeout(retryTimer); - } - return originalEnd(chunk, encoding, cb); - }; - const metricsCollector = - this.bigtable._metricsConfigManager.createOperation( - MethodName.READ_ROWS, - StreamingState.STREAMING, - this, - ); - metricsCollector.onOperationStart(); - const makeNewRequest = () => { - metricsCollector.onAttemptStart(); - - // Avoid cancelling an expired timer if user - // cancelled the stream in the middle of a retry - retryTimer = null; - - // eslint-disable-next-line @typescript-eslint/no-explicit-any - chunkTransformer = new ChunkTransformer({ - decode: options.decode, - } as any); - - // If the viewName is provided then request will be made for an - // authorized view. Otherwise, the request is made for a table. - const reqOpts = ( - this.viewName - ? { - authorizedViewName: `${this.name}/authorizedViews/${this.viewName}`, - appProfileId: this.bigtable.appProfileId, - } - : { - tableName: this.name, - appProfileId: this.bigtable.appProfileId, - } - ) as google.bigtable.v2.IReadRowsRequest; - - const retryOpts = { - currentRetryAttempt: 0, // was numConsecutiveErrors - // Handling retries in this client. Specify the retry options to - // make sure nothing is retried in retry-request. - noResponseRetries: 0, - shouldRetryFn: (_: any) => { - return false; - }, - }; - - if (lastRowKey) { - // Readjust and/or remove ranges based on previous valid row reads. - // Iterate backward since items may need to be removed. - for (let index = ranges.length - 1; index >= 0; index--) { - const range = ranges[index]; - const startValue = is.object(range.start) - ? (range.start as BoundData).value - : range.start; - const endValue = is.object(range.end) - ? 
(range.end as BoundData).value - : range.end; - const startKeyIsRead = - !startValue || - TableUtils.lessThanOrEqualTo( - startValue as string, - lastRowKey as string, - ); - const endKeyIsNotRead = - !endValue || - (endValue as Buffer).length === 0 || - TableUtils.lessThan(lastRowKey as string, endValue as string); - if (startKeyIsRead) { - if (endKeyIsNotRead) { - // EndKey is not read, reset the range to start from lastRowKey open - range.start = { - value: lastRowKey, - inclusive: false, - }; - } else { - // EndKey is read, remove this range - ranges.splice(index, 1); - } - } - } - - // Remove rowKeys already read. - rowKeys = rowKeys.filter(rowKey => - TableUtils.greaterThan(rowKey, lastRowKey as string), - ); - - // If there was a row limit in the original request and - // we've already read all the rows, end the stream and - // do not retry. - if (hasLimit && rowsLimit === rowsRead) { - userStream.end(); - return; - } - // If all the row keys and ranges are read, end the stream - // and do not retry. - if (rowKeys.length === 0 && ranges.length === 0) { - userStream.end(); - return; - } - } - - // Create the new reqOpts - reqOpts.rows = {}; - - // TODO: preprocess all the keys and ranges to Bytes - reqOpts.rows.rowKeys = rowKeys.map( - Mutation.convertToBytes, - ) as {} as Uint8Array[]; - - reqOpts.rows.rowRanges = ranges.map(range => - Filter.createRange( - range.start as BoundData, - range.end as BoundData, - 'Key', - ), - ); - - if (filter) { - reqOpts.filter = filter; - } - - if (hasLimit) { - reqOpts.rowsLimit = rowsLimit - rowsRead; - } - - const gaxOpts = populateAttemptHeader( - numRequestsMade, - options.gaxOptions, - ); - - const requestStream = this.bigtable.request({ - client: 'BigtableClient', - method: 'readRows', - reqOpts, - gaxOpts, - retryOpts, - }); - - activeRequestStream = requestStream!; - - const toRowStream = new Transform({ - transform: (rowData: ChunkPushData, _, next) => { - if ( - userCanceled || - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (userStream as any)._writableState.ended - ) { - return next(); - } - if ( - (rowData as ChunkPushLastScannedRowData).eventType === - DataEvent.LAST_ROW_KEY_UPDATE - ) { - /** - * If the data is the chunk transformer communicating that the - * lastScannedRow was received then this message is passed along - * to the user stream to update the lastRowKey. - */ - next(null, rowData); - } else { - /** - * If the data is just regular rows being pushed from the - * chunk transformer then the rows are encoded so that they - * can be consumed by the user stream. - */ - const row = this.row((rowData as Row).key as string); - row.data = (rowData as Row).data; - next(null, row); - } - }, - objectMode: true, - }); - - rowStream = pumpify.obj([requestStream, chunkTransformer, toRowStream]); - - // Retry on "received rst stream" errors - const isRstStreamError = (error: ServiceError): boolean => { - if (error.code === 13 && error.message) { - const error_message = (error.message || '').toLowerCase(); - return ( - error.code === 13 && - (error_message.includes('rst_stream') || - error_message.includes('rst stream')) - ); - } - return false; - }; - - metricsCollector.handleStatusAndMetadata(requestStream); - rowStream - .on('error', (error: ServiceError) => { - rowStreamUnpipe(rowStream, userStream); - activeRequestStream = null; - if (IGNORED_STATUS_CODES.has(error.code)) { - // We ignore the `cancelled` "error", since we are the ones who cause - // it when the user calls `.abort()`. 
- userStream.end(); - metricsCollector.onOperationComplete(error.code); - return; - } - numConsecutiveErrors++; - numRequestsMade++; - if ( - numConsecutiveErrors <= maxRetries && - (RETRYABLE_STATUS_CODES.has(error.code) || - isRstStreamError(error)) && - !(timeout && timeout < new Date().getTime() - callTimeMillis) - ) { - const backOffSettings = - options.gaxOptions?.retry?.backoffSettings || - DEFAULT_BACKOFF_SETTINGS; - const nextRetryDelay = getNextDelay( - numConsecutiveErrors, - backOffSettings, - ); - metricsCollector.onAttemptComplete(error.code); - retryTimer = setTimeout(makeNewRequest, nextRetryDelay); - } else { - if ( - !error.code && - error.message === 'The client has already been closed.' - ) { - // - // The TestReadRows_Generic_CloseClient conformance test requires - // a grpc code to be present when the client is closed. The - // appropriate code for a closed client is CANCELLED since the - // user actually cancelled the call by closing the client. - // - error.code = grpc.status.CANCELLED; - } - metricsCollector.onOperationComplete(error.code); - userStream.emit('error', error); - } - }) - .on('data', _ => { - // Reset error count after a successful read so the backoff - // time won't keep increasing when as stream had multiple errors - numConsecutiveErrors = 0; - metricsCollector.onResponse(); - }) - .on('end', () => { - activeRequestStream = null; - metricsCollector.onOperationComplete(grpc.status.OK); - }); - rowStreamPipe(rowStream, userStream); - }; - - makeNewRequest(); - return userStream; + return createReadStreamInternal(this, false, opts); } getRows(options?: GetRowsOptions): Promise; @@ -594,32 +219,22 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); * before returning the results. Instead we recommend using the streaming API * via {@link Table#createReadStream}. * - * @param {object} [options] Configuration object. See * {@link Table#createReadStream} for a complete list of options. * @param {object} [options.gaxOptions] Request configuration options, outlined * here: https://googleapis.github.io/gax-nodejs/CallSettings.html. - * @param {function} callback The callback function. * @param {?error} callback.err An error returned while making this request. * @param {Row[]} callback.rows List of Row objects. * * @example include:samples/api-reference-doc-snippets/table.js * region_tag:bigtable_api_get_rows + * @param optionsOrCallback + * @param cb */ getRows( optionsOrCallback?: GetRowsOptions | GetRowsCallback, cb?: GetRowsCallback, ): void | Promise { - const callback = - typeof optionsOrCallback === 'function' ? optionsOrCallback : cb!; - const options = - typeof optionsOrCallback === 'object' ? optionsOrCallback : {}; - this.createReadStream(options) - .on('error', callback) - .pipe( - concat((rows: Row[]) => { - callback(null, rows); - }), - ); + return getRowsInternal(this, false, optionsOrCallback, cb); } insert( diff --git a/src/utils/createReadStreamInternal.ts b/src/utils/createReadStreamInternal.ts new file mode 100644 index 000000000..32b98c331 --- /dev/null +++ b/src/utils/createReadStreamInternal.ts @@ -0,0 +1,445 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import {GetRowsOptions} from '../table'; +import {Row} from '../row'; +import * as is from 'is'; +import {Filter, BoundData} from '../filter'; +import {Mutation} from '../mutation'; +import {AbortableDuplex} from '../index'; +import { + ChunkPushData, + ChunkPushLastScannedRowData, + ChunkTransformer, + DataEvent, +} from '../chunktransformer'; +import {TableUtils} from './table'; +import {Duplex, PassThrough, Transform} from 'stream'; +import { + MethodName, + StreamingState, +} from '../client-side-metrics/client-side-metrics-attributes'; +import {google} from '../../protos/protos'; +const pumpify = require('pumpify'); +import {grpc, ServiceError} from 'google-gax'; +import { + DEFAULT_BACKOFF_SETTINGS, + getNextDelay, + IGNORED_STATUS_CODES, + populateAttemptHeader, + RETRYABLE_STATUS_CODES, + TabularApiSurface, +} from '../tabular-api-surface'; + +/** + * Creates a readable stream of rows from a Bigtable table or authorized view. + * + * This internal method handles the core logic for streaming rows from a Bigtable + * table. It supports various filtering, limiting, and retry mechanisms. It can + * be used to create a stream for either a whole table or an authorized view. + * + * @param {Table} table The Table instance to read rows from. + * @param {boolean} singleRow boolean to check if the request is for a single row. + * @param {GetRowsOptions} [opts] Optional configuration for the read operation. + * @param {boolean} [opts.decode=true] If set to `false` it will not decode + * Buffer values returned from Bigtable. + * @param {boolean} [opts.encoding] The encoding to use when converting + * Buffer values to a string. + * @param {string} [opts.end] End value for key range. + * @param {Filter} [opts.filter] Row filters allow you to + * both make advanced queries and format how the data is returned. + * @param {object} [opts.gaxOptions] Request configuration options, outlined + * here: https://googleapis.github.io/gax-nodejs/CallSettings.html. + * @param {string[]} [opts.keys] A list of row keys. + * @param {number} [opts.limit] Maximum number of rows to be returned. + * @param {string} [opts.prefix] Prefix that the row key must match. + * @param {string[]} [opts.prefixes] List of prefixes that a row key must + * match. + * @param {object[]} [opts.ranges] A list of key ranges. + * @param {string} [opts.start] Start value for key range. + * @param {string} [viewName] The name of the authorized view, if applicable. + * @returns {stream} A readable stream of {@link Row} objects. + * + */ +export function createReadStreamInternal( + table: TabularApiSurface, + singleRow: boolean, + opts?: GetRowsOptions, +) { + const options = opts || {}; + const maxRetries = is.number(table.maxRetries) ? table.maxRetries! 
: 10; + let activeRequestStream: AbortableDuplex | null; + let rowKeys: string[]; + let filter: {} | null; + const rowsLimit = options.limit || 0; + const hasLimit = rowsLimit !== 0; + + const viewName = table.viewName; + + let numConsecutiveErrors = 0; + let numRequestsMade = 0; + let retryTimer: NodeJS.Timeout | null; + + rowKeys = options.keys || []; + + /* + The following line of code sets the timeout if it was provided while + creating the client. This will be used to determine if the client should + retry on DEADLINE_EXCEEDED errors. Eventually, this will be handled + downstream in google-gax. + */ + const timeout = + opts?.gaxOptions?.timeout || + (table?.bigtable?.options?.BigtableClient?.clientConfig?.interfaces && + table?.bigtable?.options?.BigtableClient?.clientConfig?.interfaces[ + 'google.bigtable.v2.Bigtable' + ]?.methods['ReadRows']?.timeout_millis); + const callTimeMillis = new Date().getTime(); + + const ranges = TableUtils.getRanges(options); + + // If rowKeys and ranges are both empty, the request is a full table scan. + // Add an empty range to simplify the resumption logic. + if (rowKeys.length === 0 && ranges.length === 0) { + ranges.push({}); + } + + if (options.filter) { + filter = Filter.parse(options.filter); + } + + let chunkTransformer: ChunkTransformer; + let rowStream: Duplex; + + let userCanceled = false; + // The key of the last row that was emitted by the per attempt pipeline + // Note: this must be updated from the operation level userStream to avoid referencing buffered rows that will be + // discarded in the per attempt subpipeline (rowStream) + let lastRowKey = ''; + let rowsRead = 0; + const userStream = new PassThrough({ + objectMode: true, + readableHighWaterMark: 0, // We need to disable readside buffering to allow for acceptable behavior when the end user cancels the stream early. + writableHighWaterMark: 0, // We need to disable writeside buffering because in nodejs 14 the call to _transform happens after write buffering. This creates problems for tracking the last seen row key. + transform(event, _encoding, callback) { + if (userCanceled) { + callback(); + return; + } + if (event.eventType === DataEvent.LAST_ROW_KEY_UPDATE) { + /** + * This code will run when receiving an event containing + * lastScannedRowKey data that the chunk transformer sent. When the + * chunk transformer gets lastScannedRowKey data, this code + * updates the lastRowKey to ensure row ids with the lastScannedRowKey + * aren't re-requested in retries. The lastRowKey needs to be updated + * here and not in the chunk transformer to ensure the update is + * queued behind all events that deliver data to the user stream + * first. + */ + lastRowKey = event.lastScannedRowKey; + callback(); + return; + } + const row = event; + if (TableUtils.lessThanOrEqualTo(row.id, lastRowKey)) { + /* + Sometimes duplicate rows reach this point. To avoid delivering + duplicate rows to the user, rows are thrown away if they don't exceed + the last row key. We can expect each row to reach this point and rows + are delivered in order so if the last row key equals or exceeds the + row id then we know data for this row has already reached this point + and been delivered to the user. In this case we want to throw the row + away and we do not want to deliver this row to the user again. 
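+ For example, if a retried attempt replays rows up to and including the + current lastRowKey, those rows are dropped here and only later rows are + forwarded to the user.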
+ */ + callback(); + return; + } + lastRowKey = row.id; + rowsRead++; + callback(null, row); + }, + }); + + // The caller should be able to call userStream.end() to stop receiving + // more rows and cancel the stream prematurely. But also, the 'end' event + // will be emitted if the stream ended normally. To tell these two + // situations apart, we'll save the "original" end() function, and + // will call it on rowStream.on('end'). + const originalEnd = userStream.end.bind(userStream); + + // Taking care of this extra listener when piping and unpiping userStream: + const rowStreamPipe = (rowStream: Duplex, userStream: PassThrough) => { + rowStream.pipe(userStream, {end: false}); + rowStream.on('end', originalEnd); + }; + const rowStreamUnpipe = (rowStream: Duplex, userStream: PassThrough) => { + rowStream?.unpipe(userStream); + rowStream?.removeListener('end', originalEnd); + }; + + // eslint-disable-next-line @typescript-eslint/no-explicit-any + userStream.end = (chunk?: any, encoding?: any, cb?: () => void) => { + rowStreamUnpipe(rowStream, userStream); + userCanceled = true; + if (activeRequestStream) { + activeRequestStream.abort(); + } + if (retryTimer) { + clearTimeout(retryTimer); + } + return originalEnd(chunk, encoding, cb); + }; + const metricsCollector = table.bigtable._metricsConfigManager.createOperation( + singleRow ? MethodName.READ_ROW : MethodName.READ_ROWS, + singleRow ? StreamingState.UNARY : StreamingState.STREAMING, + table, + ); + metricsCollector.onOperationStart(); + const makeNewRequest = () => { + metricsCollector.onAttemptStart(); + + // Avoid cancelling an expired timer if user + // cancelled the stream in the middle of a retry + retryTimer = null; + + // eslint-disable-next-line @typescript-eslint/no-explicit-any + chunkTransformer = new ChunkTransformer({ + decode: options.decode, + } as any); + + // If the viewName is provided then request will be made for an + // authorized view. Otherwise, the request is made for a table. + const reqOpts = ( + viewName + ? { + authorizedViewName: `${table.name}/authorizedViews/${viewName}`, + appProfileId: table.bigtable.appProfileId, + } + : { + tableName: table.name, + appProfileId: table.bigtable.appProfileId, + } + ) as google.bigtable.v2.IReadRowsRequest; + + const retryOpts = { + currentRetryAttempt: 0, // was numConsecutiveErrors + // Handling retries in this client. Specify the retry options to + // make sure nothing is retried in retry-request. + noResponseRetries: 0, + shouldRetryFn: (_: any) => { + return false; + }, + }; + + if (lastRowKey) { + // Readjust and/or remove ranges based on previous valid row reads. + // Iterate backward since items may need to be removed. + for (let index = ranges.length - 1; index >= 0; index--) { + const range = ranges[index]; + const startValue = is.object(range.start) + ? (range.start as BoundData).value + : range.start; + const endValue = is.object(range.end) + ? (range.end as BoundData).value + : range.end; + const startKeyIsRead = + !startValue || + TableUtils.lessThanOrEqualTo( + startValue as string, + lastRowKey as string, + ); + const endKeyIsNotRead = + !endValue || + (endValue as Buffer).length === 0 || + TableUtils.lessThan(lastRowKey as string, endValue as string); + if (startKeyIsRead) { + if (endKeyIsNotRead) { + // EndKey is not read, reset the range to start from lastRowKey open + range.start = { + value: lastRowKey, + inclusive: false, + }; + } else { + // EndKey is read, remove this range + ranges.splice(index, 1); + } + } + } + + // Remove rowKeys already read. 
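+ // For example, a request made with keys ['a', 'b', 'c'] whose first + // attempt failed after lastRowKey 'b' only re-requests 'c' on the next + // attempt.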
+ rowKeys = rowKeys.filter(rowKey => + TableUtils.greaterThan(rowKey, lastRowKey as string), + ); + + // If there was a row limit in the original request and + // we've already read all the rows, end the stream and + // do not retry. + if (hasLimit && rowsLimit === rowsRead) { + userStream.end(); + return; + } + // If all the row keys and ranges are read, end the stream + // and do not retry. + if (rowKeys.length === 0 && ranges.length === 0) { + userStream.end(); + return; + } + } + + // Create the new reqOpts + reqOpts.rows = {}; + + // TODO: preprocess all the keys and ranges to Bytes + reqOpts.rows.rowKeys = rowKeys.map( + Mutation.convertToBytes, + ) as {} as Uint8Array[]; + + reqOpts.rows.rowRanges = ranges.map(range => + Filter.createRange( + range.start as BoundData, + range.end as BoundData, + 'Key', + ), + ); + + if (filter) { + reqOpts.filter = filter; + } + + if (hasLimit) { + reqOpts.rowsLimit = rowsLimit - rowsRead; + } + + const gaxOpts = populateAttemptHeader(numRequestsMade, options.gaxOptions); + + const requestStream = table.bigtable.request({ + client: 'BigtableClient', + method: 'readRows', + reqOpts, + gaxOpts, + retryOpts, + }); + + activeRequestStream = requestStream!; + + const toRowStream = new Transform({ + transform: (rowData: ChunkPushData, _, next) => { + if ( + userCanceled || + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (userStream as any)._writableState.ended + ) { + return next(); + } + if ( + (rowData as ChunkPushLastScannedRowData).eventType === + DataEvent.LAST_ROW_KEY_UPDATE + ) { + /** + * If the data is the chunk transformer communicating that the + * lastScannedRow was received then this message is passed along + * to the user stream to update the lastRowKey. + */ + next(null, rowData); + } else { + /** + * If the data is just regular rows being pushed from the + * chunk transformer then the rows are encoded so that they + * can be consumed by the user stream. + */ + const row = table.row((rowData as Row).key as string); + row.data = (rowData as Row).data; + next(null, row); + } + }, + objectMode: true, + }); + + rowStream = pumpify.obj([requestStream, chunkTransformer, toRowStream]); + + // Retry on "received rst stream" errors + const isRstStreamError = (error: ServiceError): boolean => { + if (error.code === 13 && error.message) { + const error_message = (error.message || '').toLowerCase(); + return ( + error.code === 13 && + (error_message.includes('rst_stream') || + error_message.includes('rst stream')) + ); + } + return false; + }; + + metricsCollector.handleStatusAndMetadata(requestStream); + rowStream + .on('error', (error: ServiceError) => { + rowStreamUnpipe(rowStream, userStream); + activeRequestStream = null; + if (IGNORED_STATUS_CODES.has(error.code)) { + // We ignore the `cancelled` "error", since we are the ones who cause + // it when the user calls `.abort()`. 
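+ // Ending the user stream here lets the consumer observe a normal end + // of stream, while the operation is still recorded below with the + // cancellation status code.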
+ userStream.end(); + metricsCollector.onOperationComplete(error.code); + return; + } + numConsecutiveErrors++; + numRequestsMade++; + if ( + numConsecutiveErrors <= maxRetries && + (RETRYABLE_STATUS_CODES.has(error.code) || isRstStreamError(error)) && + !(timeout && timeout < new Date().getTime() - callTimeMillis) + ) { + const backOffSettings = + options.gaxOptions?.retry?.backoffSettings || + DEFAULT_BACKOFF_SETTINGS; + const nextRetryDelay = getNextDelay( + numConsecutiveErrors, + backOffSettings, + ); + metricsCollector.onAttemptComplete(error.code); + retryTimer = setTimeout(makeNewRequest, nextRetryDelay); + } else { + if ( + !error.code && + error.message === 'The client has already been closed.' + ) { + // + // The TestReadRows_Generic_CloseClient conformance test requires + // a grpc code to be present when the client is closed. The + // appropriate code for a closed client is CANCELLED since the + // user actually cancelled the call by closing the client. + // + error.code = grpc.status.CANCELLED; + } + metricsCollector.onOperationComplete(error.code); + userStream.emit('error', error); + } + }) + .on('data', _ => { + // Reset error count after a successful read so the backoff + // time won't keep increasing when a stream had multiple errors + numConsecutiveErrors = 0; + metricsCollector.onResponse(); + }) + .on('end', () => { + activeRequestStream = null; + metricsCollector.onOperationComplete(grpc.status.OK); + }); + rowStreamPipe(rowStream, userStream); + }; + + makeNewRequest(); + return userStream; +} diff --git a/src/utils/getRowsInternal.ts b/src/utils/getRowsInternal.ts new file mode 100644 index 000000000..b756a086e --- /dev/null +++ b/src/utils/getRowsInternal.ts @@ -0,0 +1,66 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import { + GetRowsCallback, + GetRowsOptions, + GetRowsResponse, + TabularApiSurface, +} from '../tabular-api-surface'; +import {createReadStreamInternal} from './createReadStreamInternal'; +import {Row} from '../row'; +// eslint-disable-next-line @typescript-eslint/no-var-requires +const concat = require('concat-stream'); + +/** + * Get {@link Row} objects for the rows currently in your table. + * + * This method is not recommended for large datasets as it will buffer all rows + * before returning the results. Instead we recommend using the streaming API + * via {@link Table#createReadStream}. + * + * @param {TabularApiSurface} table The table instance to get rows from. + * @param {boolean} singleRow Boolean to check if the request is for a single row. + * @param {string} [viewName] The name of the authorized view, if applicable. + * @param {object} [optionsOrCallback] Configuration object. See + * {@link Table#createReadStream} for a complete list of options. + * @param {object} [optionsOrCallback.gaxOptions] Request configuration options, outlined + * here: https://googleapis.github.io/gax-nodejs/CallSettings.html. + * @param {function} cb The callback function.
+ * @param {?error} cb.err An error returned while making this request. + * @param {Row[]} cb.rows List of Row objects. + * + * @returns {Promise|void} Returns a promise that resolves with the rows if no callback is provided, otherwise calls the callback with the rows. + * + * @example include:samples/api-reference-doc-snippets/table.js + * region_tag:bigtable_api_get_rows + */ +export function getRowsInternal( + table: TabularApiSurface, + singleRow: boolean, + optionsOrCallback?: GetRowsOptions | GetRowsCallback, + cb?: GetRowsCallback, +): void | Promise { + const callback = + typeof optionsOrCallback === 'function' ? optionsOrCallback : cb!; + const options = + typeof optionsOrCallback === 'object' ? optionsOrCallback : {}; + createReadStreamInternal(table, singleRow, options) + .on('error', callback) + .pipe( + concat((rows: Row[]) => { + callback(null, rows); + }), + ); +} diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index e1e37a8a0..36c929792 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -23,9 +23,13 @@ import * as assert from 'assert'; import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler'; import * as proxyquire from 'proxyquire'; import {Bigtable} from '../src'; +import {Row} from '../src/row'; import {setupBigtable} from './client-side-metrics-setup-table'; import {TestMetricsHandler} from '../test-common/test-metrics-handler'; -import {OnOperationCompleteData} from '../src/client-side-metrics/metrics-handler'; +import { + OnAttemptCompleteData, + OnOperationCompleteData, +} from '../src/client-side-metrics/metrics-handler'; import {ClientOptions} from 'google-gax'; import {ClientSideMetricsConfigManager} from '../src/client-side-metrics/metrics-config-manager'; @@ -53,7 +57,132 @@ function getHandlerFromExporter(Exporter: typeof CloudMonitoringExporter) { }).GCPMetricsHandler; } -describe.only('Bigtable/ClientSideMetrics', () => { +function readRowsAssertionCheck( + projectId: string, + requestsHandled: (OnOperationCompleteData | OnAttemptCompleteData)[] = [], + method: string, + streaming: string, +) { + assert.strictEqual(requestsHandled.length, 4); + const firstRequest = requestsHandled[0] as any; + // We would expect these parameters to be different every time so delete + // them from the comparison after checking they exist. + assert(firstRequest.attemptLatency); + assert(firstRequest.serverLatency); + delete firstRequest.attemptLatency; + delete firstRequest.serverLatency; + delete firstRequest.metricsCollectorData.appProfileId; + assert.deepStrictEqual(firstRequest, { + connectivityErrorCount: 0, + streaming, + status: '0', + client_name: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'emulator-test-instance', + table: 'my-table', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + method, + }, + projectId, + }); + const secondRequest = requestsHandled[1] as any; + // We would expect these parameters to be different every time so delete + // them from the comparison after checking they exist. 
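+ // Entry 1 in requestsHandled is the operation-complete record for the + // first table; unlike the attempt-complete record above it, it carries + // operationLatency, retryCount and applicationLatencies.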
+ assert(secondRequest.operationLatency); + assert(secondRequest.firstResponseLatency); + assert(secondRequest.applicationLatencies); + delete secondRequest.operationLatency; + delete secondRequest.firstResponseLatency; + delete secondRequest.applicationLatencies; + delete secondRequest.metricsCollectorData.appProfileId; + assert.deepStrictEqual(secondRequest, { + status: '0', + streaming, + client_name: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'emulator-test-instance', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + method, + table: 'my-table', + }, + projectId, + retryCount: 0, + }); + // We would expect these parameters to be different every time so delete + // them from the comparison after checking they exist. + const thirdRequest = requestsHandled[2] as any; + assert(thirdRequest.attemptLatency); + assert(thirdRequest.serverLatency); + delete thirdRequest.attemptLatency; + delete thirdRequest.serverLatency; + delete thirdRequest.metricsCollectorData.appProfileId; + assert.deepStrictEqual(thirdRequest, { + connectivityErrorCount: 0, + streaming, + status: '0', + client_name: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'emulator-test-instance', + table: 'my-table2', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + method, + }, + projectId, + }); + const fourthRequest = requestsHandled[3] as any; + // We would expect these parameters to be different every time so delete + // them from the comparison after checking they exist. + assert(fourthRequest.operationLatency); + assert(fourthRequest.firstResponseLatency); + assert(fourthRequest.applicationLatencies); + delete fourthRequest.operationLatency; + delete fourthRequest.firstResponseLatency; + delete fourthRequest.applicationLatencies; + delete fourthRequest.metricsCollectorData.appProfileId; + assert.deepStrictEqual(fourthRequest, { + status: '0', + streaming, + client_name: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'emulator-test-instance', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + method, + table: 'my-table2', + }, + projectId, + retryCount: 0, + }); +} + +function checkMultiRowCall( + projectId: string, + requestsHandled: (OnOperationCompleteData | OnAttemptCompleteData)[] = [], +) { + readRowsAssertionCheck( + projectId, + requestsHandled, + 'Bigtable.ReadRows', + 'true', + ); +} + +function checkSingleRowCall( + projectId: string, + requestsHandled: (OnOperationCompleteData | OnAttemptCompleteData)[] = [], +) { + readRowsAssertionCheck( + projectId, + requestsHandled, + 'Bigtable.ReadRow', + 'false', + ); +} + +describe('Bigtable/ClientSideMetrics', () => { const instanceId1 = 'emulator-test-instance'; const instanceId2 = 'emulator-test-instance2'; const tableId1 = 'my-table'; @@ -363,7 +492,14 @@ describe.only('Bigtable/ClientSideMetrics', () => { }); }); describe('Bigtable/ClientSideMetricsToMetricsHandler', () => { - async function mockBigtable(projectId: string, done: mocha.Done) { + async function mockBigtable( + projectId: string, + done: mocha.Done, + checkFn: ( + projectId: string, + requestsHandled: (OnOperationCompleteData | OnAttemptCompleteData)[], + ) => void, + ) { let handlerRequestCount = 0; class TestGCPMetricsHandler extends TestMetricsHandler { projectId = projectId; @@ -372,99 +508,7 @@ describe.only('Bigtable/ClientSideMetrics', () => { try { super.onOperationComplete(data); if (handlerRequestCount > 1) { - assert.strictEqual(this.requestsHandled.length, 4); - const firstRequest = this.requestsHandled[0] as any; - // We would expect these 
parameters to be different every time so delete - // them from the comparison after checking they exist. - assert(firstRequest.attemptLatency); - assert(firstRequest.serverLatency); - delete firstRequest.attemptLatency; - delete firstRequest.serverLatency; - delete firstRequest.metricsCollectorData.appProfileId; - assert.deepStrictEqual(firstRequest, { - connectivityErrorCount: 0, - streaming: 'true', - status: '0', - client_name: 'nodejs-bigtable', - metricsCollectorData: { - instanceId: 'emulator-test-instance', - table: 'my-table', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - method: 'Bigtable.ReadRows', - }, - projectId, - }); - const secondRequest = this.requestsHandled[1] as any; - // We would expect these parameters to be different every time so delete - // them from the comparison after checking they exist. - assert(secondRequest.operationLatency); - assert(secondRequest.firstResponseLatency); - assert(secondRequest.applicationLatencies); - delete secondRequest.operationLatency; - delete secondRequest.firstResponseLatency; - delete secondRequest.applicationLatencies; - delete secondRequest.metricsCollectorData.appProfileId; - assert.deepStrictEqual(secondRequest, { - status: '0', - streaming: 'true', - client_name: 'nodejs-bigtable', - metricsCollectorData: { - instanceId: 'emulator-test-instance', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - method: 'Bigtable.ReadRows', - table: 'my-table', - }, - projectId, - retryCount: 0, - }); - // We would expect these parameters to be different every time so delete - // them from the comparison after checking they exist. - const thirdRequest = this.requestsHandled[2] as any; - assert(thirdRequest.attemptLatency); - assert(thirdRequest.serverLatency); - delete thirdRequest.attemptLatency; - delete thirdRequest.serverLatency; - delete thirdRequest.metricsCollectorData.appProfileId; - assert.deepStrictEqual(thirdRequest, { - connectivityErrorCount: 0, - streaming: 'true', - status: '0', - client_name: 'nodejs-bigtable', - metricsCollectorData: { - instanceId: 'emulator-test-instance', - table: 'my-table2', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - method: 'Bigtable.ReadRows', - }, - projectId, - }); - const fourthRequest = this.requestsHandled[3] as any; - // We would expect these parameters to be different every time so delete - // them from the comparison after checking they exist. 
-            assert(fourthRequest.operationLatency);
-            assert(fourthRequest.firstResponseLatency);
-            assert(fourthRequest.applicationLatencies);
-            delete fourthRequest.operationLatency;
-            delete fourthRequest.firstResponseLatency;
-            delete fourthRequest.applicationLatencies;
-            delete fourthRequest.metricsCollectorData.appProfileId;
-            assert.deepStrictEqual(fourthRequest, {
-              status: '0',
-              streaming: 'true',
-              client_name: 'nodejs-bigtable',
-              metricsCollectorData: {
-                instanceId: 'emulator-test-instance',
-                cluster: 'fake-cluster3',
-                zone: 'us-west1-c',
-                method: 'Bigtable.ReadRows',
-                table: 'my-table2',
-              },
-              projectId,
-              retryCount: 0,
-            });
+            checkFn(projectId, this.requestsHandled);
             done();
           }
         } catch (e) {
@@ -483,7 +527,11 @@ describe.only('Bigtable/ClientSideMetrics', () => {
 
   it('should send the metrics to the metrics handler for a ReadRows call', done => {
     (async () => {
-      const bigtable = await mockBigtable(defaultProjectId, done);
+      const bigtable = await mockBigtable(
+        defaultProjectId,
+        done,
+        checkMultiRowCall,
+      );
       const instance = bigtable.instance(instanceId1);
       const table = instance.table(tableId1);
       await table.getRows();
@@ -494,15 +542,37 @@ describe.only('Bigtable/ClientSideMetrics', () => {
     });
   });
   it('should pass the projectId to the metrics handler properly', done => {
+    (async () => {
+      const bigtable = await mockBigtable(
+        defaultProjectId,
+        done,
+        checkMultiRowCall,
+      );
+      const instance = bigtable.instance(instanceId1);
+      const table = instance.table(tableId1);
+      await table.getRows();
+      const table2 = instance.table(tableId2);
+      await table2.getRows();
+    })().catch(err => {
+      throw err;
+    });
+  });
+  it('should send the metrics to the metrics handler for a single row read', done => {
     (async () => {
       try {
         const projectId = SECOND_PROJECT_ID;
-        const bigtable = await mockBigtable(projectId, done);
+        const bigtable = await mockBigtable(
+          projectId,
+          done,
+          checkSingleRowCall,
+        );
         const instance = bigtable.instance(instanceId1);
         const table = instance.table(tableId1);
-        await table.getRows();
+        const row = new Row(table, 'rowId');
+        await row.get();
         const table2 = instance.table(tableId2);
-        await table2.getRows();
+        const row2 = new Row(table2, 'rowId');
+        await row2.get();
       } catch (e) {
         done(e);
       }
diff --git a/test/row.ts b/test/row.ts
index 35bd985bc..9e4833239 100644
--- a/test/row.ts
+++ b/test/row.ts
@@ -19,9 +19,19 @@ import * as proxyquire from 'proxyquire';
 import * as sinon from 'sinon';
 import {Mutation} from '../src/mutation.js';
 import * as rw from '../src/row';
-import {Table, Entry} from '../src/table.js';
+import {
+  Table,
+  Entry,
+  GetRowsOptions,
+  GetRowsCallback,
+  GetRowsResponse,
+} from '../src/table.js';
 import {Chunk} from '../src/chunktransformer.js';
-import {CallOptions} from 'google-gax';
+import {CallOptions, ServiceError} from 'google-gax';
+import {ClientSideMetricsConfigManager} from '../src/client-side-metrics/metrics-config-manager';
+import {Bigtable} from '../src/';
+import {getRowsInternal} from '../src/utils/getRowsInternal';
+import {TabularApiSurface} from '../src/tabular-api-surface';
 
 const sandbox = sinon.createSandbox();
 
@@ -78,19 +88,36 @@ describe('Bigtable/Row', () => {
   let RowError: typeof rw.RowError;
   let row: rw.Row;
 
-  before(() => {
+  function getFakeRow(
+    getRowsInternal: (
+      table: TabularApiSurface,
+      singleRow: boolean,
+      optionsOrCallback?: GetRowsOptions | GetRowsCallback,
+      cb?: GetRowsCallback,
+    ) => void | Promise<GetRowsResponse>,
+  ) {
     const Fake = proxyquire('../src/row.js', {
       '@google-cloud/promisify':
fakePromisify, './mutation.js': {Mutation: FakeMutation}, './filter.js': {Filter: FakeFilter}, './row-data-utils.js': {RowDataUtils: FakeRowDataUtil}, + './utils/getRowsInternal': { + getRowsInternal, + }, }); - Row = Fake.Row; RowError = Fake.RowError; + return Fake; + } + + before(() => { + const Fake = getFakeRow(() => {}); + Row = Fake.Row; }); beforeEach(() => { row = new Row(TABLE, ROW_ID); + row.table.bigtable._metricsConfigManager = + new ClientSideMetricsConfigManager([]); }); afterEach(() => { @@ -997,15 +1024,48 @@ describe('Bigtable/Row', () => { }); describe('get', () => { + function getRowInstance( + fn: (reqOpts: any) => void | Promise, + ) { + const getRowsInternal = ( + table: TabularApiSurface, + singleRow: boolean, + optionsOrCallback?: GetRowsOptions | GetRowsCallback, + cb?: GetRowsCallback, + ) => { + return fn(optionsOrCallback); + }; + const Fake = getFakeRow(getRowsInternal); + Row = Fake.Row; + row = new Row(TABLE, ROW_ID); + return row; + } + + function getRowInstanceForErrResp(err: ServiceError | null, resp?: any[]) { + const getRowsInternal = ( + table: TabularApiSurface, + singleRow: boolean, + optionsOrCallback?: GetRowsOptions | GetRowsCallback, + cb?: GetRowsCallback, + ) => { + if (cb) { + cb(err, resp); + } + }; + const Fake = getFakeRow(getRowsInternal); + Row = Fake.Row; + row = new Row(TABLE, ROW_ID); + return row; + } it('should provide the proper request options', done => { // eslint-disable-next-line @typescript-eslint/no-explicit-any - (row.table.getRows as Function) = (reqOpts: any) => { + const fn = (reqOpts: any) => { assert.strictEqual(reqOpts.keys[0], ROW_ID); assert.strictEqual(reqOpts.filter, undefined); assert.strictEqual(FakeMutation.parseColumnName.callCount, 0); done(); }; - + const row = getRowInstance(fn); row.get(assert.ifError); }); @@ -1022,12 +1082,13 @@ describe('Bigtable/Row', () => { ]; // eslint-disable-next-line @typescript-eslint/no-explicit-any - (row.table.getRows as Function) = (reqOpts: any) => { + const fn = (reqOpts: any) => { assert.deepStrictEqual(reqOpts.filter, expectedFilter); assert.strictEqual(FakeMutation.parseColumnName.callCount, 1); assert(FakeMutation.parseColumnName.calledWith(keys[0])); done(); }; + const row = getRowInstance(fn); row.get(keys, assert.ifError); }); @@ -1058,7 +1119,7 @@ describe('Bigtable/Row', () => { ]; // eslint-disable-next-line @typescript-eslint/no-explicit-any - (row.table.getRows as Function) = (reqOpts: any) => { + const fn = (reqOpts: any) => { assert.deepStrictEqual(reqOpts.filter, expectedFilter); const spy = FakeMutation.parseColumnName; @@ -1068,6 +1129,7 @@ describe('Bigtable/Row', () => { assert.strictEqual(spy.getCall(1).args[0], keys[1]); done(); }; + const row = getRowInstance(fn); row.get(keys, assert.ifError); }); @@ -1082,12 +1144,13 @@ describe('Bigtable/Row', () => { ]; // eslint-disable-next-line @typescript-eslint/no-explicit-any - (row.table.getRows as Function) = (reqOpts: any) => { + const fn = (reqOpts: any) => { assert.deepStrictEqual(reqOpts.filter, expectedFilter); assert.strictEqual(FakeMutation.parseColumnName.callCount, 1); assert(FakeMutation.parseColumnName.calledWith(keys[0])); done(); }; + const row = getRowInstance(fn); row.get(keys, assert.ifError); }); @@ -1121,13 +1184,14 @@ describe('Bigtable/Row', () => { ]; // eslint-disable-next-line @typescript-eslint/no-explicit-any - (row.table.getRows as Function) = (reqOpts: any) => { + const fn = (reqOpts: any) => { assert.deepStrictEqual(reqOpts.filter, expectedFilter); 
assert.strictEqual(FakeMutation.parseColumnName.callCount, 1); assert(FakeMutation.parseColumnName.calledWith(keys[0])); assert.strictEqual(reqOpts.decode, options.decode); done(); }; + const row = getRowInstance(fn); row.get(keys, options, assert.ifError); }); @@ -1175,13 +1239,14 @@ describe('Bigtable/Row', () => { ]; // eslint-disable-next-line @typescript-eslint/no-explicit-any - (row.table.getRows as Function) = (reqOpts: any) => { + const fn = (reqOpts: any) => { assert.deepStrictEqual(reqOpts.filter, expectedFilter); assert.strictEqual(FakeMutation.parseColumnName.callCount, 2); assert(FakeMutation.parseColumnName.calledWith(keys[0])); assert.strictEqual(reqOpts.decode, options.decode); done(); }; + const row = getRowInstance(fn); row.get(keys, options, assert.ifError); }); @@ -1196,10 +1261,11 @@ describe('Bigtable/Row', () => { const expectedFilter = options.filter; // eslint-disable-next-line @typescript-eslint/no-explicit-any - (row.table.getRows as Function) = (reqOpts: any) => { + const fn = (reqOpts: any) => { assert.deepStrictEqual(reqOpts.filter, expectedFilter); done(); }; + const row = getRowInstance(fn); row.get(keys, options, assert.ifError); }); @@ -1210,18 +1276,19 @@ describe('Bigtable/Row', () => { }; // eslint-disable-next-line @typescript-eslint/no-explicit-any - (row.table.getRows as Function) = (reqOpts: any) => { + const fn = (reqOpts: any) => { assert.strictEqual(reqOpts.decode, options.decode); assert(!reqOpts.filter); done(); }; + const row = getRowInstance(fn); row.get(options, assert.ifError); }); it('should return an error to the callback', done => { const error = new Error('err'); - sandbox.stub(row.table, 'getRows').callsArgWith(1, error); + const row = getRowInstanceForErrResp(error as ServiceError); row.get((err, row) => { assert.strictEqual(error, err); assert.strictEqual(row, undefined); @@ -1230,7 +1297,7 @@ describe('Bigtable/Row', () => { }); it('should return a custom error if the row is not found', done => { - sandbox.stub(row.table, 'getRows').callsArgWith(1, null, []); + const row = getRowInstanceForErrResp(null, []); row.get((err, row_) => { assert(err instanceof RowError); assert.strictEqual(err!.message, 'Unknown row: ' + row.id + '.'); @@ -1245,7 +1312,7 @@ describe('Bigtable/Row', () => { a: 'a', b: 'b', }; - sandbox.stub(row.table, 'getRows').callsArgWith(1, null, [fakeRow]); + const row = getRowInstanceForErrResp(null, [fakeRow]); row.get((err, row_) => { assert.ifError(err); assert.strictEqual(row_, row); @@ -1263,11 +1330,11 @@ describe('Bigtable/Row', () => { }; const keys = ['a', 'b']; + const row = getRowInstanceForErrResp(null, [fakeRow]); row.data = { c: 'c', }; - sandbox.stub(row.table, 'getRows').callsArgWith(1, null, [fakeRow]); row.get(keys, (err, data) => { assert.ifError(err); assert.deepStrictEqual(Object.keys(data), keys); diff --git a/test/table.ts b/test/table.ts index 960a6509c..913e77c12 100644 --- a/test/table.ts +++ b/test/table.ts @@ -31,8 +31,10 @@ import {Bigtable, RequestOptions} from '../src'; import {EventEmitter} from 'events'; import {TableUtils} from '../src/utils/table'; import {ClientSideMetricsConfigManager} from '../src/client-side-metrics/metrics-config-manager'; -import {IMetricsHandler} from '../src/client-side-metrics/metrics-handler'; import {OperationMetricsCollector} from '../src/client-side-metrics/operation-metrics-collector'; +import {SinonSpy} from 'sinon'; +import {TabularApiSurface} from '../src/tabular-api-surface'; +import {GetRowsOptions} from '../src/table'; const sandbox = 
sinon.createSandbox(); const noop = () => {}; @@ -121,6 +123,43 @@ const FakeFilter = { }, }; +function getTableMock( + createReadStreamInternal: ( + table: TabularApiSurface, + singleRow: boolean, + opts?: GetRowsOptions, + ) => PassThrough, +) { + const FakeGetRows = proxyquire('../src/utils/getRowsInternal.js', { + './createReadStreamInternal': { + createReadStreamInternal: createReadStreamInternal, + }, + }); + const FakeTabularApiSurface = proxyquire('../src/tabular-api-surface.js', { + '@google-cloud/promisify': fakePromisify, + './family.js': {Family: FakeFamily}, + './mutation.js': {Mutation: FakeMutation}, + './filter.js': {Filter: FakeFilter}, + pumpify, + './row.js': {Row: FakeRow}, + './chunktransformer.js': {ChunkTransformer: FakeChunkTransformer}, + './utils/createReadStreamInternal': { + createReadStreamInternal, + }, + './utils/getRowsInternal': { + getRowsInternal: FakeGetRows.getRowsInternal, + }, + }).TabularApiSurface; + const Table = proxyquire('../src/table.js', { + '@google-cloud/promisify': fakePromisify, + './family.js': {Family: FakeFamily}, + './mutation.js': {Mutation: FakeMutation}, + './row.js': {Row: FakeRow}, + './tabular-api-surface': {TabularApiSurface: FakeTabularApiSurface}, + }).Table; + return Table; +} + describe('Bigtable/Table', () => { const TABLE_ID = 'my-table'; let INSTANCE: inst.Instance; @@ -131,22 +170,17 @@ describe('Bigtable/Table', () => { let table: any; before(() => { - const FakeTabularApiSurface = proxyquire('../src/tabular-api-surface.js', { - '@google-cloud/promisify': fakePromisify, - './family.js': {Family: FakeFamily}, - './mutation.js': {Mutation: FakeMutation}, - './filter.js': {Filter: FakeFilter}, - pumpify, - './row.js': {Row: FakeRow}, - './chunktransformer.js': {ChunkTransformer: FakeChunkTransformer}, - }).TabularApiSurface; - Table = proxyquire('../src/table.js', { - '@google-cloud/promisify': fakePromisify, - './family.js': {Family: FakeFamily}, - './mutation.js': {Mutation: FakeMutation}, - './row.js': {Row: FakeRow}, - './tabular-api-surface': {TabularApiSurface: FakeTabularApiSurface}, - }).Table; + const FakeCreateReadStreamInternal = proxyquire( + '../src/utils/createReadStreamInternal.js', + { + '../row.js': {Row: FakeRow}, + '../chunktransformer.js': {ChunkTransformer: FakeChunkTransformer}, + '../filter.js': {Filter: FakeFilter}, + '../mutation.js': {Mutation: FakeMutation}, + pumpify, + }, + ).createReadStreamInternal; + Table = getTableMock(FakeCreateReadStreamInternal); }); beforeEach(() => { @@ -2326,13 +2360,14 @@ describe('Bigtable/Table', () => { describe('getRows', () => { describe('success', () => { + let createReadStreamInternal: SinonSpy<[], PassThrough>; const fakeRows = [ {key: 'c', data: {}}, {key: 'd', data: {}}, ]; beforeEach(() => { - table.createReadStream = sinon.spy(() => { + createReadStreamInternal = sinon.spy(() => { const stream = new PassThrough({ objectMode: true, }); @@ -2347,6 +2382,17 @@ describe('Bigtable/Table', () => { return stream; }); + Table = getTableMock(createReadStreamInternal); + INSTANCE = { + bigtable: { + _metricsConfigManager: new FakeMetricsConfigManager( + [], + ) as ClientSideMetricsConfigManager, + } as Bigtable, + name: 'a/b/c/d', + } as inst.Instance; + TABLE_NAME = INSTANCE.name + '/tables/' + TABLE_ID; + table = new Table(INSTANCE, TABLE_ID); }); it('should return the rows to the callback', done => { @@ -2357,8 +2403,8 @@ describe('Bigtable/Table', () => { assert.deepStrictEqual(rows, fakeRows); // eslint-disable-next-line @typescript-eslint/no-explicit-any - 
const spy = (table as any).createReadStream.getCall(0);
-        assert.strictEqual(spy.args[0], options);
+          const spy = createReadStreamInternal.getCall(0);
+          assert.strictEqual((spy.args as any)[2], options);
           done();
         });
       });
@@ -2373,10 +2419,11 @@ describe('Bigtable/Table', () => {
     });
 
     describe('error', () => {
+      let createReadStreamInternal: SinonSpy<[], PassThrough>;
       const error = new Error('err');
 
       beforeEach(() => {
-        table.createReadStream = sinon.spy(() => {
+        createReadStreamInternal = sinon.spy(() => {
           const stream = new PassThrough({
             objectMode: true,
           });
@@ -2387,6 +2434,17 @@ describe('Bigtable/Table', () => {
           return stream;
         });
+        Table = getTableMock(createReadStreamInternal);
+        INSTANCE = {
+          bigtable: {
+            _metricsConfigManager: new FakeMetricsConfigManager(
+              [],
+            ) as ClientSideMetricsConfigManager,
+          } as Bigtable,
+          name: 'a/b/c/d',
+        } as inst.Instance;
+        TABLE_NAME = INSTANCE.name + '/tables/' + TABLE_ID;
+        table = new Table(INSTANCE, TABLE_ID);
       });
 
       it('should return the error to the callback', done => {
From 097324ac88592be57f53d6f540879fc530bf5b17 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Wed, 4 Jun 2025 10:12:47 -0400
Subject: [PATCH 424/448] Added a comment for the test
---
 system-test/metric-service-client-credentials.ts | 9 +++++++++
 1 file changed, 9 insertions(+)
diff --git a/system-test/metric-service-client-credentials.ts b/system-test/metric-service-client-credentials.ts
index 083ddd645..d2e6696e3 100644
--- a/system-test/metric-service-client-credentials.ts
+++ b/system-test/metric-service-client-credentials.ts
@@ -84,6 +84,15 @@ describe('Bigtable/MetricServiceClientCredentials', () => {
       },
     }).Bigtable;
     new FakeBigtable(clientOptions);
+    // savedOptions are the options that get passed down to the exporter.
+    // When the second project id is provided to the fake client, the client
+    // should hand savedOptions to the exporter. A MetricServiceClient built
+    // from those same options must then resolve that project id, because
+    // the exporter relies on it to write the collected metrics to the
+    // right project.
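+    // For example, if savedOptions were {projectId: 'second-project-id'},
+    // then `new MetricServiceClient(savedOptions).getProjectId()` below
+    // should resolve to 'second-project-id', which is what gets asserted.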
const client = new MetricServiceClient(savedOptions); const projectIdUsed = await client.getProjectId(); assert.strictEqual(projectIdUsed, SECOND_PROJECT_ID); From 4bbe51bc6f602ee468d6b23f0fa68071dda43ec7 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 4 Jun 2025 10:27:06 -0400 Subject: [PATCH 425/448] Include credentials in the test --- system-test/metric-service-client-credentials.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system-test/metric-service-client-credentials.ts b/system-test/metric-service-client-credentials.ts index d2e6696e3..5330701b4 100644 --- a/system-test/metric-service-client-credentials.ts +++ b/system-test/metric-service-client-credentials.ts @@ -19,8 +19,8 @@ import {setupBigtable} from './client-side-metrics-setup-table'; import {MetricServiceClient} from '@google-cloud/monitoring'; describe('Bigtable/MetricServiceClientCredentials', () => { - it('should pass the credentials to the metric service client', done => { - const clientOptions = {metricsEnabled: true}; + it('should pass the credentials to the exporter', done => { + const clientOptions = {metricsEnabled: true, credentials: {}}; class FakeExporter { constructor(options: ClientOptions) { try { From 29f987dd58e692b8a4bbcdcc1510b6f7a0362f44 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 4 Jun 2025 11:51:01 -0400 Subject: [PATCH 426/448] Modify test to pass credentials in the preferred manner --- .../metric-service-client-credentials.ts | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) rename {system-test => test}/metric-service-client-credentials.ts (93%) diff --git a/system-test/metric-service-client-credentials.ts b/test/metric-service-client-credentials.ts similarity index 93% rename from system-test/metric-service-client-credentials.ts rename to test/metric-service-client-credentials.ts index 5330701b4..6a2d2fd84 100644 --- a/system-test/metric-service-client-credentials.ts +++ b/test/metric-service-client-credentials.ts @@ -13,14 +13,17 @@ // limitations under the License. 
import * as proxyquire from 'proxyquire'; -import {ClientOptions} from 'google-gax'; +import {ClientOptions, grpc} from 'google-gax'; import * as assert from 'assert'; -import {setupBigtable} from './client-side-metrics-setup-table'; +import {setupBigtable} from '../system-test/client-side-metrics-setup-table'; import {MetricServiceClient} from '@google-cloud/monitoring'; describe('Bigtable/MetricServiceClientCredentials', () => { it('should pass the credentials to the exporter', done => { - const clientOptions = {metricsEnabled: true, credentials: {}}; + const clientOptions = { + metricsEnabled: true, + sslCreds: grpc.credentials.createInsecure(), + }; class FakeExporter { constructor(options: ClientOptions) { try { From 41bb650aa566227cfca56e171bf2dc694e9623cb Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 4 Jun 2025 13:01:52 -0400 Subject: [PATCH 427/448] =?UTF-8?q?Remove=20the=20calls=20that=20aren?= =?UTF-8?q?=E2=80=99t=20needed=20from=20unit=20test?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- test/metric-service-client-credentials.ts | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/test/metric-service-client-credentials.ts b/test/metric-service-client-credentials.ts index 6a2d2fd84..edd8001eb 100644 --- a/test/metric-service-client-credentials.ts +++ b/test/metric-service-client-credentials.ts @@ -47,22 +47,7 @@ describe('Bigtable/MetricServiceClientCredentials', () => { GCPMetricsHandler: FakeCGPMetricsHandler, }, }).Bigtable; - const bigtable = new FakeBigtable(clientOptions); - const instanceId = 'emulator-test-instance'; - const columnFamilyId = 'cf1'; - const tableId1 = 'my-table'; - (async () => { - try { - await setupBigtable(bigtable, columnFamilyId, instanceId, [tableId1]); - const instance = bigtable.instance(instanceId); - const table = instance.table(tableId1); - await table.getRows(); - } catch (e) { - done(e); - } - })().catch(err => { - throw err; - }); + new FakeBigtable(clientOptions); }); it('should use second project for the metric service client', async () => { const SECOND_PROJECT_ID = 'second-project-id'; From 6abc68a78cf4aa344513753a7098595b3ad1979c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 5 Jun 2025 11:19:17 -0400 Subject: [PATCH 428/448] Add a check for published metrics --- system-test/client-side-metrics.ts | 53 ++++++++++++++++++++++++++++-- 1 file changed, 51 insertions(+), 2 deletions(-) diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index 36c929792..091e8076a 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -32,6 +32,7 @@ import { } from '../src/client-side-metrics/metrics-handler'; import {ClientOptions} from 'google-gax'; import {ClientSideMetricsConfigManager} from '../src/client-side-metrics/metrics-config-manager'; +import {MetricServiceClient} from '@google-cloud/monitoring'; const SECOND_PROJECT_ID = 'cfdb-sdk-node-tests'; @@ -182,6 +183,53 @@ function checkSingleRowCall( ); } +/** + * Checks if metrics have been published to Google Cloud Monitoring. + * + * This asynchronous function queries Google Cloud Monitoring to verify + * that the expected metrics from the Bigtable client library have been + * successfully published. It constructs a `MetricServiceClient` to + * interact with the Cloud Monitoring API and retrieves time series data + * for a predefined set of metrics. 
The test passes if time series data
+ * is found for each of the specified metrics within a defined time
+ * interval.
+ *
+ * @param {string} projectId The Google Cloud project ID where metrics are
+ * expected to be published.
+ * @throws {Error} If no time series data is found for any of the specified
+ * metrics, indicating that the metrics were not successfully published to
+ * Cloud Monitoring.
+ */
+async function checkForPublishedMetrics(projectId: string) {
+  const monitoringClient = new MetricServiceClient();
+  const now = Math.floor(Date.now() / 1000);
+  const filters = [
+    'metric.type="bigtable.googleapis.com/client/attempt_latencies"',
+    'metric.type="bigtable.googleapis.com/client/operation_latencies"',
+    'metric.type="bigtable.googleapis.com/client/retry_count"',
+    'metric.type="bigtable.googleapis.com/client/server_latencies"',
+    'metric.type="bigtable.googleapis.com/client/first_response_latencies"',
+  ];
+  for (let i = 0; i < filters.length; i++) {
+    const filter = filters[i];
+    const [series] = await monitoringClient.listTimeSeries({
+      name: `projects/${projectId}`,
+      interval: {
+        endTime: {
+          seconds: now,
+          nanos: 0,
+        },
+        startTime: {
+          seconds: now - 60 * 60 * 24, // look back 24 hours (`now` is in seconds)
+          nanos: 0,
+        },
+      },
+      filter,
+    });
+    assert(series.length > 0);
+  }
+}
+
 describe('Bigtable/ClientSideMetrics', () => {
   const instanceId1 = 'emulator-test-instance';
   const instanceId2 = 'emulator-test-instance2';
   const tableId1 = 'my-table';
@@ -264,7 +312,7 @@ describe('Bigtable/ClientSideMetrics', () => {
           resultCallback: (result: ExportResult) => void,
         ): Promise<void> {
           try {
-            await super.export(metrics, (result: ExportResult) => {
+            await super.export(metrics, async (result: ExportResult) => {
               if (!exported) {
                 exported = true;
                 try {
@@ -273,6 +321,7 @@ describe('Bigtable/ClientSideMetrics', () => {
                   // result from calling export was successful.
                   assert.strictEqual(result.code, 0);
                   resultCallback({code: 0});
+                  await checkForPublishedMetrics(projectId);
                   done();
                 } catch (error) {
                   // The code here isn't 0 so we report the original error to the mocha test runner.
@@ -360,7 +409,7 @@ describe('Bigtable/ClientSideMetrics', () => {
           resultCallback: (result: ExportResult) => void,
         ): Promise<void> {
           try {
-            await super.export(metrics, (result: ExportResult) => {
+            await super.export(metrics, async (result: ExportResult) => {
               try {
                 // The code is expected to be 0 because the
                 // result from calling export was successful.
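The same verification can also be run on its own. A minimal standalone sketch, assuming Application Default Credentials are available; 'my-project-id' is a placeholder and not a value from the patches above. It reuses the listTimeSeries call that checkForPublishedMetrics makes:

  import {MetricServiceClient} from '@google-cloud/monitoring';

  async function countRecentSeries(projectId: string): Promise<number> {
    const client = new MetricServiceClient();
    // listTimeSeries timestamps are expressed in seconds.
    const now = Math.floor(Date.now() / 1000);
    const [series] = await client.listTimeSeries({
      name: `projects/${projectId}`,
      filter:
        'metric.type="bigtable.googleapis.com/client/operation_latencies"',
      // Look back 24 hours for recently exported client-side metrics.
      interval: {
        startTime: {seconds: now - 60 * 60 * 24},
        endTime: {seconds: now},
      },
    });
    return series.length;
  }

  countRecentSeries('my-project-id')
    .then(n => console.log(`${n} time series found`))
    .catch(console.error);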
From 5424dc453989eddc62f15d5313bf0d84aefb7399 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 5 Jun 2025 16:26:57 -0400 Subject: [PATCH 429/448] Change to check for a custom endpoint --- src/client-side-metrics/exporter.ts | 7 ++-- system-test/client-side-metrics.ts | 53 +++++++++++++++++++++++++---- 2 files changed, 51 insertions(+), 9 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index ae6aaf1ec..8a2ff99a6 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -19,11 +19,10 @@ import { Histogram, ResourceMetrics, } from '@opentelemetry/sdk-metrics'; -import {grpc, ClientOptions, ServiceError} from 'google-gax'; +import {ClientOptions, ServiceError} from 'google-gax'; import {MetricServiceClient} from '@google-cloud/monitoring'; import {google} from '@google-cloud/monitoring/build/protos/protos'; import ICreateTimeSeriesRequest = google.monitoring.v3.ICreateTimeSeriesRequest; -import {RetryOptions} from 'google-gax'; export interface ExportResult { code: number; @@ -298,6 +297,10 @@ export class CloudMonitoringExporter extends MetricExporter { constructor(options: ClientOptions) { super(); + if (options.apiEndpoint) { + // We want the MetricServiceClient to always hit its default endpoint. + delete options.apiEndpoint; + } this.client = new MetricServiceClient(options); } diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index 091e8076a..0850027c9 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -30,7 +30,7 @@ import { OnAttemptCompleteData, OnOperationCompleteData, } from '../src/client-side-metrics/metrics-handler'; -import {ClientOptions} from 'google-gax'; +import {ClientOptions, ServiceError} from 'google-gax'; import {ClientSideMetricsConfigManager} from '../src/client-side-metrics/metrics-config-manager'; import {MetricServiceClient} from '@google-cloud/monitoring'; @@ -39,11 +39,15 @@ const SECOND_PROJECT_ID = 'cfdb-sdk-node-tests'; function getFakeBigtable( projectId: string, metricsHandlerClass: typeof GCPMetricsHandler | typeof TestMetricsHandler, + apiEndpoint?: string, ) { const metricHandler = new metricsHandlerClass( {} as unknown as ClientOptions & {value: string}, ); - const newClient = new Bigtable({projectId}); + const newClient = new Bigtable({ + projectId, + apiEndpoint, + }); newClient._metricsConfigManager = new ClientSideMetricsConfigManager([ metricHandler, ]); @@ -280,7 +284,11 @@ describe('Bigtable/ClientSideMetrics', () => { describe('Bigtable/ClientSideMetricsToGCM', () => { // This test suite ensures that for each test all the export calls are // successful even when multiple instances and tables are created. - async function mockBigtable(projectId: string, done: mocha.Done) { + async function mockBigtable( + projectId: string, + done: mocha.Done, + apiEndpoint?: string, + ) { /* The exporter is called every x seconds, but we only want to test the value it receives once. Since done cannot be called multiple times in mocha, @@ -312,7 +320,7 @@ describe('Bigtable/ClientSideMetrics', () => { resultCallback: (result: ExportResult) => void, ): Promise { try { - await super.export(metrics, async (result: ExportResult) => { + await super.export(metrics, (result: ExportResult) => { if (!exported) { exported = true; try { @@ -321,8 +329,9 @@ describe('Bigtable/ClientSideMetrics', () => { // result from calling export was successful. 
assert.strictEqual(result.code, 0); resultCallback({code: 0}); - await checkForPublishedMetrics(projectId); - done(); + void checkForPublishedMetrics(projectId).then(() => { + done(); + }); } catch (error) { // The code here isn't 0 so we report the original error to the mocha test runner. done(result); @@ -338,7 +347,11 @@ describe('Bigtable/ClientSideMetrics', () => { } } - return getFakeBigtable(projectId, getHandlerFromExporter(TestExporter)); + return getFakeBigtable( + projectId, + getHandlerFromExporter(TestExporter), + apiEndpoint, + ); } it('should send the metrics to Google Cloud Monitoring for a ReadRows call', done => { @@ -364,6 +377,32 @@ describe('Bigtable/ClientSideMetrics', () => { throw err; }); }); + it('should send the metrics to Google Cloud Monitoring for a custom endpoint', done => { + (async () => { + try { + const bigtable = await mockBigtable( + defaultProjectId, + done, + 'bogus-endpoint', + ); + const instance = bigtable.instance(instanceId1); + const table = instance.table(tableId1); + try { + // This call will fail because we are trying to hit a bogus endpoint. + // The idea here is that we just want to record at least one metric + // so that the exporter gets executed. + await table.getRows(); + } catch (e: unknown) { + // Try blocks just need a catch/finally block. + } + } catch (e) { + done(new Error('An error occurred while running the script')); + done(e); + } + })().catch(err => { + throw err; + }); + }); it('should send the metrics to Google Cloud Monitoring for a ReadRows call with a second project', done => { (async () => { try { From 2b8fb4eb1b84aedd562b04a16de00e5b99aca111 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 5 Jun 2025 16:55:09 -0400 Subject: [PATCH 430/448] apiEndpoint should be part of the metrics handler --- system-test/client-side-metrics.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index 0850027c9..587f56bd6 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -41,9 +41,9 @@ function getFakeBigtable( metricsHandlerClass: typeof GCPMetricsHandler | typeof TestMetricsHandler, apiEndpoint?: string, ) { - const metricHandler = new metricsHandlerClass( - {} as unknown as ClientOptions & {value: string}, - ); + const metricHandler = new metricsHandlerClass({ + apiEndpoint, + } as unknown as ClientOptions & {value: string}); const newClient = new Bigtable({ projectId, apiEndpoint, From 61acede9ca6110aa9e1867cb61421465c99ef64d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 6 Jun 2025 09:53:08 -0400 Subject: [PATCH 431/448] Remove import --- src/index.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/index.ts b/src/index.ts index 1ed9cd272..b17fcc7a3 100644 --- a/src/index.ts +++ b/src/index.ts @@ -24,7 +24,6 @@ import { } from 'google-gax'; import * as gax from 'google-gax'; import * as protos from '../protos/protos'; -import * as os from 'os'; import {AppProfile} from './app-profile'; import {Cluster} from './cluster'; From 476adedd529f3fc0e8b6d060652018a4a235e614 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 6 Jun 2025 10:44:42 -0400 Subject: [PATCH 432/448] Add a comment about the key --- src/client-side-metrics/operation-metrics-collector.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 4b29efda0..4ff2147a6 100644 --- 
a/src/client-side-metrics/operation-metrics-collector.ts
+++ b/src/client-side-metrics/operation-metrics-collector.ts
@@ -99,6 +99,8 @@ function checkState(
  * A class for tracing and recording client-side metrics related to Bigtable operations.
  */
 export class OperationMetricsCollector {
+  // This is the metadata key under which the server returns the instance
+  // information (for example, the cluster and zone) for an operation.
   private readonly INSTANCE_INFORMATION_KEY = 'x-goog-ext-425905942-bin';
   private state: MetricsCollectorState;
   private operationStartTime: bigint | null;
From 58f5da2526b7b359ae406f4b811dc9285003fff3 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Fri, 6 Jun 2025 11:43:38 -0400
Subject: [PATCH 433/448] Move generateUUID function
---
 .../gcp-metrics-handler.ts                    | 26 +++++++++++++-
 test/metrics-collector/gcp-metrics-handler.ts | 34 +++++++++++++++++--
 test/metrics-collector/metrics-collector.ts   |  1 +
 3 files changed, 57 insertions(+), 4 deletions(-)
diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts
index 945fcbe23..153fa6f84 100644
--- a/src/client-side-metrics/gcp-metrics-handler.ts
+++ b/src/client-side-metrics/gcp-metrics-handler.ts
@@ -22,7 +22,6 @@ import * as Resources from '@opentelemetry/resources';
 import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util';
 import {PushMetricExporter, View} from '@opentelemetry/sdk-metrics';
 import {ClientOptions} from 'google-gax';
-import {generateClientUuid} from './generate-client-uuid';
 const {
   Aggregation,
   ExplicitBucketHistogramAggregation,
@@ -30,6 +29,31 @@ const {
   Histogram,
   PeriodicExportingMetricReader,
 } = require('@opentelemetry/sdk-metrics');
+import * as os from 'os';
+import * as crypto from 'crypto';
+
+/**
+ * Generates a unique client identifier string.
+ *
+ * This function creates a client identifier that incorporates the hostname,
+ * process ID, and a UUID to ensure uniqueness across different client instances
+ * and processes. The identifier follows the pattern:
+ *
+ * `node-<uuid>-<pid><hostname>`
+ *
+ * where:
+ * - `<uuid>` is a randomly generated UUID (version 4).
+ * - `<pid>` is the process ID of the current Node.js process.
+ * - `<hostname>` is the hostname of the machine.
+ *
+ * @returns {string} A unique client identifier string.
+ */
+export function generateClientUuid() {
+  const hostname = os.hostname() || 'localhost';
+  const currentPid = process.pid || '';
+  const uuid4 = crypto.randomUUID();
+  return `node-${uuid4}-${currentPid}${hostname}`;
+}
 
 /**
  * A collection of OpenTelemetry metric instruments used to record
diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts
index 4a78f0388..c3dddb086 100644
--- a/test/metrics-collector/gcp-metrics-handler.ts
+++ b/test/metrics-collector/gcp-metrics-handler.ts
@@ -32,6 +32,36 @@ import * as assert from 'assert';
 import {replaceTimestamps} from '../../test-common/replace-timestamps';
 import * as proxyquire from 'proxyquire';
 
+/**
+ * Cleans a ResourceMetrics object by replacing client UUIDs with a placeholder.
+ *
+ * This function creates a deep copy of the input ResourceMetrics object and
+ * then iterates through its metrics, replacing any existing client_uid attribute
+ * in the data points with the string 'fake-uuid'. This is primarily used in
+ * testing to ensure consistent metric output by removing the variability of
+ * randomly generated client UUIDs.
+ *
+ * @param {ResourceMetrics} metrics The ResourceMetrics object to clean.
+ * @returns {ResourceMetrics} A new ResourceMetrics object with client UUIDs replaced by 'fake-uuid'. + */ +function cleanMetrics(metrics: ResourceMetrics): ResourceMetrics { + const newMetrics = JSON.parse(JSON.stringify(metrics)); // Deep copy to avoid modifying the original object + + newMetrics.scopeMetrics.forEach((scopeMetric: any) => { + scopeMetric.metrics.forEach((metric: any) => { + if (metric.dataPoints) { + metric.dataPoints.forEach((dataPoint: any) => { + if (dataPoint.attributes && dataPoint.attributes.client_uid) { + dataPoint.attributes.client_uid = 'fake-uuid'; + } + }); + } + }); + }); + + return newMetrics; +} + describe('Bigtable/GCPMetricsHandler', () => { it('Should export a value ready for sending to the CloudMonitoringExporter', function (done) { this.timeout(600000); @@ -61,6 +91,7 @@ describe('Bigtable/GCPMetricsHandler', () => { if (!exported) { exported = true; try { + metrics = cleanMetrics(metrics); replaceTimestamps( metrics as unknown as typeof expectedOtelExportInput, [123, 789], @@ -124,9 +155,6 @@ describe('Bigtable/GCPMetricsHandler', () => { './exporter': { CloudMonitoringExporter: TestExporter, }, - './generate-client-uuid': { - generateClientUuid: () => 'fake-uuid', - }, }; const FakeMetricsHandler = proxyquire( '../../src/client-side-metrics/gcp-metrics-handler.js', diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index efcef9813..626c254c0 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -26,6 +26,7 @@ import * as path from 'path'; // Import the 'path' module import * as gax from 'google-gax'; import * as proxyquire from 'proxyquire'; import {GCPMetricsHandler} from '../../src/client-side-metrics/gcp-metrics-handler'; +import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; const protoPath = path.join( __dirname, '../../protos/google/bigtable/v2/response_params.proto', From 0ce87af013563770cd5fc30e5b78aac5d16e2737 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 6 Jun 2025 11:45:50 -0400 Subject: [PATCH 434/448] Eliminate generateClientUuid and leftovers --- .../gcp-metrics-handler.ts | 2 +- .../generate-client-uuid.ts | 39 ------------------- 2 files changed, 1 insertion(+), 40 deletions(-) delete mode 100644 src/client-side-metrics/generate-client-uuid.ts diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 153fa6f84..7208d17e0 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -48,7 +48,7 @@ import * as crypto from 'crypto'; * * @returns {string} A unique client identifier string. */ -export function generateClientUuid() { +function generateClientUuid() { const hostname = os.hostname() || 'localhost'; const currentPid = process.pid || ''; const uuid4 = crypto.randomUUID(); diff --git a/src/client-side-metrics/generate-client-uuid.ts b/src/client-side-metrics/generate-client-uuid.ts deleted file mode 100644 index 066193879..000000000 --- a/src/client-side-metrics/generate-client-uuid.ts +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-import * as os from 'os';
-import * as crypto from 'crypto';
-
-/**
- * Generates a unique client identifier string.
- *
- * This function creates a client identifier that incorporates the hostname,
- * process ID, and a UUID to ensure uniqueness across different client instances
- * and processes. The identifier follows the pattern:
- *
- * `node-<uuid>-<pid><hostname>`
- *
- * where:
- * - `<uuid>` is a randomly generated UUID (version 4).
- * - `<pid>` is the process ID of the current Node.js process.
- * - `<hostname>` is the hostname of the machine.
- *
- * @returns {string} A unique client identifier string.
- */
-export function generateClientUuid() {
-  const hostname = os.hostname() || 'localhost';
-  const currentPid = process.pid || '';
-  const uuid4 = crypto.randomUUID();
-  return `node-${uuid4}-${currentPid}${hostname}`;
-}
From 9fb51dca88e002ff48c99e46c5c981e199b95ffa Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Fri, 6 Jun 2025 12:01:18 -0400
Subject: [PATCH 435/448] Pass in metrics collector
---
 src/row.ts                            | 61 +++++++++++++++++++--------
 src/tabular-api-surface.ts            | 20 +++++++++-
 src/utils/createReadStreamInternal.ts | 11 ++----
 src/utils/getRowsInternal.ts          |  8 ++--
 4 files changed, 65 insertions(+), 35 deletions(-)
diff --git a/src/row.ts b/src/row.ts
index 3ca4029d0..39af7a32b 100644
--- a/src/row.ts
+++ b/src/row.ts
@@ -32,6 +32,10 @@ import {google} from '../protos/protos';
 import {RowDataUtils, RowProperties} from './row-data-utils';
 import {TabularApiSurface} from './tabular-api-surface';
 import {getRowsInternal} from './utils/getRowsInternal';
+import {
+  MethodName,
+  StreamingState,
+} from './client-side-metrics/client-side-metrics-attributes';
 
 export interface Rule {
   column: string;
@@ -667,31 +671,46 @@ export class Row {
       filter,
     });
 
-    void getRowsInternal(this.table, true, getRowsOptions, (err, rows) => {
-      if (err) {
-        callback(err);
-        return;
-      }
+    const metricsCollector =
+      this.table.bigtable._metricsConfigManager.createOperation(
+        MethodName.READ_ROW,
+        StreamingState.UNARY,
+        this.table,
+      );
+    void getRowsInternal(
+      this.table,
+      metricsCollector,
+      getRowsOptions,
+      (err, rows) => {
+        if (err) {
+          callback(err);
+          return;
+        }
 
-      const row = rows![0];
+        const row = rows![0];
 
-      if (!row) {
-        const e = new RowError(this.id);
-        callback(e);
-        return;
-      }
+        if (!row) {
+          const e = new RowError(this.id);
+          callback(e);
+          return;
+        }
 
-      this.data = row.data;
+        this.data = row.data;
 
-      // If the user specifies column names, we'll return back the row data
-      // we received. Otherwise, we'll return the row "this" in a typical
-      // GrpcServiceObject#get fashion.
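+        // If the user specifies column names, we'll return back the row data
+        // we received. Otherwise, we'll return the row "this" in a typical
+        // GrpcServiceObject#get fashion. For example, row.get(['cf:name'], cb)
+        // yields just that column's data, while row.get(cb) yields the Row.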
+        if (columns.length > 0) {
+          callback(null, row.data);
+        } else {
+          (callback as {} as GetRowCallback)(null, this);
+        }
+      },
+    );
   }
 
   getMetadata(options?: GetRowOptions): Promise<GetRowMetadataResponse>;
diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts
index ef8408b33..462658e92 100644
--- a/src/tabular-api-surface.ts
+++ b/src/tabular-api-surface.ts
@@ -33,6 +33,10 @@ import * as is from 'is';
 import {GoogleInnerError} from './table';
 import {createReadStreamInternal} from './utils/createReadStreamInternal';
 import {getRowsInternal} from './utils/getRowsInternal';
+import {
+  MethodName,
+  StreamingState,
+} from './client-side-metrics/client-side-metrics-attributes';
 
 // See protos/google/rpc/code.proto
 // (4=DEADLINE_EXCEEDED, 8=RESOURCE_EXHAUSTED, 10=ABORTED, 14=UNAVAILABLE)
@@ -206,7 +210,13 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
    * @param opts
    */
   createReadStream(opts?: GetRowsOptions) {
-    return createReadStreamInternal(this, false, opts);
+    const metricsCollector =
+      this.bigtable._metricsConfigManager.createOperation(
+        MethodName.READ_ROWS,
+        StreamingState.STREAMING,
+        this,
+      );
+    return createReadStreamInternal(this, metricsCollector, opts);
   }
 
   getRows(options?: GetRowsOptions): Promise<GetRowsResponse>;
@@ -234,7 +244,13 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
     optionsOrCallback?: GetRowsOptions | GetRowsCallback,
     cb?: GetRowsCallback,
   ): void | Promise<GetRowsResponse> {
-    return getRowsInternal(this, false, optionsOrCallback, cb);
+    const metricsCollector =
+      this.bigtable._metricsConfigManager.createOperation(
+        MethodName.READ_ROWS,
+        StreamingState.STREAMING,
+        this,
+      );
+    return getRowsInternal(this, metricsCollector, optionsOrCallback, cb);
   }
 
   insert(
diff --git a/src/utils/createReadStreamInternal.ts b/src/utils/createReadStreamInternal.ts
index 32b98c331..e1561715a 100644
--- a/src/utils/createReadStreamInternal.ts
+++ b/src/utils/createReadStreamInternal.ts
@@ -41,6 +41,7 @@ import {
   RETRYABLE_STATUS_CODES,
   TabularApiSurface,
 } from '../tabular-api-surface';
+import {OperationMetricsCollector} from '../client-side-metrics/operation-metrics-collector';
 
 /**
  * Creates a readable stream of rows from a Bigtable table or authorized view.
@@ -50,7 +51,7 @@ import {
  * be used to create a stream for either a whole table or an authorized view.
 *
 * @param {Table} table The Table instance to read rows from.
- * @param {boolean} singleRow boolean to check if the request is for a single row.
+ * @param {OperationMetricsCollector} metricsCollector Collects client-side metrics for this read operation.
 * @param {GetRowsOptions} [opts] Optional configuration for the read operation.
 * @param {boolean} [opts.decode=true] If set to `false` it will not decode
 * Buffer values returned from Bigtable.
@@ -68,13 +69,12 @@ import {
 * match.
 * @param {object[]} [opts.ranges] A list of key ranges.
 * @param {string} [opts.start] Start value for key range.
- * @param {string} [viewName] The name of the authorized view, if applicable.
 * @returns {stream} A readable stream of {@link Row} objects.
 *
 */
export function createReadStreamInternal(
  table: TabularApiSurface,
-  singleRow: boolean,
+  metricsCollector: OperationMetricsCollector,
  opts?: GetRowsOptions,
) {
  const options = opts || {};
@@ -201,11 +201,6 @@ export function createReadStreamInternal(
    }
    return originalEnd(chunk, encoding, cb);
  };
-  const metricsCollector = table.bigtable._metricsConfigManager.createOperation(
-    singleRow ? MethodName.READ_ROW : MethodName.READ_ROWS,
-    singleRow ?
StreamingState.UNARY : StreamingState.STREAMING, - table, - ); metricsCollector.onOperationStart(); const makeNewRequest = () => { metricsCollector.onAttemptStart(); diff --git a/src/utils/getRowsInternal.ts b/src/utils/getRowsInternal.ts index b756a086e..d106b6de9 100644 --- a/src/utils/getRowsInternal.ts +++ b/src/utils/getRowsInternal.ts @@ -20,6 +20,7 @@ import { } from '../tabular-api-surface'; import {createReadStreamInternal} from './createReadStreamInternal'; import {Row} from '../row'; +import {OperationMetricsCollector} from '../client-side-metrics/operation-metrics-collector'; // eslint-disable-next-line @typescript-eslint/no-var-requires const concat = require('concat-stream'); @@ -31,8 +32,7 @@ const concat = require('concat-stream'); * via {@link Table#createReadStream}. * * @param {TabularApiSurface} table The table instance to get rows from. - * @param {boolean} singleRow Boolean to check if the request is for a single row. - * @param {string} [viewName] The name of the authorized view, if applicable. + * @param metricsCollector * @param {object} [optionsOrCallback] Configuration object. See * {@link Table#createReadStream} for a complete list of options. * @param {object} [optionsOrCallback.gaxOptions] Request configuration options, outlined @@ -48,7 +48,7 @@ const concat = require('concat-stream'); */ export function getRowsInternal( table: TabularApiSurface, - singleRow: boolean, + metricsCollector: OperationMetricsCollector, optionsOrCallback?: GetRowsOptions | GetRowsCallback, cb?: GetRowsCallback, ): void | Promise { @@ -56,7 +56,7 @@ export function getRowsInternal( typeof optionsOrCallback === 'function' ? optionsOrCallback : cb!; const options = typeof optionsOrCallback === 'object' ? optionsOrCallback : {}; - createReadStreamInternal(table, singleRow, options) + createReadStreamInternal(table, metricsCollector, options) .on('error', callback) .pipe( concat((rows: Row[]) => { From 37ae9010bb0908bce6804a5d79e1390462addebc Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 6 Jun 2025 14:01:13 -0400 Subject: [PATCH 436/448] Use more specific method --- src/client-side-metrics/exporter.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 8a2ff99a6..3e75c4849 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -312,7 +312,9 @@ export class CloudMonitoringExporter extends MetricExporter { try { const projectId = await this.client.getProjectId(); const request = metricsToRequest(projectId, metrics); - await this.client.createTimeSeries(request as ICreateTimeSeriesRequest); + await this.client.createServiceTimeSeries( + request as ICreateTimeSeriesRequest, + ); // The resultCallback typically accepts a value equal to {code: x} // for some value x along with other info. When the code is equal to 0 // then the operation completed successfully. 
When the code is not equal From 6b3ec1347f2658c855b030bb11f922df7ca64f54 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 6 Jun 2025 14:50:14 -0400 Subject: [PATCH 437/448] Correct the comment --- src/client-side-metrics/exporter.ts | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 3e75c4849..f7e205143 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -318,9 +318,8 @@ export class CloudMonitoringExporter extends MetricExporter { // The resultCallback typically accepts a value equal to {code: x} // for some value x along with other info. When the code is equal to 0 // then the operation completed successfully. When the code is not equal - // to 0 then the operation failed. Open telemetry logs errors to the - // console when the resultCallback passes in non-zero code values and - // logs nothing when the code is 0. + // to 0 then the operation failed. The resultCallback will not log + // anything to the console whether the error code was 0 or not. resultCallback({code: 0}); } catch (error) { resultCallback(error as ServiceError); From 35ed6f9fb98dbc37d6432c43d93b1f853e27e157 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 6 Jun 2025 16:44:58 -0400 Subject: [PATCH 438/448] Add a check to make sure options exist --- src/client-side-metrics/exporter.ts | 2 +- system-test/gcp-metrics-handler.ts | 11 +++-------- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index f7e205143..4f373ef36 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -297,7 +297,7 @@ export class CloudMonitoringExporter extends MetricExporter { constructor(options: ClientOptions) { super(); - if (options.apiEndpoint) { + if (options && options.apiEndpoint) { // We want the MetricServiceClient to always hit its default endpoint. delete options.apiEndpoint; } diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 6c765ea02..2608d6e19 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -25,11 +25,6 @@ import { import {Bigtable} from '../src'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import * as assert from 'assert'; -import { - expectedOtelExportInput, - expectedOtelHundredExportInputs, -} from '../test-common/expected-otel-export-input'; -import {replaceTimestamps} from '../test-common/replace-timestamps'; import {ClientOptions} from 'google-gax'; import * as proxyquire from 'proxyquire'; @@ -122,7 +117,7 @@ describe('Bigtable/GCPMetricsHandler', () => { } } })().catch(err => { - throw err; + done(err); }); }); it('Should export a value to two GCPMetricsHandlers', done => { @@ -224,7 +219,7 @@ describe('Bigtable/GCPMetricsHandler', () => { } } })().catch(err => { - throw err; + done(err); }); }); it('Should write two duplicate points inserted into the metrics handler', done => { @@ -305,7 +300,7 @@ describe('Bigtable/GCPMetricsHandler', () => { } } })().catch(err => { - throw err; + done(err); }); }); }); From a89b6aa4ef9526d00795064896b66ef1b3b146b4 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 24 Jun 2025 09:34:33 -0400 Subject: [PATCH 439/448] Do console warn. 
Not throw error --- src/client-side-metrics/operation-metrics-collector.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 4ff2147a6..4851eddbb 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -232,7 +232,7 @@ export class OperationMetricsCollector { } }); } else { - throw new Error('Start time should always be provided'); + console.warn('Start time should always be provided'); } }); } @@ -272,7 +272,7 @@ export class OperationMetricsCollector { (endTime - this.operationStartTime) / BigInt(1000000), ); } else { - throw new Error( + console.warn( 'ProjectId and operationStartTime should always be provided', ); } From b2c6f9087f8d99f1f6764bdbf8c75ff0a5033da2 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 24 Jun 2025 15:58:44 -0400 Subject: [PATCH 440/448] Debugging attempts --- src/tabular-api-surface.ts | 3 + system-test/data/read-rows-retry-test.json | 294 --------------------- system-test/read-rows.ts | 5 +- 3 files changed, 6 insertions(+), 296 deletions(-) diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 784f1f622..4b6bba541 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -492,6 +492,9 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); objectMode: true, }); + chunkTransformer.on('error', err => { + chunkTransformer.emit('error', err); + }); rowStream = pumpify.obj([requestStream, chunkTransformer, toRowStream]); // Retry on "received rst stream" errors diff --git a/system-test/data/read-rows-retry-test.json b/system-test/data/read-rows-retry-test.json index aad5178c6..133eeb7a8 100644 --- a/system-test/data/read-rows-retry-test.json +++ b/system-test/data/read-rows-retry-test.json @@ -2,43 +2,6 @@ "tests": [ - { - "name": "simple read", - "max_retries": 3, - "request_options": [ - { - "rowKeys": [], - "rowRanges": [{}] - } - ], - "responses": [ - { "row_keys": [ "a", "b", "c" ] } - ], - "row_keys_read": [ - [ "a", "b", "c" ] - ] - }, - - - - { - "name": "retries a failed read", - "max_retries": 3, - "request_options": [ - { "rowKeys": [], - "rowRanges": [{}] - }, - { "rowKeys": [], "rowRanges": [ { "startKeyOpen": "b" } ] } - ], - "responses": [ - { "row_keys": [ "a", "b" ], "end_with_error": 4 }, - { "row_keys": [ "c" ] } - ], - "row_keys_read": [ - [ "a", "b" ], - [ "c" ] - ] - }, @@ -69,263 +32,6 @@ [], [], [], [] ], "error": 4 - }, - - - - - { - "name": "resets the retry counter after a successful read", - "max_retries": 4, - "request_options": [ - { "rowKeys": [], - "rowRanges": [{}] - }, - { "rowKeys": [], "rowRanges": [ { "startKeyOpen": "a" } ] }, - { "rowKeys": [], "rowRanges": [ { "startKeyOpen": "a" } ] }, - { "rowKeys": [], "rowRanges": [ { "startKeyOpen": "a" } ] }, - { "rowKeys": [], "rowRanges": [ { "startKeyOpen": "a" } ] }, - { "rowKeys": [], "rowRanges": [ { "startKeyOpen": "b" } ] }, - { "rowKeys": [], "rowRanges": [ { "startKeyOpen": "b" } ] }, - { "rowKeys": [], "rowRanges": [ { "startKeyOpen": "b" } ] } - ], - "responses": [ - { "row_keys": [ "a" ], "end_with_error": 4 }, - { "end_with_error": 4 }, - { "end_with_error": 4 }, - { "end_with_error": 4 }, - { "row_keys": [ "b" ], "end_with_error": 4 }, - { "end_with_error": 4 }, - { "end_with_error": 4 }, - { "row_keys": [ "c" ] } - ], - "row_keys_read": [ - [ "a" ], [], [], [], [ "b" ], [], [], [ "c" ] - ] - }, - - 
-
-    {
-      "name": "moves the start point of a range being consumed",
-      "max_retries": 3,
-      "createReadStream_options": {
-        "ranges": [{
-          "start": "a",
-          "end": "z"
-        }]
-      },
-      "request_options": [
-        { "rowKeys": [], "rowRanges": [ { "startKeyClosed": "a", "endKeyClosed": "z" } ] },
-        { "rowKeys": [], "rowRanges": [ { "startKeyOpen": "b", "endKeyClosed": "z" } ] }
-      ],
-      "responses": [
-        { "row_keys": [ "a", "b" ], "end_with_error": 4 },
-        { "row_keys": [ "c" ] }
-      ],
-      "row_keys_read": [
-        [ "a", "b" ],
-        [ "c" ]
-      ]
-    },
-
-
-
-    {
-      "name": "removes ranges already consumed",
-      "max_retries": 3,
-      "createReadStream_options": {
-        "ranges": [{
-          "start": "a",
-          "end": "c"
-        }, {
-          "start": "x",
-          "end": "z"
-        }]
-      },
-      "request_options": [
-        { "rowKeys": [],
-          "rowRanges": [
-          { "startKeyClosed": "a", "endKeyClosed": "c" },
-          { "startKeyClosed": "x", "endKeyClosed": "z" }
-          ] },
-        { "rowKeys": [],
-          "rowRanges": [ { "startKeyClosed": "x", "endKeyClosed": "z" } ] }
-      ],
-      "responses": [
-        { "row_keys": [ "a", "b", "c" ], "end_with_error": 4 },
-        { "row_keys": [ "x" ] }
-      ],
-      "row_keys_read": [
-        [ "a", "b", "c" ],
-        [ "x" ]
-      ]
-    },
-
-
-
-    {
-      "name": "removes keys already read",
-      "max_retries": 3,
-      "createReadStream_options": {
-        "keys": ["a", "b", "x"]
-      },
-      "request_options": [
-        { "rowKeys": [ "a", "b", "x" ], "rowRanges": [] },
-        { "rowKeys": [ "x" ], "rowRanges": [] }
-      ],
-      "responses": [
-        { "row_keys": [ "a", "b", "c" ], "end_with_error": 4 },
-        { "row_keys": [ "x" ] }
-      ],
-      "row_keys_read": [
-        [ "a", "b", "c" ],
-        [ "x" ]
-      ]
-    },
-
-
-    {
-      "name": "adjust the limit based on the number of rows read",
-      "max_retries": 3,
-      "createReadStream_options": {
-        "limit": 10
-      },
-      "request_options": [
-        { "rowKeys": [], "rowRanges": [{}], "rowsLimit": 10 },
-        { "rowsLimit": 8, "rowKeys":[], "rowRanges": [ { "startKeyOpen": "b" } ] }
-      ],
-      "responses": [
-        { "row_keys": [ "a", "b" ], "end_with_error": 4 },
-        { "row_keys": [ "x" ] }
-      ],
-      "row_keys_read": [
-        [ "a", "b" ],
-        [ "x" ]
-      ]
-    },
-
-
-
-
-
-    {
-      "name": "does the previous 5 things in one giant test case",
-      "max_retries": 4,
-      "createReadStream_options": {
-        "limit": 10,
-        "ranges": [{
-          "start": "a",
-          "end": "c"
-        }, {
-          "start": "p",
-          "end": "s"
-        }, {
-          "start": "x",
-          "end": "z"
-        }],
-        "keys": [ "a", "b", "c", "p", "q", "r", "s", "x", "y", "z" ]
-      },
-      "request_options": [
-        {
-          "rowKeys": [ "a", "b", "c", "p", "q", "r", "s", "x", "y", "z" ],
-          "rowsLimit": 10,
-          "rowRanges": [
-            { "startKeyClosed": "a", "endKeyClosed": "c" },
-            { "startKeyClosed": "p", "endKeyClosed": "s" },
-            { "startKeyClosed": "x", "endKeyClosed": "z" }
-          ]
-        },
-        {
-          "rowKeys": [ "b", "c", "p", "q", "r", "s", "x", "y", "z" ],
-          "rowsLimit": 9,
-          "rowRanges": [
-            { "startKeyOpen": "a", "endKeyClosed": "c" },
-            { "startKeyClosed": "p", "endKeyClosed": "s" },
-            { "startKeyClosed": "x", "endKeyClosed": "z" }
-          ]
-        },
-        {
-          "rowKeys": [ "c", "p", "q", "r", "s", "x", "y", "z" ],
-          "rowsLimit": 8,
-          "rowRanges": [
-            { "startKeyOpen": "b", "endKeyClosed": "c" },
-            { "startKeyClosed": "p", "endKeyClosed": "s" },
-            { "startKeyClosed": "x", "endKeyClosed": "z" }
-          ]
-        },
-        {
-          "rowKeys": [ "c", "p", "q", "r", "s", "x", "y", "z" ],
-          "rowsLimit": 8,
-          "rowRanges": [
-            { "startKeyOpen": "b", "endKeyClosed": "c" },
-            { "startKeyClosed": "p", "endKeyClosed": "s" },
-            { "startKeyClosed": "x", "endKeyClosed": "z" }
-          ]
-        },
-        {
-          "rowKeys": [ "c", "p", "q", "r", "s", "x", "y", "z" ],
-          "rowsLimit": 8,
-          "rowRanges": [
-            { "startKeyOpen": "b", "endKeyClosed": "c" },
-            { "startKeyClosed": "p", "endKeyClosed": "s" },
-            { "startKeyClosed": "x", "endKeyClosed": "z" }
-          ]
-        },
-        {
-          "rowKeys": [ "c", "p", "q", "r", "s", "x", "y", "z" ],
-          "rowsLimit": 8,
-          "rowRanges": [
-            { "startKeyOpen": "b", "endKeyClosed": "c" },
-            { "startKeyClosed": "p", "endKeyClosed": "s" },
-            { "startKeyClosed": "x", "endKeyClosed": "z" }
-          ]
-        },
-        {
-          "rowKeys": [ "p", "q", "r", "s", "x", "y", "z" ],
-          "rowsLimit": 7,
-          "rowRanges": [
-            { "startKeyClosed": "p", "endKeyClosed": "s" },
-            { "startKeyClosed": "x", "endKeyClosed": "z" }
-          ]
-        },
-        {
-          "rowKeys": [ "y", "z" ],
-          "rowsLimit": 2,
-          "rowRanges": [
-            { "startKeyOpen": "x", "endKeyClosed": "z" }
-          ]
-        },
-        {
-          "rowKeys": [ "z" ],
-          "rowsLimit": 1,
-          "rowRanges": [
-            { "startKeyOpen": "y", "endKeyClosed": "z" }
-          ]
-        }
-      ],
-      "responses": [
-        { "row_keys": [ "a" ], "end_with_error": 4 },
-        { "row_keys": [ "b" ], "end_with_error": 4 },
-        { "end_with_error": 4 },
-        { "end_with_error": 4 },
-        { "end_with_error": 4 },
-        { "row_keys": [ "c" ], "end_with_error": 4 },
-        { "row_keys": [ "p", "q", "r", "s", "x" ], "end_with_error": 4 },
-        { "row_keys": [ "y" ], "end_with_error": 4 },
-        { "row_keys": [ "z" ] }
-      ],
-      "row_keys_read": [
-        [ "a" ],
-        [ "b" ],
-        [],
-        [],
-        [],
-        [ "c" ],
-        [ "p", "q", "r", "s", "x" ],
-        [ "y" ],
-        [ "z" ]
-      ]
     }
diff --git a/system-test/read-rows.ts b/system-test/read-rows.ts
index fe75d7352..9873457aa 100644
--- a/system-test/read-rows.ts
+++ b/system-test/read-rows.ts
@@ -57,7 +57,7 @@ function dispatch(emitter: EventEmitter, response: any) {
       const emit = emits[index];
       index++;
       emitter.emit(emit.name, emit.arg);
-      setImmediate(next);
+      next();
     }
   }
 }
@@ -123,7 +123,7 @@ describe('Bigtable/Table', () => {
     });
   });

-  describe('createReadStream', () => {
+  describe.only('createReadStream', () => {
     let clock: sinon.SinonFakeTimers;
     let endCalled: boolean;
     let error: ServiceError | null;
@@ -199,6 +199,7 @@ describe('Bigtable/Table', () => {
         .on('end', () => (endCalled = true))
         .on('error', err => (error = err as ServiceError));
       clock.runAll();
+      console.log('clock');

       if (test.error) {
         assert(!endCalled, ".on('end') should not have been invoked");
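A note on the two read-rows.ts hunks just above: they tie the test's event delivery to sinon's fake clock. With 'setImmediate' in the faked list, every `setImmediate(next)` inside `dispatch` lands in the fake clock's queue, and no response event fires until `clock.runAll()` drains it; calling `next()` synchronously bypasses the clock at the cost of one extra stack frame per emitted event. The next patch walks this back. A minimal standalone sketch of the two dispatch styles (the `Emit` shape mirrors the test's `emits` array; this is an illustration, not code from the patch):

    import {EventEmitter} from 'events';

    type Emit = {name: string; arg?: unknown};

    // Synchronous dispatch: works under fake timers because no timers are
    // involved, but each emit nests another stack frame, so a long response
    // script can run deep into the stack.
    function dispatchSync(emitter: EventEmitter, emits: Emit[]) {
      let index = 0;
      (function next() {
        if (index < emits.length) {
          const {name, arg} = emits[index++];
          emitter.emit(name, arg);
          next();
        }
      })();
    }

    // Scheduled dispatch: keeps the stack flat, but if sinon fakes
    // 'setImmediate' the queued callbacks sit idle until the fake clock is
    // flushed, e.g. with clock.runAll().
    function dispatchScheduled(emitter: EventEmitter, emits: Emit[]) {
      let index = 0;
      (function next() {
        if (index < emits.length) {
          const {name, arg} = emits[index++];
          emitter.emit(name, arg);
          setImmediate(next);
        }
      })();
    }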
From b75c7eccaad3b524a65ce1e54b736126600d9d3f Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Wed, 25 Jun 2025 11:33:24 -0400
Subject: [PATCH 441/448] Remove the sinon clock

---
 system-test/read-rows.ts | 76 ++++++++++++++++++----------------------
 1 file changed, 35 insertions(+), 41 deletions(-)

diff --git a/system-test/read-rows.ts b/system-test/read-rows.ts
index 9873457aa..9256d17e5 100644
--- a/system-test/read-rows.ts
+++ b/system-test/read-rows.ts
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-import {Bigtable} from '../src';
+import {AbortableDuplex, Bigtable} from '../src';
 import {Mutation} from '../src/mutation.js';
 const {tests} = require('../../system-test/data/read-rows-retry-test.json') as {
   tests: Test[];
@@ -25,6 +25,7 @@ import {EventEmitter} from 'events';
 import {Test} from './testTypes';
 import {ServiceError, GrpcClient, GoogleError, CallOptions} from 'google-gax';
 import {PassThrough} from 'stream';
+import * as mocha from 'mocha';

 const {grpc} = new GrpcClient();
@@ -57,7 +58,7 @@ function dispatch(emitter: EventEmitter, response: any) {
       const emit = emits[index];
       index++;
       emitter.emit(emit.name, emit.arg);
-      next();
+      setImmediate(next);
     }
   }
 }
@@ -124,33 +125,19 @@ describe('Bigtable/Table', () => {
   });

   describe.only('createReadStream', () => {
-    let clock: sinon.SinonFakeTimers;
     let endCalled: boolean;
     let error: ServiceError | null;
     let requestedOptions: Array<{}>;
     let responses: Array<{}> | null;
     let rowKeysRead: Array<string[]>;
-    let stub: sinon.SinonStub;

     beforeEach(() => {
-      clock = sinon.useFakeTimers({
-        toFake: [
-          'setTimeout',
-          'clearTimeout',
-          'setImmediate',
-          'clearImmediate',
-          'setInterval',
-          'clearInterval',
-          'Date',
-          'nextTick',
-        ],
-      });
       endCalled = false;
       error = null;
       responses = null;
       rowKeysRead = [];
       requestedOptions = [];
-      stub = sinon.stub(bigtable, 'request').callsFake(cfg => {
+      bigtable.request = ((cfg: any) => {
         const reqOpts = cfg.reqOpts;
         const requestOptions = {} as google.bigtable.v2.IRowSet;
         if (reqOpts.rows && reqOpts.rows.rowRanges) {
@@ -182,39 +169,46 @@ describe('Bigtable/Table', () => {
         (requestStream as any).abort = () => {};
         dispatch(requestStream, responses!.shift());
         return requestStream;
-      });
-    });
-
-    afterEach(() => {
-      clock.restore();
-      stub.restore();
+      }) as unknown as () => AbortableDuplex;
     });

     tests.forEach(test => {
-      it(test.name, () => {
+      it(test.name, (done: mocha.Done) => {
         responses = test.responses;
         TABLE.maxRetries = test.max_retries;
         TABLE.createReadStream(test.createReadStream_options)
           .on('data', row => rowKeysRead[rowKeysRead.length - 1].push(row.id))
-          .on('end', () => (endCalled = true))
-          .on('error', err => (error = err as ServiceError));
-        clock.runAll();
-        console.log('clock');
+          .on('end', () => {
+            endCalled = true;
+            doAssertionChecks();
+          })
+          .on('error', err => {
+            error = err as ServiceError;
+            doAssertionChecks();
+          });

-        if (test.error) {
-          assert(!endCalled, ".on('end') should not have been invoked");
-          assert.strictEqual(error!.code, test.error);
-        } else {
-          assert(endCalled, ".on('end') should have been invoked");
-          assert.ifError(error);
+        function doAssertionChecks() {
+          try {
+            if (test.error) {
+              assert(!endCalled, ".on('end') should not have been invoked");
+              assert.strictEqual(error!.code, test.error);
+            } else {
+              assert(endCalled, ".on('end') should have been invoked");
+              assert.ifError(error);
+            }
+            assert.deepStrictEqual(rowKeysRead, test.row_keys_read);
+            assert(responses);
+            assert.strictEqual(
+              responses.length,
+              0,
+              'not all the responses were used',
+            );
+            assert.deepStrictEqual(requestedOptions, test.request_options);
+            done();
+          } catch (e) {
+            done(e);
+          }
         }
-        assert.deepStrictEqual(rowKeysRead, test.row_keys_read);
-        assert.strictEqual(
-          responses.length,
-          0,
-          'not all the responses were used',
-        );
-        assert.deepStrictEqual(requestedOptions, test.request_options);
       });
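With the fake clock gone, the suite above can no longer run its assertions synchronously after `clock.runAll()`; they move into the stream's 'end' and 'error' handlers and complete the test through mocha's `done` callback. The try/catch inside `doAssertionChecks` is the load-bearing part: an assertion that throws inside a stream event handler never reaches mocha's test context, so without `done(e)` a failing expectation surfaces as a timeout instead of the real assertion diff. Reduced to a self-contained sketch (the PassThrough stands in for `createReadStream`; the expected keys are made up for illustration):

    import * as assert from 'assert';
    import {PassThrough} from 'stream';

    it('surfaces assertion failures from stream handlers', done => {
      const rowKeysRead: string[] = [];
      const stream = new PassThrough({objectMode: true});
      stream
        .on('data', (row: {id: string}) => rowKeysRead.push(row.id))
        .on('end', () => {
          try {
            assert.deepStrictEqual(rowKeysRead, ['a', 'b']);
            done();
          } catch (e) {
            done(e); // without this, the throw is lost and the test times out
          }
        });
      stream.write({id: 'a'});
      stream.write({id: 'b'});
      stream.end();
    });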
From 7f859fee540d96b80564081544c4a9793822b3d5 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Wed, 25 Jun 2025 11:33:35 -0400
Subject: [PATCH 442/448] Add all the tests back in

---
 system-test/data/read-rows-retry-test.json | 294 +++++++++++++++++++++
 1 file changed, 294 insertions(+)

diff --git a/system-test/data/read-rows-retry-test.json b/system-test/data/read-rows-retry-test.json
index 133eeb7a8..65d8ea8e6 100644
--- a/system-test/data/read-rows-retry-test.json
+++ b/system-test/data/read-rows-retry-test.json
@@ -2,6 +2,43 @@
   "tests": [

+    {
+      "name": "simple read",
+      "max_retries": 3,
+      "request_options": [
+        {
+          "rowKeys": [],
+          "rowRanges": [{}]
+        }
+      ],
+      "responses": [
+        { "row_keys": [ "a", "b", "c" ] }
+      ],
+      "row_keys_read": [
+        [ "a", "b", "c" ]
+      ]
+    },
+
+
+
+    {
+      "name": "retries a failed read",
+      "max_retries": 3,
+      "request_options": [
+        { "rowKeys": [],
+          "rowRanges": [{}]
+        },
+        { "rowKeys": [], "rowRanges": [ { "startKeyOpen": "b" } ] }
+      ],
+      "responses": [
+        { "row_keys": [ "a", "b" ], "end_with_error": 4 },
+        { "row_keys": [ "c" ] }
+      ],
+      "row_keys_read": [
+        [ "a", "b" ],
+        [ "c" ]
+      ]
+    },

@@ -32,6 +69,263 @@
         [], [], [], []
       ],
       "error": 4
+    },
+
+
+
+
+    {
+      "name": "resets the retry counter after a successful read",
+      "max_retries": 4,
+      "request_options": [
+        { "rowKeys": [],
+          "rowRanges": [{}]
+        },
+        { "rowKeys": [], "rowRanges": [ { "startKeyOpen": "a" } ] },
+        { "rowKeys": [], "rowRanges": [ { "startKeyOpen": "a" } ] },
+        { "rowKeys": [], "rowRanges": [ { "startKeyOpen": "a" } ] },
+        { "rowKeys": [], "rowRanges": [ { "startKeyOpen": "a" } ] },
+        { "rowKeys": [], "rowRanges": [ { "startKeyOpen": "b" } ] },
+        { "rowKeys": [], "rowRanges": [ { "startKeyOpen": "b" } ] },
+        { "rowKeys": [], "rowRanges": [ { "startKeyOpen": "b" } ] }
+      ],
+      "responses": [
+        { "row_keys": [ "a" ], "end_with_error": 4 },
+        { "end_with_error": 4 },
+        { "end_with_error": 4 },
+        { "end_with_error": 4 },
+        { "row_keys": [ "b" ], "end_with_error": 4 },
+        { "end_with_error": 4 },
+        { "end_with_error": 4 },
+        { "row_keys": [ "c" ] }
+      ],
+      "row_keys_read": [
+        [ "a" ], [], [], [], [ "b" ], [], [], [ "c" ]
+      ]
+    },
+
+
+
+    {
+      "name": "moves the start point of a range being consumed",
+      "max_retries": 3,
+      "createReadStream_options": {
+        "ranges": [{
+          "start": "a",
+          "end": "z"
+        }]
+      },
+      "request_options": [
+        { "rowKeys": [], "rowRanges": [ { "startKeyClosed": "a", "endKeyClosed": "z" } ] },
+        { "rowKeys": [], "rowRanges": [ { "startKeyOpen": "b", "endKeyClosed": "z" } ] }
+      ],
+      "responses": [
+        { "row_keys": [ "a", "b" ], "end_with_error": 4 },
+        { "row_keys": [ "c" ] }
+      ],
+      "row_keys_read": [
+        [ "a", "b" ],
+        [ "c" ]
+      ]
+    },
+
+
+
+    {
+      "name": "removes ranges already consumed",
+      "max_retries": 3,
+      "createReadStream_options": {
+        "ranges": [{
+          "start": "a",
+          "end": "c"
+        }, {
+          "start": "x",
+          "end": "z"
+        }]
+      },
+      "request_options": [
+        { "rowKeys": [],
+          "rowRanges": [
+          { "startKeyClosed": "a", "endKeyClosed": "c" },
+          { "startKeyClosed": "x", "endKeyClosed": "z" }
+          ] },
+        { "rowKeys": [],
+          "rowRanges": [ { "startKeyClosed": "x", "endKeyClosed": "z" } ] }
+      ],
+      "responses": [
+        { "row_keys": [ "a", "b", "c" ], "end_with_error": 4 },
+        { "row_keys": [ "x" ] }
+      ],
+      "row_keys_read": [
+        [ "a", "b", "c" ],
+        [ "x" ]
+      ]
+    },
+
+
+
+    {
+      "name": "removes keys already read",
+      "max_retries": 3,
+      "createReadStream_options": {
+        "keys": ["a", "b", "x"]
+      },
+      "request_options": [
+        { "rowKeys": [ "a", "b", "x" ], "rowRanges": [] },
+        { "rowKeys": [ "x" ], "rowRanges": [] }
+      ],
+      "responses": [
+        { "row_keys": [ "a", "b", "c" ], "end_with_error": 4 },
+        { "row_keys": [ "x" ] }
+      ],
+      "row_keys_read": [
+        [ "a", "b", "c" ],
+        [ "x" ]
+      ]
+    },
+
+
+    {
+      "name": "adjust the limit based on the number of rows read",
+      "max_retries": 3,
+      "createReadStream_options": {
+        "limit": 10
+      },
+      "request_options": [
+        { "rowKeys": [], "rowRanges": [{}], "rowsLimit": 10 },
+        { "rowsLimit": 8, "rowKeys":[], "rowRanges": [ { "startKeyOpen": "b" } ] }
+      ],
+      "responses": [
+        { "row_keys": [ "a", "b" ], "end_with_error": 4 },
+        { "row_keys": [ "x" ] }
+      ],
+      "row_keys_read": [
+        [ "a", "b" ],
+        [ "x" ]
+      ]
+    },
+
+
+
+
+
+    {
+      "name": "does the previous 5 things in one giant test case",
+      "max_retries": 4,
+      "createReadStream_options": {
+        "limit": 10,
+        "ranges": [{
+          "start": "a",
+          "end": "c"
+        }, {
+          "start": "p",
+          "end": "s"
+        }, {
+          "start": "x",
+          "end": "z"
+        }],
+        "keys": [ "a", "b", "c", "p", "q", "r", "s", "x", "y", "z" ]
+      },
+      "request_options": [
+        {
+          "rowKeys": [ "a", "b", "c", "p", "q", "r", "s", "x", "y", "z" ],
+          "rowsLimit": 10,
+          "rowRanges": [
+            { "startKeyClosed": "a", "endKeyClosed": "c" },
+            { "startKeyClosed": "p", "endKeyClosed": "s" },
+            { "startKeyClosed": "x", "endKeyClosed": "z" }
+          ]
+        },
+        {
+          "rowKeys": [ "b", "c", "p", "q", "r", "s", "x", "y", "z" ],
+          "rowsLimit": 9,
+          "rowRanges": [
+            { "startKeyOpen": "a", "endKeyClosed": "c" },
+            { "startKeyClosed": "p", "endKeyClosed": "s" },
+            { "startKeyClosed": "x", "endKeyClosed": "z" }
+          ]
+        },
+        {
+          "rowKeys": [ "c", "p", "q", "r", "s", "x", "y", "z" ],
+          "rowsLimit": 8,
+          "rowRanges": [
+            { "startKeyOpen": "b", "endKeyClosed": "c" },
+            { "startKeyClosed": "p", "endKeyClosed": "s" },
+            { "startKeyClosed": "x", "endKeyClosed": "z" }
+          ]
+        },
+        {
+          "rowKeys": [ "c", "p", "q", "r", "s", "x", "y", "z" ],
+          "rowsLimit": 8,
+          "rowRanges": [
+            { "startKeyOpen": "b", "endKeyClosed": "c" },
+            { "startKeyClosed": "p", "endKeyClosed": "s" },
+            { "startKeyClosed": "x", "endKeyClosed": "z" }
+          ]
+        },
+        {
+          "rowKeys": [ "c", "p", "q", "r", "s", "x", "y", "z" ],
+          "rowsLimit": 8,
+          "rowRanges": [
+            { "startKeyOpen": "b", "endKeyClosed": "c" },
+            { "startKeyClosed": "p", "endKeyClosed": "s" },
+            { "startKeyClosed": "x", "endKeyClosed": "z" }
+          ]
+        },
+        {
+          "rowKeys": [ "c", "p", "q", "r", "s", "x", "y", "z" ],
+          "rowsLimit": 8,
+          "rowRanges": [
+            { "startKeyOpen": "b", "endKeyClosed": "c" },
+            { "startKeyClosed": "p", "endKeyClosed": "s" },
+            { "startKeyClosed": "x", "endKeyClosed": "z" }
+          ]
+        },
+        {
+          "rowKeys": [ "p", "q", "r", "s", "x", "y", "z" ],
+          "rowsLimit": 7,
+          "rowRanges": [
+            { "startKeyClosed": "p", "endKeyClosed": "s" },
+            { "startKeyClosed": "x", "endKeyClosed": "z" }
+          ]
+        },
+        {
+          "rowKeys": [ "y", "z" ],
+          "rowsLimit": 2,
+          "rowRanges": [
+            { "startKeyOpen": "x", "endKeyClosed": "z" }
+          ]
+        },
+        {
+          "rowKeys": [ "z" ],
+          "rowsLimit": 1,
+          "rowRanges": [
+            { "startKeyOpen": "y", "endKeyClosed": "z" }
+          ]
+        }
+      ],
+      "responses": [
+        { "row_keys": [ "a" ], "end_with_error": 4 },
+        { "row_keys": [ "b" ], "end_with_error": 4 },
+        { "end_with_error": 4 },
+        { "end_with_error": 4 },
+        { "end_with_error": 4 },
+        { "row_keys": [ "c" ], "end_with_error": 4 },
+        { "row_keys": [ "p", "q", "r", "s", "x" ], "end_with_error": 4 },
+        { "row_keys": [ "y" ], "end_with_error": 4 },
+        { "row_keys": [ "z" ] }
+      ],
+      "row_keys_read": [
+        [ "a" ],
+        [ "b" ],
+        [],
+        [],
+        [],
+        [ "c" ],
+        [ "p", "q", "r", "s", "x" ],
+        [ "y" ],
+        [ "z" ]
+      ]
     }
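Every fixture restored above encodes the same invariant of the retry path: after a retryable failure, the follow-up request must exclude everything already delivered. An open-ended scan resumes from `{ "startKeyOpen": <last key read> }`, a range being consumed gets its start moved just past the last key, fully consumed ranges and already-returned point keys are dropped, and `rowsLimit` shrinks by the number of rows delivered so far. A sketch of that derivation under simplifying assumptions (closed start/end bounds only, plain string comparison standing in for Bigtable's byte ordering; `narrowRequest` is an illustrative helper, not client code):

    interface RowRange {
      startKeyClosed?: string;
      startKeyOpen?: string;
      endKeyClosed?: string;
    }

    function narrowRequest(
      ranges: RowRange[],
      keys: string[],
      limit: number,
      lastKeyRead: string,
      rowsRead: number,
    ) {
      return {
        // Drop ranges that end at or before the last row read; move the
        // start of the range being consumed just past it.
        rowRanges: ranges
          .filter(r => !r.endKeyClosed || r.endKeyClosed > lastKeyRead)
          .map(r =>
            (r.startKeyClosed ?? r.startKeyOpen ?? '') <= lastKeyRead
              ? {startKeyOpen: lastKeyRead, endKeyClosed: r.endKeyClosed}
              : r,
          ),
        // Drop point keys already delivered.
        rowKeys: keys.filter(k => k > lastKeyRead),
        // Ask only for the rows still owed.
        rowsLimit: limit - rowsRead,
      };
    }

Run against the "moves the start point" fixture, for example: a range of "a" to "z" with "a" and "b" already read narrows to `{ "startKeyOpen": "b", "endKeyClosed": "z" }`, which is exactly the second entry in that fixture's request_options.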
+ "name": "adjust the limit based on the number of rows read", + "max_retries": 3, + "createReadStream_options": { + "limit": 10 + }, + "request_options": [ + { "rowKeys": [], "rowRanges": [{}], "rowsLimit": 10 }, + { "rowsLimit": 8, "rowKeys":[], "rowRanges": [ { "startKeyOpen": "b" } ] } + ], + "responses": [ + { "row_keys": [ "a", "b" ], "end_with_error": 4 }, + { "row_keys": [ "x" ] } + ], + "row_keys_read": [ + [ "a", "b" ], + [ "x" ] + ] + }, + + + + + + { + "name": "does the previous 5 things in one giant test case", + "max_retries": 4, + "createReadStream_options": { + "limit": 10, + "ranges": [{ + "start": "a", + "end": "c" + }, { + "start": "p", + "end": "s" + }, { + "start": "x", + "end": "z" + }], + "keys": [ "a", "b", "c", "p", "q", "r", "s", "x", "y", "z" ] + }, + "request_options": [ + { + "rowKeys": [ "a", "b", "c", "p", "q", "r", "s", "x", "y", "z" ], + "rowsLimit": 10, + "rowRanges": [ + { "startKeyClosed": "a", "endKeyClosed": "c" }, + { "startKeyClosed": "p", "endKeyClosed": "s" }, + { "startKeyClosed": "x", "endKeyClosed": "z" } + ] + }, + { + "rowKeys": [ "b", "c", "p", "q", "r", "s", "x", "y", "z" ], + "rowsLimit": 9, + "rowRanges": [ + { "startKeyOpen": "a", "endKeyClosed": "c" }, + { "startKeyClosed": "p", "endKeyClosed": "s" }, + { "startKeyClosed": "x", "endKeyClosed": "z" } + ] + }, + { + "rowKeys": [ "c", "p", "q", "r", "s", "x", "y", "z" ], + "rowsLimit": 8, + "rowRanges": [ + { "startKeyOpen": "b", "endKeyClosed": "c" }, + { "startKeyClosed": "p", "endKeyClosed": "s" }, + { "startKeyClosed": "x", "endKeyClosed": "z" } + ] + }, + { + "rowKeys": [ "c", "p", "q", "r", "s", "x", "y", "z" ], + "rowsLimit": 8, + "rowRanges": [ + { "startKeyOpen": "b", "endKeyClosed": "c" }, + { "startKeyClosed": "p", "endKeyClosed": "s" }, + { "startKeyClosed": "x", "endKeyClosed": "z" } + ] + }, + { + "rowKeys": [ "c", "p", "q", "r", "s", "x", "y", "z" ], + "rowsLimit": 8, + "rowRanges": [ + { "startKeyOpen": "b", "endKeyClosed": "c" }, + { "startKeyClosed": "p", "endKeyClosed": "s" }, + { "startKeyClosed": "x", "endKeyClosed": "z" } + ] + }, + { + "rowKeys": [ "c", "p", "q", "r", "s", "x", "y", "z" ], + "rowsLimit": 8, + "rowRanges": [ + { "startKeyOpen": "b", "endKeyClosed": "c" }, + { "startKeyClosed": "p", "endKeyClosed": "s" }, + { "startKeyClosed": "x", "endKeyClosed": "z" } + ] + }, + { + "rowKeys": [ "p", "q", "r", "s", "x", "y", "z" ], + "rowsLimit": 7, + "rowRanges": [ + { "startKeyClosed": "p", "endKeyClosed": "s" }, + { "startKeyClosed": "x", "endKeyClosed": "z" } + ] + }, + { + "rowKeys": [ "y", "z" ], + "rowsLimit": 2, + "rowRanges": [ + { "startKeyOpen": "x", "endKeyClosed": "z" } + ] + }, + { + "rowKeys": [ "z" ], + "rowsLimit": 1, + "rowRanges": [ + { "startKeyOpen": "y", "endKeyClosed": "z" } + ] + } + ], + "responses": [ + { "row_keys": [ "a" ], "end_with_error": 4 }, + { "row_keys": [ "b" ], "end_with_error": 4 }, + { "end_with_error": 4 }, + { "end_with_error": 4 }, + { "end_with_error": 4 }, + { "row_keys": [ "c" ], "end_with_error": 4 }, + { "row_keys": [ "p", "q", "r", "s", "x" ], "end_with_error": 4 }, + { "row_keys": [ "y" ], "end_with_error": 4 }, + { "row_keys": [ "z" ] } + ], + "row_keys_read": [ + [ "a" ], + [ "b" ], + [], + [], + [], + [ "c" ], + [ "p", "q", "r", "s", "x" ], + [ "y" ], + [ "z" ] + ] } From beed8cd209371ec45a228677f12b192562a70a64 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 25 Jun 2025 11:39:36 -0400 Subject: [PATCH 443/448] still include the sinon stub --- system-test/read-rows.ts | 11 ++++++++--- 1 file changed, 8 
From d9d98a9ca893710971218be5a299f7aa8fe7383b Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Wed, 25 Jun 2025 11:41:34 -0400
Subject: [PATCH 444/448] Remove chunk transformer handler

---
 src/tabular-api-surface.ts | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts
index 4b6bba541..259ec048a 100644
--- a/src/tabular-api-surface.ts
+++ b/src/tabular-api-surface.ts
@@ -491,10 +491,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
       },
       objectMode: true,
     });
-
-    chunkTransformer.on('error', err => {
-      chunkTransformer.emit('error', err);
-    });
+
     rowStream = pumpify.obj([requestStream, chunkTransformer, toRowStream]);

     // Retry on "received rst stream" errors
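The handler deleted in patch 444 re-emitted 'error' on the very emitter that was firing it. EventEmitter invokes 'error' listeners synchronously inside emit(), so the listener re-enters itself until the stack overflows; the first transformer error would have crashed with a RangeError rather than propagating. It was also redundant: pumpify's pump-based wiring already tears down the combined stream and surfaces a failure in any inner stream on `rowStream` itself. The hazard, self-contained:

    import {PassThrough} from 'stream';

    const stream = new PassThrough();
    // An 'error' listener that re-emits the same event on the same emitter
    // calls itself again before it ever returns:
    stream.on('error', err => stream.emit('error', err));
    // RangeError: Maximum call stack size exceeded
    stream.emit('error', new Error('boom'));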
From 1957b841fb3c5ace7aa28dae71ce679a636cec15 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Wed, 25 Jun 2025 11:43:30 -0400
Subject: [PATCH 445/448] formatting changes

---
 src/tabular-api-surface.ts                 | 2 +-
 system-test/data/read-rows-retry-test.json | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts
index 259ec048a..784f1f622 100644
--- a/src/tabular-api-surface.ts
+++ b/src/tabular-api-surface.ts
@@ -491,7 +491,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`);
       },
       objectMode: true,
     });
-
+
     rowStream = pumpify.obj([requestStream, chunkTransformer, toRowStream]);

     // Retry on "received rst stream" errors
diff --git a/system-test/data/read-rows-retry-test.json b/system-test/data/read-rows-retry-test.json
index 65d8ea8e6..b893e3cf4 100644
--- a/system-test/data/read-rows-retry-test.json
+++ b/system-test/data/read-rows-retry-test.json
@@ -146,9 +146,9 @@
       "request_options": [
         { "rowKeys": [],
           "rowRanges": [
-          { "startKeyClosed": "a", "endKeyClosed": "c" },
-          { "startKeyClosed": "x", "endKeyClosed": "z" }
-          ] },
+            { "startKeyClosed": "a", "endKeyClosed": "c" },
+            { "startKeyClosed": "x", "endKeyClosed": "z" }
+          ] },
         { "rowKeys": [],
           "rowRanges": [ { "startKeyClosed": "x", "endKeyClosed": "z" } ] }
       ],

From 284f2b5297d758cbf7b50f81c4c237b1de17346f Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Wed, 25 Jun 2025 11:47:15 -0400
Subject: [PATCH 446/448] Add space

---
 system-test/data/read-rows-retry-test.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/system-test/data/read-rows-retry-test.json b/system-test/data/read-rows-retry-test.json
index b893e3cf4..dd56aa65d 100644
--- a/system-test/data/read-rows-retry-test.json
+++ b/system-test/data/read-rows-retry-test.json
@@ -149,7 +149,7 @@
             { "startKeyClosed": "a", "endKeyClosed": "c" },
             { "startKeyClosed": "x", "endKeyClosed": "z" }
           ] },
-        { "rowKeys": [],
+        { "rowKeys": [],
           "rowRanges": [ { "startKeyClosed": "x", "endKeyClosed": "z" } ] }
       ],
       "responses": [

From dcf53c165f5011a28fc07c4d049c099930418226 Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Wed, 25 Jun 2025 11:47:40 -0400
Subject: [PATCH 447/448] Add space

---
 system-test/data/read-rows-retry-test.json | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/system-test/data/read-rows-retry-test.json b/system-test/data/read-rows-retry-test.json
index dd56aa65d..d97291a2f 100644
--- a/system-test/data/read-rows-retry-test.json
+++ b/system-test/data/read-rows-retry-test.json
@@ -144,12 +144,12 @@
         }]
       },
       "request_options": [
-        { "rowKeys": [],
+        { "rowKeys": [],
           "rowRanges": [
             { "startKeyClosed": "a", "endKeyClosed": "c" },
             { "startKeyClosed": "x", "endKeyClosed": "z" }
           ] },
-        { "rowKeys": [],
+        { "rowKeys": [],
           "rowRanges": [ { "startKeyClosed": "x", "endKeyClosed": "z" } ] }
       ],
       "responses": [

From 5daed23c4a2af385f0c3495aa6fcef1c2e356f9c Mon Sep 17 00:00:00 2001
From: Daniel Bruce
Date: Wed, 25 Jun 2025 12:02:18 -0400
Subject: [PATCH 448/448] remove only

---
 system-test/read-rows.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/system-test/read-rows.ts b/system-test/read-rows.ts
index bd45f96f6..6f0395da9 100644
--- a/system-test/read-rows.ts
+++ b/system-test/read-rows.ts
@@ -124,7 +124,7 @@ describe('Bigtable/Table', () => {
     });
   });

-  describe.only('createReadStream', () => {
+  describe('createReadStream', () => {
     let endCalled: boolean;
     let error: ServiceError | null;
     let requestedOptions: Array<{}>;
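Patch 448 clears the `describe.only` left over from the debugging cycle; had it shipped, every other suite in the file would have been silently skipped. Many projects guard against this class of mistake with mocha's `--forbid-only` option, which fails the run outright if an `.only` survives into CI. A possible .mocharc.json entry (shown for illustration; this repository's actual mocha configuration may differ):

    {
      "forbid-only": true
    }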