From c976640d7e6cf523fa4260f6f721e8aa054a0603 Mon Sep 17 00:00:00 2001
From: Patrick Pfrehm
Date: Tue, 8 Apr 2025 16:12:06 -0700
Subject: [PATCH 1/4] Add telemetry catalogs and weaver gen for metadata.yaml
used by mdatagen

fix: use latest mdatagen v0.124.0
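
In outline, the generation flow added here is: the catalogs under
catalog/<receiver> are the source of truth; weaver (run in a container,
see dependencies.Dockerfile) renders them through the Jinja templates
in templates/registry/metadata into *_metadata.yaml fragments; the
Makefile concatenates those fragments into
internal/collector/<receiver>receiver/metadata.yaml and then runs the
Go generators so mdatagen picks up the new metadata.yaml. A sketch of
the intended usage, assuming Docker (or Podman) and mdatagen v0.124.0
are installed:

    make nginxoss-metadata-gen    # regenerate nginxossreceiver
    make nginxplus-metadata-gen   # regenerate nginxplusreceiver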
---
Makefile | 83 ++
README.md | 2 +-
catalog/nginxoss/metrics.yaml | 63 ++
catalog/nginxoss/registry.yaml | 19 +
catalog/nginxplus/metrics.yaml | 811 ++++++++++++++++++
catalog/nginxplus/registry.yaml | 103 +++
dependencies.Dockerfile | 13 +
.../nginxossreceiver/documentation.md | 8 +-
.../generated_component_test.go | 2 +-
.../internal/metadata/generated_config.go | 6 +-
.../internal/metadata/generated_metrics.go | 10 +-
.../metadata/generated_metrics_test.go | 2 +-
.../internal/metadata/generated_status.go | 4 +-
.../scraper/accesslog/nginx_log_scraper.go | 4 +-
.../accesslog/nginx_log_scraper_test.go | 2 +-
.../scraper/accesslog/testdata/expected.yaml | 8 +-
.../scraper/stubstatus/stub_status_scraper.go | 2 +-
.../scraper/stubstatus/testdata/expected.yaml | 4 +-
.../expected_with_connections_as_gauge.yaml | 6 +-
.../collector/nginxossreceiver/metadata.yaml | 51 +-
.../nginxplusreceiver/documentation.md | 134 +--
.../internal/metadata/generated_metrics.go | 348 ++++----
.../metadata/generated_metrics_test.go | 456 +++++-----
.../collector/nginxplusreceiver/metadata.yaml | 651 +++++++-------
.../collector/nginxplusreceiver/scraper.go | 306 +++----
.../nginxplusreceiver/scraper_test.go | 58 +-
.../nginxplusreceiver/testdata/expected.yaml | 245 +++---
.../registry/metadata/attrs_metadata.yaml.j2 | 41 +
.../metadata/metrics_metadata.yaml.j2 | 30 +
templates/registry/metadata/weaver.yaml | 11 +
30 files changed, 2368 insertions(+), 1115 deletions(-)
create mode 100644 catalog/nginxoss/metrics.yaml
create mode 100644 catalog/nginxoss/registry.yaml
create mode 100644 catalog/nginxplus/metrics.yaml
create mode 100644 catalog/nginxplus/registry.yaml
create mode 100644 dependencies.Dockerfile
create mode 100644 templates/registry/metadata/attrs_metadata.yaml.j2
create mode 100644 templates/registry/metadata/metrics_metadata.yaml.j2
create mode 100644 templates/registry/metadata/weaver.yaml
diff --git a/Makefile b/Makefile
index a83639726..2212e66cf 100644
--- a/Makefile
+++ b/Makefile
@@ -101,6 +101,68 @@ include Makefile.tools
include Makefile.containers
include Makefile.packaging
+# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
+# https://github.com/open-telemetry/weaver #
+# These images are for invoking weaver #
+# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
+
+# From where to resolve the containers (e.g. "otel/weaver").
+CONTAINER_REPOSITORY=docker.io
+
+# Per-container overrides for the repository resolution.
+WEAVER_CONTAINER_REPOSITORY=$(CONTAINER_REPOSITORY)
+SEMCONVGEN_CONTAINER_REPOSITORY=$(CONTAINER_REPOSITORY)
+OPA_CONTAINER_REPOSITORY=$(CONTAINER_REPOSITORY)
+
+CHECK_TARGETS=install-tools markdownlint
+
+# Versioned, non-qualified references to containers used in this Makefile.
+# These are parsed from dependencies.Dockerfile so that Dependabot can
+# automatically update the container versions we use.
+VERSIONED_WEAVER_CONTAINER_NO_REPO=$(shell cat dependencies.Dockerfile | awk '$$4=="weaver" {print $$2}')
+VERSIONED_SEMCONVGEN_CONTAINER_NO_REPO=$(shell cat dependencies.Dockerfile | awk '$$4=="semconvgen" {print $$2}')
+VERSIONED_OPA_CONTAINER_NO_REPO=$(shell cat dependencies.Dockerfile | awk '$$4=="opa" {print $$2}')
+
+# Fully qualified references to containers used in this Makefile. These
+# include the container repository, so that the build will work with tools
+# like "podman" with a default "/etc/containers/registries.conf", where
+# a default repository of "docker.io" is not assumed. This is intended to
+# eliminate errors from podman such as:
+#
+# Error: short-name "otel/weaver:v1.2.3" did not resolve to an alias
+# and no unqualified-search registries are defined in "/etc/containers/registries.conf"
+WEAVER_CONTAINER=$(WEAVER_CONTAINER_REPOSITORY)/$(VERSIONED_WEAVER_CONTAINER_NO_REPO)
+SEMCONVGEN_CONTAINER=$(SEMCONVGEN_CONTAINER_REPOSITORY)/$(VERSIONED_SEMCONVGEN_CONTAINER_NO_REPO)
+OPA_CONTAINER=$(OPA_CONTAINER_REPOSITORY)/$(VERSIONED_OPA_CONTAINER_NO_REPO)
+
+# Determine if "docker" is actually podman
+DOCKER_VERSION_OUTPUT := $(shell docker --version 2>&1)
+DOCKER_IS_PODMAN := $(shell echo $(DOCKER_VERSION_OUTPUT) | grep -c podman)
+
+ifeq ($(DOCKER_IS_PODMAN),0)
+ DOCKER_COMMAND := docker
+else
+ DOCKER_COMMAND := podman
+endif
+
+# Debug printing
+ifdef DEBUG
+$(info Docker version output: $(DOCKER_VERSION_OUTPUT))
+$(info Is Docker actually Podman? $(DOCKER_IS_PODMAN))
+$(info Using command: $(DOCKER_COMMAND))
+endif
+
+DOCKER_RUN=$(DOCKER_COMMAND) run
+DOCKER_USER=$(shell id -u):$(shell id -g)
+DOCKER_USER_IS_HOST_USER_ARG=-u $(DOCKER_USER)
+ifeq ($(DOCKER_COMMAND),podman)
+ # On podman, additional arguments are needed to make "-u" work
+ # correctly with the host user ID and host group ID.
+ #
+ # Error: OCI runtime error: crun: setgroups: Invalid argument
+ DOCKER_USER_IS_HOST_USER_ARG=--userns=keep-id -u $(DOCKER_USER)
+endif
+
.PHONY: help clean no-local-changes build lint format unit-test integration-test run dev run-mock-management-grpc-server generate generate-mocks local-apk-package local-deb-package local-rpm-package
help: ## Show help message
@@ -248,6 +310,27 @@ stop-mock-otel-collector-without-nap: ## Stop running mock management plane OTel
@echo "Stopping mock management plane OTel collector without NAP"
AGENT_IMAGE_WITH_NGINX_PLUS=nginx_plus_$(IMAGE_TAG):latest AGENT_IMAGE_WITH_NGINX_OSS=nginx_oss_$(IMAGE_TAG):latest $(CONTAINER_COMPOSE) -f ./test/mock/collector/docker-compose.yaml down
+# Generate attribute registry metadata for mdatagen.
+# Example uses:
+# make nginxplus-metadata-gen
+# make nginxoss-metadata-gen
+.PHONY: %-metadata-gen
+%-metadata-gen:
+ $(DOCKER_RUN) --rm \
+ $(DOCKER_USER_IS_HOST_USER_ARG) \
+ --mount 'type=bind,source=$(PWD)/templates,target=/home/weaver/templates,readonly' \
+ --mount 'type=bind,source=$(PWD)/catalog/$*,target=/home/weaver/source/,readonly' \
+ --mount 'type=bind,source=$(PWD)/internal/collector,target=/home/weaver/target' \
+ $(WEAVER_CONTAINER) registry generate \
+ --registry=/home/weaver/source \
+ --templates=/home/weaver/templates \
+ metadata \
+ /home/weaver/target/$*receiver
+ cat $(PWD)/internal/collector/$*receiver/*_metadata.yaml > $(PWD)/internal/collector/$*receiver/metadata.yaml
+ rm $(PWD)/internal/collector/$*receiver/*_metadata.yaml
+ @echo "🗃️ Generating go files"
+ @$(GOGEN) $(PWD)/internal/collector/$*receiver/...
+
generate: ## Generate golang code
@echo "🗄️ Generating proto files"
@cd api/grpc && $(GORUN) $(BUF) generate
diff --git a/README.md b/README.md
index b331d103f..4bc155473 100644
--- a/README.md
+++ b/README.md
@@ -24,7 +24,7 @@ The following packages need to be installed:
```
git clone https://github.com/open-telemetry/opentelemetry-collector.git
cd opentelemetry-collector
-git checkout v0.114.0
+git checkout v0.124.0
cd cmd/mdatagen
go install
```
diff --git a/catalog/nginxoss/metrics.yaml b/catalog/nginxoss/metrics.yaml
new file mode 100644
index 000000000..a5c821714
--- /dev/null
+++ b/catalog/nginxoss/metrics.yaml
@@ -0,0 +1,63 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/open-telemetry/weaver/v0.9.2/schemas/semconv.schema.json
+groups:
+ # nginx.http.* metrics
+ - id: metric.nginxoss.http.connections
+ type: metric
+ unit: "connections"
+ metric_name: nginx.http.connections
+ stability: experimental
+ brief: "The total number of connections, since NGINX was last started or reloaded."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/status`
+ attributes:
+ - ref: nginx.connections.outcome
+ requirement_level: required
+
+ - id: metric.nginxoss.http.connection.count
+ type: metric
+ unit: "connections"
+ metric_name: nginx.http.connection.count
+ stability: experimental
+ brief: "The current number of connections."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/status`
+ attributes:
+ - ref: nginx.connections.outcome
+ requirement_level: required
+
+ - id: metric.nginxoss.http.requests
+ type: metric
+ unit: "requests"
+ metric_name: nginx.http.requests
+ stability: experimental
+ brief: "The total number of client requests received, since NGINX was last started or reloaded."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/status`
+ attributes: []
+
+ - id: metric.nginxoss.http.request.count
+ type: metric
+ unit: "requests"
+ metric_name: nginx.http.request.count
+ stability: experimental
+ brief: "The total number of client requests received, since the last collection interval."
+ instrument: gauge
+ note: |
+      Source: HTTP GET `/status`
+ attributes: []
+
+ - id: metric.nginxoss.http.response.count
+ type: metric
+ unit: "responses"
+ metric_name: nginx.http.response.count
+ stability: experimental
+ brief: "The total number of HTTP responses, since the last collection interval and grouped by status code range."
+ instrument: gauge
+ note: |
+ Source: Logs `access.log`
+ attributes:
+ - ref: nginx.status_range
+ requirement_level: required
diff --git a/catalog/nginxoss/registry.yaml b/catalog/nginxoss/registry.yaml
new file mode 100644
index 000000000..0808eff8a
--- /dev/null
+++ b/catalog/nginxoss/registry.yaml
@@ -0,0 +1,19 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/open-telemetry/weaver/v0.9.2/schemas/semconv.schema.json
+groups:
+ # General NGINX OSS attributes
+ - id: registry.nginxoss
+ type: attribute_group
+ stability: experimental
+ display_name: General NGINX OSS Attributes
+ brief: "Describes NGINX attributes"
+ attributes:
+ - id: nginx.status_range
+ type: string
+ stability: development
+ brief: "A status code range or bucket for a HTTP response's status code."
+ examples: ["1xx","2xx","3xx","4xx","5xx"]
+ - id: nginx.connections.outcome
+ type: string
+ stability: development
+ brief: "The outcome of a connection"
+ examples: ["ACCEPTED","ACTIVE","HANDLED","READING","WRITING","WAITING","DROPPED","IDLE"]
diff --git a/catalog/nginxplus/metrics.yaml b/catalog/nginxplus/metrics.yaml
new file mode 100644
index 000000000..f9aea9b6a
--- /dev/null
+++ b/catalog/nginxplus/metrics.yaml
@@ -0,0 +1,811 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/open-telemetry/weaver/v0.9.2/schemas/semconv.schema.json
+groups:
+ # nginx.config metrics
+ - id: metric.nginxplus.config.reloads
+ type: metric
+ unit: "reloads"
+ metric_name: nginx.config.reloads
+ stability: experimental
+ brief: "The total number of NGINX config reloads."
+ instrument: counter
+ note: |
+ Source: Service `nginx-agent`
+ attributes: []
+
+ # nginx.http.* metrics
+ - id: metric.nginxplus.http.connections
+ type: metric
+ unit: "connections"
+ metric_name: nginx.http.connections
+ stability: experimental
+ brief: "The total number of connections."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/connections`
+ attributes:
+ - ref: nginx.connections.outcome
+
+ - id: metric.nginxplus.http.connection.count
+ type: metric
+ unit: "connections"
+ metric_name: nginx.http.connection.count
+ stability: experimental
+ brief: "The current number of connections."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/connections`
+ attributes:
+ - ref: nginx.connections.outcome
+
+ - id: metric.nginxplus.http.limit_conn.requests
+ type: metric
+ unit: "connections"
+ metric_name: nginx.http.limit_conn.requests
+ stability: experimental
+ brief: "The total number of connections to an endpoint with a limit_conn directive."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/http/limit_conns`
+ attributes:
+ - ref: nginx.limit_conn.outcome
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.http.limit_req.requests
+ type: metric
+ unit: "requests"
+ metric_name: nginx.http.limit_req.requests
+ stability: experimental
+ brief: "The total number of requests to an endpoint with a limit_req directive."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/http/limit_reqs`
+ attributes:
+ - ref: nginx.limit_req.outcome
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.http.request.io
+ type: metric
+ unit: "bytes"
+ metric_name: nginx.http.request.io
+ stability: experimental
+ brief: "The total number of HTTP byte IO."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/http/requests`
+ attributes:
+ - ref: nginx.io.direction
+ - ref: nginx.zone.name
+ - ref: nginx.zone.type
+
+ - id: metric.nginxplus.http.request.discarded
+ type: metric
+ unit: "requests"
+ metric_name: nginx.http.request.discarded
+ stability: experimental
+ brief: "The total number of requests completed without sending a response."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/http/requests`
+ attributes:
+ - ref: nginx.zone.name
+ - ref: nginx.zone.type
+
+ - id: metric.nginxplus.http.request.processing.count
+ type: metric
+ unit: "requests"
+ metric_name: nginx.http.request.processing.count
+ stability: experimental
+ brief: "The number of client requests that are currently being processed."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/http/requests`
+ attributes:
+ - ref: nginx.zone.name
+ - ref: nginx.zone.type
+
+ - id: metric.nginxplus.http.requests
+ type: metric
+ unit: "requests"
+ metric_name: nginx.http.requests
+ stability: experimental
+ brief: "The total number of client requests received, since NGINX was last started or reloaded."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/http/requests`
+ attributes:
+ - ref: nginx.zone.name
+ - ref: nginx.zone.type
+
+ - id: metric.nginxplus.http.request.count
+ type: metric
+ unit: "requests"
+ metric_name: nginx.http.request.count
+ stability: experimental
+ brief: "The total number of client requests received, since the last collection interval."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/http/requests`
+ attributes: []
+
+ - id: metric.nginxplus.http.responses
+ type: metric
+ unit: "responses"
+ metric_name: nginx.http.responses
+ stability: experimental
+ brief: "The total number of HTTP responses sent to clients."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/http/requests`
+ attributes:
+ - ref: nginx.zone.name
+ - ref: nginx.zone.type
+
+ - id: metric.nginxplus.http.response.status
+ type: metric
+ unit: "responses"
+ metric_name: nginx.http.response.status
+ stability: experimental
+ brief: "The total number of responses since NGINX was last started or reloaded, grouped by status code range."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/http/requests`
+ attributes:
+ - ref: nginx.status_range
+ - ref: nginx.zone.name
+ - ref: nginx.zone.type
+
+ - id: metric.nginxplus.http.response.count
+ type: metric
+ unit: "responses"
+ metric_name: nginx.http.response.count
+ stability: experimental
+ brief: "The total number of HTTP responses, since the last collection interval and grouped by status code range."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/http/requests`
+ attributes:
+ - ref: nginx.status_range
+ - ref: nginx.zone.name
+ - ref: nginx.zone.type
+
+ - id: metric.nginxplus.http.upstream.keepalive.count
+ type: metric
+ unit: "connections"
+ metric_name: nginx.http.upstream.keepalive.count
+ stability: experimental
+ brief: "The current number of idle keepalive connections per HTTP upstream."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/http/upstreams`
+ attributes:
+ - ref: nginx.zone.name
+ - ref: nginx.upstream.name
+
+ - id: metric.nginxplus.http.upstream.peer.io
+ type: metric
+ unit: "bytes"
+ metric_name: nginx.http.upstream.peer.io
+ stability: experimental
+ brief: "The total number of byte IO per HTTP upstream peer."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/http/upstreams`
+ attributes:
+ - ref: nginx.io.direction
+ - ref: nginx.upstream.name
+ - ref: nginx.zone.name
+ - ref: nginx.peer.address
+ - ref: nginx.peer.name
+
+ - id: metric.nginxplus.http.upstream.peer.connection.count
+ type: metric
+ unit: "connections"
+ metric_name: nginx.http.upstream.peer.connection.count
+ stability: experimental
+ brief: "The average number of active connections per HTTP upstream peer."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/http/upstreams`
+ attributes:
+ - ref: nginx.upstream.name
+ - ref: nginx.peer.address
+ - ref: nginx.peer.name
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.http.upstream.peer.count
+ type: metric
+ unit: "peers"
+ metric_name: nginx.http.upstream.peer.count
+ stability: experimental
+ brief: "The current count of peers on the HTTP upstream grouped by state."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/http/upstreams`
+ attributes:
+ - ref: nginx.peer.state
+ - ref: nginx.zone.name
+ - ref: nginx.upstream.name
+
+ - id: metric.nginxplus.http.upstream.peer.fails
+ type: metric
+ unit: "attempts"
+ metric_name: nginx.http.upstream.peer.fails
+ stability: experimental
+ brief: "The total number of unsuccessful attempts to communicate with the HTTP upstream peer."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/http/upstreams`
+ attributes:
+ - ref: nginx.upstream.name
+ - ref: nginx.peer.name
+ - ref: nginx.peer.address
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.http.upstream.peer.header.time
+ type: metric
+ unit: "ms"
+ metric_name: nginx.http.upstream.peer.header.time
+ stability: experimental
+ brief: "The average time to get the response header from the HTTP upstream peer."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/http/upstreams`
+ attributes:
+ - ref: nginx.upstream.name
+ - ref: nginx.peer.name
+ - ref: nginx.peer.address
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.http.upstream.peer.health_checks
+ type: metric
+ unit: "requests"
+ metric_name: nginx.http.upstream.peer.health_checks
+ stability: experimental
+ brief: "The total number of health check requests made to a HTTP upstream peer."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/http/upstreams`
+ attributes:
+ - ref: nginx.health_check
+ - ref: nginx.upstream.name
+ - ref: nginx.peer.name
+ - ref: nginx.peer.address
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.http.upstream.peer.requests
+ type: metric
+ unit: "requests"
+ metric_name: nginx.http.upstream.peer.requests
+ stability: experimental
+ brief: "The total number of client requests forwarded to the HTTP upstream peer."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/http/upstreams`
+ attributes:
+ - ref: nginx.upstream.name
+ - ref: nginx.peer.name
+ - ref: nginx.peer.address
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.http.upstream.peer.response.time
+ type: metric
+ unit: "ms"
+ metric_name: nginx.http.upstream.peer.response.time
+ stability: experimental
+ brief: "The average time to get the full response from the HTTP upstream peer."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/http/upstreams`
+ attributes:
+ - ref: nginx.upstream.name
+ - ref: nginx.peer.name
+ - ref: nginx.peer.address
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.http.upstream.peer.responses
+ type: metric
+ unit: "responses"
+ metric_name: nginx.http.upstream.peer.responses
+ stability: experimental
+ brief: "The total number of responses obtained from the HTTP upstream peer grouped by status range."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/http/upstreams`
+ attributes:
+ - ref: nginx.status_range
+ - ref: nginx.upstream.name
+ - ref: nginx.peer.name
+ - ref: nginx.peer.address
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.http.upstream.peer.unavailables
+ type: metric
+ unit: "requests"
+ metric_name: nginx.http.upstream.peer.unavailables
+ stability: experimental
+ brief: "The total number of times the server became unavailable for client requests ('unavail')."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/http/upstreams`
+ attributes:
+ - ref: nginx.upstream.name
+ - ref: nginx.peer.name
+ - ref: nginx.peer.address
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.http.upstream.peer.state
+ type: metric
+ unit: "is_deployed"
+ metric_name: nginx.http.upstream.peer.state
+ stability: experimental
+ brief: "Current state of an upstream peer in deployment."
+ instrument: gauge
+ attributes:
+ - ref: nginx.peer.state
+ - ref: nginx.upstream.name
+ - ref: nginx.peer.name
+ - ref: nginx.peer.address
+ - ref: nginx.zone.name
+
+
+ - id: metric.nginxplus.http.upstream.queue.limit
+ type: metric
+ unit: "requests"
+ metric_name: nginx.http.upstream.queue.limit
+ stability: experimental
+ brief: "The maximum number of requests that can be in the queue at the same time."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/http/upstreams`
+ attributes:
+ - ref: nginx.upstream.name
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.http.upstream.queue.overflows
+ type: metric
+ unit: "responses"
+ metric_name: nginx.http.upstream.queue.overflows
+ stability: experimental
+ brief: "The total number of requests rejected due to the queue overflow."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/http/upstreams`
+ attributes:
+ - ref: nginx.upstream.name
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.http.upstream.queue.usage
+ type: metric
+ unit: "requests"
+ metric_name: nginx.http.upstream.queue.usage
+ stability: experimental
+ brief: "The current number of requests in the queue."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/http/upstreams`
+ attributes:
+ - ref: nginx.upstream.name
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.http.upstream.zombie.count
+ type: metric
+ unit: "is_deployed"
+ metric_name: nginx.http.upstream.zombie.count
+ stability: experimental
+ brief: "The current number of upstream peers removed from the group but still processing active client requests."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/http/upstreams`
+ attributes:
+ - ref: nginx.upstream.name
+ - ref: nginx.zone.name
+
+ # nginx.cache.* metrics
+ - id: metric.nginxplus.cache.bytes_read
+ type: metric
+ unit: "bytes"
+ metric_name: nginx.cache.bytes_read
+ stability: experimental
+ brief: "The total number of bytes read from the cache or proxied server."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/http/caches`
+ attributes:
+ - ref: nginx.cache.outcome
+ - ref: nginx.cache.name
+
+ - id: metric.nginxplus.cache.responses
+ type: metric
+ unit: "responses"
+ metric_name: nginx.cache.responses
+ stability: experimental
+ brief: "The total number of responses read from the cache or proxied server."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/http/caches`
+ attributes:
+ - ref: nginx.cache.outcome
+ - ref: nginx.cache.name
+
+ - id: metric.nginxplus.cache.memory.limit
+ type: metric
+ unit: "bytes"
+ metric_name: nginx.cache.memory.limit
+ stability: experimental
+ brief: "The limit on the maximum size of the cache specified in the configuration."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/http/caches`
+ attributes:
+ - ref: nginx.cache.name
+
+ - id: metric.nginxplus.cache.memory.usage
+ type: metric
+ unit: "bytes"
+ metric_name: nginx.cache.memory.usage
+ stability: experimental
+ brief: "The current size of the cache."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/http/caches`
+ attributes:
+ - ref: nginx.cache.name
+
+ # nginx.slab.* metrics
+ - id: metric.nginxplus.slab.page.free
+ type: metric
+ unit: "pages"
+ metric_name: nginx.slab.page.free
+ stability: experimental
+ brief: "The current number of free memory pages."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/http/slabs`
+ attributes:
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.slab.page.limit
+ type: metric
+ unit: "pages"
+ metric_name: nginx.slab.page.limit
+ stability: experimental
+ brief: "The total number of memory pages (free and used)."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/http/slabs`
+ attributes:
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.slab.page.usage
+ type: metric
+ unit: "pages"
+ metric_name: nginx.slab.page.usage
+ stability: experimental
+ brief: "The current number of used memory pages."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/http/slabs`
+ attributes:
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.slab.page.utilization
+ type: metric
+ unit: "pages"
+ metric_name: nginx.slab.page.utilization
+ stability: experimental
+ brief: "The current percentage of used memory pages."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/http/slabs`
+ attributes:
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.slab.slot.usage
+ type: metric
+ unit: "slots"
+ metric_name: nginx.slab.slot.usage
+ stability: experimental
+ brief: "The current number of used memory slots."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/http/slabs`
+ attributes:
+ - ref: nginx.slab.slot.limit
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.slab.slot.free
+ type: metric
+ unit: "slots"
+ metric_name: nginx.slab.slot.free
+ stability: experimental
+ brief: "The current number of free memory slots."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/http/slabs`
+ attributes:
+ - ref: nginx.slab.slot.limit
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.slab.slot.allocations
+ type: metric
+ unit: "allocations"
+ metric_name: nginx.slab.slot.allocations
+ stability: experimental
+ brief: "The number of attempts to allocate memory of specified size."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/http/slabs`
+ attributes:
+ - ref: nginx.slab.slot.limit
+ - ref: nginx.slab.slot.allocation.result
+ - ref: nginx.zone.name
+
+ # nginx.ssl.* metrics
+ - id: metric.nginxplus.ssl.handshakes
+ type: metric
+ unit: "handshakes"
+ metric_name: nginx.ssl.handshakes
+ stability: experimental
+ brief: "The total number of SSL handshakes."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/http/ssl`
+ attributes:
+ - ref: nginx.ssl.status
+ - ref: nginx.ssl.handshake.reason
+
+ - id: metric.nginxplus.ssl.certificate.verify_failures
+ type: metric
+ unit: "certificates"
+ metric_name: nginx.ssl.certificate.verify_failures
+ stability: experimental
+ brief: "The total number of SSL certificate verification failures."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/http/ssl`
+ attributes:
+ - ref: nginx.ssl.verify_failure.reason
+
+ # nginx.stream.* metrics
+ - id: metric.nginxplus.stream.io
+ type: metric
+ unit: "bytes"
+ metric_name: nginx.stream.io
+ stability: experimental
+ brief: "The total number of Stream byte IO."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/stream/zone_sync`
+ attributes:
+ - ref: nginx.io.direction
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.stream.connection.accepted
+ type: metric
+ unit: "connections"
+ metric_name: nginx.stream.connection.accepted
+ stability: experimental
+ brief: "The total number of connections accepted from clients."
+ instrument: counter
+ note: |
+ Cumulative
+ attributes:
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.stream.connection.discarded
+ type: metric
+ unit: "connections"
+ metric_name: nginx.stream.connection.discarded
+ stability: experimental
+ brief: "Total number of connections completed without creating a session."
+ instrument: counter
+ note: |
+ Cumulative
+ attributes:
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.stream.connection.processing.count
+ type: metric
+ unit: "connections"
+ metric_name: nginx.stream.connection.processing.count
+ stability: experimental
+ brief: "The number of client connections that are currently being processed."
+ instrument: gauge
+ attributes:
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.stream.session.status
+ type: metric
+ unit: "sessions"
+ metric_name: nginx.stream.session.status
+ stability: experimental
+ brief: "The total number of completed sessions."
+ instrument: counter
+ note: |
+ Cumulative
+ attributes:
+ - ref: nginx.status_range
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.stream.upstream.peer.io
+ type: metric
+ unit: "bytes"
+ metric_name: nginx.stream.upstream.peer.io
+ stability: experimental
+ brief: "The total number of Stream Upstream Peer byte IO."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/stream/upstreams`
+ attributes:
+ - ref: nginx.io.direction
+ - ref: nginx.upstream.name
+ - ref: nginx.zone.name
+ - ref: nginx.peer.address
+ - ref: nginx.peer.name
+
+ - id: metric.nginxplus.stream.upstream.peer.connection.count
+ type: metric
+ unit: "connections"
+ metric_name: nginx.stream.upstream.peer.connection.count
+ stability: experimental
+ brief: "The current number of Stream Upstream Peer connections."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/stream/upstreams`
+ attributes:
+ - ref: nginx.upstream.name
+ - ref: nginx.zone.name
+ - ref: nginx.peer.address
+ - ref: nginx.peer.name
+
+ - id: metric.nginxplus.stream.upstream.peer.connection.time
+ type: metric
+ unit: "ms"
+ metric_name: nginx.stream.upstream.peer.connection.time
+ stability: experimental
+ brief: "The average time to connect to the stream upstream peer."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/stream/upstreams`
+ attributes:
+ - ref: nginx.upstream.name
+ - ref: nginx.zone.name
+ - ref: nginx.peer.address
+ - ref: nginx.peer.name
+
+ - id: metric.nginxplus.stream.upstream.peer.connections
+ type: metric
+ unit: "connections"
+ metric_name: nginx.stream.upstream.peer.connections
+ stability: experimental
+ brief: "The total number of client connections forwarded to this stream upstream peer."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/stream/upstreams`
+ attributes:
+ - ref: nginx.upstream.name
+ - ref: nginx.zone.name
+ - ref: nginx.peer.address
+ - ref: nginx.peer.name
+
+ - id: metric.nginxplus.stream.upstream.peer.count
+ type: metric
+ unit: "peers"
+ metric_name: nginx.stream.upstream.peer.count
+ stability: experimental
+ brief: "The current number of stream upstream peers grouped by state."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/stream/upstreams`
+ attributes:
+ - ref: nginx.peer.state
+ - ref: nginx.upstream.name
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.stream.upstream.peer.fails
+ type: metric
+ unit: "attempts"
+ metric_name: nginx.stream.upstream.peer.fails
+ stability: experimental
+ brief: "The total number of unsuccessful attempts to communicate with the stream upstream peer."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/stream/upstreams`
+ attributes:
+ - ref: nginx.upstream.name
+ - ref: nginx.peer.address
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.stream.upstream.peer.health_checks
+ type: metric
+ unit: "requests"
+ metric_name: nginx.stream.upstream.peer.health_checks
+ stability: experimental
+ brief: "The total number of health check requests made to the stream upstream peer."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/stream/upstreams`
+ attributes:
+ - ref: nginx.health_check
+ - ref: nginx.upstream.name
+ - ref: nginx.peer.name
+ - ref: nginx.peer.address
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.stream.upstream.peer.response.time
+ type: metric
+ unit: "ms"
+ metric_name: nginx.stream.upstream.peer.response.time
+ stability: experimental
+ brief: "The average time to receive the last byte of data for the stream upstream peer."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/stream/upstreams`
+ attributes:
+ - ref: nginx.upstream.name
+ - ref: nginx.peer.name
+ - ref: nginx.peer.address
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.stream.upstream.peer.ttfb.time
+ type: metric
+ unit: "ms"
+ metric_name: nginx.stream.upstream.peer.ttfb.time
+ stability: experimental
+ brief: "The average time to receive the first byte of data for the stream upstream peer."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/stream/upstreams`
+ attributes:
+ - ref: nginx.upstream.name
+ - ref: nginx.peer.name
+ - ref: nginx.peer.address
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.stream.upstream.peer.unavailables
+ type: metric
+ unit: "requests"
+ metric_name: nginx.stream.upstream.peer.unavailables
+ stability: experimental
+ brief: "How many times the server became unavailable for client connections (state 'unavail') due to the number of
+ unsuccessful attempts reaching the max_fails threshold."
+ instrument: counter
+ note: |
+ Source: HTTP GET `/api/9/stream/upstreams`
+ attributes:
+ - ref: nginx.upstream.name
+ - ref: nginx.peer.name
+ - ref: nginx.peer.address
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.stream.upstream.peer.state
+ type: metric
+ unit: "deployments"
+ metric_name: nginx.stream.upstream.peer.state
+ stability: experimental
+ brief: "Current state of upstream peers in deployment."
+ note: |
+ If any of the upstream peers in the deployment match the given state then the value will be 1.
+ If no upstream peer is a match then the value will be 0.
+
+ Source: HTTP GET `/api/9/stream/upstreams`
+ instrument: gauge
+ attributes:
+ - ref: nginx.peer.state
+ - ref: nginx.upstream.name
+ - ref: nginx.peer.name
+ - ref: nginx.peer.address
+ - ref: nginx.zone.name
+
+ - id: metric.nginxplus.stream.upstream.zombie.count
+ type: metric
+ unit: "deployments"
+ metric_name: nginx.stream.upstream.zombie.count
+ stability: experimental
+ brief: "The current number of peers removed from the group but still processing active client connections."
+ instrument: gauge
+ note: |
+ Source: HTTP GET `/api/9/stream/upstreams`
+ attributes:
+ - ref: nginx.upstream.name
+ - ref: nginx.zone.name
diff --git a/catalog/nginxplus/registry.yaml b/catalog/nginxplus/registry.yaml
new file mode 100644
index 000000000..2e41c8576
--- /dev/null
+++ b/catalog/nginxplus/registry.yaml
@@ -0,0 +1,103 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/open-telemetry/weaver/v0.9.2/schemas/semconv.schema.json
+groups:
+ # General NGINX Plus attributes
+ - id: registry.nginxplus
+ type: attribute_group
+ stability: experimental
+ display_name: General NGINX Plus Attributes
+ brief: "Describes NGINX attributes"
+ attributes:
+ - id: nginx.cache.outcome
+ type: string
+ stability: development
+ brief: "The outcome for an attempt to fetch content from NGINX's cache"
+ examples: ["BYPASS","EXPIRED","HIT","MISS","REVALIDATED","STALE","UPDATING"]
+ - id: nginx.zone.name
+ type: string
+ stability: development
+ brief: "The name of the shared memory zone."
+ examples: ""
+ - id: nginx.zone.type
+ type: string
+ stability: development
+ brief: "The type of shared memory zone, depending on what block it was defined in the NGINX configuration."
+ examples: ["SERVER","LOCATION"]
+ - id: nginx.limit_conn.outcome
+ type: string
+ stability: development
+ brief: "The outcome for attempting to establish a connection to an endpoint that has a limit_conn directive configured."
+ examples: ["PASSED","REJECTED","REJECTED_DRY_RUN"]
+ - id: nginx.limit_req.outcome
+ type: string
+ stability: development
+ brief: "The outcome for attempting to establish a connection to an endpoint that has a limit_req directive configured."
+ examples: ["PASSED","REJECTED","REJECTED_DRY_RUN","DELAYED","DELAYED_DRY_RUN"]
+ - id: nginx.io.direction
+ type: string
+ stability: development
+ brief: "The direction of byte traffic."
+ examples: ["receive","transmit"]
+ - id: nginx.status_range
+ type: string
+ stability: development
+ brief: "A status code range or bucket for a HTTP response's status code."
+ examples: ["1xx","2xx","3xx","4xx","5xx"]
+ - id: nginx.upstream.name
+ type: string
+ stability: development
+ brief: "The name of the upstream block."
+ examples: ""
+ - id: nginx.peer.address
+ type: string
+ stability: development
+ brief: "The address of the peer."
+ examples: ""
+ - id: nginx.peer.name
+ type: string
+ stability: development
+ brief: "The name of the peer."
+ examples: ""
+ - id: nginx.peer.state
+ type: string
+ stability: development
+ brief: "The current state of an upstream peer."
+ examples: ["CHECKING","DOWN","DRAINING","UNAVAILABLE","UNHEALTHY","UP"]
+ - id: nginx.health_check
+ type: string
+ stability: development
+ brief: "The state received from a health check."
+ examples: ["UNHEALTHY","FAIL"]
+ - id: nginx.ssl.status
+ type: string
+ stability: development
+ brief: "The status of a SSL handshake."
+ examples: ["FAILED","REUSE"]
+ - id: nginx.ssl.handshake.reason
+ type: string
+ stability: development
+ brief: "The reason for a SSL handshake failure."
+ examples: ["NO_COMMON_PROTOCOL","NO_COMMON_CIPHER","TIMEOUT","CERT_REJECTED"]
+ - id: nginx.ssl.verify_failure.reason
+ type: string
+ stability: development
+ brief: "The reason for a SSL certificate verification failure."
+ examples: ["NO_CERT","EXPIRED_CERT","REVOKED_CERT","HOSTNAME_MISMATCH","OTHER"]
+ - id: nginx.slab.slot.allocation.result
+ type: string
+ stability: development
+ brief: "Result of an attempt to allocate memory to a slab slot."
+ examples: ["FAILURE","SUCCESS"]
+ - id: nginx.slab.slot.limit
+ type: int
+ stability: development
+ brief: "The upper limit for a slab slot, used as the identifier for the slot."
+ - id: nginx.connections.outcome
+ type: string
+ stability: development
+ brief: "The outcome of a connection"
+ examples: ["ACCEPTED","ACTIVE","HANDLED","READING","WRITING","WAITING","DROPPED","IDLE"]
+ - id: nginx.cache.name
+ type: string
+ stability: development
+ brief: "The name of the cache."
+ examples: ""
diff --git a/dependencies.Dockerfile b/dependencies.Dockerfile
new file mode 100644
index 000000000..d0c9dd404
--- /dev/null
+++ b/dependencies.Dockerfile
@@ -0,0 +1,13 @@
+# DO NOT BUILD
+# This file is just for tracking dependencies of the semantic convention build.
+# Dependabot can keep this file up to date with the latest container versions.
+
+# Weaver is used to generate markdown docs, and enforce policies on the model.
+FROM otel/weaver:v0.13.2 AS weaver
+
+# OPA is used to test policies enforced by weaver.
+FROM openpolicyagent/opa:1.2.0 AS opa
+
+# Semconv gen is used for backwards compatibility checks.
+# TODO(jsuereth): Remove this when no longer used.
+FROM otel/semconvgen:0.25.0 AS semconvgen
diff --git a/internal/collector/nginxossreceiver/documentation.md b/internal/collector/nginxossreceiver/documentation.md
index 0edb9ad5b..7587a4882 100644
--- a/internal/collector/nginxossreceiver/documentation.md
+++ b/internal/collector/nginxossreceiver/documentation.md
@@ -1,6 +1,6 @@
[comment]: <> (Code generated by mdatagen. DO NOT EDIT.)
-# nginx
+# nginxoss
## Default Metrics
@@ -24,7 +24,7 @@ The current number of connections.
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.connections.outcome | The outcome of the connection. | Str: ``ACCEPTED``, ``ACTIVE``, ``HANDLED``, ``READING``, ``WRITING``, ``WAITING`` |
+| nginx.connections.outcome | The outcome of a connection | Str: ``ACCEPTED``, ``ACTIVE``, ``HANDLED``, ``READING``, ``WRITING``, ``WAITING``, ``DROPPED``, ``IDLE`` |
### nginx.http.connections
@@ -38,7 +38,7 @@ The total number of connections, since NGINX was last started or reloaded.
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.connections.outcome | The outcome of the connection. | Str: ``ACCEPTED``, ``ACTIVE``, ``HANDLED``, ``READING``, ``WRITING``, ``WAITING`` |
+| nginx.connections.outcome | The outcome of a connection | Str: ``ACCEPTED``, ``ACTIVE``, ``HANDLED``, ``READING``, ``WRITING``, ``WAITING``, ``DROPPED``, ``IDLE`` |
### nginx.http.request.count
@@ -58,7 +58,7 @@ The total number of client requests received, since NGINX was last started or re
### nginx.http.response.count
-The total number of HTTP responses since the last collection interval, grouped by status code range.
+The total number of HTTP responses, since the last collection interval and grouped by status code range.
| Unit | Metric Type | Value Type |
| ---- | ----------- | ---------- |
diff --git a/internal/collector/nginxossreceiver/generated_component_test.go b/internal/collector/nginxossreceiver/generated_component_test.go
index 717edd576..9622f0620 100644
--- a/internal/collector/nginxossreceiver/generated_component_test.go
+++ b/internal/collector/nginxossreceiver/generated_component_test.go
@@ -15,7 +15,7 @@ import (
"go.opentelemetry.io/collector/receiver/receivertest"
)
-var typ = component.MustNewType("nginx")
+var typ = component.MustNewType("nginxoss")
func TestComponentFactoryType(t *testing.T) {
require.Equal(t, typ, NewFactory().Type())
diff --git a/internal/collector/nginxossreceiver/internal/metadata/generated_config.go b/internal/collector/nginxossreceiver/internal/metadata/generated_config.go
index 6601fb35d..1c3066135 100644
--- a/internal/collector/nginxossreceiver/internal/metadata/generated_config.go
+++ b/internal/collector/nginxossreceiver/internal/metadata/generated_config.go
@@ -26,7 +26,7 @@ func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error {
return nil
}
-// MetricsConfig provides config for nginx metrics.
+// MetricsConfig provides config for nginxoss metrics.
type MetricsConfig struct {
NginxHTTPConnectionCount MetricConfig `mapstructure:"nginx.http.connection.count"`
NginxHTTPConnections MetricConfig `mapstructure:"nginx.http.connections"`
@@ -81,7 +81,7 @@ func (rac *ResourceAttributeConfig) Unmarshal(parser *confmap.Conf) error {
return nil
}
-// ResourceAttributesConfig provides config for nginx resource attributes.
+// ResourceAttributesConfig provides config for nginxoss resource attributes.
type ResourceAttributesConfig struct {
InstanceID ResourceAttributeConfig `mapstructure:"instance.id"`
InstanceType ResourceAttributeConfig `mapstructure:"instance.type"`
@@ -98,7 +98,7 @@ func DefaultResourceAttributesConfig() ResourceAttributesConfig {
}
}
-// MetricsBuilderConfig is a configuration for nginx metrics builder.
+// MetricsBuilderConfig is a configuration for nginxoss metrics builder.
type MetricsBuilderConfig struct {
Metrics MetricsConfig `mapstructure:"metrics"`
ResourceAttributes ResourceAttributesConfig `mapstructure:"resource_attributes"`
diff --git a/internal/collector/nginxossreceiver/internal/metadata/generated_metrics.go b/internal/collector/nginxossreceiver/internal/metadata/generated_metrics.go
index 7a0aa95bd..a1b3af84a 100644
--- a/internal/collector/nginxossreceiver/internal/metadata/generated_metrics.go
+++ b/internal/collector/nginxossreceiver/internal/metadata/generated_metrics.go
@@ -23,6 +23,8 @@ const (
AttributeNginxConnectionsOutcomeREADING
AttributeNginxConnectionsOutcomeWRITING
AttributeNginxConnectionsOutcomeWAITING
+ AttributeNginxConnectionsOutcomeDROPPED
+ AttributeNginxConnectionsOutcomeIDLE
)
// String returns the string representation of the AttributeNginxConnectionsOutcome.
@@ -40,6 +42,10 @@ func (av AttributeNginxConnectionsOutcome) String() string {
return "WRITING"
case AttributeNginxConnectionsOutcomeWAITING:
return "WAITING"
+ case AttributeNginxConnectionsOutcomeDROPPED:
+ return "DROPPED"
+ case AttributeNginxConnectionsOutcomeIDLE:
+ return "IDLE"
}
return ""
}
@@ -52,6 +58,8 @@ var MapAttributeNginxConnectionsOutcome = map[string]AttributeNginxConnectionsOu
"READING": AttributeNginxConnectionsOutcomeREADING,
"WRITING": AttributeNginxConnectionsOutcomeWRITING,
"WAITING": AttributeNginxConnectionsOutcomeWAITING,
+ "DROPPED": AttributeNginxConnectionsOutcomeDROPPED,
+ "IDLE": AttributeNginxConnectionsOutcomeIDLE,
}
// AttributeNginxStatusRange specifies the value nginx.status_range attribute.
@@ -335,7 +343,7 @@ type metricNginxHTTPResponseCount struct {
// init fills nginx.http.response.count metric with initial data.
func (m *metricNginxHTTPResponseCount) init() {
m.data.SetName("nginx.http.response.count")
- m.data.SetDescription("The total number of HTTP responses since the last collection interval, grouped by status code range.")
+ m.data.SetDescription("The total number of HTTP responses, since the last collection interval and grouped by status code range.")
m.data.SetUnit("responses")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
diff --git a/internal/collector/nginxossreceiver/internal/metadata/generated_metrics_test.go b/internal/collector/nginxossreceiver/internal/metadata/generated_metrics_test.go
index fd57a5c0c..fe3d51337 100644
--- a/internal/collector/nginxossreceiver/internal/metadata/generated_metrics_test.go
+++ b/internal/collector/nginxossreceiver/internal/metadata/generated_metrics_test.go
@@ -176,7 +176,7 @@ func TestMetricsBuilder(t *testing.T) {
validatedMetrics["nginx.http.response.count"] = true
assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
- assert.Equal(t, "The total number of HTTP responses since the last collection interval, grouped by status code range.", ms.At(i).Description())
+ assert.Equal(t, "The total number of HTTP responses, since the last collection interval and grouped by status code range.", ms.At(i).Description())
assert.Equal(t, "responses", ms.At(i).Unit())
dp := ms.At(i).Gauge().DataPoints().At(0)
assert.Equal(t, start, dp.StartTimestamp())
diff --git a/internal/collector/nginxossreceiver/internal/metadata/generated_status.go b/internal/collector/nginxossreceiver/internal/metadata/generated_status.go
index 3eb166a1a..7f650e234 100644
--- a/internal/collector/nginxossreceiver/internal/metadata/generated_status.go
+++ b/internal/collector/nginxossreceiver/internal/metadata/generated_status.go
@@ -7,8 +7,8 @@ import (
)
var (
- Type = component.MustNewType("nginx")
- ScopeName = "otelcol/nginxreceiver"
+ Type = component.MustNewType("nginxoss")
+ ScopeName = "otelcol/nginxossreceiver"
)
const (
diff --git a/internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper.go b/internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper.go
index b5835ef56..8242fd940 100644
--- a/internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper.go
+++ b/internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper.go
@@ -164,7 +164,7 @@ func (nls *NginxLogScraper) Scrape(_ context.Context) (pmetric.Metrics, error) {
timeNow := pcommon.NewTimestampFromTime(time.Now())
nls.rb.SetInstanceID(nls.settings.ID.Name())
- nls.rb.SetInstanceType("nginx")
+ nls.rb.SetInstanceType("nginxoss")
nls.logger.Debug("NGINX OSS access log resource info", zap.Any("resource", nls.rb))
nls.mb.RecordNginxHTTPResponseCountDataPoint(
@@ -172,13 +172,11 @@ func (nls *NginxLogScraper) Scrape(_ context.Context) (pmetric.Metrics, error) {
nginxMetrics.responseStatuses.oneHundredStatusRange,
metadata.AttributeNginxStatusRange1xx,
)
-
nls.mb.RecordNginxHTTPResponseCountDataPoint(
timeNow,
nginxMetrics.responseStatuses.twoHundredStatusRange,
metadata.AttributeNginxStatusRange2xx,
)
-
nls.mb.RecordNginxHTTPResponseCountDataPoint(
timeNow,
nginxMetrics.responseStatuses.threeHundredStatusRange,
diff --git a/internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper_test.go b/internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper_test.go
index 840e15033..94a4e4bf7 100644
--- a/internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper_test.go
+++ b/internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper_test.go
@@ -35,7 +35,7 @@ const (
func TestAccessLogScraper_ID(t *testing.T) {
nls := &NginxLogScraper{}
- assert.Equal(t, "nginx", nls.ID().Type().String())
+ assert.Equal(t, "nginxoss", nls.ID().Type().String())
}
func TestAccessLogScraper(t *testing.T) {
diff --git a/internal/collector/nginxossreceiver/internal/scraper/accesslog/testdata/expected.yaml b/internal/collector/nginxossreceiver/internal/scraper/accesslog/testdata/expected.yaml
index 03492a08b..fa43ddf84 100644
--- a/internal/collector/nginxossreceiver/internal/scraper/accesslog/testdata/expected.yaml
+++ b/internal/collector/nginxossreceiver/internal/scraper/accesslog/testdata/expected.yaml
@@ -3,12 +3,14 @@ resourceMetrics:
attributes:
- key: instance.type
value:
- stringValue: nginx
+ stringValue: nginxoss
scopeMetrics:
- metrics:
- - description: The total number of HTTP responses since the last collection interval, grouped by status code range.
+ - description: The total number of HTTP responses, since the last collection interval and grouped by status code range.
name: nginx.http.response.count
gauge:
+ aggregationTemporality: 2
+ isMonotonic: true
dataPoints:
- asInt: 0
attributes:
@@ -38,5 +40,5 @@ resourceMetrics:
timeUnixNano: "1000000"
unit: responses
scope:
- name: otelcol/nginxreceiver
+ name: otelcol/nginxossreceiver
version: latest
diff --git a/internal/collector/nginxossreceiver/internal/scraper/stubstatus/stub_status_scraper.go b/internal/collector/nginxossreceiver/internal/scraper/stubstatus/stub_status_scraper.go
index f9173a1b8..2380a2ec6 100644
--- a/internal/collector/nginxossreceiver/internal/scraper/stubstatus/stub_status_scraper.go
+++ b/internal/collector/nginxossreceiver/internal/scraper/stubstatus/stub_status_scraper.go
@@ -113,7 +113,7 @@ func (s *NginxStubStatusScraper) Scrape(context.Context) (pmetric.Metrics, error
}
s.rb.SetInstanceID(s.settings.ID.Name())
- s.rb.SetInstanceType("nginx")
+ s.rb.SetInstanceType("nginxoss")
s.settings.Logger.Debug("NGINX OSS stub status resource info", zap.Any("resource", s.rb))
now := pcommon.NewTimestampFromTime(time.Now())
diff --git a/internal/collector/nginxossreceiver/internal/scraper/stubstatus/testdata/expected.yaml b/internal/collector/nginxossreceiver/internal/scraper/stubstatus/testdata/expected.yaml
index a0a424f0b..c5b37652d 100644
--- a/internal/collector/nginxossreceiver/internal/scraper/stubstatus/testdata/expected.yaml
+++ b/internal/collector/nginxossreceiver/internal/scraper/stubstatus/testdata/expected.yaml
@@ -3,7 +3,7 @@ resourceMetrics:
attributes:
- key: instance.type
value:
- stringValue: nginx
+ stringValue: nginxoss
scopeMetrics:
- metrics:
- description: The total number of connections, since NGINX was last started or reloaded.
@@ -69,5 +69,5 @@ resourceMetrics:
isMonotonic: true
unit: requests
scope:
- name: otelcol/nginxreceiver
+ name: otelcol/nginxossreceiver
version: latest
diff --git a/internal/collector/nginxossreceiver/internal/scraper/stubstatus/testdata/expected_with_connections_as_gauge.yaml b/internal/collector/nginxossreceiver/internal/scraper/stubstatus/testdata/expected_with_connections_as_gauge.yaml
index e3fbc52ab..05e08752b 100644
--- a/internal/collector/nginxossreceiver/internal/scraper/stubstatus/testdata/expected_with_connections_as_gauge.yaml
+++ b/internal/collector/nginxossreceiver/internal/scraper/stubstatus/testdata/expected_with_connections_as_gauge.yaml
@@ -3,7 +3,7 @@ resourceMetrics:
attributes:
- key: instance.type
value:
- stringValue: nginx
+ stringValue: nginxoss
scopeMetrics:
- metrics:
- description: The total number of connections, since NGINX was last started or reloaded.
@@ -64,7 +64,7 @@ resourceMetrics:
unit: requests
- description: The total number of client requests received, since the last collection interval.
name: nginx.http.request.count
- gauge:
+ sum:
aggregationTemporality: 2
dataPoints:
- asInt: "31070465"
@@ -72,5 +72,5 @@ resourceMetrics:
isMonotonic: true
unit: requests
scope:
- name: otelcol/nginxreceiver
+ name: otelcol/nginxossreceiver
version: latest
diff --git a/internal/collector/nginxossreceiver/metadata.yaml b/internal/collector/nginxossreceiver/metadata.yaml
index db5ecb2e4..4a3e6dffb 100644
--- a/internal/collector/nginxossreceiver/metadata.yaml
+++ b/internal/collector/nginxossreceiver/metadata.yaml
@@ -1,5 +1,7 @@
-type: nginx
-scope_name: otelcol/nginxreceiver
+# NOTE: THIS FILE IS AUTOGENERATED. DO NOT EDIT BY HAND.
+
+type: nginxoss
+scope_name: otelcol/nginxossreceiver
status:
class: receiver
@@ -21,7 +23,7 @@ resource_attributes:
attributes:
nginx.connections.outcome:
- description: The outcome of the connection.
+ description: "The outcome of a connection"
type: string
enum:
- "ACCEPTED"
@@ -30,8 +32,10 @@ attributes:
- "READING"
- "WRITING"
- "WAITING"
+ - "DROPPED"
+ - "IDLE"
nginx.status_range:
- description: A status code range or bucket for a HTTP response's status code.
+ description: "A status code range or bucket for a HTTP response's status code."
type: string
enum:
- "1xx"
@@ -39,45 +43,44 @@ attributes:
- "3xx"
- "4xx"
- "5xx"
-
metrics:
- nginx.http.requests:
+ nginx.http.connections:
enabled: true
- description: The total number of client requests received, since NGINX was last started or reloaded.
+ description: "The total number of connections, since NGINX was last started or reloaded."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
- unit: "requests"
- nginx.http.request.count:
+ unit: "connections"
+ attributes:
+ - nginx.connections.outcome
+ nginx.http.connection.count:
enabled: true
- description: The total number of client requests received, since the last collection interval.
+ description: "The current number of connections."
gauge:
value_type: int
- unit: "requests"
- nginx.http.connections:
+ unit: "connections"
+ attributes:
+ - nginx.connections.outcome
+ nginx.http.requests:
enabled: true
- description: The total number of connections, since NGINX was last started or reloaded.
+ description: "The total number of client requests received, since NGINX was last started or reloaded."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
- unit: "connections"
- attributes:
- - nginx.connections.outcome
- nginx.http.connection.count:
+ unit: "requests"
+ nginx.http.request.count:
enabled: true
- description: The current number of connections.
+ description: "The total number of client requests received, since the last collection interval."
gauge:
value_type: int
- unit: "connections"
- attributes:
- - nginx.connections.outcome
+ unit: "requests"
nginx.http.response.count:
enabled: true
- description: The total number of HTTP responses since the last collection interval, grouped by status code range.
+ description: "The total number of HTTP responses, since the last collection interval and grouped by status code range."
gauge:
value_type: int
unit: "responses"
- attributes:
- - nginx.status_range
+ attributes:
+ - nginx.status_range
\ No newline at end of file
diff --git a/internal/collector/nginxplusreceiver/documentation.md b/internal/collector/nginxplusreceiver/documentation.md
index 67fc5abb3..8f8e8c340 100644
--- a/internal/collector/nginxplusreceiver/documentation.md
+++ b/internal/collector/nginxplusreceiver/documentation.md
@@ -24,8 +24,8 @@ The total number of bytes read from the cache or proxied server.
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.cache.outcome | The outcome for an attempt to fetch content from NGINX's cache. | Str: ``BYPASS``, ``EXPIRED``, ``HIT``, ``MISS``, ``REVALIDATED``, ``STALE``, ``UPDATING`` |
| nginx.cache.name | The name of the cache. | Any Str |
+| nginx.cache.outcome | The outcome for an attempt to fetch content from NGINX's cache | Str: ``BYPASS``, ``EXPIRED``, ``HIT``, ``MISS``, ``REVALIDATED``, ``STALE``, ``UPDATING`` |
### nginx.cache.memory.limit
@@ -67,8 +67,8 @@ The total number of responses read from the cache or proxied server.
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.cache.outcome | The outcome for an attempt to fetch content from NGINX's cache. | Str: ``BYPASS``, ``EXPIRED``, ``HIT``, ``MISS``, ``REVALIDATED``, ``STALE``, ``UPDATING`` |
| nginx.cache.name | The name of the cache. | Any Str |
+| nginx.cache.outcome | The outcome for an attempt to fetch content from NGINX's cache | Str: ``BYPASS``, ``EXPIRED``, ``HIT``, ``MISS``, ``REVALIDATED``, ``STALE``, ``UPDATING`` |
### nginx.config.reloads
@@ -90,7 +90,7 @@ The current number of connections.
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.connections.outcome | The outcome of the connection. | Str: ``ACCEPTED``, ``ACTIVE``, ``DROPPED``, ``IDLE`` |
+| nginx.connections.outcome | The outcome of a connection | Str: ``ACCEPTED``, ``ACTIVE``, ``HANDLED``, ``READING``, ``WRITING``, ``WAITING``, ``DROPPED``, ``IDLE`` |
### nginx.http.connections
@@ -104,7 +104,7 @@ The total number of connections.
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.connections.outcome | The outcome of the connection. | Str: ``ACCEPTED``, ``ACTIVE``, ``DROPPED``, ``IDLE`` |
+| nginx.connections.outcome | The outcome of a connection | Str: ``ACCEPTED``, ``ACTIVE``, ``HANDLED``, ``READING``, ``WRITING``, ``WAITING``, ``DROPPED``, ``IDLE`` |
### nginx.http.limit_conn.requests
@@ -207,7 +207,7 @@ The total number of client requests received, since NGINX was last started or re
### nginx.http.response.count
-The total number of HTTP responses sent to clients since the last collection interval, grouped by status code range.
+The total number of HTTP responses since the last collection interval, grouped by status code range.
| Unit | Metric Type | Value Type |
| ---- | ----------- | ---------- |
@@ -239,7 +239,7 @@ The total number of responses since NGINX was last started or reloaded, grouped
### nginx.http.responses
-The total number of HTTP responses sent to clients, since NGINX was last started or reloaded.
+The total number of HTTP responses sent to clients.
| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
| ---- | ----------- | ---------- | ----------------------- | --------- |
@@ -264,8 +264,8 @@ The current number of idle keepalive connections per HTTP upstream.
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.http.upstream.peer.connection.count
@@ -279,10 +279,10 @@ The average number of active connections per HTTP upstream peer.
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
-| nginx.upstream.name | The name of the upstream block. | Any Str |
| nginx.peer.address | The address of the peer. | Any Str |
| nginx.peer.name | The name of the peer. | Any Str |
+| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.http.upstream.peer.count
@@ -297,8 +297,8 @@ The current count of peers on the HTTP upstream grouped by state.
| Name | Description | Values |
| ---- | ----------- | ------ |
| nginx.peer.state | The current state of an upstream peer. | Str: ``CHECKING``, ``DOWN``, ``DRAINING``, ``UNAVAILABLE``, ``UNHEALTHY``, ``UP`` |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.http.upstream.peer.fails
@@ -312,10 +312,10 @@ The total number of unsuccessful attempts to communicate with the HTTP upstream
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
-| nginx.upstream.name | The name of the upstream block. | Any Str |
| nginx.peer.address | The address of the peer. | Any Str |
| nginx.peer.name | The name of the peer. | Any Str |
+| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.http.upstream.peer.header.time
@@ -329,10 +329,10 @@ The average time to get the response header from the HTTP upstream peer.
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
-| nginx.upstream.name | The name of the upstream block. | Any Str |
| nginx.peer.address | The address of the peer. | Any Str |
| nginx.peer.name | The name of the peer. | Any Str |
+| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.http.upstream.peer.health_checks
@@ -347,10 +347,10 @@ The total number of health check requests made to a HTTP upstream peer.
| Name | Description | Values |
| ---- | ----------- | ------ |
| nginx.health_check | The state received from a health check. | Str: ``UNHEALTHY``, ``FAIL`` |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
-| nginx.upstream.name | The name of the upstream block. | Any Str |
| nginx.peer.address | The address of the peer. | Any Str |
| nginx.peer.name | The name of the peer. | Any Str |
+| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.http.upstream.peer.io
@@ -365,10 +365,10 @@ The total number of byte IO per HTTP upstream peer.
| Name | Description | Values |
| ---- | ----------- | ------ |
| nginx.io.direction | The direction of byte traffic. | Str: ``receive``, ``transmit`` |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
-| nginx.upstream.name | The name of the upstream block. | Any Str |
| nginx.peer.address | The address of the peer. | Any Str |
| nginx.peer.name | The name of the peer. | Any Str |
+| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.http.upstream.peer.requests
@@ -382,10 +382,10 @@ The total number of client requests forwarded to the HTTP upstream peer.
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
-| nginx.upstream.name | The name of the upstream block. | Any Str |
| nginx.peer.address | The address of the peer. | Any Str |
| nginx.peer.name | The name of the peer. | Any Str |
+| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.http.upstream.peer.response.time
@@ -399,10 +399,10 @@ The average time to get the full response from the HTTP upstream peer.
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
-| nginx.upstream.name | The name of the upstream block. | Any Str |
| nginx.peer.address | The address of the peer. | Any Str |
| nginx.peer.name | The name of the peer. | Any Str |
+| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.http.upstream.peer.responses
@@ -416,11 +416,11 @@ The total number of responses obtained from the HTTP upstream peer grouped by st
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.status_range | A status code range or bucket for a HTTP response's status code. | Str: ``1xx``, ``2xx``, ``3xx``, ``4xx``, ``5xx`` |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
-| nginx.upstream.name | The name of the upstream block. | Any Str |
| nginx.peer.address | The address of the peer. | Any Str |
| nginx.peer.name | The name of the peer. | Any Str |
+| nginx.status_range | A status code range or bucket for a HTTP response's status code. | Str: ``1xx``, ``2xx``, ``3xx``, ``4xx``, ``5xx`` |
+| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.http.upstream.peer.state
@@ -434,15 +434,15 @@ Current state of an upstream peer in deployment.
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.peer.state | The current state of an upstream peer. | Str: ``CHECKING``, ``DOWN``, ``DRAINING``, ``UNAVAILABLE``, ``UNHEALTHY``, ``UP`` |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
-| nginx.upstream.name | The name of the upstream block. | Any Str |
| nginx.peer.address | The address of the peer. | Any Str |
| nginx.peer.name | The name of the peer. | Any Str |
+| nginx.peer.state | The current state of an upstream peer. | Str: ``CHECKING``, ``DOWN``, ``DRAINING``, ``UNAVAILABLE``, ``UNHEALTHY``, ``UP`` |
+| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.http.upstream.peer.unavailables
-Number of times the server became unavailable for client requests (“unavail”).
+The total number of times the server became unavailable for client requests ('unavail').
| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
| ---- | ----------- | ---------- | ----------------------- | --------- |
@@ -452,10 +452,10 @@ Number of times the server became unavailable for client requests (“unavail”
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
-| nginx.upstream.name | The name of the upstream block. | Any Str |
| nginx.peer.address | The address of the peer. | Any Str |
| nginx.peer.name | The name of the peer. | Any Str |
+| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.http.upstream.queue.limit
@@ -469,8 +469,8 @@ The maximum number of requests that can be in the queue at the same time.
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.http.upstream.queue.overflows
@@ -484,8 +484,8 @@ The total number of requests rejected due to the queue overflow.
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.http.upstream.queue.usage
@@ -499,8 +499,8 @@ The current number of requests in the queue.
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.http.upstream.zombie.count
@@ -514,8 +514,8 @@ The current number of upstream peers removed from the group but still processing
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.slab.page.free
@@ -565,7 +565,7 @@ The current percentage of used memory pages.
| Unit | Metric Type | Value Type |
| ---- | ----------- | ---------- |
-| pages | Gauge | Double |
+| pages | Gauge | Int |
#### Attributes
@@ -585,8 +585,8 @@ The number of attempts to allocate memory of specified size.
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.slab.slot.limit | The upper limit for a slab slot, used as the identifier for the slot. | Any Int |
| nginx.slab.slot.allocation.result | Result of an attempt to allocate memory to a slab slot. | Str: ``FAILURE``, ``SUCCESS`` |
+| nginx.slab.slot.limit | The upper limit for a slab slot, used as the identifier for the slot. | Any Int |
| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.slab.slot.free
@@ -645,8 +645,8 @@ The total number of SSL handshakes.
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.ssl.status | The status of a SSL handshake. | Str: ``FAILED``, ``REUSE`` |
| nginx.ssl.handshake.reason | The reason for a SSL handshake failure. | Str: ``NO_COMMON_PROTOCOL``, ``NO_COMMON_CIPHER``, ``TIMEOUT``, ``CERT_REJECTED`` |
+| nginx.ssl.status | The status of a SSL handshake. | Str: ``FAILED``, ``REUSE`` |
### nginx.stream.connection.accepted
@@ -732,10 +732,10 @@ The current number of Stream Upstream Peer connections.
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
-| nginx.upstream.name | The name of the upstream block. | Any Str |
| nginx.peer.address | The address of the peer. | Any Str |
| nginx.peer.name | The name of the peer. | Any Str |
+| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.stream.upstream.peer.connection.time
@@ -749,10 +749,10 @@ The average time to connect to the stream upstream peer.
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
-| nginx.upstream.name | The name of the upstream block. | Any Str |
| nginx.peer.address | The address of the peer. | Any Str |
| nginx.peer.name | The name of the peer. | Any Str |
+| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.stream.upstream.peer.connections
@@ -766,10 +766,10 @@ The total number of client connections forwarded to this stream upstream peer.
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
-| nginx.upstream.name | The name of the upstream block. | Any Str |
| nginx.peer.address | The address of the peer. | Any Str |
| nginx.peer.name | The name of the peer. | Any Str |
+| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.stream.upstream.peer.count
@@ -784,8 +784,8 @@ The current number of stream upstream peers grouped by state.
| Name | Description | Values |
| ---- | ----------- | ------ |
| nginx.peer.state | The current state of an upstream peer. | Str: ``CHECKING``, ``DOWN``, ``DRAINING``, ``UNAVAILABLE``, ``UNHEALTHY``, ``UP`` |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.stream.upstream.peer.fails
@@ -799,9 +799,9 @@ The total number of unsuccessful attempts to communicate with the stream upstrea
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
-| nginx.upstream.name | The name of the upstream block. | Any Str |
| nginx.peer.address | The address of the peer. | Any Str |
+| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.stream.upstream.peer.health_checks
@@ -816,10 +816,10 @@ The total number of health check requests made to the stream upstream peer.
| Name | Description | Values |
| ---- | ----------- | ------ |
| nginx.health_check | The state received from a health check. | Str: ``UNHEALTHY``, ``FAIL`` |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
-| nginx.upstream.name | The name of the upstream block. | Any Str |
| nginx.peer.address | The address of the peer. | Any Str |
| nginx.peer.name | The name of the peer. | Any Str |
+| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.stream.upstream.peer.io
@@ -834,10 +834,10 @@ The total number of Stream Upstream Peer byte IO.
| Name | Description | Values |
| ---- | ----------- | ------ |
| nginx.io.direction | The direction of byte traffic. | Str: ``receive``, ``transmit`` |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
-| nginx.upstream.name | The name of the upstream block. | Any Str |
| nginx.peer.address | The address of the peer. | Any Str |
| nginx.peer.name | The name of the peer. | Any Str |
+| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.stream.upstream.peer.response.time
@@ -851,28 +851,28 @@ The average time to receive the last byte of data for the stream upstream peer.
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
-| nginx.upstream.name | The name of the upstream block. | Any Str |
| nginx.peer.address | The address of the peer. | Any Str |
| nginx.peer.name | The name of the peer. | Any Str |
+| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.stream.upstream.peer.state
-Current state of upstream peers in deployment. If any of the upstream peers in the deployment match the given state then the value will be 1. If no upstream peer is a match then the value will be 0.
+Current state of upstream peers in deployment.
-| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
-| ---- | ----------- | ---------- | ----------------------- | --------- |
-| deployments | Sum | Int | Cumulative | true |
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| deployments | Gauge | Int |
#### Attributes
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.peer.state | The current state of an upstream peer. | Str: ``CHECKING``, ``DOWN``, ``DRAINING``, ``UNAVAILABLE``, ``UNHEALTHY``, ``UP`` |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
-| nginx.upstream.name | The name of the upstream block. | Any Str |
| nginx.peer.address | The address of the peer. | Any Str |
| nginx.peer.name | The name of the peer. | Any Str |
+| nginx.peer.state | The current state of an upstream peer. | Str: ``CHECKING``, ``DOWN``, ``DRAINING``, ``UNAVAILABLE``, ``UNHEALTHY``, ``UP`` |
+| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.stream.upstream.peer.ttfb.time
@@ -886,14 +886,14 @@ The average time to receive the first byte of data for the stream upstream peer.
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
-| nginx.upstream.name | The name of the upstream block. | Any Str |
| nginx.peer.address | The address of the peer. | Any Str |
| nginx.peer.name | The name of the peer. | Any Str |
+| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.stream.upstream.peer.unavailables
-How many times the server became unavailable for client connections (state “unavail”) due to the number of unsuccessful attempts reaching the max_fails threshold.
+How many times the server became unavailable for client connections (state 'unavail') due to the number of unsuccessful attempts reaching the max_fails threshold.
| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
| ---- | ----------- | ---------- | ----------------------- | --------- |
@@ -903,10 +903,10 @@ How many times the server became unavailable for client connections (state “un
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
-| nginx.upstream.name | The name of the upstream block. | Any Str |
| nginx.peer.address | The address of the peer. | Any Str |
| nginx.peer.name | The name of the peer. | Any Str |
+| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
### nginx.stream.upstream.zombie.count
@@ -920,12 +920,12 @@ The current number of peers removed from the group but still processing active c
| Name | Description | Values |
| ---- | ----------- | ------ |
-| nginx.zone.name | The name of the shared memory zone. | Any Str |
| nginx.upstream.name | The name of the upstream block. | Any Str |
+| nginx.zone.name | The name of the shared memory zone. | Any Str |
## Resource Attributes
| Name | Description | Values | Enabled |
| ---- | ----------- | ------ | ------- |
| instance.id | The nginx instance id. | Any Str | true |
-| instance.type | The nginx instance type (nginx, nginxplus) | Any Str | true |
+| instance.type | The nginx instance type (nginx, nginxplus). | Any Str | true |
diff --git a/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics.go b/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics.go
index e812fe359..89b1a7128 100644
--- a/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics.go
+++ b/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics.go
@@ -65,6 +65,10 @@ const (
_ AttributeNginxConnectionsOutcome = iota
AttributeNginxConnectionsOutcomeACCEPTED
AttributeNginxConnectionsOutcomeACTIVE
+ AttributeNginxConnectionsOutcomeHANDLED
+ AttributeNginxConnectionsOutcomeREADING
+ AttributeNginxConnectionsOutcomeWRITING
+ AttributeNginxConnectionsOutcomeWAITING
AttributeNginxConnectionsOutcomeDROPPED
AttributeNginxConnectionsOutcomeIDLE
)
@@ -76,6 +80,14 @@ func (av AttributeNginxConnectionsOutcome) String() string {
return "ACCEPTED"
case AttributeNginxConnectionsOutcomeACTIVE:
return "ACTIVE"
+ case AttributeNginxConnectionsOutcomeHANDLED:
+ return "HANDLED"
+ case AttributeNginxConnectionsOutcomeREADING:
+ return "READING"
+ case AttributeNginxConnectionsOutcomeWRITING:
+ return "WRITING"
+ case AttributeNginxConnectionsOutcomeWAITING:
+ return "WAITING"
case AttributeNginxConnectionsOutcomeDROPPED:
return "DROPPED"
case AttributeNginxConnectionsOutcomeIDLE:
@@ -88,6 +100,10 @@ func (av AttributeNginxConnectionsOutcome) String() string {
var MapAttributeNginxConnectionsOutcome = map[string]AttributeNginxConnectionsOutcome{
"ACCEPTED": AttributeNginxConnectionsOutcomeACCEPTED,
"ACTIVE": AttributeNginxConnectionsOutcomeACTIVE,
+ "HANDLED": AttributeNginxConnectionsOutcomeHANDLED,
+ "READING": AttributeNginxConnectionsOutcomeREADING,
+ "WRITING": AttributeNginxConnectionsOutcomeWRITING,
+ "WAITING": AttributeNginxConnectionsOutcomeWAITING,
"DROPPED": AttributeNginxConnectionsOutcomeDROPPED,
"IDLE": AttributeNginxConnectionsOutcomeIDLE,
}
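The widened outcome enum above is meant to be looked up from raw state names; a hypothetical helper (not part of the generated file, import path assumed from the repository layout) shows the intended usage, with unknown values reported via the second return:

    package nginxplusreceiver

    import (
        "strings"

        // Import path assumed from the directory layout in this patch.
        "github.com/nginx/agent/v3/internal/collector/nginxplusreceiver/internal/metadata"
    )

    // connectionsOutcome maps a raw state string (e.g. "waiting") onto the
    // generated attribute; ok is false for values outside the enum.
    func connectionsOutcome(raw string) (metadata.AttributeNginxConnectionsOutcome, bool) {
        outcome, ok := metadata.MapAttributeNginxConnectionsOutcome[strings.ToUpper(raw)]
        return outcome, ok
    }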
@@ -705,7 +721,7 @@ func (m *metricNginxCacheBytesRead) init() {
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxCacheBytesRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxCacheOutcomeAttributeValue string, nginxCacheNameAttributeValue string) {
+func (m *metricNginxCacheBytesRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxCacheNameAttributeValue string, nginxCacheOutcomeAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -713,8 +729,8 @@ func (m *metricNginxCacheBytesRead) recordDataPoint(start pcommon.Timestamp, ts
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.cache.outcome", nginxCacheOutcomeAttributeValue)
dp.Attributes().PutStr("nginx.cache.name", nginxCacheNameAttributeValue)
+ dp.Attributes().PutStr("nginx.cache.outcome", nginxCacheOutcomeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -861,7 +877,7 @@ func (m *metricNginxCacheResponses) init() {
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxCacheResponses) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxCacheOutcomeAttributeValue string, nginxCacheNameAttributeValue string) {
+func (m *metricNginxCacheResponses) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxCacheNameAttributeValue string, nginxCacheOutcomeAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -869,8 +885,8 @@ func (m *metricNginxCacheResponses) recordDataPoint(start pcommon.Timestamp, ts
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.cache.outcome", nginxCacheOutcomeAttributeValue)
dp.Attributes().PutStr("nginx.cache.name", nginxCacheNameAttributeValue)
+ dp.Attributes().PutStr("nginx.cache.outcome", nginxCacheOutcomeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1434,7 +1450,7 @@ type metricNginxHTTPResponseCount struct {
// init fills nginx.http.response.count metric with initial data.
func (m *metricNginxHTTPResponseCount) init() {
m.data.SetName("nginx.http.response.count")
- m.data.SetDescription("The total number of HTTP responses sent to clients since the last collection interval, grouped by status code range.")
+ m.data.SetDescription("The total number of HTTP responses, since the last collection interval and grouped by status code range.")
m.data.SetUnit("responses")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
@@ -1542,7 +1558,7 @@ type metricNginxHTTPResponses struct {
// init fills nginx.http.responses metric with initial data.
func (m *metricNginxHTTPResponses) init() {
m.data.SetName("nginx.http.responses")
- m.data.SetDescription("The total number of HTTP responses sent to clients, since NGINX was last started or reloaded.")
+ m.data.SetDescription("The total number of HTTP responses sent to clients.")
m.data.SetUnit("responses")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
@@ -1602,7 +1618,7 @@ func (m *metricNginxHTTPUpstreamKeepaliveCount) init() {
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxHTTPUpstreamKeepaliveCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string) {
+func (m *metricNginxHTTPUpstreamKeepaliveCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -1610,8 +1626,8 @@ func (m *metricNginxHTTPUpstreamKeepaliveCount) recordDataPoint(start pcommon.Ti
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1654,7 +1670,7 @@ func (m *metricNginxHTTPUpstreamPeerConnectionCount) init() {
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxHTTPUpstreamPeerConnectionCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
+func (m *metricNginxHTTPUpstreamPeerConnectionCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -1662,10 +1678,10 @@ func (m *metricNginxHTTPUpstreamPeerConnectionCount) recordDataPoint(start pcomm
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
- dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
dp.Attributes().PutStr("nginx.peer.address", nginxPeerAddressAttributeValue)
dp.Attributes().PutStr("nginx.peer.name", nginxPeerNameAttributeValue)
+ dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
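Every recordDataPoint reordering in this file follows the same rule: attributes are now emitted in alphabetical order, so peer-level metrics take nginx.peer.address, nginx.peer.name, nginx.upstream.name, nginx.zone.name in that order instead of zone and upstream first. A hypothetical call site against the new ordering (the exported wrapper name follows mdatagen's Record<Metric>DataPoint convention and is assumed; the literal values are placeholders; imports as in the sketches above):

    // recordPeerConnections shows the regenerated, alphabetical argument order.
    func recordPeerConnections(mb *metadata.MetricsBuilder, now pcommon.Timestamp, active int64) {
        mb.RecordNginxHTTPUpstreamPeerConnectionCountDataPoint(
            now, active,
            "10.0.0.1:8080", // nginx.peer.address
            "backend-1",     // nginx.peer.name
            "http_backend",  // nginx.upstream.name
            "http_zone",     // nginx.zone.name
        )
    }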
@@ -1708,7 +1724,7 @@ func (m *metricNginxHTTPUpstreamPeerCount) init() {
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxHTTPUpstreamPeerCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxPeerStateAttributeValue string, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string) {
+func (m *metricNginxHTTPUpstreamPeerCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxPeerStateAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -1717,8 +1733,8 @@ func (m *metricNginxHTTPUpstreamPeerCount) recordDataPoint(start pcommon.Timesta
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("nginx.peer.state", nginxPeerStateAttributeValue)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1763,7 +1779,7 @@ func (m *metricNginxHTTPUpstreamPeerFails) init() {
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxHTTPUpstreamPeerFails) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
+func (m *metricNginxHTTPUpstreamPeerFails) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -1771,10 +1787,10 @@ func (m *metricNginxHTTPUpstreamPeerFails) recordDataPoint(start pcommon.Timesta
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
- dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
dp.Attributes().PutStr("nginx.peer.address", nginxPeerAddressAttributeValue)
dp.Attributes().PutStr("nginx.peer.name", nginxPeerNameAttributeValue)
+ dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1817,7 +1833,7 @@ func (m *metricNginxHTTPUpstreamPeerHeaderTime) init() {
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxHTTPUpstreamPeerHeaderTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
+func (m *metricNginxHTTPUpstreamPeerHeaderTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -1825,10 +1841,10 @@ func (m *metricNginxHTTPUpstreamPeerHeaderTime) recordDataPoint(start pcommon.Ti
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
- dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
dp.Attributes().PutStr("nginx.peer.address", nginxPeerAddressAttributeValue)
dp.Attributes().PutStr("nginx.peer.name", nginxPeerNameAttributeValue)
+ dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1873,7 +1889,7 @@ func (m *metricNginxHTTPUpstreamPeerHealthChecks) init() {
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxHTTPUpstreamPeerHealthChecks) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxHealthCheckAttributeValue string, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
+func (m *metricNginxHTTPUpstreamPeerHealthChecks) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxHealthCheckAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -1882,10 +1898,10 @@ func (m *metricNginxHTTPUpstreamPeerHealthChecks) recordDataPoint(start pcommon.
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("nginx.health_check", nginxHealthCheckAttributeValue)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
- dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
dp.Attributes().PutStr("nginx.peer.address", nginxPeerAddressAttributeValue)
dp.Attributes().PutStr("nginx.peer.name", nginxPeerNameAttributeValue)
+ dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1930,7 +1946,7 @@ func (m *metricNginxHTTPUpstreamPeerIo) init() {
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxHTTPUpstreamPeerIo) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxIoDirectionAttributeValue string, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
+func (m *metricNginxHTTPUpstreamPeerIo) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxIoDirectionAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -1939,10 +1955,10 @@ func (m *metricNginxHTTPUpstreamPeerIo) recordDataPoint(start pcommon.Timestamp,
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("nginx.io.direction", nginxIoDirectionAttributeValue)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
- dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
dp.Attributes().PutStr("nginx.peer.address", nginxPeerAddressAttributeValue)
dp.Attributes().PutStr("nginx.peer.name", nginxPeerNameAttributeValue)
+ dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1987,7 +2003,7 @@ func (m *metricNginxHTTPUpstreamPeerRequests) init() {
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxHTTPUpstreamPeerRequests) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
+func (m *metricNginxHTTPUpstreamPeerRequests) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -1995,10 +2011,10 @@ func (m *metricNginxHTTPUpstreamPeerRequests) recordDataPoint(start pcommon.Time
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
- dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
dp.Attributes().PutStr("nginx.peer.address", nginxPeerAddressAttributeValue)
dp.Attributes().PutStr("nginx.peer.name", nginxPeerNameAttributeValue)
+ dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2041,7 +2057,7 @@ func (m *metricNginxHTTPUpstreamPeerResponseTime) init() {
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxHTTPUpstreamPeerResponseTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
+func (m *metricNginxHTTPUpstreamPeerResponseTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -2049,10 +2065,10 @@ func (m *metricNginxHTTPUpstreamPeerResponseTime) recordDataPoint(start pcommon.
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
- dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
dp.Attributes().PutStr("nginx.peer.address", nginxPeerAddressAttributeValue)
dp.Attributes().PutStr("nginx.peer.name", nginxPeerNameAttributeValue)
+ dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2097,7 +2113,7 @@ func (m *metricNginxHTTPUpstreamPeerResponses) init() {
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxHTTPUpstreamPeerResponses) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxStatusRangeAttributeValue string, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
+func (m *metricNginxHTTPUpstreamPeerResponses) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxStatusRangeAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -2105,11 +2121,11 @@ func (m *metricNginxHTTPUpstreamPeerResponses) recordDataPoint(start pcommon.Tim
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.status_range", nginxStatusRangeAttributeValue)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
- dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
dp.Attributes().PutStr("nginx.peer.address", nginxPeerAddressAttributeValue)
dp.Attributes().PutStr("nginx.peer.name", nginxPeerNameAttributeValue)
+ dp.Attributes().PutStr("nginx.status_range", nginxStatusRangeAttributeValue)
+ dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2152,7 +2168,7 @@ func (m *metricNginxHTTPUpstreamPeerState) init() {
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxHTTPUpstreamPeerState) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxPeerStateAttributeValue string, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
+func (m *metricNginxHTTPUpstreamPeerState) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxPeerStateAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -2160,11 +2176,11 @@ func (m *metricNginxHTTPUpstreamPeerState) recordDataPoint(start pcommon.Timesta
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.peer.state", nginxPeerStateAttributeValue)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
- dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
dp.Attributes().PutStr("nginx.peer.address", nginxPeerAddressAttributeValue)
dp.Attributes().PutStr("nginx.peer.name", nginxPeerNameAttributeValue)
+ dp.Attributes().PutStr("nginx.peer.state", nginxPeerStateAttributeValue)
+ dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2201,7 +2217,7 @@ type metricNginxHTTPUpstreamPeerUnavailables struct {
// init fills nginx.http.upstream.peer.unavailables metric with initial data.
func (m *metricNginxHTTPUpstreamPeerUnavailables) init() {
m.data.SetName("nginx.http.upstream.peer.unavailables")
- m.data.SetDescription("Number of times the server became unavailable for client requests (“unavail”).")
+ m.data.SetDescription("The total number of times the server became unavailable for client requests ('unavail').")
m.data.SetUnit("requests")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
@@ -2209,7 +2225,7 @@ func (m *metricNginxHTTPUpstreamPeerUnavailables) init() {
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxHTTPUpstreamPeerUnavailables) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
+func (m *metricNginxHTTPUpstreamPeerUnavailables) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -2217,10 +2233,10 @@ func (m *metricNginxHTTPUpstreamPeerUnavailables) recordDataPoint(start pcommon.
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
- dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
dp.Attributes().PutStr("nginx.peer.address", nginxPeerAddressAttributeValue)
dp.Attributes().PutStr("nginx.peer.name", nginxPeerNameAttributeValue)
+ dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2263,7 +2279,7 @@ func (m *metricNginxHTTPUpstreamQueueLimit) init() {
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxHTTPUpstreamQueueLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string) {
+func (m *metricNginxHTTPUpstreamQueueLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -2271,8 +2287,8 @@ func (m *metricNginxHTTPUpstreamQueueLimit) recordDataPoint(start pcommon.Timest
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2317,7 +2333,7 @@ func (m *metricNginxHTTPUpstreamQueueOverflows) init() {
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxHTTPUpstreamQueueOverflows) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string) {
+func (m *metricNginxHTTPUpstreamQueueOverflows) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -2325,8 +2341,8 @@ func (m *metricNginxHTTPUpstreamQueueOverflows) recordDataPoint(start pcommon.Ti
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2369,7 +2385,7 @@ func (m *metricNginxHTTPUpstreamQueueUsage) init() {
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxHTTPUpstreamQueueUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string) {
+func (m *metricNginxHTTPUpstreamQueueUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -2377,8 +2393,8 @@ func (m *metricNginxHTTPUpstreamQueueUsage) recordDataPoint(start pcommon.Timest
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2421,7 +2437,7 @@ func (m *metricNginxHTTPUpstreamZombieCount) init() {
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxHTTPUpstreamZombieCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string) {
+func (m *metricNginxHTTPUpstreamZombieCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -2429,8 +2445,8 @@ func (m *metricNginxHTTPUpstreamZombieCount) recordDataPoint(start pcommon.Times
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2626,14 +2642,14 @@ func (m *metricNginxSlabPageUtilization) init() {
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxSlabPageUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, nginxZoneNameAttributeValue string) {
+func (m *metricNginxSlabPageUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
- dp.SetDoubleValue(val)
+ dp.SetIntValue(val)
dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
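The utilization recorder above now takes an int64 and calls SetIntValue, matching the Double to Int change in documentation.md. A minimal sketch of the derivation, under the assumption that the scraper computes the percentage from the slab zone's used and free page counts (the helper name is hypothetical):

    // slabPageUtilization returns the used-page percentage as an int64, the
    // value type the regenerated recordDataPoint now expects.
    func slabPageUtilization(usedPages, freePages int64) int64 {
        total := usedPages + freePages
        if total == 0 {
            return 0
        }
        return usedPages * 100 / total
    }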
@@ -2679,7 +2695,7 @@ func (m *metricNginxSlabSlotAllocations) init() {
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxSlabSlotAllocations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxSlabSlotLimitAttributeValue int64, nginxSlabSlotAllocationResultAttributeValue string, nginxZoneNameAttributeValue string) {
+func (m *metricNginxSlabSlotAllocations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxSlabSlotAllocationResultAttributeValue string, nginxSlabSlotLimitAttributeValue int64, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -2687,8 +2703,8 @@ func (m *metricNginxSlabSlotAllocations) recordDataPoint(start pcommon.Timestamp
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutInt("nginx.slab.slot.limit", nginxSlabSlotLimitAttributeValue)
dp.Attributes().PutStr("nginx.slab.slot.allocation.result", nginxSlabSlotAllocationResultAttributeValue)
+ dp.Attributes().PutInt("nginx.slab.slot.limit", nginxSlabSlotLimitAttributeValue)
dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
@@ -2891,7 +2907,7 @@ func (m *metricNginxSslHandshakes) init() {
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxSslHandshakes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxSslStatusAttributeValue string, nginxSslHandshakeReasonAttributeValue string) {
+func (m *metricNginxSslHandshakes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxSslHandshakeReasonAttributeValue string, nginxSslStatusAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -2899,8 +2915,8 @@ func (m *metricNginxSslHandshakes) recordDataPoint(start pcommon.Timestamp, ts p
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.ssl.status", nginxSslStatusAttributeValue)
dp.Attributes().PutStr("nginx.ssl.handshake.reason", nginxSslHandshakeReasonAttributeValue)
+ dp.Attributes().PutStr("nginx.ssl.status", nginxSslStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3208,7 +3224,7 @@ func (m *metricNginxStreamUpstreamPeerConnectionCount) init() {
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxStreamUpstreamPeerConnectionCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
+func (m *metricNginxStreamUpstreamPeerConnectionCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -3216,10 +3232,10 @@ func (m *metricNginxStreamUpstreamPeerConnectionCount) recordDataPoint(start pco
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
- dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
dp.Attributes().PutStr("nginx.peer.address", nginxPeerAddressAttributeValue)
dp.Attributes().PutStr("nginx.peer.name", nginxPeerNameAttributeValue)
+ dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3262,7 +3278,7 @@ func (m *metricNginxStreamUpstreamPeerConnectionTime) init() {
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxStreamUpstreamPeerConnectionTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
+func (m *metricNginxStreamUpstreamPeerConnectionTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -3270,10 +3286,10 @@ func (m *metricNginxStreamUpstreamPeerConnectionTime) recordDataPoint(start pcom
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
- dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
dp.Attributes().PutStr("nginx.peer.address", nginxPeerAddressAttributeValue)
dp.Attributes().PutStr("nginx.peer.name", nginxPeerNameAttributeValue)
+ dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3318,7 +3334,7 @@ func (m *metricNginxStreamUpstreamPeerConnections) init() {
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxStreamUpstreamPeerConnections) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
+func (m *metricNginxStreamUpstreamPeerConnections) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -3326,10 +3342,10 @@ func (m *metricNginxStreamUpstreamPeerConnections) recordDataPoint(start pcommon
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
- dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
dp.Attributes().PutStr("nginx.peer.address", nginxPeerAddressAttributeValue)
dp.Attributes().PutStr("nginx.peer.name", nginxPeerNameAttributeValue)
+ dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3372,7 +3388,7 @@ func (m *metricNginxStreamUpstreamPeerCount) init() {
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxStreamUpstreamPeerCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxPeerStateAttributeValue string, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string) {
+func (m *metricNginxStreamUpstreamPeerCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxPeerStateAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -3381,8 +3397,8 @@ func (m *metricNginxStreamUpstreamPeerCount) recordDataPoint(start pcommon.Times
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("nginx.peer.state", nginxPeerStateAttributeValue)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3427,7 +3443,7 @@ func (m *metricNginxStreamUpstreamPeerFails) init() {
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxStreamUpstreamPeerFails) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string) {
+func (m *metricNginxStreamUpstreamPeerFails) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -3435,9 +3451,9 @@ func (m *metricNginxStreamUpstreamPeerFails) recordDataPoint(start pcommon.Times
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
- dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
dp.Attributes().PutStr("nginx.peer.address", nginxPeerAddressAttributeValue)
+ dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3482,7 +3498,7 @@ func (m *metricNginxStreamUpstreamPeerHealthChecks) init() {
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxStreamUpstreamPeerHealthChecks) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxHealthCheckAttributeValue string, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
+func (m *metricNginxStreamUpstreamPeerHealthChecks) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxHealthCheckAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -3491,10 +3507,10 @@ func (m *metricNginxStreamUpstreamPeerHealthChecks) recordDataPoint(start pcommo
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("nginx.health_check", nginxHealthCheckAttributeValue)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
- dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
dp.Attributes().PutStr("nginx.peer.address", nginxPeerAddressAttributeValue)
dp.Attributes().PutStr("nginx.peer.name", nginxPeerNameAttributeValue)
+ dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3539,7 +3555,7 @@ func (m *metricNginxStreamUpstreamPeerIo) init() {
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxStreamUpstreamPeerIo) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxIoDirectionAttributeValue string, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
+func (m *metricNginxStreamUpstreamPeerIo) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxIoDirectionAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -3548,10 +3564,10 @@ func (m *metricNginxStreamUpstreamPeerIo) recordDataPoint(start pcommon.Timestam
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("nginx.io.direction", nginxIoDirectionAttributeValue)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
- dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
dp.Attributes().PutStr("nginx.peer.address", nginxPeerAddressAttributeValue)
dp.Attributes().PutStr("nginx.peer.name", nginxPeerNameAttributeValue)
+ dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3594,7 +3610,7 @@ func (m *metricNginxStreamUpstreamPeerResponseTime) init() {
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxStreamUpstreamPeerResponseTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
+func (m *metricNginxStreamUpstreamPeerResponseTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -3602,10 +3618,10 @@ func (m *metricNginxStreamUpstreamPeerResponseTime) recordDataPoint(start pcommo
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
- dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
dp.Attributes().PutStr("nginx.peer.address", nginxPeerAddressAttributeValue)
dp.Attributes().PutStr("nginx.peer.name", nginxPeerNameAttributeValue)
+ dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3642,39 +3658,37 @@ type metricNginxStreamUpstreamPeerState struct {
// init fills nginx.stream.upstream.peer.state metric with initial data.
func (m *metricNginxStreamUpstreamPeerState) init() {
m.data.SetName("nginx.stream.upstream.peer.state")
- m.data.SetDescription("Current state of upstream peers in deployment. If any of the upstream peers in the deployment match the given state then the value will be 1. If no upstream peer is a match then the value will be 0.")
+ m.data.SetDescription("Current state of upstream peers in deployment.")
m.data.SetUnit("deployments")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+ m.data.SetEmptyGauge()
+ m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxStreamUpstreamPeerState) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxPeerStateAttributeValue string, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
+func (m *metricNginxStreamUpstreamPeerState) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxPeerStateAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
- dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.peer.state", nginxPeerStateAttributeValue)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
- dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
dp.Attributes().PutStr("nginx.peer.address", nginxPeerAddressAttributeValue)
dp.Attributes().PutStr("nginx.peer.name", nginxPeerNameAttributeValue)
+ dp.Attributes().PutStr("nginx.peer.state", nginxPeerStateAttributeValue)
+ dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricNginxStreamUpstreamPeerState) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricNginxStreamUpstreamPeerState) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
@@ -3705,7 +3719,7 @@ func (m *metricNginxStreamUpstreamPeerTtfbTime) init() {
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxStreamUpstreamPeerTtfbTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
+func (m *metricNginxStreamUpstreamPeerTtfbTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -3713,10 +3727,10 @@ func (m *metricNginxStreamUpstreamPeerTtfbTime) recordDataPoint(start pcommon.Ti
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
- dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
dp.Attributes().PutStr("nginx.peer.address", nginxPeerAddressAttributeValue)
dp.Attributes().PutStr("nginx.peer.name", nginxPeerNameAttributeValue)
+ dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3753,7 +3767,7 @@ type metricNginxStreamUpstreamPeerUnavailables struct {
// init fills nginx.stream.upstream.peer.unavailables metric with initial data.
func (m *metricNginxStreamUpstreamPeerUnavailables) init() {
m.data.SetName("nginx.stream.upstream.peer.unavailables")
- m.data.SetDescription("How many times the server became unavailable for client connections (state “unavail”) due to the number of unsuccessful attempts reaching the max_fails threshold.")
+ m.data.SetDescription("How many times the server became unavailable for client connections (state 'unavail') due to the number of unsuccessful attempts reaching the max_fails threshold.")
m.data.SetUnit("requests")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
@@ -3761,7 +3775,7 @@ func (m *metricNginxStreamUpstreamPeerUnavailables) init() {
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxStreamUpstreamPeerUnavailables) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
+func (m *metricNginxStreamUpstreamPeerUnavailables) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -3769,10 +3783,10 @@ func (m *metricNginxStreamUpstreamPeerUnavailables) recordDataPoint(start pcommo
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
- dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
dp.Attributes().PutStr("nginx.peer.address", nginxPeerAddressAttributeValue)
dp.Attributes().PutStr("nginx.peer.name", nginxPeerNameAttributeValue)
+ dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3815,7 +3829,7 @@ func (m *metricNginxStreamUpstreamZombieCount) init() {
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricNginxStreamUpstreamZombieCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string) {
+func (m *metricNginxStreamUpstreamZombieCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
if !m.config.Enabled {
return
}
@@ -3823,8 +3837,8 @@ func (m *metricNginxStreamUpstreamZombieCount) recordDataPoint(start pcommon.Tim
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
- dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
dp.Attributes().PutStr("nginx.upstream.name", nginxUpstreamNameAttributeValue)
+ dp.Attributes().PutStr("nginx.zone.name", nginxZoneNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -4180,8 +4194,8 @@ func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics
}
// RecordNginxCacheBytesReadDataPoint adds a data point to nginx.cache.bytes_read metric.
-func (mb *MetricsBuilder) RecordNginxCacheBytesReadDataPoint(ts pcommon.Timestamp, val int64, nginxCacheOutcomeAttributeValue AttributeNginxCacheOutcome, nginxCacheNameAttributeValue string) {
- mb.metricNginxCacheBytesRead.recordDataPoint(mb.startTime, ts, val, nginxCacheOutcomeAttributeValue.String(), nginxCacheNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxCacheBytesReadDataPoint(ts pcommon.Timestamp, val int64, nginxCacheNameAttributeValue string, nginxCacheOutcomeAttributeValue AttributeNginxCacheOutcome) {
+ mb.metricNginxCacheBytesRead.recordDataPoint(mb.startTime, ts, val, nginxCacheNameAttributeValue, nginxCacheOutcomeAttributeValue.String())
}
// RecordNginxCacheMemoryLimitDataPoint adds a data point to nginx.cache.memory.limit metric.
@@ -4195,8 +4209,8 @@ func (mb *MetricsBuilder) RecordNginxCacheMemoryUsageDataPoint(ts pcommon.Timest
}
// RecordNginxCacheResponsesDataPoint adds a data point to nginx.cache.responses metric.
-func (mb *MetricsBuilder) RecordNginxCacheResponsesDataPoint(ts pcommon.Timestamp, val int64, nginxCacheOutcomeAttributeValue AttributeNginxCacheOutcome, nginxCacheNameAttributeValue string) {
- mb.metricNginxCacheResponses.recordDataPoint(mb.startTime, ts, val, nginxCacheOutcomeAttributeValue.String(), nginxCacheNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxCacheResponsesDataPoint(ts pcommon.Timestamp, val int64, nginxCacheNameAttributeValue string, nginxCacheOutcomeAttributeValue AttributeNginxCacheOutcome) {
+ mb.metricNginxCacheResponses.recordDataPoint(mb.startTime, ts, val, nginxCacheNameAttributeValue, nginxCacheOutcomeAttributeValue.String())
}
// RecordNginxConfigReloadsDataPoint adds a data point to nginx.config.reloads metric.
@@ -4265,83 +4279,83 @@ func (mb *MetricsBuilder) RecordNginxHTTPResponsesDataPoint(ts pcommon.Timestamp
}
// RecordNginxHTTPUpstreamKeepaliveCountDataPoint adds a data point to nginx.http.upstream.keepalive.count metric.
-func (mb *MetricsBuilder) RecordNginxHTTPUpstreamKeepaliveCountDataPoint(ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string) {
- mb.metricNginxHTTPUpstreamKeepaliveCount.recordDataPoint(mb.startTime, ts, val, nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxHTTPUpstreamKeepaliveCountDataPoint(ts pcommon.Timestamp, val int64, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxHTTPUpstreamKeepaliveCount.recordDataPoint(mb.startTime, ts, val, nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxHTTPUpstreamPeerConnectionCountDataPoint adds a data point to nginx.http.upstream.peer.connection.count metric.
-func (mb *MetricsBuilder) RecordNginxHTTPUpstreamPeerConnectionCountDataPoint(ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
- mb.metricNginxHTTPUpstreamPeerConnectionCount.recordDataPoint(mb.startTime, ts, val, nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxHTTPUpstreamPeerConnectionCountDataPoint(ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxHTTPUpstreamPeerConnectionCount.recordDataPoint(mb.startTime, ts, val, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue, nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxHTTPUpstreamPeerCountDataPoint adds a data point to nginx.http.upstream.peer.count metric.
-func (mb *MetricsBuilder) RecordNginxHTTPUpstreamPeerCountDataPoint(ts pcommon.Timestamp, val int64, nginxPeerStateAttributeValue AttributeNginxPeerState, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string) {
- mb.metricNginxHTTPUpstreamPeerCount.recordDataPoint(mb.startTime, ts, val, nginxPeerStateAttributeValue.String(), nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxHTTPUpstreamPeerCountDataPoint(ts pcommon.Timestamp, val int64, nginxPeerStateAttributeValue AttributeNginxPeerState, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxHTTPUpstreamPeerCount.recordDataPoint(mb.startTime, ts, val, nginxPeerStateAttributeValue.String(), nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxHTTPUpstreamPeerFailsDataPoint adds a data point to nginx.http.upstream.peer.fails metric.
-func (mb *MetricsBuilder) RecordNginxHTTPUpstreamPeerFailsDataPoint(ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
- mb.metricNginxHTTPUpstreamPeerFails.recordDataPoint(mb.startTime, ts, val, nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxHTTPUpstreamPeerFailsDataPoint(ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxHTTPUpstreamPeerFails.recordDataPoint(mb.startTime, ts, val, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue, nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxHTTPUpstreamPeerHeaderTimeDataPoint adds a data point to nginx.http.upstream.peer.header.time metric.
-func (mb *MetricsBuilder) RecordNginxHTTPUpstreamPeerHeaderTimeDataPoint(ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
- mb.metricNginxHTTPUpstreamPeerHeaderTime.recordDataPoint(mb.startTime, ts, val, nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxHTTPUpstreamPeerHeaderTimeDataPoint(ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxHTTPUpstreamPeerHeaderTime.recordDataPoint(mb.startTime, ts, val, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue, nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxHTTPUpstreamPeerHealthChecksDataPoint adds a data point to nginx.http.upstream.peer.health_checks metric.
-func (mb *MetricsBuilder) RecordNginxHTTPUpstreamPeerHealthChecksDataPoint(ts pcommon.Timestamp, val int64, nginxHealthCheckAttributeValue AttributeNginxHealthCheck, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
- mb.metricNginxHTTPUpstreamPeerHealthChecks.recordDataPoint(mb.startTime, ts, val, nginxHealthCheckAttributeValue.String(), nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxHTTPUpstreamPeerHealthChecksDataPoint(ts pcommon.Timestamp, val int64, nginxHealthCheckAttributeValue AttributeNginxHealthCheck, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxHTTPUpstreamPeerHealthChecks.recordDataPoint(mb.startTime, ts, val, nginxHealthCheckAttributeValue.String(), nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue, nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxHTTPUpstreamPeerIoDataPoint adds a data point to nginx.http.upstream.peer.io metric.
-func (mb *MetricsBuilder) RecordNginxHTTPUpstreamPeerIoDataPoint(ts pcommon.Timestamp, val int64, nginxIoDirectionAttributeValue AttributeNginxIoDirection, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
- mb.metricNginxHTTPUpstreamPeerIo.recordDataPoint(mb.startTime, ts, val, nginxIoDirectionAttributeValue.String(), nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxHTTPUpstreamPeerIoDataPoint(ts pcommon.Timestamp, val int64, nginxIoDirectionAttributeValue AttributeNginxIoDirection, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxHTTPUpstreamPeerIo.recordDataPoint(mb.startTime, ts, val, nginxIoDirectionAttributeValue.String(), nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue, nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxHTTPUpstreamPeerRequestsDataPoint adds a data point to nginx.http.upstream.peer.requests metric.
-func (mb *MetricsBuilder) RecordNginxHTTPUpstreamPeerRequestsDataPoint(ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
- mb.metricNginxHTTPUpstreamPeerRequests.recordDataPoint(mb.startTime, ts, val, nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxHTTPUpstreamPeerRequestsDataPoint(ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxHTTPUpstreamPeerRequests.recordDataPoint(mb.startTime, ts, val, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue, nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxHTTPUpstreamPeerResponseTimeDataPoint adds a data point to nginx.http.upstream.peer.response.time metric.
-func (mb *MetricsBuilder) RecordNginxHTTPUpstreamPeerResponseTimeDataPoint(ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
- mb.metricNginxHTTPUpstreamPeerResponseTime.recordDataPoint(mb.startTime, ts, val, nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxHTTPUpstreamPeerResponseTimeDataPoint(ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxHTTPUpstreamPeerResponseTime.recordDataPoint(mb.startTime, ts, val, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue, nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxHTTPUpstreamPeerResponsesDataPoint adds a data point to nginx.http.upstream.peer.responses metric.
-func (mb *MetricsBuilder) RecordNginxHTTPUpstreamPeerResponsesDataPoint(ts pcommon.Timestamp, val int64, nginxStatusRangeAttributeValue AttributeNginxStatusRange, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
- mb.metricNginxHTTPUpstreamPeerResponses.recordDataPoint(mb.startTime, ts, val, nginxStatusRangeAttributeValue.String(), nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxHTTPUpstreamPeerResponsesDataPoint(ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxStatusRangeAttributeValue AttributeNginxStatusRange, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxHTTPUpstreamPeerResponses.recordDataPoint(mb.startTime, ts, val, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue, nginxStatusRangeAttributeValue.String(), nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxHTTPUpstreamPeerStateDataPoint adds a data point to nginx.http.upstream.peer.state metric.
-func (mb *MetricsBuilder) RecordNginxHTTPUpstreamPeerStateDataPoint(ts pcommon.Timestamp, val int64, nginxPeerStateAttributeValue AttributeNginxPeerState, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
- mb.metricNginxHTTPUpstreamPeerState.recordDataPoint(mb.startTime, ts, val, nginxPeerStateAttributeValue.String(), nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxHTTPUpstreamPeerStateDataPoint(ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxPeerStateAttributeValue AttributeNginxPeerState, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxHTTPUpstreamPeerState.recordDataPoint(mb.startTime, ts, val, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue, nginxPeerStateAttributeValue.String(), nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxHTTPUpstreamPeerUnavailablesDataPoint adds a data point to nginx.http.upstream.peer.unavailables metric.
-func (mb *MetricsBuilder) RecordNginxHTTPUpstreamPeerUnavailablesDataPoint(ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
- mb.metricNginxHTTPUpstreamPeerUnavailables.recordDataPoint(mb.startTime, ts, val, nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxHTTPUpstreamPeerUnavailablesDataPoint(ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxHTTPUpstreamPeerUnavailables.recordDataPoint(mb.startTime, ts, val, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue, nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxHTTPUpstreamQueueLimitDataPoint adds a data point to nginx.http.upstream.queue.limit metric.
-func (mb *MetricsBuilder) RecordNginxHTTPUpstreamQueueLimitDataPoint(ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string) {
- mb.metricNginxHTTPUpstreamQueueLimit.recordDataPoint(mb.startTime, ts, val, nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxHTTPUpstreamQueueLimitDataPoint(ts pcommon.Timestamp, val int64, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxHTTPUpstreamQueueLimit.recordDataPoint(mb.startTime, ts, val, nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxHTTPUpstreamQueueOverflowsDataPoint adds a data point to nginx.http.upstream.queue.overflows metric.
-func (mb *MetricsBuilder) RecordNginxHTTPUpstreamQueueOverflowsDataPoint(ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string) {
- mb.metricNginxHTTPUpstreamQueueOverflows.recordDataPoint(mb.startTime, ts, val, nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxHTTPUpstreamQueueOverflowsDataPoint(ts pcommon.Timestamp, val int64, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxHTTPUpstreamQueueOverflows.recordDataPoint(mb.startTime, ts, val, nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxHTTPUpstreamQueueUsageDataPoint adds a data point to nginx.http.upstream.queue.usage metric.
-func (mb *MetricsBuilder) RecordNginxHTTPUpstreamQueueUsageDataPoint(ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string) {
- mb.metricNginxHTTPUpstreamQueueUsage.recordDataPoint(mb.startTime, ts, val, nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxHTTPUpstreamQueueUsageDataPoint(ts pcommon.Timestamp, val int64, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxHTTPUpstreamQueueUsage.recordDataPoint(mb.startTime, ts, val, nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxHTTPUpstreamZombieCountDataPoint adds a data point to nginx.http.upstream.zombie.count metric.
-func (mb *MetricsBuilder) RecordNginxHTTPUpstreamZombieCountDataPoint(ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string) {
- mb.metricNginxHTTPUpstreamZombieCount.recordDataPoint(mb.startTime, ts, val, nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxHTTPUpstreamZombieCountDataPoint(ts pcommon.Timestamp, val int64, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxHTTPUpstreamZombieCount.recordDataPoint(mb.startTime, ts, val, nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxSlabPageFreeDataPoint adds a data point to nginx.slab.page.free metric.
@@ -4360,13 +4374,13 @@ func (mb *MetricsBuilder) RecordNginxSlabPageUsageDataPoint(ts pcommon.Timestamp
}
// RecordNginxSlabPageUtilizationDataPoint adds a data point to nginx.slab.page.utilization metric.
-func (mb *MetricsBuilder) RecordNginxSlabPageUtilizationDataPoint(ts pcommon.Timestamp, val float64, nginxZoneNameAttributeValue string) {
+func (mb *MetricsBuilder) RecordNginxSlabPageUtilizationDataPoint(ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string) {
mb.metricNginxSlabPageUtilization.recordDataPoint(mb.startTime, ts, val, nginxZoneNameAttributeValue)
}
// RecordNginxSlabSlotAllocationsDataPoint adds a data point to nginx.slab.slot.allocations metric.
-func (mb *MetricsBuilder) RecordNginxSlabSlotAllocationsDataPoint(ts pcommon.Timestamp, val int64, nginxSlabSlotLimitAttributeValue int64, nginxSlabSlotAllocationResultAttributeValue AttributeNginxSlabSlotAllocationResult, nginxZoneNameAttributeValue string) {
- mb.metricNginxSlabSlotAllocations.recordDataPoint(mb.startTime, ts, val, nginxSlabSlotLimitAttributeValue, nginxSlabSlotAllocationResultAttributeValue.String(), nginxZoneNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxSlabSlotAllocationsDataPoint(ts pcommon.Timestamp, val int64, nginxSlabSlotAllocationResultAttributeValue AttributeNginxSlabSlotAllocationResult, nginxSlabSlotLimitAttributeValue int64, nginxZoneNameAttributeValue string) {
+ mb.metricNginxSlabSlotAllocations.recordDataPoint(mb.startTime, ts, val, nginxSlabSlotAllocationResultAttributeValue.String(), nginxSlabSlotLimitAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxSlabSlotFreeDataPoint adds a data point to nginx.slab.slot.free metric.
@@ -4385,8 +4399,8 @@ func (mb *MetricsBuilder) RecordNginxSslCertificateVerifyFailuresDataPoint(ts pc
}
// RecordNginxSslHandshakesDataPoint adds a data point to nginx.ssl.handshakes metric.
-func (mb *MetricsBuilder) RecordNginxSslHandshakesDataPoint(ts pcommon.Timestamp, val int64, nginxSslStatusAttributeValue AttributeNginxSslStatus, nginxSslHandshakeReasonAttributeValue AttributeNginxSslHandshakeReason) {
- mb.metricNginxSslHandshakes.recordDataPoint(mb.startTime, ts, val, nginxSslStatusAttributeValue.String(), nginxSslHandshakeReasonAttributeValue.String())
+func (mb *MetricsBuilder) RecordNginxSslHandshakesDataPoint(ts pcommon.Timestamp, val int64, nginxSslHandshakeReasonAttributeValue AttributeNginxSslHandshakeReason, nginxSslStatusAttributeValue AttributeNginxSslStatus) {
+ mb.metricNginxSslHandshakes.recordDataPoint(mb.startTime, ts, val, nginxSslHandshakeReasonAttributeValue.String(), nginxSslStatusAttributeValue.String())
}
// RecordNginxStreamConnectionAcceptedDataPoint adds a data point to nginx.stream.connection.accepted metric.
@@ -4415,63 +4429,63 @@ func (mb *MetricsBuilder) RecordNginxStreamSessionStatusDataPoint(ts pcommon.Tim
}
// RecordNginxStreamUpstreamPeerConnectionCountDataPoint adds a data point to nginx.stream.upstream.peer.connection.count metric.
-func (mb *MetricsBuilder) RecordNginxStreamUpstreamPeerConnectionCountDataPoint(ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
- mb.metricNginxStreamUpstreamPeerConnectionCount.recordDataPoint(mb.startTime, ts, val, nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxStreamUpstreamPeerConnectionCountDataPoint(ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxStreamUpstreamPeerConnectionCount.recordDataPoint(mb.startTime, ts, val, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue, nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxStreamUpstreamPeerConnectionTimeDataPoint adds a data point to nginx.stream.upstream.peer.connection.time metric.
-func (mb *MetricsBuilder) RecordNginxStreamUpstreamPeerConnectionTimeDataPoint(ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
- mb.metricNginxStreamUpstreamPeerConnectionTime.recordDataPoint(mb.startTime, ts, val, nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxStreamUpstreamPeerConnectionTimeDataPoint(ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxStreamUpstreamPeerConnectionTime.recordDataPoint(mb.startTime, ts, val, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue, nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxStreamUpstreamPeerConnectionsDataPoint adds a data point to nginx.stream.upstream.peer.connections metric.
-func (mb *MetricsBuilder) RecordNginxStreamUpstreamPeerConnectionsDataPoint(ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
- mb.metricNginxStreamUpstreamPeerConnections.recordDataPoint(mb.startTime, ts, val, nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxStreamUpstreamPeerConnectionsDataPoint(ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxStreamUpstreamPeerConnections.recordDataPoint(mb.startTime, ts, val, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue, nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxStreamUpstreamPeerCountDataPoint adds a data point to nginx.stream.upstream.peer.count metric.
-func (mb *MetricsBuilder) RecordNginxStreamUpstreamPeerCountDataPoint(ts pcommon.Timestamp, val int64, nginxPeerStateAttributeValue AttributeNginxPeerState, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string) {
- mb.metricNginxStreamUpstreamPeerCount.recordDataPoint(mb.startTime, ts, val, nginxPeerStateAttributeValue.String(), nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxStreamUpstreamPeerCountDataPoint(ts pcommon.Timestamp, val int64, nginxPeerStateAttributeValue AttributeNginxPeerState, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxStreamUpstreamPeerCount.recordDataPoint(mb.startTime, ts, val, nginxPeerStateAttributeValue.String(), nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxStreamUpstreamPeerFailsDataPoint adds a data point to nginx.stream.upstream.peer.fails metric.
-func (mb *MetricsBuilder) RecordNginxStreamUpstreamPeerFailsDataPoint(ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string) {
- mb.metricNginxStreamUpstreamPeerFails.recordDataPoint(mb.startTime, ts, val, nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue, nginxPeerAddressAttributeValue)
+func (mb *MetricsBuilder) RecordNginxStreamUpstreamPeerFailsDataPoint(ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxStreamUpstreamPeerFails.recordDataPoint(mb.startTime, ts, val, nginxPeerAddressAttributeValue, nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxStreamUpstreamPeerHealthChecksDataPoint adds a data point to nginx.stream.upstream.peer.health_checks metric.
-func (mb *MetricsBuilder) RecordNginxStreamUpstreamPeerHealthChecksDataPoint(ts pcommon.Timestamp, val int64, nginxHealthCheckAttributeValue AttributeNginxHealthCheck, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
- mb.metricNginxStreamUpstreamPeerHealthChecks.recordDataPoint(mb.startTime, ts, val, nginxHealthCheckAttributeValue.String(), nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxStreamUpstreamPeerHealthChecksDataPoint(ts pcommon.Timestamp, val int64, nginxHealthCheckAttributeValue AttributeNginxHealthCheck, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxStreamUpstreamPeerHealthChecks.recordDataPoint(mb.startTime, ts, val, nginxHealthCheckAttributeValue.String(), nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue, nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxStreamUpstreamPeerIoDataPoint adds a data point to nginx.stream.upstream.peer.io metric.
-func (mb *MetricsBuilder) RecordNginxStreamUpstreamPeerIoDataPoint(ts pcommon.Timestamp, val int64, nginxIoDirectionAttributeValue AttributeNginxIoDirection, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
- mb.metricNginxStreamUpstreamPeerIo.recordDataPoint(mb.startTime, ts, val, nginxIoDirectionAttributeValue.String(), nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxStreamUpstreamPeerIoDataPoint(ts pcommon.Timestamp, val int64, nginxIoDirectionAttributeValue AttributeNginxIoDirection, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxStreamUpstreamPeerIo.recordDataPoint(mb.startTime, ts, val, nginxIoDirectionAttributeValue.String(), nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue, nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxStreamUpstreamPeerResponseTimeDataPoint adds a data point to nginx.stream.upstream.peer.response.time metric.
-func (mb *MetricsBuilder) RecordNginxStreamUpstreamPeerResponseTimeDataPoint(ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
- mb.metricNginxStreamUpstreamPeerResponseTime.recordDataPoint(mb.startTime, ts, val, nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxStreamUpstreamPeerResponseTimeDataPoint(ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxStreamUpstreamPeerResponseTime.recordDataPoint(mb.startTime, ts, val, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue, nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxStreamUpstreamPeerStateDataPoint adds a data point to nginx.stream.upstream.peer.state metric.
-func (mb *MetricsBuilder) RecordNginxStreamUpstreamPeerStateDataPoint(ts pcommon.Timestamp, val int64, nginxPeerStateAttributeValue AttributeNginxPeerState, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
- mb.metricNginxStreamUpstreamPeerState.recordDataPoint(mb.startTime, ts, val, nginxPeerStateAttributeValue.String(), nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxStreamUpstreamPeerStateDataPoint(ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxPeerStateAttributeValue AttributeNginxPeerState, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxStreamUpstreamPeerState.recordDataPoint(mb.startTime, ts, val, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue, nginxPeerStateAttributeValue.String(), nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxStreamUpstreamPeerTtfbTimeDataPoint adds a data point to nginx.stream.upstream.peer.ttfb.time metric.
-func (mb *MetricsBuilder) RecordNginxStreamUpstreamPeerTtfbTimeDataPoint(ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
- mb.metricNginxStreamUpstreamPeerTtfbTime.recordDataPoint(mb.startTime, ts, val, nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxStreamUpstreamPeerTtfbTimeDataPoint(ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxStreamUpstreamPeerTtfbTime.recordDataPoint(mb.startTime, ts, val, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue, nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxStreamUpstreamPeerUnavailablesDataPoint adds a data point to nginx.stream.upstream.peer.unavailables metric.
-func (mb *MetricsBuilder) RecordNginxStreamUpstreamPeerUnavailablesDataPoint(ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string) {
- mb.metricNginxStreamUpstreamPeerUnavailables.recordDataPoint(mb.startTime, ts, val, nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxStreamUpstreamPeerUnavailablesDataPoint(ts pcommon.Timestamp, val int64, nginxPeerAddressAttributeValue string, nginxPeerNameAttributeValue string, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxStreamUpstreamPeerUnavailables.recordDataPoint(mb.startTime, ts, val, nginxPeerAddressAttributeValue, nginxPeerNameAttributeValue, nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// RecordNginxStreamUpstreamZombieCountDataPoint adds a data point to nginx.stream.upstream.zombie.count metric.
-func (mb *MetricsBuilder) RecordNginxStreamUpstreamZombieCountDataPoint(ts pcommon.Timestamp, val int64, nginxZoneNameAttributeValue string, nginxUpstreamNameAttributeValue string) {
- mb.metricNginxStreamUpstreamZombieCount.recordDataPoint(mb.startTime, ts, val, nginxZoneNameAttributeValue, nginxUpstreamNameAttributeValue)
+func (mb *MetricsBuilder) RecordNginxStreamUpstreamZombieCountDataPoint(ts pcommon.Timestamp, val int64, nginxUpstreamNameAttributeValue string, nginxZoneNameAttributeValue string) {
+ mb.metricNginxStreamUpstreamZombieCount.recordDataPoint(mb.startTime, ts, val, nginxUpstreamNameAttributeValue, nginxZoneNameAttributeValue)
}
// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted,
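Note (illustrative, not part of the generated diff): with the regenerated builder, attribute values are passed to the Record* helpers in alphabetical order of the attribute name, so scraper call sites need the same reordering. A minimal sketch of an updated call, assuming the agent module path github.com/nginx/agent/v3, a hypothetical package/function name, placeholder attribute values, and a MetricsBuilder plus timestamp supplied by the caller:

    package scraper

    import (
    	"go.opentelemetry.io/collector/pdata/pcommon"

    	"github.com/nginx/agent/v3/internal/collector/nginxplusreceiver/internal/metadata"
    )

    // recordPeerState records one nginx.http.upstream.peer.state data point.
    // Attribute arguments follow the new alphabetical order:
    // peer.address, peer.name, peer.state, upstream.name, zone.name.
    func recordPeerState(mb *metadata.MetricsBuilder, ts pcommon.Timestamp) {
    	mb.RecordNginxHTTPUpstreamPeerStateDataPoint(
    		ts,
    		1,               // e.g. 1 when a peer matches the given state
    		"10.0.0.1:8080", // nginx.peer.address (example value)
    		"backend-1",     // nginx.peer.name (example value)
    		metadata.AttributeNginxPeerStateCHECKING, // nginx.peer.state
    		"upstream-app", // nginx.upstream.name (example value)
    		"http-zone",    // nginx.zone.name (example value)
    	)
    }

The generated test changes below exercise the same reordered signatures.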
diff --git a/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics_test.go b/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics_test.go
index a36035d80..8da0bb9c7 100644
--- a/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics_test.go
+++ b/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics_test.go
@@ -70,7 +70,7 @@ func TestMetricsBuilder(t *testing.T) {
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxCacheBytesReadDataPoint(ts, 1, AttributeNginxCacheOutcomeBYPASS, "nginx.cache.name-val")
+ mb.RecordNginxCacheBytesReadDataPoint(ts, 1, "nginx.cache.name-val", AttributeNginxCacheOutcomeBYPASS)
defaultMetricsCount++
allMetricsCount++
@@ -82,7 +82,7 @@ func TestMetricsBuilder(t *testing.T) {
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxCacheResponsesDataPoint(ts, 1, AttributeNginxCacheOutcomeBYPASS, "nginx.cache.name-val")
+ mb.RecordNginxCacheResponsesDataPoint(ts, 1, "nginx.cache.name-val", AttributeNginxCacheOutcomeBYPASS)
defaultMetricsCount++
allMetricsCount++
@@ -138,67 +138,67 @@ func TestMetricsBuilder(t *testing.T) {
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxHTTPUpstreamKeepaliveCountDataPoint(ts, 1, "nginx.zone.name-val", "nginx.upstream.name-val")
+ mb.RecordNginxHTTPUpstreamKeepaliveCountDataPoint(ts, 1, "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxHTTPUpstreamPeerConnectionCountDataPoint(ts, 1, "nginx.zone.name-val", "nginx.upstream.name-val", "nginx.peer.address-val", "nginx.peer.name-val")
+ mb.RecordNginxHTTPUpstreamPeerConnectionCountDataPoint(ts, 1, "nginx.peer.address-val", "nginx.peer.name-val", "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxHTTPUpstreamPeerCountDataPoint(ts, 1, AttributeNginxPeerStateCHECKING, "nginx.zone.name-val", "nginx.upstream.name-val")
+ mb.RecordNginxHTTPUpstreamPeerCountDataPoint(ts, 1, AttributeNginxPeerStateCHECKING, "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxHTTPUpstreamPeerFailsDataPoint(ts, 1, "nginx.zone.name-val", "nginx.upstream.name-val", "nginx.peer.address-val", "nginx.peer.name-val")
+ mb.RecordNginxHTTPUpstreamPeerFailsDataPoint(ts, 1, "nginx.peer.address-val", "nginx.peer.name-val", "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxHTTPUpstreamPeerHeaderTimeDataPoint(ts, 1, "nginx.zone.name-val", "nginx.upstream.name-val", "nginx.peer.address-val", "nginx.peer.name-val")
+ mb.RecordNginxHTTPUpstreamPeerHeaderTimeDataPoint(ts, 1, "nginx.peer.address-val", "nginx.peer.name-val", "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxHTTPUpstreamPeerHealthChecksDataPoint(ts, 1, AttributeNginxHealthCheckUNHEALTHY, "nginx.zone.name-val", "nginx.upstream.name-val", "nginx.peer.address-val", "nginx.peer.name-val")
+ mb.RecordNginxHTTPUpstreamPeerHealthChecksDataPoint(ts, 1, AttributeNginxHealthCheckUNHEALTHY, "nginx.peer.address-val", "nginx.peer.name-val", "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxHTTPUpstreamPeerIoDataPoint(ts, 1, AttributeNginxIoDirectionReceive, "nginx.zone.name-val", "nginx.upstream.name-val", "nginx.peer.address-val", "nginx.peer.name-val")
+ mb.RecordNginxHTTPUpstreamPeerIoDataPoint(ts, 1, AttributeNginxIoDirectionReceive, "nginx.peer.address-val", "nginx.peer.name-val", "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxHTTPUpstreamPeerRequestsDataPoint(ts, 1, "nginx.zone.name-val", "nginx.upstream.name-val", "nginx.peer.address-val", "nginx.peer.name-val")
+ mb.RecordNginxHTTPUpstreamPeerRequestsDataPoint(ts, 1, "nginx.peer.address-val", "nginx.peer.name-val", "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxHTTPUpstreamPeerResponseTimeDataPoint(ts, 1, "nginx.zone.name-val", "nginx.upstream.name-val", "nginx.peer.address-val", "nginx.peer.name-val")
+ mb.RecordNginxHTTPUpstreamPeerResponseTimeDataPoint(ts, 1, "nginx.peer.address-val", "nginx.peer.name-val", "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxHTTPUpstreamPeerResponsesDataPoint(ts, 1, AttributeNginxStatusRange1xx, "nginx.zone.name-val", "nginx.upstream.name-val", "nginx.peer.address-val", "nginx.peer.name-val")
+ mb.RecordNginxHTTPUpstreamPeerResponsesDataPoint(ts, 1, "nginx.peer.address-val", "nginx.peer.name-val", AttributeNginxStatusRange1xx, "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxHTTPUpstreamPeerStateDataPoint(ts, 1, AttributeNginxPeerStateCHECKING, "nginx.zone.name-val", "nginx.upstream.name-val", "nginx.peer.address-val", "nginx.peer.name-val")
+ mb.RecordNginxHTTPUpstreamPeerStateDataPoint(ts, 1, "nginx.peer.address-val", "nginx.peer.name-val", AttributeNginxPeerStateCHECKING, "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxHTTPUpstreamPeerUnavailablesDataPoint(ts, 1, "nginx.zone.name-val", "nginx.upstream.name-val", "nginx.peer.address-val", "nginx.peer.name-val")
+ mb.RecordNginxHTTPUpstreamPeerUnavailablesDataPoint(ts, 1, "nginx.peer.address-val", "nginx.peer.name-val", "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxHTTPUpstreamQueueLimitDataPoint(ts, 1, "nginx.zone.name-val", "nginx.upstream.name-val")
+ mb.RecordNginxHTTPUpstreamQueueLimitDataPoint(ts, 1, "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxHTTPUpstreamQueueOverflowsDataPoint(ts, 1, "nginx.zone.name-val", "nginx.upstream.name-val")
+ mb.RecordNginxHTTPUpstreamQueueOverflowsDataPoint(ts, 1, "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxHTTPUpstreamQueueUsageDataPoint(ts, 1, "nginx.zone.name-val", "nginx.upstream.name-val")
+ mb.RecordNginxHTTPUpstreamQueueUsageDataPoint(ts, 1, "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxHTTPUpstreamZombieCountDataPoint(ts, 1, "nginx.zone.name-val", "nginx.upstream.name-val")
+ mb.RecordNginxHTTPUpstreamZombieCountDataPoint(ts, 1, "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
@@ -218,7 +218,7 @@ func TestMetricsBuilder(t *testing.T) {
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxSlabSlotAllocationsDataPoint(ts, 1, 21, AttributeNginxSlabSlotAllocationResultFAILURE, "nginx.zone.name-val")
+ mb.RecordNginxSlabSlotAllocationsDataPoint(ts, 1, AttributeNginxSlabSlotAllocationResultFAILURE, 21, "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
@@ -234,7 +234,7 @@ func TestMetricsBuilder(t *testing.T) {
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxSslHandshakesDataPoint(ts, 1, AttributeNginxSslStatusFAILED, AttributeNginxSslHandshakeReasonNOCOMMONPROTOCOL)
+ mb.RecordNginxSslHandshakesDataPoint(ts, 1, AttributeNginxSslHandshakeReasonNOCOMMONPROTOCOL, AttributeNginxSslStatusFAILED)
defaultMetricsCount++
allMetricsCount++
@@ -258,51 +258,51 @@ func TestMetricsBuilder(t *testing.T) {
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxStreamUpstreamPeerConnectionCountDataPoint(ts, 1, "nginx.zone.name-val", "nginx.upstream.name-val", "nginx.peer.address-val", "nginx.peer.name-val")
+ mb.RecordNginxStreamUpstreamPeerConnectionCountDataPoint(ts, 1, "nginx.peer.address-val", "nginx.peer.name-val", "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxStreamUpstreamPeerConnectionTimeDataPoint(ts, 1, "nginx.zone.name-val", "nginx.upstream.name-val", "nginx.peer.address-val", "nginx.peer.name-val")
+ mb.RecordNginxStreamUpstreamPeerConnectionTimeDataPoint(ts, 1, "nginx.peer.address-val", "nginx.peer.name-val", "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxStreamUpstreamPeerConnectionsDataPoint(ts, 1, "nginx.zone.name-val", "nginx.upstream.name-val", "nginx.peer.address-val", "nginx.peer.name-val")
+ mb.RecordNginxStreamUpstreamPeerConnectionsDataPoint(ts, 1, "nginx.peer.address-val", "nginx.peer.name-val", "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxStreamUpstreamPeerCountDataPoint(ts, 1, AttributeNginxPeerStateCHECKING, "nginx.zone.name-val", "nginx.upstream.name-val")
+ mb.RecordNginxStreamUpstreamPeerCountDataPoint(ts, 1, AttributeNginxPeerStateCHECKING, "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxStreamUpstreamPeerFailsDataPoint(ts, 1, "nginx.zone.name-val", "nginx.upstream.name-val", "nginx.peer.address-val")
+ mb.RecordNginxStreamUpstreamPeerFailsDataPoint(ts, 1, "nginx.peer.address-val", "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxStreamUpstreamPeerHealthChecksDataPoint(ts, 1, AttributeNginxHealthCheckUNHEALTHY, "nginx.zone.name-val", "nginx.upstream.name-val", "nginx.peer.address-val", "nginx.peer.name-val")
+ mb.RecordNginxStreamUpstreamPeerHealthChecksDataPoint(ts, 1, AttributeNginxHealthCheckUNHEALTHY, "nginx.peer.address-val", "nginx.peer.name-val", "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxStreamUpstreamPeerIoDataPoint(ts, 1, AttributeNginxIoDirectionReceive, "nginx.zone.name-val", "nginx.upstream.name-val", "nginx.peer.address-val", "nginx.peer.name-val")
+ mb.RecordNginxStreamUpstreamPeerIoDataPoint(ts, 1, AttributeNginxIoDirectionReceive, "nginx.peer.address-val", "nginx.peer.name-val", "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxStreamUpstreamPeerResponseTimeDataPoint(ts, 1, "nginx.zone.name-val", "nginx.upstream.name-val", "nginx.peer.address-val", "nginx.peer.name-val")
+ mb.RecordNginxStreamUpstreamPeerResponseTimeDataPoint(ts, 1, "nginx.peer.address-val", "nginx.peer.name-val", "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxStreamUpstreamPeerStateDataPoint(ts, 1, AttributeNginxPeerStateCHECKING, "nginx.zone.name-val", "nginx.upstream.name-val", "nginx.peer.address-val", "nginx.peer.name-val")
+ mb.RecordNginxStreamUpstreamPeerStateDataPoint(ts, 1, "nginx.peer.address-val", "nginx.peer.name-val", AttributeNginxPeerStateCHECKING, "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxStreamUpstreamPeerTtfbTimeDataPoint(ts, 1, "nginx.zone.name-val", "nginx.upstream.name-val", "nginx.peer.address-val", "nginx.peer.name-val")
+ mb.RecordNginxStreamUpstreamPeerTtfbTimeDataPoint(ts, 1, "nginx.peer.address-val", "nginx.peer.name-val", "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxStreamUpstreamPeerUnavailablesDataPoint(ts, 1, "nginx.zone.name-val", "nginx.upstream.name-val", "nginx.peer.address-val", "nginx.peer.name-val")
+ mb.RecordNginxStreamUpstreamPeerUnavailablesDataPoint(ts, 1, "nginx.peer.address-val", "nginx.peer.name-val", "nginx.upstream.name-val", "nginx.zone.name-val")
defaultMetricsCount++
allMetricsCount++
- mb.RecordNginxStreamUpstreamZombieCountDataPoint(ts, 1, "nginx.zone.name-val", "nginx.upstream.name-val")
+ mb.RecordNginxStreamUpstreamZombieCountDataPoint(ts, 1, "nginx.upstream.name-val", "nginx.zone.name-val")
rb := mb.NewResourceBuilder()
rb.SetInstanceID("instance.id-val")
@@ -343,12 +343,12 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.cache.outcome")
- assert.True(t, ok)
- assert.Equal(t, "BYPASS", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.cache.name")
+ attrVal, ok := dp.Attributes().Get("nginx.cache.name")
assert.True(t, ok)
assert.Equal(t, "nginx.cache.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.cache.outcome")
+ assert.True(t, ok)
+ assert.Equal(t, "BYPASS", attrVal.Str())
case "nginx.cache.memory.limit":
assert.False(t, validatedMetrics["nginx.cache.memory.limit"], "Found a duplicate in the metrics slice: nginx.cache.memory.limit")
validatedMetrics["nginx.cache.memory.limit"] = true
@@ -393,12 +393,12 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.cache.outcome")
- assert.True(t, ok)
- assert.Equal(t, "BYPASS", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.cache.name")
+ attrVal, ok := dp.Attributes().Get("nginx.cache.name")
assert.True(t, ok)
assert.Equal(t, "nginx.cache.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.cache.outcome")
+ assert.True(t, ok)
+ assert.Equal(t, "BYPASS", attrVal.Str())
case "nginx.config.reloads":
assert.False(t, validatedMetrics["nginx.config.reloads"], "Found a duplicate in the metrics slice: nginx.config.reloads")
validatedMetrics["nginx.config.reloads"] = true
@@ -583,7 +583,7 @@ func TestMetricsBuilder(t *testing.T) {
validatedMetrics["nginx.http.response.count"] = true
assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
- assert.Equal(t, "The total number of HTTP responses sent to clients since the last collection interval, grouped by status code range.", ms.At(i).Description())
+ assert.Equal(t, "The total number of HTTP responses, since the last collection interval and grouped by status code range.", ms.At(i).Description())
assert.Equal(t, "responses", ms.At(i).Unit())
dp := ms.At(i).Gauge().DataPoints().At(0)
assert.Equal(t, start, dp.StartTimestamp())
@@ -627,7 +627,7 @@ func TestMetricsBuilder(t *testing.T) {
validatedMetrics["nginx.http.responses"] = true
assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type())
assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len())
- assert.Equal(t, "The total number of HTTP responses sent to clients, since NGINX was last started or reloaded.", ms.At(i).Description())
+ assert.Equal(t, "The total number of HTTP responses sent to clients.", ms.At(i).Description())
assert.Equal(t, "responses", ms.At(i).Unit())
assert.True(t, ms.At(i).Sum().IsMonotonic())
assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality())
@@ -654,12 +654,12 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.zone.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
+ attrVal, ok := dp.Attributes().Get("nginx.upstream.name")
assert.True(t, ok)
assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.http.upstream.peer.connection.count":
assert.False(t, validatedMetrics["nginx.http.upstream.peer.connection.count"], "Found a duplicate in the metrics slice: nginx.http.upstream.peer.connection.count")
validatedMetrics["nginx.http.upstream.peer.connection.count"] = true
@@ -672,18 +672,18 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.zone.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.peer.address")
+ attrVal, ok := dp.Attributes().Get("nginx.peer.address")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.address-val", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.peer.name")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.http.upstream.peer.count":
assert.False(t, validatedMetrics["nginx.http.upstream.peer.count"], "Found a duplicate in the metrics slice: nginx.http.upstream.peer.count")
validatedMetrics["nginx.http.upstream.peer.count"] = true
@@ -699,12 +699,12 @@ func TestMetricsBuilder(t *testing.T) {
attrVal, ok := dp.Attributes().Get("nginx.peer.state")
assert.True(t, ok)
assert.Equal(t, "CHECKING", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.zone.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
assert.True(t, ok)
assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.http.upstream.peer.fails":
assert.False(t, validatedMetrics["nginx.http.upstream.peer.fails"], "Found a duplicate in the metrics slice: nginx.http.upstream.peer.fails")
validatedMetrics["nginx.http.upstream.peer.fails"] = true
@@ -719,18 +719,18 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.zone.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.peer.address")
+ attrVal, ok := dp.Attributes().Get("nginx.peer.address")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.address-val", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.peer.name")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.http.upstream.peer.header.time":
assert.False(t, validatedMetrics["nginx.http.upstream.peer.header.time"], "Found a duplicate in the metrics slice: nginx.http.upstream.peer.header.time")
validatedMetrics["nginx.http.upstream.peer.header.time"] = true
@@ -743,18 +743,18 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.zone.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.peer.address")
+ attrVal, ok := dp.Attributes().Get("nginx.peer.address")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.address-val", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.peer.name")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.http.upstream.peer.health_checks":
assert.False(t, validatedMetrics["nginx.http.upstream.peer.health_checks"], "Found a duplicate in the metrics slice: nginx.http.upstream.peer.health_checks")
validatedMetrics["nginx.http.upstream.peer.health_checks"] = true
@@ -772,18 +772,18 @@ func TestMetricsBuilder(t *testing.T) {
attrVal, ok := dp.Attributes().Get("nginx.health_check")
assert.True(t, ok)
assert.Equal(t, "UNHEALTHY", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.zone.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.peer.address")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.address-val", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.peer.name")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.http.upstream.peer.io":
assert.False(t, validatedMetrics["nginx.http.upstream.peer.io"], "Found a duplicate in the metrics slice: nginx.http.upstream.peer.io")
validatedMetrics["nginx.http.upstream.peer.io"] = true
@@ -801,18 +801,18 @@ func TestMetricsBuilder(t *testing.T) {
attrVal, ok := dp.Attributes().Get("nginx.io.direction")
assert.True(t, ok)
assert.Equal(t, "receive", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.zone.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.peer.address")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.address-val", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.peer.name")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.http.upstream.peer.requests":
assert.False(t, validatedMetrics["nginx.http.upstream.peer.requests"], "Found a duplicate in the metrics slice: nginx.http.upstream.peer.requests")
validatedMetrics["nginx.http.upstream.peer.requests"] = true
@@ -827,18 +827,18 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.zone.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.peer.address")
+ attrVal, ok := dp.Attributes().Get("nginx.peer.address")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.address-val", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.peer.name")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.http.upstream.peer.response.time":
assert.False(t, validatedMetrics["nginx.http.upstream.peer.response.time"], "Found a duplicate in the metrics slice: nginx.http.upstream.peer.response.time")
validatedMetrics["nginx.http.upstream.peer.response.time"] = true
@@ -851,18 +851,18 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.zone.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.peer.address")
+ attrVal, ok := dp.Attributes().Get("nginx.peer.address")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.address-val", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.peer.name")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.http.upstream.peer.responses":
assert.False(t, validatedMetrics["nginx.http.upstream.peer.responses"], "Found a duplicate in the metrics slice: nginx.http.upstream.peer.responses")
validatedMetrics["nginx.http.upstream.peer.responses"] = true
@@ -877,21 +877,21 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.status_range")
+ attrVal, ok := dp.Attributes().Get("nginx.peer.address")
assert.True(t, ok)
- assert.Equal(t, "1xx", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.Equal(t, "nginx.peer.address-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.peer.name")
assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
+ assert.Equal(t, "nginx.peer.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.status_range")
+ assert.True(t, ok)
+ assert.Equal(t, "1xx", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
assert.True(t, ok)
assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.peer.address")
- assert.True(t, ok)
- assert.Equal(t, "nginx.peer.address-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.peer.name")
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
assert.True(t, ok)
- assert.Equal(t, "nginx.peer.name-val", attrVal.Str())
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.http.upstream.peer.state":
assert.False(t, validatedMetrics["nginx.http.upstream.peer.state"], "Found a duplicate in the metrics slice: nginx.http.upstream.peer.state")
validatedMetrics["nginx.http.upstream.peer.state"] = true
@@ -904,27 +904,27 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.peer.state")
+ attrVal, ok := dp.Attributes().Get("nginx.peer.address")
assert.True(t, ok)
- assert.Equal(t, "CHECKING", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.Equal(t, "nginx.peer.address-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.peer.name")
assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
+ assert.Equal(t, "nginx.peer.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.peer.state")
+ assert.True(t, ok)
+ assert.Equal(t, "CHECKING", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
assert.True(t, ok)
assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.peer.address")
- assert.True(t, ok)
- assert.Equal(t, "nginx.peer.address-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.peer.name")
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
assert.True(t, ok)
- assert.Equal(t, "nginx.peer.name-val", attrVal.Str())
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.http.upstream.peer.unavailables":
assert.False(t, validatedMetrics["nginx.http.upstream.peer.unavailables"], "Found a duplicate in the metrics slice: nginx.http.upstream.peer.unavailables")
validatedMetrics["nginx.http.upstream.peer.unavailables"] = true
assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type())
assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len())
- assert.Equal(t, "Number of times the server became unavailable for client requests (“unavail”).", ms.At(i).Description())
+ assert.Equal(t, "The total number of times the server became unavailable for client requests ('unavail').", ms.At(i).Description())
assert.Equal(t, "requests", ms.At(i).Unit())
assert.True(t, ms.At(i).Sum().IsMonotonic())
assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality())
@@ -933,18 +933,18 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.zone.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.peer.address")
+ attrVal, ok := dp.Attributes().Get("nginx.peer.address")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.address-val", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.peer.name")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.http.upstream.queue.limit":
assert.False(t, validatedMetrics["nginx.http.upstream.queue.limit"], "Found a duplicate in the metrics slice: nginx.http.upstream.queue.limit")
validatedMetrics["nginx.http.upstream.queue.limit"] = true
@@ -957,12 +957,12 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.zone.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
+ attrVal, ok := dp.Attributes().Get("nginx.upstream.name")
assert.True(t, ok)
assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.http.upstream.queue.overflows":
assert.False(t, validatedMetrics["nginx.http.upstream.queue.overflows"], "Found a duplicate in the metrics slice: nginx.http.upstream.queue.overflows")
validatedMetrics["nginx.http.upstream.queue.overflows"] = true
@@ -977,12 +977,12 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.zone.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
+ attrVal, ok := dp.Attributes().Get("nginx.upstream.name")
assert.True(t, ok)
assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.http.upstream.queue.usage":
assert.False(t, validatedMetrics["nginx.http.upstream.queue.usage"], "Found a duplicate in the metrics slice: nginx.http.upstream.queue.usage")
validatedMetrics["nginx.http.upstream.queue.usage"] = true
@@ -995,12 +995,12 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.zone.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
+ attrVal, ok := dp.Attributes().Get("nginx.upstream.name")
assert.True(t, ok)
assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.http.upstream.zombie.count":
assert.False(t, validatedMetrics["nginx.http.upstream.zombie.count"], "Found a duplicate in the metrics slice: nginx.http.upstream.zombie.count")
validatedMetrics["nginx.http.upstream.zombie.count"] = true
@@ -1013,12 +1013,12 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.zone.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
+ attrVal, ok := dp.Attributes().Get("nginx.upstream.name")
assert.True(t, ok)
assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.slab.page.free":
assert.False(t, validatedMetrics["nginx.slab.page.free"], "Found a duplicate in the metrics slice: nginx.slab.page.free")
validatedMetrics["nginx.slab.page.free"] = true
@@ -1074,8 +1074,8 @@ func TestMetricsBuilder(t *testing.T) {
dp := ms.At(i).Gauge().DataPoints().At(0)
assert.Equal(t, start, dp.StartTimestamp())
assert.Equal(t, ts, dp.Timestamp())
- assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
- assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+ assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
+ assert.Equal(t, int64(1), dp.IntValue())
attrVal, ok := dp.Attributes().Get("nginx.zone.name")
assert.True(t, ok)
assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
@@ -1093,12 +1093,12 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.slab.slot.limit")
- assert.True(t, ok)
- assert.EqualValues(t, 21, attrVal.Int())
- attrVal, ok = dp.Attributes().Get("nginx.slab.slot.allocation.result")
+ attrVal, ok := dp.Attributes().Get("nginx.slab.slot.allocation.result")
assert.True(t, ok)
assert.Equal(t, "FAILURE", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.slab.slot.limit")
+ assert.True(t, ok)
+ assert.EqualValues(t, 21, attrVal.Int())
attrVal, ok = dp.Attributes().Get("nginx.zone.name")
assert.True(t, ok)
assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
@@ -1169,12 +1169,12 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.ssl.status")
- assert.True(t, ok)
- assert.Equal(t, "FAILED", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.ssl.handshake.reason")
+ attrVal, ok := dp.Attributes().Get("nginx.ssl.handshake.reason")
assert.True(t, ok)
assert.Equal(t, "NO_COMMON_PROTOCOL", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.ssl.status")
+ assert.True(t, ok)
+ assert.Equal(t, "FAILED", attrVal.Str())
case "nginx.stream.connection.accepted":
assert.False(t, validatedMetrics["nginx.stream.connection.accepted"], "Found a duplicate in the metrics slice: nginx.stream.connection.accepted")
validatedMetrics["nginx.stream.connection.accepted"] = true
@@ -1276,18 +1276,18 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.zone.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.peer.address")
+ attrVal, ok := dp.Attributes().Get("nginx.peer.address")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.address-val", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.peer.name")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.stream.upstream.peer.connection.time":
assert.False(t, validatedMetrics["nginx.stream.upstream.peer.connection.time"], "Found a duplicate in the metrics slice: nginx.stream.upstream.peer.connection.time")
validatedMetrics["nginx.stream.upstream.peer.connection.time"] = true
@@ -1300,18 +1300,18 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.zone.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.peer.address")
+ attrVal, ok := dp.Attributes().Get("nginx.peer.address")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.address-val", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.peer.name")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.stream.upstream.peer.connections":
assert.False(t, validatedMetrics["nginx.stream.upstream.peer.connections"], "Found a duplicate in the metrics slice: nginx.stream.upstream.peer.connections")
validatedMetrics["nginx.stream.upstream.peer.connections"] = true
@@ -1326,18 +1326,18 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.zone.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.peer.address")
+ attrVal, ok := dp.Attributes().Get("nginx.peer.address")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.address-val", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.peer.name")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.stream.upstream.peer.count":
assert.False(t, validatedMetrics["nginx.stream.upstream.peer.count"], "Found a duplicate in the metrics slice: nginx.stream.upstream.peer.count")
validatedMetrics["nginx.stream.upstream.peer.count"] = true
@@ -1353,12 +1353,12 @@ func TestMetricsBuilder(t *testing.T) {
attrVal, ok := dp.Attributes().Get("nginx.peer.state")
assert.True(t, ok)
assert.Equal(t, "CHECKING", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.zone.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
assert.True(t, ok)
assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.stream.upstream.peer.fails":
assert.False(t, validatedMetrics["nginx.stream.upstream.peer.fails"], "Found a duplicate in the metrics slice: nginx.stream.upstream.peer.fails")
validatedMetrics["nginx.stream.upstream.peer.fails"] = true
@@ -1373,15 +1373,15 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.zone.name")
+ attrVal, ok := dp.Attributes().Get("nginx.peer.address")
assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
+ assert.Equal(t, "nginx.peer.address-val", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
assert.True(t, ok)
assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.peer.address")
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
assert.True(t, ok)
- assert.Equal(t, "nginx.peer.address-val", attrVal.Str())
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.stream.upstream.peer.health_checks":
assert.False(t, validatedMetrics["nginx.stream.upstream.peer.health_checks"], "Found a duplicate in the metrics slice: nginx.stream.upstream.peer.health_checks")
validatedMetrics["nginx.stream.upstream.peer.health_checks"] = true
@@ -1399,18 +1399,18 @@ func TestMetricsBuilder(t *testing.T) {
attrVal, ok := dp.Attributes().Get("nginx.health_check")
assert.True(t, ok)
assert.Equal(t, "UNHEALTHY", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.zone.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.peer.address")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.address-val", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.peer.name")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.stream.upstream.peer.io":
assert.False(t, validatedMetrics["nginx.stream.upstream.peer.io"], "Found a duplicate in the metrics slice: nginx.stream.upstream.peer.io")
validatedMetrics["nginx.stream.upstream.peer.io"] = true
@@ -1428,18 +1428,18 @@ func TestMetricsBuilder(t *testing.T) {
attrVal, ok := dp.Attributes().Get("nginx.io.direction")
assert.True(t, ok)
assert.Equal(t, "receive", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.zone.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.peer.address")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.address-val", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.peer.name")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.stream.upstream.peer.response.time":
assert.False(t, validatedMetrics["nginx.stream.upstream.peer.response.time"], "Found a duplicate in the metrics slice: nginx.stream.upstream.peer.response.time")
validatedMetrics["nginx.stream.upstream.peer.response.time"] = true
@@ -1452,47 +1452,45 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.zone.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.peer.address")
+ attrVal, ok := dp.Attributes().Get("nginx.peer.address")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.address-val", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.peer.name")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.stream.upstream.peer.state":
assert.False(t, validatedMetrics["nginx.stream.upstream.peer.state"], "Found a duplicate in the metrics slice: nginx.stream.upstream.peer.state")
validatedMetrics["nginx.stream.upstream.peer.state"] = true
- assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type())
- assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len())
- assert.Equal(t, "Current state of upstream peers in deployment. If any of the upstream peers in the deployment match the given state then the value will be 1. If no upstream peer is a match then the value will be 0.", ms.At(i).Description())
+ assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
+ assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
+ assert.Equal(t, "Current state of upstream peers in deployment.", ms.At(i).Description())
assert.Equal(t, "deployments", ms.At(i).Unit())
- assert.True(t, ms.At(i).Sum().IsMonotonic())
- assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality())
- dp := ms.At(i).Sum().DataPoints().At(0)
+ dp := ms.At(i).Gauge().DataPoints().At(0)
assert.Equal(t, start, dp.StartTimestamp())
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.peer.state")
+ attrVal, ok := dp.Attributes().Get("nginx.peer.address")
assert.True(t, ok)
- assert.Equal(t, "CHECKING", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.Equal(t, "nginx.peer.address-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.peer.name")
assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
+ assert.Equal(t, "nginx.peer.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.peer.state")
+ assert.True(t, ok)
+ assert.Equal(t, "CHECKING", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
assert.True(t, ok)
assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.peer.address")
- assert.True(t, ok)
- assert.Equal(t, "nginx.peer.address-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.peer.name")
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
assert.True(t, ok)
- assert.Equal(t, "nginx.peer.name-val", attrVal.Str())
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.stream.upstream.peer.ttfb.time":
assert.False(t, validatedMetrics["nginx.stream.upstream.peer.ttfb.time"], "Found a duplicate in the metrics slice: nginx.stream.upstream.peer.ttfb.time")
validatedMetrics["nginx.stream.upstream.peer.ttfb.time"] = true
@@ -1505,24 +1503,24 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.zone.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.peer.address")
+ attrVal, ok := dp.Attributes().Get("nginx.peer.address")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.address-val", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.peer.name")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.stream.upstream.peer.unavailables":
assert.False(t, validatedMetrics["nginx.stream.upstream.peer.unavailables"], "Found a duplicate in the metrics slice: nginx.stream.upstream.peer.unavailables")
validatedMetrics["nginx.stream.upstream.peer.unavailables"] = true
assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type())
assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len())
- assert.Equal(t, "How many times the server became unavailable for client connections (state “unavail”) due to the number of unsuccessful attempts reaching the max_fails threshold.", ms.At(i).Description())
+ assert.Equal(t, "How many times the server became unavailable for client connections (state 'unavail') due to the number of unsuccessful attempts reaching the max_fails threshold.", ms.At(i).Description())
assert.Equal(t, "requests", ms.At(i).Unit())
assert.True(t, ms.At(i).Sum().IsMonotonic())
assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality())
@@ -1531,18 +1529,18 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.zone.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.peer.address")
+ attrVal, ok := dp.Attributes().Get("nginx.peer.address")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.address-val", attrVal.Str())
attrVal, ok = dp.Attributes().Get("nginx.peer.name")
assert.True(t, ok)
assert.Equal(t, "nginx.peer.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
case "nginx.stream.upstream.zombie.count":
assert.False(t, validatedMetrics["nginx.stream.upstream.zombie.count"], "Found a duplicate in the metrics slice: nginx.stream.upstream.zombie.count")
validatedMetrics["nginx.stream.upstream.zombie.count"] = true
@@ -1555,12 +1553,12 @@ func TestMetricsBuilder(t *testing.T) {
assert.Equal(t, ts, dp.Timestamp())
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
assert.Equal(t, int64(1), dp.IntValue())
- attrVal, ok := dp.Attributes().Get("nginx.zone.name")
- assert.True(t, ok)
- assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
- attrVal, ok = dp.Attributes().Get("nginx.upstream.name")
+ attrVal, ok := dp.Attributes().Get("nginx.upstream.name")
assert.True(t, ok)
assert.Equal(t, "nginx.upstream.name-val", attrVal.Str())
+ attrVal, ok = dp.Attributes().Get("nginx.zone.name")
+ assert.True(t, ok)
+ assert.Equal(t, "nginx.zone.name-val", attrVal.Str())
}
}
})
diff --git a/internal/collector/nginxplusreceiver/metadata.yaml b/internal/collector/nginxplusreceiver/metadata.yaml
index 61d43d588..4014eec62 100644
--- a/internal/collector/nginxplusreceiver/metadata.yaml
+++ b/internal/collector/nginxplusreceiver/metadata.yaml
@@ -1,3 +1,5 @@
+# NOTE: THIS FILE IS AUTOGENERATED. DO NOT EDIT BY HAND.
+
type: nginxplus
scope_name: otelcol/nginxplusreceiver
@@ -15,13 +17,16 @@ resource_attributes:
type: string
enabled: true
instance.type:
- description: The nginx instance type (nginx, nginxplus)
+ description: The nginx instance type (nginx, nginxplus).
type: string
enabled: true
attributes:
+ nginx.cache.name:
+ description: "The name of the cache."
+ type: string
nginx.cache.outcome:
- description: The outcome for an attempt to fetch content from NGINX's cache.
+ description: "The outcome for an attempt to fetch content from NGINX's cache"
type: string
enum:
- "BYPASS"
@@ -31,24 +36,39 @@ attributes:
- "REVALIDATED"
- "STALE"
- "UPDATING"
- nginx.zone.name:
- description: The name of the shared memory zone.
+ nginx.connections.outcome:
+ description: "The outcome of a connection"
type: string
- nginx.zone.type:
- description: The type of shared memory zone, depending on what block it was defined in the NGINX configuration.
+ enum:
+ - "ACCEPTED"
+ - "ACTIVE"
+ - "HANDLED"
+ - "READING"
+ - "WRITING"
+ - "WAITING"
+ - "DROPPED"
+ - "IDLE"
+ nginx.health_check:
+ description: "The state received from a health check."
type: string
enum:
- - "SERVER"
- - "LOCATION"
+ - "UNHEALTHY"
+ - "FAIL"
+ nginx.io.direction:
+ description: "The direction of byte traffic."
+ type: string
+ enum:
+ - "receive"
+ - "transmit"
nginx.limit_conn.outcome:
- description: The outcome for attempting to establish a connection to an endpoint that has a limit_conn directive configured.
+ description: "The outcome for attempting to establish a connection to an endpoint that has a limit_conn directive configured."
type: string
enum:
- "PASSED"
- "REJECTED"
- "REJECTED_DRY_RUN"
nginx.limit_req.outcome:
- description: The outcome for attempting to establish a connection to an endpoint that has a limit_req directive configured.
+ description: "The outcome for attempting to establish a connection to an endpoint that has a limit_req directive configured."
type: string
enum:
- "PASSED"
@@ -56,32 +76,14 @@ attributes:
- "REJECTED_DRY_RUN"
- "DELAYED"
- "DELAYED_DRY_RUN"
- nginx.io.direction:
- description: The direction of byte traffic.
- type: string
- enum:
- - "receive"
- - "transmit"
- nginx.status_range:
- description: A status code range or bucket for a HTTP response's status code.
- type: string
- enum:
- - "1xx"
- - "2xx"
- - "3xx"
- - "4xx"
- - "5xx"
- nginx.upstream.name:
- description: The name of the upstream block.
- type: string
nginx.peer.address:
- description: The address of the peer.
+ description: "The address of the peer."
type: string
nginx.peer.name:
- description: The name of the peer.
+ description: "The name of the peer."
type: string
nginx.peer.state:
- description: The current state of an upstream peer.
+ description: "The current state of an upstream peer."
type: string
enum:
- "CHECKING"
@@ -90,28 +92,31 @@ attributes:
- "UNAVAILABLE"
- "UNHEALTHY"
- "UP"
- nginx.health_check:
- description: The state received from a health check.
- type: string
- enum:
- - "UNHEALTHY"
- - "FAIL"
- nginx.ssl.status:
- description: The status of a SSL handshake.
+ nginx.slab.slot.allocation.result:
+ description: "Result of an attempt to allocate memory to a slab slot."
type: string
enum:
- - "FAILED"
- - "REUSE"
+ - "FAILURE"
+ - "SUCCESS"
+ nginx.slab.slot.limit:
+ description: "The upper limit for a slab slot, used as the identifier for the slot."
+ type: int
nginx.ssl.handshake.reason:
- description: The reason for a SSL handshake failure.
+ description: "The reason for a SSL handshake failure."
type: string
enum:
- "NO_COMMON_PROTOCOL"
- "NO_COMMON_CIPHER"
- "TIMEOUT"
- "CERT_REJECTED"
+ nginx.ssl.status:
+ description: "The status of a SSL handshake."
+ type: string
+ enum:
+ - "FAILED"
+ - "REUSE"
nginx.ssl.verify_failure.reason:
- description: The reason for a SSL certificate verification failure.
+ description: "The reason for a SSL certificate verification failure."
type: string
enum:
- "NO_CERT"
@@ -119,31 +124,31 @@ attributes:
- "REVOKED_CERT"
- "HOSTNAME_MISMATCH"
- "OTHER"
- nginx.slab.slot.allocation.result:
- description: Result of an attempt to allocate memory to a slab slot.
+ nginx.status_range:
+ description: "A status code range or bucket for a HTTP response's status code."
type: string
enum:
- - "FAILURE"
- - "SUCCESS"
- nginx.slab.slot.limit:
- description: The upper limit for a slab slot, used as the identifier for the slot.
- type: int
- nginx.connections.outcome:
- description: The outcome of the connection.
+ - "1xx"
+ - "2xx"
+ - "3xx"
+ - "4xx"
+ - "5xx"
+ nginx.upstream.name:
+ description: "The name of the upstream block."
type: string
- enum:
- - "ACCEPTED"
- - "ACTIVE"
- - "DROPPED"
- - "IDLE"
- nginx.cache.name:
- description: The name of the cache.
+ nginx.zone.name:
+ description: "The name of the shared memory zone."
type: string
-
+ nginx.zone.type:
+ description: "The type of shared memory zone, depending on what block it was defined in the NGINX configuration."
+ type: string
+ enum:
+ - "SERVER"
+ - "LOCATION"
metrics:
nginx.config.reloads:
enabled: true
- description: The total number of NGINX config reloads.
+ description: "The total number of NGINX config reloads."
sum:
value_type: int
monotonic: true
@@ -151,624 +156,618 @@ metrics:
unit: "reloads"
nginx.http.connections:
enabled: true
- description: The total number of connections.
+ description: "The total number of connections."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "connections"
- attributes:
+ attributes:
- nginx.connections.outcome
nginx.http.connection.count:
enabled: true
- description: The current number of connections.
+ description: "The current number of connections."
gauge:
value_type: int
unit: "connections"
- attributes:
+ attributes:
- nginx.connections.outcome
- nginx.http.request.count:
- enabled: true
- description: The total number of client requests received, since the last collection interval.
- gauge:
- value_type: int
- unit: "requests"
- nginx.cache.bytes_read:
- enabled: true
- description: The total number of bytes read from the cache or proxied server.
- sum:
- value_type: int
- monotonic: true
- aggregation_temporality: cumulative
- unit: "bytes"
- attributes:
- - nginx.cache.outcome
- - nginx.cache.name
- nginx.cache.responses:
- enabled: true
- description: The total number of responses read from the cache or proxied server.
- sum:
- value_type: int
- monotonic: true
- aggregation_temporality: cumulative
- unit: "responses"
- attributes:
- - nginx.cache.outcome
- - nginx.cache.name
- nginx.cache.memory.limit:
- enabled: true
- description: The limit on the maximum size of the cache specified in the configuration.
- gauge:
- value_type: int
- unit: "bytes"
- attributes:
- - nginx.cache.name
- nginx.cache.memory.usage:
- enabled: true
- description: The current size of the cache.
- gauge:
- value_type: int
- unit: "bytes"
- attributes:
- - nginx.cache.name
nginx.http.limit_conn.requests:
enabled: true
- description: The total number of connections to an endpoint with a limit_conn directive.
+ description: "The total number of connections to an endpoint with a limit_conn directive."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "connections"
- attributes:
- - nginx.limit_conn.outcome
+ attributes:
+ - nginx.limit_conn.outcome
- nginx.zone.name
nginx.http.limit_req.requests:
enabled: true
- description: The total number of requests to an endpoint with a limit_req directive.
+ description: "The total number of requests to an endpoint with a limit_req directive."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "requests"
- attributes:
- - nginx.limit_req.outcome
+ attributes:
+ - nginx.limit_req.outcome
- nginx.zone.name
nginx.http.request.io:
enabled: true
- description: The total number of HTTP byte IO.
+ description: "The total number of HTTP byte IO."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "bytes"
- attributes:
- - nginx.io.direction
- - nginx.zone.name
+ attributes:
+ - nginx.io.direction
+ - nginx.zone.name
- nginx.zone.type
nginx.http.request.discarded:
enabled: true
- description: The total number of requests completed without sending a response.
+ description: "The total number of requests completed without sending a response."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "requests"
- attributes:
- - nginx.zone.name
+ attributes:
+ - nginx.zone.name
- nginx.zone.type
nginx.http.request.processing.count:
enabled: true
- description: The number of client requests that are currently being processed.
+ description: "The number of client requests that are currently being processed."
gauge:
value_type: int
unit: "requests"
- attributes:
- - nginx.zone.name
+ attributes:
+ - nginx.zone.name
- nginx.zone.type
nginx.http.requests:
enabled: true
- description: The total number of client requests received, since NGINX was last started or reloaded.
+ description: "The total number of client requests received, since NGINX was last started or reloaded."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "requests"
- attributes:
- - nginx.zone.name
+ attributes:
+ - nginx.zone.name
- nginx.zone.type
+ nginx.http.request.count:
+ enabled: true
+ description: "The total number of client requests received, since the last collection interval."
+ gauge:
+ value_type: int
+ unit: "requests"
nginx.http.responses:
enabled: true
- description: The total number of HTTP responses sent to clients, since NGINX was last started or reloaded.
+ description: "The total number of HTTP responses sent to clients."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "responses"
- attributes:
- - nginx.zone.name
- - nginx.zone.type
- nginx.http.response.count:
- enabled: true
- description: The total number of HTTP responses sent to clients since the last collection interval, grouped by status code range.
- gauge:
- value_type: int
- unit: "responses"
- attributes:
- - nginx.status_range
- - nginx.zone.name
+ attributes:
+ - nginx.zone.name
- nginx.zone.type
nginx.http.response.status:
enabled: true
- description: The total number of responses since NGINX was last started or reloaded, grouped by status code range.
+ description: "The total number of responses since NGINX was last started or reloaded, grouped by status code range."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "responses"
- attributes:
- - nginx.status_range
- - nginx.zone.name
+ attributes:
+ - nginx.status_range
+ - nginx.zone.name
+ - nginx.zone.type
+ nginx.http.response.count:
+ enabled: true
+ description: "The total number of HTTP responses, since the last collection interval and grouped by status code range."
+ gauge:
+ value_type: int
+ unit: "responses"
+ attributes:
+ - nginx.status_range
+ - nginx.zone.name
- nginx.zone.type
nginx.http.upstream.keepalive.count:
enabled: true
- description: The current number of idle keepalive connections per HTTP upstream.
+ description: "The current number of idle keepalive connections per HTTP upstream."
gauge:
value_type: int
unit: "connections"
- attributes:
+ attributes:
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
nginx.http.upstream.peer.io:
enabled: true
- description: The total number of byte IO per HTTP upstream peer.
+ description: "The total number of byte IO per HTTP upstream peer."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "bytes"
- attributes:
- - nginx.io.direction
+ attributes:
+ - nginx.io.direction
+ - nginx.peer.address
+ - nginx.peer.name
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
- - nginx.peer.address
- - nginx.peer.name
nginx.http.upstream.peer.connection.count:
enabled: true
- description: The average number of active connections per HTTP upstream peer.
+ description: "The average number of active connections per HTTP upstream peer."
gauge:
value_type: int
unit: "connections"
- attributes:
+ attributes:
+ - nginx.peer.address
+ - nginx.peer.name
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
- - nginx.peer.address
- - nginx.peer.name
nginx.http.upstream.peer.count:
enabled: true
- description: The current count of peers on the HTTP upstream grouped by state.
+ description: "The current count of peers on the HTTP upstream grouped by state."
gauge:
value_type: int
unit: "peers"
- attributes:
- - nginx.peer.state
+ attributes:
+ - nginx.peer.state
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
nginx.http.upstream.peer.fails:
enabled: true
- description: The total number of unsuccessful attempts to communicate with the HTTP upstream peer.
+ description: "The total number of unsuccessful attempts to communicate with the HTTP upstream peer."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "attempts"
- attributes:
+ attributes:
+ - nginx.peer.address
+ - nginx.peer.name
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
- - nginx.peer.address
- - nginx.peer.name
nginx.http.upstream.peer.header.time:
enabled: true
- description: The average time to get the response header from the HTTP upstream peer.
+ description: "The average time to get the response header from the HTTP upstream peer."
gauge:
value_type: int
- unit: ms
- attributes:
+ unit: "ms"
+ attributes:
+ - nginx.peer.address
+ - nginx.peer.name
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
- - nginx.peer.address
- - nginx.peer.name
nginx.http.upstream.peer.health_checks:
enabled: true
- description: The total number of health check requests made to a HTTP upstream peer.
+ description: "The total number of health check requests made to a HTTP upstream peer."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "requests"
- attributes:
- - nginx.health_check
+ attributes:
+ - nginx.health_check
+ - nginx.peer.address
+ - nginx.peer.name
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
- - nginx.peer.address
- - nginx.peer.name
nginx.http.upstream.peer.requests:
enabled: true
- description: The total number of client requests forwarded to the HTTP upstream peer.
+ description: "The total number of client requests forwarded to the HTTP upstream peer."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "requests"
- attributes:
+ attributes:
+ - nginx.peer.address
+ - nginx.peer.name
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
- - nginx.peer.address
- - nginx.peer.name
nginx.http.upstream.peer.response.time:
enabled: true
- description: The average time to get the full response from the HTTP upstream peer.
+ description: "The average time to get the full response from the HTTP upstream peer."
gauge:
value_type: int
- unit: ms
- attributes:
+ unit: "ms"
+ attributes:
+ - nginx.peer.address
+ - nginx.peer.name
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
- - nginx.peer.address
- - nginx.peer.name
nginx.http.upstream.peer.responses:
enabled: true
- description: The total number of responses obtained from the HTTP upstream peer grouped by status range.
+ description: "The total number of responses obtained from the HTTP upstream peer grouped by status range."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "responses"
- attributes:
- - nginx.status_range
+ attributes:
+ - nginx.peer.address
+ - nginx.peer.name
+ - nginx.status_range
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
- - nginx.peer.address
- - nginx.peer.name
nginx.http.upstream.peer.unavailables:
enabled: true
- description: Number of times the server became unavailable for client requests (“unavail”).
+ description: "The total number of times the server became unavailable for client requests ('unavail')."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "requests"
- attributes:
+ attributes:
+ - nginx.peer.address
+ - nginx.peer.name
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
- - nginx.peer.address
- - nginx.peer.name
nginx.http.upstream.peer.state:
enabled: true
- description: Current state of an upstream peer in deployment.
+ description: "Current state of an upstream peer in deployment."
gauge:
value_type: int
unit: "is_deployed"
- attributes:
- - nginx.peer.state
+ attributes:
+ - nginx.peer.address
+ - nginx.peer.name
+ - nginx.peer.state
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
- - nginx.peer.address
- - nginx.peer.name
nginx.http.upstream.queue.limit:
enabled: true
- description: The maximum number of requests that can be in the queue at the same time.
+ description: "The maximum number of requests that can be in the queue at the same time."
gauge:
value_type: int
unit: "requests"
- attributes:
+ attributes:
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
nginx.http.upstream.queue.overflows:
enabled: true
- description: The total number of requests rejected due to the queue overflow.
+ description: "The total number of requests rejected due to the queue overflow."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "responses"
- attributes:
+ attributes:
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
nginx.http.upstream.queue.usage:
enabled: true
- description: The current number of requests in the queue.
+ description: "The current number of requests in the queue."
gauge:
value_type: int
unit: "requests"
- attributes:
+ attributes:
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
nginx.http.upstream.zombie.count:
enabled: true
- description: The current number of upstream peers removed from the group but still processing active client requests.
+ description: "The current number of upstream peers removed from the group but still processing active client requests."
gauge:
value_type: int
unit: "is_deployed"
- attributes:
+ attributes:
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
+ nginx.cache.bytes_read:
+ enabled: true
+ description: "The total number of bytes read from the cache or proxied server."
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation_temporality: cumulative
+ unit: "bytes"
+ attributes:
+ - nginx.cache.name
+ - nginx.cache.outcome
+ nginx.cache.responses:
+ enabled: true
+ description: "The total number of responses read from the cache or proxied server."
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation_temporality: cumulative
+ unit: "responses"
+ attributes:
+ - nginx.cache.name
+ - nginx.cache.outcome
+ nginx.cache.memory.limit:
+ enabled: true
+ description: "The limit on the maximum size of the cache specified in the configuration."
+ gauge:
+ value_type: int
+ unit: "bytes"
+ attributes:
+ - nginx.cache.name
+ nginx.cache.memory.usage:
+ enabled: true
+ description: "The current size of the cache."
+ gauge:
+ value_type: int
+ unit: "bytes"
+ attributes:
+ - nginx.cache.name
nginx.slab.page.free:
enabled: true
- description: The current number of free memory pages.
+ description: "The current number of free memory pages."
gauge:
value_type: int
unit: "pages"
- attributes:
+ attributes:
- nginx.zone.name
nginx.slab.page.limit:
enabled: true
- description: The total number of memory pages (free and used).
+ description: "The total number of memory pages (free and used)."
gauge:
value_type: int
unit: "pages"
- attributes:
+ attributes:
- nginx.zone.name
nginx.slab.page.usage:
enabled: true
- description: The current number of used memory pages.
+ description: "The current number of used memory pages."
gauge:
value_type: int
unit: "pages"
- attributes:
+ attributes:
- nginx.zone.name
nginx.slab.page.utilization:
enabled: true
- description: The current percentage of used memory pages.
+ description: "The current percentage of used memory pages."
gauge:
- value_type: double
+ value_type: int
unit: "pages"
- attributes:
+ attributes:
- nginx.zone.name
nginx.slab.slot.usage:
enabled: true
- description: The current number of used memory slots.
+ description: "The current number of used memory slots."
gauge:
value_type: int
unit: "slots"
- attributes:
- - nginx.slab.slot.limit
+ attributes:
+ - nginx.slab.slot.limit
- nginx.zone.name
nginx.slab.slot.free:
enabled: true
- description: The current number of free memory slots.
+ description: "The current number of free memory slots."
gauge:
value_type: int
unit: "slots"
- attributes:
- - nginx.slab.slot.limit
+ attributes:
+ - nginx.slab.slot.limit
- nginx.zone.name
nginx.slab.slot.allocations:
enabled: true
- description: The number of attempts to allocate memory of specified size.
+ description: "The number of attempts to allocate memory of specified size."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "allocations"
- attributes:
- - nginx.slab.slot.limit
- - nginx.slab.slot.allocation.result
+ attributes:
+ - nginx.slab.slot.allocation.result
+ - nginx.slab.slot.limit
- nginx.zone.name
nginx.ssl.handshakes:
enabled: true
- description: The total number of SSL handshakes.
+ description: "The total number of SSL handshakes."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "handshakes"
- attributes:
+ attributes:
+ - nginx.ssl.handshake.reason
- nginx.ssl.status
- - nginx.ssl.handshake.reason
nginx.ssl.certificate.verify_failures:
enabled: true
- description: The total number of SSL certificate verification failures.
+ description: "The total number of SSL certificate verification failures."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "certificates"
- attributes:
+ attributes:
- nginx.ssl.verify_failure.reason
nginx.stream.io:
enabled: true
- description: The total number of Stream byte IO.
+ description: "The total number of Stream byte IO."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "bytes"
- attributes:
- - nginx.io.direction
+ attributes:
+ - nginx.io.direction
- nginx.zone.name
nginx.stream.connection.accepted:
enabled: true
- description: The total number of connections accepted from clients.
+ description: "The total number of connections accepted from clients."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "connections"
- attributes:
+ attributes:
- nginx.zone.name
nginx.stream.connection.discarded:
enabled: true
- description: Total number of connections completed without creating a session.
+ description: "Total number of connections completed without creating a session."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "connections"
- attributes:
+ attributes:
- nginx.zone.name
nginx.stream.connection.processing.count:
enabled: true
- description: The number of client connections that are currently being processed.
+ description: "The number of client connections that are currently being processed."
gauge:
value_type: int
unit: "connections"
- attributes:
+ attributes:
- nginx.zone.name
nginx.stream.session.status:
enabled: true
- description: The total number of completed sessions.
+ description: "The total number of completed sessions."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "sessions"
- attributes:
- - nginx.status_range
+ attributes:
+ - nginx.status_range
- nginx.zone.name
nginx.stream.upstream.peer.io:
enabled: true
- description: The total number of Stream Upstream Peer byte IO.
+ description: "The total number of Stream Upstream Peer byte IO."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "bytes"
- attributes:
- - nginx.io.direction
+ attributes:
+ - nginx.io.direction
+ - nginx.peer.address
+ - nginx.peer.name
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
- - nginx.peer.address
- - nginx.peer.name
nginx.stream.upstream.peer.connection.count:
enabled: true
- description: The current number of Stream Upstream Peer connections.
+ description: "The current number of Stream Upstream Peer connections."
gauge:
value_type: int
unit: "connections"
- attributes:
+ attributes:
+ - nginx.peer.address
+ - nginx.peer.name
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
- - nginx.peer.address
- - nginx.peer.name
nginx.stream.upstream.peer.connection.time:
enabled: true
- description: The average time to connect to the stream upstream peer.
+ description: "The average time to connect to the stream upstream peer."
gauge:
value_type: int
- unit: ms
- attributes:
+ unit: "ms"
+ attributes:
+ - nginx.peer.address
+ - nginx.peer.name
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
- - nginx.peer.address
- - nginx.peer.name
nginx.stream.upstream.peer.connections:
enabled: true
- description: The total number of client connections forwarded to this stream upstream peer.
+ description: "The total number of client connections forwarded to this stream upstream peer."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "connections"
- attributes:
+ attributes:
+ - nginx.peer.address
+ - nginx.peer.name
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
- - nginx.peer.address
- - nginx.peer.name
nginx.stream.upstream.peer.count:
enabled: true
- description: The current number of stream upstream peers grouped by state.
+ description: "The current number of stream upstream peers grouped by state."
gauge:
value_type: int
- monotonic: true
- aggregation_temporality: cumulative
unit: "peers"
- attributes:
- - nginx.peer.state
+ attributes:
+ - nginx.peer.state
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
nginx.stream.upstream.peer.fails:
enabled: true
- description: The total number of unsuccessful attempts to communicate with the stream upstream peer.
+ description: "The total number of unsuccessful attempts to communicate with the stream upstream peer."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "attempts"
- attributes:
+ attributes:
+ - nginx.peer.address
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
- - nginx.peer.address
nginx.stream.upstream.peer.health_checks:
enabled: true
- description: The total number of health check requests made to the stream upstream peer.
+ description: "The total number of health check requests made to the stream upstream peer."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "requests"
- attributes:
- - nginx.health_check
+ attributes:
+ - nginx.health_check
+ - nginx.peer.address
+ - nginx.peer.name
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
- - nginx.peer.address
- - nginx.peer.name
nginx.stream.upstream.peer.response.time:
enabled: true
- description: The average time to receive the last byte of data for the stream upstream peer.
+ description: "The average time to receive the last byte of data for the stream upstream peer."
gauge:
value_type: int
- unit: ms
- attributes:
+ unit: "ms"
+ attributes:
+ - nginx.peer.address
+ - nginx.peer.name
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
- - nginx.peer.address
- - nginx.peer.name
nginx.stream.upstream.peer.ttfb.time:
enabled: true
- description: The average time to receive the first byte of data for the stream upstream peer.
+ description: "The average time to receive the first byte of data for the stream upstream peer."
gauge:
value_type: int
- unit: ms
- attributes:
+ unit: "ms"
+ attributes:
+ - nginx.peer.address
+ - nginx.peer.name
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
- - nginx.peer.address
- - nginx.peer.name
nginx.stream.upstream.peer.unavailables:
enabled: true
- description: How many times the server became unavailable for client connections (state “unavail”) due to the number of
- unsuccessful attempts reaching the max_fails threshold.
+ description: "How many times the server became unavailable for client connections (state 'unavail') due to the number of unsuccessful attempts reaching the max_fails threshold."
sum:
value_type: int
monotonic: true
aggregation_temporality: cumulative
unit: "requests"
- attributes:
+ attributes:
+ - nginx.peer.address
+ - nginx.peer.name
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
- - nginx.peer.address
- - nginx.peer.name
nginx.stream.upstream.peer.state:
enabled: true
- description: Current state of upstream peers in deployment. If any of the upstream peers in the deployment match the
- given state then the value will be 1. If no upstream peer is a match then the value will be 0.
- sum:
+ description: "Current state of upstream peers in deployment."
+ gauge:
value_type: int
- monotonic: true
- aggregation_temporality: cumulative
unit: "deployments"
- attributes:
- - nginx.peer.state
+ attributes:
+ - nginx.peer.address
+ - nginx.peer.name
+ - nginx.peer.state
+ - nginx.upstream.name
- nginx.zone.name
- - nginx.upstream.name
- - nginx.peer.address
- - nginx.peer.name
nginx.stream.upstream.zombie.count:
enabled: true
- description: The current number of peers removed from the group but still processing active client connections.
+ description: "The current number of peers removed from the group but still processing active client connections."
gauge:
value_type: int
unit: "deployments"
- attributes:
- - nginx.zone.name
- - nginx.upstream.name
+ attributes:
+ - nginx.upstream.name
+ - nginx.zone.name
\ No newline at end of file
diff --git a/internal/collector/nginxplusreceiver/scraper.go b/internal/collector/nginxplusreceiver/scraper.go
index a9800addf..1332925ad 100644
--- a/internal/collector/nginxplusreceiver/scraper.go
+++ b/internal/collector/nginxplusreceiver/scraper.go
@@ -252,152 +252,152 @@ func (nps *NginxPlusScraper) recordStreamMetrics(stats *plusapi.Stats, now pcomm
now,
int64(peer.Received),
metadata.AttributeNginxIoDirectionReceive,
- upstream.Zone,
- upstreamName,
peer.Server,
peer.Name,
+ upstreamName,
+ upstream.Zone,
)
nps.mb.RecordNginxStreamUpstreamPeerIoDataPoint(
now,
int64(peer.Sent),
metadata.AttributeNginxIoDirectionTransmit,
- upstream.Zone,
- upstreamName,
peer.Server,
peer.Name,
+ upstreamName,
+ upstream.Zone,
)
nps.mb.RecordNginxStreamUpstreamPeerConnectionCountDataPoint(
now,
int64(peer.Active),
- upstream.Zone,
- upstreamName,
peer.Server,
peer.Name,
+ upstreamName,
+ upstream.Zone,
)
nps.mb.RecordNginxStreamUpstreamPeerConnectionTimeDataPoint(
now,
int64(peer.ConnectTime),
- upstream.Zone,
- upstreamName,
peer.Server,
peer.Name,
+ upstreamName,
+ upstream.Zone,
)
nps.mb.RecordNginxStreamUpstreamPeerConnectionsDataPoint(
now,
int64(peer.Connections),
- upstream.Zone,
- upstreamName,
peer.Server,
peer.Name,
+ upstreamName,
+ upstream.Zone,
)
nps.mb.RecordNginxStreamUpstreamPeerHealthChecksDataPoint(
now,
int64(peer.HealthChecks.Checks),
0,
- upstream.Zone,
- upstreamName,
peer.Server,
peer.Name,
+ upstreamName,
+ upstream.Zone,
)
nps.mb.RecordNginxStreamUpstreamPeerHealthChecksDataPoint(
now,
int64(peer.HealthChecks.Fails),
metadata.AttributeNginxHealthCheckFAIL,
- upstream.Zone,
- upstreamName,
peer.Server,
peer.Name,
+ upstreamName,
+ upstream.Zone,
)
nps.mb.RecordNginxStreamUpstreamPeerHealthChecksDataPoint(
now,
int64(peer.HealthChecks.Unhealthy),
metadata.AttributeNginxHealthCheckUNHEALTHY,
- upstream.Zone,
- upstreamName,
peer.Server,
peer.Name,
+ upstreamName,
+ upstream.Zone,
)
nps.mb.RecordNginxStreamUpstreamPeerResponseTimeDataPoint(
now,
int64(peer.ResponseTime),
- upstream.Zone,
- upstreamName,
peer.Server,
peer.Name,
+ upstreamName,
+ upstream.Zone,
)
nps.mb.RecordNginxStreamUpstreamPeerTtfbTimeDataPoint(
now,
int64(peer.FirstByteTime),
- upstream.Zone,
- upstreamName,
peer.Server,
peer.Name,
+ upstreamName,
+ upstream.Zone,
)
nps.mb.RecordNginxStreamUpstreamPeerUnavailablesDataPoint(
now,
int64(peer.Unavail),
- upstream.Zone,
- upstreamName,
peer.Server,
peer.Name,
+ upstreamName,
+ upstream.Zone,
)
nps.mb.RecordNginxStreamUpstreamPeerStateDataPoint(
now,
boolToInt64(peer.State == peerStateChecking),
- metadata.AttributeNginxPeerStateCHECKING,
- upstream.Zone,
- upstreamName,
peer.Server,
peer.Name,
+ metadata.AttributeNginxPeerStateCHECKING,
+ upstreamName,
+ upstream.Zone,
)
nps.mb.RecordNginxStreamUpstreamPeerStateDataPoint(
now,
boolToInt64(peer.State == peerStateDown),
- metadata.AttributeNginxPeerStateDOWN,
- upstream.Zone,
- upstreamName,
peer.Server,
peer.Name,
+ metadata.AttributeNginxPeerStateDOWN,
+ upstreamName,
+ upstream.Zone,
)
nps.mb.RecordNginxStreamUpstreamPeerStateDataPoint(
now,
boolToInt64(peer.State == peerStateDraining),
- metadata.AttributeNginxPeerStateDRAINING,
- upstream.Zone,
- upstreamName,
peer.Server,
peer.Name,
+ metadata.AttributeNginxPeerStateDRAINING,
+ upstreamName,
+ upstream.Zone,
)
nps.mb.RecordNginxStreamUpstreamPeerStateDataPoint(
now,
boolToInt64(peer.State == peerStateUnavail),
- metadata.AttributeNginxPeerStateUNAVAILABLE,
- upstream.Zone,
- upstreamName,
peer.Server,
peer.Name,
+ metadata.AttributeNginxPeerStateUNAVAILABLE,
+ upstreamName,
+ upstream.Zone,
)
nps.mb.RecordNginxStreamUpstreamPeerStateDataPoint(
now,
boolToInt64(peer.State == peerStateUnhealthy),
- metadata.AttributeNginxPeerStateUNHEALTHY,
- upstream.Zone,
- upstreamName,
peer.Server,
peer.Name,
+ metadata.AttributeNginxPeerStateUNHEALTHY,
+ upstreamName,
+ upstream.Zone,
)
nps.mb.RecordNginxStreamUpstreamPeerStateDataPoint(
now,
boolToInt64(peer.State == peerStateUp),
- metadata.AttributeNginxPeerStateUP,
- upstream.Zone,
- upstreamName,
peer.Server,
peer.Name,
+ metadata.AttributeNginxPeerStateUP,
+ upstreamName,
+ upstream.Zone,
)
peerStates[peer.State]++
@@ -407,46 +407,46 @@ func (nps *NginxPlusScraper) recordStreamMetrics(stats *plusapi.Stats, now pcomm
now,
int64(peerStates[peerStateChecking]),
metadata.AttributeNginxPeerStateCHECKING,
- upstream.Zone,
upstreamName,
+ upstream.Zone,
)
nps.mb.RecordNginxStreamUpstreamPeerCountDataPoint(
now,
int64(peerStates[peerStateDown]),
metadata.AttributeNginxPeerStateDOWN,
- upstream.Zone,
upstreamName,
+ upstream.Zone,
)
nps.mb.RecordNginxStreamUpstreamPeerCountDataPoint(
now,
int64(peerStates[peerStateDraining]),
metadata.AttributeNginxPeerStateDRAINING,
- upstream.Zone,
upstreamName,
+ upstream.Zone,
)
nps.mb.RecordNginxStreamUpstreamPeerCountDataPoint(
now,
int64(peerStates[peerStateUnavail]),
metadata.AttributeNginxPeerStateUNAVAILABLE,
- upstream.Zone,
upstreamName,
+ upstream.Zone,
)
nps.mb.RecordNginxStreamUpstreamPeerCountDataPoint(
now,
int64(peerStates[peerStateUnhealthy]),
metadata.AttributeNginxPeerStateUNHEALTHY,
- upstream.Zone,
upstreamName,
+ upstream.Zone,
)
nps.mb.RecordNginxStreamUpstreamPeerCountDataPoint(
now,
int64(peerStates[peerStateUp]),
metadata.AttributeNginxPeerStateUP,
- upstream.Zone,
upstreamName,
+ upstream.Zone,
)
- nps.mb.RecordNginxStreamUpstreamZombieCountDataPoint(now, int64(upstream.Zombies), upstream.Zone, upstreamName)
+ nps.mb.RecordNginxStreamUpstreamZombieCountDataPoint(now, int64(upstream.Zombies), upstreamName, upstream.Zone)
}
}
@@ -454,39 +454,39 @@ func (nps *NginxPlusScraper) recordSSLMetrics(now pcommon.Timestamp, stats *plus
nps.mb.RecordNginxSslHandshakesDataPoint(
now,
int64(stats.SSL.HandshakesFailed),
- metadata.AttributeNginxSslStatusFAILED,
0,
+ metadata.AttributeNginxSslStatusFAILED,
)
nps.mb.RecordNginxSslHandshakesDataPoint(now, int64(stats.SSL.Handshakes), 0, 0)
nps.mb.RecordNginxSslHandshakesDataPoint(
now,
int64(stats.SSL.SessionReuses),
- metadata.AttributeNginxSslStatusREUSE,
0,
+ metadata.AttributeNginxSslStatusREUSE,
)
nps.mb.RecordNginxSslHandshakesDataPoint(
now,
int64(stats.SSL.NoCommonProtocol),
- metadata.AttributeNginxSslStatusFAILED,
metadata.AttributeNginxSslHandshakeReasonNOCOMMONPROTOCOL,
+ metadata.AttributeNginxSslStatusFAILED,
)
nps.mb.RecordNginxSslHandshakesDataPoint(
now,
int64(stats.SSL.NoCommonCipher),
- metadata.AttributeNginxSslStatusFAILED,
metadata.AttributeNginxSslHandshakeReasonNOCOMMONCIPHER,
+ metadata.AttributeNginxSslStatusFAILED,
)
nps.mb.RecordNginxSslHandshakesDataPoint(
now,
int64(stats.SSL.HandshakeTimeout),
- metadata.AttributeNginxSslStatusFAILED,
metadata.AttributeNginxSslHandshakeReasonTIMEOUT,
+ metadata.AttributeNginxSslStatusFAILED,
)
nps.mb.RecordNginxSslHandshakesDataPoint(
now,
int64(stats.SSL.PeerRejectedCert),
- metadata.AttributeNginxSslStatusFAILED,
metadata.AttributeNginxSslHandshakeReasonCERTREJECTED,
+ metadata.AttributeNginxSslStatusFAILED,
)
nps.mb.RecordNginxSslCertificateVerifyFailuresDataPoint(
@@ -532,15 +532,15 @@ func (nps *NginxPlusScraper) recordSlabPageMetrics(stats *plusapi.Stats, now pco
nps.mb.RecordNginxSlabSlotAllocationsDataPoint(
now,
int64(slot.Fails),
- slotNumber,
metadata.AttributeNginxSlabSlotAllocationResultFAILURE,
+ slotNumber,
name,
)
nps.mb.RecordNginxSlabSlotAllocationsDataPoint(
now,
int64(slot.Reqs),
- slotNumber,
metadata.AttributeNginxSlabSlotAllocationResultSUCCESS,
+ slotNumber,
name,
)
}
@@ -549,7 +549,7 @@ func (nps *NginxPlusScraper) recordSlabPageMetrics(stats *plusapi.Stats, now pco
func (nps *NginxPlusScraper) recordHTTPUpstreamPeerMetrics(stats *plusapi.Stats, now pcommon.Timestamp) {
for name, upstream := range stats.Upstreams {
- nps.mb.RecordNginxHTTPUpstreamKeepaliveCountDataPoint(now, int64(upstream.Keepalive), upstream.Zone, name)
+ nps.mb.RecordNginxHTTPUpstreamKeepaliveCountDataPoint(now, int64(upstream.Keepalive), name, upstream.Zone)
peerStates := make(map[string]int)
@@ -558,209 +558,209 @@ func (nps *NginxPlusScraper) recordHTTPUpstreamPeerMetrics(stats *plusapi.Stats,
now,
int64(peer.Received),
metadata.AttributeNginxIoDirectionReceive,
- upstream.Zone,
- name,
peer.Server,
peer.Name,
+ name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerIoDataPoint(
now,
int64(peer.Sent),
metadata.AttributeNginxIoDirectionTransmit,
- upstream.Zone,
- name,
peer.Server,
peer.Name,
+ name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerConnectionCountDataPoint(
now,
int64(peer.Active),
- upstream.Zone,
- name,
peer.Server,
peer.Name,
+ name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerFailsDataPoint(
now,
int64(peer.Fails),
- upstream.Zone,
- name,
peer.Server,
peer.Name,
+ name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerHeaderTimeDataPoint(
now,
int64(peer.HeaderTime),
- upstream.Zone,
- name,
peer.Server,
peer.Name,
+ name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerHealthChecksDataPoint(
now,
int64(peer.HealthChecks.Checks),
0,
- upstream.Zone,
- name,
peer.Server,
peer.Name,
+ name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerHealthChecksDataPoint(
now,
int64(peer.HealthChecks.Fails),
metadata.AttributeNginxHealthCheckFAIL,
- upstream.Zone,
- name,
peer.Server,
peer.Name,
+ name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerHealthChecksDataPoint(
now,
int64(peer.HealthChecks.Unhealthy),
metadata.AttributeNginxHealthCheckUNHEALTHY,
- upstream.Zone,
- name,
peer.Server,
peer.Name,
+ name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerRequestsDataPoint(
now,
int64(peer.Requests),
- upstream.Zone,
- name,
peer.Server,
peer.Name,
+ name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerResponseTimeDataPoint(
now,
int64(peer.ResponseTime),
- upstream.Zone,
- name,
peer.Server,
peer.Name,
+ name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerResponsesDataPoint(
now,
int64(peer.Responses.Total),
- 0,
- upstream.Zone,
- name,
peer.Server,
peer.Name,
+ 0,
+ name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerResponsesDataPoint(
now,
int64(peer.Responses.Responses1xx),
- metadata.AttributeNginxStatusRange1xx,
- upstream.Zone,
- name,
peer.Server,
peer.Name,
+ metadata.AttributeNginxStatusRange1xx,
+ name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerResponsesDataPoint(
now,
int64(peer.Responses.Responses2xx),
- metadata.AttributeNginxStatusRange2xx,
- upstream.Zone,
- name,
peer.Server,
peer.Name,
+ metadata.AttributeNginxStatusRange2xx,
+ name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerResponsesDataPoint(
now,
int64(peer.Responses.Responses3xx),
- metadata.AttributeNginxStatusRange3xx,
- upstream.Zone,
- name,
peer.Server,
peer.Name,
+ metadata.AttributeNginxStatusRange3xx,
+ name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerResponsesDataPoint(
now,
int64(peer.Responses.Responses4xx),
- metadata.AttributeNginxStatusRange4xx,
- upstream.Zone,
- name,
peer.Server,
peer.Name,
+ metadata.AttributeNginxStatusRange4xx,
+ name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerResponsesDataPoint(
now,
int64(peer.Responses.Responses5xx),
- metadata.AttributeNginxStatusRange5xx,
- upstream.Zone,
- name,
peer.Server,
peer.Name,
+ metadata.AttributeNginxStatusRange5xx,
+ name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerUnavailablesDataPoint(
now,
int64(peer.Unavail),
- upstream.Zone,
- name,
peer.Server,
peer.Name,
+ name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerStateDataPoint(
now,
boolToInt64(peer.State == peerStateChecking),
- metadata.AttributeNginxPeerStateCHECKING,
- upstream.Zone,
- name,
peer.Server,
peer.Name,
+ metadata.AttributeNginxPeerStateCHECKING,
+ name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerStateDataPoint(
now,
boolToInt64(peer.State == peerStateDown),
- metadata.AttributeNginxPeerStateDOWN,
- upstream.Zone,
- name,
peer.Server,
peer.Name,
+ metadata.AttributeNginxPeerStateDOWN,
+ name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerStateDataPoint(
now,
boolToInt64(peer.State == peerStateDraining),
- metadata.AttributeNginxPeerStateDRAINING,
- upstream.Zone,
- name,
peer.Server,
peer.Name,
+ metadata.AttributeNginxPeerStateDRAINING,
+ name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerStateDataPoint(
now,
boolToInt64(peer.State == peerStateUnavail),
- metadata.AttributeNginxPeerStateUNAVAILABLE,
- upstream.Zone,
- name,
peer.Server,
peer.Name,
+ metadata.AttributeNginxPeerStateUNAVAILABLE,
+ name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerStateDataPoint(
now,
boolToInt64(peer.State == peerStateUnhealthy),
- metadata.AttributeNginxPeerStateUNHEALTHY,
- upstream.Zone,
- name,
peer.Server,
peer.Name,
+ metadata.AttributeNginxPeerStateUNHEALTHY,
+ name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerStateDataPoint(
now,
boolToInt64(peer.State == peerStateUp),
- metadata.AttributeNginxPeerStateUP,
- upstream.Zone,
- name,
peer.Server,
peer.Name,
+ metadata.AttributeNginxPeerStateUP,
+ name,
+ upstream.Zone,
)
peerStates[peer.State]++
@@ -770,49 +770,49 @@ func (nps *NginxPlusScraper) recordHTTPUpstreamPeerMetrics(stats *plusapi.Stats,
now,
int64(peerStates[peerStateChecking]),
metadata.AttributeNginxPeerStateCHECKING,
- upstream.Zone,
name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerCountDataPoint(
now,
int64(peerStates[peerStateDown]),
metadata.AttributeNginxPeerStateDOWN,
- upstream.Zone,
name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerCountDataPoint(
now,
int64(peerStates[peerStateDraining]),
metadata.AttributeNginxPeerStateDRAINING,
- upstream.Zone,
name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerCountDataPoint(
now,
int64(peerStates[peerStateUnavail]),
metadata.AttributeNginxPeerStateUNAVAILABLE,
- upstream.Zone,
name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerCountDataPoint(
now,
int64(peerStates[peerStateUnhealthy]),
metadata.AttributeNginxPeerStateUNHEALTHY,
- upstream.Zone,
name,
+ upstream.Zone,
)
nps.mb.RecordNginxHTTPUpstreamPeerCountDataPoint(
now,
int64(peerStates[peerStateUp]),
metadata.AttributeNginxPeerStateUP,
- upstream.Zone,
name,
+ upstream.Zone,
)
- nps.mb.RecordNginxHTTPUpstreamQueueLimitDataPoint(now, int64(upstream.Queue.MaxSize), upstream.Zone, name)
- nps.mb.RecordNginxHTTPUpstreamQueueOverflowsDataPoint(now, int64(upstream.Queue.Overflows), upstream.Zone, name)
- nps.mb.RecordNginxHTTPUpstreamQueueUsageDataPoint(now, int64(upstream.Queue.Size), upstream.Zone, name)
- nps.mb.RecordNginxHTTPUpstreamZombieCountDataPoint(now, int64(upstream.Zombies), upstream.Zone, name)
+ nps.mb.RecordNginxHTTPUpstreamQueueLimitDataPoint(now, int64(upstream.Queue.MaxSize), name, upstream.Zone)
+ nps.mb.RecordNginxHTTPUpstreamQueueOverflowsDataPoint(now, int64(upstream.Queue.Overflows), name, upstream.Zone)
+ nps.mb.RecordNginxHTTPUpstreamQueueUsageDataPoint(now, int64(upstream.Queue.Size), name, upstream.Zone)
+ nps.mb.RecordNginxHTTPUpstreamZombieCountDataPoint(now, int64(upstream.Zombies), name, upstream.Zone)
}
}
@@ -884,31 +884,36 @@ func (nps *NginxPlusScraper) recordServerZoneHTTPMetrics(sz plusapi.ServerZone,
int64(sz.Responses.Responses1xx)-nps.previousServerZoneResponses[szName].oneHundredStatusRange,
metadata.AttributeNginxStatusRange1xx,
szName,
- metadata.AttributeNginxZoneTypeSERVER)
+ metadata.AttributeNginxZoneTypeSERVER,
+ )
nps.mb.RecordNginxHTTPResponseCountDataPoint(now,
int64(sz.Responses.Responses2xx)-nps.previousServerZoneResponses[szName].twoHundredStatusRange,
metadata.AttributeNginxStatusRange2xx,
szName,
- metadata.AttributeNginxZoneTypeSERVER)
+ metadata.AttributeNginxZoneTypeSERVER,
+ )
nps.mb.RecordNginxHTTPResponseCountDataPoint(now,
int64(sz.Responses.Responses3xx)-nps.previousServerZoneResponses[szName].threeHundredStatusRange,
metadata.AttributeNginxStatusRange3xx,
szName,
- metadata.AttributeNginxZoneTypeSERVER)
+ metadata.AttributeNginxZoneTypeSERVER,
+ )
nps.mb.RecordNginxHTTPResponseCountDataPoint(now,
int64(sz.Responses.Responses4xx)-nps.previousServerZoneResponses[szName].fourHundredStatusRange,
metadata.AttributeNginxStatusRange4xx,
szName,
- metadata.AttributeNginxZoneTypeSERVER)
+ metadata.AttributeNginxZoneTypeSERVER,
+ )
nps.mb.RecordNginxHTTPResponseCountDataPoint(now,
int64(sz.Responses.Responses5xx)-nps.previousServerZoneResponses[szName].fiveHundredStatusRange,
metadata.AttributeNginxStatusRange5xx,
szName,
- metadata.AttributeNginxZoneTypeSERVER)
+ metadata.AttributeNginxZoneTypeSERVER,
+ )
respStatus := ResponseStatuses{
oneHundredStatusRange: int64(sz.Responses.Responses1xx),
@@ -991,31 +996,36 @@ func (nps *NginxPlusScraper) recordLocationZoneHTTPMetrics(lz plusapi.LocationZo
int64(lz.Responses.Responses1xx)-nps.previousLocationZoneResponses[lzName].oneHundredStatusRange,
metadata.AttributeNginxStatusRange1xx,
lzName,
- metadata.AttributeNginxZoneTypeLOCATION)
+ metadata.AttributeNginxZoneTypeLOCATION,
+ )
nps.mb.RecordNginxHTTPResponseCountDataPoint(now,
int64(lz.Responses.Responses2xx)-nps.previousLocationZoneResponses[lzName].twoHundredStatusRange,
metadata.AttributeNginxStatusRange2xx,
lzName,
- metadata.AttributeNginxZoneTypeLOCATION)
+ metadata.AttributeNginxZoneTypeLOCATION,
+ )
nps.mb.RecordNginxHTTPResponseCountDataPoint(now,
int64(lz.Responses.Responses3xx)-nps.previousLocationZoneResponses[lzName].threeHundredStatusRange,
metadata.AttributeNginxStatusRange3xx,
lzName,
- metadata.AttributeNginxZoneTypeLOCATION)
+ metadata.AttributeNginxZoneTypeLOCATION,
+ )
nps.mb.RecordNginxHTTPResponseCountDataPoint(now,
int64(lz.Responses.Responses4xx)-nps.previousLocationZoneResponses[lzName].fourHundredStatusRange,
metadata.AttributeNginxStatusRange4xx,
lzName,
- metadata.AttributeNginxZoneTypeLOCATION)
+ metadata.AttributeNginxZoneTypeLOCATION,
+ )
nps.mb.RecordNginxHTTPResponseCountDataPoint(now,
int64(lz.Responses.Responses5xx)-nps.previousLocationZoneResponses[lzName].fiveHundredStatusRange,
metadata.AttributeNginxStatusRange5xx,
lzName,
- metadata.AttributeNginxZoneTypeLOCATION)
+ metadata.AttributeNginxZoneTypeLOCATION,
+ )
respStatus := ResponseStatuses{
oneHundredStatusRange: int64(lz.Responses.Responses1xx),
@@ -1089,44 +1099,44 @@ func (nps *NginxPlusScraper) recordCacheMetrics(stats *plusapi.Stats, now pcommo
nps.mb.RecordNginxCacheBytesReadDataPoint(
now,
int64(cache.Bypass.Bytes),
- metadata.AttributeNginxCacheOutcomeBYPASS,
name,
+ metadata.AttributeNginxCacheOutcomeBYPASS,
)
nps.mb.RecordNginxCacheBytesReadDataPoint(
now,
int64(cache.Expired.Bytes),
- metadata.AttributeNginxCacheOutcomeEXPIRED,
name,
+ metadata.AttributeNginxCacheOutcomeEXPIRED,
)
nps.mb.RecordNginxCacheBytesReadDataPoint(
now,
int64(cache.Hit.Bytes),
- metadata.AttributeNginxCacheOutcomeHIT,
name,
+ metadata.AttributeNginxCacheOutcomeHIT,
)
nps.mb.RecordNginxCacheBytesReadDataPoint(
now,
int64(cache.Miss.Bytes),
- metadata.AttributeNginxCacheOutcomeMISS,
name,
+ metadata.AttributeNginxCacheOutcomeMISS,
)
nps.mb.RecordNginxCacheBytesReadDataPoint(
now,
int64(cache.Revalidated.Bytes),
- metadata.AttributeNginxCacheOutcomeREVALIDATED,
name,
+ metadata.AttributeNginxCacheOutcomeREVALIDATED,
)
nps.mb.RecordNginxCacheBytesReadDataPoint(
now,
int64(cache.Stale.Bytes),
- metadata.AttributeNginxCacheOutcomeSTALE,
name,
+ metadata.AttributeNginxCacheOutcomeSTALE,
)
nps.mb.RecordNginxCacheBytesReadDataPoint(
now,
int64(cache.Updating.Bytes),
- metadata.AttributeNginxCacheOutcomeUPDATING,
name,
+ metadata.AttributeNginxCacheOutcomeUPDATING,
)
nps.mb.RecordNginxCacheMemoryLimitDataPoint(now, int64(cache.MaxSize), name)
@@ -1135,44 +1145,44 @@ func (nps *NginxPlusScraper) recordCacheMetrics(stats *plusapi.Stats, now pcommo
nps.mb.RecordNginxCacheResponsesDataPoint(
now,
int64(cache.Bypass.Responses),
- metadata.AttributeNginxCacheOutcomeBYPASS,
name,
+ metadata.AttributeNginxCacheOutcomeBYPASS,
)
nps.mb.RecordNginxCacheResponsesDataPoint(
now,
int64(cache.Expired.Responses),
- metadata.AttributeNginxCacheOutcomeEXPIRED,
name,
+ metadata.AttributeNginxCacheOutcomeEXPIRED,
)
nps.mb.RecordNginxCacheResponsesDataPoint(
now,
int64(cache.Hit.Responses),
- metadata.AttributeNginxCacheOutcomeHIT,
name,
+ metadata.AttributeNginxCacheOutcomeHIT,
)
nps.mb.RecordNginxCacheResponsesDataPoint(
now,
int64(cache.Miss.Responses),
- metadata.AttributeNginxCacheOutcomeMISS,
name,
+ metadata.AttributeNginxCacheOutcomeMISS,
)
nps.mb.RecordNginxCacheResponsesDataPoint(
now,
int64(cache.Revalidated.Responses),
- metadata.AttributeNginxCacheOutcomeREVALIDATED,
name,
+ metadata.AttributeNginxCacheOutcomeREVALIDATED,
)
nps.mb.RecordNginxCacheResponsesDataPoint(
now,
int64(cache.Stale.Responses),
- metadata.AttributeNginxCacheOutcomeSTALE,
name,
+ metadata.AttributeNginxCacheOutcomeSTALE,
)
nps.mb.RecordNginxCacheResponsesDataPoint(
now,
int64(cache.Updating.Responses),
- metadata.AttributeNginxCacheOutcomeUPDATING,
name,
+ metadata.AttributeNginxCacheOutcomeUPDATING,
)
}
}
diff --git a/internal/collector/nginxplusreceiver/scraper_test.go b/internal/collector/nginxplusreceiver/scraper_test.go
index cd02112c3..7dfe82e4c 100644
--- a/internal/collector/nginxplusreceiver/scraper_test.go
+++ b/internal/collector/nginxplusreceiver/scraper_test.go
@@ -38,22 +38,68 @@ func TestScraper(t *testing.T) {
// To test the nginx.http.response.count metric calculation we need to set the previousLocationZoneResponses &
// previousSeverZoneResponses then call scrape a second time as the first time it is called the previous responses
// are set using the API
+
+ /* Initial metrics for location zone:
+ {
+ "location_test": {
+ "requests": 34,
+ "responses": {
+ "1xx": 7,
+ "2xx": 31,
+ "3xx": 0,
+ "4xx": 3,
+ "5xx": 0,
+ "codes": {
+ "200": 31,
+ "404": 3
+ },
+ "total": 34
+ },
+ "discarded": 0,
+ "received": 3609,
+ "sent": 23265
+ }
+ }
+ */
scraper.previousLocationZoneResponses = map[string]ResponseStatuses{
"location_test": {
- oneHundredStatusRange: 3, // 4
- twoHundredStatusRange: 29, // 2
+ oneHundredStatusRange: 3, // delta 4
+ twoHundredStatusRange: 29, // delta 2
threeHundredStatusRange: 0,
- fourHundredStatusRange: 1, // 2
+ fourHundredStatusRange: 1, // delta 2
fiveHundredStatusRange: 0,
},
}
+ /* Initial metrics for server zone:
+ {
+ "test": {
+ "processing": 1,
+ "requests": 32,
+ "responses": {
+ "1xx": 5,
+ "2xx": 29,
+ "3xx": 0,
+ "4xx": 2,
+ "5xx": 0,
+ "codes": {
+ "200": 29,
+ "404": 2
+ },
+ "total": 31
+ },
+ "discarded": 0,
+ "received": 3312,
+ "sent": 21860
+ }
+ }
+ */
scraper.previousServerZoneResponses = map[string]ResponseStatuses{
"test": {
- oneHundredStatusRange: 3, // 2
- twoHundredStatusRange: 0, // 29
+ oneHundredStatusRange: 3, // delta 2
+ twoHundredStatusRange: 0, // delta 29
threeHundredStatusRange: 0,
- fourHundredStatusRange: 1, // 1
+ fourHundredStatusRange: 1, // delta 1
fiveHundredStatusRange: 0,
},
}
diff --git a/internal/collector/nginxplusreceiver/testdata/expected.yaml b/internal/collector/nginxplusreceiver/testdata/expected.yaml
index d3d4edcc4..d39c2a1d1 100644
--- a/internal/collector/nginxplusreceiver/testdata/expected.yaml
+++ b/internal/collector/nginxplusreceiver/testdata/expected.yaml
@@ -48,6 +48,123 @@ resourceMetrics:
stringValue: ACTIVE
isMonotonic: true
unit: connections
+ - description: The total number of HTTP responses, since the last collection interval and grouped by status code range.
+ name: nginx.http.response.count
+ gauge:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "2"
+ attributes:
+ - key: nginx.status_range
+ value:
+ stringValue: 1xx
+ - key: nginx.zone.name
+ value:
+ stringValue: test
+ - key: nginx.zone.type
+ value:
+ stringValue: SERVER
+ - asInt: "29"
+ attributes:
+ - key: nginx.status_range
+ value:
+ stringValue: 2xx
+ - key: nginx.zone.name
+ value:
+ stringValue: test
+ - key: nginx.zone.type
+ value:
+ stringValue: SERVER
+ - asInt: "0"
+ attributes:
+ - key: nginx.status_range
+ value:
+ stringValue: 3xx
+ - key: nginx.zone.name
+ value:
+ stringValue: test
+ - key: nginx.zone.type
+ value:
+ stringValue: SERVER
+ - asInt: "1"
+ attributes:
+ - key: nginx.status_range
+ value:
+ stringValue: 4xx
+ - key: nginx.zone.name
+ value:
+ stringValue: test
+ - key: nginx.zone.type
+ value:
+ stringValue: SERVER
+ - asInt: "0"
+ attributes:
+ - key: nginx.status_range
+ value:
+ stringValue: 5xx
+ - key: nginx.zone.name
+ value:
+ stringValue: test
+ - key: nginx.zone.type
+ value:
+ stringValue: SERVER
+ - asInt: "4"
+ attributes:
+ - key: nginx.status_range
+ value:
+ stringValue: 1xx
+ - key: nginx.zone.name
+ value:
+ stringValue: location_test
+ - key: nginx.zone.type
+ value:
+ stringValue: LOCATION
+ - asInt: "2"
+ attributes:
+ - key: nginx.status_range
+ value:
+ stringValue: 2xx
+ - key: nginx.zone.name
+ value:
+ stringValue: location_test
+ - key: nginx.zone.type
+ value:
+ stringValue: LOCATION
+ - asInt: "0"
+ attributes:
+ - key: nginx.status_range
+ value:
+ stringValue: 3xx
+ - key: nginx.zone.name
+ value:
+ stringValue: location_test
+ - key: nginx.zone.type
+ value:
+ stringValue: LOCATION
+ - asInt: "2"
+ attributes:
+ - key: nginx.status_range
+ value:
+ stringValue: 4xx
+ - key: nginx.zone.name
+ value:
+ stringValue: location_test
+ - key: nginx.zone.type
+ value:
+ stringValue: LOCATION
+ - asInt: "0"
+ attributes:
+ - key: nginx.status_range
+ value:
+ stringValue: 5xx
+ - key: nginx.zone.name
+ value:
+ stringValue: location_test
+ - key: nginx.zone.type
+ value:
+ stringValue: LOCATION
+ isMonotonic: true
+ unit: responses
- description: The total number of responses since NGINX was last started or reloaded, grouped by status code range.
name: nginx.http.response.status
sum:
@@ -164,7 +281,7 @@ resourceMetrics:
value:
stringValue: LOCATION
isMonotonic: true
- unit: responses
+ unit: responses
- description: The total number of requests completed without sending a response.
name: nginx.http.request.discarded
sum:
@@ -290,7 +407,7 @@ resourceMetrics:
timeUnixNano: "1000000"
isMonotonic: true
unit: requests
- - description: The total number of client requests received, since the last collection interval.
+ - description: The total number of client requests received, since the last collection interval.
name: nginx.http.request.count
gauge:
aggregationTemporality: 2
@@ -863,7 +980,7 @@ resourceMetrics:
stringValue: 5xx
isMonotonic: true
unit: responses
- - description: Number of times the server became unavailable for client requests (“unavail”).
+ - description: The total number of times the server became unavailable for client requests ('unavail').
name: nginx.http.upstream.peer.unavailables
sum:
aggregationTemporality: 2
@@ -1914,7 +2031,7 @@ resourceMetrics:
stringValue: upstream_test_zone
isMonotonic: true
unit: ms
- - description: How many times the server became unavailable for client connections (state “unavail”) due to the number of unsuccessful attempts reaching the max_fails threshold.
+ - description: How many times the server became unavailable for client connections (state 'unavail') due to the number of unsuccessful attempts reaching the max_fails threshold.
name: nginx.stream.upstream.peer.unavailables
sum:
aggregationTemporality: 2
@@ -1935,9 +2052,9 @@ resourceMetrics:
stringValue: upstream_test_zone
isMonotonic: true
unit: requests
- - description: Current state of upstream peers in deployment. If any of the upstream peers in the deployment match the given state then the value will be 1. If no upstream peer is a match then the value will be 0.
+ - description: Current state of upstream peers in deployment.
name: nginx.stream.upstream.peer.state
- sum:
+ gauge:
aggregationTemporality: 2
dataPoints:
- asInt: "1"
@@ -2132,122 +2249,6 @@ resourceMetrics:
stringValue: upstream_test_zone
isMonotonic: true
unit: deployments
- - description: The total number of HTTP responses sent to clients since the last collection interval, grouped by status code range.
- name: nginx.http.response.count
- gauge:
- aggregationTemporality: 2
- dataPoints:
- - asInt: "2"
- attributes:
- - key: nginx.status_range
- value:
- stringValue: 1xx
- - key: nginx.zone.name
- value:
- stringValue: test
- - key: nginx.zone.type
- value:
- stringValue: SERVER
- - asInt: "29"
- attributes:
- - key: nginx.status_range
- value:
- stringValue: 2xx
- - key: nginx.zone.name
- value:
- stringValue: test
- - key: nginx.zone.type
- value:
- stringValue: SERVER
- - asInt: "0"
- attributes:
- - key: nginx.status_range
- value:
- stringValue: 3xx
- - key: nginx.zone.name
- value:
- stringValue: test
- - key: nginx.zone.type
- value:
- stringValue: SERVER
- - asInt: "1"
- attributes:
- - key: nginx.status_range
- value:
- stringValue: 4xx
- - key: nginx.zone.name
- value:
- stringValue: test
- - key: nginx.zone.type
- value:
- stringValue: SERVER
- - asInt: "0"
- attributes:
- - key: nginx.status_range
- value:
- stringValue: 5xx
- - key: nginx.zone.name
- value:
- stringValue: test
- - key: nginx.zone.type
- value:
- stringValue: SERVER
- - asInt: "4"
- attributes:
- - key: nginx.status_range
- value:
- stringValue: 1xx
- - key: nginx.zone.name
- value:
- stringValue: location_test
- - key: nginx.zone.type
- value:
- stringValue: LOCATION
- - asInt: "2"
- attributes:
- - key: nginx.status_range
- value:
- stringValue: 2xx
- - key: nginx.zone.name
- value:
- stringValue: location_test
- - key: nginx.zone.type
- value:
- stringValue: LOCATION
- - asInt: "0"
- attributes:
- - key: nginx.status_range
- value:
- stringValue: 3xx
- - key: nginx.zone.name
- value:
- stringValue: location_test
- - key: nginx.zone.type
- value:
- stringValue: LOCATION
- - asInt: "2"
- attributes:
- - key: nginx.status_range
- value:
- stringValue: 4xx
- - key: nginx.zone.name
- value:
- stringValue: location_test
- - key: nginx.zone.type
- value:
- stringValue: LOCATION
- - asInt: "0"
- attributes:
- - key: nginx.status_range
- value:
- stringValue: 5xx
- - key: nginx.zone.name
- value:
- stringValue: location_test
- - key: nginx.zone.type
- value:
- stringValue: LOCATION
- unit: responses
scope:
name: otelcol/nginxplusreceiver
version: latest
diff --git a/templates/registry/metadata/attrs_metadata.yaml.j2 b/templates/registry/metadata/attrs_metadata.yaml.j2
new file mode 100644
index 000000000..f39ac018c
--- /dev/null
+++ b/templates/registry/metadata/attrs_metadata.yaml.j2
@@ -0,0 +1,41 @@
+# NOTE: THIS FILE IS AUTOGENERATED. DO NOT EDIT BY HAND.
+
+{%- set registry = ctx[0].id.split('.')[1] %}
+
+type: {{registry | safe}}
+scope_name: otelcol/{{registry | safe}}receiver
+
+status:
+ class: receiver
+ stability:
+ beta: [metrics]
+ distributions: [contrib]
+ codeowners:
+ active: [aphralG, dhurley, craigell, sean-breen, Rashmiti, CVanF5]
+
+resource_attributes:
+ instance.id:
+ description: The nginx instance id.
+ type: string
+ enabled: true
+ instance.type:
+ description: The nginx instance type (nginx, nginxplus).
+ type: string
+ enabled: true
+
+attributes:
+{%- for group in ctx %}
+{%- if group.type == "attribute_group" %}
+{%- for attribute in group.attributes | sort(attribute="name") %}
+ {{attribute.name | safe}}:
+ description: {{attribute.brief | trim}}
+ type: {{ attribute.type | safe }}
+{%- if attribute.examples %}
+ enum:
+ {%- for e in attribute.examples %}
+ - {{ e | trim }}
+ {%- endfor %}
+{%- endif %}
+{%- endfor %}
+{%- endif %}
+{%- endfor %}
diff --git a/templates/registry/metadata/metrics_metadata.yaml.j2 b/templates/registry/metadata/metrics_metadata.yaml.j2
new file mode 100644
index 000000000..2bc1d1931
--- /dev/null
+++ b/templates/registry/metadata/metrics_metadata.yaml.j2
@@ -0,0 +1,30 @@
+
+metrics:
+{%- for group in ctx if not group.deprecated | sort(attribute="metric_name") %}
+{%- if group.type == "metric" %}
+ {{group.metric_name | safe}}:
+ enabled: true
+ description: {{group.brief | trim}}
+{%- if group.instrument == "counter" %}
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation_temporality: cumulative
+{%- elif group.instrument == "updowncounter" %}
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation_temporality: delta
+{%- else %}
+ gauge:
+ value_type: int
+{%- endif %}
+ unit: {{ group.unit }}
+{%- endif %}
+{%- if group.attributes %}
+ attributes:
+{%- for attribute in group.attributes | sort(attribute="name") %}
+ - {{attribute.name | safe}}
+{%- endfor %}
+{%- endif %}
+{%- endfor %}
diff --git a/templates/registry/metadata/weaver.yaml b/templates/registry/metadata/weaver.yaml
new file mode 100644
index 000000000..254eaf7f8
--- /dev/null
+++ b/templates/registry/metadata/weaver.yaml
@@ -0,0 +1,11 @@
+params:
+ excluded_attributes: ["messaging.client_id"]
+templates:
+ - pattern: attrs_metadata.yaml.j2
+ filter: >
+ .groups | map(select(.type == "attribute_group")) | map(select(.id | startswith("registry")))
+ application_mode: single
+ - pattern: metrics_metadata.yaml.j2
+ filter: >
+ .groups | map(select(.type == "metric"))
+ application_mode: single
\ No newline at end of file
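Note (illustrative, not part of the patch): the wholesale argument reordering in scraper.go above follows from the alphabetised attribute lists in metadata.yaml, since mdatagen emits one Record*DataPoint parameter per attribute in the order the attributes are listed. A minimal Go sketch of that effect, with hypothetical function names and attribute values rather than the real generated signatures, is:

package main

import "fmt"

// Before the patch: attributes listed as zone, upstream, address, name,
// so the generated recorder took them in that order.
func recordPeerIOOld(val int64, zone, upstream, address, name string) {
	fmt.Println(val, zone, upstream, address, name)
}

// After the patch: attributes sorted as address, name, upstream, zone,
// so call sites had to swap their trailing arguments to match.
func recordPeerIONew(val int64, address, name, upstream, zone string) {
	fmt.Println(val, address, name, upstream, zone)
}

func main() {
	recordPeerIOOld(42, "upstream_test_zone", "upstream_test", "10.0.0.1:80", "peer_1")
	recordPeerIONew(42, "10.0.0.1:80", "peer_1", "upstream_test", "upstream_test_zone")
}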
From 69234a0ef15d22d2e13a276a108352fda70ff48d Mon Sep 17 00:00:00 2001
From: Patrick Pfrehm
Date: Thu, 22 May 2025 09:01:33 -0700
Subject: [PATCH 2/4] fix: update codeowners
---
internal/collector/nginxossreceiver/metadata.yaml | 2 +-
internal/collector/nginxplusreceiver/metadata.yaml | 2 +-
templates/registry/metadata/attrs_metadata.yaml.j2 | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/internal/collector/nginxossreceiver/metadata.yaml b/internal/collector/nginxossreceiver/metadata.yaml
index 4a3e6dffb..4ca6198c1 100644
--- a/internal/collector/nginxossreceiver/metadata.yaml
+++ b/internal/collector/nginxossreceiver/metadata.yaml
@@ -9,7 +9,7 @@ status:
beta: [metrics]
distributions: [contrib]
codeowners:
- active: [aphralG, dhurley, craigell, sean-breen, Rashmiti, CVanF5]
+ active: [aphralG, dhurley, craigell, sean-breen, CVanF5]
resource_attributes:
instance.id:
diff --git a/internal/collector/nginxplusreceiver/metadata.yaml b/internal/collector/nginxplusreceiver/metadata.yaml
index 4014eec62..29ef25b9e 100644
--- a/internal/collector/nginxplusreceiver/metadata.yaml
+++ b/internal/collector/nginxplusreceiver/metadata.yaml
@@ -9,7 +9,7 @@ status:
beta: [metrics]
distributions: [contrib]
codeowners:
- active: [aphralG, dhurley, craigell, sean-breen, Rashmiti, CVanF5]
+ active: [aphralG, dhurley, craigell, sean-breen, CVanF5]
resource_attributes:
instance.id:
diff --git a/templates/registry/metadata/attrs_metadata.yaml.j2 b/templates/registry/metadata/attrs_metadata.yaml.j2
index f39ac018c..d3bd16902 100644
--- a/templates/registry/metadata/attrs_metadata.yaml.j2
+++ b/templates/registry/metadata/attrs_metadata.yaml.j2
@@ -11,7 +11,7 @@ status:
beta: [metrics]
distributions: [contrib]
codeowners:
- active: [aphralG, dhurley, craigell, sean-breen, Rashmiti, CVanF5]
+ active: [aphralG, dhurley, craigell, sean-breen, CVanF5]
resource_attributes:
instance.id:
From 9943e7b5c0450e367ffeb94dd820d5c88089f677 Mon Sep 17 00:00:00 2001
From: Patrick Pfrehm
Date: Thu, 29 May 2025 07:14:05 -0700
Subject: [PATCH 3/4] fix: http request count
---
internal/collector/nginxplusreceiver/scraper.go | 8 +++++++-
internal/collector/nginxplusreceiver/scraper_test.go | 2 ++
.../collector/nginxplusreceiver/testdata/expected.yaml | 2 +-
3 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/internal/collector/nginxplusreceiver/scraper.go b/internal/collector/nginxplusreceiver/scraper.go
index 1332925ad..cad555ae9 100644
--- a/internal/collector/nginxplusreceiver/scraper.go
+++ b/internal/collector/nginxplusreceiver/scraper.go
@@ -46,6 +46,7 @@ type NginxPlusScraper struct {
logger *zap.Logger
settings receiver.Settings
init sync.Once
+ previousHTTPRequestsTotal uint64
}
type ResponseStatuses struct {
@@ -107,6 +108,8 @@ func (nps *NginxPlusScraper) Scrape(ctx context.Context) (pmetric.Metrics, error
nps.logger.Error("Failed to get stats from plus API", zap.Error(err))
return
}
+
+ nps.previousHTTPRequestsTotal = stats.HTTPRequests.Total
nps.createPreviousServerZoneResponses(stats)
nps.createPreviousLocationZoneResponses(stats)
})
@@ -194,7 +197,10 @@ func (nps *NginxPlusScraper) recordMetrics(stats *plusapi.Stats) {
// HTTP Requests
nps.mb.RecordNginxHTTPRequestsDataPoint(now, int64(stats.HTTPRequests.Total), "", 0)
- nps.mb.RecordNginxHTTPRequestCountDataPoint(now, int64(stats.HTTPRequests.Current))
+
+ requestsDiff := int64(stats.HTTPRequests.Total) - int64(nps.previousHTTPRequestsTotal)
+ nps.mb.RecordNginxHTTPRequestCountDataPoint(now, requestsDiff)
+ nps.previousHTTPRequestsTotal = stats.HTTPRequests.Total
nps.recordCacheMetrics(stats, now)
nps.recordHTTPLimitMetrics(stats, now)
diff --git a/internal/collector/nginxplusreceiver/scraper_test.go b/internal/collector/nginxplusreceiver/scraper_test.go
index 7dfe82e4c..b2e8bd972 100644
--- a/internal/collector/nginxplusreceiver/scraper_test.go
+++ b/internal/collector/nginxplusreceiver/scraper_test.go
@@ -104,6 +104,8 @@ func TestScraper(t *testing.T) {
},
}
+ scraper.previousHTTPRequestsTotal = 3
+
actualMetrics, err := scraper.Scrape(context.Background())
require.NoError(t, err)
diff --git a/internal/collector/nginxplusreceiver/testdata/expected.yaml b/internal/collector/nginxplusreceiver/testdata/expected.yaml
index d39c2a1d1..8b5437f44 100644
--- a/internal/collector/nginxplusreceiver/testdata/expected.yaml
+++ b/internal/collector/nginxplusreceiver/testdata/expected.yaml
@@ -412,7 +412,7 @@ resourceMetrics:
gauge:
aggregationTemporality: 2
dataPoints:
- - asInt: "3"
+ - asInt: "44"
timeUnixNano: "1000000"
isMonotonic: true
unit: requests
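Note (illustrative, not part of the patch): the fix above derives nginx.http.request.count as the difference between the current cumulative request total and the total remembered from the previous scrape. A minimal Go sketch of that pattern, assuming a previous total of 3 and a current total of 47 as implied by the test data (yielding the expected 44), with hypothetical type and field names, is:

package main

import "fmt"

// requestCounter remembers the last cumulative total seen by the scraper.
type requestCounter struct {
	previousTotal uint64
}

// delta returns the number of requests observed since the previous scrape
// and updates the stored total for the next interval.
func (rc *requestCounter) delta(currentTotal uint64) int64 {
	d := int64(currentTotal) - int64(rc.previousTotal)
	rc.previousTotal = currentTotal
	return d
}

func main() {
	rc := &requestCounter{previousTotal: 3}
	fmt.Println(rc.delta(47)) // prints 44
}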
From 6ddbe6aff6c5806e543bd4427d2b06c10406fde9 Mon Sep 17 00:00:00 2001
From: Patrick Pfrehm
Date: Thu, 29 May 2025 13:05:28 -0700
Subject: [PATCH 4/4] add code owners as param
---
templates/registry/metadata/attrs_metadata.yaml.j2 | 2 +-
templates/registry/metadata/weaver.yaml | 1 +
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/templates/registry/metadata/attrs_metadata.yaml.j2 b/templates/registry/metadata/attrs_metadata.yaml.j2
index d3bd16902..6275bf099 100644
--- a/templates/registry/metadata/attrs_metadata.yaml.j2
+++ b/templates/registry/metadata/attrs_metadata.yaml.j2
@@ -11,7 +11,7 @@ status:
beta: [metrics]
distributions: [contrib]
codeowners:
- active: [aphralG, dhurley, craigell, sean-breen, CVanF5]
+ active: [{{params.code_owners | safe}}]
resource_attributes:
instance.id:
diff --git a/templates/registry/metadata/weaver.yaml b/templates/registry/metadata/weaver.yaml
index 254eaf7f8..ab7ef7007 100644
--- a/templates/registry/metadata/weaver.yaml
+++ b/templates/registry/metadata/weaver.yaml
@@ -1,5 +1,6 @@
params:
excluded_attributes: ["messaging.client_id"]
+ code_owners: aphralG, dhurley, craigell, sean-breen, CVanF5
templates:
- pattern: attrs_metadata.yaml.j2
filter: >