diff --git a/Makefile b/Makefile index e7ab69d09e..5d60befb9d 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ TMP_DIRECTORY = ./tmp -CHARTS ?= opentelemetry-collector opentelemetry-operator opentelemetry-demo opentelemetry-ebpf opentelemetry-kube-stack +CHARTS ?= opentelemetry-collector opentelemetry-operator opentelemetry-demo opentelemetry-ebpf opentelemetry-kube-stack opentelemetry-ebpf-instrumentation OPERATOR_APP_VERSION ?= "$(shell cat ./charts/opentelemetry-operator/Chart.yaml | sed -nr 's/appVersion: ([0-9]+\.[0-9]+\.[0-9]+)/\1/p')" .PHONY: generate-examples diff --git a/README.md b/README.md index 741a902329..8cde5c40bc 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # OpenTelemetry Helm Charts -[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/opentelemetry-helm)](https://artifacthub.io/packages/search?repo=opentelemetry-helm) This repository contains [Helm](https://helm.sh/) charts for OpenTelemetry project. @@ -38,6 +38,13 @@ The chart can be used to install [OpenTelemetry Operator](https://github.com/ope in a Kubernetes cluster. More detailed documentation can be found in [OpenTelemetry Operator chart directory](./charts/opentelemetry-operator). + +### OpenTelemetry eBPF Instrumentation + +The chart can be used to install [OpenTelemetry eBPF Instrumentation](https://github.com/open-telemetry/opentelemetry-ebpf-instrumentation) +in a Kubernetes cluster. More detailed documentation can be found in +[OpenTelemetry eBPF Instrumentation chart directory](./charts/opentelemetry-ebpf-instrumentation). + ## Contributing See [CONTRIBUTING.md](./CONTRIBUTING.md). diff --git a/charts/opentelemetry-ebpf-instrumentation/.helmignore b/charts/opentelemetry-ebpf-instrumentation/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs .git/ .gitignore .bzr/ .bzrignore .hg/ .hgignore .svn/ # Common backup files *.swp *.bak *.tmp *.orig *~ # Various IDEs .project .idea/ *.tmproj .vscode/ diff --git a/charts/opentelemetry-ebpf-instrumentation/Chart.yaml b/charts/opentelemetry-ebpf-instrumentation/Chart.yaml new file mode 100644 index 0000000000..c36552c75e --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/Chart.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: v2 +name: opentelemetry-ebpf-instrumentation +version: 0.1.0 +description: OpenTelemetry eBPF instrumentation Helm chart for Kubernetes +type: application +home: https://opentelemetry.io/ +sources: + - https://github.com/coralogix/opentelemetry-helm-charts + - https://github.com/open-telemetry/opentelemetry-helm-charts +icon: https://opentelemetry.io/img/logos/opentelemetry-logo-nav.png +maintainers: + - name: dmitryax + - name: jaronoff97 + - name: TylerHelmuth + - name: nimrodavni78 +appVersion: 0.1.0 diff --git a/charts/opentelemetry-ebpf-instrumentation/README.md b/charts/opentelemetry-ebpf-instrumentation/README.md new file mode 100644 index 0000000000..2b08f32651 --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/README.md @@ -0,0 +1,28 @@ +# OpenTelemetry eBPF Instrumentation Helm Chart + +The Helm chart installs [OpenTelemetry eBPF Instrumentation](https://github.com/open-telemetry/opentelemetry-ebpf-instrumentation) +in a Kubernetes cluster. + +## Prerequisites + +- Kubernetes 1.24+ +- Helm 3.9+ + +## Installing the Chart + +Add OpenTelemetry Helm repository: + +```console +helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts +``` + +To install the chart with the release name my-opentelemetry-ebpf-instrumentation, run the following command: + +```console +helm install my-opentelemetry-ebpf-instrumentation open-telemetry/opentelemetry-ebpf-instrumentation +``` + +### Other configuration options + +The [values.yaml](./values.yaml) file contains information about all other configuration +options for this chart. 
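As an illustrative follow-up to the chart README above, the options documented in the chart's values.yaml can be overridden at install time. The sketch below is only an example: the release name matches the README, and the two overrides shown (enabling the optional Kubernetes metadata cache and the Prometheus metrics Service) use keys defined in this chart's values.yaml.

```console
# Illustrative overrides; k8sCache.replicas and service.enabled are keys from this chart's values.yaml.
helm install my-opentelemetry-ebpf-instrumentation open-telemetry/opentelemetry-ebpf-instrumentation \
  --set k8sCache.replicas=1 \
  --set service.enabled=true
```

The same overrides can also be kept in a values file and passed with `-f`, which is usually easier to review and to reuse for `helm upgrade`.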
diff --git a/charts/opentelemetry-ebpf-instrumentation/ci/default-values.yaml b/charts/opentelemetry-ebpf-instrumentation/ci/default-values.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/charts/opentelemetry-ebpf-instrumentation/examples/default/rendered/clusterrole.yaml b/charts/opentelemetry-ebpf-instrumentation/examples/default/rendered/clusterrole.yaml new file mode 100644 index 0000000000..d1231bf9ba --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/examples/default/rendered/clusterrole.yaml @@ -0,0 +1,21 @@ +--- +# Source: opentelemetry-ebpf-instrumentation/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: example-opentelemetry-ebpf-instrumentation + labels: + helm.sh/chart: opentelemetry-ebpf-instrumentation-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf-instrumentation + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.1.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: obi + app.kubernetes.io/component: rbac +rules: + - apiGroups: [ "apps" ] + resources: [ "replicasets" ] + verbs: [ "list", "watch" ] + - apiGroups: [ "" ] + resources: [ "pods", "services", "nodes" ] + verbs: [ "list", "watch", "get" ] diff --git a/charts/opentelemetry-ebpf-instrumentation/examples/default/rendered/clusterrolebinding.yaml b/charts/opentelemetry-ebpf-instrumentation/examples/default/rendered/clusterrolebinding.yaml new file mode 100644 index 0000000000..d1d1730807 --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/examples/default/rendered/clusterrolebinding.yaml @@ -0,0 +1,22 @@ +--- +# Source: opentelemetry-ebpf-instrumentation/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: example-opentelemetry-ebpf-instrumentation + labels: + helm.sh/chart: opentelemetry-ebpf-instrumentation-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf-instrumentation + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.1.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: obi + app.kubernetes.io/component: rbac +subjects: + - kind: ServiceAccount + name: example-opentelemetry-ebpf-instrumentation + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: example-opentelemetry-ebpf-instrumentation diff --git a/charts/opentelemetry-ebpf-instrumentation/examples/default/rendered/configmap.yaml b/charts/opentelemetry-ebpf-instrumentation/examples/default/rendered/configmap.yaml new file mode 100644 index 0000000000..50f58e2359 --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/examples/default/rendered/configmap.yaml @@ -0,0 +1,38 @@ +--- +# Source: opentelemetry-ebpf-instrumentation/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-opentelemetry-ebpf-instrumentation + namespace: default + labels: + helm.sh/chart: opentelemetry-ebpf-instrumentation-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf-instrumentation + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.1.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: obi + app.kubernetes.io/component: config +data: + ebpf-instrument-config.yml: | + attributes: + kubernetes: + enable: true + discovery: + exclude_services: + - exe_path: .*ebpf-instrument.*|.*otelcol.* + services: + - k8s_namespace: . 
+ filter: + network: + k8s_dst_owner_name: + not_match: '{kube*,*jaeger-agent*,*prometheus*,*promtail*,*grafana-agent*}' + k8s_src_owner_name: + not_match: '{kube*,*jaeger-agent*,*prometheus*,*promtail*,*grafana-agent*}' + otel_metrics_export: + endpoint: http://${HOST_IP}:4318 + otel_traces_export: + endpoint: http://${HOST_IP}:4317 + prometheus_export: + path: /metrics + port: 9090 diff --git a/charts/opentelemetry-ebpf-instrumentation/examples/default/rendered/daemonset.yaml b/charts/opentelemetry-ebpf-instrumentation/examples/default/rendered/daemonset.yaml new file mode 100644 index 0000000000..229a908c98 --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/examples/default/rendered/daemonset.yaml @@ -0,0 +1,63 @@ +--- +# Source: opentelemetry-ebpf-instrumentation/templates/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: example-opentelemetry-ebpf-instrumentation + namespace: default + labels: + helm.sh/chart: opentelemetry-ebpf-instrumentation-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf-instrumentation + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.1.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: obi + app.kubernetes.io/component: workload +spec: + selector: + matchLabels: + app.kubernetes.io/name: opentelemetry-ebpf-instrumentation + app.kubernetes.io/instance: example + updateStrategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config: 2ddd12b5cac5f3eab28d8bc500cb3db81df076a39a5143c879aed1772dfdc4ee + labels: + helm.sh/chart: opentelemetry-ebpf-instrumentation-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf-instrumentation + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.1.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: obi + app.kubernetes.io/component: workload + spec: + serviceAccountName: example-opentelemetry-ebpf-instrumentation + hostPID: true + initContainers: + containers: + - name: ebpf-instrument + image: docker.io/otel/ebpf-instrument:main + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + ports: + - name: metrics + containerPort: 9090 + protocol: TCP + env: + - name: OTEL_EBPF_CONFIG_PATH + value: "/etc/ebpf-instrument/config/ebpf-instrument-config.yml" + - name: HOST_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + volumeMounts: + - mountPath: /etc/ebpf-instrument/config + name: ebpf-instrument-config + volumes: + - name: ebpf-instrument-config + configMap: + name: example-opentelemetry-ebpf-instrumentation diff --git a/charts/opentelemetry-ebpf-instrumentation/examples/default/rendered/serviceaccount.yaml b/charts/opentelemetry-ebpf-instrumentation/examples/default/rendered/serviceaccount.yaml new file mode 100644 index 0000000000..2cb6dc45f6 --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/examples/default/rendered/serviceaccount.yaml @@ -0,0 +1,16 @@ +--- +# Source: opentelemetry-ebpf-instrumentation/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: example-opentelemetry-ebpf-instrumentation + namespace: default + labels: + helm.sh/chart: opentelemetry-ebpf-instrumentation-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf-instrumentation + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.1.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: obi + app.kubernetes.io/component: rbac +automountServiceAccountToken: true diff --git 
a/charts/opentelemetry-ebpf-instrumentation/examples/default/values.yaml b/charts/opentelemetry-ebpf-instrumentation/examples/default/values.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/charts/opentelemetry-ebpf-instrumentation/examples/with-k8s-cache/rendered/cache-deployment.yaml b/charts/opentelemetry-ebpf-instrumentation/examples/with-k8s-cache/rendered/cache-deployment.yaml new file mode 100644 index 0000000000..a1a0869bca --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/examples/with-k8s-cache/rendered/cache-deployment.yaml @@ -0,0 +1,42 @@ +--- +# Source: opentelemetry-ebpf-instrumentation/templates/cache-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: opentelemetry-ebpf-instrumentation-k8s-cache + namespace: default + labels: + helm.sh/chart: opentelemetry-ebpf-instrumentation-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf-instrumentation-k8s-cache + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.1.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: obi + app.kubernetes.io/component: workload +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: opentelemetry-ebpf-instrumentation-k8s-cache + template: + metadata: + labels: + helm.sh/chart: opentelemetry-ebpf-instrumentation-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf-instrumentation-k8s-cache + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.1.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: obi + spec: + serviceAccountName: example-opentelemetry-ebpf-instrumentation + containers: + - name: obi-k8s-cache + image: docker.io/otel/opentelemetry-ebpf-k8s-cache:main + imagePullPolicy: IfNotPresent + ports: + - containerPort: 50055 + protocol: TCP + name: grpc + env: + - name: OTEL_EBPF_K8S_CACHE_PORT + value: "50055" diff --git a/charts/opentelemetry-ebpf-instrumentation/examples/with-k8s-cache/rendered/cache-service.yaml b/charts/opentelemetry-ebpf-instrumentation/examples/with-k8s-cache/rendered/cache-service.yaml new file mode 100644 index 0000000000..5f3c51a5bc --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/examples/with-k8s-cache/rendered/cache-service.yaml @@ -0,0 +1,23 @@ +--- +# Source: opentelemetry-ebpf-instrumentation/templates/cache-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: opentelemetry-ebpf-instrumentation-k8s-cache + namespace: default + labels: + helm.sh/chart: opentelemetry-ebpf-instrumentation-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf-instrumentation-k8s-cache + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.1.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: obi + app.kubernetes.io/component: networking +spec: + ports: + - port: 50055 + protocol: TCP + targetPort: grpc + name: grpc + selector: + app.kubernetes.io/name: opentelemetry-ebpf-instrumentation-k8s-cache diff --git a/charts/opentelemetry-ebpf-instrumentation/examples/with-k8s-cache/rendered/clusterrole.yaml b/charts/opentelemetry-ebpf-instrumentation/examples/with-k8s-cache/rendered/clusterrole.yaml new file mode 100644 index 0000000000..d1231bf9ba --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/examples/with-k8s-cache/rendered/clusterrole.yaml @@ -0,0 +1,21 @@ +--- +# Source: opentelemetry-ebpf-instrumentation/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: example-opentelemetry-ebpf-instrumentation + labels: + helm.sh/chart: 
opentelemetry-ebpf-instrumentation-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf-instrumentation + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.1.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: obi + app.kubernetes.io/component: rbac +rules: + - apiGroups: [ "apps" ] + resources: [ "replicasets" ] + verbs: [ "list", "watch" ] + - apiGroups: [ "" ] + resources: [ "pods", "services", "nodes" ] + verbs: [ "list", "watch", "get" ] diff --git a/charts/opentelemetry-ebpf-instrumentation/examples/with-k8s-cache/rendered/clusterrolebinding.yaml b/charts/opentelemetry-ebpf-instrumentation/examples/with-k8s-cache/rendered/clusterrolebinding.yaml new file mode 100644 index 0000000000..d1d1730807 --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/examples/with-k8s-cache/rendered/clusterrolebinding.yaml @@ -0,0 +1,22 @@ +--- +# Source: opentelemetry-ebpf-instrumentation/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: example-opentelemetry-ebpf-instrumentation + labels: + helm.sh/chart: opentelemetry-ebpf-instrumentation-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf-instrumentation + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.1.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: obi + app.kubernetes.io/component: rbac +subjects: + - kind: ServiceAccount + name: example-opentelemetry-ebpf-instrumentation + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: example-opentelemetry-ebpf-instrumentation diff --git a/charts/opentelemetry-ebpf-instrumentation/examples/with-k8s-cache/rendered/configmap.yaml b/charts/opentelemetry-ebpf-instrumentation/examples/with-k8s-cache/rendered/configmap.yaml new file mode 100644 index 0000000000..50f58e2359 --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/examples/with-k8s-cache/rendered/configmap.yaml @@ -0,0 +1,38 @@ +--- +# Source: opentelemetry-ebpf-instrumentation/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-opentelemetry-ebpf-instrumentation + namespace: default + labels: + helm.sh/chart: opentelemetry-ebpf-instrumentation-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf-instrumentation + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.1.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: obi + app.kubernetes.io/component: config +data: + ebpf-instrument-config.yml: | + attributes: + kubernetes: + enable: true + discovery: + exclude_services: + - exe_path: .*ebpf-instrument.*|.*otelcol.* + services: + - k8s_namespace: . 
+ filter: + network: + k8s_dst_owner_name: + not_match: '{kube*,*jaeger-agent*,*prometheus*,*promtail*,*grafana-agent*}' + k8s_src_owner_name: + not_match: '{kube*,*jaeger-agent*,*prometheus*,*promtail*,*grafana-agent*}' + otel_metrics_export: + endpoint: http://${HOST_IP}:4318 + otel_traces_export: + endpoint: http://${HOST_IP}:4317 + prometheus_export: + path: /metrics + port: 9090 diff --git a/charts/opentelemetry-ebpf-instrumentation/examples/with-k8s-cache/rendered/daemonset.yaml b/charts/opentelemetry-ebpf-instrumentation/examples/with-k8s-cache/rendered/daemonset.yaml new file mode 100644 index 0000000000..56ada46745 --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/examples/with-k8s-cache/rendered/daemonset.yaml @@ -0,0 +1,65 @@ +--- +# Source: opentelemetry-ebpf-instrumentation/templates/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: example-opentelemetry-ebpf-instrumentation + namespace: default + labels: + helm.sh/chart: opentelemetry-ebpf-instrumentation-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf-instrumentation + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.1.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: obi + app.kubernetes.io/component: workload +spec: + selector: + matchLabels: + app.kubernetes.io/name: opentelemetry-ebpf-instrumentation + app.kubernetes.io/instance: example + updateStrategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config: 2ddd12b5cac5f3eab28d8bc500cb3db81df076a39a5143c879aed1772dfdc4ee + labels: + helm.sh/chart: opentelemetry-ebpf-instrumentation-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf-instrumentation + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.1.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: obi + app.kubernetes.io/component: workload + spec: + serviceAccountName: example-opentelemetry-ebpf-instrumentation + hostPID: true + initContainers: + containers: + - name: ebpf-instrument + image: docker.io/otel/ebpf-instrument:main + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + ports: + - name: metrics + containerPort: 9090 + protocol: TCP + env: + - name: OTEL_EBPF_CONFIG_PATH + value: "/etc/ebpf-instrument/config/ebpf-instrument-config.yml" + - name: HOST_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + - name: OTEL_EBPF_KUBE_META_CACHE_ADDRESS + value: opentelemetry-ebpf-instrumentation-k8s-cache:50055 + volumeMounts: + - mountPath: /etc/ebpf-instrument/config + name: ebpf-instrument-config + volumes: + - name: ebpf-instrument-config + configMap: + name: example-opentelemetry-ebpf-instrumentation diff --git a/charts/opentelemetry-ebpf-instrumentation/examples/with-k8s-cache/rendered/serviceaccount.yaml b/charts/opentelemetry-ebpf-instrumentation/examples/with-k8s-cache/rendered/serviceaccount.yaml new file mode 100644 index 0000000000..2cb6dc45f6 --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/examples/with-k8s-cache/rendered/serviceaccount.yaml @@ -0,0 +1,16 @@ +--- +# Source: opentelemetry-ebpf-instrumentation/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: example-opentelemetry-ebpf-instrumentation + namespace: default + labels: + helm.sh/chart: opentelemetry-ebpf-instrumentation-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf-instrumentation + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.1.0" + app.kubernetes.io/managed-by: Helm + 
app.kubernetes.io/part-of: obi + app.kubernetes.io/component: rbac +automountServiceAccountToken: true diff --git a/charts/opentelemetry-ebpf-instrumentation/examples/with-k8s-cache/values.yaml b/charts/opentelemetry-ebpf-instrumentation/examples/with-k8s-cache/values.yaml new file mode 100644 index 0000000000..e71a11ac22 --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/examples/with-k8s-cache/values.yaml @@ -0,0 +1,2 @@ +k8sCache: + replicas: 1 diff --git a/charts/opentelemetry-ebpf-instrumentation/templates/_helpers.tpl b/charts/opentelemetry-ebpf-instrumentation/templates/_helpers.tpl new file mode 100644 index 0000000000..184de37141 --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/templates/_helpers.tpl @@ -0,0 +1,154 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "obi.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "obi.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "obi.namespace" -}} +{{- if .Values.namespaceOverride }} +{{- .Values.namespaceOverride }} +{{- else }} +{{- .Release.Namespace }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "obi.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "obi.labels" -}} +helm.sh/chart: {{ include "obi.chart" . }} +{{ include "obi.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/part-of: obi +{{- end }} + +{{/* +Selector (pod) labels +*/}} +{{- define "obi.selectorLabels" -}} +app.kubernetes.io/name: {{ include "obi.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- with .Values.podLabels }} +{{ toYaml . }} +{{- end }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "obi.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "obi.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Calculate name of image ID to use for "obi". +*/}} +{{- define "obi.imageId" -}} +{{- if .Values.image.digest }} +{{- $digest := .Values.image.digest }} +{{- if not (hasPrefix "sha256:" $digest) }} +{{- $digest = printf "sha256:%s" $digest }} +{{- end }} +{{- printf "@%s" $digest }} +{{- else if .Values.image.tag }} +{{- printf ":%s" .Values.image.tag }} +{{- else }} +{{- printf ":%s" .Chart.AppVersion }} +{{- end }} +{{- end }} + +{{/* +Calculate name of image ID to use for "obi-cache". 
+*/}} +{{- define "obi.k8sCache.imageId" -}} +{{- if .Values.k8sCache.image.digest }} +{{- $digest := .Values.k8sCache.image.digest }} +{{- if not (hasPrefix "sha256:" $digest) }} +{{- $digest = printf "sha256:%s" $digest }} +{{- end }} +{{- printf "@%s" $digest }} +{{- else if .Values.k8sCache.image.tag }} +{{- printf ":%s" .Values.k8sCache.image.tag }} +{{- else }} +{{- printf ":%s" .Chart.AppVersion }} +{{- end }} +{{- end }} + +{{/* +Common kube cache labels +*/}} +{{- define "obi.cache.labels" -}} +helm.sh/chart: {{ include "obi.chart" . }} +{{ include "obi.cache.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/part-of: obi +{{- end }} + +{{/* +Selector (pod) labels +*/}} +{{- define "obi.cache.selectorLabels" -}} +app.kubernetes.io/name: {{ .Values.k8sCache.service.name }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- with .Values.k8sCache.podLabels }} +{{ toYaml . }} +{{- end }} +{{- end }} + +{{/* +Generate the configmap data based on preset and configuration values +*/}} +{{- define "obi.configData" -}} +{{- $config := deepCopy .Values.config.data }} +{{- if eq .Values.preset "network" }} +{{- if not .Values.config.data.network }} +{{- $_ := set $config "network" (dict "enable" true) }} +{{- end }} +{{- end }} +{{- if eq .Values.preset "application" }} +{{- if not .Values.config.data.discovery }} +{{- $discovery := dict "services" (list (dict "k8s_namespace" ".")) "exclude_services" (list (dict "exe_path" ".*ebpf-instrument.*|.*otelcol.*")) }} +{{- $_ := set $config "discovery" $discovery }} +{{- end }} +{{- end }} +{{- toYaml $config }} +{{- end }} diff --git a/charts/opentelemetry-ebpf-instrumentation/templates/cache-deployment.yaml b/charts/opentelemetry-ebpf-instrumentation/templates/cache-deployment.yaml new file mode 100644 index 0000000000..16c129a13a --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/templates/cache-deployment.yaml @@ -0,0 +1,78 @@ +{{- if .Values.k8sCache.replicas }} +{{- $root := . }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Values.k8sCache.service.name }} + namespace: {{ include "obi.namespace" .}} + labels: + {{- include "obi.cache.labels" . | nindent 4 }} + app.kubernetes.io/component: workload + {{- with .Values.k8sCache.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.k8sCache.replicas }} + selector: + matchLabels: + app.kubernetes.io/name: {{ .Values.k8sCache.service.name }} + template: + metadata: + {{- with .Values.k8sCache.podAnnotations }} + annotations: + {{- tpl (toYaml . | nindent 8) $root }} + {{- end }} + labels: + {{- include "obi.cache.labels" . | nindent 8 }} + spec: + {{- if .Values.serviceAccount.create }} + serviceAccountName: {{ include "obi.serviceAccountName" . }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- toYaml .Values.image.pullSecrets | nindent 8 }} + {{- end }} + containers: + - name: obi-k8s-cache + image: {{ .Values.k8sCache.image.registry }}/{{ .Values.k8sCache.image.repository }}{{ include "obi.k8sCache.imageId" . 
}} + imagePullPolicy: {{ .Values.k8sCache.image.pullPolicy }} + ports: + - containerPort: {{ .Values.k8sCache.service.port }} + protocol: TCP + name: grpc + {{- if .Values.k8sCache.profilePort }} + - name: profile + containerPort: {{ .Values.k8sCache.profilePort }} + protocol: TCP + {{- end }} + {{- if .Values.k8sCache.internalMetrics.port }} + - name: {{ .Values.k8sCache.internalMetrics.portName }} + containerPort: {{ .Values.k8sCache.internalMetrics.port }} + protocol: TCP + {{- end }} + {{- with .Values.k8sCache.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + env: + - name: OTEL_EBPF_K8S_CACHE_PORT + value: "{{ .Values.k8sCache.service.port }}" + {{- if .Values.k8sCache.profilePort }} + - name: OTEL_EBPF_K8S_CACHE_PROFILE_PORT + value: "{{ .Values.k8sCache.profilePort }}" + {{- end }} + {{- if .Values.k8sCache.internalMetrics.port }} + - name: OTEL_EBPF_K8S_CACHE_INTERNAL_METRICS_PROMETHEUS_PORT + value: "{{ .Values.k8sCache.internalMetrics.port }}" + {{- end }} + {{- range $key, $value := .Values.k8sCache.env }} + - name: {{ $key }} + value: "{{ $value }}" + {{- end }} + {{- range $key, $value := .Values.k8sCache.envValueFrom }} + - name: {{ $key | quote }} + valueFrom: + {{- tpl (toYaml $value) $ | nindent 16 }} + {{- end }} +{{- end }} diff --git a/charts/opentelemetry-ebpf-instrumentation/templates/cache-service.yaml b/charts/opentelemetry-ebpf-instrumentation/templates/cache-service.yaml new file mode 100644 index 0000000000..106b22ec4f --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/templates/cache-service.yaml @@ -0,0 +1,26 @@ +{{- if .Values.k8sCache.replicas }} +{{- $root := . }} +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.k8sCache.service.name }} + namespace: {{ include "obi.namespace" .}} + labels: + {{- include "obi.cache.labels" . | nindent 4 }} + app.kubernetes.io/component: networking + {{- with .Values.k8sCache.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.k8sCache.service.annotations }} + annotations: + {{- tpl (toYaml . | nindent 4) $root }} + {{- end }} +spec: + ports: + - port: {{ .Values.k8sCache.service.port }} + protocol: TCP + targetPort: grpc + name: grpc + selector: + app.kubernetes.io/name: {{ .Values.k8sCache.service.name }} +{{- end }} diff --git a/charts/opentelemetry-ebpf-instrumentation/templates/clusterrole.yaml b/charts/opentelemetry-ebpf-instrumentation/templates/clusterrole.yaml new file mode 100644 index 0000000000..e15cdda3ce --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/templates/clusterrole.yaml @@ -0,0 +1,23 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "obi.fullname" . }} + labels: + {{- include "obi.labels" . | nindent 4 }} + app.kubernetes.io/component: rbac + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +rules: + - apiGroups: [ "apps" ] + resources: [ "replicasets" ] + verbs: [ "list", "watch" ] + - apiGroups: [ "" ] + resources: [ "pods", "services", "nodes" ] + verbs: [ "list", "watch", "get" ] + {{- with .Values.rbac.extraClusterRoleRules }} + {{- toYaml . 
| nindent 2 }} + {{- end}} +{{- end }} diff --git a/charts/opentelemetry-ebpf-instrumentation/templates/clusterrolebinding.yaml b/charts/opentelemetry-ebpf-instrumentation/templates/clusterrolebinding.yaml new file mode 100644 index 0000000000..e653c342a3 --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/templates/clusterrolebinding.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "obi.fullname" . }} + labels: + {{- include "obi.labels" . | nindent 4 }} + app.kubernetes.io/component: rbac + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +subjects: + - kind: ServiceAccount + name: {{ include "obi.serviceAccountName" . }} + namespace: {{ include "obi.namespace" .}} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "obi.fullname" . }} +{{- end }} diff --git a/charts/opentelemetry-ebpf-instrumentation/templates/configmap.yaml b/charts/opentelemetry-ebpf-instrumentation/templates/configmap.yaml new file mode 100644 index 0000000000..d072430c00 --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/templates/configmap.yaml @@ -0,0 +1,22 @@ +{{- if not .Values.config.skipConfigMapCheck }} +{{- if and (not .Values.config.create) (eq .Values.config.name "") }} + {{- fail "if .Values.config.name is not set, then .Values.config.create should be set to true to use default configuration" }} +{{- end }} +{{- end }} +{{- if and (.Values.config.create) (eq .Values.config.name "") }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "obi.fullname" . }} + namespace: {{ include "obi.namespace" . }} + labels: + {{- include "obi.labels" . | nindent 4 }} + app.kubernetes.io/component: config + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +data: + ebpf-instrument-config.yml: | +{{- include "obi.configData" . | nindent 4 }} +{{- end }} diff --git a/charts/opentelemetry-ebpf-instrumentation/templates/daemonset.yaml b/charts/opentelemetry-ebpf-instrumentation/templates/daemonset.yaml new file mode 100644 index 0000000000..dee1d1f391 --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/templates/daemonset.yaml @@ -0,0 +1,144 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ include "obi.fullname" . }} + namespace: {{ include "obi.namespace" .}} + labels: + {{- include "obi.labels" . | nindent 4 }} + app.kubernetes.io/component: workload + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "obi.selectorLabels" . | nindent 6 }} + {{- with .Values.updateStrategy }} + updateStrategy: + {{- toYaml . | trim | nindent 4 }} + {{- end }} + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "obi.labels" . | nindent 8 }} + app.kubernetes.io/component: workload + spec: + {{- if .Values.serviceAccount.create }} + serviceAccountName: {{ include "obi.serviceAccountName" . 
}} + {{- end }} + {{- if eq .Values.preset "application" }} + hostPID: true + {{- end }} + {{- if or (eq .Values.preset "network") .Values.config.data.network }} + hostNetwork: true + dnsPolicy: {{ .Values.dnsPolicy }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + initContainers: + {{- if .Values.initContainers }} + {{- toYaml .Values.initContainers | nindent 8 }} + {{- end }} + containers: + - name: ebpf-instrument + image: {{ .Values.image.registry }}/{{ .Values.image.repository }}{{ include "obi.imageId" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + securityContext: + {{- if .Values.privileged }} + {{- with .Values.securityContext }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- else }} + runAsUser: 0 + readOnlyRootFilesystem: true + capabilities: + add: + - BPF + - SYS_PTRACE + - NET_RAW + - CHECKPOINT_RESTORE + - DAC_READ_SEARCH + - PERFMON + {{- with .Values.extraCapabilities }} + {{- toYaml . | nindent 16 }} + {{- end }} + drop: + - ALL + {{- end }} + ports: + {{- if or (.Values.service.targetPort) (.Values.config.data.prometheus_export) }} + - name: {{ .Values.service.portName }} + containerPort: {{ .Values.service.targetPort | default .Values.config.data.prometheus_export.port }} + protocol: TCP + {{- end }} + {{- if (and (or (.Values.service.internalMetrics.targetPort) ((and .Values.config.data.internal_metrics .Values.config.data.internal_metrics.prometheus))) (not (eq .Values.config.data.prometheus_export.port .Values.config.data.internal_metrics.prometheus.port))) }} + - name: {{ .Values.service.internalMetrics.portName }} + containerPort: {{ .Values.service.internalMetrics.targetPort | default .Values.config.data.internal_metrics.prometheus.port }} + protocol: TCP + {{- end }} + {{- if .Values.config.data.profile_port }} + - name: profile + containerPort: {{ .Values.config.data.profile_port }} + protocol: TCP + {{- end }} + {{- with .Values.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + env: + - name: OTEL_EBPF_CONFIG_PATH + value: "/etc/ebpf-instrument/config/ebpf-instrument-config.yml" + - name: HOST_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + {{- if .Values.k8sCache.replicas }} + - name: OTEL_EBPF_KUBE_META_CACHE_ADDRESS + value: {{ .Values.k8sCache.service.name }}:{{ .Values.k8sCache.service.port }} + {{- end }} + {{- range $key, $value := .Values.envValueFrom }} + - name: {{ $key | quote }} + valueFrom: + {{- tpl (toYaml $value) $ | nindent 16 }} + {{- end }} + {{- range $key, $value := .Values.env }} + - name: {{ $key }} + value: "{{ $value }}" + {{- end }} + volumeMounts: + - mountPath: /etc/ebpf-instrument/config + name: ebpf-instrument-config + {{- with .Values.volumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- toYaml .Values.image.pullSecrets | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: ebpf-instrument-config + configMap: + name: {{ default (include "obi.fullname" .) .Values.config.name }} + {{- with .Values.volumes }} + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/charts/opentelemetry-ebpf-instrumentation/templates/service.yaml b/charts/opentelemetry-ebpf-instrumentation/templates/service.yaml new file mode 100644 index 0000000000..3f1c479367 --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/templates/service.yaml @@ -0,0 +1,73 @@ +{{- if .Values.service.enabled }} +{{- $root := . }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "obi.fullname" . }} + namespace: {{ include "obi.namespace" .}} + labels: + {{- include "obi.labels" . | nindent 4 }} + app.kubernetes.io/component: networking + {{- with .Values.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.service.annotations }} + annotations: + {{- tpl (toYaml . | nindent 4) $root }} + {{- end }} +spec: + {{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }} + type: ClusterIP + {{- with .Values.service.clusterIP }} + clusterIP: {{ . }} + {{- end }} + {{- else if eq .Values.service.type "LoadBalancer" }} + type: LoadBalancer + {{- with .Values.service.loadBalancerIP }} + loadBalancerIP: {{ . }} + {{- end }} + {{- with .Values.service.loadBalancerClass }} + loadBalancerClass: {{ . }} + {{- end }} + {{- with .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- else }} + type: {{ .Values.service.type }} + {{- end }} + {{- with .Values.service.externalIPs }} + externalIPs: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ . }} + {{- end }} + ports: + {{- if or (.Values.service.targetPort) (.Values.config.data.prometheus_export) }} + - name: {{ .Values.service.portName }} + port: {{ .Values.service.port }} + protocol: TCP + targetPort: {{ .Values.service.targetPort | default .Values.config.data.prometheus_export.port }} + {{- with .Values.service.appProtocol }} + appProtocol: {{ . }} + {{- end }} + {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + {{- end }} + {{- if (and (or (.Values.service.internalMetrics.targetPort) (.Values.config.data.internal_metrics)) (not (eq .Values.config.data.prometheus_export.port .Values.config.data.internal_metrics.prometheus.port))) }} + - name: {{ .Values.service.internalMetrics.portName }} + port: {{ .Values.service.internalMetrics.port }} + protocol: TCP + targetPort: {{ .Values.service.internalMetrics.targetPort | default .Values.config.data.internal_metrics.prometheus.port }} + {{- with .Values.service.internalMetrics.appProtocol }} + appProtocol: {{ . }} + {{- end }} + {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.internalMetrics.nodePort))) }} + nodePort: {{ .Values.service.internalMetrics.nodePort }} + {{- end }} + {{- end }} + selector: + {{- include "obi.selectorLabels" . | nindent 4 }} +{{- end }} diff --git a/charts/opentelemetry-ebpf-instrumentation/templates/serviceaccount.yaml b/charts/opentelemetry-ebpf-instrumentation/templates/serviceaccount.yaml new file mode 100644 index 0000000000..1aac1d7285 --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/templates/serviceaccount.yaml @@ -0,0 +1,18 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "obi.serviceAccountName" . }} + namespace: {{ include "obi.namespace" .}} + labels: + {{- include "obi.labels" . 
| nindent 4 }} + app.kubernetes.io/component: rbac + {{- with .Values.serviceAccount.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automount }} +{{- end }} diff --git a/charts/opentelemetry-ebpf-instrumentation/templates/servicemonitor.yaml b/charts/opentelemetry-ebpf-instrumentation/templates/servicemonitor.yaml new file mode 100644 index 0000000000..151f0ed91a --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/templates/servicemonitor.yaml @@ -0,0 +1,43 @@ +{{- if and .Values.service.enabled .Values.serviceMonitor.enabled .Values.config.data.prometheus_export }} +{{- $root := . }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "obi.fullname" . }} + namespace: {{ include "obi.namespace" .}} + labels: + {{- include "obi.labels" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- with .Values.serviceMonitor.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.serviceMonitor.annotations }} + annotations: + {{- tpl (toYaml . | nindent 4) $root }} + {{- end }} +spec: + endpoints: + {{- if or (.Values.service.targetPort) (.Values.config.data.prometheus_export) }} + - port: {{ .Values.service.portName }} + path: {{ .Values.config.data.prometheus_export.path }} + scheme: http + {{- with .Values.serviceMonitor.endpoint }} + {{- toYaml . | nindent 6 }} + {{- end }} + {{- end }} + {{- if (and (or (.Values.service.internalMetrics.targetPort) (.Values.config.data.internal_metrics)) (not (eq .Values.config.data.prometheus_export.port .Values.config.data.internal_metrics.prometheus.port))) }} + - port: {{ .Values.service.internalMetrics.portName }} + path: {{ .Values.config.data.internal_metrics.prometheus.path }} + scheme: http + {{- with .Values.serviceMonitor.internalMetrics.endpoint }} + {{- toYaml . | nindent 6 }} + {{- end }} + {{- end }} + jobLabel: {{ .Values.serviceMonitor.jobLabel | default (include "obi.fullname" .) }} + selector: + matchLabels: + {{- include "obi.labels" . | nindent 6 }} + {{- with .Values.service.labels }} + {{- toYaml . | nindent 6 }} + {{- end }} +{{- end }} diff --git a/charts/opentelemetry-ebpf-instrumentation/values.yaml b/charts/opentelemetry-ebpf-instrumentation/values.yaml new file mode 100644 index 0000000000..01d34a289c --- /dev/null +++ b/charts/opentelemetry-ebpf-instrumentation/values.yaml @@ -0,0 +1,327 @@ +image: + # -- Opentelemetry eBPF Instrumentation image registry (defaults to ghcr.io) + registry: "ghcr.io" + # -- Opentelemetry eBPF Instrumentation image repository. + repository: otel/ebpf-instrument + # -- (string) Opentelemetry eBPF Instrumentation image tag. When empty, the Chart's appVersion is + # used. + tag: main + # -- Opentelemetry eBPF Instrumentation image's SHA256 digest (either in format "sha256:XYZ" or "XYZ"). When set, will override `image.tag`. + digest: "" + # -- Opentelemetry eBPF Instrumentation image pull policy. + pullPolicy: IfNotPresent + # -- Optional set of image pull secrets. + pullSecrets: [] + +# -- Overrides the chart's name +nameOverride: "" + +# -- Overrides the chart's computed fullname. 
+fullnameOverride: "" + +# -- Override the deployment namespace +namespaceOverride: "" + +## DaemonSet annotations +# annotations: {} + +rbac: + # -- Whether to create RBAC resources for Opentelemetry eBPF Instrumentation + create: true + # -- Extra cluster roles to be created for Opentelemetry eBPF Instrumentation + extraClusterRoleRules: [] + # - apiGroups: [] + # resources: [] + +serviceAccount: + # -- Specifies whether a service account should be created + create: true + # -- Automatically mount a ServiceAccount's API credentials? + automount: true + # -- ServiceAccount labels. + labels: {} + # -- Annotations to add to the service account + annotations: {} + # -- The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podSecurityContext: {} +# fsGroup: 2000 + +# -- If set to false, deploys an unprivileged / less privileged setup. +privileged: true + +# -- Extra capabilities for unprivileged / less privileged setup. +extraCapabilities: [] + # - SYS_RESOURCE # <-- pre 5.11 only. Allows Opentelemetry eBPF Instrumentation to increase the amount of locked memory. +# - SYS_ADMIN # <-- Required for Go application trace context propagation, or if kernel.perf_event_paranoid >= 3 on Debian distributions. + +# -- Security context for privileged setup. +securityContext: + privileged: true + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +priorityClassName: "" + # system-node-critical +# system-cluster-critical + +## -- Expose the Opentelemetry eBPF Instrumentation Prometheus and internal metrics service to be accessed from outside the cluster (LoadBalancer service). +## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. +## ref: http://kubernetes.io/docs/user-guide/services/ +## +service: + # -- whether to create a service for metrics + enabled: false + # -- type of the service + type: ClusterIP + # -- Service annotations. + annotations: {} + # -- Service labels. + labels: {} + # -- cluster IP + clusterIP: "" + # -- loadbalancer IP + loadBalancerIP: "" + # -- loadbalancer class name + loadBalancerClass: "" + # -- source ranges for loadbalancer + loadBalancerSourceRanges: [] + # -- Prometheus metrics service port + port: 80 + # -- targetPort overrides the Prometheus metrics port. It defaults to the value of `prometheus_export.port` + # from the Opentelemetry eBPF Instrumentation configuration file. + targetPort: null + # -- name of the port for Prometheus metrics. + portName: metrics + # -- Adds the appProtocol field to the service. This allows working with Istio protocol selection. Ex: "http" or "tcp" + appProtocol: "" + internalMetrics: + # -- internal metrics service port + port: 8080 + # -- targetPort overrides the internal metrics port. It defaults to the value of `internal_metrics.prometheus.port` + # from the Opentelemetry eBPF Instrumentation configuration file. + targetPort: null + # -- name of the port for internal metrics. + portName: int-metrics + # -- Adds the appProtocol field to the service. This allows working with Istio protocol selection. Ex: "http" or "tcp" + appProtocol: "" + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m +# memory: 128Mi + +## -- See `kubectl explain daemonset.spec.updateStrategy` for more +## ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/#daemonset-update-strategy +updateStrategy: + # -- update strategy type + type: RollingUpdate + +# -- Additional volumes on the output daemonset definition. +volumes: [] +# - name: foo +# secret: +# secretName: mysecret +# optional: false + +# -- Additional volumeMounts on the output daemonset definition. +volumeMounts: [] +# - name: foo +# mountPath: "/etc/foo" +# readOnly: true + +# -- The nodeSelector field allows you to constrain which nodes your DaemonSet pods are scheduled to based on labels on the node +nodeSelector: {} + +# -- Tolerations allow pods to be scheduled on nodes with specific taints +tolerations: [] + +# -- used for scheduling of pods based on affinity rules +affinity: {} + +# -- Adds custom annotations to the Opentelemetry eBPF Instrumentation Pods. +podAnnotations: {} + +# -- Adds custom labels to the Opentelemetry eBPF Instrumentation Pods. +podLabels: {} + +## https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ +# -- Determines how DNS resolution is handled for that pod. +# If `.Values.preset` is set to `network` or `.Values.config.data.network` is enabled, Opentelemetry eBPF Instrumentation requires `hostNetwork` access, causing cluster service DNS resolution to fail. +# It is recommended not to change this if Opentelemetry eBPF Instrumentation sends traces and metrics to other in-cluster components (such as an OpenTelemetry Collector) via a Kubernetes service. +dnsPolicy: ClusterFirstWithHostNet + +## The below default configuration +## 1. looks for ALL the services on the host +## 2. exports metrics as Prometheus metrics on port 9090 by default +## 3. enables Kubernetes attributes +## Note: The default configuration is used if config.create=true and config.name="" +config: + # -- set to true, to skip the check around the ConfigMap creation + skipConfigMapCheck: false + # -- set to true, to use the below default configurations + create: true + ## -- Provide the name of the external configmap containing the Opentelemetry eBPF Instrumentation configuration. + ## To create a configmap from a configuration file, use the command below. Note: the key name 'ebpf-instrument-config.yml' is important. + ## `kubectl create cm <configmap-name> --from-file=ebpf-instrument-config.yml=<path-to-config-file> -n <namespace>` + ## If empty, default configuration below is used. 
+ name: "" + # -- default value of Opentelemetry eBPF Instrumentation configuration + data: + # profile_port: 6060 + # open_port: 8443 + # routes: + # unmatched: heuristic + # log_level: info + ## or alternatively use + # grafana: + # otlp: + # cloud_zone: prod-eu-west-0 + # cloud_instance_id: 123456 + # cloud_api_key: + otel_traces_export: + endpoint: "http://${HOST_IP}:4317" + otel_metrics_export: + endpoint: "http://${HOST_IP}:4318" + attributes: + kubernetes: + enable: true + filter: + network: + k8s_dst_owner_name: + not_match: '{kube*,*jaeger-agent*,*prometheus*,*promtail*,*grafana-agent*}' + k8s_src_owner_name: + not_match: '{kube*,*jaeger-agent*,*prometheus*,*promtail*,*grafana-agent*}' + ## to enable network metrics + # network: + # enable: true + prometheus_export: + port: 9090 + path: /metrics + ## to enable internal metrics + # internal_metrics: + # prometheus: + # port: 6060 + # path: /metrics + +## Env variables that will override configmap values +## For example: +## OTEL_EBPF_INTERNAL_METRICS_PROMETHEUS_PORT: 9090 +# -- extra environment variables +env: {} + # OTEL_EBPF_INTERNAL_METRICS_PROMETHEUS_PORT: 9090 +# OTEL_EBPF_TRACE_PRINTER: "text" + +# -- extra environment variables to be set from resources such as k8s configMaps/secrets +envValueFrom: {} + # ENV_NAME: + # secretKeyRef: + # name: secret-name +# key: value_key + +# -- Preconfigures some default properties for network or application observability. +# Accepted values are "network" or "application". +preset: application + +# -- Enable creation of ServiceMonitor for scraping of prometheus HTTP endpoint +serviceMonitor: + enabled: false + # -- Add custom labels to the ServiceMonitor resource + additionalLabels: {} + # -- ServiceMonitor annotations + annotations: {} + metrics: + # -- ServiceMonitor Prometheus scraping endpoint. + # Target port and path is set based on service and `prometheus_export` values. + # For additional values, see the ServiceMonitor spec + endpoint: + interval: 15s + internalMetrics: + # -- ServiceMonitor internal metrics scraping endpoint. + # Target port and path is set based on service and `internal_metrics` values. + # For additional values, see the ServiceMonitor spec + endpoint: + interval: 15s + # -- Prometheus job label. + # If empty, the chart's full name is used + jobLabel: "" + +# -- Options to deploy the Kubernetes metadata cache as a separate service +k8sCache: + # -- Number of replicas for the Kubernetes metadata cache service. 0 disables the service. + replicas: 0 + # -- Enables the profile port for the Opentelemetry eBPF Instrumentation cache + profilePort: 0 + ## Env variables that will override configmap values + ## For example: + ## OTEL_EBPF_K8S_CACHE_LOG_LEVEL: "debug" + # -- extra environment variables + env: {} + # OTEL_EBPF_K8S_CACHE_LOG_LEVEL: "debug" + + # -- extra environment variables to be set from resources such as k8s configMaps/secrets + envValueFrom: {} + # ENV_NAME: + # secretKeyRef: + # name: secret-name + # key: value_key + resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + image: + # -- K8s Cache image registry (defaults to ghcr.io) + registry: "ghcr.io" + # -- K8s Cache image repository. + repository: otel/opentelemetry-ebpf-k8s-cache + # -- (string) K8s Cache image tag. When empty, the Chart's appVersion is used. + tag: main + # -- K8s Cache image's SHA256 digest (either in format "sha256:XYZ" or "XYZ"). When set, will override `image.tag`. + digest: null + # -- K8s Cache image pull policy. + pullPolicy: IfNotPresent + # -- Optional set of image pull secrets. + pullSecrets: [] + service: + # -- Name of both the Service and Deployment + name: opentelemetry-ebpf-instrumentation-k8s-cache + # -- Port of the Kubernetes metadata cache service. + port: 50055 + # -- Service annotations. + annotations: {} + # -- Service labels. + labels: {} + internalMetrics: + # 0: disabled by default + port: 0 + path: /metrics + portName: metrics + # prometheus: + # port: 6060 + # path: /metrics + # -- Deployment annotations. + annotations: {} + # -- Adds custom annotations to the Opentelemetry eBPF Instrumentation Kube Cache Pods. + podAnnotations: {} + # -- Adds custom labels to the Opentelemetry eBPF Instrumentation Kube Cache Pods. + podLabels: {}
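For reference, a minimal sketch of an override file built only from the values defined above: it switches the chart to the network preset and lets a Prometheus Operator scrape the exporter. The file name and the `release: prometheus` label are assumptions for a typical kube-prometheus-stack setup, not something this chart requires.

```yaml
# network-values.yaml (illustrative): network observability scraped via the Prometheus Operator.
preset: network          # the daemonset template then sets hostNetwork and enables network metrics
service:
  enabled: true          # ClusterIP Service in front of the prometheus_export port (9090 by default)
serviceMonitor:
  enabled: true          # requires the Prometheus Operator CRDs in the cluster
  additionalLabels:
    release: prometheus  # assumption: selector label used by a typical kube-prometheus-stack install
```

With `preset: network`, the `obi.configData` helper adds `network.enable: true` to the generated configuration, and the DaemonSet template sets `hostNetwork: true` together with the `dnsPolicy` configured above.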