Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
promtail:
  config:
    snippets:
      # -- `scrapeConfigs` is exactly the part of https://grafana.com/docs/loki/latest/clients/promtail/configuration/#scrape_configs
      # -- The value will be created as a Kubernetes ConfigMap and then mounted to the Promtail Pod.
      # -- There is usually no need to change this value. It's set to scrape all logs of ScalarDL/DB Pods by using regular expressions.
      scrapeConfigs: |
        # -- the `scalardl` job scrapes all the logs from Scalar Ledger Pods, Scalar Auditor Pods, and the corresponding Envoy Pods
        - job_name: scalardl
          pipeline_stages:
            - docker: {}
          kubernetes_sd_configs:
            - role: pod
          relabel_configs:
            # -- use the node name as the log source host
            - source_labels:
                - __meta_kubernetes_pod_node_name
              target_label: __host__
            # -- expose the Pod name as the `pod` label
            - action: replace
              source_labels:
                - __meta_kubernetes_pod_name
              target_label: pod
            # -- keep only Pods whose name contains `scalardl-`
            - action: keep
              regex: (.*)scalardl-(.+)
              source_labels:
                - pod
            # -- build the log file path `/var/log/pods/*<pod_uid>/<container>/*.log`
            - replacement: /var/log/pods/*$1/*.log
              separator: /
              source_labels:
                - __meta_kubernetes_pod_uid
                - __meta_kubernetes_pod_container_name
              target_label: __path__
        # -- the `scalardb` job scrapes all the logs of ScalarDB Server Pods and the corresponding Envoy Pods
        - job_name: scalardb
          pipeline_stages:
            - docker: {}
          kubernetes_sd_configs:
            - role: pod
          relabel_configs:
            - source_labels:
                - __meta_kubernetes_pod_node_name
              target_label: __host__
            - action: replace
              source_labels:
                - __meta_kubernetes_pod_name
              target_label: pod
            # -- keep only Pods whose name contains `scalardb-`
            - action: keep
              regex: (.*)scalardb-(.+)
              source_labels:
                - pod
            - replacement: /var/log/pods/*$1/*.log
              separator: /
              source_labels:
                - __meta_kubernetes_pod_uid
                - __meta_kubernetes_pod_container_name
              target_label: __path__
        # -- the `scalar-admin-for-kubernetes` job scrapes all the logs of Scalar Admin for Kubernetes Pods
        - job_name: scalar-admin-for-kubernetes
          pipeline_stages:
            - docker: {}
            # -- also parse CRI-formatted log lines (containerd/CRI-O runtimes)
            - cri: {}
          kubernetes_sd_configs:
            - role: pod
          relabel_configs:
            - source_labels:
                - __meta_kubernetes_pod_node_name
              target_label: __host__
            - action: replace
              source_labels:
                - __meta_kubernetes_pod_name
              target_label: pod
            # -- keep only Pods whose name contains `scalar-admin-for-kubernetes-`
            - action: keep
              regex: (.*)scalar-admin-for-kubernetes-(.+)
              source_labels:
                - pod
            - replacement: /var/log/pods/*$1/*.log
              separator: /
              source_labels:
                - __meta_kubernetes_pod_uid
                - __meta_kubernetes_pod_container_name
              target_label: __path__
Original file line number Diff line number Diff line change
@@ -0,0 +1,167 @@
defaultRules:
  # -- Default PrometheusRules are not enabled
  create: false

alertmanager:
  # -- alertmanager is enabled
  enabled: true

  # -- Only check own namespace
  alertmanagerConfigNamespaceSelector: null

grafana:
  # -- grafana is enabled
  enabled: true

  # -- Default Grafana dashboards are not enabled
  defaultDashboardsEnabled: false

  # -- Discover datasources/dashboards from ConfigMaps carrying the matching label
  sidecar:
    datasources:
      enabled: true
      defaultDatasourceEnabled: false
      label: grafana_datasource
      labelValue: "1"
    dashboards:
      enabled: true
      label: grafana_dashboard
      labelValue: "1"

  # -- Resource limits & requests
  resources: {}
  # requests:
  #   memory: 400Mi

  # -- Grafana's primary configuration
  grafana.ini:
    security:
      # -- allow Grafana to be embedded (not set the X-Frame-Options header)
      # -- If you use Scalar Manager, you need to set allow_embedding to true.
      # -- https://grafana.com/docs/grafana/latest/administration/configuration/#allow_embedding
      allow_embedding: false

  # -- Additional data source configurations
  additionalDataSources:
    - name: Prometheus
      type: prometheus
      uid: prometheus
      url: http://scalar-monitoring-kube-pro-prometheus:9090/
      access: proxy
      editable: false
      isDefault: false
      jsonData:
        timeInterval: 30s
    # - name: Loki
    #   type: loki
    #   uid: loki
    #   url: http://scalar-logging-loki:3100/
    #   access: proxy
    #   editable: false
    #   isDefault: false

kubeApiServer:
  # -- Scraping kube-apiserver is disabled
  enabled: false

kubeControllerManager:
  # -- Scraping kube-controller-manager is disabled
  enabled: false

coreDns:
  # -- Scraping CoreDNS is disabled
  enabled: false

kubeEtcd:
  # -- Scraping etcd is disabled
  enabled: false

kubeScheduler:
  # -- Scraping kube-scheduler is disabled
  enabled: false

kubeProxy:
  # -- Scraping kube-proxy is disabled
  enabled: false

kubelet:
  # -- Scraping kubelet is disabled
  enabled: false

kubeStateMetrics:
  # -- kube-state-metrics is disabled
  enabled: false

nodeExporter:
  # -- node-exporter is disabled
  enabled: false

prometheusOperator:
  # -- Prometheus Operator is enabled
  enabled: true

  admissionWebhooks:
    patch:
      # -- Resource limits & requests
      resources: {}
      # requests:
      #   memory: 400Mi

  namespaces:
    # -- Only check own namespace
    releaseNamespace: true

  kubeletService:
    # -- kubelet service for scraping kubelets is disabled
    enabled: false

  ## -- Resource limits & requests
  resources: {}
  # requests:
  #   memory: 400Mi

prometheus:
  # -- Prometheus is enabled
  enabled: true

  prometheusSpec:
    # -- All PrometheusRules are enabled
    ruleSelectorNilUsesHelmValues: false

    # -- Only check own namespace
    ruleNamespaceSelector: {}

    # -- All ServiceMonitors are enabled
    serviceMonitorSelectorNilUsesHelmValues: false

    # -- Only check own namespace
    serviceMonitorNamespaceSelector: {}

    # -- All PodMonitors are enabled
    podMonitorSelectorNilUsesHelmValues: false

    # -- Only check own namespace
    podMonitorNamespaceSelector: {}

    # -- All Probes are enabled
    probeSelectorNilUsesHelmValues: false

    # -- Only check own namespace
    probeNamespaceSelector: {}

    ## -- Resource limits & requests
    resources: {}
    # requests:
    #   memory: 400Mi

    ## -- Prometheus StorageSpec for persistent data
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md
    storageSpec: {}
    ## Using PersistentVolumeClaim
    ##
    # volumeClaimTemplate:
    #   spec:
    #     storageClassName: gluster
    #     accessModes: ["ReadWriteOnce"]
    #     resources:
    #       requests:
    #         storage: 50Gi
    #   selector: {}
Loading