
Commit 69cdffc

AUTO: Sync Helm Charts docs to ScalarDB docs site repo (#756)
Co-authored-by: josh-wong <joshua.wong@scalar-labs.com>
1 parent c572ac6 commit 69cdffc

File tree

35 files changed: +9800 -0 lines changed
@@ -0,0 +1,80 @@
promtail:
  config:
    snippets:
      # -- `scrapeConfigs` is exactly the `scrape_configs` part of https://grafana.com/docs/loki/latest/clients/promtail/configuration/#scrape_configs
      # -- The value will be created as a Kubernetes ConfigMap and then mounted to the Promtail Pod.
      # -- You do not usually need to change this value. It is set to scrape all the logs of ScalarDL/DB Pods by using regular expressions.
      scrapeConfigs: |
        # -- the `scalardl` job scrapes all the logs from Scalar Ledger Pods, Scalar Auditor Pods, and the corresponding Envoy Pods
        - job_name: scalardl
          pipeline_stages:
            - docker: {}
          kubernetes_sd_configs:
            - role: pod
          relabel_configs:
            - source_labels:
                - __meta_kubernetes_pod_node_name
              target_label: __host__
            - action: replace
              source_labels:
                - __meta_kubernetes_pod_name
              target_label: pod
            - action: keep
              regex: (.*)scalardl-(.+)
              source_labels:
                - pod
            - replacement: /var/log/pods/*$1/*.log
              separator: /
              source_labels:
                - __meta_kubernetes_pod_uid
                - __meta_kubernetes_pod_container_name
              target_label: __path__
        # -- the `scalardb` job scrapes all the logs of ScalarDB Server Pods and the corresponding Envoy Pods
        - job_name: scalardb
          pipeline_stages:
            - docker: {}
          kubernetes_sd_configs:
            - role: pod
          relabel_configs:
            - source_labels:
                - __meta_kubernetes_pod_node_name
              target_label: __host__
            - action: replace
              source_labels:
                - __meta_kubernetes_pod_name
              target_label: pod
            - action: keep
              regex: (.*)scalardb-(.+)
              source_labels:
                - pod
            - replacement: /var/log/pods/*$1/*.log
              separator: /
              source_labels:
                - __meta_kubernetes_pod_uid
                - __meta_kubernetes_pod_container_name
              target_label: __path__
        # -- the `scalar-admin-for-kubernetes` job scrapes all the logs of Scalar Admin for Kubernetes Pods
        - job_name: scalar-admin-for-kubernetes
          pipeline_stages:
            - docker: {}
            - cri: {}
          kubernetes_sd_configs:
            - role: pod
          relabel_configs:
            - source_labels:
                - __meta_kubernetes_pod_node_name
              target_label: __host__
            - action: replace
              source_labels:
                - __meta_kubernetes_pod_name
              target_label: pod
            - action: keep
              regex: (.*)scalar-admin-for-kubernetes-(.+)
              source_labels:
                - pod
            - replacement: /var/log/pods/*$1/*.log
              separator: /
              source_labels:
                - __meta_kubernetes_pod_uid
                - __meta_kubernetes_pod_container_name
              target_label: __path__
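
The top-level promtail key suggests these values are meant for Grafana's loki-stack chart, which bundles Loki and Promtail. The following is a minimal deployment sketch, not the documented procedure: the file name scalar-loki-stack-custom-values.yaml, the namespace monitoring, and the release name scalar-logging-loki are assumptions (the release name is taken from the Loki URL that appears in the Grafana data source configuration further below).

    # Add the Grafana chart repository (skip if it is already configured)
    helm repo add grafana https://grafana.github.io/helm-charts
    helm repo update

    # Install Loki and Promtail with the custom scrape configuration shown above
    # (file name, release name, and namespace are assumptions)
    helm install scalar-logging-loki grafana/loki-stack \
      --namespace monitoring --create-namespace \
      -f scalar-loki-stack-custom-values.yaml

Because each job uses an `action: keep` rule that matches only Pod names containing scalardl-, scalardb-, or scalar-admin-for-kubernetes-, Promtail scrapes the Scalar Pods (and their Envoy sidecars) and ignores logs from unrelated Pods in the cluster.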
Lines changed: 167 additions & 0 deletions
@@ -0,0 +1,167 @@
defaultRules:
  # -- Default PrometheusRules are not enabled
  create: false

alertmanager:
  # -- alertmanager is enabled
  enabled: true

  # -- Only check own namespace
  alertmanagerConfigNamespaceSelector: null

grafana:
  # -- grafana is enabled
  enabled: true

  # -- Default Grafana dashboards are not enabled
  defaultDashboardsEnabled: false

  sidecar:
    datasources:
      enabled: true
      defaultDatasourceEnabled: false
      label: grafana_datasource
      labelValue: "1"
    dashboards:
      enabled: true
      label: grafana_dashboard
      labelValue: "1"
    # -- Resource limits & requests
    resources: {}
    # requests:
    #   memory: 400Mi

  # -- Grafana's primary configuration
  grafana.ini:
    security:
      # -- allow Grafana to be embedded (not set the X-Frame-Options header)
      # -- If you use Scalar Manager, you need to set allow_embedding to true.
      # -- https://grafana.com/docs/grafana/latest/administration/configuration/#allow_embedding
      allow_embedding: false

  # -- Additional data source configurations
  additionalDataSources:
    - name: Prometheus
      type: prometheus
      uid: prometheus
      url: http://scalar-monitoring-kube-pro-prometheus:9090/
      access: proxy
      editable: false
      isDefault: false
      jsonData:
        timeInterval: 30s
    # - name: Loki
    #   type: loki
    #   uid: loki
    #   url: http://scalar-logging-loki:3100/
    #   access: proxy
    #   editable: false
    #   isDefault: false

kubeApiServer:
  # -- Scraping kube-apiserver is disabled
  enabled: false

kubeControllerManager:
  # -- Scraping kube-controller-manager is disabled
  enabled: false

coreDns:
  # -- Scraping CoreDNS is disabled
  enabled: false

kubeEtcd:
  # -- Scraping etcd is disabled
  enabled: false

kubeScheduler:
  # -- Scraping kube-scheduler is disabled
  enabled: false

kubeProxy:
  # -- Scraping kube-proxy is disabled
  enabled: false

kubelet:
  # -- Scraping kubelet is disabled
  enabled: false

kubeStateMetrics:
  # -- kube-state-metrics is disabled
  enabled: false

nodeExporter:
  # -- node-exporter is disabled
  enabled: false

prometheusOperator:
  # -- Prometheus Operator is enabled
  enabled: true

  admissionWebhooks:
    patch:
      # -- Resource limits & requests
      resources: {}
      # requests:
      #   memory: 400Mi

  namespaces:
    # -- Only check own namespace
    releaseNamespace: true

  kubeletService:
    # -- kubelet service for scraping kubelets is disabled
    enabled: false

  ## -- Resource limits & requests
  resources: {}
  # requests:
  #   memory: 400Mi

prometheus:
  # -- Prometheus is enabled
  enabled: true

  prometheusSpec:
    # -- All PrometheusRules are enabled
    ruleSelectorNilUsesHelmValues: false

    # -- Only check own namespace
    ruleNamespaceSelector: {}

    # -- All ServiceMonitors are enabled
    serviceMonitorSelectorNilUsesHelmValues: false

    # -- Only check own namespace
    serviceMonitorNamespaceSelector: {}

    # -- All PodMonitors are enabled
    podMonitorSelectorNilUsesHelmValues: false

    # -- Only check own namespace
    podMonitorNamespaceSelector: {}

    # -- All Probes are enabled
    probeSelectorNilUsesHelmValues: false

    # -- Only check own namespace
    probeNamespaceSelector: {}

    ## -- Resource limits & requests
    resources: {}
    # requests:
    #   memory: 400Mi

    ## -- Prometheus StorageSpec for persistent data
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md
    storageSpec: {}
    ## Using PersistentVolumeClaim
    ##
    # volumeClaimTemplate:
    #   spec:
    #     storageClassName: gluster
    #     accessModes: ["ReadWriteOnce"]
    #     resources:
    #       requests:
    #         storage: 50Gi
    #   selector: {}
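
The top-level keys (defaultRules, alertmanager, grafana, prometheusOperator, prometheus) match the prometheus-community/kube-prometheus-stack chart. The following is a minimal deployment sketch, not the documented procedure: the file name scalar-prometheus-custom-values.yaml and the namespace monitoring are assumptions, and the release name scalar-monitoring is inferred from the Prometheus data source URL above (http://scalar-monitoring-kube-pro-prometheus:9090/).

    # Add the prometheus-community chart repository (skip if it is already configured)
    helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
    helm repo update

    # Install Prometheus Operator, Prometheus, Alertmanager, and Grafana with the custom values shown above
    # (file name, release name, and namespace are assumptions)
    helm install scalar-monitoring prometheus-community/kube-prometheus-stack \
      --namespace monitoring --create-namespace \
      -f scalar-prometheus-custom-values.yaml

With the *SelectorNilUsesHelmValues flags set to false, Prometheus discovers ServiceMonitors, PodMonitors, PrometheusRules, and Probes in its own namespace regardless of which Helm release created them, which is how the monitoring resources installed by the Scalar charts get picked up. The commented-out Loki data source block can be uncommented once the Loki release is installed so that logs become viewable in Grafana.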
