diff --git a/CHANGELOG.md b/CHANGELOG.md index cfadfbc..0397487 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +# 2025-07-29 +### Changed +- Renamed service "OCI Logging Analytics" to "OCI Log Analytics" across all documentation and references + - This is a non-breaking change that maintains backward compatibility + - Updated service references in documentation, description, and comments + # 2025-06-17 ### Added - Introduced a new DaemonSet that uses eBPF (Extended Berkeley Packet Filter) to capture TCP connection logs and builds application/network topology representing workload to workload relationships within the Kubernetes cluster. diff --git a/README.md b/README.md index 4ee2da4..784e17b 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # OCI Kubernetes Monitoring Solution -OCI Kubernetes Monitoring Solution is a turn-key Kubernetes monitoring and management package based on OCI Logging Analytics cloud service, OCI Monitoring, OCI Management Agent and Fluentd. +OCI Kubernetes Monitoring Solution is a turn-key Kubernetes monitoring and management package based on OCI Log Analytics cloud service, OCI Monitoring, OCI Management Agent and Fluentd. It enables DevOps, Cloud Admins, Developers, and Sysadmins to @@ -11,7 +11,7 @@ It enables DevOps, Cloud Admins, Developers, and Sysadmins to across their entire environment - using Logs, Metrics, and Object metadata. -It does extensive enrichment of logs, metrics and object information to enable cross correlation across entities from different tiers in OCI Logging Analytics. A collection of dashboards is provided to get users started quickly. +It does extensive enrichment of logs, metrics and object information to enable cross correlation across entities from different tiers in OCI Log Analytics. A collection of dashboards is provided to get users started quickly. 
## Solution UI @@ -40,8 +40,8 @@ It does extensive enrichment of logs, metrics and object information to enable c ### Pre-requisites -* OCI Logging Analytics service must be onboarded with the minimum required policies, in the OCI region where you want to monitor. Refer [Logging Analytics Quick Start](https://docs.oracle.com/en-us/iaas/logging-analytics/doc/quick-start.html) for details. -* Create OCI Logging Analytics LogGroup(s) if not done already. Refer [Create Log Group](https://docs.oracle.com/en-us/iaas/logging-analytics/doc/create-logging-analytics-resources.html#GUID-D1758CFB-861F-420D-B12F-34D1CC5E3E0E) for details. +* OCI Log Analytics service must be onboarded with the minimum required policies, in the OCI region where you want to monitor. Refer [Log Analytics Quick Start](https://docs.oracle.com/en-us/iaas/log-analytics/doc/quick-start.html) for details. +* Create OCI Log Analytics LogGroup(s) if not done already. Refer [Create Log Group](https://docs.oracle.com/en-us/iaas/log-analytics/doc/create-logging-analytics-resources.html#GUID-D1758CFB-861F-420D-B12F-34D1CC5E3E0E) for details. * Compartments: * OKE Compartment: where OKE and related infra resources are created. @@ -65,7 +65,7 @@ It does extensive enrichment of logs, metrics and object information to enable c ``` - **Note**: _This dynamic group is not required for non OKE or when you choose to use Config file based AuthZ for monitoring the logs._ - * Create a user and user group using which the logs to be published to OCI Logging Analytics. Refer [Managing Users](https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/managingusers.htm) and [Managing User Groups](https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/managinggroups.htm) for details. + * Create a user and user group using which the logs to be published to OCI Log Analytics. 
Refer [Managing Users](https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/managingusers.htm) and [Managing User Groups](https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/managinggroups.htm) for details. - **Note**: _This is not required for OKE when you choose to use the default (Instance principal) AuthZ mechanism._ * Create a policy with following statements. @@ -125,7 +125,7 @@ It does extensive enrichment of logs, metrics and object information to enable c | Deployment Method | Supported Environments | Solution UI | Dashboards | Customisations | Comments | | :----: | :----: | :----: | :----: | :----: | :----: | -| OCI Logging Analytics Connect Cluster | OKE*** | :heavy_check_mark: | Manual | Partial Control (Recommended) | Customisations are possible through `Helm` once deployed using `Logging Analytics Connect Cluster` flow from Console, which is applicable for both Automatic and Manual Deployment modes. We recommend choosing Manual Deployment mode for OKE clusters with Private API Server endpoint, as support for the automatic deployment for the same would be available soon. | +| OCI Log Analytics Connect Cluster | OKE*** | :heavy_check_mark: | Manual | Partial Control (Recommended) | Customisations are possible through `Helm` once deployed using `Log Analytics Connect Cluster` flow from Console, which is applicable for both Automatic and Manual Deployment modes. We recommend choosing Manual Deployment mode for OKE clusters with Private API Server endpoint, as support for the automatic deployment for the same would be available soon. | | Helm | All* | :heavy_check_mark:** | Manual| Full Control (Recommended) | | | OCI Resource Manager | OKE | :heavy_check_mark:** | :heavy_check_mark: | Partial Control | Customisations are possible through `Helm` once deployed using `OCI Resource Manager`. | | Terraform | OKE | :heavy_check_mark:** | :heavy_check_mark: | Partial Control | Customisations are possible through `Helm` once deployed using `Terraform`. 
| @@ -133,17 +133,17 @@ It does extensive enrichment of logs, metrics and object information to enable c \* For some environments, modification of the configuration may be required. -\** Solution UI experience including Topology and other visualisations are available for customers deploying the solution using methods other than `OCI Logging Analytics Connect Cluster`, only if some additional steps are followed as mentioned in their individual sections. +\** Solution UI experience including Topology and other visualisations are available for customers deploying the solution using methods other than `OCI Log Analytics Connect Cluster`, only if some additional steps are followed as mentioned in their individual sections. \*** Connect cluster support for EKS and clusters other than OKE (partially automated flow) would be available soon. Meanwhile, if you would like to experience the Solution for EKS, use [helm](#helm) or other deployment methods. -#### OCI Logging Analytics Connect Cluster +#### OCI Log Analytics Connect Cluster -This newly launched UI based workflow from Logging Analytics Console is the recommended approach to start enabling Kubernetes Monitoring Solution for your OKE clusters. In this approach, you would go through a guided flow to enable the monitoring. It has support for both Automatic and Manual deployment modes to install helm charts onto your OKE clusters. The creation of various OCI resources like Logging Analytics LogGroup, Entity, Management Agent Install Key is automatically taken care in this approach irrespective of the deployment method that you choose. The required IAM Dynamic Group and Policies for the collection of logs, metrics, objects discovery data into OCI, can be optionally enabled when using this flow. +This newly launched UI based workflow from Log Analytics Console is the recommended approach to start enabling Kubernetes Monitoring Solution for your OKE clusters. 
In this approach, you would go through a guided flow to enable the monitoring. It has support for both Automatic and Manual deployment modes to install helm charts onto your OKE clusters. The creation of various OCI resources like Log Analytics LogGroup, Entity, Management Agent Install Key is automatically taken care in this approach irrespective of the deployment method that you choose. The required IAM Dynamic Group and Policies for the collection of logs, metrics, objects discovery data into OCI, can be optionally enabled when using this flow. -Customisations are possible through helm once deployed using `Logging Analytics Connect Cluster` flow from Console, which is applicable for both Automatic and Manual Deployment modes. We recommend choosing Manual Deployment mode for OKE clusters with Private API Server endpoint, as support for the automatic deployment for the same would be available soon. +Customisations are possible through helm once deployed using `Log Analytics Connect Cluster` flow from Console, which is applicable for both Automatic and Manual Deployment modes. We recommend choosing Manual Deployment mode for OKE clusters with Private API Server endpoint, as support for the automatic deployment for the same would be available soon. -Refer [this doc](https://docs.oracle.com/en-us/iaas/logging-analytics/doc/kubernetes-solution.html) for complete instructions on using this approach. +Refer [this doc](https://docs.oracle.com/en-us/iaas/log-analytics/doc/kubernetes-solution.html) for complete instructions on using this approach. :hourglass_flowing_sand: Connect cluster support for EKS and clusters other than OKE (partially automated flow) would be available soon. Meanwhile, if you would like to experience the Solution for EKS, use [helm](#helm) or other deployment methods. @@ -155,7 +155,7 @@ Refer [this doc](https://docs.oracle.com/en-us/iaas/logging-analytics/doc/kubern * Helm ([Installation instructions](https://helm.sh/docs/intro/install/)). 
* [OCI CLI](https://docs.oracle.com/en-us/iaas/Content/API/Concepts/cliconcepts.htm) -##### 1 Create Logging Analytics Entity of type Kubernetes Cluster +##### 1 Create Log Analytics Entity of type Kubernetes Cluster * Prepate Entity metadata which represents Kubernetes Cluster's details. - Sample entity_metadata.json @@ -166,19 +166,19 @@ Refer [this doc](https://docs.oracle.com/en-us/iaas/logging-analytics/doc/kubern - => Replace with Cluster's creation time in the format, YYYY-MM-DDTHH:MM:SSZ. It is used to distinguish 2 clusters with same name if exists. - => Replace with OCID of OKE cluster OR ARN of EKS cluster, etc. - => Replace with version of Kubernetes running on the cluster. - - => Replace with OCID of the compartment in which the `Logging Analytics LogGroup` exists. Note that for the Logging Analytics Solution UI to work properly, you must keep all your OCI resources like `Logging Analytics LogGroup`, `Logging Analytics Entity`, `Management Agent Install Key` under the same compartment. + - => Replace with OCID of the compartment in which the `Log Analytics LogGroup` exists. Note that for the Log Analytics Solution UI to work properly, you must keep all your OCI resources like `Log Analytics LogGroup`, `Log Analytics Entity`, `Management Agent Install Key` under the same compartment. - => Replace with `OKE` for OKE cluster, `EKS` for Amazon EKS Cluster, etc. -* Create Logging Analytics Entity of type Kubernetes Cluster using above created metadata. +* Create Log Analytics Entity of type Kubernetes Cluster using above created metadata. - Sample command to create entity using OCI CLI ``` oci log-analytics entity create --name _ --namespace-name --compartment-id --entity-type-name omc_kubernetes_cluster --metadata file://entity_metadata.json ``` - - => Namespace of the tenancy in which the Logging Analytics is subscribed. 
You find it by `Go to OCI Logging Analytics Administration, click Service Details, and note the namespace value.` + - => Namespace of the tenancy in which the Log Analytics is subscribed. You find it by `Go to OCI Log Analytics Administration, click Service Details, and note the namespace value.` -##### 2 Create Logging Analytics LogGroup +##### 2 Create Log Analytics LogGroup -Create OCI Logging Analytics LogGroup(s) if not done already. Refer [Create Log Group](https://docs.oracle.com/en-us/iaas/logging-analytics/doc/create-logging-analytics-resources.html#GUID-D1758CFB-861F-420D-B12F-34D1CC5E3E0E) for details. +Create OCI Log Analytics LogGroup(s) if not done already. Refer [Create Log Group](https://docs.oracle.com/en-us/iaas/log-analytics/doc/create-logging-analytics-resources.html#GUID-D1758CFB-861F-420D-B12F-34D1CC5E3E0E) for details. ##### 3 Download helm chart @@ -187,7 +187,7 @@ Create OCI Logging Analytics LogGroup(s) if not done already. Refer [Create Log ##### 4 Update values.yaml -**Note** that for the Logging Analytics Solution UI to work properly, you must keep all your OCI resources like `Logging Analytics LogGroup`, `Logging Analytics Entity`, `Management Agent Install Key` under the same compartment. +**Note** that for the Log Analytics Solution UI to work properly, you must keep all your OCI resources like `Log Analytics LogGroup`, `Log Analytics Entity`, `Management Agent Install Key` under the same compartment. * Create override_values.yaml, to override the minimum required variables in values.yaml. - override_values.yaml @@ -195,16 +195,15 @@ Create OCI Logging Analytics LogGroup(s) if not done already. Refer [Create Log global: # -- OCID for OKE cluster or a unique ID for other Kubernetes clusters. kubernetesClusterID: - # -- Provide a unique name for the cluster. This would help in uniquely identifying the logs and metrics data at OCI Logging Analytics and OCI Monitoring respectively. 
- # -- Must follow the format '_' instead of just the cluster name. + # -- Provide a unique name for the cluster. This would help in uniquely identifying the logs and metrics data at OCI Log Analytics and OCI Monitoring respectively. kubernetesClusterName: oci-onm-logan: - # Go to OCI Logging Analytics Administration, click Service Details, and note the namespace value. + # Go to OCI Log Analytics Administration, click Service Details, and note the namespace value. ociLANamespace: - # OCI Logging Analytics Log Group OCID + # OCI Log Analytics Log Group OCID ociLALogGroupID: - # OCI Logging Analytics Entity (of Kubernetes Cluster Type) OCID. + # OCI Log Analytics Entity (of Kubernetes Cluster Type) OCID. ociLAClusterEntityID: oci-onm-mgmt-agent: @@ -230,12 +229,12 @@ helm upgrade --values > override_values.yaml ``` -\ => Replace with release name. The default release name used while installing through `OCI Logging Analytics Connect Cluster` is `oci-kubernetes-monitoring`. +\ => Replace with release name. The default release name used while installing through `OCI Log Analytics Connect Cluster` is `oci-kubernetes-monitoring`. ##### 5.c (Optional) Import Dashboards @@ -307,13 +306,13 @@ Launch OCI Resource Manager Stack in OCI Tenancy and Region of the OKE Cluster, * Kubectl ([Installation instructions](https://kubernetes.io/docs/tasks/tools/#kubectl)). 
* [OCI CLI](https://docs.oracle.com/en-us/iaas/Content/API/Concepts/cliconcepts.htm) -##### 1 Create Logging Analytics Entity of type Kubernetes Cluster +##### 1 Create Log Analytics Entity of type Kubernetes Cluster -Refer [here](1-create-logging-analytics-entity-of-type-kubernetes-cluster) +Refer [here](#1-create-log-analytics-entity-of-type-kubernetes-cluster) -##### 2 Create Logging Analytics LogGroup +##### 2 Create Log Analytics LogGroup -Refer [here](2-create-logging-analytics-loggroup) +Refer [here](#2-create-log-analytics-loggroup) ##### 3 Download helm chart @@ -367,37 +366,9 @@ Refer [here](#3c-import-dashboards). ### Upgrading to a major version -#### 3.6.0 to 4.0.0 - -For changes in this release, refer to [CHANGELOG.md](CHANGELOG.md) - -##### Upgrade instructions - -1. Update IAM Policies: - * This version requires additional policy statements for infrastructure discovery. - * See the pre-requisites section in the [README](../README.md#0-pre-requisites) for details. - -1. As mentioned in the change log, this version introduces a new DaemonSet that uses eBPF (Extended Berkeley Packet Filter) to capture TCP connection logs and builds application/network topology representing workload to workload relationships within the Kubernetes cluster. - * To be able to run the required eBPF program, the pods needs to run in privileged mode but restricting to CAP_BPF capability only. - * In your environment, if you have any restrictions with respect to running pods in privileged mode, you may need to adjust your cluster configuration accordingly. - -2. 
Upgrade the Helm chart: - - ```sh - # fetch latest (4.x) helm repo for oci - helm repo update oci-onm - - # fetch the current release configuration - helm get values -n > override_values.yaml - - # Upgrade the helm chart - helm upgrade oci/oci-onm -n -f override_values.yaml - ``` - - #### 2.x to 3.x -One of the major changes introduced in 3.0.0 is refactoring of helm chart where major features of the solution got split into separate sub-charts. 2.x has only support for logs and objects collection using Fluentd and OCI Logging Analytics and this is now moved into a separate chart oci-onm-logan and included as a sub-chart to the main chart oci-onm. This is a breaking change w.r.t the values.yaml and any customisations that you might have done on top of it. There is no breaking change w.r.t functionality offered in 2.x. For full list of changes in 3.x, refer to [changelog](CHANGELOG.md). +One of the major changes introduced in 3.0.0 is refactoring of helm chart where major features of the solution got split into separate sub-charts. 2.x has only support for logs and objects collection using Fluentd and OCI Log Analytics and this is now moved into a separate chart oci-onm-logan and included as a sub-chart to the main chart oci-onm. This is a breaking change w.r.t the values.yaml and any customisations that you might have done on top of it. There is no breaking change w.r.t functionality offered in 2.x. For full list of changes in 3.x, refer to [changelog](CHANGELOG.md). You may fall into one of the below categories and may need to take actions accordingly. @@ -420,13 +391,13 @@ We recommend you to uninstall the release created using 2.x chart and follow the global: # -- OCID for OKE cluster or a unique ID for other Kubernetes clusters. kubernetesClusterID: ocid1.cluster.oc1.phx.aaaaaaaaa....... - # -- Provide a unique name for the cluster. This would help in uniquely identifying the logs and metrics data at OCI Logging Analytics and OCI Monitoring respectively. 
+ # -- Provide a unique name for the cluster. This would help in uniquely identifying the logs and metrics data at OCI Log Analytics and OCI Monitoring respectively. kubernetesClusterName: oci-onm-logan: - # Go to OCI Logging Analytics Administration, click Service Details, and note the namespace value. + # Go to OCI Log Analytics Administration, click Service Details, and note the namespace value. ociLANamespace: - # OCI Logging Analytics Log Group OCID + # OCI Log Analytics Log Group OCID ociLALogGroupID: ocid1.loganalyticsloggroup.oc1.phx.amaaaaaa...... ##### Have customisations to the existing chart or values.yaml @@ -456,14 +427,14 @@ If you are already using a separate values.yaml for your customisations, you sti global: # -- OCID for OKE cluster or a unique ID for other Kubernetes clusters. kubernetesClusterID: ocid1.cluster.oc1.phx.aaaaaaaaa....... - # -- Provide a unique name for the cluster. This would help in uniquely identifying the logs and metrics data at OCI Logging Analytics and OCI Monitoring respectively. + # -- Provide a unique name for the cluster. This would help in uniquely identifying the logs and metrics data at OCI Log Analytics and OCI Monitoring respectively. kubernetesClusterName: oci-onm-logan: runtime: docker - # Go to OCI Logging Analytics Administration, click Service Details, and note the namespace value. + # Go to OCI Log Analytics Administration, click Service Details, and note the namespace value. ociLANamespace: - # OCI Logging Analytics Log Group OCID + # OCI Log Analytics Log Group OCID ociLALogGroupID: ocid1.loganalyticsloggroup.oc1.phx.amaaaaaa...... 
##### Example 2: Customisation of a specific log diff --git a/charts/logan/Chart.yaml b/charts/logan/Chart.yaml index 7a5bdba..cf81e66 100644 --- a/charts/logan/Chart.yaml +++ b/charts/logan/Chart.yaml @@ -3,9 +3,9 @@ apiVersion: v2 name: oci-onm-logan -description: Charts for sending Kubernetes platform logs, compute logs, and Kubernetes Objects information to OCI Logging Analytics. +description: Charts for sending Kubernetes platform logs, compute logs, and Kubernetes Objects information to OCI Log Analytics. type: application -version: 4.0.0 +version: 4.0.1 appVersion: "3.0.0" dependencies: diff --git a/charts/logan/README.md b/charts/logan/README.md index ee4fb89..27cfc31 100644 --- a/charts/logan/README.md +++ b/charts/logan/README.md @@ -2,7 +2,7 @@ ![Version: 3.0.0](https://img.shields.io/badge/Version-3.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.0.0](https://img.shields.io/badge/AppVersion-3.0.0-informational?style=flat-square) -Charts for sending Kubernetes platform logs, compute logs, and Kubernetes Objects information to OCI Logging Analytics. +Charts for sending Kubernetes platform logs, compute logs, and Kubernetes Objects information to OCI Log Analytics. ## Requirements @@ -15,15 +15,15 @@ Charts for sending Kubernetes platform logs, compute logs, and Kubernetes Object | Key | Type | Default | Description | |-----|------|---------|-------------| | authtype | string | `"InstancePrincipal"` | Allowed values: InstancePrincipal, config | -| extraEnv | list | `[]` | Use this to tag all the collected logs with one or more key:value pairs. Key must be a valid field in Logging Analytics metadata: "Client Host Region": "PCT" "Environment": "Production" "Third key": "Third Value" @param extra environment variables. 
Example name: ENV_VARIABLE_NAME value: ENV_VARIABLE_VALUE | +| extraEnv | list | `[]` | Use this to tag all the collected logs with one or more key:value pairs. Key must be a valid field in Log Analytics metadata: "Client Host Region": "PCT" "Environment": "Production" "Third key": "Third Value" @param extra environment variables. Example name: ENV_VARIABLE_NAME value: ENV_VARIABLE_VALUE | | extraVolumeMounts | list | `[]` | @param extraVolumeMounts Mount extra volume(s). Example: - name: tmpDir mountPath: /tmp | | extraVolumes | list | `[]` | @param extraVolumes Extra volumes. Example: - name: tmpDir hostPath: path: /tmp log | | fluentd.baseDir | string | `"/var/log"` | Base directory on the node (with read write permission) for storing fluentd plugins related data. | | fluentd.customFluentdConf | string | `""` | | -| fluentd.customLogs | string | `nil` | Configuration for any custom logs which are not part of the default configuration defined in this file. All the pod/container logs will be collected as per "genericContainerLogs" section. Use this section to create a custom configuration for any of the container logs. Also, you can use this section to define configuration for any other log path existing on a Kubernetes worker node custom-id1: path: /var/log/containers/custom*.log Logging Analytics log source to use for parsing and processing the logs: ociLALogSourceName: "Custom1 Logs" The regular expression pattern for the starting line in case of multi-line logs. multilineStartRegExp: Set isContainerLog to false if the log is not a container log (/var/log/containers/*.log). Default value is true. isContainerLog: true | +| fluentd.customLogs | string | `nil` | Configuration for any custom logs which are not part of the default configuration defined in this file. All the pod/container logs will be collected as per "genericContainerLogs" section. Use this section to create a custom configuration for any of the container logs. 
Also, you can use this section to define configuration for any other log path existing on a Kubernetes worker node custom-id1: path: /var/log/containers/custom*.log Log Analytics log source to use for parsing and processing the logs: ociLALogSourceName: "Custom1 Logs" The regular expression pattern for the starting line in case of multi-line logs. multilineStartRegExp: Set isContainerLog to false if the log is not a container log (/var/log/containers/*.log). Default value is true. isContainerLog: true | | fluentd.file | string | `"fluent.conf"` | Fluentd config file name | | fluentd.genericContainerLogs.exclude_path | list | `["\"/var/log/containers/kube-proxy-*.log\"","\"/var/log/containers/kube-flannel-*.log\"","\"/var/log/containers/kube-dns-autoscaler-*.log\"","\"/var/log/containers/coredns-*.log\"","\"/var/log/containers/csi-oci-node-*.log\"","\"/var/log/containers/proxymux-client-*.log\"","\"/var/log/containers/cluster-autoscaler-*.log\""]` | List of log paths to exclude that are already part of other specific configurations defined (like Kube Proxy, Kube Flannel) If you want to create a custom configuration for any of the container logs using the customLogs section, then exclude the corresponding log path here. | -| fluentd.genericContainerLogs.ociLALogSourceName | string | `"Kubernetes Container Generic Logs"` | Default Logging Analytics log source to use for parsing and processing the logs: Kubernetes Container Generic Logs. | +| fluentd.genericContainerLogs.ociLALogSourceName | string | `"Kubernetes Container Generic Logs"` | Default Log Analytics log source to use for parsing and processing the logs: Kubernetes Container Generic Logs. | | fluentd.genericContainerLogs.path | string | `"/var/log/containers/*.log"` | | | fluentd.kubernetesMetadataFilter.ca_file | string | `nil` | Path to CA file for Kubernetes server certificate validation | | fluentd.kubernetesMetadataFilter.kubernetes_url | string | `nil` | Kubernetes API server URL. 
Alternatively, environment variables KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT can be used Environment variable are given preference. | @@ -74,7 +74,7 @@ Charts for sending Kubernetes platform logs, compute logs, and Kubernetes Object | oci.configFiles.config | string | `"# Replace each of the below fields with actual values.\n[DEFAULT]\nuser=\nfingerprint=\nkey_file=\ntenancy=\nregion="` | config file [data](https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm) Replace each of the below fields with actual values. [DEFAULT] user= fingerprint= key_file= tenancy= region= | | oci.file | string | `"config"` | Config file name | | oci.path | string | `"/var/opt/.oci"` | Path to the OCI API config file | -| ociLALogGroupID | string | `nil` | OCID of Logging Analytics Log Group to send logs to. Can be overridden for individual log types. e.g. ocid1.loganalyticsloggroup.oc1.phx.amaaaaasdfaskriauucc55rlwlxe4ahe2vfmtuoqa6qsgu7mb6jugxacsk6a | +| ociLALogGroupID | string | `nil` | OCID of Log Analytics Log Group to send logs to. Can be overridden for individual log types. e.g. ocid1.loganalyticsloggroup.oc1.phx.amaaaaasdfaskriauucc55rlwlxe4ahe2vfmtuoqa6qsgu7mb6jugxacsk6a | | ociLANamespace | string | `nil` | | | resourceNamePrefix | string | `"{{ .Values.global.resourceNamePrefix }}"` | Resoure Name Prefix: Wherever allowed, this prefix will be used with all resources used by this chart | | resources.limits | object | `{"memory":"500Mi"}` | Limits | diff --git a/charts/logan/templates/logs-configmap.yaml b/charts/logan/templates/logs-configmap.yaml index 84296b1..f7f63e8 100644 --- a/charts/logan/templates/logs-configmap.yaml +++ b/charts/logan/templates/logs-configmap.yaml @@ -285,7 +285,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. 
@type record_transformer enable_ruby true @@ -322,7 +322,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -407,7 +407,7 @@ data: {{- end }} - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -523,7 +523,7 @@ data: annotation_match [ ".*" ] - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true diff --git a/charts/logan/values.yaml b/charts/logan/values.yaml index 578ae01..9498c92 100644 --- a/charts/logan/values.yaml +++ b/charts/logan/values.yaml @@ -31,7 +31,7 @@ oci-onm-common: # Allowed values: docker, cri(for OKE 1.20 and above) runtime: cri -# -- Authentication type for authenticating with OCI Logging Analytics service +# -- Authentication type for authenticating with OCI Log Analytics service # -- Allowed values: InstancePrincipal, config authtype: InstancePrincipal @@ -57,10 +57,10 @@ image: # -- Image pull policy imagePullPolicy: Always -# -- Logging Analytics namespace. Can be found in OCI console --> Logging Analytics --> Administration --> Service +# -- Log Analytics namespace. Can be found in OCI console --> Log Analytics --> Administration --> Service ociLANamespace: -# -- OCID of Logging Analytics Log Group to send logs to. +# -- OCID of Log Analytics Log Group to send logs to. # Can be overridden for individual log types. # e.g. 
ocid1.loganalyticsloggroup.oc1.phx.amaaaaasdfaskriauucc55rlwlxe4ahe2vfmtuoqa6qsgu7mb6jugxacsk6a ociLALogGroupID: @@ -90,8 +90,8 @@ enableTCPConnectLogs: true # -- Enables collection of AWS EKS Control Plane logs through CloudWatch or S3 Fluentd plugin enableEKSControlPlaneLogs: false -# Logging Analytics additional metadata. Use this to tag all the collected logs with one or more key:value pairs. -# Key must be a valid field in Logging Analytics +# Log Analytics additional metadata. Use this to tag all the collected logs with one or more key:value pairs. +# Key must be a valid field in Log Analytics #metadata: #"Client Host Region": "PCT" #"Environment": "Production" @@ -291,7 +291,7 @@ fluentd: kube-proxy: # kube-proxy Log file location. path: /var/log/containers/kube-proxy-*.log - # Logging Analytics log source to use for parsing and processing Kubernetes Proxy Logs. + # Log Analytics log source to use for parsing and processing Kubernetes Proxy Logs. ociLALogSourceName: "Kubernetes Proxy Logs" # Regular expression pattern for the starting line in case of multi-line logs. multilineStartRegExp: /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/ @@ -310,7 +310,7 @@ fluentd: kube-flannel: # kube-flannel log files location. path: /var/log/containers/kube-flannel-*.log - # Logging Analytics log source to use for parsing and processing Kubernetes Flannel Logs. + # Log Analytics log source to use for parsing and processing Kubernetes Flannel Logs. ociLALogSourceName: "Kubernetes Flannel Logs" # The regular expression pattern for the starting line in case of multi-line logs. multilineStartRegExp: /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/ @@ -318,7 +318,7 @@ fluentd: # -- Kubernetes DNS Autoscaler Logs collection configuration kube-dns-autoscaler: path: /var/log/containers/kube-dns-autoscaler-*.log - # Logging Analytics log source to use for parsing and processing Kubernetes DNS Autoscaler Logs. 
+ # Log Analytics log source to use for parsing and processing Kubernetes DNS Autoscaler Logs. ociLALogSourceName: "Kubernetes DNS Autoscaler Logs" # The regular expression pattern for the starting line in case of multi-line logs. multilineStartRegExp: /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/ @@ -327,7 +327,7 @@ fluentd: coredns: # coredns log files location. path: /var/log/containers/coredns-*.log - # Logging Analytics log source to use for parsing and processing Kubernetes Core DNS Logs. + # Log Analytics log source to use for parsing and processing Kubernetes Core DNS Logs. ociLALogSourceName: "Kubernetes Core DNS Logs" # Regular expression pattern for the starting line in case of multi-line logs. multilineStartRegExp: /^\[[^\]]+\]\s+/ @@ -336,21 +336,21 @@ fluentd: csinode: # csinode log files location. path: /var/log/containers/csi-oci-node-*.log,/var/log/containers/ebs-csi-node-*.log - # Logging Analytics log source to use for parsing and processing Kubernetes CSI Node Driver Logs. + # Log Analytics log source to use for parsing and processing Kubernetes CSI Node Driver Logs. ociLALogSourceName: "Kubernetes CSI Node Driver Logs" # -- Proxymux Client Logs collection configuration proxymux: # proxymux log files location.. path: /var/log/containers/proxymux-client-*.log - # Logging Analytics log source to use for parsing and processing OKE Proxymux Client Logs. + # Log Analytics log source to use for parsing and processing OKE Proxymux Client Logs. ociLALogSourceName: "OKE Proxymux Client Logs" # -- Kubernetes Autoscaler Logs collection configuration cluster-autoscaler: # cluster autoscalar log files location. path: /var/log/containers/cluster-autoscaler-*.log - # Logging Analytics log source to use for parsing and processing Kubernetes Autoscaler Logs. + # Log Analytics log source to use for parsing and processing Kubernetes Autoscaler Logs. 
ociLALogSourceName: "Kubernetes Autoscaler Logs" # The regular expression pattern for the starting line in case of multi-line logs. multilineStartRegExp: /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/ @@ -359,14 +359,14 @@ fluentd: csi-controller: # csi controller log files location. path: /var/log/containers/ebs-csi-controller-*.log - # Logging Analytics log source to use for parsing and processing Kubernetes CSI Controller Logs. + # Log Analytics log source to use for parsing and processing Kubernetes CSI Controller Logs. ociLALogSourceName: "Kubernetes CSI Controller Logs" # Config specific to API Server Logs Collection kube-apiserver: # The path to the source files. path: /var/log/containers/kube-apiserver-*.log - # Logging Analytics log source to use for parsing and processing the logs: Kubernetes API Server Logs. + # Log Analytics log source to use for parsing and processing the logs: Kubernetes API Server Logs. ociLALogSourceName: "Kubernetes API Server Logs" # The regular expression pattern for the starting line in case of multi-line logs. multilineStartRegExp: /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/ @@ -375,14 +375,14 @@ fluentd: etcd: # The path to the source files. path: /var/log/containers/etcd-*.log - # Logging Analytics log source to use for parsing and processing the logs: Kubernetes etcd Logs. + # Log Analytics log source to use for parsing and processing the logs: Kubernetes etcd Logs. ociLALogSourceName: "Kubernetes etcd Logs" # Config specific to kube-controller-manager Logs Collection kube-controller-manager: # The path to the source files. path: /var/log/containers/kube-controller-manager-*.log - # Logging Analytics log source to use for parsing and processing the logs: Kubernetes Controller Manager Logs. + # Log Analytics log source to use for parsing and processing the logs: Kubernetes Controller Manager Logs. 
ociLALogSourceName: "Kubernetes Controller Manager Logs" # The regular expression pattern for the starting line in case of multi-line logs. multilineStartRegExp: /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/ @@ -391,7 +391,7 @@ fluentd: kube-scheduler: # The path to the source files. path: /var/log/containers/kube-scheduler-*.log - # Logging Analytics log source to use for parsing and processing the logs: Kubernetes Scheduler Logs. + # Log Analytics log source to use for parsing and processing the logs: Kubernetes Scheduler Logs. ociLALogSourceName: "Kubernetes Scheduler Logs" # The regular expression pattern for the starting line in case of multi-line logs. multilineStartRegExp: /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/ @@ -400,14 +400,14 @@ fluentd: tcpconnect: # The path to the source files. path: /var/log/containers/*-logan-tcpconnect*.log - # Logging Analytics log source to use for parsing and processing the logs: TCP CONNECT Logs + # Log Analytics log source to use for parsing and processing the logs: TCP CONNECT Logs ociLALogSourceName: "Kubernetes TCP Connect Logs" # Config specific to Kubernetes Audit Logs Collection kube-audit: # The path to the source files. path: /var/log/kubernetes/audit/audit* - # Logging Analytics log source to use for parsing and processing the logs: Kubernetes Audit Logs. + # Log Analytics log source to use for parsing and processing the logs: Kubernetes Audit Logs. ociLALogSourceName: "Kubernetes Audit Logs" # Configuration for Linux System specific logs like CronLogs and SecureLogs @@ -429,7 +429,7 @@ fluentd: cronlog: # cron log file path path: /var/log/cron* - # Logging Analytics log source to use for parsing and processing Linux Cron Logs. + # Log Analytics log source to use for parsing and processing Linux Cron Logs. ociLALogSourceName: "Linux Cron Logs" # The regular expression pattern for the starting line in case of multi-line logs. 
multilineStartRegExp: /^(?:(?:\d+\s+)?<([^>]*)>(?:\d+\s+)?)?\S+\s+\d{1,2}\s+\d{1,2}:\d{1,2}:\d{1,2}\s+/ @@ -440,21 +440,21 @@ fluentd: securelog: # linux secure logs file path path: /var/log/secure* - # Logging Analytics log source to use for parsing and processing Linux Secure Logs. + # Log Analytics log source to use for parsing and processing Linux Secure Logs. ociLALogSourceName: "Linux Secure Logs" # The regular expression pattern for the starting line in case of multi-line logs. multilineStartRegExp: /^(?:(?:\d+\s+)?<([^>]*)>(?:\d+\s+)?)?\S+\s+\d{1,2}\s+\d{1,2}:\d{1,2}:\d{1,2}\s+/ # -- kubelet logs collection configuration kubeletlog: - # Logging Analytics log source to use for parsing and processing Kubernetes Kubelet Logs. + # Log Analytics log source to use for parsing and processing Kubernetes Kubelet Logs. ociLALogSourceName: "Kubernetes Kubelet Logs" # -- Linux syslog collection configuration syslog: # syslog file path path: /var/log/messages* - # Logging Analytics log source to use for parsing and processing Linux Syslog Logs. + # Log Analytics log source to use for parsing and processing Linux Syslog Logs. ociLALogSourceName: "Linux Syslog Logs" # The regular expression pattern for the starting line in case of multi-line logs. multilineStartRegExp: /^(?:(?:\d+\s+)?<([^>]*)>(?:\d+\s+)?)?\S+\s+\d{1,2}\s+\d{1,2}:\d{1,2}:\d{1,2}\s+/ @@ -463,7 +463,7 @@ fluentd: maillog: # maillog file path path: /var/log/maillog* - # Logging Analytics log source to use for parsing and processing Linux Mail Delivery Logs. + # Log Analytics log source to use for parsing and processing Linux Mail Delivery Logs. ociLALogSourceName: "Linux Mail Delivery Logs" # The regular expression pattern for the starting line in case of multi-line logs. 
multilineStartRegExp: /^(?:(?:\d+\s+)?<([^>]*)>(?:\d+\s+)?)?\S+\s+\d{1,2}\s+\d{1,2}:\d{1,2}:\d{1,2}\s+/ @@ -472,14 +472,14 @@ fluentd: linuxauditlog: # audit log file path path: /var/log/audit/audit* - # Logging Analytics log source to use for parsing and processing Linux Audit Logs. + # Log Analytics log source to use for parsing and processing Linux Audit Logs. ociLALogSourceName: "Linux Audit Logs" # -- Linux uptrack logs collection configuration uptracklog: # uptrack log files path. path: /var/log/uptrack* - # Logging Analytics log source to use for parsing and processing ksplice Logs. + # Log Analytics log source to use for parsing and processing ksplice Logs. ociLALogSourceName: "Ksplice Logs" # The regular expression pattern for the starting line in case of multi-line logs. multilineStartRegExp: /^\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2}/ @@ -488,7 +488,7 @@ fluentd: yum: # yum log files path path: /var/log/yum.log* - # Logging Analytics log source to use for parsing and processing Linux YUM Logs. + # Log Analytics log source to use for parsing and processing Linux YUM Logs. ociLALogSourceName: "Linux YUM Logs" # Configuration for AWS EKS Control Plane logs like API Server, Audit, Authenticator etc. @@ -531,7 +531,7 @@ fluentd: sqsQueue: "apiserver" # S3 object key objectKey: .*?kube-apiserver/ - # Logging Analytics log source to use for parsing and processing EKS Control Plane API Server Logs. + # Log Analytics log source to use for parsing and processing EKS Control Plane API Server Logs. ociLALogSourceName: "Kubernetes API Server Logs" multilineStartRegExp: /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/ #metadata: @@ -548,7 +548,7 @@ fluentd: sqsQueue: "audit" # S3 object key objectKey: .*?kube-apiserver-audit - # Logging Analytics log source to use for parsing and processing EKS Control Plane Audit Logs. + # Log Analytics log source to use for parsing and processing EKS Control Plane Audit Logs. 
ociLALogSourceName: "Kubernetes Audit Logs" #metadata: #"Client Host Region": "America" @@ -562,7 +562,7 @@ fluentd: sqsQueue: "authenticator" # S3 object key objectKey: .*?authenticator - # Logging Analytics log source to use for parsing and processing EKS Control Plane Authenticator Logs. + # Log Analytics log source to use for parsing and processing EKS Control Plane Authenticator Logs. ociLALogSourceName: "AWS EKS Authenticator Logs" multilineStartRegExp: /^time=/ #metadata: @@ -577,7 +577,7 @@ fluentd: sqsQueue: "kube-controller-manager" # S3 object key objectKey: .*?kube-controller-manager - # Logging Analytics log source to use for parsing and processing EKS Control Plane Kube Controller Manager Logs. + # Log Analytics log source to use for parsing and processing EKS Control Plane Kube Controller Manager Logs. ociLALogSourceName: "Kubernetes Controller Manager Logs" multilineStartRegExp: /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/ #metadata: @@ -592,7 +592,7 @@ fluentd: sqsQueue: "cloud-controller-manager" # S3 object key objectKey: .*?cloud-controller-manager - # Logging Analytics log source to use for parsing and processing EKS Control Plane Cloud Controller Manager Logs. + # Log Analytics log source to use for parsing and processing EKS Control Plane Cloud Controller Manager Logs. ociLALogSourceName: "Cloud Controller Manager Logs" multilineStartRegExp: /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/ #metadata: @@ -607,7 +607,7 @@ fluentd: sqsQueue: "scheduler" # S3 object key objectKey: .*?kube-scheduler - # Logging Analytics log source to use for parsing and processing EKS Control Plane Scheduler Logs. + # Log Analytics log source to use for parsing and processing EKS Control Plane Scheduler Logs. 
ociLALogSourceName: "Kubernetes Scheduler Logs" multilineStartRegExp: /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/ #metadata: @@ -620,7 +620,7 @@ fluentd: # Generic configuration for all container/pod logs genericContainerLogs: - # -- Default Logging Analytics log source to use for parsing and processing the logs: Kubernetes Container Generic Logs. + # -- Default Log Analytics log source to use for parsing and processing the logs: Kubernetes Container Generic Logs. ociLALogSourceName: "Kubernetes Container Generic Logs" path: /var/log/containers/*.log # -- List of log paths to exclude that are already part of other specific configurations defined (like Kube Proxy, Kube Flannel) @@ -653,7 +653,7 @@ fluentd: # A unique identifier to represent the configuration for a single log path #custom-id1: #path: /var/log/containers/custom*.log - # Logging Analytics log source to use for parsing and processing the logs: + # Log Analytics log source to use for parsing and processing the logs: #ociLALogSourceName: "Custom1 Logs" # The regular expression pattern for the starting line in case of multi-line logs. #multilineStartRegExp: @@ -672,7 +672,7 @@ fluentd: #timezone: #custom-id2: #path: /var/log/custom/*.log - # Logging Analytics log source to use for parsing and processing the logs: + # Log Analytics log source to use for parsing and processing the logs: #ociLALogSourceName: "Custom2 Logs" # The regular expression pattern for the starting line in case of multi-line logs. #multilineStartRegExp: @@ -718,7 +718,7 @@ k8sDiscovery: # chunk_limit: Max number of objects that can be fetched via K8s GET API call. 
Default: 1000 chunk_limit: # collect_warning_events_only: flag to only collect warning event logs - # Default: All logs are collected and sent to OCI logging analytics for processing + # Default: All logs are collected and sent to OCI Log Analytics for processing collect_warning_events_only: false # backoffLimit: Specify the number of retries before considering a Job as failed backoffLimit: 0 @@ -734,7 +734,7 @@ k8sDiscovery: # Base64 encoded string of OCI freeform and defined tags # expected/example format: {"definedTags":{"Oracle-Recommended-Tags.ResourceOwner":"testOwner","Oracle-Recommended-Tags.ResourceUsage":"testUsage","test.key":"testOwner"},"freeformTags":{"project":"logan","test_number":"1"}} oci_tags_base64: - # This is a helper stack launched by the discovery job to enable service logs collection into Logging Analytics for the Infra components related to the OKE cluster. + # This is a helper stack launched by the discovery job to enable service logs collection into Log Analytics for the Infra components related to the OKE cluster. 
# [Users are usually not expected to alter this] rms_template_base64_encoded: UEsDBBQAAAAIAKiEcVrCu61l9Q4AAJA4AAAOABwAZmlsdGVyLWxvZ3MucHlVVAkAA2QC2Gc7k9pndXgLAAEE9QEAAAQUAAAA1Vttc9s2Ev6uX4HK1yM1lWTHSe4a32g6jq2kviqxx5I703M8HIqEJMYUyQNIORqf//vt4oUkKOrNTj9UHxyJWCwWi91nX4gc/HCYcXY4DqJDGi1IskxncfS60TggF1GSpScEvhFyTXmcMY+SMOApiSdk8L5NeDaOaErcyCeXv/XJWZjxlDKgv8xSnCpmns2od0+CCQnjKScuo8QNGXX9JaGROw6pTyYxIwGuRZhahYuZF5MqqeTYKXimM7oUPDWv8ZIkjC6COOOHXsYYjUDae0rmcRSkMQuiKeFxmKVBHBE74/h7wihFGeYkdae81ThQG04zFnHiu6kL0pF/Dy8/Cyo3/RdJYzKmxIsjns3lorBv5uIwKu59FoRpByaFwZi5LID9BPMkZiBK/o1R/e0rjyP9HXQ0BZkaExbPSZKAvClRQ+rXARldnl+CePN4AafhosJhydEsYH4ncVm6LK3akNQn5JwmNPJp5C0JbFyeMUlc796dUg7b46kbovq8MM78Dp/RMIS55zEMikdEPALVxovAp3DwiZAJTw6OgKN+/SwEapcTFAItJEgtYD0XK/wC3M5KjB4C+OOGD+4yX51cnl1oyYbnv7WJ50bkgZIMuLtoapwKtrnU2jYK6eEckAnMLrT834zylEuF6l9d13cTUBzXuv11NLo6lc8kZcZC0OLrLhhKqInAItiyIcfjJIVtgmRq7DJBi7rCR6xNhjdXV9f94dD5tT+40jO8oAsGMwmmeg4+diZBSAsCHkwjyjTBUPwqRt0snSmSXPIL3Hzk0SswDi9I3JCf05BOXRRnBIYfVZiQXDFToAAL+XAxGPWvncHlR+fmeuCM+p+uBqejPukRa5amCT85PFRG2X2Uk566KMwjp7Ad3wnpgoaOH8/dIHo6PD46Pjp6+/rVoeuhBPwQ9gdKHYDr/+LFczSNObjkhd97LP10Av/p79Lzh5QtAo/2HuVPh8vfeljDUO8RZPCfrMawf3p99usu4ne8ZK8dcOoyb/bnSd647o+u/3CGo2uQ9+MfILEwMLsB4AMAAybdO24T8Pe5+y2YZ3MSZfMxGAe4AANCdG+kHIM7xJOJMwGNx0xNod+SOALxAjfUBEQSiDlgMmnGHfBejyKe927fHL1pkzfH79rk7dER/jnGP6/xz5s7wRLcXLiJmgzg54MPAhCiMIgqjRbCjQc+mYLDRpJYORr4ezoTLCQ1TxlQTZcidMzjDFEuRWZIwinncEgNPbdXdk8btOGo/fdMDRrrk4g+aE4kHn+lXtrQP3sFEAzlI7ulB7tCGruJxgO202zrLdRS8ApJw6cIUIACDhjJ3EXrCiLquGyaoblwu3UiTkDQ4NbKwAFSFGOAUb4Ti1Hb6jCrTUDfKdo12I8j7RgepsuE4kPQKNg4PAB4TfABAqGiWs/Wr7CVTlDHFjbmZmHasz6DYVWWUbPkMgfkYUYxtnBO/TZRkDd24RdBAEOr9AQ8kYAjuPvkf2A94ZLMqRvJkOLTBQRUnmIklDwvIow9fiZQBaOCih0YHFYgsHY1xUgILIXKmBQDIVgse7ZeVmutCr2KCiXvPVRoBIvWejFh/ztJStaKmlREVRw3ynre/3B6MxhtFLduLV5aS6aJDiLNJosdqMxSkvMNZhuWmIex6ztjN0QLYDuvgbOInrVpqfvSUp7Mbx2fpm4Q8l3W+e1nomZtWiQtK0tGD0hFN/H/7M6p0FXqYiIcLeJ7TGXTGfgU91iQpGo58ceWK3FAKgYZLjBQUkiowoeAPYIUcuooToma0C2wRuJWaQeQ8MbMbo4AsTtMTUCXRmwNGPW7zVYdx5IxbGTJd2
e5agIbOYe7c66c+Ea23h46yM94I8d0DUcxh4n6pHq0GAI/QDLMRdjHiNqMmeuFVGTxkIDOm1jMeAAcKjogc5QNPB4ilY+jmD8fkBsMUDAIuQQETxgBZjr/l4gvkR6o83pA8cqn2BdqLTBVDMgq6zyEIB3OQVoMl5DWOzWZmC1p22otFTenYQwHTYb9s8vP586g/3t/4Jxffjq9+Kz1rET7oUeaiLPNQsNQuXkzEfy7MrezmfUFLfxL1779Mrz7qfU3K18un1WzFPAQvLpTFmeJ/UoS05DTYjGxQ7GY2HF3CrsUzxyR9svHao/bFtM8rvung0/DW8HmTuUajILhQLIpsny7cNc2KYIRsEBVyEc6jMhnSq0QcFaiqCUiDcah609DMkSg0Tous5Z8CJCaT2WUKxQCaV/xAz/xOHZSrFBAlJh3abQIGGRXoCgbo5+Tj1stY96E+lSGRAeqNJicZ2KiPtohxV+8Ovz29uhdsyvLedtgLw9vKvPEsj5rWGE4XD0xU9xA6dVJtGLVYcHkHYu3VQH9nEyqqJcrq13WD5T8SYxNg15Fa4WI9JtHk5T0xT+4aSjgqXlQuvyjmsZu3ohui0QEYYBE7Ul2VC7fXxIhTbdgfEJ+5F3IlSGU2bRl6oh+C1LtRlL/AtvWqq7G3w425USISyLFFOmmn4k2kI8HGSeYlpe4XBVZZpFi8pocs7LCInCLbZf4gTpLQbmKpyyLsB9T6rqQ3z9xREv0OVVBrneh3L3zVoKNf5wwVlL1Sk7Z1hmkE2Hy0CujQauGrYM7LGx1nSmmNALFLHty0q2lflt37RVS0CTL6fBHHRFEoSlloteV05ae1U2BgQUUfc49XTrG/nMGeqR2NlQqTjJjYDc5femZdWfM+JM9R50oyvoy31k9w4aMGF4YYN9CVcC2hghEhTaUYOks9h1MO9tKMBUhAPVLgwLgry6HI6sc8XgCUQotS1fKSbyyAgrUU5yVG9ew/tjfwhmjxFbGZXwQxmNbN5FKVcD/5KIix7bypEqu1GVuAIkxxAdHtjtUaa6Uix1bkXVyO5+R0m8pcFEd1wa2bZbYJIZaFRuZkO8Tl6QBeJ5sUfhEtsaI6kyq9jpMtJDqTBJZJ8Q6Pjp+2zl61Tk6Hh394+Ttu5Pjn7vv/vnmPxYSQxCKGUA7WpS7iAMfV0JLQmwCrpK9bomresFNEgDDr/GYm1kEzGFLW8vS1gDURkNWRhFwB3+odjtYjBu5Uwrp99IRvNu6Ey/IVN5sJhtyPraBeuQxPyALUQm2q1e/lQ9KDos1q29QiAdlCiUwEGnRizGza2fwqQyVOeqdA32uhGLU1AbQVNRTUFb1BLTVR4L4SXtbnRp/kHosrDpX5a0QVOSjYgeg2hoGVao6Pptm53OUH+TT0OzFOxmOohsvVIhdVDstjH9gbBxjrgCjOALWEcfBEl1bsEAOjnglIzcMXxHiMeNTqnu/bOZVqx7F+gOcwmBwUkVHKJE4zacWC3ex0E7sFoKQweFWsb/TFCssRyyjRmWm1pAlDpYbQpce9j1j8DL9pZ2DToG0OeCBuswDN3jjA/FHvEBL6Rw3rucWc4KJGEQzL/WTrLtb1WvAb1oaPPse0b/MaKazMuBVLL26WQ0mKvg52kgCyuugRa0Fo6JyhwfyrR3O4w6+fWuXDqiAkJy3eEHXI7d3uSq0+LLSNdiXtGJ4KjAodIqfqnMCgTjhsseY6GYMQWxxlWCbD39lt63yuZVZmYdhWCesUqaEhEmNjmCwksDUbGxHLzTLmqr6TAEC3leYaK6OtmjMNHe1qtnHlXH8mDh2Ulkcfn3EsQu/Lt8T82tmBVVZ8fNUpHsqYtXHSlGlK9uuXbH4KMsXllGJUfUfHXdI4ZjbpqycTjVsb5lfYyQrgX4Li/o0oPRry/xW2Z8MZ+9C+gK5ny1OwMjLqpQSkEQ3yQSObhBNYtu6iII0cFNxKUCdChAQKSC4RRhSWQoiqsg3GsUVha5OGtd0V9e+AW
oYkvh0nE1t60wSEyQmObHk5OdLreuVrbZr20YLVze1JBNd0lWbR1uY6DKy/MyoHytbwtaRWkutVGwkFhdEJGzLtjiLQaewV0e1/wtIL4aMRm8thW62lzgfkFOSRQGUOyIdeZgF3qyUhxBR6YfFuRK7ZEatPFvHOx74clK4A0SzKVSkectdXVuAjFpqOMdQLLNXmr5aLvHWT3DVl2oKIRj1aLDAKgHzd6/OOmRSVI7Spd46LFyqUmqa7104ktD1qN388gVKy2azVfa4akO9nt0q3SaulWZ6PcsK0Tp+W4rvzYW3UDszLjSpxrYv+jhawcQWl350w6a1Q0Wuq3F9wtIw9UUpXCrPUtQzyFFqX4mUq1vMTuqvaegWZtGxrHHiLUi77rNzw/PZK5h1F0ZEoYmaguzZezDuYuCbs0kYP2CyZT2bJ1adJVllEVqYwEqXzoTFpvVB1v2DeGqR06sLfRkBTXNC8RVFEYN42VKsH7nVbK9dFz8yXDilhsnGdo9s4eSdnhe2ttZubAI7AdcC5MXX17hFEXGVTxSb3bPjtQJ7mEJrZ3XS2BnTUgZ0awHKW3eNkoPJBklvY8UieW9Ldqqfpray5r4zt+9lX44Vm9hboKL2ymfmX1QY/6mntVkFPuM9u4l/RvBAGNz0GreChmtufe2Chs9y+t2h8FnsV3DQUMV3g8MVMGziOnqZvU1VfhQaViSW4PQshnsg6VCU1nsgaXm/Gk9rJX8ZrGL7ehVV8XOgrvHaFZYvxt51qqjH3opbvhyCw/F2+PUQEUAvlrjjsD8UGwe1NyK/yNQ3b+8vhcgqwSblKlw0zYzEW7TO1l1++X6p6Z+emX6vxNTUxaY3BnvuZCU/je9p5/5n3vGSDt54tJ7LWeFyVfAXAPNe0Lx3kqvtUqFyvdx/3WxXb+87YK1WzTbAvc/GtOMmARoXxQvElrxngO1ehr0t1pFNPTEmqDcMcW9G8b94PAO4zbPcG7krHrE3eO+gr78IgssLA6oviTdZ8A0D9oV4GsNAIN984f8oEfvWdPoVyaMlCt8TvcST5KxHn1YbSqIz3jQZNe9058bP5gmcrjFabEjmOSXCR2vhhhm+Sa1n+9QqGTne55mLJozM2zw3zK+FkwPxir0gVTlVnSQvahQphYsuEO5h3waQ2W0elBCvOMO1reVGMCGOuKvjOOJShONgoHMcdTPCOKiDfK2xywNP3oUSl4GQQa8p7xp08Py78AfTXgyePT3rvP/+5mOxC9kyf6YCC9LYE6+3/J2UJX4ctRr/B1BLAwQUAAAACACYgy9a7w4W3ZYFAAAtEgAABwAcAG1haW4udGZVVAkAA+eUh2c7k9pndXgLAAEE9QEAAAQUAAAAzVfbbhs3EH3XVwzoFJEASYnRtwBOkSZ+SBvEQRL0xRAIane0YrQiVZLrZGHo3zu87E1eu07QADUMy8sdnhmemTlDncEHgwdhEEppHegNGLS6MhlamEoFf3y6eg8bbfbCzcBpOAhrwW1x7x8+1G6rFdjMyIOblDoTpYXbCYDeIdeZzOECrmkz7OZwA4R2I8wyKyvr0MAL2K2un6+SNdkCWdtqbZ2ZBqhlgzKHxfn5HM7PZ5PjpO/mEPzz6B/izwWwjSzJwaLUhV0eatYZHoTbAnSGcflXBmchzIXZW3iaFp8Cxf30WWXNs7VUz5rVEO8BjXCSAKWSTgpHloRGoS521RqNQod2sdf0UhupCjahTXQyWueB5TaCSOyUXf9iV2wOX7RUU/qk30gb1kScKCtsyIsolsj7YrVClekcp8FitprNyE2pRc7XohQqQxO9/YCbAcoD3lIueY5OSErKjx2qq4hRN+RnYxA9LN8LJQrMf6+5EwX5ugXWLjECSIUzkqDjAId2W9q+R1N4V2bpF5bN68/0ME9Y46794XPcSIV5xCKwFia98Cje6Rl8RJFDpssSM4c55MIJmG7QZVt6WtfDPvLQVEqcyreg4vGfPOy4CPTkGOjxK0v8RrQpCl
KqG98sRnzlBv+u0LrUFEvq5qp0y8jmJBRIYZuz8HXNrRPZzjM58dnzCZLk49u81QGfpUTrSFTXzAOyFbwI+9mT22afN8+PvLfg97j6gEcGFy9b/LBPdrqzvBvcBThTecOjF4AzeBuOeyI/gSPWUMKA3UsKi+JhdGHE3ktUPF5PJJrkDwRmHk+4ML6Yn9z6dHtGDBZUaUfWvLbxdQTodX1nUfYt7jZsZ7jrG570Wmfl+lYjpd9Z5sPIc70XMkXOFtnwZabVRhactBR9AlRVlvAbsPfUoL7RRsyObBXT01YOc3KPPolOZpQQi646cL9GGRgq+WlpFUaTqRJ79MravKHCKGvCslxUjoKnf3kubaZv0NS8pSCMkyOPQbAY0ztdQAANJX71+i0kULBobiQFO32cl4g6m3SHHI2dPTbqFGVgIdOVaqeDl5QSVeG206ZSRjp3Bi/hOSXmnHLyPEDsaZq7PSrHw/wNiVJ7Pnzh5UvaQynqSHJwNtrkXSYmQfNiL1CB+cR83krriUzUfhUW2jOKsqwhMyi85H3dooJaVxDrpTK01g3ImJM/L5t5AgvoJzMICRvT7xOJ9qsjwhyt+qtBCOUGszqj8o7SJwulDfJsK1SBHvy6z9B86Hs+cLJqpOlfKzpdih6f1+l9UMvvKS+6Zy1lPqMa8X3cjgFUYl2S0xspujkA7ZD2iSXxP3Ef4vW3N6y9jHujY4soSkp4XjfIo3iPHCZB0op2iiTPvQES4Db+YyltwGi9xoER4wozPud6/YWmb3QVFZQ4P7R3gF4q7rDSTIOxE86ivFyGpxBSmvPUIfZ+kYjyYGPbE0EcRbZtS3W0IianPXsRpy0tZDj1++OUX8ZyZf7CxTibHfm7V8Rfz6CjMaYt1WaszAdEwKci7fDbm1v0p8uPf719fRkatGnvMIJSYyUK4gOZkCAU2tRx90hYya7l7tQuhRGgk3bfsUkvGrMAFIOmgEn925j961Am3yOdo3fJ/5sWncGnxM5rrRRVJLXhyIwLF4FGLk7Hms22PFHpJ31EGR1tA5jHTLMH2/I/nGpjsdJMuffawE6mXAQJg66ptpaJnz/wzuDz1ZurF/CmOpTSd06QmPQFfXLSWzu6ufdOHIs7r4kKmYXVNA2S7KT7/lB6HkxL2iSdv2OGL72dQdPdWjlKRuvge3IXfwaCBAMfPaFoLAbbwoa22k63tcbHSfM3NLITdGTX57ADabh81RRPJPVENONX5n6JtQY/Qy1yPKDKqY79Hex6bDyvfo6o/ANQSwMEFAAAAAgAmIMvWt5I6DVOAQAAwAIAAAsAHABwcm92aWRlci50ZlVUCQAD55SHZzuT2md1eAsAAQT1AQAABBQAAACFUt1qwjAUvu9THPR6yVTYQNhA9EZ2I+wBSpae2jCbdCdpmRvds++YGu1A2E0J39/J+dKARKp0VMN3BkD40RrCIm/IdaZA8hEGcNrA0/kM4F1LGoGRiSOlDyiZn5zJjl3G2RP58wwP4nEg+vjFz4Bk1eF2WKV8ZbSjRibd7dC5WIjFODaYGv+LPGlux92L2VzMrnl91mdZaoBX5N1i9BQ2Trc12qACu5dQhdD4pZSF014MTQjtaon2rvXSKOXl2tnABrnabeXr5oUDvAyp8zRDO1uafUsxVlShzuK0rfVBWV5kR8Zq03BtqzZUjsxXVMYH2w97dIoEXzQfgMG/jrFQmgPCm/JYgGI738bo5Oe78YRjztZilDKGWdV6pEECMFJdYJY0ZDoVMH/HY5KMoLxRoWJVaeweiQkbkmoE/a398gtw90xM4UrFp2R4Cv0vUEsDBBQAAAAIALOzLVpj23A1hwAAAMUAAAAKABwAb3V0cHV0cy50ZlVUCQADekaFZzuT2md1eAsAAQT1AQAABBQAAAB1jsEKAjEMRO/9itCTgvQDhOKnlLgbSrU2a5qsgvjvdsGLB08Dw8y8YdPFFHxpK18pCT6S0N2oa+qTlEU9vBzAitUI4lAJM50tQ4
ygMrwTXLi0nQd/gBkVAz2VpGENfyfDIpwFb3s4QrNa3ds5/v6onFNXVOu/4MrTmOSppJHIpeVN0wYc7Q9QSwMEFAAAAAgAmIMvWlk6oj2EAQAAGwUAAAkAHABpbnB1dHMudGZVVAkAA+eUh2c7k9pndXgLAAEE9QEAAAQUAAAAxVM9b9swEN31Kw7yEgN2gK4BumXo1CzZiRN5otlSPII8GhUC/fdQdtpKCoIWXnKDAN3H07undzugXw9Q8hHzqSspHL80Z0wOO0/QsnYqkXUcWnhpAGSMBF8hS3LBNlPT7C7TnFB70p6Ludc8bAAMD+iWADX+YAAY6rF4qan2OwdqYQffMBnQbMjAGX0h6DlBHOXEAbJOLsoBeueF0tGzzfdxnLn8/apnNKpDj0FTyivqA8Y77n6Qlrs5CxBwuDB6iwUxgMrefFSre0ZMMlAQVbsWtWm/X9PJpQskn89D+5KrZp/Og8Og1u0fuGv5R63CgH4Up7Oa32ziEv85J2jz1nm/14U+EVVnDc+16U2KK8b+MLvSBTLvS3WdlWlfoF3itHNqOkC7ALjmYLoczBz1CU/BjxdfP9IZninLzP1a3dyPUKhGHtWs/n9cUSjeb/SuICVTuh0hJndGIfWTRhVRTjeBVD0spQoV5OY1NIfeWVWPn27CMNQVu53smP1qrkefqZleAVBLAwQUAAAACAArlnpZogqurvoAAABjAQAACAAcAGRlYnVnLnRmVVQJAAPaykVnO5PaZ3V4CwABBPUBAAAEFAAAAG2OMU/DQAyF9/yKp5ShlapLWpgqVQysSLAwR+biJgeXczjfpUJV/ztJERsebFn+3nte4UnG7+i6PmFtN9jX+/vt0h+2eIlkPYNCW0mESwo6nZx3lFhNscKzsxyUW+TQckTqGW/BTRyVPF45Dk51Xv84TDtTgxTayzmAEvqURj1UlagauYUZK0Plf3mt8uhNUURWydEySi+WfDO/wCVKFyb55CbSuYn8lVlToza6MZW4FICVHBLmOmKiaFp+zx0escMB9e0cEi/AER8qgYOVlte3ACPWNV66zoVumU1LiTazZgkONPCsKe8uI6XeDNJmz9fqP4lZjMvi+gNQSwECHgMUAAAACACohHFawrutZfUOAACQOAAADgAYAAAAAAABAAAApIEAAAAAZmlsdGVyLWxvZ3MucHlVVAUAA2QC2Gd1eAsAAQT1AQAABBQAAABQSwECHgMUAAAACACYgy9a7w4W3ZYFAAAtEgAABwAYAAAAAAABAAAApIE9DwAAbWFpbi50ZlVUBQAD55SHZ3V4CwABBPUBAAAEFAAAAFBLAQIeAxQAAAAIAJiDL1reSOg1TgEAAMACAAALABgAAAAAAAEAAACkgRQVAABwcm92aWRlci50ZlVUBQAD55SHZ3V4CwABBPUBAAAEFAAAAFBLAQIeAxQAAAAIALOzLVpj23A1hwAAAMUAAAAKABgAAAAAAAEAAACkgacWAABvdXRwdXRzLnRmVVQFAAN6RoVndXgLAAEE9QEAAAQUAAAAUEsBAh4DFAAAAAgAmIMvWlk6oj2EAQAAGwUAAAkAGAAAAAAAAQAAAKSBchcAAGlucHV0cy50ZlVUBQAD55SHZ3V4CwABBPUBAAAEFAAAAFBLAQIeAxQAAAAIACuWelmiCq6u+gAAAGMBAAAIABgAAAAAAAEAAACkgTkZAABkZWJ1Zy50ZlVUBQAD2spFZ3V4CwABBPUBAAAEFAAAAFBLBQYAAAAABgAGAN8BAAB1GgAAAAA= # Maximum amount of time (in seconds) the job should wait while checking the status of stack APPLY operation. 
Default: 300 diff --git a/charts/oci-onm/Chart.yaml b/charts/oci-onm/Chart.yaml index 369854a..f81fefa 100644 --- a/charts/oci-onm/Chart.yaml +++ b/charts/oci-onm/Chart.yaml @@ -3,7 +3,7 @@ apiVersion: v2 name: oci-onm -description: Helm chart for collecting Kubernetes logs & objects and metrics using Fluentd and ManagementAgent into OCI Logging Analytics and OCI Monitoring respectively. +description: Helm chart for collecting Kubernetes logs & objects and metrics using Fluentd and ManagementAgent into OCI Log Analytics and OCI Monitoring respectively. # A chart can be either an 'application' or a 'library' chart. # @@ -18,7 +18,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 4.0.0 +version: 4.0.1 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to @@ -32,7 +32,7 @@ dependencies: repository: "file://../common" condition: oci-onm-common.enabled - name: oci-onm-logan - version: "4.0.0" + version: "4.0.1" repository: "file://../logan" condition: oci-onm-logan.enabled - name: oci-onm-mgmt-agent diff --git a/charts/oci-onm/README.md b/charts/oci-onm/README.md index bb84456..d0e47f0 100644 --- a/charts/oci-onm/README.md +++ b/charts/oci-onm/README.md @@ -2,7 +2,7 @@ ![Version: 3.0.0](https://img.shields.io/badge/Version-3.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.0.0](https://img.shields.io/badge/AppVersion-3.0.0-informational?style=flat-square) -Helm chart for collecting Kubernetes logs & objects and metrics using Fluentd and ManagementAgent into OCI Logging Analytics and OCI Monitoring respectively. 
+Helm chart for collecting Kubernetes logs & objects and metrics using Fluentd and ManagementAgent into OCI Log Analytics and OCI Monitoring respectively. ## Requirements @@ -17,7 +17,7 @@ Helm chart for collecting Kubernetes logs & objects and metrics using Fluentd an | Key | Type | Default | Description | |-----|------|---------|-------------| | global.kubernetesClusterID | string | `nil` | OKE OCID for an OKE cluster or an unique ID for other Kubernetes clusters. | -| global.kubernetesClusterName | string | `nil` | Provide a unique name for the cluster. This would help uniquely identifying the logs and metrics data at OCI Logging Analytics and OCI Monitoring respectivelt, when moitoring multiple clustersa | +| global.kubernetesClusterName | string | `nil` | Provide a unique name for the cluster. This would help uniquely identifying the logs and metrics data at OCI Log Analytics and OCI Monitoring respectively, when monitoring multiple clusters | | global.namespace | string | `"oci-onm"` | Kubernetes Namespace in which the resources to be created. Set oci-kubernetes-monitoring-common:createNamespace set to true, if the namespace doesn't exist. | | global.resourceNamePrefix | string | `"oci-onm"` | Prefix to be attached to resources created through this chart. Not all resources may have this prefix. | | oci-onm-common.createNamespace | bool | `true` | If createNamespace is set to true, it tries to create the namespace defined in 'namespace' variable. | diff --git a/charts/oci-onm/values.yaml b/charts/oci-onm/values.yaml index 0aaf84f..8c2ab1d 100644 --- a/charts/oci-onm/values.yaml +++ b/charts/oci-onm/values.yaml @@ -8,7 +8,7 @@ global: resourceNamePrefix: oci-onm # -- OKE OCID for an OKE cluster or an unique ID for other Kubernetes clusters. kubernetesClusterID: - # -- Provide a unique name for the cluster. 
This would help uniquely identifying the logs and metrics data at OCI Logging Analytics and OCI Monitoring respectivelt, when moitoring multiple clustersa + # -- Provide a unique name for the cluster. This would help uniquely identifying the logs and metrics data at OCI Log Analytics and OCI Monitoring respectively, when monitoring multiple clusters kubernetesClusterName: oci-onm-common: @@ -32,11 +32,11 @@ oci-onm-logan: kubernetesClusterName: "{{ .Values.global.kubernetesClusterName }}" image: url: container-registry.oracle.com/oci_observability_management/oci-la-fluentd-collector:1.7.0 - # Go to OCI Logging Analytics Administration, click Service Details, and note the namespace value. + # Go to OCI Log Analytics Administration, click Service Details, and note the namespace value. ociLANamespace: - # OCI Logging Analytics Default Log Group OCID + # OCI Log Analytics Default Log Group OCID ociLALogGroupID: - # OCI Logging Analytics Entity OCID representing the target cluster + # OCI Log Analytics Entity OCID representing the target cluster ociLAClusterEntityID: # This parameter is required only for realms where the OCI Ruby SDK is not supported. # Format: .oci. diff --git a/docs/FAQ.md b/docs/FAQ.md index fe877dd..a9852f6 100644 --- a/docs/FAQ.md +++ b/docs/FAQ.md @@ -2,7 +2,7 @@ ### What are the offerings of OCI Kubernetes Monitoring Solution ? -OCI Kubernetes Monitoring Solution is a turn-key Kubernetes monitoring and management package based on OCI Logging Analytics cloud service, OCI Monitoring, OCI Management Agent and Fluentd. It helps collecting various telemetry data (logs, metrics, Kubernetes objects state) from your Kubernetes cluster into OCI Logging Analytics and OCI Monitoring). It also provides rich visual experiences using the collected information through Kubernetes Solution UX and pre-defined set of Dashboards. 
+OCI Kubernetes Monitoring Solution is a turn-key Kubernetes monitoring and management package based on OCI Log Analytics cloud service, OCI Monitoring, OCI Management Agent and Fluentd. It helps collect various telemetry data (logs, metrics, Kubernetes objects state) from your Kubernetes cluster into OCI Log Analytics and OCI Monitoring. It also provides rich visual experiences using the collected information through Kubernetes Solution UX and pre-defined set of Dashboards. ### What are the supported methods of installation ? @@ -51,7 +51,7 @@ _Additionally, the following volumes of type hostPath would be created and mount #### Logs -The solutions offers collection of various logs of from the Kubernetes cluster into OCI Logging Analytics and offer rich analytics on top of the collected logs. Users may choose to customise the log collection by modifying the out of the box configuration that it provides. +The solution offers collection of various logs from the Kubernetes cluster into OCI Log Analytics and offers rich analytics on top of the collected logs. Users may choose to customise the log collection by modifying the out of the box configuration that it provides. * Kubernetes System/Service Logs * The following logs are configured to be collected by default under this category. @@ -73,7 +73,7 @@ The solutions offers collection of various logs of from the Kubernetes cluster i * Ksplice Uptrack logs * Yum logs * Pod/Container (Application) Logs - * All the container logs available under `/var/log/containers/` on each worker nodes would be collected by default and processed using a generic Log Source named `Kubernetes Container Generic Logs`. However, users have ability to process different container logs using different Parsers/Sources at Logging Analytics. Refer [this](#custom-logs.md) section to learn on how to perform the customisations. 
+ * All the container logs available under `/var/log/containers/` on each worker nodes would be collected by default and processed using a generic Log Source named `Kubernetes Container Generic Logs`. However, users have ability to process different container logs using different Parsers/Sources at Log Analytics. Refer [this](#custom-logs.md) section to learn on how to perform the customisations. #### Metrics @@ -208,13 +208,13 @@ By default `/var/log` of underlying Kubernetes Node is mounted to `oci-onm-logan * By default, Fluentd pods responsible for logs collection uses single flush thread. Though this works for most of the moderate log volumes, this can be tuned by using the following helm variable : * [`oci-onm-logan.fluentd.ociLoggingAnalyticsOutputPlugin.buffer.flush_thread_count`](https://github.com/oracle-quickstart/oci-kubernetes-monitoring/blob/main/charts/logan/values.yaml#L183) * Buffer size - * By default, the solution uses Fluentd’s file buffer with size set to 5GB as default buffer size, which is used for buffering of chunks in-case of delays in sending the data to OCI Logging Analytics and/or to handle outages at OCI without data loss. **We recommend** to modify/tune this to a size (to a higher or lower value) based on your environment and importance of data and other relevant factors. Use the following helm variable to modify the same : + * By default, the solution uses Fluentd’s file buffer with size set to 5GB as default buffer size, which is used for buffering of chunks in-case of delays in sending the data to OCI Log Analytics and/or to handle outages at OCI without data loss. **We recommend** to modify/tune this to a size (to a higher or lower value) based on your environment and importance of data and other relevant factors. 
Use the following helm variable to modify the same : * [`oci-onm-logan.fluentd.ociLoggingAnalyticsOutputPlugin.buffer.total_limit_size`](https://github.com/oracle-quickstart/oci-kubernetes-monitoring/blob/main/charts/logan/values.yaml#L186) * Read from Head * By default, the solution tries to collect all the pod logs available on the nodes since beginning. Use the following helm variable to alter the behaviour if you wish to collect only new logs after the installation of the solution : * [`oci-onm-logan.fluentd.tailPlugin.readFromHead`](https://github.com/oracle-quickstart/oci-kubernetes-monitoring/blob/main/charts/logan/values.yaml#L227) -### How to collect pod logs using custom OCI Logging Analytics Source instead of using Kubernetes Container Generic Logs Source ? +### How to collect pod logs using custom OCI Log Analytics Source instead of using Kubernetes Container Generic Logs Source ? Refer [here](custom-logs.md). @@ -404,7 +404,7 @@ oci-onm-logan: ### How to set timezone override ? -If a log record contains a timezone identifier, the **Logging Analytics service** will use that timezone. However, if there is no timezone information, the service defaults to **UTC**. +If a log record contains a timezone identifier, the **Log Analytics service** will use that timezone. However, if there is no timezone information, the service defaults to **UTC**. To override this default, use the `timezone` parameter in your `values.yaml` file. This parameter can be configured at different levels. @@ -514,7 +514,7 @@ Sample Error : E, [2023-08-07T10:17:13.710854 #18] ERROR -- : oci upload exception : Error while uploading the payload. { 'message': 'execution expired', 'status': 0, 'opc-request-id': 'D733ED0C244340748973D8A035068955', 'response-body': '' } ``` -* Check if your OCNE setup configuration has `restrict-service-externalip` value set to `true` for kubernetes module. If yes, update it to false to allow access to Logging Analytics endpoint from containers. 
Refer [this](https://docs.oracle.com/en/operating-systems/olcne/1.3/orchestration/external-ips.html#8.4-Enabling-Access-to-all-externalIPs) for more details. If the issue is still not resolved, +* Check if your OCNE setup configuration has `restrict-service-externalip` value set to `true` for kubernetes module. If yes, update it to false to allow access to Log Analytics endpoint from containers. Refer [this](https://docs.oracle.com/en/operating-systems/olcne/1.3/orchestration/external-ips.html#8.4-Enabling-Access-to-all-externalIPs) for more details. If the issue is still not resolved, * Check if your OCNE setup configuration has `selinux` value set to `enforcing` in globals section. If yes, you may need to start the fluentd containers in privileged mode. To achieve the same, set `privileged` to true in override_values.yaml. ``` @@ -647,7 +647,7 @@ oci-onm-logan: ### Control plane log collection for AWS EKS (Amazon Elastic Kubernetes Service) AWS EKS control plane logs are available in CloudWatch. -Once the control plane log collection is enabled, the logs are directly pulled from CloudWatch and ingested into OCI Logging Analytics for further analysis. Alternatively, the logs can be routed over to S3 and pulled from there. +Once the control plane log collection is enabled, the logs are directly pulled from CloudWatch and ingested into OCI Log Analytics for further analysis. Alternatively, the logs can be routed over to S3 and pulled from there. #### How to collect EKS control plane logs from CloudWatch? To collect the logs from CloudWatch directly, modify your override_values.yaml to add the following EKS specific variables. Various other variables are available in the values.yaml file and can be updated as necessary. @@ -668,7 +668,7 @@ oci-onm-logan: ``` #### How to collect EKS control plane logs from S3? 
-If you run into [CloudWatch service quotas](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/cloudwatch_limits_cwl.html), you can alternatively route the logs to S3 and collect them. The control plane logs in S3 need to be in a specific format for the default log collection to work. Please refer [EKS CP Logs Streaming to S3](./eks-cp-logs.md) for instructions on how to configure streaming of Control Plane logs to S3 and subsequently collect them in OCI Logging Analytics. Once the streaming of logs is setup, modify your override_values.yaml to add the following EKS specific variables. Various other variables are available in the values.yaml file and can be updated as necessary. +If you run into [CloudWatch service quotas](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/cloudwatch_limits_cwl.html), you can alternatively route the logs to S3 and collect them. The control plane logs in S3 need to be in a specific format for the default log collection to work. Please refer [EKS CP Logs Streaming to S3](./eks-cp-logs.md) for instructions on how to configure streaming of Control Plane logs to S3 and subsequently collect them in OCI Log Analytics. Once the streaming of logs is setup, modify your override_values.yaml to add the following EKS specific variables. Various other variables are available in the values.yaml file and can be updated as necessary. ``` .. diff --git a/docs/custom-logs.md b/docs/custom-logs.md index 118ce24..035031a 100644 --- a/docs/custom-logs.md +++ b/docs/custom-logs.md @@ -3,8 +3,8 @@ ### How to use custom logSource (oci_la_log_source_name) and/or other custom configuration for Pod/Container Logs collected through "Kubernetes Container Generic Logs" logSource ? A generic source with time only parser is defined/configured for collecting all application pod logs from /var/log/containers/ out of the box. -This is to ensure that all the logs generated by all pods are collected and pushed to Logging Analytics. 
-Often you may need to configure a custom logSource for a particular pod log, either by using one of the existing OOB logSources at Logging Analytics or by defining one custom logSource matching to the requirements. +This is to ensure that all the logs generated by all pods are collected and pushed to Log Analytics. +Often you may need to configure a custom logSource for a particular pod log, either by using one of the existing OOB logSources at Log Analytics or by defining one custom logSource matching to the requirements. Once you have defined/identified a logSource for a particular pod log, the following are couple of ways to get those pod logs associated to the logSource. #### Use Pod Annotations @@ -98,7 +98,7 @@ oci-onm-logan: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true diff --git a/docs/eks-cp-logs.md b/docs/eks-cp-logs.md index a6671bc..4d84dff 100644 --- a/docs/eks-cp-logs.md +++ b/docs/eks-cp-logs.md @@ -1,6 +1,6 @@ ## Streaming of Control Plane logs from CloudWatch to S3 -We can use a CloudWatch logs subscription to stream log data in near real-time to AWS S3. Once available in S3, the log data can be pulled and ingested into OCI Logging Analytics. +We can use a CloudWatch logs subscription to stream log data in near real-time to AWS S3. Once available in S3, the log data can be pulled and ingested into OCI Log Analytics. 
The high level flow of CloudWatch logs to S3 looks as follows diff --git a/logan/docker-images/v1.0/oraclelinux/8-slim/Dockerfile b/logan/docker-images/v1.0/oraclelinux/8-slim/Dockerfile index baf4c5c..cddc358 100644 --- a/logan/docker-images/v1.0/oraclelinux/8-slim/Dockerfile +++ b/logan/docker-images/v1.0/oraclelinux/8-slim/Dockerfile @@ -29,7 +29,7 @@ RUN microdnf -y module enable ruby:3.3 \ && gem install bundler -v 2.5.16 \ # Install development dependent packages for gems native installation && microdnf --enablerepo ol8_codeready_builder -y install --nodocs gcc make redhat-rpm-config openssl ruby-devel gcc-c++ libtool libffi-devel bzip2 git libyaml-devel which elfutils-libelf-devel clang llvm \ -# Install Fluentd, it's dependencies along with other run time dependencies for OCI Logging Analytics Solution +# Install Fluentd, it's dependencies along with other run time dependencies for OCI Log Analytics Solution && bundle config silence_root_warning true \ && bundle config --local path /fluentd/vendor/bundle \ && bundle config --global jobs 9 \ diff --git a/logan/kubernetes-resources/logs-collection/configmap-cri.yaml b/logan/kubernetes-resources/logs-collection/configmap-cri.yaml index 1b8c5cf..731d81a 100644 --- a/logan/kubernetes-resources/logs-collection/configmap-cri.yaml +++ b/logan/kubernetes-resources/logs-collection/configmap-cri.yaml @@ -87,7 +87,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -137,7 +137,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -187,7 +187,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. 
+ # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -237,7 +237,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -287,7 +287,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -327,7 +327,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -367,7 +367,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -419,7 +419,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -449,7 +449,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -495,7 +495,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -508,7 +508,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. 
@type record_transformer enable_ruby true @@ -538,7 +538,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -566,7 +566,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -596,7 +596,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -624,7 +624,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -670,7 +670,7 @@ data: annotation_match [ ".*" ] - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true diff --git a/logan/kubernetes-resources/logs-collection/configmap-docker.yaml b/logan/kubernetes-resources/logs-collection/configmap-docker.yaml index 348f7b8..3dc3adc 100644 --- a/logan/kubernetes-resources/logs-collection/configmap-docker.yaml +++ b/logan/kubernetes-resources/logs-collection/configmap-docker.yaml @@ -87,7 +87,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -126,7 +126,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. 
+ # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -165,7 +165,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -204,7 +204,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -243,7 +243,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -272,7 +272,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -301,7 +301,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -342,7 +342,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -372,7 +372,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -418,7 +418,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. 
@type record_transformer enable_ruby true @@ -431,7 +431,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -461,7 +461,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -489,7 +489,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -519,7 +519,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -547,7 +547,7 @@ data: - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. @type record_transformer enable_ruby true @@ -593,7 +593,7 @@ data: annotation_match [ ".*" ] - # Record transformer filter to apply Logging Analytics configuration to each record. + # Record transformer filter to apply Log Analytics configuration to each record. 
@type record_transformer enable_ruby true diff --git a/logan/kubernetes-resources/logs-collection/fluentd-daemonset.yaml b/logan/kubernetes-resources/logs-collection/fluentd-daemonset.yaml index 9c1096d..77b7398 100644 --- a/logan/kubernetes-resources/logs-collection/fluentd-daemonset.yaml +++ b/logan/kubernetes-resources/logs-collection/fluentd-daemonset.yaml @@ -80,10 +80,10 @@ spec: - name: FLUENTD_CONF value: "/var/opt/conf/fluent.conf" # change as required - name: FLUENT_OCI_DEFAULT_LOGGROUP_ID - # Replace this value with actual logging analytics log group - value: + # Replace this value with actual Log Analytics log group + value: - name: FLUENT_OCI_NAMESPACE - # Replace this value with actual namespace of logging analytics + # Replace this value with actual namespace of Log Analytics value: - name: FLUENT_OCI_KUBERNETES_CLUSTER_ID # Replace this value with Kubernetes Cluster ID @@ -94,7 +94,7 @@ spec: - name: OCI_READ_FROM_HEAD value: "true" # set it false to collect only current logs # Uncomment the following section if using config file base AuthZ instead of default InstancePrincipal based AuthZ. - # For OKE, recommended AuthZ approach to connect to OCI Logging Analytics APIs is InstancePrincipal. + # For OKE, recommended AuthZ approach to connect to OCI Log Analytics APIs is InstancePrincipal. #- name: FLUENT_OCI_CONFIG_LOCATION #value: "/var/opt/.oci/config" ## parameters to limit the memory and requests for the pods @@ -120,7 +120,7 @@ spec: readOnly: true # Mount directory where oci config exists # Uncomment the following section if using config file base AuthZ instead of default InstancePrincipal based AuthZ. - # For OKE, recommended AuthZ approach to connect to OCI Logging Analytics APIs is InstancePrincipal. + # For OKE, recommended AuthZ approach to connect to OCI Log Analytics APIs is InstancePrincipal. 
#- name: ociconfigdir #mountPath: /var/opt/.oci #readOnly: true @@ -143,7 +143,7 @@ spec: name: oci-la-fluentd-logs-configmap # change as required # Mount directory where oci config exists # Uncomment the following section if using config file base AuthZ instead of default InstancePrincipal based AuthZ. - # For OKE, recommended AuthZ approach to connect to OCI Logging Analytics APIs is InstancePrincipal. + # For OKE, recommended AuthZ approach to connect to OCI Log Analytics APIs is InstancePrincipal. #- name: ociconfigdir #projected: #sources: diff --git a/logan/kubernetes-resources/objects-collection/fluentd-deployment.yaml b/logan/kubernetes-resources/objects-collection/fluentd-deployment.yaml index 143e10b..06cb06b 100644 --- a/logan/kubernetes-resources/objects-collection/fluentd-deployment.yaml +++ b/logan/kubernetes-resources/objects-collection/fluentd-deployment.yaml @@ -83,10 +83,10 @@ spec: - name: FLUENTD_CONF value: "/var/opt/conf/fluent.conf" # change as required - name: FLUENT_OCI_DEFAULT_LOGGROUP_ID - # Replace this value with actual logging analytics log group - value: + # Replace this value with actual Log Analytics log group + value: - name: FLUENT_OCI_NAMESPACE - # Replace this value with actual namespace of logging analytics + # Replace this value with actual namespace of Log Analytics value: - name: FLUENT_OCI_KUBERNETES_CLUSTER_ID # Replace this value with Kubernetes Cluster ID @@ -95,7 +95,7 @@ spec: # Replace this value with Kubernetes Cluster Name value: # Uncomment the following section if using config file base AuthZ instead of default InstancePrincipal based AuthZ. - # For OKE, recommended AuthZ approach to connect to OCI Logging Analytics APIs is InstancePrincipal. + # For OKE, recommended AuthZ approach to connect to OCI Log Analytics APIs is InstancePrincipal. 
#- name: FLUENT_OCI_CONFIG_LOCATION #value: "/var/opt/.oci/config" ## parameters to limit the memory and requests for the pods @@ -114,7 +114,7 @@ spec: readOnly: true # Mount directory where oci config exists # Uncomment the following section if using config file base AuthZ instead of default InstancePrincipal based AuthZ. - # For OKE, recommended AuthZ approach to connect to OCI Logging Analytics APIs is InstancePrincipal. + # For OKE, recommended AuthZ approach to connect to OCI Log Analytics APIs is InstancePrincipal. #- name: ociconfigdir #mountPath: /var/opt/.oci #readOnly: true @@ -130,7 +130,7 @@ spec: name: oci-la-fluentd-objects-configmap # change as required # Mount directory where oci config exists # Uncomment the following section if using config file base AuthZ instead of default InstancePrincipal based AuthZ. - # For OKE, recommended AuthZ approach to connect to OCI Logging Analytics APIs is InstancePrincipal. + # For OKE, recommended AuthZ approach to connect to OCI Log Analytics APIs is InstancePrincipal. #- name: ociconfigdir #projected: #sources: diff --git a/terraform/modules/helm/helm-inputs.tf b/terraform/modules/helm/helm-inputs.tf index 7a94e29..da9a4a1 100644 --- a/terraform/modules/helm/helm-inputs.tf +++ b/terraform/modules/helm/helm-inputs.tf @@ -49,10 +49,10 @@ variable "kubernetes_namespace" { } #### -## OCI Logging Analytics Information +## OCI Log Analytics Information #### -# OCI Logging Analytics LogGroup OCID +# OCI Log Analytics LogGroup OCID variable "oci_la_log_group_ocid" { type = string default = "" @@ -63,7 +63,7 @@ variable "oci_la_namespace" { type = string } -# OCI Logging Analytics Kubernetes Cluster Entity OCID +# OCI Log Analytics Kubernetes Cluster Entity OCID variable "oci_la_cluster_entity_ocid" { type = string } @@ -124,7 +124,7 @@ variable "tags" { #### variable "LOGAN_ENDPOINT" { - description = "Logging Analytics Endpoint." + description = "Log Analytics Endpoint." 
type = string default = null } diff --git a/terraform/modules/iam/iam.tf b/terraform/modules/iam/iam.tf index c1227a8..16cbf12 100644 --- a/terraform/modules/iam/iam.tf +++ b/terraform/modules/iam/iam.tf @@ -16,7 +16,7 @@ locals { # Policy policy_name = "oci-kubernetes-monitoring-${local.cluster_ocid_md5}" - policy_desc = "Auto generated by Resource Manager Stack - oci-kubernetes-monitoring. Allows Fluentd and MgmtAgent Pods running inside Kubernetes Cluster to send the data to OCI Logging Analytics and OCI Monitoring respectively." + policy_desc = "Auto generated by Resource Manager Stack - oci-kubernetes-monitoring. Allows Fluentd and MgmtAgent Pods running inside Kubernetes Cluster to send the data to OCI Log Analytics and OCI Monitoring respectively." onm_compartment_scope = var.root_compartment_ocid == var.oci_onm_compartment_ocid ? "tenancy" : "compartment id ${var.oci_onm_compartment_ocid}" oke_compartment_scope = var.root_compartment_ocid == var.oke_compartment_ocid ? "tenancy" : "compartment id ${var.oke_compartment_ocid}" @@ -33,7 +33,7 @@ locals { infra_discovery_stmt = [ # Allows log analytics service to query OKE infra resources # TODO: check if CLUSTER_READ will lead to duplicate ENTITY creation via service connector flow - # Ref - https://docs.oracle.com/en-us/iaas/logging-analytics/doc/ingest-logs-other-oci-services-using-service-connector.html#LOGAN-GUID-3848C538-28AC-4F53-B217-90129278D84F + # Ref - https://docs.oracle.com/en-us/iaas/logging-analytics/doc/ingest-logs-other-oci-services-using-service-connector.html#LOGAN-GUID-3848C538-28AC-4F53-B217-90129278D84F "Allow resource loganalyticsvrp LogAnalyticsVirtualResource to {VCN_READ,SUBNET_READ,LOAD_BALANCER_READ,CLUSTER_READ,VNIC_READ} in ${local.oke_compartment_scope}", # https://docs.oracle.com/en-us/iaas/Content/Identity/Reference/contengpolicyreference.htm "Allow dynamic-group ${local.dynamic_group_name} to {CLUSTER_READ} in ${local.oke_compartment_scope} where 
target.cluster.id='${var.oke_cluster_ocid}'", diff --git a/terraform/modules/logan/logan-inputs.tf b/terraform/modules/logan/logan-inputs.tf index cc584be..6125320 100644 --- a/terraform/modules/logan/logan-inputs.tf +++ b/terraform/modules/logan/logan-inputs.tf @@ -16,17 +16,17 @@ variable "compartment_ocid" { type = string } -# Option to create Logging Analytics +# Option to create Log Analytics variable "opt_create_new_la_log_group" { type = bool } -# OCI Logging Analytics Log Group name (user input) +# OCI Log Analytics Log Group name (user input) variable "log_group_display_name" { type = string } -# OCI Logging Analytics LogGroup OCID (user input) +# OCI Log Analytics LogGroup OCID (user input) variable "log_group_ocid" { type = string } diff --git a/terraform/modules/logan/logan.tf b/terraform/modules/logan/logan.tf index 656ff25..332a72b 100644 --- a/terraform/modules/logan/logan.tf +++ b/terraform/modules/logan/logan.tf @@ -14,7 +14,7 @@ data "oci_log_analytics_namespaces" "logan_namespaces" { # User Facing Error postcondition { condition = !(self.namespace_collection == null) - error_message = "Tenancy is not on-boarded to OCI Logging Analytics service." + error_message = "Tenancy is not on-boarded to OCI Log Analytics service." 
} } } diff --git a/terraform/modules/main/main-inputs.tf b/terraform/modules/main/main-inputs.tf index 5e75564..9f01e28 100644 --- a/terraform/modules/main/main-inputs.tf +++ b/terraform/modules/main/main-inputs.tf @@ -93,7 +93,7 @@ variable "opt_import_dashboards" { ## Logan Module #### -# Option to create Logging Analytics +# Option to create Log Analytics variable "opt_create_new_la_log_group" { type = bool default = false @@ -161,7 +161,7 @@ variable "oke_cluster_entity_ocid" { type = string } -# OCI Logging Analytics LogGroup OCID provided by user +# OCI Log Analytics LogGroup OCID provided by user variable "log_group_ocid" { type = string } @@ -177,7 +177,7 @@ variable "enable_service_log" { #### variable "LOGAN_ENDPOINT" { - description = "Logging Analytics Endpoint." + description = "Log Analytics Endpoint." type = string default = null } diff --git a/terraform/modules/main/main.tf b/terraform/modules/main/main.tf index d251c1a..e5ed079 100644 --- a/terraform/modules/main/main.tf +++ b/terraform/modules/main/main.tf @@ -71,7 +71,7 @@ module "iam" { } } -# Create Logging Analytics Resources +# Create Log Analytics Resources module "logan" { source = "../logan" count = local.module_controls_enable_logan_module ? 1 : 0 diff --git a/terraform/oke/schema.yaml b/terraform/oke/schema.yaml index f162ad1..73972c1 100644 --- a/terraform/oke/schema.yaml +++ b/terraform/oke/schema.yaml @@ -3,8 +3,8 @@ # yaml-language-server: $schema=./meta-schema.yaml title: OCI Kubernetes Monitoring Solution -description: "Monitor, manage, and generate insights into your Kubernetes deployed in OCI, third party public clouds, private clouds, or on-premises including managed Kubernetes deployments. The solution utilizes the following OCI services: Logging Analytics, Monitoring, and Management Agent." 
-informationalText: "Monitor, manage, and generate insights into your Kubernetes deployed in OCI, third party public clouds, private clouds, or on-premises including managed Kubernetes deployments. The solution utilizes the following OCI services: Logging Analytics, Monitoring, and Management Agent." +description: "Monitor, manage, and generate insights into your Kubernetes deployed in OCI, third party public clouds, private clouds, or on-premises including managed Kubernetes deployments. The solution utilizes the following OCI services: Log Analytics, Monitoring, and Management Agent." +informationalText: "Monitor, manage, and generate insights into your Kubernetes deployed in OCI, third party public clouds, private clouds, or on-premises including managed Kubernetes deployments. The solution utilizes the following OCI services: Log Analytics, Monitoring, and Management Agent." schemaVersion: 1.1.0 version: "20221004" @@ -93,7 +93,7 @@ variables: # Override logan endpoint for discovery and fluentd collection LOGAN_ENDPOINT: type: string - title: Logging Analytics Endpoint. [ Do not use in Production ] + title: Log Analytics Endpoint. [ Do not use in Production ] # default: add default values here for env override #### [Section] @@ -179,18 +179,18 @@ variables: For the full list of resources, see oci-kubernetes-monitoring. default: ${compartment_ocid} - # Option to create Logging Analytics + # Option to create Log Analytics opt_create_new_la_log_group: # change this to create new log group type: boolean title: Create a new log group default: false - # OCI Logging Analytics LogGroup OCID of existing LogGroup + # OCI Log Analytics LogGroup OCID of existing LogGroup oci_la_log_group_ocid: type: oci:logan:loggroup:id dependsOn: compartmentId: ${oci_onm_compartment_ocid} - title: OCI Logging Analytics log group + title: OCI Log Analytics log group description: Log groups are logical containers for log data. 
They provide access control for your data by using IAM policies. required: true visible: @@ -203,7 +203,7 @@ variables: # maxLength: 100 # Do not use maxLength #minLength: 1 required: false - title: OCI Logging Analytics log group name + title: OCI Log Analytics log group name description: |- To make the log group easy-to-find in Dashboards and Log Explorer pages, provide a unique name related to your cluster name. If not provided, the stack creates a log group based on OKE cluster's name and creation date. @@ -212,19 +212,19 @@ variables: - ${opt_create_new_la_log_group} pattern: '(^\S.*\S$|^$)' - # Option to create a new OCI Logging Analytics Entity + # Option to create a new OCI Log Analytics Entity opt_create_oci_la_entity: type: boolean - title: Create a new Logging Analytics entity for this cluster - description: Clear the check box if you want to use an existing Logging Analytics entity. + title: Create a new Log Analytics entity for this cluster + description: Clear the check box if you want to use an existing Log Analytics entity. default: true - # User Provided OCI Logging Analytics Entity OCID + # User Provided OCI Log Analytics Entity OCID oke_cluster_entity_ocid: type: string - title: OCID of OCI Logging Analytics entity + title: OCID of OCI Log Analytics entity # default: "null" - description: This must be a valid Logging Analytics entity of the type Kubernetes Cluster. + description: This must be a valid Log Analytics entity of the type Kubernetes Cluster. 
required: true pattern: '^(ocid1\.loganalyticsentity\.\S+$)' # maxLength: 93 # Don't set as realm ID is part of OCID diff --git a/terraform/oke/stack-inputs.tf b/terraform/oke/stack-inputs.tf index 5895f7c..302c717 100644 --- a/terraform/oke/stack-inputs.tf +++ b/terraform/oke/stack-inputs.tf @@ -133,13 +133,13 @@ variable "oci_onm_compartment_ocid" { type = string } -# Option to create Logging Analytics +# Option to create Log Analytics variable "opt_create_new_la_log_group" { type = bool default = false } -# OCI Logging Analytics LogGroup OCID +# OCI Log Analytics LogGroup OCID variable "oci_la_log_group_ocid" { type = string default = null @@ -158,7 +158,7 @@ variable "oci_la_log_group_name" { } } -# Option to create Logging Analytics +# Option to create Log Analytics variable "opt_create_oci_la_entity" { type = bool default = true @@ -172,7 +172,7 @@ variable "oke_cluster_entity_ocid" { # User Facing Error validation { condition = var.oke_cluster_entity_ocid == null ? true : length(regexall("^(ocid1\\.loganalyticsentity\\.\\S+)$", var.oke_cluster_entity_ocid)) > 0 ? true : false - error_message = "Invalid OCI Logging Analytics entity OCID" + error_message = "Invalid OCI Log Analytics entity OCID" } } @@ -241,7 +241,7 @@ variable "template_id" { variable "toggle_use_local_helm_chart" { type = string - default = false + default = true } # Ref - https://confluence.oci.oraclecorp.com/display/TERSI/FAQs#FAQs-Q.HowdoItestonPre-ProdenvironmentORHowdoImakeTerraformproviderpointtocustomControlPlane(CP)endpoint @@ -253,7 +253,7 @@ variable "CLIENT_HOST_OVERRIDES" { } variable "LOGAN_ENDPOINT" { - description = "Logging Analytics Endpoint." + description = "Log Analytics Endpoint." 
type = string default = null } diff --git a/terraform/oke/terraform-sample.tfvars b/terraform/oke/terraform-sample.tfvars index e5e9bcd..b1ce339 100644 --- a/terraform/oke/terraform-sample.tfvars +++ b/terraform/oke/terraform-sample.tfvars @@ -36,7 +36,7 @@ oci_la_log_group_name = "" # Optional: A LogGroup with ClusterName_Cluster opt_create_oci_la_entity = true // Alternative option for Entity: # opt_create_oci_la_entity = false -# oke_cluster_entity_ocid = "" +# oke_cluster_entity_ocid = "" // If you opt to import dashboards: // Ensure to manually delete the dashboards when you destroy the resources since the dashboards are not deleted automatically.