diff --git a/build.sbt b/build.sbt index b740f33..a8218e0 100644 --- a/build.sbt +++ b/build.sbt @@ -137,6 +137,19 @@ def setupCommonDockerImageConfig(project: Project): Project = project .settings( dockerRepository := Some("localhost:5001"), + dockerBuildCommand := { + if (sys.props("os.arch") != "amd64") { + // use buildx with platform to build supported amd64 images on other CPU architectures + // this may require that you have first run 'docker buildx create' to set docker buildx up + dockerExecCommand.value ++ Seq( + "buildx", + "build", + "--platform=linux/amd64", + //"--platform=linux/arm64/v8", + "--load" + ) ++ dockerBuildOptions.value :+ "." + } else dockerBuildCommand.value + }, dockerBaseImage := "eclipse-temurin:11.0.16.1_1-jdk-focal", Docker / aggregate := false, Compile / packageDoc / publishArtifact := false diff --git a/infra/Pulumi.yaml b/infra/Pulumi.yaml deleted file mode 100644 index 4267c05..0000000 --- a/infra/Pulumi.yaml +++ /dev/null @@ -1,7 +0,0 @@ -name: vss -description: VSS infra -runtime: scala -config: - vss:localRegistry: "localhost:5001" - vss:imageName: "vss-zio" - vss:imageTag: "0.1.0-SNAPSHOT" \ No newline at end of file diff --git a/infra/aws-eks/Main.scala b/infra/aws-eks/Main.scala new file mode 100644 index 0000000..ea48a33 --- /dev/null +++ b/infra/aws-eks/Main.scala @@ -0,0 +1,44 @@ +import besom.* +import besom.api.{aws, awsx, eks} + +@main def main = Pulumi.run { + val appName = "vss" + val vpc = awsx.ec2.Vpc( + name = s"$appName-vpc", + awsx.ec2.VpcArgs( + cidrBlock = "10.1.0.0/16", + enableDnsHostnames = true, + enableDnsSupport = true + ) + ) + + val cluster = eks.Cluster( + name = s"$appName-cluster", + eks.ClusterArgs( + vpcId = vpc.vpcId, + subnetIds = vpc.publicSubnetIds, + desiredCapacity = 2, + minSize = 1, + maxSize = 2, + storageClasses = "gp2" + ) + ) + + val repo = aws.ecr.Repository( + s"$appName-repository", + aws.ecr.RepositoryArgs( + imageTagMutability = "MUTABLE" + ) + ) + + val authorizationToken = + aws.ecr.getAuthorizationToken(aws.ecr.GetAuthorizationTokenArgs(registryId = repo.registryId)) + + Stack(cluster).exports( + registryEndpoint = authorizationToken.proxyEndpoint, + repositoryUrl = repo.repositoryUrl, + accessKeyId = authorizationToken.map(_.userName).asSecret, + secretAccessKey = authorizationToken.map(_.password).asSecret, + kubeconfig = cluster.kubeconfigJson + ) +} diff --git a/infra/aws-eks/Pulumi.yaml b/infra/aws-eks/Pulumi.yaml new file mode 100644 index 0000000..1e54620 --- /dev/null +++ b/infra/aws-eks/Pulumi.yaml @@ -0,0 +1,5 @@ +name: vss-aws-eks +runtime: scala +description: VSS AWS EKS infra +config: + aws:region: us-west-2 \ No newline at end of file diff --git a/infra/aws-eks/project.scala b/infra/aws-eks/project.scala new file mode 100644 index 0000000..bb5f9a0 --- /dev/null +++ b/infra/aws-eks/project.scala @@ -0,0 +1,6 @@ +//> using scala "3.3.3" +//> using options -Werror -Wunused:all -Wvalue-discard -Wnonunit-statement +//> using plugin "org.virtuslab::besom-compiler-plugin:0.2.2" +//> using dep "org.virtuslab::besom-core:0.2.2" +//> using dep "org.virtuslab::besom-awsx:2.5.0-core.0.2" +//> using dep "org.virtuslab::besom-eks:2.2.1-core.0.2" diff --git a/infra/azure-aks/Main.scala b/infra/azure-aks/Main.scala new file mode 100644 index 0000000..c7f2c79 --- /dev/null +++ b/infra/azure-aks/Main.scala @@ -0,0 +1,76 @@ +import besom.* +import besom.api.azurenative + +@main def main = Pulumi.run { + val appName = "vss" + + val resourceGroup = 
azurenative.resources.ResourceGroup(s"$appName-resource-group") + + val identity = azurenative.managedidentity.UserAssignedIdentity( + name = s"$appName-identity", + azurenative.managedidentity.UserAssignedIdentityArgs(resourceGroupName = resourceGroup.name) + ) + + val k8sCluster = azurenative.containerservice.ManagedCluster( + name = s"$appName-cluster", + azurenative.containerservice.ManagedClusterArgs( + resourceGroupName = resourceGroup.name, + dnsPrefix = appName, + identity = azurenative.containerservice.inputs.ManagedClusterIdentityArgs( + `type` = azurenative.containerservice.enums.ResourceIdentityType.UserAssigned, + userAssignedIdentities = List(identity.id) + ), + agentPoolProfiles = List( + azurenative.containerservice.inputs.ManagedClusterAgentPoolProfileArgs( + count = 1, + vmSize = "Standard_DS2_v2", + mode = azurenative.containerservice.enums.AgentPoolMode.System, + name = "agentpool", + osType = azurenative.containerservice.enums.OsType.Linux + ) + ) + ) + ) + + val kubeconfig = azurenative.containerservice + .listManagedClusterUserCredentials( + azurenative.containerservice.ListManagedClusterUserCredentialsArgs( + resourceName = k8sCluster.name, + resourceGroupName = resourceGroup.name + ) + ) + .kubeconfigs + .map(_.head.value) + + val registry = azurenative.containerregistry.Registry( + name = s"$appName-registry", + azurenative.containerregistry.RegistryArgs( + resourceGroupName = resourceGroup.name, + registryName = s"${appName}Registry", + sku = azurenative.containerregistry.inputs.SkuArgs( + name = azurenative.containerregistry.enums.SkuName.Basic + ), + adminUserEnabled = true + ), + opts = opts(deleteBeforeReplace = true) + ) + + val registryCredentials = azurenative.containerregistry.listRegistryCredentials( + azurenative.containerregistry.ListRegistryCredentialsArgs( + registryName = registry.name, + resourceGroupName = resourceGroup.name + ) + ) + + Stack.exports( + registryEndpoint = registry.loginServer, + repositoryUrl = p"${registry.loginServer}/$appName", + accessKeyId = registryCredentials.username, + // TODO make it secret + secretAccessKey = registryCredentials.passwords.map(_.head.head).value, + // TODO make it secret + kubeconfig = kubeconfig.map(base64Decoder) + ) +} + +private def base64Decoder: String => String = v => new String(java.util.Base64.getDecoder.decode(v.getBytes)) diff --git a/infra/azure-aks/Pulumi.yaml b/infra/azure-aks/Pulumi.yaml new file mode 100644 index 0000000..aa5ac04 --- /dev/null +++ b/infra/azure-aks/Pulumi.yaml @@ -0,0 +1,5 @@ +name: vss-azure-aks +runtime: scala +description: VSS Azure AKS infra +config: + azure-native:location: East US \ No newline at end of file diff --git a/infra/azure-aks/project.scala b/infra/azure-aks/project.scala new file mode 100644 index 0000000..5824a5d --- /dev/null +++ b/infra/azure-aks/project.scala @@ -0,0 +1,5 @@ +//> using scala "3.3.3" +//> using options -Werror -Wunused:all -Wvalue-discard -Wnonunit-statement +//> using plugin "org.virtuslab::besom-compiler-plugin:0.2.2" +//> using dep "org.virtuslab::besom-core:0.2.2" +//> using dep "org.virtuslab::besom-azure-native:2.29.0-core.0.2" diff --git a/infra/gcp-gke/Main.scala b/infra/gcp-gke/Main.scala new file mode 100644 index 0000000..edbd9f1 --- /dev/null +++ b/infra/gcp-gke/Main.scala @@ -0,0 +1,117 @@ +import besom.* +import besom.api.gcp + +enum GCPService(val name: String): + case KubernetesEngine extends GCPService("container.googleapis.com") + case ArtifactRegistry extends GCPService("artifactregistry.googleapis.com") + +@main def 
main = Pulumi.run { + val appName = "vss" + + // Enable GCP service(s) for the current project + val enableServices: Map[GCPService, Output[gcp.projects.Service]] = + GCPService.values + .map(api => api -> projectService(api)) + .toMap + + val k8sCluster = gcp.container.Cluster( + name = s"$appName-cluster", + gcp.container.ClusterArgs( + deletionProtection = false, + initialNodeCount = 1, + minMasterVersion = "1.29.1-gke.1589018", + nodeVersion = "1.29.1-gke.1589018", + nodeConfig = gcp.container.inputs.ClusterNodeConfigArgs( + machineType = "n1-standard-1", + oauthScopes = List( + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring" + ) + ) + ), + opts = opts(dependsOn = enableServices(GCPService.KubernetesEngine)) + ) + + val context = p"${k8sCluster.project}_${k8sCluster.location}_${k8sCluster.name}" + val kubeconfig = + p"""apiVersion: v1 + |clusters: + |- cluster: + | certificate-authority-data: ${k8sCluster.masterAuth.clusterCaCertificate.map(_.get).asPlaintext} + | server: https://${k8sCluster.endpoint} + | name: $context + |contexts: + |- context: + | cluster: $context + | user: $context + | name: $context + |current-context: $context + |kind: Config + |preferences: {} + |users: + |- name: $context + | user: + | exec: + | apiVersion: client.authentication.k8s.io/v1beta1 + | command: gke-gcloud-auth-plugin + | installHint: Install gke-gcloud-auth-plugin for use with kubectl by following + | https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke + | provideClusterInfo: true + |""".stripMargin + + val repository = gcp.artifactregistry.Repository( + name = s"$appName-repository", + gcp.artifactregistry.RepositoryArgs( + repositoryId = s"$appName-repository", + format = "DOCKER" + ), + opts = opts(dependsOn = enableServices(GCPService.ArtifactRegistry)) + ) + + val serviceAccount = gcp.serviceaccount.Account( + name = s"$appName-service-account", + gcp.serviceaccount.AccountArgs( + accountId = s"$appName-service-account", + displayName = "Service Account for Artifact Registry" + ) + ) + + val serviceAccountKey = gcp.serviceaccount.Key( + name = s"$appName-service-account-key", + gcp.serviceaccount.KeyArgs( + serviceAccountId = serviceAccount.accountId, + privateKeyType = "TYPE_GOOGLE_CREDENTIALS_FILE" + ) + ) + + val repositoryIamBinding = gcp.artifactregistry.RepositoryIamMember( + name = s"$appName-service-iam-everyone", + gcp.artifactregistry.RepositoryIamMemberArgs( + repository = repository.repositoryId, + role = "roles/artifactregistry.repoAdmin", + member = "allUsers" + ) + ) + + Stack(repositoryIamBinding).exports( + registryEndpoint = p"${k8sCluster.location}-docker.pkg.dev", + repositoryUrl = p"${k8sCluster.location}-docker.pkg.dev/${k8sCluster.project}/${repository.repositoryId}/$appName", + accessKeyId = "_json_key_base64", + secretAccessKey = serviceAccountKey.privateKey, + kubeconfig = kubeconfig + ) +} + +private def projectService(api: GCPService)(using Context): Output[gcp.projects.Service] = + gcp.projects.Service( + name = s"enable-${api.name.replace(".", "-")}", + gcp.projects.ServiceArgs( + service = api.name, + /* if true - at every destroy this will disable the dependent services for the whole project */ + disableDependentServices = true, + /* if true - at every destroy this will disable the service for the whole project */ + disableOnDestroy = true + ) + ) diff --git 
a/infra/gcp-gke/Pulumi.yaml b/infra/gcp-gke/Pulumi.yaml new file mode 100644 index 0000000..723cc07 --- /dev/null +++ b/infra/gcp-gke/Pulumi.yaml @@ -0,0 +1,6 @@ +name: vss-gcp-gke +runtime: scala +description: VSS GCP GKE infra +config: + gcp:project: besom-413811 + gcp:region: us-west1 \ No newline at end of file diff --git a/infra/gcp-gke/project.scala b/infra/gcp-gke/project.scala new file mode 100644 index 0000000..ce416ef --- /dev/null +++ b/infra/gcp-gke/project.scala @@ -0,0 +1,5 @@ +//> using scala "3.3.3" +//> using options -Werror -Wunused:all -Wvalue-discard -Wnonunit-statement +//> using plugin "org.virtuslab::besom-compiler-plugin:0.2.2" +//> using dep "org.virtuslab::besom-core:0.2.2" +//> using dep "org.virtuslab::besom-gcp:7.9.0-core.0.2" diff --git a/infra/k8s/Pulumi.yaml b/infra/k8s/Pulumi.yaml new file mode 100644 index 0000000..51d3c83 --- /dev/null +++ b/infra/k8s/Pulumi.yaml @@ -0,0 +1,11 @@ +name: vss +description: VSS infra +runtime: scala +config: + vss:localRepository: "localhost:5001/vss-zio" + vss:imageTag: "0.1.0-SNAPSHOT" + vss:cluster: "remote" + vss:clusterOrg: "organization" + vss:clusterProject: "vss-aws-eks" + vss:clusterStack: "dev" + grafana:auth: "admin:admin" \ No newline at end of file diff --git a/infra/project.scala b/infra/k8s/project.scala similarity index 53% rename from infra/project.scala rename to infra/k8s/project.scala index fa8f045..a5d1f3c 100644 --- a/infra/project.scala +++ b/infra/k8s/project.scala @@ -1,3 +1,5 @@ //> using scala 3.3.3 //> using dep org.virtuslab::besom-core:0.2.2 //> using dep org.virtuslab::besom-kubernetes::4.8.0-core.0.2 +//> using dep org.virtuslab::besom-docker:4.5.1-core.0.2 +//> using dep org.virtuslab::besom-grafana:0.2.0-core.0.2 diff --git a/infra/k8s/src/main/scala/Cluster.scala b/infra/k8s/src/main/scala/Cluster.scala new file mode 100644 index 0000000..40cac46 --- /dev/null +++ b/infra/k8s/src/main/scala/Cluster.scala @@ -0,0 +1,11 @@ +enum Cluster(val name: String): + case Local extends Cluster("local") + case Remote extends Cluster("remote") + +object Cluster: + val parseName = Cluster.values + .map(c => c.name -> c) + .toMap + .withDefault(str => + throw Exception(s"$str value not allowed. Available values are local or remote. 
Change vss:cluster configuration") + ) diff --git a/infra/k8s/src/main/scala/Grafana.scala b/infra/k8s/src/main/scala/Grafana.scala new file mode 100644 index 0000000..17c6026 --- /dev/null +++ b/infra/k8s/src/main/scala/Grafana.scala @@ -0,0 +1,192 @@ +import besom.* +import besom.api.kubernetes as k8s +import besom.api.grafana +import k8s.apps.v1.inputs.* +import k8s.apps.v1.{Deployment, DeploymentArgs} +import k8s.core.v1.inputs.* +import k8s.core.v1.{ConfigMapArgs, ServiceArgs, *} +import k8s.core.v1.enums.ServiceSpecType +import k8s.meta.v1.inputs.* + +object Grafana: + val appName: NonEmptyString = "grafana" + val labels = Map("app" -> "grafana") + val port = 3000 + private val imageTag = "10.1.9" + + def deploy(using + Context + )(namespace: Output[Namespace], postgresService: Output[Service], k8sProvider: Output[k8s.Provider]) = { + val grafanaConfigMap = ConfigMap( + s"$appName-config", + ConfigMapArgs( + metadata = ObjectMetaArgs(name = s"$appName-init-config-map", namespace = namespace.metadata.name), + data = Map( + "grafana.ini" -> + p""" + |[database] + |type = postgres + |host = ${postgresService.metadata.name.map(_.get)}:${Postgres.port} + |name = vss + |user = postgres + |password = postgres + """.stripMargin + ) + ), + opts(provider = k8sProvider, deleteBeforeReplace = true) + ) + + Deployment( + appName, + DeploymentArgs( + spec = DeploymentSpecArgs( + selector = LabelSelectorArgs(matchLabels = labels), + replicas = 1, + template = PodTemplateSpecArgs( + metadata = ObjectMetaArgs( + name = s"$appName-deployment", + labels = labels, + namespace = namespace.metadata.name + ), + spec = PodSpecArgs( + securityContext = PodSecurityContextArgs(runAsUser = 0, fsGroup = 0), + containers = List( + ContainerArgs( + name = appName, + image = s"grafana/grafana:$imageTag", + ports = List( + ContainerPortArgs(containerPort = port) + ), + readinessProbe = ProbeArgs( + failureThreshold = 3, + httpGet = HttpGetActionArgs( + path = "/robots.txt", + port = port, + scheme = "HTTP" + ), + initialDelaySeconds = 10, + periodSeconds = 30, + successThreshold = 1, + timeoutSeconds = 2 + ), + livenessProbe = ProbeArgs( + failureThreshold = 3, + tcpSocket = TcpSocketActionArgs(port = port), + initialDelaySeconds = 30, + periodSeconds = 10, + successThreshold = 1, + timeoutSeconds = 1 + ), + resources = ResourceRequirementsArgs( + requests = Map("cpu" -> "250m", "memory" -> "750Mi") + ), + volumeMounts = List( + VolumeMountArgs( + mountPath = "/etc/grafana/grafana.ini", + subPath = "grafana.ini", + name = s"$appName-config" + ) + ) + ) + ), + volumes = List( + VolumeArgs( + name = s"$appName-config", + configMap = ConfigMapVolumeSourceArgs( + name = grafanaConfigMap.metadata.name + ) + ) + ) + ) + ) + ), + metadata = ObjectMetaArgs( + name = s"$appName-deployment", + namespace = namespace.metadata.name + ) + ), + opts(provider = k8sProvider) + ) + } + + def deployService(using + Context + )( + lokiUrl: Output[String], + jaegerUrl: Output[String], + serviceType: Output[ServiceSpecType], + namespace: Output[Namespace], + grafanaDeployment: Output[Deployment], + k8sProvider: Output[k8s.Provider] + ) = + val service = Service( + appName, + ServiceArgs( + spec = ServiceSpecArgs( + selector = labels, + sessionAffinity = "None", + `type` = serviceType, + ports = List( + ServicePortArgs(port = port, targetPort = port) + ) + ), + metadata = ObjectMetaArgs( + name = s"$appName-service", + namespace = namespace.metadata.name + ) + ), + opts(dependsOn = grafanaDeployment, provider = k8sProvider) + ) + + val 
serviceUrl = + service.status.loadBalancer.ingress + .map( + _.flatMap(_.headOption) + .flatMap(ingress => + ingress.ip match + case None => + ingress.hostname + case other => + other + ) + .getOrElse(p"localhost") + ) + .flatMap(host => p"http://$host:$port") + + val grafanaProvider = grafana.Provider( + name = s"$appName-provider", + grafana.ProviderArgs( + retryWait = 20, // seconds + retries = 8, + url = serviceUrl, + auth = config.requireString("grafana:auth") // grafana.config.getAuth + ), + opts = opts(dependsOn = grafanaDeployment, deletedWith = grafanaDeployment) + ) + + val lokiDataSource = grafana.DataSource( + name = s"$appName-loki-data-source", + grafana.DataSourceArgs( + url = lokiUrl, + `type` = "loki", + basicAuthEnabled = false, + isDefault = true + ), + opts = opts(provider = grafanaProvider, dependsOn = service, deletedWith = grafanaDeployment) + ) + + val jaegerDataSource = grafana.DataSource( + name = s"$appName-jaeger-data-source", + grafana.DataSourceArgs( + url = jaegerUrl, + `type` = "jaeger", + basicAuthEnabled = false, + isDefault = false + ), + opts = opts(provider = grafanaProvider, dependsOn = service, deletedWith = grafanaDeployment) + ) + for + _ <- lokiDataSource + _ <- jaegerDataSource + yield serviceUrl + end deployService diff --git a/infra/src/main/scala/Jaeger.scala b/infra/k8s/src/main/scala/Jaeger.scala similarity index 84% rename from infra/src/main/scala/Jaeger.scala rename to infra/k8s/src/main/scala/Jaeger.scala index 513614b..d648d5d 100644 --- a/infra/src/main/scala/Jaeger.scala +++ b/infra/k8s/src/main/scala/Jaeger.scala @@ -13,6 +13,7 @@ import besom.aliases.NonEmptyString object Jaeger { val appName: NonEmptyString = "jaeger" // todo fix inference in NonEmptyString val labels = Map("app" -> "jaeger") + private val imageTag = "1.36" // https://www.jaegertracing.io/docs/1.6/getting-started/#all-in-one-docker-image - port descriptions val ports = Map( @@ -25,7 +26,7 @@ object Jaeger { "zipkin-collector" -> (None, 9411) ) - def deploy(using Context)(namespace: Output[Namespace]) = Deployment( + def deploy(using Context)(namespace: Output[Namespace], k8sProvider: Output[k8s.Provider]) = Deployment( appName, DeploymentArgs( spec = DeploymentSpecArgs( @@ -41,7 +42,7 @@ object Jaeger { containers = List( ContainerArgs( name = appName, - image = "jaegertracing/all-in-one:1.36", + image = s"jaegertracing/all-in-one:$imageTag", ports = ports.map { case (name, (protocol, port)) => ContainerPortArgs(containerPort = port, protocol) }.toList, @@ -64,10 +65,13 @@ object Jaeger { name = s"$appName-deployment", namespace = namespace.metadata.name ) - ) + ), + opts(provider = k8sProvider) ) - def deployService(using Context)(namespace: Output[Namespace]) = Service( + def deployService(using + Context + )(namespace: Output[Namespace], jaegerDeployment: Output[Deployment], k8sProvider: Output[k8s.Provider]) = Service( appName, ServiceArgs( spec = ServiceSpecArgs( @@ -80,7 +84,8 @@ object Jaeger { name = s"$appName-service", namespace = namespace.metadata.name ) - ) + ), + opts(dependsOn = jaegerDeployment, provider = k8sProvider) ) } diff --git a/infra/src/main/scala/Kafka.scala b/infra/k8s/src/main/scala/Kafka.scala similarity index 81% rename from infra/src/main/scala/Kafka.scala rename to infra/k8s/src/main/scala/Kafka.scala index 0c74b59..454dacb 100644 --- a/infra/src/main/scala/Kafka.scala +++ b/infra/k8s/src/main/scala/Kafka.scala @@ -2,7 +2,7 @@ import besom.* import besom.util.* import besom.api.kubernetes as k8s import k8s.core.v1.inputs.* -import 
k8s.core.v1.{ConfigMap, Namespace, Service, ConfigMapArgs, ServiceArgs} +import k8s.core.v1.{ConfigMap, ConfigMapArgs, Namespace, Service, ServiceArgs} import k8s.apps.v1.inputs.* import k8s.apps.v1.{Deployment, DeploymentArgs} import k8s.meta.v1.inputs.* @@ -13,8 +13,11 @@ object Kafka { val labels = Map("app" -> "kafka") val kafkaServiceName = s"$appName-service" val port = 9092 + private val imageTag = "7.0.1" - def deploy(using Context)(namespace: Output[Namespace], zookeeperService: Output[Service]) = Deployment( + def deploy(using + Context + )(namespace: Output[Namespace], zookeeperService: Output[Service], k8sProvider: Output[k8s.Provider]) = Deployment( appName, DeploymentArgs( spec = DeploymentSpecArgs( @@ -30,7 +33,7 @@ object Kafka { containers = List( ContainerArgs( name = "kafka-broker", - image = "confluentinc/cp-kafka:7.0.1", + image = s"confluentinc/cp-kafka:$imageTag", ports = List( ContainerPortArgs(containerPort = port) ), @@ -61,10 +64,13 @@ object Kafka { name = "kafka-deployment", namespace = namespace.metadata.name ) - ) + ), + opts(provider = k8sProvider) ) - def deployService(using Context)(namespace: Output[Namespace]) = Service( + def deployService(using + Context + )(namespace: Output[Namespace], kafkaDeployment: Output[Deployment], k8sProvider: Output[k8s.Provider]) = Service( appName, ServiceArgs( spec = ServiceSpecArgs( @@ -77,7 +83,8 @@ object Kafka { name = kafkaServiceName, namespace = namespace.metadata.name ) - ) + ), + opts(dependsOn = kafkaDeployment, provider = k8sProvider) ) } diff --git a/infra/k8s/src/main/scala/Loki.scala b/infra/k8s/src/main/scala/Loki.scala new file mode 100644 index 0000000..022755a --- /dev/null +++ b/infra/k8s/src/main/scala/Loki.scala @@ -0,0 +1,150 @@ +import besom.* +import besom.aliases.NonEmptyString +import besom.api.kubernetes as k8s +import k8s.apps.v1.inputs.* +import k8s.apps.v1.{Deployment, DeploymentArgs} +import k8s.core.v1.inputs.* +import k8s.core.v1.{ConfigMapArgs, ServiceArgs, *} +import k8s.meta.v1.inputs.* +import besom.internal.{Context, Output} +import besom.util.NonEmptyString +import besom.aliases.NonEmptyString + +object Loki: + val appName: NonEmptyString = "loki" + val labels = Map("app" -> "loki") + val port = 3100 + private val imageTag = "2.8.11" + private val configFileName = "loki.yaml" + + def deploy(using Context)(namespace: Output[Namespace], k8sProvider: Output[k8s.Provider]) = + val configMap = ConfigMap( + s"$appName-config", + ConfigMapArgs( + metadata = ObjectMetaArgs(name = s"$appName-config", namespace = namespace.metadata.name), + data = Map( + configFileName -> + s"""auth_enabled: false + | + |server: + | http_listen_port: $port + | + |ingester: + | wal: + | dir: /tmp/wal + | lifecycler: + | address: 127.0.0.1 + | ring: + | kvstore: + | store: inmemory + | replication_factor: 1 + | chunk_idle_period: 15m + | chunk_retain_period: 30s + | + |schema_config: + | configs: + | - from: 2020-10-24 + | store: boltdb-shipper + | object_store: filesystem + | schema: v11 + | index: + | prefix: index_ + | period: 24h + | + |storage_config: + | boltdb_shipper: + | active_index_directory: /tmp/loki/index + | cache_location: /tmp/loki/cache + | cache_ttl: 24h + | shared_store: filesystem + | filesystem: + | directory: /tmp/loki/chunks + | + |compactor: + | working_directory: /tmp/loki/compactor + | shared_store: filesystem + | + |limits_config: + | reject_old_samples: true + | reject_old_samples_max_age: 168h + | + |chunk_store_config: + | max_look_back_period: 0s + | + |table_manager: + | 
retention_deletes_enabled: false + | retention_period: 0s + """.stripMargin + ) + ), + opts(provider = k8sProvider) + ) + + Deployment( + appName, + DeploymentArgs( + spec = DeploymentSpecArgs( + selector = LabelSelectorArgs(matchLabels = labels), + replicas = 1, + template = PodTemplateSpecArgs( + metadata = ObjectMetaArgs( + name = s"$appName-deployment", + labels = labels, + namespace = namespace.metadata.name + ), + spec = PodSpecArgs( + containers = List( + ContainerArgs( + name = appName, + image = s"grafana/loki:$imageTag", + ports = List( + ContainerPortArgs(containerPort = port) + ), + args = List(s"-config.file=/etc/loki/$configFileName"), + volumeMounts = List( + VolumeMountArgs( + mountPath = "/etc/loki", + readOnly = true, + name = s"$appName-config-volume" + ) + ) + ) + ), + volumes = List( + VolumeArgs( + name = s"$appName-config-volume", + configMap = ConfigMapVolumeSourceArgs( + name = configMap.metadata.name + ) + ) + ) + ) + ) + ), + metadata = ObjectMetaArgs( + name = s"$appName-deployment", + namespace = namespace.metadata.name + ) + ), + opts(provider = k8sProvider) + ) + + def deployService(using + Context + )(namespace: Output[Namespace], lokiDeployment: Output[Deployment], k8sProvider: Output[k8s.Provider]) = Service( + appName, + ServiceArgs( + spec = ServiceSpecArgs( + selector = labels, + `type` = k8s.core.v1.enums.ServiceSpecType.ClusterIP, + ports = List( + ServicePortArgs(port = port, targetPort = port) + ) + ), + metadata = ObjectMetaArgs( + name = s"$appName-service", + namespace = namespace.metadata.name + ) + ), + opts(dependsOn = lokiDeployment, provider = k8sProvider) + ) diff --git a/infra/k8s/src/main/scala/Main.scala b/infra/k8s/src/main/scala/Main.scala new file mode 100644 index 0000000..ea7fd23 --- /dev/null +++ b/infra/k8s/src/main/scala/Main.scala @@ -0,0 +1,196 @@ +import besom.* +import besom.api.kubernetes.core.v1.enums.ServiceSpecType +import besom.api.kubernetes.core.v1.{Namespace, NamespaceArgs} +import besom.api.kubernetes.meta.v1.inputs.ObjectMetaArgs +import besom.api.{docker, kubernetes as k8s} +import besom.json.* +import besom.json.DefaultJsonProtocol.StringJsonFormat + +@main def main = Pulumi.run { + val appName: NonEmptyString = "vss" + val clusterConfig = config + .requireString("cluster") + .map(Cluster.parseName) + + val serviceType = clusterConfig.map: + case Cluster.Remote => + ServiceSpecType.LoadBalancer + case _ => + ServiceSpecType.ClusterIP + + val clusterStack = + for + orgName <- config.getString("clusterOrg").getOrElse("organization") + projName <- config.requireString("clusterProject") + stackName <- config.requireString("clusterStack") + stack <- StackReference(name = s"$orgName/$projName/$stackName") + yield stack + + val k8sProvider = clusterConfig.flatMap: + case Cluster.Local => + k8s.Provider(name = s"$appName-local-provider") + case Cluster.Remote => + k8s.Provider( + name = s"$appName-remote-provider", + k8s.ProviderArgs(kubeconfig = clusterStack.requireOutput("kubeconfig").convertTo[String]) + ) + + val appNamespace = Namespace( + name = appName, + NamespaceArgs(metadata = ObjectMetaArgs(name = appName)), + opts = opts(provider = k8sProvider) + ) + + val imageTag = config.requireString("imageTag") + val localRepository = config.requireString("localRepository") + + val registryEndpoint = clusterStack.requireOutput("registryEndpoint").convertTo[String] + val repositoryUrl = clusterStack.requireOutput("repositoryUrl").convertTo[String] + val secretAccessKeyJsValue = clusterStack.requireOutput("secretAccessKey") 
+ val accessKeyIdJsValue = clusterStack.requireOutput("accessKeyId") + val secretAccessKey = secretAccessKeyJsValue.convertTo[String] + val accessKeyId = accessKeyIdJsValue.convertTo[String] + + val appImage = clusterConfig.flatMap: + case Cluster.Local => + p"$localRepository:$imageTag" + case Cluster.Remote => + val dockerProvider = docker.Provider( + name = s"$appName-docker-provider", + docker.ProviderArgs( + registryAuth = List( + docker.inputs.ProviderRegistryAuthArgs( + address = registryEndpoint, + username = accessKeyId, + password = secretAccessKey + ) + ) + ) + ) + val tag = docker.Tag( + name = s"$appName-tag", + docker.TagArgs( + sourceImage = p"$localRepository:$imageTag", + targetImage = p"$repositoryUrl:$imageTag" + ), + opts = opts(provider = dockerProvider) + ) + docker + .RegistryImage( + name = s"$appName-image", + docker.RegistryImageArgs(name = tag.targetImage), + opts = opts(provider = dockerProvider) + ) + .name + + val appImagePullSecret = clusterConfig.flatMap: + case Cluster.Local => + Output(None) + case Cluster.Remote => + val secret = k8s.core.v1.Secret( + s"$appName-registry-secret", + k8s.core.v1.SecretArgs( + metadata = ObjectMetaArgs( + name = s"$appName-registry-secret", + namespace = appNamespace.metadata.name + ), + `type` = "kubernetes.io/dockerconfigjson", + stringData = Map( + ".dockerconfigjson" -> + jsObjectOutput( + "auths" -> jsObjectOutput( + repositoryUrl -> jsObjectOutput( + "username" -> accessKeyIdJsValue, + "password" -> secretAccessKeyJsValue, + "auth" -> p"$accessKeyId:$secretAccessKey".map(base64).map(JsString(_)) + ) + ) + ).prettyPrint + ) + ), + opts = opts(provider = k8sProvider, deleteBeforeReplace = true) + ) + secret.map(Some(_)) + + // loki + val lokiDeployment = Loki.deploy(appNamespace, k8sProvider) + val lokiService = Loki.deployService(appNamespace, lokiDeployment, k8sProvider) + val lokiUrl = p"http://${lokiService.metadata.name.map(_.get)}:${Loki.port}" + + // promtail + val promtailDaemonSet = Promtail.deploy(lokiUrl, appNamespace, k8sProvider) + + // zookeeper + val zooDeployment = Zookeeper.deploy(appNamespace, k8sProvider) + val zooService = Zookeeper.deployService(appNamespace, zooDeployment, k8sProvider) + + // kafka + val kafkaDeployment = Kafka.deploy(appNamespace, zooService, k8sProvider) + val kafkaService = Kafka.deployService(appNamespace, kafkaDeployment, k8sProvider) + + // postgres + val postgresDeployment = Postgres.deploy(appNamespace, k8sProvider) + val postgresService = Postgres.deployService(appNamespace, postgresDeployment, k8sProvider) + + // jaeger + val jaegerDeployment = Jaeger.deploy(appNamespace, k8sProvider) + val jaegerService = Jaeger.deployService(appNamespace, jaegerDeployment, k8sProvider) + val jaegerUrl = p"http://${jaegerService.metadata.name.map(_.get)}:${Jaeger.ports("frontend")._2}" + + // grafana + val grafanaDeployment = Grafana.deploy(appNamespace, postgresService, k8sProvider) + val grafanaServiceUrl = + Grafana.deployService(lokiUrl, jaegerUrl, serviceType, appNamespace, grafanaDeployment, k8sProvider) + + // vss + val vssDeployment = + VSS.deploy(appImagePullSecret, appImage, appNamespace, postgresService, kafkaService, jaegerService, k8sProvider) + val vssServiceUrl = VSS.deployService(serviceType, appNamespace, vssDeployment, k8sProvider) + + Stack.exports( + appImage = appImage, + grafanaServiceUrl = grafanaServiceUrl, + vssServiceUrl = vssServiceUrl, + namespaceName = appNamespace.metadata.name, + lokiDeploymentName = lokiDeployment.metadata.name, + lokiServiceName = 
lokiService.metadata.name, + grafanaDeploymentName = grafanaDeployment.metadata.name, + promtailDaemonSetName = promtailDaemonSet.metadata.name, + zookeeperDeploymentName = zooDeployment.metadata.name, + zookeeperServiceName = zooService.metadata.name, + kafkaDeploymentName = kafkaDeployment.metadata.name, + kafkaServiceName = kafkaService.metadata.name, + postgresDeploymentName = postgresDeployment.metadata.name, + postgresServiceName = postgresService.metadata.name, + jaegerDeploymentName = jaegerDeployment.metadata.name, + jaegerServiceName = jaegerService.metadata.name, + vssDeploymentName = vssDeployment.metadata.name + ) +} + +private def base64: String => String = v => java.util.Base64.getEncoder.encodeToString(v.getBytes) + +private def jsObjectOutput( + members: (String | Output[String], JsValue | Output[JsValue])* +)(using Context): Output[JsObject] = + Output + .sequence( + members.toSeq.map { + case (k: String, v: JsValue) => + Output(k, v) + case (k: String, ov: Output[JsValue]) => + ov.map(v => (k, v)) + case (ok: Output[String], v: JsValue) => + ok.map(k => (k, v)) + case (ok: Output[String], ov: Output[JsValue]) => + ok.zip(ov) + } + ) + .map(o => JsObject.apply(o*)) + +extension (o: Output[StackReference]) + def requireOutput(name: NonEmptyString)(using Context): Output[JsValue] = o.flatMap(_.requireOutput(name)) + +extension (o: Output[JsValue]) + def convertTo[T : JsonReader]: Output[T] = o.map(_.convertTo[T]) + def prettyPrint: Output[String] = o.map(_.prettyPrint) diff --git a/infra/src/main/scala/Ops.scala b/infra/k8s/src/main/scala/Ops.scala similarity index 100% rename from infra/src/main/scala/Ops.scala rename to infra/k8s/src/main/scala/Ops.scala diff --git a/infra/src/main/scala/Postgres.scala b/infra/k8s/src/main/scala/Postgres.scala similarity index 82% rename from infra/src/main/scala/Postgres.scala rename to infra/k8s/src/main/scala/Postgres.scala index 5b480d2..40471a8 100644 --- a/infra/src/main/scala/Postgres.scala +++ b/infra/k8s/src/main/scala/Postgres.scala @@ -13,19 +13,22 @@ object Postgres { val appName: NonEmptyString = "postgres" // todo fix inference in NonEmptyString val labels = Map("app" -> "postgres") val port = 5432 + private val imageTag = "14.1-alpine" - def deploy(using Context)(namespace: Output[Namespace]) = { + def deploy(using Context)(namespace: Output[Namespace], k8sProvider: Output[k8s.Provider]) = { val postgresPV = PersistentVolume( appName, k8s.core.v1.PersistentVolumeArgs( metadata = ObjectMetaArgs(name = s"$appName-pv", namespace = namespace.metadata.name), spec = PersistentVolumeSpecArgs( + storageClassName = "standard", capacity = Map("storage" -> "8Gi"), accessModes = List("ReadWriteMany"), - hostPath = HostPathVolumeSourceArgs("/data/db") + hostPath = HostPathVolumeSourceArgs("/mnt/disks/share/data/db") ) - ) + ), + opts(provider = k8sProvider) ) val postgresPVC = PersistentVolumeClaim( @@ -33,12 +36,15 @@ object Postgres { k8s.core.v1.PersistentVolumeClaimArgs( metadata = ObjectMetaArgs(name = s"$appName-pvc", namespace = namespace.metadata.name), spec = PersistentVolumeClaimSpecArgs( + storageClassName = "standard", + volumeName = postgresPV.metadata.name, accessModes = List("ReadWriteMany"), resources = VolumeResourceRequirementsArgs( requests = Map("storage" -> "8Gi") ) ) - ) + ), + opts(provider = k8sProvider) ) val postgresConfigMap = ConfigMap( @@ -51,7 +57,8 @@ object Postgres { "POSTGRES_USER" -> "postgres", "POSTGRES_PASSWORD" -> "postgres" ) - ) + ), + opts(provider = k8sProvider) ) val initConfigMap = ConfigMap( 
@@ -59,8 +66,9 @@ object Postgres { ConfigMapArgs( metadata = ObjectMetaArgs(name = s"$appName-init-config-map", namespace = namespace.metadata.name), // path starts from besom/.scala-build - data = Ops.readFileIntoConfigMap("../commons/src/main/resources/tables.sql", Some("init.sql")) - ) + data = Ops.readFileIntoConfigMap("../../commons/src/main/resources/tables.sql", Some("init.sql")) + ), + opts(provider = k8sProvider) ) Deployment( @@ -79,7 +87,7 @@ object Postgres { containers = List( ContainerArgs( name = appName, - image = "postgres:14.1-alpine", + image = s"postgres:$imageTag", ports = List( ContainerPortArgs(containerPort = port) ), @@ -120,11 +128,14 @@ object Postgres { name = s"$appName-deployment", namespace = namespace.metadata.name ) - ) + ), + opts(provider = k8sProvider) ) } - def deployService(using Context)(namespace: Output[Namespace]) = Service( + def deployService(using + Context + )(namespace: Output[Namespace], postgresDeployment: Output[Deployment], k8sProvider: Output[k8s.Provider]) = Service( appName, ServiceArgs( spec = ServiceSpecArgs( @@ -137,6 +148,7 @@ object Postgres { name = s"$appName-service", namespace = namespace.metadata.name ) - ) + ), + opts(dependsOn = postgresDeployment, provider = k8sProvider) ) } diff --git a/infra/k8s/src/main/scala/Promtail.scala b/infra/k8s/src/main/scala/Promtail.scala new file mode 100644 index 0000000..ea22f99 --- /dev/null +++ b/infra/k8s/src/main/scala/Promtail.scala @@ -0,0 +1,190 @@ +import besom.* +import besom.api.kubernetes as k8s +import k8s.apps.v1.inputs.* +import k8s.apps.v1.{DaemonSet, DaemonSetArgs, Deployment, DeploymentArgs} +import k8s.core.v1.inputs.* +import k8s.core.v1.{ConfigMapArgs, ServiceAccountArgs, *} +import k8s.meta.v1.inputs.* +import k8s.rbac.v1.inputs.{PolicyRuleArgs, RoleRefArgs, SubjectArgs} +import k8s.rbac.v1.{ClusterRole, ClusterRoleArgs, ClusterRoleBinding, ClusterRoleBindingArgs} + +object Promtail: + val appName: NonEmptyString = "promtail" + val labels = Map("app" -> "promtail") + val port = 9080 + private val imageTag = "2.8.11" + private val configFileName = "promtail.yaml" + + def deploy(using + Context + )(lokiUrl: Output[String], namespace: Output[Namespace], k8sProvider: Output[k8s.Provider]) = + val configMap = ConfigMap( + s"$appName-config", + ConfigMapArgs( + metadata = ObjectMetaArgs(name = s"$appName-config", namespace = namespace.metadata.name), + data = Map( + configFileName -> + p"""server: + | http_listen_port: $port + | grpc_listen_port: 0 + |clients: + |- url: $lokiUrl/loki/api/v1/push + |positions: + | filename: /tmp/positions.yaml + |target_config: + | sync_period: 10s + |scrape_configs: + |- job_name: pod-logs + | kubernetes_sd_configs: + | - role: pod + | pipeline_stages: + | - docker: {} + | relabel_configs: + | - source_labels: + | - __meta_kubernetes_pod_node_name + | target_label: __host__ + | - action: labelmap + | regex: __meta_kubernetes_pod_label_(.+) + | - action: replace + | replacement: $$1 + | separator: / + | source_labels: + | - __meta_kubernetes_namespace + | - __meta_kubernetes_pod_name + | target_label: job + | - action: replace + | source_labels: + | - __meta_kubernetes_namespace + | target_label: namespace + | - action: replace + | source_labels: + | - __meta_kubernetes_pod_name + | target_label: pod + | - action: replace + | source_labels: + | - __meta_kubernetes_pod_container_name + | target_label: container + | - replacement: /var/log/pods/*$$1/*.log + | separator: / + | source_labels: + | - __meta_kubernetes_pod_uid + | - 
__meta_kubernetes_pod_container_name + | target_label: __path__ + |""".stripMargin + ) + ), + opts(provider = k8sProvider) + ) + + val serviceAccount = ServiceAccount( + s"$appName-service-account", + ServiceAccountArgs( + metadata = ObjectMetaArgs(name = s"$appName-service-account", namespace = namespace.metadata.name) + ), + opts(provider = k8sProvider) + ) + + val clusterRole = ClusterRole( + s"$appName-cluster-role", + ClusterRoleArgs( + metadata = ObjectMetaArgs(name = s"$appName-cluster-role", namespace = namespace.metadata.name), + rules = List( + PolicyRuleArgs( + apiGroups = List(""), + resources = List("nodes", "services", "pods"), + verbs = List("get", "watch", "list") + ) + ) + ), + opts(provider = k8sProvider) + ) + + val clusterRoleBinding = ClusterRoleBinding( + s"$appName-cluster-role-binding", + ClusterRoleBindingArgs( + metadata = ObjectMetaArgs(name = s"$appName-cluster-role-binding", namespace = namespace.metadata.name), + subjects = List( + SubjectArgs( + kind = "ServiceAccount", + name = serviceAccount.metadata.name.map(_.get), + namespace = namespace.metadata.name + ) + ), + roleRef = RoleRefArgs( + kind = "ClusterRole", + name = clusterRole.metadata.name.map(_.get), + apiGroup = "rbac.authorization.k8s.io" + ) + ), + opts = opts(provider = k8sProvider, retainOnDelete = false, dependsOn = List(clusterRole, serviceAccount)) + ) + + DaemonSet( + s"$appName-daemon-set", + DaemonSetArgs( + metadata = ObjectMetaArgs(name = s"$appName-daemon-set", namespace = namespace.metadata.name), + spec = DaemonSetSpecArgs( + selector = LabelSelectorArgs(matchLabels = labels), + template = PodTemplateSpecArgs( + metadata = ObjectMetaArgs( + name = s"$appName-deployment", + labels = labels, + namespace = namespace.metadata.name + ), + spec = PodSpecArgs( + serviceAccount = serviceAccount.metadata.name, + containers = List( + ContainerArgs( + name = appName, + image = s"grafana/promtail:$imageTag", + ports = List( + ContainerPortArgs(containerPort = port) + ), + args = List(s"-config.file=/etc/promtail/$configFileName"), + env = List( + EnvVarArgs( + name = "HOSTNAME", + valueFrom = EnvVarSourceArgs( + fieldRef = ObjectFieldSelectorArgs(fieldPath = "spec.nodeName") + ) + ) + ), + volumeMounts = List( + VolumeMountArgs( + mountPath = "/var/log", + name = "logs" + ), + VolumeMountArgs( + mountPath = "/etc/promtail", + name = s"$appName-config" + ), + VolumeMountArgs( + mountPath = "/var/lib/docker/containers", + readOnly = true, + name = "varlibdockercontainers" + ) + ) + ) + ), + volumes = List( + VolumeArgs( + name = "logs", + hostPath = HostPathVolumeSourceArgs(path = "/var/log") + ), + VolumeArgs( + name = "varlibdockercontainers", + hostPath = HostPathVolumeSourceArgs(path = "/var/lib/docker/containers") + ), + VolumeArgs( + name = s"$appName-config", + configMap = ConfigMapVolumeSourceArgs( + name = configMap.metadata.name + ) + ) + ) + ) + ) + ) + ), + opts = opts(provider = k8sProvider, dependsOn = clusterRoleBinding) + ) diff --git a/infra/src/main/scala/VSS.scala b/infra/k8s/src/main/scala/VSS.scala similarity index 55% rename from infra/src/main/scala/VSS.scala rename to infra/k8s/src/main/scala/VSS.scala index 9e298c8..e5eba03 100644 --- a/infra/src/main/scala/VSS.scala +++ b/infra/k8s/src/main/scala/VSS.scala @@ -1,15 +1,13 @@ import besom.* -import besom.util.* import besom.api.kubernetes as k8s -import k8s.core.v1.inputs.* -import k8s.core.v1.{ConfigMap, ConfigMapArgs, Namespace, Service, ServiceArgs} -import k8s.apps.v1.inputs.* -import k8s.apps.v1.{Deployment, 
DeploymentArgs} -import k8s.meta.v1.inputs.* -import besom.internal.{Context, Output} -import besom.internal.Config +import besom.api.kubernetes.apps.v1.inputs.* +import besom.api.kubernetes.apps.v1.{Deployment, DeploymentArgs} +import besom.api.kubernetes.core.v1.enums.ServiceSpecType +import besom.api.kubernetes.core.v1.inputs.* +import besom.api.kubernetes.core.v1.{Namespace, Service, ServiceArgs} +import besom.api.kubernetes.meta.v1.inputs.* -object VSS { +object VSS: val appName: NonEmptyString = "vss-app" // todo fix inference in NonEmptyString val labels = Map("app" -> "vss-app") val ports = Map( @@ -20,17 +18,14 @@ object VSS { ) def deploy(using Context)( - config: Config, + k8sRegistrySecret: Output[Option[k8s.core.v1.Secret]], + image: Output[String], namespace: Output[Namespace], postgresService: Output[Service], kafkaService: Output[Service], - jaegerService: Output[Service] + jaegerService: Output[Service], + k8sProvider: Output[k8s.Provider] ) = { - val localRegistry = config.requireString("localRegistry") - val imageName = config.requireString("imageName") - val imageTag = config.requireString("imageTag") - val image = pulumi"$localRegistry/$imageName:$imageTag" - Deployment( appName, DeploymentArgs( @@ -44,11 +39,13 @@ object VSS { namespace = namespace.metadata.name ), spec = PodSpecArgs( + imagePullSecrets = + k8sRegistrySecret.map(_.map(secret => LocalObjectReferenceArgs(name = secret.metadata.name)).toList), containers = List( ContainerArgs( name = appName, image = image, - imagePullPolicy = "IfNotPresent", + imagePullPolicy = "Always", ports = ports.map { case (name, (protocol, port)) => ContainerPortArgs(containerPort = port, protocol) }.toList, @@ -74,24 +71,45 @@ object VSS { name = s"$appName-deployment", namespace = namespace.metadata.name ) - ) + ), + opts(provider = k8sProvider) ) } - def deployService(using Context)(namespace: Output[Namespace]) = Service( - appName, - ServiceArgs( - spec = ServiceSpecArgs( - selector = labels, - ports = ports.map { case (name, (protocol, port)) => - ServicePortArgs(name = name, port = port, targetPort = port, protocol = protocol) - }.toList + def deployService(using Context)( + serviceType: Output[ServiceSpecType], + namespace: Output[Namespace], + vssDeployment: Output[Deployment], + k8sProvider: Output[k8s.Provider] + ) = + val service = Service( + appName, + ServiceArgs( + spec = ServiceSpecArgs( + `type` = serviceType, + selector = labels, + ports = ports.map { case (name, (protocol, port)) => + ServicePortArgs(name = name, port = port, targetPort = port, protocol = protocol) + }.toList + ), + metadata = ObjectMetaArgs( + name = s"$appName-service", + namespace = namespace.metadata.name + ) ), - metadata = ObjectMetaArgs( - name = s"$appName-service", - namespace = namespace.metadata.name - ) + opts(dependsOn = vssDeployment, provider = k8sProvider) ) - ) -} + service.status.loadBalancer.ingress + .map( + _.flatMap(_.headOption) + .flatMap(ingress => + ingress.ip match + case None => + ingress.hostname + case other => + other + ) + .getOrElse(p"localhost") + ) + .flatMap(host => p"http://$host:${ports("main-http")._2}/docs") diff --git a/infra/src/main/scala/Zookeeper.scala b/infra/k8s/src/main/scala/Zookeeper.scala similarity index 76% rename from infra/src/main/scala/Zookeeper.scala rename to infra/k8s/src/main/scala/Zookeeper.scala index c386a44..f795d13 100644 --- a/infra/src/main/scala/Zookeeper.scala +++ b/infra/k8s/src/main/scala/Zookeeper.scala @@ -2,7 +2,7 @@ import besom.* import besom.util.NonEmptyString import 
besom.api.kubernetes as k8s import k8s.core.v1.inputs.* -import k8s.core.v1.{ConfigMap, Namespace, Service, ConfigMapArgs, ServiceArgs} +import k8s.core.v1.{ConfigMap, ConfigMapArgs, Namespace, Service, ServiceArgs} import k8s.apps.v1.inputs.* import k8s.apps.v1.{Deployment, DeploymentArgs} import k8s.meta.v1.inputs.* @@ -12,8 +12,9 @@ import besom.aliases.NonEmptyString object Zookeeper { val appName: NonEmptyString = "zookeeper" // todo fix inference in NonEmptyString val labels = Map("app" -> "zookeeper") + private val imageTag = "7.0.1" - def deploy(using Context)(namespace: Output[Namespace]) = Deployment( + def deploy(using Context)(namespace: Output[Namespace], k8sProvider: Output[k8s.Provider]) = Deployment( appName, DeploymentArgs( spec = DeploymentSpecArgs( @@ -29,7 +30,7 @@ object Zookeeper { containers = List( ContainerArgs( name = appName, - image = "confluentinc/cp-zookeeper:7.0.1", + image = s"confluentinc/cp-zookeeper:$imageTag", ports = List( ContainerPortArgs(containerPort = 2181) ), @@ -46,10 +47,13 @@ object Zookeeper { name = s"$appName-deployment", namespace = namespace.metadata.name ) - ) + ), + opts(provider = k8sProvider) ) - def deployService(using Context)(namespace: Output[Namespace]) = Service( + def deployService(using + Context + )(namespace: Output[Namespace], zooDeployment: Output[Deployment], k8sProvider: Output[k8s.Provider]) = Service( appName, ServiceArgs( spec = ServiceSpecArgs( @@ -62,7 +66,8 @@ object Zookeeper { name = s"$appName-service", namespace = namespace.metadata.name ) - ) + ), + opts(dependsOn = zooDeployment, provider = k8sProvider) ) } diff --git a/infra/src/main/scala/Main.scala b/infra/src/main/scala/Main.scala deleted file mode 100644 index 922385c..0000000 --- a/infra/src/main/scala/Main.scala +++ /dev/null @@ -1,42 +0,0 @@ -import besom.* -import besom.api.kubernetes.core.v1.{Namespace, Service} -import besom.internal.{Config, Output} - -@main def main = Pulumi.run { - - val appNamespace = Namespace(name = "vss") - - // zookeeper - val zooDeployment = Zookeeper.deploy(appNamespace) - val zooService = Zookeeper.deployService(appNamespace) - - // kafka - val kafkaDeployment = Kafka.deploy(appNamespace, zooService) - val kafkaService = Kafka.deployService(appNamespace) - - // postgres - val postgresDeployment = Postgres.deploy(appNamespace) - val postgresService = Postgres.deployService(appNamespace) - - // jaeger - val jaegerDeployment = Jaeger.deploy(appNamespace) - val jaegerService = Jaeger.deployService(appNamespace) - - // vss - val vssDeployment = VSS.deploy(config, appNamespace, postgresService, kafkaService, jaegerService) - val vssService = VSS.deployService(appNamespace) - - Stack.exports( - namespaceName = appNamespace.metadata.name, - zookeeperDeploymentName = zooDeployment.metadata.name, - zookeeperServiceName = zooService.metadata.name, - kafkaDeploymentName = kafkaDeployment.metadata.name, - kafkaServiceName = kafkaService.metadata.name, - postgresDeploymentName = postgresDeployment.metadata.name, - postgresServiceName = postgresService.metadata.name, - jaegerDeploymentName = jaegerDeployment.metadata.name, - jaegerServiceName = jaegerService.metadata.name, - vssDeploymentName = vssDeployment.metadata.name, - vssServiceName = vssService.metadata.name - ) -} diff --git a/project/build.properties b/project/build.properties index f344c14..b19d4e1 100644 --- a/project/build.properties +++ b/project/build.properties @@ -1 +1 @@ -sbt.version = 1.8.2 +sbt.version = 1.9.7 diff --git a/project/plugins.sbt b/project/plugins.sbt 
index 22ae078..20637c1 100644
--- a/project/plugins.sbt
+++ b/project/plugins.sbt
@@ -1,4 +1,4 @@
 addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.4.6")
 libraryDependencies ++= Seq("com.thesamet.scalapb.zio-grpc" %% "zio-grpc-codegen" % "0.6.0-rc4")
 addSbtPlugin("org.typelevel" % "sbt-fs2-grpc" % "2.5.11")
-addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.8.0")
+addSbtPlugin("com.github.sbt" % "sbt-native-packager" % "1.9.16")
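
Usage sketch for the pieces above (stack and config names are taken from the Pulumi.yaml files in this diff; the `dev` stack name and the exact command order are illustrative, not prescribed by the patch). The buildx branch in build.sbt assumes a buildx builder already exists; on a non-amd64 host that is typically a one-time:

    docker buildx create --use

With that in place, a remote deployment could look like:

    sbt "Docker / publishLocal"             # build localhost:5001/vss-zio:0.1.0-SNAPSHOT into the local daemon
    cd infra/aws-eks                        # or azure-aks / gcp-gke
    pulumi stack init dev && pulumi up      # provision cluster + registry; exports kubeconfig and registry credentials
    cd ../k8s
    pulumi stack init dev
    pulumi config set vss:cluster remote    # "local" instead targets the default kubeconfig and skips the image push
    pulumi up                               # push the image, then deploy Zookeeper, Kafka, Postgres, Jaeger, Loki, Promtail, Grafana and VSS

The k8s stack locates the cluster stack through vss:clusterOrg / vss:clusterProject / vss:clusterStack via the StackReference in infra/k8s/src/main/scala/Main.scala.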