diff --git a/cloud/linode/cilium_loadbalancers.go b/cloud/linode/cilium_loadbalancers.go
index c22f7317..f377082a 100644
--- a/cloud/linode/cilium_loadbalancers.go
+++ b/cloud/linode/cilium_loadbalancers.go
@@ -75,12 +75,12 @@ var (
 // getExistingSharedIPsInCluster determines the list of addresses to share on nodes by checking the
 // CiliumLoadBalancerIPPools created by the CCM in createCiliumLBIPPool
 // NOTE: Cilium CRDs must be installed for this to work
-func (l *loadbalancers) getExistingSharedIPsInCluster(ctx context.Context) ([]string, error) {
+func (l *LoadBalancers) getExistingSharedIPsInCluster(ctx context.Context) ([]string, error) {
 	addrs := []string{}
 	if err := l.retrieveCiliumClientset(); err != nil {
 		return addrs, err
 	}
-	pools, err := l.ciliumClient.CiliumLoadBalancerIPPools().List(ctx, metav1.ListOptions{
+	pools, err := l.CiliumClient.CiliumLoadBalancerIPPools().List(ctx, metav1.ListOptions{
 		LabelSelector: "app.kubernetes.io/managed-by=linode-ccm",
 	})
 	if err != nil {
@@ -94,11 +94,11 @@ func (l *loadbalancers) getExistingSharedIPsInCluster(ctx context.Context) ([]st
 	return addrs, nil
 }
 
-func (l *loadbalancers) getExistingSharedIPs(ctx context.Context, ipHolder *linodego.Instance) ([]string, error) {
+func (l *LoadBalancers) getExistingSharedIPs(ctx context.Context, ipHolder *linodego.Instance) ([]string, error) {
 	if ipHolder == nil {
 		return nil, nil
 	}
-	ipHolderAddrs, err := l.client.GetInstanceIPAddresses(ctx, ipHolder.ID)
+	ipHolderAddrs, err := l.Client.GetInstanceIPAddresses(ctx, ipHolder.ID)
 	if err != nil {
 		return nil, err
 	}
@@ -110,7 +110,7 @@ func (l *loadbalancers) getExistingSharedIPs(ctx context.Context, ipHolder *lino
 }
 
 // shareIPs shares the given list of IP addresses on the given Node
-func (l *loadbalancers) shareIPs(ctx context.Context, addrs []string, node *v1.Node) error {
+func (l *LoadBalancers) shareIPs(ctx context.Context, addrs []string, node *v1.Node) error {
 	nodeLinodeID, err := parseProviderID(node.Spec.ProviderID)
 	if err != nil {
 		return err
@@ -118,14 +118,14 @@ func (l *loadbalancers) shareIPs(ctx context.Context, addrs []string, node *v1.N
 	if err = l.retrieveKubeClient(); err != nil {
 		return err
 	}
-	if err = l.client.ShareIPAddresses(ctx, linodego.IPAddressesShareOptions{
+	if err = l.Client.ShareIPAddresses(ctx, linodego.IPAddressesShareOptions{
 		IPs:      addrs,
 		LinodeID: nodeLinodeID,
 	}); err != nil {
 		return err
 	}
 	// need to make sure node is up-to-date
-	node, err = l.kubeClient.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{})
+	node, err = l.KubeClient.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
@@ -134,7 +134,7 @@ func (l *loadbalancers) shareIPs(ctx context.Context, addrs []string, node *v1.N
 	}
 	node.Labels[annotations.AnnLinodeNodeIPSharingUpdated] = "true"
 	retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
-		_, err := l.kubeClient.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{})
+		_, err := l.KubeClient.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{})
 		return err
 	})
 	if retryErr != nil {
@@ -151,7 +151,7 @@ func (l *loadbalancers) shareIPs(ctx context.Context, addrs []string, node *v1.N
 // perform IP sharing (via a specified node selector) have the expected IPs shared
 // in the event that a Node joins the cluster after the LoadBalancer Service already
 // exists
-func (l *loadbalancers) handleIPSharing(ctx context.Context, node *v1.Node, ipHolderSuffix string) error {
+func (l *LoadBalancers) handleIPSharing(ctx context.Context, node *v1.Node, ipHolderSuffix string) error {
 	// ignore cases where the provider ID has been set
 	if node.Spec.ProviderID == "" {
 		klog.Info("skipping IP while providerID is unset")
@@ -159,8 +159,8 @@ func (l *loadbalancers) handleIPSharing(ctx context.Context, node *v1.Node, ipHo
 	}
 	// If performing Service load-balancing via IP sharing + BGP, check for a special annotation
 	// added by the CCM gets set when load-balancer IPs have been successfully shared on the node
-	if Options.BGPNodeSelector != "" {
-		kv := strings.Split(Options.BGPNodeSelector, "=")
+	if l.Options.BGPNodeSelector != "" {
+		kv := strings.Split(l.Options.BGPNodeSelector, "=")
 		// Check if node should be participating in IP sharing via the given selector
 		if val, ok := node.Labels[kv[0]]; !ok || len(kv) != 2 || val != kv[1] {
 			// not a selected Node
@@ -210,13 +210,13 @@ func (l *loadbalancers) handleIPSharing(ctx context.Context, node *v1.Node, ipHo
 
 // createSharedIP requests an additional IP that can be shared on Nodes to support
 // loadbalancing via Cilium LB IPAM + BGP Control Plane.
-func (l *loadbalancers) createSharedIP(ctx context.Context, nodes []*v1.Node, ipHolderSuffix string) (string, error) {
+func (l *LoadBalancers) createSharedIP(ctx context.Context, nodes []*v1.Node, ipHolderSuffix string) (string, error) {
 	ipHolder, err := l.ensureIPHolder(ctx, ipHolderSuffix)
 	if err != nil {
 		return "", err
 	}
 
-	newSharedIP, err := l.client.AddInstanceIPAddress(ctx, ipHolder.ID, true)
+	newSharedIP, err := l.Client.AddInstanceIPAddress(ctx, ipHolder.ID, true)
 	if err != nil {
 		return "", err
 	}
@@ -243,7 +243,7 @@ func (l *loadbalancers) createSharedIP(ctx context.Context, nodes []*v1.Node, ip
 	}
 
 	// share the IPs with nodes participating in Cilium BGP peering
-	if Options.BGPNodeSelector == "" {
+	if l.Options.BGPNodeSelector == "" {
 		for _, node := range nodes {
 			if _, ok := node.Labels[commonControlPlaneLabel]; !ok {
 				if err = l.shareIPs(ctx, addrs, node); err != nil {
@@ -252,7 +252,7 @@ func (l *loadbalancers) createSharedIP(ctx context.Context, nodes []*v1.Node, ip
 			}
 		}
 	} else {
-		kv := strings.Split(Options.BGPNodeSelector, "=")
+		kv := strings.Split(l.Options.BGPNodeSelector, "=")
 		for _, node := range nodes {
 			if val, ok := node.Labels[kv[0]]; ok && len(kv) == 2 && val == kv[1] {
 				if err = l.shareIPs(ctx, addrs, node); err != nil {
@@ -267,13 +267,13 @@ func (l *loadbalancers) createSharedIP(ctx context.Context, nodes []*v1.Node, ip
 
 // deleteSharedIP cleans up the shared IP for a LoadBalancer Service if it was assigned
 // by Cilium LB IPAM, removing it from the ip-holder
-func (l *loadbalancers) deleteSharedIP(ctx context.Context, service *v1.Service) error {
+func (l *LoadBalancers) deleteSharedIP(ctx context.Context, service *v1.Service) error {
 	err := l.retrieveKubeClient()
 	if err != nil {
 		return err
 	}
-	nodeList, err := l.kubeClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{
-		LabelSelector: Options.BGPNodeSelector,
+	nodeList, err := l.KubeClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{
+		LabelSelector: l.Options.BGPNodeSelector,
 	})
 	if err != nil {
 		return err
@@ -282,8 +282,8 @@ func (l *loadbalancers) deleteSharedIP(ctx context.Context, service *v1.Service)
 	serviceNn := getServiceNn(service)
 	var ipHolderSuffix string
-	if Options.IpHolderSuffix != "" {
-		ipHolderSuffix = Options.IpHolderSuffix
+	if l.Options.IpHolderSuffix != "" {
+		ipHolderSuffix = l.Options.IpHolderSuffix
 		klog.V(3).Infof("using parameter-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn)
 	}
 
@@ -304,14 +304,14 @@ func (l *loadbalancers) deleteSharedIP(ctx context.Context, service *v1.Service)
 		if err != nil {
 			return err
 		}
-		err = l.client.DeleteInstanceIPAddress(ctx, nodeLinodeID, ingress.IP)
+		err = l.Client.DeleteInstanceIPAddress(ctx, nodeLinodeID, ingress.IP)
 		if IgnoreLinodeAPIError(err, http.StatusNotFound) != nil {
 			return err
 		}
 	}
 
 	// finally delete the shared IP on the ip-holder
-	err = l.client.DeleteInstanceIPAddress(ctx, ipHolder.ID, ingress.IP)
+	err = l.Client.DeleteInstanceIPAddress(ctx, ipHolder.ID, ingress.IP)
 	if IgnoreLinodeAPIError(err, http.StatusNotFound) != nil {
 		return err
 	}
@@ -323,7 +323,7 @@ func (l *loadbalancers) deleteSharedIP(ctx context.Context, service *v1.Service)
 // To hold the IP in lieu of a proper IP reservation system, a special Nanode is
 // created but not booted and used to hold all shared IPs.
-func (l *loadbalancers) ensureIPHolder(ctx context.Context, suffix string) (*linodego.Instance, error) {
+func (l *LoadBalancers) ensureIPHolder(ctx context.Context, suffix string) (*linodego.Instance, error) {
 	ipHolder, err := l.getIPHolder(ctx, suffix)
 	if err != nil {
 		return nil, err
@@ -331,9 +331,9 @@ func (l *loadbalancers) ensureIPHolder(ctx context.Context, suffix string) (*lin
 	if ipHolder != nil {
 		return ipHolder, nil
 	}
-	label := generateClusterScopedIPHolderLinodeName(l.zone, suffix)
-	ipHolder, err = l.client.CreateInstance(ctx, linodego.InstanceCreateOptions{
-		Region:   l.zone,
+	label := generateClusterScopedIPHolderLinodeName(l.Zone, suffix)
+	ipHolder, err = l.Client.CreateInstance(ctx, linodego.InstanceCreateOptions{
+		Region:   l.Zone,
 		Type:     "g6-nanode-1",
 		Label:    label,
 		RootPass: uuid.NewString(),
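
Reviewer note: ensureIPHolder above is a plain get-or-create. A condensed, same-package restatement (error handling trimmed, helper name invented for illustration, not part of this diff):

```go
// ensureIPHolderSketch condenses the flow above: reuse an existing holder,
// otherwise create the never-booted Nanode that holds the shared IPs.
func (l *LoadBalancers) ensureIPHolderSketch(ctx context.Context, suffix string) (*linodego.Instance, error) {
	holder, err := l.getIPHolder(ctx, suffix)
	if err != nil || holder != nil {
		return holder, err // found it, or failed looking it up
	}
	return l.Client.CreateInstance(ctx, linodego.InstanceCreateOptions{
		Region:   l.Zone,
		Type:     "g6-nanode-1", // smallest plan; the instance is never booted
		Label:    generateClusterScopedIPHolderLinodeName(l.Zone, suffix),
		RootPass: uuid.NewString(),
	})
}
```
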
@@ -353,16 +353,16 @@ func (l *loadbalancers) ensureIPHolder(ctx context.Context, suffix string) (*lin
 	return ipHolder, nil
 }
 
-func (l *loadbalancers) getIPHolder(ctx context.Context, suffix string) (*linodego.Instance, error) {
+func (l *LoadBalancers) getIPHolder(ctx context.Context, suffix string) (*linodego.Instance, error) {
 	// even though we have updated the naming convention, leaving this in ensures we have backwards compatibility
-	filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, l.zone)}
+	filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, l.Zone)}
 	rawFilter, err := json.Marshal(filter)
 	if err != nil {
 		panic("this should not have failed")
 	}
 	var ipHolder *linodego.Instance
 	// TODO (rk): should we switch to using GET instead of LIST? we would be able to wrap logic around errors
-	linodes, err := l.client.ListInstances(ctx, linodego.NewListOptions(1, string(rawFilter)))
+	linodes, err := l.Client.ListInstances(ctx, linodego.NewListOptions(1, string(rawFilter)))
 	if err != nil {
 		return nil, err
 	}
@@ -373,12 +373,12 @@ func (l *loadbalancers) getIPHolder(ctx context.Context, suffix string) (*linode
 		// a) an ip holder instance does not exist yet
 		// or
 		// b) another cluster already holds the linode grant to an ip holder using the old naming convention
-		filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(l.zone, suffix)}
+		filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(l.Zone, suffix)}
 		rawFilter, err = json.Marshal(filter)
 		if err != nil {
 			panic("this should not have failed")
 		}
-		linodes, err = l.client.ListInstances(ctx, linodego.NewListOptions(1, string(rawFilter)))
+		linodes, err = l.Client.ListInstances(ctx, linodego.NewListOptions(1, string(rawFilter)))
 		if err != nil {
 			return nil, err
 		}
@@ -407,15 +407,15 @@ func generateClusterScopedIPHolderLinodeName(zone, suffix string) (label string)
 	return label
 }
 
-func (l *loadbalancers) retrieveCiliumClientset() error {
-	if l.ciliumClient != nil {
+func (l *LoadBalancers) retrieveCiliumClientset() error {
+	if l.CiliumClient != nil {
 		return nil
 	}
 	var (
 		kubeConfig *rest.Config
 		err        error
 	)
-	kubeconfigFlag := Options.KubeconfigFlag
+	kubeconfigFlag := l.Options.KubeconfigFlag
 	if kubeconfigFlag == nil || kubeconfigFlag.Value.String() == "" {
 		kubeConfig, err = rest.InClusterConfig()
 	} else {
@@ -424,7 +424,7 @@ func (l *loadbalancers) retrieveCiliumClientset() error {
 	if err != nil {
 		return err
 	}
-	l.ciliumClient, err = ciliumclient.NewForConfig(kubeConfig)
+	l.CiliumClient, err = ciliumclient.NewForConfig(kubeConfig)
 
 	return err
 }
@@ -432,7 +432,7 @@ func (l *loadbalancers) retrieveCiliumClientset() error {
 // for LoadBalancer Services not backed by a NodeBalancer, a CiliumLoadBalancerIPPool resource
 // will be created specifically for the Service with the requested shared IP
 // NOTE: Cilium CRDs must be installed for this to work
-func (l *loadbalancers) createCiliumLBIPPool(ctx context.Context, service *v1.Service, sharedIP string) (*v2alpha1.CiliumLoadBalancerIPPool, error) {
+func (l *LoadBalancers) createCiliumLBIPPool(ctx context.Context, service *v1.Service, sharedIP string) (*v2alpha1.CiliumLoadBalancerIPPool, error) {
 	if err := l.retrieveCiliumClientset(); err != nil {
 		return nil, err
 	}
@@ -455,16 +455,16 @@ func (l *loadbalancers) createCiliumLBIPPool(ctx context.Context, service *v1.Se
 		},
 	}
 
-	return l.ciliumClient.CiliumLoadBalancerIPPools().Create(ctx, ciliumLBIPPool, metav1.CreateOptions{})
+	return l.CiliumClient.CiliumLoadBalancerIPPools().Create(ctx, ciliumLBIPPool, metav1.CreateOptions{})
 }
 
 // NOTE: Cilium CRDs must be installed for this to work
-func (l *loadbalancers) deleteCiliumLBIPPool(ctx context.Context, service *v1.Service) error {
+func (l *LoadBalancers) deleteCiliumLBIPPool(ctx context.Context, service *v1.Service) error {
 	if err := l.retrieveCiliumClientset(); err != nil {
 		return err
 	}
 
-	return l.ciliumClient.CiliumLoadBalancerIPPools().Delete(
+	return l.CiliumClient.CiliumLoadBalancerIPPools().Delete(
 		ctx,
 		fmt.Sprintf("%s-%s-pool", service.Namespace, service.Name),
 		metav1.DeleteOptions{},
@@ -472,12 +472,12 @@ func (l *loadbalancers) deleteCiliumLBIPPool(ctx context.Context, service *v1.Se
 }
 
 // NOTE: Cilium CRDs must be installed for this to work
-func (l *loadbalancers) getCiliumLBIPPool(ctx context.Context, service *v1.Service) (*v2alpha1.CiliumLoadBalancerIPPool, error) {
+func (l *LoadBalancers) getCiliumLBIPPool(ctx context.Context, service *v1.Service) (*v2alpha1.CiliumLoadBalancerIPPool, error) {
 	if err := l.retrieveCiliumClientset(); err != nil {
 		return nil, err
 	}
 
-	return l.ciliumClient.CiliumLoadBalancerIPPools().Get(
+	return l.CiliumClient.CiliumLoadBalancerIPPools().Get(
 		ctx,
 		fmt.Sprintf("%s-%s-pool", service.Namespace, service.Name),
 		metav1.GetOptions{},
@@ -485,22 +485,22 @@ func (l *loadbalancers) getCiliumLBIPPool(ctx context.Context, service *v1.Servi
 }
 
 // NOTE: Cilium CRDs must be installed for this to work
-func (l *loadbalancers) ensureCiliumBGPPeeringPolicy(ctx context.Context) error {
+func (l *LoadBalancers) ensureCiliumBGPPeeringPolicy(ctx context.Context) error {
 	if raw, ok := os.LookupEnv("BGP_CUSTOM_ID_MAP"); ok && raw != "" {
 		klog.Info("BGP_CUSTOM_ID_MAP env variable specified, using it instead of the default region map")
 		if err := json.Unmarshal([]byte(raw), &regionIDMap); err != nil {
 			return err
 		}
 	}
-	regionID, ok := regionIDMap[l.zone]
+	regionID, ok := regionIDMap[l.Zone]
 	if !ok {
-		return fmt.Errorf("unsupported region for BGP: %s", l.zone)
+		return fmt.Errorf("unsupported region for BGP: %s", l.Zone)
 	}
 	if err := l.retrieveCiliumClientset(); err != nil {
 		return err
 	}
 	// check if policy already exists
-	policy, err := l.ciliumClient.CiliumBGPPeeringPolicies().Get(ctx, ciliumBGPPeeringPolicyName, metav1.GetOptions{})
+	policy, err := l.CiliumClient.CiliumBGPPeeringPolicies().Get(ctx, ciliumBGPPeeringPolicyName, metav1.GetOptions{})
 	if err != nil && !k8serrors.IsNotFound(err) {
 		klog.Infof("Failed to get CiliumBGPPeeringPolicy: %s", err.Error())
 		return err
@@ -513,7 +513,7 @@ func (l *loadbalancers) ensureCiliumBGPPeeringPolicy(ctx context.Context) error
 	// otherwise create it
 	var nodeSelector slimv1.LabelSelector
 	// If no BGPNodeSelector is specified, select all worker nodes.
-	if Options.BGPNodeSelector == "" {
+	if l.Options.BGPNodeSelector == "" {
 		nodeSelector = slimv1.LabelSelector{
 			MatchExpressions: []slimv1.LabelSelectorRequirement{
 				{
@@ -523,9 +523,9 @@ func (l *loadbalancers) ensureCiliumBGPPeeringPolicy(ctx context.Context) error
 			},
 		}
 	} else {
-		kv := strings.Split(Options.BGPNodeSelector, "=")
+		kv := strings.Split(l.Options.BGPNodeSelector, "=")
 		if len(kv) != BGPNodeSelectorFlagInputLen {
-			return fmt.Errorf("invalid node selector %s", Options.BGPNodeSelector)
+			return fmt.Errorf("invalid node selector %s", l.Options.BGPNodeSelector)
 		}
 
 		nodeSelector = slimv1.LabelSelector{MatchLabels: map[string]string{kv[0]: kv[1]}}
@@ -581,7 +581,7 @@ func (l *loadbalancers) ensureCiliumBGPPeeringPolicy(ctx context.Context) error
 	}
 
 	klog.Info("Creating CiliumBGPPeeringPolicy")
-	_, err = l.ciliumClient.CiliumBGPPeeringPolicies().Create(ctx, ciliumBGPPeeringPolicy, metav1.CreateOptions{})
+	_, err = l.CiliumClient.CiliumBGPPeeringPolicies().Create(ctx, ciliumBGPPeeringPolicy, metav1.CreateOptions{})
 	return err
 }
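
The BGPNodeSelector handling this file now reads from l.Options reduces to a key=value label match repeated in handleIPSharing, createSharedIP, and ensureCiliumBGPPeeringPolicy. A self-contained sketch of that check (function and label names are illustrative, not taken from the diff):

```go
package main

import (
	"fmt"
	"strings"
)

// nodeMatchesSelector mirrors the checks above: the selector must be exactly
// "key=value", and the node label must match both the key and the value.
func nodeMatchesSelector(labels map[string]string, selector string) bool {
	kv := strings.Split(selector, "=")
	if len(kv) != 2 {
		return false
	}
	val, ok := labels[kv[0]]
	return ok && val == kv[1]
}

func main() {
	labels := map[string]string{"cilium-bgp-peering": "true"}
	fmt.Println(nodeMatchesSelector(labels, "cilium-bgp-peering=true")) // true
	fmt.Println(nodeMatchesSelector(labels, "cilium-bgp-peering"))      // false: not key=value
}
```
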
diff --git a/cloud/linode/cilium_loadbalancers_test.go b/cloud/linode/cilium_loadbalancers_test.go
index 2d3aa17b..bb91e187 100644
--- a/cloud/linode/cilium_loadbalancers_test.go
+++ b/cloud/linode/cilium_loadbalancers_test.go
@@ -222,7 +222,7 @@ func testNoBGPNodeLabel(t *testing.T, mc *mocks.MockClient) {
 	ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
 	addService(t, kubeClient, svc)
 	addNodes(t, kubeClient, nodes)
-	lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType}
+	lb := &LoadBalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options}
 
 	filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)}
 	rawFilter, err := json.Marshal(filter)
@@ -277,7 +277,7 @@ func testUnsupportedRegion(t *testing.T, mc *mocks.MockClient) {
 	kubeClient, _ := k8sClient.NewFakeClientset()
 	ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
 	addService(t, kubeClient, svc)
-	lb := &loadbalancers{mc, "us-foobar", kubeClient, ciliumClient, ciliumLBType}
+	lb := &LoadBalancers{mc, "us-foobar", kubeClient, ciliumClient, ciliumLBType, &Options}
 
 	lbStatus, err := lb.EnsureLoadBalancer(t.Context(), clusterName, svc, nodes)
 	if err == nil {
@@ -289,7 +289,7 @@ func testUnsupportedRegion(t *testing.T, mc *mocks.MockClient) {
 
 	// Use BGP custom id map
 	t.Setenv("BGP_CUSTOM_ID_MAP", "{'us-foobar': 2}")
-	lb = &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType}
+	lb = &LoadBalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options}
 	lbStatus, err = lb.EnsureLoadBalancer(t.Context(), clusterName, svc, nodes)
 	if err == nil {
 		t.Fatal("expected not nil error")
@@ -310,7 +310,7 @@ func testCreateWithExistingIPHolderWithOldIpHolderNamingConvention(t *testing.T,
 	ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
 	addService(t, kubeClient, svc)
 	addNodes(t, kubeClient, nodes)
-	lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType}
+	lb := &LoadBalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options}
 
 	filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)}
 	rawFilter, err := json.Marshal(filter)
@@ -355,7 +355,7 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConvention(t *testing.T,
 	ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
 	addService(t, kubeClient, svc)
 	addNodes(t, kubeClient, nodes)
-	lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType}
+	lb := &LoadBalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options}
 
 	filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)}
 	rawFilter, err := json.Marshal(filter)
@@ -400,7 +400,7 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffi
 	ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
 	addService(t, kubeClient, svc)
 	addNodes(t, kubeClient, nodes)
-	lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType}
+	lb := &LoadBalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options}
 
 	filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)}
 	rawFilter, err := json.Marshal(filter)
@@ -445,7 +445,7 @@ func testCreateWithNoExistingIPHolderUsingNoSuffix(t *testing.T, mc *mocks.MockC
 	ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
 	addService(t, kubeClient, svc)
 	addNodes(t, kubeClient, nodes)
-	lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType}
+	lb := &LoadBalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options}
 
 	filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)}
 	rawFilter, err := json.Marshal(filter)
@@ -497,7 +497,7 @@ func testCreateWithNoExistingIPHolderUsingShortSuffix(t *testing.T, mc *mocks.Mo
 	ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
 	addService(t, kubeClient, svc)
 	addNodes(t, kubeClient, nodes)
-	lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType}
+	lb := &LoadBalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options}
 
 	filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)}
 	rawFilter, err := json.Marshal(filter)
@@ -549,7 +549,7 @@ func testCreateWithNoExistingIPHolderUsingLongSuffix(t *testing.T, mc *mocks.Moc
 	ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
 	addService(t, kubeClient, svc)
 	addNodes(t, kubeClient, nodes)
-	lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType}
+	lb := &LoadBalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options}
 
 	filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)}
 	rawFilter, err := json.Marshal(filter)
@@ -599,7 +599,7 @@ func testEnsureCiliumLoadBalancerDeletedWithOldIpHolderNamingConvention(t *testi
 	ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
 	addService(t, kubeClient, svc)
 	addNodes(t, kubeClient, nodes)
-	lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType}
+	lb := &LoadBalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options}
 
 	dummySharedIP := dummyIP
 	svc.Status.LoadBalancer = v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: dummySharedIP}}}
@@ -632,7 +632,7 @@ func testEnsureCiliumLoadBalancerDeletedWithNewIpHolderNamingConvention(t *testi
 	ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
 	addService(t, kubeClient, svc)
 	addNodes(t, kubeClient, nodes)
-	lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType}
+	lb := &LoadBalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options}
 
 	dummySharedIP := dummyIP
 	svc.Status.LoadBalancer = v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: dummySharedIP}}}
@@ -669,7 +669,7 @@ func testCiliumUpdateLoadBalancerAddNodeWithOldIpHolderNamingConvention(t *testi
 	ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
 	addService(t, kubeClient, svc)
 	addNodes(t, kubeClient, nodes)
-	lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType}
+	lb := &LoadBalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options}
 
 	filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)}
 	rawFilter, err := json.Marshal(filter)
@@ -732,7 +732,7 @@ func testCiliumUpdateLoadBalancerAddNodeWithNewIpHolderNamingConvention(t *testi
 	ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
 	addService(t, kubeClient, svc)
 	addNodes(t, kubeClient, nodes)
-	lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType}
+	lb := &LoadBalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options}
 
 	filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)}
 	rawFilter, err := json.Marshal(filter)
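
These tests construct LoadBalancers positionally, so every literal had to grow a trailing &Options argument. An equivalent field-named literal (using the same test fixtures mc, zone, kubeClient, ciliumClient, ciliumLBType) would survive future field additions without touching each test:

```go
// Equivalent to the positional literals above; field names make the
// construction robust to the struct gaining more fields later.
lb := &LoadBalancers{
	Client:           mc,
	Zone:             zone,
	KubeClient:       kubeClient,
	CiliumClient:     ciliumClient,
	LoadBalancerType: ciliumLBType,
	Options:          &Options,
}
```
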
diff --git a/cloud/linode/cloud.go b/cloud/linode/cloud.go
index 8b1d67a4..5e183761 100644
--- a/cloud/linode/cloud.go
+++ b/cloud/linode/cloud.go
@@ -31,10 +31,8 @@ const (
 
 var supportedLoadBalancerTypes = []string{ciliumLBType, nodeBalancerLBType}
 
-// Options is a configuration object for this cloudprovider implementation.
-// We expect it to be initialized with flags external to this package, likely in
-// main.go
-var Options struct {
+// OptionsConfig defines the configuration structure for cloud controller options
+type OptionsConfig struct {
 	KubeconfigFlag        *pflag.Flag
 	LinodeGoDebug         bool
 	EnableRouteController bool
@@ -61,8 +59,13 @@ var Options struct {
 	NodeCIDRMaskSizeIPv4  int
 	NodeCIDRMaskSizeIPv6  int
 	NodeBalancerPrefix    string
+	AllowEmptyNodeBalancerBackends bool
 }
 
+// Options is the global configuration instance used by the CCM.
+// We expect it to be initialized with flags external to this package, likely in main.go
+var Options OptionsConfig
+
 type linodeCloud struct {
 	client        client.Client
 	instances     cloudprovider.InstancesV2
@@ -204,7 +207,7 @@ func newCloud() (cloudprovider.Interface, error) {
 	lcloud := &linodeCloud{
 		client:                   linodeClient,
 		instances:                instanceCache,
-		loadbalancers:            newLoadbalancers(linodeClient, region),
+		loadbalancers:            NewLoadbalancers(linodeClient, region),
 		routes:                   routes,
 		linodeTokenHealthChecker: healthChecker,
 	}
@@ -225,7 +228,7 @@ func (c *linodeCloud) Initialize(clientBuilder cloudprovider.ControllerClientBui
 		go c.linodeTokenHealthChecker.Run(stopCh)
 	}
 
-	lb, assertion := c.loadbalancers.(*loadbalancers)
+	lb, assertion := c.loadbalancers.(*LoadBalancers)
 	if !assertion {
 		klog.Error("type assertion during Initialize() failed")
 		return
diff --git a/cloud/linode/cloud_test.go b/cloud/linode/cloud_test.go
index b335b939..a23df57a 100644
--- a/cloud/linode/cloud_test.go
+++ b/cloud/linode/cloud_test.go
@@ -182,10 +182,10 @@ func Test_linodeCloud_LoadBalancer(t *testing.T) {
 			fields: fields{
 				client:        client,
 				instances:     newInstances(client),
-				loadbalancers: newLoadbalancers(client, "us-east"),
+				loadbalancers: NewLoadbalancers(client, "us-east"),
 				routes:        nil,
 			},
-			want:  newLoadbalancers(client, "us-east"),
+			want:  NewLoadbalancers(client, "us-east"),
 			want1: true,
 		},
 	}
@@ -229,7 +229,7 @@ func Test_linodeCloud_InstancesV2(t *testing.T) {
 			fields: fields{
 				client:        client,
 				instances:     newInstances(client),
-				loadbalancers: newLoadbalancers(client, "us-east"),
+				loadbalancers: NewLoadbalancers(client, "us-east"),
 				routes:        nil,
 			},
 			want: newInstances(client),
@@ -276,7 +276,7 @@ func Test_linodeCloud_Instances(t *testing.T) {
 			fields: fields{
 				client:        client,
 				instances:     newInstances(client),
-				loadbalancers: newLoadbalancers(client, "us-east"),
+				loadbalancers: NewLoadbalancers(client, "us-east"),
 				routes:        nil,
 			},
 			want: nil,
@@ -323,7 +323,7 @@ func Test_linodeCloud_Zones(t *testing.T) {
 			fields: fields{
 				client:        client,
 				instances:     newInstances(client),
-				loadbalancers: newLoadbalancers(client, "us-east"),
+				loadbalancers: NewLoadbalancers(client, "us-east"),
 				routes:        nil,
 			},
 			want: nil,
@@ -370,7 +370,7 @@ func Test_linodeCloud_Clusters(t *testing.T) {
 			fields: fields{
 				client:        client,
 				instances:     newInstances(client),
-				loadbalancers: newLoadbalancers(client, "us-east"),
+				loadbalancers: NewLoadbalancers(client, "us-east"),
 				routes:        nil,
 			},
 			want: nil,
@@ -419,7 +419,7 @@ func Test_linodeCloud_Routes(t *testing.T) {
 			fields: fields{
 				client:                client,
 				instances:             newInstances(client),
-				loadbalancers:         newLoadbalancers(client, "us-east"),
+				loadbalancers:         NewLoadbalancers(client, "us-east"),
 				routes:                r,
 				EnableRouteController: false,
 			},
@@ -431,7 +431,7 @@ func Test_linodeCloud_Routes(t *testing.T) {
 			fields: fields{
 				client:                client,
 				instances:             newInstances(client),
-				loadbalancers:         newLoadbalancers(client, "us-east"),
+				loadbalancers:         NewLoadbalancers(client, "us-east"),
 				routes:                r,
 				EnableRouteController: true,
 			},
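
The exported OptionsConfig keeps the package-level Options variable as the default instance. A hypothetical sketch of the flag wiring main.go is expected to do; the flag set name and flag name here are invented for illustration, not taken from this diff:

```go
// Hypothetical wiring in main.go; only Options/OptionsConfig come from this
// change, everything else is an assumption about the caller.
fs := pflag.NewFlagSet("ccm-linode", pflag.ExitOnError)
fs.BoolVar(&linode.Options.AllowEmptyNodeBalancerBackends,
	"allow-empty-nodebalancer-backends", false,
	"ensure NodeBalancers even when a Service currently has no backend nodes")
// The kubeconfig flag is registered elsewhere; Lookup returns nil if absent.
linode.Options.KubeconfigFlag = fs.Lookup("kubeconfig")
```
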
diff --git a/cloud/linode/loadbalancers.go b/cloud/linode/loadbalancers.go
index 1b8e9276..32070f2d 100644
--- a/cloud/linode/loadbalancers.go
+++ b/cloud/linode/loadbalancers.go
@@ -100,12 +100,13 @@ func (e lbNotFoundError) Error() string {
 	return fmt.Sprintf("LoadBalancer not found for service (%s)", e.serviceNn)
 }
 
-type loadbalancers struct {
-	client           client.Client
-	zone             string
-	kubeClient       kubernetes.Interface
-	ciliumClient     ciliumclient.CiliumV2alpha1Interface
-	loadBalancerType string
+type LoadBalancers struct {
+	Client           client.Client
+	Zone             string
+	KubeClient       kubernetes.Interface
+	CiliumClient     ciliumclient.CiliumV2alpha1Interface
+	LoadBalancerType string
+	Options          *OptionsConfig
 }
 
 type portConfigAnnotation struct {
@@ -127,12 +128,23 @@ type portConfig struct {
 	UDPCheckPort int
 }
 
-// newLoadbalancers returns a cloudprovider.LoadBalancer whose concrete type is a *loadbalancer.
-func newLoadbalancers(client client.Client, zone string) cloudprovider.LoadBalancer {
-	return &loadbalancers{client: client, zone: zone, loadBalancerType: Options.LoadBalancerType}
+// NewLoadbalancersWithOptions returns a cloudprovider.LoadBalancer with custom options
+func NewLoadbalancersWithOptions(client client.Client, zone string, options *OptionsConfig) cloudprovider.LoadBalancer {
+	return &LoadBalancers{
+		Client:           client,
+		Zone:             zone,
+		LoadBalancerType: options.LoadBalancerType,
+		Options:          options,
+	}
+}
+
+// NewLoadbalancers returns a cloudprovider.LoadBalancer whose concrete type is a *LoadBalancers.
+// This constructor uses the global Options for backward compatibility.
+func NewLoadbalancers(client client.Client, zone string) cloudprovider.LoadBalancer {
+	return NewLoadbalancersWithOptions(client, zone, &Options)
 }
 
-func (l *loadbalancers) getNodeBalancerForService(ctx context.Context, service *v1.Service) (*linodego.NodeBalancer, error) {
+func (l *LoadBalancers) GetNodeBalancerForService(ctx context.Context, service *v1.Service) (*linodego.NodeBalancer, error) {
 	rawID := service.GetAnnotations()[annotations.AnnLinodeNodeBalancerID]
 	id, idErr := strconv.Atoi(rawID)
 	hasIDAnn := idErr == nil && id != 0
@@ -144,13 +156,13 @@ func (l *loadbalancers) getNodeBalancerForService(ctx context.Context, service *
 	return l.getNodeBalancerByStatus(ctx, service)
 }
 
-func (l *loadbalancers) getLatestServiceLoadBalancerStatus(ctx context.Context, service *v1.Service) (v1.LoadBalancerStatus, error) {
+func (l *LoadBalancers) getLatestServiceLoadBalancerStatus(ctx context.Context, service *v1.Service) (v1.LoadBalancerStatus, error) {
 	err := l.retrieveKubeClient()
 	if err != nil {
 		return v1.LoadBalancerStatus{}, err
 	}
 
-	service, err = l.kubeClient.CoreV1().Services(service.Namespace).Get(ctx, service.Name, metav1.GetOptions{})
+	service, err = l.KubeClient.CoreV1().Services(service.Namespace).Get(ctx, service.Name, metav1.GetOptions{})
 	if err != nil {
 		return v1.LoadBalancerStatus{}, err
 	}
@@ -159,7 +171,7 @@ func (l *loadbalancers) getLatestServiceLoadBalancerStatus(ctx context.Context,
 
 // getNodeBalancerByStatus attempts to get the NodeBalancer from the IP or hostname specified in the
 // most recent LoadBalancer status.
-func (l *loadbalancers) getNodeBalancerByStatus(ctx context.Context, service *v1.Service) (nb *linodego.NodeBalancer, err error) {
+func (l *LoadBalancers) getNodeBalancerByStatus(ctx context.Context, service *v1.Service) (nb *linodego.NodeBalancer, err error) {
 	lb := service.Status.LoadBalancer
 	updatedLb, err := l.getLatestServiceLoadBalancerStatus(ctx, service)
 	if err != nil {
@@ -188,7 +200,7 @@ func (l *loadbalancers) getNodeBalancerByStatus(ctx context.Context, service *v1
 // The current NodeBalancer from getNodeBalancerForService is compared to the most recent
 // LoadBalancer status; if they are different (because of an updated NodeBalancerID
 // annotation), the old one is deleted.
-func (l *loadbalancers) cleanupOldNodeBalancer(ctx context.Context, service *v1.Service) error {
+func (l *LoadBalancers) cleanupOldNodeBalancer(ctx context.Context, service *v1.Service) error {
 	// unless there's an annotation, we can never get a past and current NB to differ,
 	// because they're looked up the same way
 	if _, ok := service.GetAnnotations()[annotations.AnnLinodeNodeBalancerID]; !ok {
@@ -205,7 +217,7 @@ func (l *loadbalancers) cleanupOldNodeBalancer(ctx context.Context, service *v1.
 		}
 	}
 
-	nb, err := l.getNodeBalancerForService(ctx, service)
+	nb, err := l.GetNodeBalancerForService(ctx, service)
 	if err != nil {
 		return err
 	}
@@ -214,7 +226,7 @@ func (l *loadbalancers) cleanupOldNodeBalancer(ctx context.Context, service *v1.
 		return nil
 	}
 
-	if err := l.client.DeleteNodeBalancer(ctx, previousNB.ID); err != nil {
+	if err := l.Client.DeleteNodeBalancer(ctx, previousNB.ID); err != nil {
 		return err
 	}
 
@@ -225,27 +237,27 @@ func (l *loadbalancers) cleanupOldNodeBalancer(ctx context.Context, service *v1.
 // GetLoadBalancerName returns the name of the load balancer.
 //
 // GetLoadBalancer will not modify service.
-func (l *loadbalancers) GetLoadBalancerName(_ context.Context, _ string, _ *v1.Service) string {
+func (l *LoadBalancers) GetLoadBalancerName(_ context.Context, _ string, _ *v1.Service) string {
 	unixNano := strconv.FormatInt(time.Now().UnixNano(), 16)
-	return fmt.Sprintf("%s-%s", Options.NodeBalancerPrefix, unixNano[len(unixNano)-12:])
+	return fmt.Sprintf("%s-%s", l.Options.NodeBalancerPrefix, unixNano[len(unixNano)-12:])
 }
 
 // GetLoadBalancer returns the *v1.LoadBalancerStatus of service.
 //
 // GetLoadBalancer will not modify service.
-func (l *loadbalancers) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
+func (l *LoadBalancers) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
 	ctx = sentry.SetHubOnContext(ctx)
 	sentry.SetTag(ctx, "cluster_name", clusterName)
 	sentry.SetTag(ctx, "service", service.Name)
 
 	// Handle LoadBalancers backed by Cilium
-	if l.loadBalancerType == ciliumLBType {
+	if l.LoadBalancerType == ciliumLBType {
 		return &v1.LoadBalancerStatus{
 			Ingress: service.Status.LoadBalancer.Ingress,
 		}, true, nil
 	}
 
-	nb, err := l.getNodeBalancerForService(ctx, service)
+	nb, err := l.GetNodeBalancerForService(ctx, service)
 	if err != nil {
 		var targetError lbNotFoundError
 		if errors.As(err, &targetError) {
@@ -256,21 +268,21 @@ func (l *loadbalancers) GetLoadBalancer(ctx context.Context, clusterName string,
 		}
 	}
 
-	return makeLoadBalancerStatus(service, nb), true, nil
+	return l.makeLoadBalancerStatus(service, nb), true, nil
 }
 
 // EnsureLoadBalancer ensures that the cluster is running a load balancer for
 // service.
 //
 // EnsureLoadBalancer will not modify service or nodes.
-func (l *loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (lbStatus *v1.LoadBalancerStatus, err error) {
+func (l *LoadBalancers) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (lbStatus *v1.LoadBalancerStatus, err error) {
 	ctx = sentry.SetHubOnContext(ctx)
 	sentry.SetTag(ctx, "cluster_name", clusterName)
 	sentry.SetTag(ctx, "service", service.Name)
 
 	serviceNn := getServiceNn(service)
 
 	// Handle LoadBalancers backed by Cilium
-	if l.loadBalancerType == ciliumLBType {
+	if l.LoadBalancerType == ciliumLBType {
 		klog.Infof("handling LoadBalancer Service %s as %s", serviceNn, ciliumLBClass)
 
 		if err = l.ensureCiliumBGPPeeringPolicy(ctx); err != nil {
@@ -296,8 +308,8 @@ func (l *loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName stri
 		}
 
 		var ipHolderSuffix string
-		if Options.IpHolderSuffix != "" {
-			ipHolderSuffix = Options.IpHolderSuffix
+		if l.Options.IpHolderSuffix != "" {
+			ipHolderSuffix = l.Options.IpHolderSuffix
 			klog.Infof("using parameter-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn)
 		}
 
@@ -321,7 +333,7 @@ func (l *loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName stri
 
 	// Handle LoadBalancers backed by NodeBalancers
 	var nb *linodego.NodeBalancer
-	nb, err = l.getNodeBalancerForService(ctx, service)
+	nb, err = l.GetNodeBalancerForService(ctx, service)
 	if err == nil {
 		if err = l.updateNodeBalancer(ctx, clusterName, service, nodes, nb); err != nil {
 			sentry.CaptureError(ctx, err)
@@ -349,7 +361,7 @@ func (l *loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName stri
 	}
 
 	klog.Infof("NodeBalancer (%d) has been ensured for service (%s)", nb.ID, serviceNn)
-	lbStatus = makeLoadBalancerStatus(service, nb)
+	lbStatus = l.makeLoadBalancerStatus(service, nb)
 
 	if !l.shouldPreserveNodeBalancer(service) {
 		if err := l.cleanupOldNodeBalancer(ctx, service); err != nil {
@@ -361,22 +373,27 @@ func (l *loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName stri
 	return lbStatus, nil
 }
 
-func (l *loadbalancers) updateNodeBalancer(
+func (l *LoadBalancers) updateNodeBalancer(
 	ctx context.Context,
 	clusterName string,
 	service *v1.Service,
 	nodes []*v1.Node,
 	nb *linodego.NodeBalancer,
 ) (err error) {
-	if len(nodes) == 0 {
+	if len(nodes) == 0 && !l.Options.AllowEmptyNodeBalancerBackends {
 		return fmt.Errorf("%w: service %s", errNoNodesAvailable, getServiceNn(service))
 	}
 
+	// Log warning when updating NodeBalancer without nodes
+	if len(nodes) == 0 && l.Options.AllowEmptyNodeBalancerBackends {
+		klog.Warningf("Updating NodeBalancer for service (%s) without backend nodes - load balancer will be non-functional until nodes are added", getServiceNn(service))
+	}
+
 	connThrottle := getConnectionThrottle(service)
 	if connThrottle != nb.ClientConnThrottle {
 		update := nb.GetUpdateOptions()
 		update.ClientConnThrottle = &connThrottle
-		nb, err = l.client.UpdateNodeBalancer(ctx, nb.ID, update)
+		nb, err = l.Client.UpdateNodeBalancer(ctx, nb.ID, update)
 		if err != nil {
 			sentry.CaptureError(ctx, err)
 			return err
@@ -387,21 +404,21 @@ func (l *loadbalancers) updateNodeBalancer(
 	if !reflect.DeepEqual(nb.Tags, tags) {
 		update := nb.GetUpdateOptions()
 		update.Tags = &tags
-		nb, err = l.client.UpdateNodeBalancer(ctx, nb.ID, update)
+		nb, err = l.Client.UpdateNodeBalancer(ctx, nb.ID, update)
 		if err != nil {
 			sentry.CaptureError(ctx, err)
 			return err
 		}
 	}
 
-	fwClient := firewall.LinodeClient{Client: l.client}
+	fwClient := firewall.LinodeClient{Client: l.Client}
 	err = fwClient.UpdateNodeBalancerFirewall(ctx, l.GetLoadBalancerName(ctx, clusterName, service), tags, service, nb)
 	if err != nil {
 		return err
 	}
 
 	// Get all of the NodeBalancer's configs
-	nbCfgs, err := l.client.ListNodeBalancerConfigs(ctx, nb.ID, nil)
+	nbCfgs, err := l.Client.ListNodeBalancerConfigs(ctx, nb.ID, nil)
 	if err != nil {
 		sentry.CaptureError(ctx, err)
 		return err
@@ -435,7 +452,7 @@ func (l *loadbalancers) updateNodeBalancer(
 		if currentNBCfg != nil {
 			// Obtain list of current NB nodes and convert it to map of node IDs
 			var currentNBNodes []linodego.NodeBalancerNode
-			currentNBNodes, err = l.client.ListNodeBalancerNodes(ctx, nb.ID, currentNBCfg.ID, nil)
+			currentNBNodes, err = l.Client.ListNodeBalancerNodes(ctx, nb.ID, currentNBCfg.ID, nil)
 			if err != nil {
 				// This error can be ignored, because if we fail to get nodes we can anyway rebuild the config from scratch,
 				// it would just cause the NB to reload config even if the node list did not change, so we prefer to send IDs when it is possible.
@@ -451,16 +468,16 @@ func (l *loadbalancers) updateNodeBalancer(
 		// Add all of the Nodes to the config
 		newNBNodes := make([]linodego.NodeBalancerConfigRebuildNodeOptions, 0, len(nodes))
 		subnetID := 0
-		if Options.NodeBalancerBackendIPv4SubnetID != 0 {
-			subnetID = Options.NodeBalancerBackendIPv4SubnetID
+		if l.Options.NodeBalancerBackendIPv4SubnetID != 0 {
+			subnetID = l.Options.NodeBalancerBackendIPv4SubnetID
 		}
 		backendIPv4Range, ok := service.GetAnnotations()[annotations.NodeBalancerBackendIPv4Range]
 		if ok {
-			if err = validateNodeBalancerBackendIPv4Range(backendIPv4Range); err != nil {
+			if err = l.validateNodeBalancerBackendIPv4Range(backendIPv4Range); err != nil {
 				return err
 			}
 		}
-		if len(Options.VPCNames) > 0 && !Options.DisableNodeBalancerVPCBackends {
+		if len(l.Options.VPCNames) > 0 && !l.Options.DisableNodeBalancerVPCBackends {
 			var id int
 			id, err = l.getSubnetIDForSVC(ctx, service)
 			if err != nil {
@@ -469,6 +486,7 @@ func (l *loadbalancers) updateNodeBalancer(
 			}
 			subnetID = id
 		}
+
 		for _, node := range nodes {
 			if _, ok := node.Annotations[annotations.AnnExcludeNodeFromNb]; ok {
 				klog.Infof("Node %s is excluded from NodeBalancer by annotation, skipping", node.Name)
@@ -493,7 +511,7 @@ func (l *loadbalancers) updateNodeBalancer(
 
 		if currentNBCfg == nil {
 			createOpts := newNBCfg.GetCreateOptions()
-			currentNBCfg, err = l.client.CreateNodeBalancerConfig(ctx, nb.ID, createOpts)
+			currentNBCfg, err = l.Client.CreateNodeBalancerConfig(ctx, nb.ID, createOpts)
 			if err != nil {
 				sentry.CaptureError(ctx, err)
 				return fmt.Errorf("[port %d] error creating NodeBalancer config: %w", int(port.Port), err)
@@ -510,7 +528,7 @@ func (l *loadbalancers) updateNodeBalancer(
 
 		rebuildOpts.Nodes = newNBNodes
 
-		if _, err = l.client.RebuildNodeBalancerConfig(ctx, nb.ID, currentNBCfg.ID, rebuildOpts); err != nil {
+		if _, err = l.Client.RebuildNodeBalancerConfig(ctx, nb.ID, currentNBCfg.ID, rebuildOpts); err != nil {
 			sentry.CaptureError(ctx, err)
 			return fmt.Errorf("[port %d] error rebuilding NodeBalancer config: %w", int(port.Port), err)
 		}
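
The new empty-backend gate in updateNodeBalancer (and mirrored in buildLoadBalancerRequest below) can be read as a single branch. A condensed, same-package restatement with unchanged behavior:

```go
// Condensation of the gate above; errNoNodesAvailable and getServiceNn
// are the package's existing helpers.
if len(nodes) == 0 {
	if !l.Options.AllowEmptyNodeBalancerBackends {
		return fmt.Errorf("%w: service %s", errNoNodesAvailable, getServiceNn(service))
	}
	// Flag is set: proceed, but make the degraded state visible.
	klog.Warningf("Updating NodeBalancer for service (%s) without backend nodes - load balancer will be non-functional until nodes are added", getServiceNn(service))
}
```
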
@@ -520,22 +538,23 @@
 }
 
 // UpdateLoadBalancer updates the NodeBalancer to have configs that match the Service's ports
-func (l *loadbalancers) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (err error) {
+func (l *LoadBalancers) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (err error) {
 	ctx = sentry.SetHubOnContext(ctx)
 	sentry.SetTag(ctx, "cluster_name", clusterName)
 	sentry.SetTag(ctx, "service", service.Name)
 
 	// handle LoadBalancers backed by Cilium
-	if l.loadBalancerType == ciliumLBType {
+	if l.LoadBalancerType == ciliumLBType {
 		klog.Infof("handling update for LoadBalancer Service %s/%s as %s", service.Namespace, service.Name, ciliumLBClass)
 		serviceNn := getServiceNn(service)
 		var ipHolderSuffix string
-		if Options.IpHolderSuffix != "" {
-			ipHolderSuffix = Options.IpHolderSuffix
+		if l.Options.IpHolderSuffix != "" {
+			ipHolderSuffix = l.Options.IpHolderSuffix
 			klog.V(3).Infof("using parameter-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn)
 		}
 		// make sure that IPs are shared properly on the Node if using load-balancers not backed by NodeBalancers
+
 		for _, node := range nodes {
 			if err = l.handleIPSharing(ctx, node, ipHolderSuffix); err != nil {
 				return err
@@ -552,7 +571,7 @@ func (l *loadbalancers) UpdateLoadBalancer(ctx context.Context, clusterName stri
 		return fmt.Errorf("failed to get latest LoadBalancer status for service (%s): %w", getServiceNn(service), err)
 	}
 
-	nb, err := l.getNodeBalancerForService(ctx, serviceWithStatus)
+	nb, err := l.GetNodeBalancerForService(ctx, serviceWithStatus)
 	if err != nil {
 		sentry.CaptureError(ctx, err)
 		return err
@@ -570,7 +589,7 @@ func (l *loadbalancers) UpdateLoadBalancer(ctx context.Context, clusterName stri
 
 // Delete any NodeBalancer configs for ports that no longer exist on the Service
 // Note: Don't build a map or other lookup structure here, it is not worth the overhead
-func (l *loadbalancers) deleteUnusedConfigs(ctx context.Context, nbConfigs []linodego.NodeBalancerConfig, servicePorts []v1.ServicePort) error {
+func (l *LoadBalancers) deleteUnusedConfigs(ctx context.Context, nbConfigs []linodego.NodeBalancerConfig, servicePorts []v1.ServicePort) error {
 	for _, nbc := range nbConfigs {
 		found := false
 		for _, sp := range servicePorts {
@@ -579,7 +598,7 @@ func (l *loadbalancers) deleteUnusedConfigs(ctx context.Context, nbConfigs []lin
 			}
 		}
 		if !found {
-			if err := l.client.DeleteNodeBalancerConfig(ctx, nbc.NodeBalancerID, nbc.ID); err != nil {
+			if err := l.Client.DeleteNodeBalancerConfig(ctx, nbc.NodeBalancerID, nbc.ID); err != nil {
 				return err
 			}
 		}
@@ -589,7 +608,7 @@ func (l *loadbalancers) deleteUnusedConfigs(ctx context.Context, nbConfigs []lin
 
 // shouldPreserveNodeBalancer determines whether a NodeBalancer should be deleted based on the
 // service's preserve annotation.
-func (l *loadbalancers) shouldPreserveNodeBalancer(service *v1.Service) bool {
+func (l *LoadBalancers) shouldPreserveNodeBalancer(service *v1.Service) bool {
 	return getServiceBoolAnnotation(service, annotations.AnnLinodeLoadBalancerPreserve)
 }
 
@@ -598,13 +617,13 @@ func (l *loadbalancers) shouldPreserveNodeBalancer(service *v1.Service) bool {
 // successfully deleted.
 //
 // EnsureLoadBalancerDeleted will not modify service.
-func (l *loadbalancers) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error {
+func (l *LoadBalancers) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error {
 	ctx = sentry.SetHubOnContext(ctx)
 	sentry.SetTag(ctx, "cluster_name", clusterName)
 	sentry.SetTag(ctx, "service", service.Name)
 
 	// Handle LoadBalancers backed by Cilium
-	if l.loadBalancerType == ciliumLBType {
+	if l.LoadBalancerType == ciliumLBType {
 		klog.Infof("handling LoadBalancer Service %s/%s as %s", service.Namespace, service.Name, ciliumLBClass)
 		if err := l.deleteSharedIP(ctx, service); err != nil {
 			return err
@@ -627,7 +646,7 @@ func (l *loadbalancers) EnsureLoadBalancerDeleted(ctx context.Context, clusterNa
 		return nil
 	}
 
-	nb, err := l.getNodeBalancerForService(ctx, service)
+	nb, err := l.GetNodeBalancerForService(ctx, service)
 	if err != nil {
 		var targetError lbNotFoundError
 		if errors.As(err, &targetError) {
@@ -650,12 +669,12 @@ func (l *loadbalancers) EnsureLoadBalancerDeleted(ctx context.Context, clusterNa
 		return nil
 	}
 
-	fwClient := firewall.LinodeClient{Client: l.client}
+	fwClient := firewall.LinodeClient{Client: l.Client}
 	if err = fwClient.DeleteNodeBalancerFirewall(ctx, service, nb); err != nil {
 		return err
 	}
 
-	if err = l.client.DeleteNodeBalancer(ctx, nb.ID); err != nil {
+	if err = l.Client.DeleteNodeBalancer(ctx, nb.ID); err != nil {
 		klog.Errorf("failed to delete NodeBalancer (%d) for service (%s): %s", nb.ID, serviceNn, err)
 		sentry.CaptureError(ctx, err)
 		return err
@@ -665,8 +684,8 @@ func (l *loadbalancers) EnsureLoadBalancerDeleted(ctx context.Context, clusterNa
 	return nil
 }
 
-func (l *loadbalancers) getNodeBalancerByHostname(ctx context.Context, service *v1.Service, hostname string) (*linodego.NodeBalancer, error) {
-	lbs, err := l.client.ListNodeBalancers(ctx, nil)
+func (l *LoadBalancers) getNodeBalancerByHostname(ctx context.Context, service *v1.Service, hostname string) (*linodego.NodeBalancer, error) {
+	lbs, err := l.Client.ListNodeBalancers(ctx, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -679,7 +698,7 @@ func (l *loadbalancers) getNodeBalancerByHostname(ctx context.Context, service *
 	return nil, lbNotFoundError{serviceNn: getServiceNn(service)}
 }
 
-func (l *loadbalancers) getNodeBalancerByIP(ctx context.Context, service *v1.Service, ip netip.Addr) (*linodego.NodeBalancer, error) {
+func (l *LoadBalancers) getNodeBalancerByIP(ctx context.Context, service *v1.Service, ip netip.Addr) (*linodego.NodeBalancer, error) {
 	var filter string
 	if ip.Is6() {
 		filter = fmt.Sprintf(`{"ipv6": "%v"}`, ip.String())
@@ -687,7 +706,7 @@ func (l *loadbalancers) getNodeBalancerByIP(ctx context.Context, service *v1.Ser
 		filter = fmt.Sprintf(`{"ipv4": "%v"}`, ip.String())
 	}
 
-	lbs, err := l.client.ListNodeBalancers(ctx, &linodego.ListOptions{Filter: filter})
+	lbs, err := l.Client.ListNodeBalancers(ctx, &linodego.ListOptions{Filter: filter})
 	if err != nil {
 		return nil, err
 	}
@@ -698,8 +717,8 @@ func (l *loadbalancers) getNodeBalancerByIP(ctx context.Context, service *v1.Ser
 	return &lbs[0], nil
 }
 
-func (l *loadbalancers) getNodeBalancerByID(ctx context.Context, service *v1.Service, id int) (*linodego.NodeBalancer, error) {
-	nb, err := l.client.GetNodeBalancer(ctx, id)
+func (l *LoadBalancers) getNodeBalancerByID(ctx context.Context, service *v1.Service, id int) (*linodego.NodeBalancer, error) {
+	nb, err := l.Client.GetNodeBalancer(ctx, id)
 	if err != nil {
 		var targetError *linodego.Error
 		if errors.As(err, &targetError) && targetError.Code == http.StatusNotFound {
@@ -710,13 +729,13 @@ func (l *loadbalancers) getNodeBalancerByID(ctx context.Context, service *v1.Ser
 	return nb, nil
 }
 
-func (l *loadbalancers) GetLoadBalancerTags(_ context.Context, clusterName string, service *v1.Service) []string {
+func (l *LoadBalancers) GetLoadBalancerTags(_ context.Context, clusterName string, service *v1.Service) []string {
 	tags := []string{}
 	if clusterName != "" {
 		tags = append(tags, clusterName)
 	}
 
-	tags = append(tags, Options.NodeBalancerTags...)
+	tags = append(tags, l.Options.NodeBalancerTags...)
 
 	tagStr, ok := service.GetAnnotations()[annotations.AnnLinodeLoadBalancerTags]
 	if ok {
@@ -727,13 +746,13 @@ func (l *loadbalancers) GetLoadBalancerTags(_ context.Context, clusterName strin
 }
 
 // GetLinodeNBType returns the NodeBalancer type for the service.
-func (l *loadbalancers) GetLinodeNBType(service *v1.Service) linodego.NodeBalancerPlanType {
+func (l *LoadBalancers) GetLinodeNBType(service *v1.Service) linodego.NodeBalancerPlanType {
 	typeStr, ok := service.GetAnnotations()[annotations.AnnLinodeNodeBalancerType]
 	if ok && linodego.NodeBalancerPlanType(typeStr) == linodego.NBTypePremium {
 		return linodego.NBTypePremium
 	}
 
-	return linodego.NodeBalancerPlanType(Options.DefaultNBType)
+	return linodego.NodeBalancerPlanType(l.Options.DefaultNBType)
 }
 
 // getVPCCreateOptions returns the VPC options for the NodeBalancer creation.
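
GetLinodeNBType now resolves the plan from the per-Service annotation with l.Options.DefaultNBType as the fallback. A condensed restatement of that lookup (same package; the helper name is illustrative):

```go
// nbPlanType restates GetLinodeNBType: a per-Service annotation can upgrade
// the plan to premium, otherwise the configured default applies.
func nbPlanType(svc *v1.Service, defaultType string) linodego.NodeBalancerPlanType {
	t := svc.GetAnnotations()[annotations.AnnLinodeNodeBalancerType]
	if linodego.NodeBalancerPlanType(t) == linodego.NBTypePremium {
		return linodego.NBTypePremium // explicit per-Service upgrade
	}
	return linodego.NodeBalancerPlanType(defaultType) // l.Options.DefaultNBType
}
```
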
subnetID, - IPv4Range: Options.NodeBalancerBackendIPv4Subnet, + IPv4Range: l.Options.NodeBalancerBackendIPv4Subnet, IPv4RangeAutoAssign: true, }, } @@ -816,7 +835,7 @@ func (l *loadbalancers) getVPCCreateOptions(ctx context.Context, service *v1.Ser return vpcCreateOpts, nil } -func (l *loadbalancers) createNodeBalancer(ctx context.Context, clusterName string, service *v1.Service, configs []*linodego.NodeBalancerConfigCreateOptions) (lb *linodego.NodeBalancer, err error) { +func (l *LoadBalancers) createNodeBalancer(ctx context.Context, clusterName string, service *v1.Service, configs []*linodego.NodeBalancerConfigCreateOptions) (lb *linodego.NodeBalancer, err error) { connThrottle := getConnectionThrottle(service) label := l.GetLoadBalancerName(ctx, clusterName, service) @@ -824,14 +843,14 @@ func (l *loadbalancers) createNodeBalancer(ctx context.Context, clusterName stri nbType := l.GetLinodeNBType(service) createOpts := linodego.NodeBalancerCreateOptions{ Label: &label, - Region: l.zone, + Region: l.Zone, ClientConnThrottle: &connThrottle, Configs: configs, Tags: tags, Type: nbType, } - if len(Options.VPCNames) > 0 && !Options.DisableNodeBalancerVPCBackends { + if len(l.Options.VPCNames) > 0 && !l.Options.DisableNodeBalancerVPCBackends { createOpts.VPCs, err = l.getVPCCreateOptions(ctx, service) if err != nil { return nil, err @@ -854,7 +873,7 @@ func (l *loadbalancers) createNodeBalancer(ctx context.Context, clusterName stri return nil, err } - fw, err := l.client.CreateFirewall(ctx, *fwcreateOpts) + fw, err := l.Client.CreateFirewall(ctx, *fwcreateOpts) if err != nil { return nil, err } @@ -863,10 +882,10 @@ func (l *loadbalancers) createNodeBalancer(ctx context.Context, clusterName stri // no need to deal with firewalls, continue creating nb's } - return l.client.CreateNodeBalancer(ctx, createOpts) + return l.Client.CreateNodeBalancer(ctx, createOpts) } -func (l *loadbalancers) buildNodeBalancerConfig(ctx context.Context, service *v1.Service, port v1.ServicePort) (linodego.NodeBalancerConfig, error) { +func (l *LoadBalancers) buildNodeBalancerConfig(ctx context.Context, service *v1.Service, port v1.ServicePort) (linodego.NodeBalancerConfig, error) { portConfigResult, err := getPortConfig(service, port) if err != nil { return linodego.NodeBalancerConfig{}, err @@ -951,13 +970,13 @@ func (l *loadbalancers) buildNodeBalancerConfig(ctx context.Context, service *v1 return config, nil } -func (l *loadbalancers) addTLSCert(ctx context.Context, service *v1.Service, nbConfig *linodego.NodeBalancerConfig, config portConfig) error { +func (l *LoadBalancers) addTLSCert(ctx context.Context, service *v1.Service, nbConfig *linodego.NodeBalancerConfig, config portConfig) error { err := l.retrieveKubeClient() if err != nil { return err } - nbConfig.SSLCert, nbConfig.SSLKey, err = getTLSCertInfo(ctx, l.kubeClient, service.Namespace, config) + nbConfig.SSLCert, nbConfig.SSLKey, err = getTLSCertInfo(ctx, l.KubeClient, service.Namespace, config) if err != nil { return err } @@ -970,8 +989,8 @@ func (l *loadbalancers) addTLSCert(ctx context.Context, service *v1.Service, nbC // 2. If the service has annotations specifying VPCName or SubnetName, use them. // 3. If CCM is configured with --nodebalancer-backend-ipv4-subnet-id, it will be used as the subnet ID. // 4. Else, use first VPCName and SubnetName to calculate subnet id for the service. 
-func (l *loadbalancers) getSubnetIDForSVC(ctx context.Context, service *v1.Service) (int, error) {
-	if len(Options.VPCNames) == 0 {
+func (l *LoadBalancers) getSubnetIDForSVC(ctx context.Context, service *v1.Service) (int, error) {
+	if len(l.Options.VPCNames) == 0 {
 		return 0, fmt.Errorf("CCM not configured with VPC, cannot create NodeBalancer with specified annotation")
 	}
 	// Check if the service has an annotation for NodeBalancerBackendSubnetID
@@ -988,49 +1007,55 @@ func (l *loadbalancers) getSubnetIDForSVC(ctx context.Context, service *v1.Servi
 	// If no VPCName or SubnetName is specified in annotations, but NodeBalancerBackendIPv4SubnetID is set,
 	// use the NodeBalancerBackendIPv4SubnetID as the subnet ID.
-	if !vpcOk && !subnetOk && Options.NodeBalancerBackendIPv4SubnetID != 0 {
-		return Options.NodeBalancerBackendIPv4SubnetID, nil
+	if !vpcOk && !subnetOk && l.Options.NodeBalancerBackendIPv4SubnetID != 0 {
+		return l.Options.NodeBalancerBackendIPv4SubnetID, nil
 	}
 
-	vpcName := Options.VPCNames[0]
+	vpcName := l.Options.VPCNames[0]
 	if vpcOk {
 		vpcName = specifiedVPCName
 	}
-	vpcID, err := GetVPCID(ctx, l.client, vpcName)
+	vpcID, err := GetVPCID(ctx, l.Client, vpcName)
 	if err != nil {
 		return 0, err
 	}
 
-	subnetName := Options.SubnetNames[0]
+	subnetName := l.Options.SubnetNames[0]
 	if subnetOk {
 		subnetName = specifiedSubnetName
 	}
 
 	// Use the VPC ID and Subnet Name to get the subnet ID
-	return GetSubnetID(ctx, l.client, vpcID, subnetName)
+	return GetSubnetID(ctx, l.Client, vpcID, subnetName)
 }
 
 // buildLoadBalancerRequest returns a linodego.NodeBalancer request
 // for the service across nodes.
-func (l *loadbalancers) buildLoadBalancerRequest(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*linodego.NodeBalancer, error) {
-	if len(nodes) == 0 {
+func (l *LoadBalancers) buildLoadBalancerRequest(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*linodego.NodeBalancer, error) {
+	if len(nodes) == 0 && !l.Options.AllowEmptyNodeBalancerBackends {
 		return nil, fmt.Errorf("%w: cluster %s, service %s", errNoNodesAvailable, clusterName, getServiceNn(service))
 	}
+
+	// Log warning when creating NodeBalancer without nodes
+	if len(nodes) == 0 && l.Options.AllowEmptyNodeBalancerBackends {
+		klog.Warningf("Creating NodeBalancer for service (%s) without backend nodes - load balancer will be non-functional until nodes are added", getServiceNn(service))
+	}
+
 	ports := service.Spec.Ports
 	configs := make([]*linodego.NodeBalancerConfigCreateOptions, 0, len(ports))
 
 	subnetID := 0
-	if Options.NodeBalancerBackendIPv4SubnetID != 0 {
-		subnetID = Options.NodeBalancerBackendIPv4SubnetID
+	if l.Options.NodeBalancerBackendIPv4SubnetID != 0 {
+		subnetID = l.Options.NodeBalancerBackendIPv4SubnetID
 	}
 	// Check for the NodeBalancerBackendIPv4Range annotation
 	backendIPv4Range, ok := service.GetAnnotations()[annotations.NodeBalancerBackendIPv4Range]
 	if ok {
-		if err := validateNodeBalancerBackendIPv4Range(backendIPv4Range); err != nil {
+		if err := l.validateNodeBalancerBackendIPv4Range(backendIPv4Range); err != nil {
 			return nil, err
 		}
 	}
-	if len(Options.VPCNames) > 0 && !Options.DisableNodeBalancerVPCBackends {
+	if len(l.Options.VPCNames) > 0 && !l.Options.DisableNodeBalancerVPCBackends {
 		id, err := l.getSubnetIDForSVC(ctx, service)
 		if err != nil {
 			return nil, err
@@ -1075,7 +1100,7 @@ func coerceString(str string, minLen, maxLen int, padding string) string {
 	return str
 }
 
-func (l *loadbalancers) buildNodeBalancerNodeConfigRebuildOptions(node *v1.Node, nodePort int32, subnetID int, protocol linodego.ConfigProtocol) (*linodego.NodeBalancerConfigRebuildNodeOptions, error) {
+func (l *LoadBalancers) buildNodeBalancerNodeConfigRebuildOptions(node *v1.Node, nodePort int32, subnetID int, protocol linodego.ConfigProtocol) (*linodego.NodeBalancerConfigRebuildNodeOptions, error) {
 	nodeIP, err := getNodePrivateIP(node, subnetID)
 	if err != nil {
 		return nil, fmt.Errorf("node %s does not have a private IP address: %w", node.Name, err)
@@ -1099,8 +1124,8 @@ func (l *loadbalancers) buildNodeBalancerNodeConfigRebuildOptions(node *v1.Node,
 	return nodeOptions, nil
 }
 
-func (l *loadbalancers) retrieveKubeClient() error {
-	if l.kubeClient != nil {
+func (l *LoadBalancers) retrieveKubeClient() error {
+	if l.KubeClient != nil {
 		return nil
 	}
 
@@ -1111,7 +1136,7 @@ func (l *loadbalancers) retrieveKubeClient() error {
 	// Check to see if --kubeconfig was set. If it was, build a kubeconfig from the given file.
 	// Otherwise, use the in-cluster config.
-	kubeconfigFlag := Options.KubeconfigFlag
+	kubeconfigFlag := l.Options.KubeconfigFlag
 	if kubeconfigFlag == nil || kubeconfigFlag.Value.String() == "" {
 		kubeConfig, err = rest.InClusterConfig()
 	} else {
@@ -1122,7 +1147,7 @@ func (l *loadbalancers) retrieveKubeClient() error {
 		return err
 	}
 
-	l.kubeClient, err = kubernetes.NewForConfig(kubeConfig)
+	l.KubeClient, err = kubernetes.NewForConfig(kubeConfig)
 	if err != nil {
 		return err
 	}
@@ -1284,7 +1309,7 @@ func getConnectionThrottle(service *v1.Service) int {
 	return connThrottle
 }
 
-func makeLoadBalancerStatus(service *v1.Service, nb *linodego.NodeBalancer) *v1.LoadBalancerStatus {
+func (l *LoadBalancers) makeLoadBalancerStatus(service *v1.Service, nb *linodego.NodeBalancer) *v1.LoadBalancerStatus {
 	ingress := v1.LoadBalancerIngress{
 		Hostname: *nb.Hostname,
 	}
@@ -1304,7 +1329,7 @@ func makeLoadBalancerStatus(service *v1.Service, nb *linodego.NodeBalancer) *v1.
 	}
 
 	// Check for per-service IPv6 annotation first, then fall back to global setting
-	useIPv6 := getServiceBoolAnnotation(service, annotations.AnnLinodeEnableIPv6Ingress) || Options.EnableIPv6ForLoadBalancers
+	useIPv6 := getServiceBoolAnnotation(service, annotations.AnnLinodeEnableIPv6Ingress) || l.Options.EnableIPv6ForLoadBalancers
 
 	// When IPv6 is enabled (either per-service or globally), include both IPv4 and IPv6
 	if useIPv6 && nb.IPv6 != nil && *nb.IPv6 != "" {
@@ -1356,16 +1381,16 @@ func getServiceBoolAnnotation(service *v1.Service, name string) bool {
 
 // validateNodeBalancerBackendIPv4Range validates the NodeBalancerBackendIPv4Range
 // annotation to be within the NodeBalancerBackendIPv4Subnet if it is set.
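// Hypothetical sketch (not part of this diff): a stand-in for the unexported
// isCIDRWithinCIDR helper that validateNodeBalancerBackendIPv4Range calls below,
// built only on the standard library's net/netip. The real helper's signature
// and edge-case handling may differ.
package main

import (
	"fmt"
	"net/netip"
)

// cidrWithinCIDR reports whether every address of inner falls inside outer.
func cidrWithinCIDR(outer, inner string) (bool, error) {
	o, err := netip.ParsePrefix(outer)
	if err != nil {
		return false, err
	}
	i, err := netip.ParsePrefix(inner)
	if err != nil {
		return false, err
	}
	// inner is contained iff its network address lies in outer and it is no wider.
	return o.Contains(i.Masked().Addr()) && i.Bits() >= o.Bits(), nil
}

func main() {
	ok, _ := cidrWithinCIDR("10.100.0.0/24", "10.100.0.16/30")
	fmt.Println(ok) // true: the /30 range sits inside the configured /24 subnet
}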
-func validateNodeBalancerBackendIPv4Range(backendIPv4Range string) error {
-	if Options.NodeBalancerBackendIPv4Subnet == "" {
+func (l *LoadBalancers) validateNodeBalancerBackendIPv4Range(backendIPv4Range string) error {
+	if l.Options.NodeBalancerBackendIPv4Subnet == "" {
 		return nil
 	}
-	withinCIDR, err := isCIDRWithinCIDR(Options.NodeBalancerBackendIPv4Subnet, backendIPv4Range)
+	withinCIDR, err := isCIDRWithinCIDR(l.Options.NodeBalancerBackendIPv4Subnet, backendIPv4Range)
 	if err != nil {
 		return fmt.Errorf("invalid IPv4 range: %w", err)
 	}
 	if !withinCIDR {
-		return fmt.Errorf("IPv4 range %s is not within the subnet %s", backendIPv4Range, Options.NodeBalancerBackendIPv4Subnet)
+		return fmt.Errorf("IPv4 range %s is not within the subnet %s", backendIPv4Range, l.Options.NodeBalancerBackendIPv4Subnet)
 	}
 	return nil
 }
diff --git a/cloud/linode/loadbalancers_test.go b/cloud/linode/loadbalancers_test.go
index 848c4d12..f5b4cfd7 100644
--- a/cloud/linode/loadbalancers_test.go
+++ b/cloud/linode/loadbalancers_test.go
@@ -370,7 +370,7 @@ func testCreateNodeBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI, a
 	for key, value := range annMap {
 		svc.Annotations[key] = value
 	}
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
@@ -392,9 +392,9 @@ func testCreateNodeBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI, a
 		return err
 	}
 
-	if nb.Region != lb.zone {
+	if nb.Region != lb.Zone {
 		t.Error("unexpected nodebalancer region")
-		t.Logf("expected: %s", lb.zone)
+		t.Logf("expected: %s", lb.Zone)
 		t.Logf("actual: %s", nb.Region)
 	}
 
@@ -484,12 +484,12 @@ func testCreateNodeBalancerWithNodeNoAddresses(t *testing.T, client *linodego.Cl
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
 	fakeClientset := fake.NewSimpleClientset()
-	lb.kubeClient = fakeClientset
+	lb.KubeClient = fakeClientset
 
 	defer func() {
 		_ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc)
@@ -716,12 +716,12 @@ func testUpdateNodeBalancerWithVPCBackend(t *testing.T, client *linodego.Client,
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
 	fakeClientset := fake.NewSimpleClientset()
-	lb.kubeClient = fakeClientset
+	lb.KubeClient = fakeClientset
 
 	defer func() {
 		_ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc)
@@ -803,12 +803,12 @@ func testCreateNodeBalancerWithVPCOnlySubnetFlag(t *testing.T, client *linodego.
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
 	fakeClientset := fake.NewSimpleClientset()
-	lb.kubeClient = fakeClientset
+	lb.KubeClient = fakeClientset
 
 	defer func() {
 		_ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc)
@@ -896,12 +896,12 @@ func testCreateNodeBalancerWithVPCNoFlagOrAnnotation(t *testing.T, client *linod
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
 	fakeClientset := fake.NewSimpleClientset()
-	lb.kubeClient = fakeClientset
+	lb.KubeClient = fakeClientset
 
 	defer func() {
 		_ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc)
@@ -985,12 +985,12 @@ func testCreateNodeBalancerWithVPCAnnotationOnly(t *testing.T, client *linodego.
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
 	fakeClientset := fake.NewSimpleClientset()
-	lb.kubeClient = fakeClientset
+	lb.KubeClient = fakeClientset
 
 	defer func() {
 		_ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc)
@@ -1070,12 +1070,12 @@ func testCreateNodeBalancerWithVPCOnlySubnetIDFlag(t *testing.T, client *linodeg
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
 	fakeClientset := fake.NewSimpleClientset()
-	lb.kubeClient = fakeClientset
+	lb.KubeClient = fakeClientset
 
 	defer func() {
 		_ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc)
@@ -1216,12 +1216,12 @@ func testUpdateLoadBalancerAddNode(t *testing.T, client *linodego.Client, f *fak
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
 	fakeClientset := fake.NewSimpleClientset()
-	lb.kubeClient = fakeClientset
+	lb.KubeClient = fakeClientset
 
 	defer func() {
 		_ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc)
@@ -1381,12 +1381,12 @@ func testUpdateLoadBalancerAddAnnotation(t *testing.T, client *linodego.Client,
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
 	fakeClientset := fake.NewSimpleClientset()
-	lb.kubeClient = fakeClientset
+	lb.KubeClient = fakeClientset
 
 	defer func() {
 		_ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc)
@@ -1456,12 +1456,12 @@ func testUpdateLoadBalancerAddPortAnnotation(t *testing.T, client *linodego.Clie
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
 	fakeClientset := fake.NewSimpleClientset()
-	lb.kubeClient = fakeClientset
+	lb.KubeClient = fakeClientset
 
 	defer func() {
 		_ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc)
@@ -1569,12 +1569,12 @@ func testVeryLongServiceName(t *testing.T, client *linodego.Client, _ *fakeAPI)
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
 	fakeClientset := fake.NewSimpleClientset()
-	lb.kubeClient = fakeClientset
+	lb.KubeClient = fakeClientset
 
 	defer func() {
 		_ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc)
@@ -1637,12 +1637,12 @@ func testUpdateLoadBalancerAddTags(t *testing.T, client *linodego.Client, _ *fak
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
 	fakeClientset := fake.NewSimpleClientset()
-	lb.kubeClient = fakeClientset
+	lb.KubeClient = fakeClientset
 
 	clusterName := "linodelb"
 	defer func() {
@@ -1722,7 +1722,7 @@ func testUpdateLoadBalancerAddTLSPort(t *testing.T, client *linodego.Client, _ *
 		NodePort: int32(30001),
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
@@ -1732,8 +1732,8 @@ func testUpdateLoadBalancerAddTLSPort(t *testing.T, client *linodego.Client, _ *
 	}()
 
 	fakeClientset := fake.NewSimpleClientset()
-	lb.kubeClient = fakeClientset
-	addTLSSecret(t, lb.kubeClient)
+	lb.KubeClient = fakeClientset
+	addTLSSecret(t, lb.KubeClient)
 
 	stubService(fakeClientset, svc)
 	lbStatus, err := lb.EnsureLoadBalancer(t.Context(), "linodelb", svc, nodes)
@@ -1803,12 +1803,12 @@ func testUpdateLoadBalancerAddProxyProtocol(t *testing.T, client *linodego.Clien
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
 	fakeClientset := fake.NewSimpleClientset()
-	lb.kubeClient = fakeClientset
+	lb.KubeClient = fakeClientset
 
 	for _, tc := range []struct {
 		name string
@@ -1856,13 +1856,13 @@ func testUpdateLoadBalancerAddProxyProtocol(t *testing.T, client *linodego.Clien
 				_ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc)
 			}()
 			nodeBalancer, err := client.CreateNodeBalancer(t.Context(), linodego.NodeBalancerCreateOptions{
-				Region: lb.zone,
+				Region: lb.Zone,
 			})
 			if err != nil {
 				t.Fatalf("failed to create NodeBalancer: %s", err)
 			}
 
-			svc.Status.LoadBalancer = *makeLoadBalancerStatus(svc, nodeBalancer)
+			svc.Status.LoadBalancer = *lb.makeLoadBalancerStatus(svc, nodeBalancer)
 			svc.SetAnnotations(map[string]string{
 				annotations.AnnLinodeDefaultProxyProtocol: string(tc.proxyProtocolConfig),
 			})
@@ -1931,12 +1931,12 @@ func testUpdateLoadBalancerAddNewFirewall(t *testing.T, client *linodego.Client,
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
 	fakeClientset := fake.NewSimpleClientset()
-	lb.kubeClient = fakeClientset
+	lb.KubeClient = fakeClientset
 
 	defer func() {
 		_ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc)
@@ -1984,7 +1984,7 @@ func testUpdateLoadBalancerAddNewFirewall(t *testing.T, client *linodego.Client,
 		t.Fatalf("failed to get NodeBalancer via status: %s", err)
 	}
 
-	firewalls, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{})
+	firewalls, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{})
 	if err != nil {
 		t.Fatalf("failed to List Firewalls %s", err)
 	}
@@ -2032,12 +2032,12 @@ func testUpdateLoadBalancerAddNewFirewallACL(t *testing.T, client *linodego.Clie
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
 	fakeClientset := fake.NewSimpleClientset()
-	lb.kubeClient = fakeClientset
+	lb.KubeClient = fakeClientset
 
 	defer func() {
 		_ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc)
@@ -2055,7 +2055,7 @@ func testUpdateLoadBalancerAddNewFirewallACL(t *testing.T, client *linodego.Clie
 		t.Fatalf("failed to get NodeBalancer via status: %s", err)
 	}
 
-	firewalls, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{})
+	firewalls, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{})
 	if err != nil {
 		t.Fatalf("Failed to list nodeBalancer firewalls %s", err)
 	}
@@ -2113,7 +2113,7 @@ func testUpdateLoadBalancerAddNewFirewallACL(t *testing.T, client *linodego.Clie
 		t.Fatalf("failed to get NodeBalancer via status: %s", err)
 	}
 
-	firewallsNew, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{})
+	firewallsNew, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{})
 	if err != nil {
 		t.Fatalf("failed to List Firewalls %s", err)
 	}
@@ -2164,12 +2164,12 @@ func testUpdateLoadBalancerDeleteFirewallRemoveACL(t *testing.T, client *linodeg
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
 	fakeClientset := fake.NewSimpleClientset()
-	lb.kubeClient = fakeClientset
+	lb.KubeClient = fakeClientset
 
 	svc.SetAnnotations(map[string]string{
 		annotations.AnnLinodeCloudFirewallACL: `{
@@ -2195,7 +2195,7 @@ func testUpdateLoadBalancerDeleteFirewallRemoveACL(t *testing.T, client *linodeg
 		t.Fatalf("failed to get NodeBalancer via status: %s", err)
 	}
 
-	firewalls, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{})
+	firewalls, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{})
 	if err != nil {
 		t.Fatalf("Failed to list nodeBalancer firewalls %s", err)
 	}
@@ -2220,7 +2220,7 @@ func testUpdateLoadBalancerDeleteFirewallRemoveACL(t *testing.T, client *linodeg
 		t.Errorf("UpdateLoadBalancer returned an error: %s", err)
 	}
 
-	firewallsNew, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{})
+	firewallsNew, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{})
 	if err != nil {
 		t.Fatalf("failed to List Firewalls %s", err)
 	}
@@ -2263,12 +2263,12 @@ func testUpdateLoadBalancerUpdateFirewallRemoveACLaddID(t *testing.T, client *li
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
 	fakeClientset := fake.NewSimpleClientset()
-	lb.kubeClient = fakeClientset
+	lb.KubeClient = fakeClientset
 
 	svc.SetAnnotations(map[string]string{
 		annotations.AnnLinodeCloudFirewallACL: `{
@@ -2294,7 +2294,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveACLaddID(t *testing.T, client *li
 		t.Fatalf("failed to get NodeBalancer via status: %s", err)
 	}
 
-	firewalls, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{})
+	firewalls, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{})
 	if err != nil {
 		t.Fatalf("Failed to list nodeBalancer firewalls %s", err)
 	}
@@ -2347,7 +2347,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveACLaddID(t *testing.T, client *li
 		t.Fatalf("failed to get NodeBalancer via status: %s", err)
 	}
 
-	firewallsNew, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{})
+	firewallsNew, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{})
 	if err != nil {
 		t.Fatalf("failed to List Firewalls %s", err)
 	}
@@ -2403,12 +2403,12 @@ func testUpdateLoadBalancerUpdateFirewallRemoveIDaddACL(t *testing.T, client *li
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
 	fakeClientset := fake.NewSimpleClientset()
-	lb.kubeClient = fakeClientset
+	lb.KubeClient = fakeClientset
 
 	fwClient := firewall.LinodeClient{Client: client}
 	fw, err := fwClient.CreateFirewall(t.Context(), linodego.FirewallCreateOptions{
@@ -2452,7 +2452,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveIDaddACL(t *testing.T, client *li
 		t.Fatalf("failed to get NodeBalancer via status: %s", err)
 	}
 
-	firewalls, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{})
+	firewalls, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{})
 	if err != nil {
 		t.Fatalf("Failed to list nodeBalancer firewalls %s", err)
 	}
@@ -2487,7 +2487,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveIDaddACL(t *testing.T, client *li
 		t.Fatalf("failed to get NodeBalancer via status: %s", err)
 	}
 
-	firewallsNew, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{})
+	firewallsNew, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{})
 	if err != nil {
 		t.Fatalf("failed to List Firewalls %s", err)
 	}
@@ -2550,12 +2550,12 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
 	fakeClientset := fake.NewSimpleClientset()
-	lb.kubeClient = fakeClientset
+	lb.KubeClient = fakeClientset
 
 	defer func() {
 		_ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc)
@@ -2573,7 +2573,7 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie
 		t.Fatalf("failed to get NodeBalancer via status: %s", err)
 	}
 
-	firewalls, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{})
+	firewalls, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{})
 	if err != nil {
 		t.Fatalf("Failed to list nodeBalancer firewalls %s", err)
 	}
@@ -2611,7 +2611,7 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie
 		t.Fatalf("failed to get NodeBalancer via status: %s", err)
 	}
 
-	firewallsNew, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{})
+	firewallsNew, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{})
 	if err != nil {
 		t.Fatalf("failed to List Firewalls %s", err)
 	}
@@ -2657,7 +2657,7 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie
 		t.Fatalf("failed to get NodeBalancer via status: %s", err)
 	}
 
-	firewallsNew, err = lb.client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{})
+	firewallsNew, err = lb.Client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{})
 	if err != nil {
 		t.Fatalf("failed to List Firewalls %s", err)
 	}
@@ -2703,7 +2703,7 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie
 		t.Fatalf("failed to get NodeBalancer via status: %s", err)
 	}
 
-	firewallsNew, err = lb.client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{})
+	firewallsNew, err = lb.Client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{})
 	if err != nil {
 		t.Fatalf("failed to List Firewalls %s", err)
 	}
@@ -2786,12 +2786,12 @@ func testUpdateLoadBalancerUpdateFirewall(t *testing.T, client *linodego.Client,
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
 	fakeClientset := fake.NewSimpleClientset()
-	lb.kubeClient = fakeClientset
+	lb.KubeClient = fakeClientset
 
 	defer func() {
 		_ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc)
@@ -2822,7 +2822,7 @@ func testUpdateLoadBalancerUpdateFirewall(t *testing.T, client *linodego.Client,
 		t.Fatalf("failed to get NodeBalancer via status: %s", err)
 	}
 
-	firewalls, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{})
+	firewalls, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{})
 	if err != nil {
 		t.Fatalf("Failed to list nodeBalancer firewalls %s", err)
 	}
@@ -2858,7 +2858,7 @@ func testUpdateLoadBalancerUpdateFirewall(t *testing.T, client *linodego.Client,
 		t.Fatalf("failed to get NodeBalancer via status: %s", err)
 	}
 
-	firewallsNew, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{})
+	firewallsNew, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{})
 	if err != nil {
 		t.Fatalf("failed to List Firewalls %s", err)
 	}
@@ -2919,12 +2919,12 @@ func testUpdateLoadBalancerDeleteFirewallRemoveID(t *testing.T, client *linodego
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
 	fakeClientset := fake.NewSimpleClientset()
-	lb.kubeClient = fakeClientset
+	lb.KubeClient = fakeClientset
 
 	defer func() {
 		_ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc)
@@ -2956,7 +2956,7 @@ func testUpdateLoadBalancerDeleteFirewallRemoveID(t *testing.T, client *linodego
 		t.Fatalf("failed to get NodeBalancer via status: %s", err)
 	}
 
-	firewalls, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{})
+	firewalls, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{})
 	if err != nil {
 		t.Errorf("Error in listing firewalls %s", err)
 	}
@@ -2976,7 +2976,7 @@ func testUpdateLoadBalancerDeleteFirewallRemoveID(t *testing.T, client *linodego
 		t.Errorf("UpdateLoadBalancer returned an error: %s", err)
 	}
 
-	firewallsNew, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{})
+	firewallsNew, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{})
 	if err != nil {
 		t.Fatalf("failed to List Firewalls %s", err)
 	}
@@ -3020,7 +3020,7 @@ func testUpdateLoadBalancerAddNodeBalancerID(t *testing.T, client *linodego.Clie
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
@@ -3029,19 +3029,19 @@ func testUpdateLoadBalancerAddNodeBalancerID(t *testing.T, client *linodego.Clie
 	}()
 
 	fakeClientset := fake.NewSimpleClientset()
-	lb.kubeClient = fakeClientset
+	lb.KubeClient = fakeClientset
 
 	nodeBalancer, err := client.CreateNodeBalancer(t.Context(), linodego.NodeBalancerCreateOptions{
-		Region: lb.zone,
+		Region: lb.Zone,
 	})
 	if err != nil {
 		t.Fatalf("failed to create NodeBalancer: %s", err)
 	}
 
-	svc.Status.LoadBalancer = *makeLoadBalancerStatus(svc, nodeBalancer)
+	svc.Status.LoadBalancer = *lb.makeLoadBalancerStatus(svc, nodeBalancer)
 
 	newNodeBalancer, err := client.CreateNodeBalancer(t.Context(), linodego.NodeBalancerCreateOptions{
-		Region: lb.zone,
+		Region: lb.Zone,
 	})
 	if err != nil {
 		t.Fatalf("failed to create new NodeBalancer: %s", err)
@@ -3062,7 +3062,7 @@ func testUpdateLoadBalancerAddNodeBalancerID(t *testing.T, client *linodego.Clie
 		t.Errorf("GetLoadBalancer returned an error: %s", err)
 	}
 
-	expectedLBStatus := makeLoadBalancerStatus(svc, newNodeBalancer)
+	expectedLBStatus := lb.makeLoadBalancerStatus(svc, newNodeBalancer)
 	if !reflect.DeepEqual(expectedLBStatus, lbStatus) {
 		t.Errorf("LoadBalancer status mismatch: expected %v, got %v", expectedLBStatus, lbStatus)
 	}
@@ -3990,7 +3990,7 @@ func testBuildLoadBalancerRequest(t *testing.T, client *linodego.Client, _ *fake
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
@@ -4042,7 +4042,7 @@ func testEnsureLoadBalancerPreserveAnnotation(t *testing.T, client *linodego.Cli
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
@@ -4082,7 +4082,7 @@ func testEnsureLoadBalancerPreserveAnnotation(t *testing.T, client *linodego.Cli
 		t.Fatal(err)
 	}
 
-	svc.Status.LoadBalancer = *makeLoadBalancerStatus(svc, nb)
+	svc.Status.LoadBalancer = *lb.makeLoadBalancerStatus(svc, nb)
 	err = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc)
 	didDelete := fake.didRequestOccur(http.MethodDelete, fmt.Sprintf("/nodebalancers/%d", nb.ID), "")
@@ -4159,7 +4159,7 @@ func testEnsureLoadBalancerDeleted(t *testing.T, client *linodego.Client, fake *
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
@@ -4212,12 +4212,12 @@ func testEnsureExistingLoadBalancer(t *testing.T, client *linodego.Client, _ *fa
 		},
 	}
 
-	lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers)
+	lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers)
 	if !assertion {
 		t.Error("type assertion failed")
 	}
-	lb.kubeClient = fake.NewSimpleClientset()
-	addTLSSecret(t, lb.kubeClient)
+	lb.KubeClient = fake.NewSimpleClientset()
+	addTLSSecret(t, lb.KubeClient)
 
 	configs := []*linodego.NodeBalancerConfigCreateOptions{}
 	nb, err := lb.createNodeBalancer(t.Context(), "linodelb", svc, configs)
@@ -4225,7 +4225,7 @@ func testEnsureExistingLoadBalancer(t *testing.T, client *linodego.Client, _ *fa
 		t.Fatal(err)
 	}
 
-	svc.Status.LoadBalancer = *makeLoadBalancerStatus(svc, nb)
+	svc.Status.LoadBalancer = *lb.makeLoadBalancerStatus(svc, nb)
 	defer func() {
 		_ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc)
 	}()
 	getLBStatus, exists, err := lb.GetLoadBalancer(t.Context(), "linodelb", svc)
 	if err != nil {
@@ -4336,14 +4336,20 @@ func testMakeLoadBalancerStatus(t *testing.T, client *linodego.Client, _ *fakeAP
 			IP:       ipv4,
 		}},
 	}
-	status := makeLoadBalancerStatus(svc, nb)
+
+	l := LoadBalancers{
+		Options: &OptionsConfig{
+			EnableIPv6ForLoadBalancers: false,
+		},
+	}
+	status := l.makeLoadBalancerStatus(svc, nb)
 	if !reflect.DeepEqual(status, expectedStatus) {
 		t.Errorf("expected status for basic service to be %#v; got %#v", expectedStatus, status)
 	}
 
 	svc.Annotations[annotations.AnnLinodeHostnameOnlyIngress] = "true"
 	expectedStatus.Ingress[0] = v1.LoadBalancerIngress{Hostname: hostname}
-	status = makeLoadBalancerStatus(svc, nb)
+	status = l.makeLoadBalancerStatus(svc, nb)
 	if !reflect.DeepEqual(status, expectedStatus) {
 		t.Errorf("expected status for %q annotated service to be %#v; got %#v", annotations.AnnLinodeHostnameOnlyIngress, expectedStatus, status)
 	}
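// Hypothetical sketch (not part of this diff): why the tests above switch from
// mutating the package-global Options to building a LoadBalancers value with its
// own *OptionsConfig. With injected options, two instances can carry different
// settings in one process, so tests no longer race on shared global state. The
// types below are illustrative stand-ins, not the CCM's.
package main

import "fmt"

type optionsConfig struct{ EnableIPv6ForLoadBalancers bool }

type loadBalancers struct{ Options *optionsConfig }

// ingressKinds mirrors the per-instance decision makeLoadBalancerStatus now makes.
func (l *loadBalancers) ingressKinds() string {
	if l.Options.EnableIPv6ForLoadBalancers {
		return "ipv4+ipv6"
	}
	return "ipv4"
}

func main() {
	v4only := &loadBalancers{Options: &optionsConfig{}}
	dual := &loadBalancers{Options: &optionsConfig{EnableIPv6ForLoadBalancers: true}}
	fmt.Println(v4only.ingressKinds(), dual.ingressKinds()) // ipv4 ipv4+ipv6
}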
t.Errorf("expected status for %q annotated service to be %#v; got %#v", annotations.AnnLinodeHostnameOnlyIngress, expectedStatus, status) } t.Setenv("LINODE_HOSTNAME_ONLY_INGRESS", "false") expectedStatus.Ingress[0] = v1.LoadBalancerIngress{Hostname: hostname} - status = makeLoadBalancerStatus(svc, nb) + status = l.makeLoadBalancerStatus(svc, nb) if reflect.DeepEqual(status, expectedStatus) { t.Errorf("expected status for %q annotated service to be %#v; got %#v", annotations.AnnLinodeHostnameOnlyIngress, expectedStatus, status) } t.Setenv("LINODE_HOSTNAME_ONLY_INGRESS", "banana") - expectedStatus.Ingress[0] = v1.LoadBalancerIngress{Hostname: hostname} - status = makeLoadBalancerStatus(svc, nb) + status = l.makeLoadBalancerStatus(svc, nb) if reflect.DeepEqual(status, expectedStatus) { t.Errorf("expected status for %q annotated service to be %#v; got %#v", annotations.AnnLinodeHostnameOnlyIngress, expectedStatus, status) } os.Unsetenv("LINODE_HOSTNAME_ONLY_INGRESS") } -func getLatestNbNodesForService(t *testing.T, client *linodego.Client, svc *v1.Service, lb *loadbalancers) []linodego.NodeBalancerNode { +func getLatestNbNodesForService(t *testing.T, client *linodego.Client, svc *v1.Service, lb *LoadBalancers) []linodego.NodeBalancerNode { t.Helper() nb, err := lb.getNodeBalancerByStatus(t.Context(), svc) if err != nil { @@ -4509,12 +4525,12 @@ func testCleanupDoesntCall(t *testing.T, client *linodego.Client, fakeAPI *fakeA Annotations: map[string]string{annotations.AnnLinodeNodeBalancerID: strconv.Itoa(nb2.ID)}, }, } - svc.Status.LoadBalancer = *makeLoadBalancerStatus(svc, nb1) - svcAnn.Status.LoadBalancer = *makeLoadBalancerStatus(svcAnn, nb1) - lb, assertion := newLoadbalancers(client, region).(*loadbalancers) + lb, assertion := NewLoadbalancers(client, region).(*LoadBalancers) if !assertion { t.Error("type assertion failed") } + svc.Status.LoadBalancer = *lb.makeLoadBalancerStatus(svc, nb1) + svcAnn.Status.LoadBalancer = *lb.makeLoadBalancerStatus(svcAnn, nb1) fakeAPI.ResetRequests() t.Run("non-annotated service shouldn't call the API during cleanup", func(t *testing.T) { @@ -4562,7 +4578,7 @@ func testUpdateLoadBalancerNodeExcludedByAnnotation(t *testing.T, client *linode }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -4571,15 +4587,15 @@ func testUpdateLoadBalancerNodeExcludedByAnnotation(t *testing.T, client *linode }() fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset nodeBalancer, err := client.CreateNodeBalancer(t.Context(), linodego.NodeBalancerCreateOptions{ - Region: lb.zone, + Region: lb.Zone, }) if err != nil { t.Fatalf("failed to create NodeBalancer: %s", err) } - svc.Status.LoadBalancer = *makeLoadBalancerStatus(svc, nodeBalancer) + svc.Status.LoadBalancer = *lb.makeLoadBalancerStatus(svc, nodeBalancer) stubService(fakeClientset, svc) svc.SetAnnotations(map[string]string{ annotations.AnnLinodeNodeBalancerID: strconv.Itoa(nodeBalancer.ID), @@ -4805,7 +4821,7 @@ func testUpdateLoadBalancerNoNodes(t *testing.T, client *linodego.Client, _ *fak }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -4814,15 +4830,15 @@ func testUpdateLoadBalancerNoNodes(t *testing.T, client *linodego.Client, _ *fak }() fakeClientset := 
fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset nodeBalancer, err := client.CreateNodeBalancer(t.Context(), linodego.NodeBalancerCreateOptions{ - Region: lb.zone, + Region: lb.Zone, }) if err != nil { t.Fatalf("failed to create NodeBalancer: %s", err) } - svc.Status.LoadBalancer = *makeLoadBalancerStatus(svc, nodeBalancer) + svc.Status.LoadBalancer = *lb.makeLoadBalancerStatus(svc, nodeBalancer) stubService(fakeClientset, svc) svc.SetAnnotations(map[string]string{ annotations.AnnLinodeNodeBalancerID: strconv.Itoa(nodeBalancer.ID), @@ -4843,12 +4859,12 @@ func testUpdateLoadBalancerNoNodes(t *testing.T, client *linodego.Client, _ *fak func testGetNodeBalancerByStatus(t *testing.T, client *linodego.Client, _ *fakeAPI) { t.Helper() - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset for _, test := range []struct { name string @@ -4886,7 +4902,7 @@ func testGetNodeBalancerByStatus(t *testing.T, client *linodego.Client, _ *fakeA if err != nil { t.Fatal(err) } - test.service.Status.LoadBalancer = *makeLoadBalancerStatus(test.service, expectedNB) + test.service.Status.LoadBalancer = *lb.makeLoadBalancerStatus(test.service, expectedNB) stubService(fakeClientset, test.service) actualNB, err := lb.getNodeBalancerByStatus(t.Context(), test.service) @@ -4908,7 +4924,7 @@ func testGetNodeBalancerByStatus(t *testing.T, client *linodego.Client, _ *fakeA func testGetNodeBalancerForServiceIDDoesNotExist(t *testing.T, client *linodego.Client, _ *fakeAPI) { t.Helper() - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -4934,7 +4950,7 @@ func testGetNodeBalancerForServiceIDDoesNotExist(t *testing.T, client *linodego. }, } - _, err := lb.getNodeBalancerForService(t.Context(), svc) + _, err := lb.GetNodeBalancerForService(t.Context(), svc) if err == nil { t.Fatal("expected getNodeBalancerForService to return an error") } @@ -4952,12 +4968,12 @@ func testGetNodeBalancerForServiceIDDoesNotExist(t *testing.T, client *linodego. 
func testEnsureNewLoadBalancerWithNodeBalancerID(t *testing.T, client *linodego.Client, _ *fakeAPI) { t.Helper() - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } nodeBalancer, err := client.CreateNodeBalancer(t.Context(), linodego.NodeBalancerCreateOptions{ - Region: lb.zone, + Region: lb.Zone, }) if err != nil { t.Fatalf("failed to create NodeBalancer: %s", err) @@ -5051,12 +5067,12 @@ func testEnsureNewLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI }, }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } - lb.kubeClient = fake.NewSimpleClientset() - addTLSSecret(t, lb.kubeClient) + lb.KubeClient = fake.NewSimpleClientset() + addTLSSecret(t, lb.KubeClient) defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() @@ -5069,7 +5085,7 @@ func testEnsureNewLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI func testGetLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI) { t.Helper() - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -5137,11 +5153,11 @@ func testGetLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI) { t.Fatal(err) } fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() - lbStatus := makeLoadBalancerStatus(svc, nb) + lbStatus := lb.makeLoadBalancerStatus(svc, nb) svc.Status.LoadBalancer = *lbStatus stubService(fakeClientset, svc) stubService(fakeClientset, svc2) @@ -5492,14 +5508,16 @@ func Test_loadbalancers_GetLinodeNBType(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - l := &loadbalancers{ - client: tt.fields.client, - zone: tt.fields.zone, - kubeClient: tt.fields.kubeClient, - ciliumClient: tt.fields.ciliumClient, - loadBalancerType: tt.fields.loadBalancerType, + l := &LoadBalancers{ + Client: tt.fields.client, + Zone: tt.fields.zone, + KubeClient: tt.fields.kubeClient, + CiliumClient: tt.fields.ciliumClient, + LoadBalancerType: tt.fields.loadBalancerType, + Options: &OptionsConfig{ + DefaultNBType: string(tt.defaultNB), + }, } - Options.DefaultNBType = string(tt.defaultNB) if got := l.GetLinodeNBType(tt.args.service); !reflect.DeepEqual(got, tt.want) { t.Errorf("loadbalancers.GetLinodeNBType() = %v, want %v", got, tt.want) } @@ -5528,15 +5546,20 @@ func Test_validateNodeBalancerBackendIPv4Range(t *testing.T) { }, } - nbBackendSubnet := Options.NodeBalancerBackendIPv4Subnet - defer func() { - Options.NodeBalancerBackendIPv4Subnet = nbBackendSubnet - }() - Options.NodeBalancerBackendIPv4Subnet = "10.100.0.0/24" + l := &LoadBalancers{ + Client: nil, + Zone: "", + KubeClient: nil, + CiliumClient: nil, + LoadBalancerType: "nodebalancer", + Options: &OptionsConfig{ + NodeBalancerBackendIPv4Subnet: "10.100.0.0/24", + }, + } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if err := validateNodeBalancerBackendIPv4Range(tt.args.backendIPv4Range); (err != nil) != tt.wantErr { + if err := l.validateNodeBalancerBackendIPv4Range(tt.args.backendIPv4Range); (err != nil) != tt.wantErr { 
t.Errorf("validateNodeBalancerBackendIPv4Range() error = %v, wantErr %v", err, tt.wantErr) } }) diff --git a/cloud/linode/service_controller.go b/cloud/linode/service_controller.go index ef909428..1eff5fc8 100644 --- a/cloud/linode/service_controller.go +++ b/cloud/linode/service_controller.go @@ -19,13 +19,13 @@ import ( var retryInterval = time.Minute * 1 type serviceController struct { - loadbalancers *loadbalancers + loadbalancers *LoadBalancers informer v1informers.ServiceInformer queue workqueue.TypedDelayingInterface[any] } -func newServiceController(loadbalancers *loadbalancers, informer v1informers.ServiceInformer) *serviceController { +func newServiceController(loadbalancers *LoadBalancers, informer v1informers.ServiceInformer) *serviceController { return &serviceController{ loadbalancers: loadbalancers, informer: informer, diff --git a/cloud/linode/service_controller_test.go b/cloud/linode/service_controller_test.go index bcb8c340..e844ed82 100644 --- a/cloud/linode/service_controller_test.go +++ b/cloud/linode/service_controller_test.go @@ -24,7 +24,7 @@ func Test_serviceController_Run(t *testing.T) { informer := informers.NewSharedInformerFactory(kubeClient, 0).Core().V1().Services() mockQueue := workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "test"}) - loadbalancers, assertion := newLoadbalancers(client, "us-east").(*loadbalancers) + loadbalancers, assertion := NewLoadbalancers(client, "us-east").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -53,7 +53,7 @@ func Test_serviceController_Run(t *testing.T) { func Test_serviceController_processNextDeletion(t *testing.T) { type fields struct { - loadbalancers *loadbalancers + loadbalancers *LoadBalancers queue workqueue.TypedDelayingInterface[any] Client *mocks.MockClient } @@ -70,7 +70,7 @@ func Test_serviceController_processNextDeletion(t *testing.T) { loadbalancers: nil, }, Setup: func(f *fields) { - f.loadbalancers = &loadbalancers{client: f.Client, zone: "test", loadBalancerType: Options.LoadBalancerType} + f.loadbalancers = &LoadBalancers{Client: f.Client, Zone: "test", LoadBalancerType: Options.LoadBalancerType} f.queue = workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "testQueue"}) f.queue.Add("test") }, @@ -83,7 +83,7 @@ func Test_serviceController_processNextDeletion(t *testing.T) { loadbalancers: nil, }, Setup: func(f *fields) { - f.loadbalancers = &loadbalancers{client: f.Client, zone: "test", loadBalancerType: Options.LoadBalancerType} + f.loadbalancers = &LoadBalancers{Client: f.Client, Zone: "test", LoadBalancerType: Options.LoadBalancerType} f.queue = workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "testQueue"}) svc := createTestService() f.queue.Add(svc) @@ -105,7 +105,7 @@ func Test_serviceController_processNextDeletion(t *testing.T) { tt.Setup(&tt.fields) s.loadbalancers = tt.fields.loadbalancers s.queue = tt.fields.queue - s.loadbalancers.client = tt.fields.Client + s.loadbalancers.Client = tt.fields.Client if got := s.processNextDeletion(); got != tt.want { t.Errorf("serviceController.processNextDeletion() = %v, want %v", got, tt.want) } diff --git a/go.mod b/go.mod index f519d76f..02752b1b 100644 --- a/go.mod +++ b/go.mod @@ -141,16 +141,16 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect - golang.org/x/crypto v0.39.0 // indirect - golang.org/x/mod v0.25.0 // 
diff --git a/go.mod b/go.mod
index f519d76f..02752b1b 100644
--- a/go.mod
+++ b/go.mod
@@ -141,16 +141,16 @@ require (
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
 	go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect
-	golang.org/x/crypto v0.39.0 // indirect
-	golang.org/x/mod v0.25.0 // indirect
-	golang.org/x/net v0.41.0 // indirect
+	golang.org/x/crypto v0.41.0 // indirect
+	golang.org/x/mod v0.26.0 // indirect
+	golang.org/x/net v0.43.0 // indirect
 	golang.org/x/oauth2 v0.30.0 // indirect
-	golang.org/x/sync v0.15.0 // indirect
-	golang.org/x/sys v0.33.0 // indirect
-	golang.org/x/term v0.32.0 // indirect
-	golang.org/x/text v0.26.0 // indirect
+	golang.org/x/sync v0.16.0 // indirect
+	golang.org/x/sys v0.35.0 // indirect
+	golang.org/x/term v0.34.0 // indirect
+	golang.org/x/text v0.28.0 // indirect
 	golang.org/x/time v0.9.0 // indirect
-	golang.org/x/tools v0.33.0 // indirect
+	golang.org/x/tools v0.35.0 // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 // indirect
 	google.golang.org/grpc v1.72.1 // indirect
@@ -178,6 +178,7 @@ (
 // Fixes for `unknown revision v0.0.0` reported by `go list -modfile=go.mod -m -json -mod=mod all`
 replace (
+	github.com/linode/linodego => github.com/komer3/linodego v0.0.0-20250815223828-0e5518dcb256
 	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.33.0
 	k8s.io/cri-api => k8s.io/cri-api v0.33.0
 	k8s.io/cri-client => k8s.io/cri-client v0.33.0
diff --git a/go.sum b/go.sum
index 3f232c98..6f567474 100644
--- a/go.sum
+++ b/go.sum
@@ -194,6 +194,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
 github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
+github.com/komer3/linodego v0.0.0-20250815223828-0e5518dcb256 h1:iSmjmSvz/0FfuOMtqaPSQtzB0a9uK1/LIZ+h5Basopg=
+github.com/komer3/linodego v0.0.0-20250815223828-0e5518dcb256/go.mod h1:LoQZ8hW8ZcXh/DJdYADwPCtVP8duyckLRGXwwwh55SU=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -202,8 +204,6 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
-github.com/linode/linodego v1.53.1-0.20250709175023-9b152d30578c h1:WlZm+YNHBuphycMZG2s2+F04hx2wx1ShuOwPAIInjP8=
-github.com/linode/linodego v1.53.1-0.20250709175023-9b152d30578c/go.mod h1:bI949fZaVchjWyKIA08hNyvAcV6BAS+PM2op3p7PAWA=
 github.com/mackerelio/go-osstat v0.2.5 h1:+MqTbZUhoIt4m8qzkVoXUJg1EuifwlAJSk4Yl2GXh+o=
 github.com/mackerelio/go-osstat v0.2.5/go.mod h1:atxwWF+POUZcdtR1wnsUcQxTytoHG4uhl2AKKzrOajY=
 github.com/magiconair/properties v1.8.9 h1:nWcCbLq1N2v/cpNsy5WvQ37Fb+YElfq20WJ/a8RkpQM=
@@ -391,31 +391,31 @@ golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
-golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
+golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
+golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
 golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e h1:4qufH0hlUYs6AO6XmZC3GqfDPGSXHVXUFR6OND+iJX4=
 golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
-golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
+golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
+golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
-golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
+golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
+golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
 golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
 golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
-golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
+golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -427,15 +427,15 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
-golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
+golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
-golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
+golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
+golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
-golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
+golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
+golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
 golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
 golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -444,8 +444,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
-golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
+golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
+golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/main.go b/main.go
index 81e403e8..36455a7a 100644
--- a/main.go
+++ b/main.go
@@ -101,6 +101,7 @@ func main() {
 	command.Flags().BoolVar(&linode.Options.DisableNodeBalancerVPCBackends, "disable-nodebalancer-vpc-backends", false, "disables nodebalancer backends in VPCs (when enabled, nodebalancers will only have private IPs as backends for backward compatibility)")
 	command.Flags().StringVar(&linode.Options.NodeBalancerPrefix, "nodebalancer-prefix", "ccm", fmt.Sprintf("Name prefix for NodeBalancers. (max. %v char.)", linode.NodeBalancerPrefixCharLimit))
 	command.Flags().BoolVar(&linode.Options.DisableIPv6NodeCIDRAllocation, "disable-ipv6-node-cidr-allocation", false, "disables IPv6 node cidr allocation by ipam controller (when enabled, IPv6 cidr ranges will be allocated to nodes)")
+	command.Flags().BoolVar(&linode.Options.AllowEmptyNodeBalancerBackends, "allow-empty-nodebalancer-backends", false, "allows creating NodeBalancers without backend nodes (useful for external management of backends)")
 
 	// Set static flags
 	command.Flags().VisitAll(func(fl *pflag.Flag) {
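// Hypothetical sketch (not part of this diff): the new flag wired into a minimal
// pflag set, showing its default and how an operator would opt in. The flag name
// and default mirror the registration above; the surrounding program is invented.
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	var allowEmpty bool
	fs := pflag.NewFlagSet("ccm", pflag.ExitOnError)
	fs.BoolVar(&allowEmpty, "allow-empty-nodebalancer-backends", false,
		"allows creating NodeBalancers without backend nodes (useful for external management of backends)")

	// e.g. linode-cloud-controller-manager --allow-empty-nodebalancer-backends=true
	_ = fs.Parse([]string{"--allow-empty-nodebalancer-backends=true"})
	fmt.Println("empty backends allowed:", allowEmpty) // true
}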