
Commit da31636

switch to initializingworkspace multicluster-provider
On-behalf-of: SAP <simon.bein@sap.com>
Signed-off-by: Simon Bein <simontheleg@gmail.com>
1 parent 9ee997f commit da31636

1 file changed


docs/content/concepts/workspaces/workspace-initialization.md

Lines changed: 115 additions & 115 deletions
@@ -97,11 +97,78 @@ You can use this url to construct a kubeconfig for your controller. To do so, us
 
 When writing a custom initializer, the following needs to be taken into account:
 
-* You need to use the kcp-dev controller-runtime fork, as regular controller-runtime is not able to work since under the hood all LogicalClusters have the same name
+* We strongly recommend using the kcp [initializingworkspace multicluster-provider](github.com/kcp-dev/multicluster-provider) to build your custom initializer
 * You need to update LogicalClusters using patches; they cannot be updated using the update API
 
 Keeping this in mind, you can use the following example as a starting point for your initialization controller
 
+=== "reconcile.go"
+
+    ```Go
+    package main
+
+    import (
+        "context"
+        "slices"
+
+        "github.com/go-logr/logr"
+        kcpcorev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1"
+        "github.com/kcp-dev/kcp/sdk/apis/tenancy/initialization"
+        ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+        "sigs.k8s.io/controller-runtime/pkg/cluster"
+        "sigs.k8s.io/controller-runtime/pkg/reconcile"
+        mcbuilder "sigs.k8s.io/multicluster-runtime/pkg/builder"
+        mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager"
+        mcreconcile "sigs.k8s.io/multicluster-runtime/pkg/reconcile"
+    )
+
+    type Reconciler struct {
+        Log             logr.Logger
+        InitializerName kcpcorev1alpha1.LogicalClusterInitializer
+        ClusterGetter   func(context.Context, string) (cluster.Cluster, error)
+    }
+
+    func (r *Reconciler) Reconcile(ctx context.Context, req mcreconcile.Request) (reconcile.Result, error) {
+        log := r.Log.WithValues("clustername", req.ClusterName)
+        log.Info("Reconciling")
+
+        // create a client scoped to the logical cluster the request came from
+        cluster, err := r.ClusterGetter(ctx, req.ClusterName)
+        if err != nil {
+            return reconcile.Result{}, err
+        }
+        client := cluster.GetClient()
+
+        lc := &kcpcorev1alpha1.LogicalCluster{}
+        if err := client.Get(ctx, req.NamespacedName, lc); err != nil {
+            return reconcile.Result{}, err
+        }
+
+        // check if your initializer is still set on the logicalcluster
+        if slices.Contains(lc.Status.Initializers, r.InitializerName) {
+
+            // your logic to initialize a Workspace goes here
+            log.Info("Starting to initialize cluster")
+
+            // after your initialization is done, don't forget to remove your initializer.
+            // You will need to use a patch to update the LogicalCluster
+            patch := ctrlclient.MergeFrom(lc.DeepCopy())
+            lc.Status.Initializers = initialization.EnsureInitializerAbsent(r.InitializerName, lc.Status.Initializers)
+            if err := client.Status().Patch(ctx, lc, patch); err != nil {
+                return reconcile.Result{}, err
+            }
+        }
+
+        return reconcile.Result{}, nil
+    }
+
+    func (r *Reconciler) SetupWithManager(mgr mcmanager.Manager) error {
+        return mcbuilder.ControllerManagedBy(mgr).
+            For(&kcpcorev1alpha1.LogicalCluster{}).
+            Complete(r)
+    }
+    ```
+
 === "main.go"
 
     ```Go
@@ -112,139 +179,72 @@ Keeping this in mind, you can use the following example as a starting point for
         "fmt"
         "log/slog"
         "os"
-        "slices"
         "strings"
-
+
         "github.com/go-logr/logr"
         kcpcorev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1"
-        "github.com/kcp-dev/kcp/sdk/apis/tenancy/initialization"
+        "github.com/kcp-dev/multicluster-provider/initializingworkspaces"
+        "golang.org/x/sync/errgroup"
+        "k8s.io/client-go/kubernetes/scheme"
         "k8s.io/client-go/tools/clientcmd"
         ctrl "sigs.k8s.io/controller-runtime"
-        "sigs.k8s.io/controller-runtime/pkg/client"
-        "sigs.k8s.io/controller-runtime/pkg/kcp"
         "sigs.k8s.io/controller-runtime/pkg/manager"
-        "sigs.k8s.io/controller-runtime/pkg/reconcile"
-    )
-
-    type Reconciler struct {
-        Client          client.Client
-        Log             logr.Logger
-        InitializerName kcpcorev1alpha1.LogicalClusterInitializer
-    }
-
-    func main() {
+        mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager"
+    )
+
+    // glue and setup code
+    func main() {
         if err := execute(); err != nil {
-            fmt.Println(err)
-            os.Exit(1)
+            fmt.Println(err)
+            os.Exit(1)
         }
-    }
-
-    func execute() error {
-        kubeconfigpath := "<path-to-kubeconfig>"
-
+    }
+    func execute() error {
+        // your kubeconfig here
+        kubeconfigpath := "<your-kubeconfig>"
+
         config, err := clientcmd.BuildConfigFromFlags("", kubeconfigpath)
         if err != nil {
-            return err
+            return err
         }
-
+
+        // since the initializer's name is the last part of the hostname, we can take it from there
+        initializerName := config.Host[strings.LastIndex(config.Host, "/")+1:]
+
+        provider, err := initializingworkspaces.New(config, initializingworkspaces.Options{InitializerName: initializerName})
+        if err != nil {
+            return err
+        }
+
         logger := logr.FromSlogHandler(slog.NewTextHandler(os.Stderr, nil))
         ctrl.SetLogger(logger)
-
-        mgr, err := kcp.NewClusterAwareManager(config, manager.Options{
-            Logger: logger,
-        })
+
+        mgr, err := mcmanager.New(config, provider, manager.Options{Logger: logger})
         if err != nil {
-            return err
+            return err
         }
-        if err := kcpcorev1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
-            return err
+
+        // add the logicalcluster scheme
+        if err := kcpcorev1alpha1.AddToScheme(scheme.Scheme); err != nil {
+            return err
         }
-
-        // since the initializer's name is the last part of the hostname, we can take it from there
-        initializerName := config.Host[strings.LastIndex(config.Host, "/")+1:]
-
+
         r := Reconciler{
-            Client:          mgr.GetClient(),
-            Log:             mgr.GetLogger().WithName("initializer-controller"),
-            InitializerName: kcpcorev1alpha1.LogicalClusterInitializer(initializerName),
+            Log:             mgr.GetLogger().WithName("initializer-controller"),
+            InitializerName: kcpcorev1alpha1.LogicalClusterInitializer(initializerName),
+            ClusterGetter:   mgr.GetCluster,
         }
-
+
         if err := r.SetupWithManager(mgr); err != nil {
-            return err
+            return err
        }
         mgr.GetLogger().Info("Setup complete")
-
-        if err := mgr.Start(context.Background()); err != nil {
-            return err
-        }
-
-        return nil
-    }
-
-    func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
-        return ctrl.NewControllerManagedBy(mgr).
-            For(&kcpcorev1alpha1.LogicalCluster{}).
-            // we need to use kcp.WithClusterInContext here to target the correct logical clusters during reconciliation
-            Complete(kcp.WithClusterInContext(r))
-    }
-
-    func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
-        log := r.Log.WithValues("clustername", req.ClusterName)
-        log.Info("Reconciling")
-
-        lc := &kcpcorev1alpha1.LogicalCluster{}
-        if err := r.Client.Get(ctx, req.NamespacedName, lc); err != nil {
-            return reconcile.Result{}, err
-        }
-
-        // check if your initializer is still set on the logicalcluster
-        if slices.Contains(lc.Status.Initializers, r.InitializerName) {
-
-            log.Info("Starting to initialize cluster")
-            // your logic here to initialize a Workspace
-
-            // after your initialization is done, don't forget to remove your initializer
-            // Since LogicalCluster objects cannot be directly updated, we need to create a patch.
-            patch := client.MergeFrom(lc.DeepCopy())
-            lc.Status.Initializers = initialization.EnsureInitializerAbsent(r.InitializerName, lc.Status.Initializers)
-            if err := r.Client.Status().Patch(ctx, lc, patch); err != nil {
-                return reconcile.Result{}, err
-            }
-        }
-
-        return reconcile.Result{}, nil
-    }
-    ```
-
-=== "kubeconfig"
-
-    ```yaml
-    apiVersion: v1
-    clusters:
-    - cluster:
-        certificate-authority-data: <your-certificate-authority>
-        # obtain the server url from the status of your WorkspaceType
-        server: "<initializing-workspace-url>"
-      name: finalizer
-    contexts:
-    - context:
-        cluster: finalizer
-        user: <user-with-sufficient-permissions>
-      name: finalizer
-    current-context: finalizer
-    kind: Config
-    preferences: {}
-    users:
-    - name: <user-with-sufficient-permissions>
-      user:
-        token: <user-token>
-    ```
-
-=== "go.mod"
-
-    ```Go
-    ...
-    // replace upstream controller-runtime with kcp cluster aware fork
-    replace sigs.k8s.io/controller-runtime v0.19.7 => github.com/kcp-dev/controller-runtime v0.19.0-kcp.1
-    ...
+
+        // start the provider and manager
+        g, ctx := errgroup.WithContext(context.Background())
+        g.Go(func() error { return provider.Run(ctx, mgr) })
+        g.Go(func() error { return mgr.Start(ctx) })
+
+        return g.Wait()
+    }
     ```
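
The initializer-name derivation in the new main.go is plain string slicing on the virtual workspace URL: the name is whatever follows the last `/`. A standalone sketch of just that step, using a made-up URL (the real value comes from the status of your WorkspaceType and will look different in your deployment):

```Go
package main

import (
    "fmt"
    "strings"
)

func main() {
    // Hypothetical initializingworkspaces virtual workspace URL; host and path
    // are illustrative only and will differ in a real kcp setup.
    host := "https://kcp.example.com:6443/services/initializingworkspaces/root:example"

    // Same extraction as in main.go above: the initializer name is the last path segment.
    initializerName := host[strings.LastIndex(host, "/")+1:]
    fmt.Println(initializerName) // prints "root:example"
}
```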

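Because this commit also drops the go.mod tab with the `replace` directive for the kcp controller-runtime fork, the example's dependency set changes: upstream controller-runtime is used as-is, and the multicluster-provider and multicluster-runtime modules come in. A rough go.mod sketch, assuming the module roots match the import paths used above; the module path and every version below are placeholders, not values from the kcp docs, so pick versions that your kcp release actually supports:

```Go
module example.com/workspace-initializer // placeholder module path

go 1.24 // placeholder Go version

require (
    // versions below are placeholders; align them with your kcp release
    github.com/go-logr/logr v0.0.0
    github.com/kcp-dev/kcp/sdk v0.0.0
    github.com/kcp-dev/multicluster-provider v0.0.0
    golang.org/x/sync v0.0.0
    k8s.io/client-go v0.0.0
    sigs.k8s.io/controller-runtime v0.0.0
    sigs.k8s.io/multicluster-runtime v0.0.0
)
```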