Skip to content

Commit 1a05eb6

Browse files
committed
minor
1 parent 0fe37f0 commit 1a05eb6

File tree

5 files changed

+15
-25
lines changed

5 files changed

+15
-25
lines changed

pkg/common/func.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -50,10 +50,10 @@ func NewInterLinkConfig() (InterLinkConfig, error) {
5050
return InterLinkConfig{}, err
5151
}
5252

53-
log.G(context.Background()).Info("Loading InterLink config from " + path)
53+
log.G(context.Background()).Info("\u2705 Loading InterLink config from " + path)
5454
yfile, err := os.ReadFile(path)
5555
if err != nil {
56-
log.G(context.Background()).Error("Error opening config file, exiting...")
56+
log.G(context.Background()).Error("\u274C Error opening config file, exiting...")
5757
return InterLinkConfig{}, err
5858
}
5959
yaml.Unmarshal(yfile, &InterLinkConfigInst)

pkg/docker/Create.go

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -66,8 +66,8 @@ func (h *SidecarHandler) prepareDockerRuns(podData commonIL.RetrievedPodData, w
6666

6767
if volume.PersistentVolumeClaim != nil {
6868
if _, ok := pathsOfVolumes[volume.PersistentVolumeClaim.ClaimName]; !ok {
69-
// WIP: this is a temporary solution to mount CVMFS volumes
70-
pathsOfVolumes[volume.PersistentVolumeClaim.ClaimName] = "/mnt/cvmfs"
69+
// WIP: this is a temporary solution to mount CVMFS volumes for persistent volume claims case
70+
pathsOfVolumes[volume.PersistentVolumeClaim.ClaimName] = "/cvmfs"
7171
}
7272

7373
}
@@ -94,10 +94,10 @@ func (h *SidecarHandler) prepareDockerRuns(podData commonIL.RetrievedPodData, w
9494

9595
// if the container is requesting 0 GPU, skip the GPU assignment
9696
if numGpusRequested == 0 {
97-
log.G(h.Ctx).Info("Container " + containerName + " is not requesting a GPU")
97+
log.G(h.Ctx).Info("\u2705 Container " + containerName + " is not requesting a GPU")
9898
} else {
9999

100-
log.G(h.Ctx).Info("Container " + containerName + " is requesting " + val.String() + " GPU")
100+
log.G(h.Ctx).Info("\u2705 Container " + containerName + " is requesting " + val.String() + " GPU")
101101

102102
isGpuRequested = true
103103

@@ -327,6 +327,10 @@ func (h *SidecarHandler) CreateHandler(w http.ResponseWriter, r *http.Request) {
327327

328328
dindContainerArgs := []string{"run"}
329329
dindContainerArgs = append(dindContainerArgs, gpuArgsAsArray...)
330+
if _, err := os.Stat("/cvmfs"); err == nil {
331+
dindContainerArgs = append(dindContainerArgs, "-v", "/cvmfs:/cvmfs")
332+
}
333+
330334
dindContainerArgs = append(dindContainerArgs, "--privileged", "-v", "/home:/home", "-v", "/var/lib/docker/overlay2:/var/lib/docker/overlay2", "-v", "/var/lib/docker/image:/var/lib/docker/image", "-d", "--name", string(data.Pod.UID)+"_dind", dindImage)
331335

332336
var dindContainerID string

pkg/docker/Status.go

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,6 @@ func (h *SidecarHandler) StatusHandler(w http.ResponseWriter, r *http.Request) {
6767

6868
containerstatus := strings.Split(execReturn.Stdout, " ")
6969

70-
// TODO: why first container?
7170
if execReturn.Stdout != "" {
7271
log.G(h.Ctx).Info("\u2705 [STATUS CALL] The container " + container.Name + " is in the state: " + containerstatus[0])
7372

pkg/docker/aux.go

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -127,7 +127,6 @@ func prepareMounts(Ctx context.Context, config commonIL.InterLinkConfig, data co
127127
}
128128

129129
for _, emptyDir := range cont.EmptyDirs {
130-
log.G(Ctx).Info("-- EmptyDir to handle " + emptyDir)
131130
if containerName == podNamespace+"-"+podUID+"-"+cont.Name {
132131
paths, err := mountData(Ctx, config, data.Pod, emptyDir, container)
133132
if err != nil {

pkg/docker/gpustrategies/NvidiaHandler.go

Lines changed: 5 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -58,8 +58,6 @@ func (a *GPUManager) Init() error {
5858
// Discover implements the Discover function of the GPUManager interface
5959
func (a *GPUManager) Discover() error {
6060

61-
log.G(a.Ctx).Info("Discovering GPUs...")
62-
6361
count, ret := nvml.DeviceGetCount()
6462
if ret != nvml.SUCCESS {
6563
return fmt.Errorf("Unable to get device count: %v", nvml.ErrorString(ret))
@@ -92,21 +90,19 @@ func (a *GPUManager) Discover() error {
9290

9391
// print the GPUSpecsList if the length is greater than 0
9492
if len(a.GPUSpecsList) > 0 {
95-
log.G(a.Ctx).Info("Discovered GPUs:")
93+
log.G(a.Ctx).Info("\u2705 Discovered GPUs:")
9694
for _, gpuSpec := range a.GPUSpecsList {
97-
log.G(a.Ctx).Info(fmt.Sprintf("Name: %s, UUID: %s, Type: %s, Available: %t, Index: %d", gpuSpec.Name, gpuSpec.UUID, gpuSpec.Type, gpuSpec.Available, gpuSpec.Index))
95+
log.G(a.Ctx).Info(fmt.Sprintf("\u2705 Name: %s, UUID: %s, Type: %s, Available: %t, Index: %d", gpuSpec.Name, gpuSpec.UUID, gpuSpec.Type, gpuSpec.Available, gpuSpec.Index))
9896
}
9997
} else {
100-
log.G(a.Ctx).Info("No GPUs discovered")
98+
log.G(a.Ctx).Info(" \u2705 No GPUs discovered")
10199
}
102100

103101
return nil
104102
}
105103

106104
func (a *GPUManager) Check() error {
107105

108-
log.G(a.Ctx).Info("Checking the availability of GPUs...")
109-
110106
cli, err := client.NewEnvClient()
111107
if err != nil {
112108
return fmt.Errorf("unable to create a new Docker client: %v", err)
@@ -148,9 +144,9 @@ func (a *GPUManager) Check() error {
148144
// print the GPUSpecsList that are not available
149145
for _, gpuSpec := range a.GPUSpecsList {
150146
if !gpuSpec.Available {
151-
log.G(a.Ctx).Info(fmt.Sprintf("GPU with UUID %s is not available. It is in use by container %s", gpuSpec.UUID, gpuSpec.ContainerID))
147+
log.G(a.Ctx).Info(fmt.Sprintf("\u274C GPU with UUID %s is not available. It is in use by container %s", gpuSpec.UUID, gpuSpec.ContainerID))
152148
} else {
153-
log.G(a.Ctx).Info(fmt.Sprintf("GPU with UUID %s is available", gpuSpec.UUID))
149+
log.G(a.Ctx).Info(fmt.Sprintf("\u2705 GPU with UUID %s is available", gpuSpec.UUID))
154150
}
155151
}
156152

@@ -159,8 +155,6 @@ func (a *GPUManager) Check() error {
159155

160156
func (a *GPUManager) Shutdown() error {
161157

162-
log.G(a.Ctx).Info("Shutting down NVML...")
163-
164158
ret := nvml.Shutdown()
165159
if ret != nvml.SUCCESS {
166160
return fmt.Errorf("Unable to shutdown NVML: %v", nvml.ErrorString(ret))
@@ -193,8 +187,6 @@ func (a *GPUManager) Assign(UUID string, containerID string) error {
193187

194188
func (a *GPUManager) Release(containerID string) error {
195189

196-
log.G(a.Ctx).Info("Releasing GPU from container " + containerID)
197-
198190
a.GPUSpecsMutex.Lock()
199191
defer a.GPUSpecsMutex.Unlock()
200192

@@ -210,8 +202,6 @@ func (a *GPUManager) Release(containerID string) error {
210202
}
211203
}
212204

213-
log.G(a.Ctx).Info("Correctly released GPU from container " + containerID)
214-
215205
return nil
216206
}
217207

@@ -252,8 +242,6 @@ func (a *GPUManager) GetAndAssignAvailableGPUs(numGPUs int, containerID string)
252242
// dump the GPUSpecsList into a JSON file
253243
func (a *GPUManager) Dump() error {
254244

255-
log.G(a.Ctx).Info("Dumping the GPUSpecsList into a JSON file...")
256-
257245
// Convert the array to JSON format
258246
jsonData, err := json.MarshalIndent(a.GPUSpecsList, "", " ")
259247
if err != nil {

0 commit comments

Comments (0)