//go:build !remote

package libpod

import (
	"context"
	"errors"
	"fmt"

	"github.com/containers/common/pkg/cgroups"
	"github.com/containers/podman/v5/libpod/define"
	"github.com/containers/podman/v5/libpod/events"
	"github.com/containers/podman/v5/pkg/parallel"
	"github.com/containers/podman/v5/pkg/rootless"
	"github.com/opencontainers/runtime-spec/specs-go"
	"github.com/sirupsen/logrus"
)

// startInitContainers starts a pod's init containers.
func (p *Pod) startInitContainers(ctx context.Context) error {
	initCtrs, err := p.initContainers()
	if err != nil {
		return err
	}
	// Now iterate init containers
	for _, initCon := range initCtrs {
		if err := initCon.Start(ctx, true); err != nil {
			return err
		}
		// Check that the init container waited correctly and the exit
		// code is good
		rc, err := initCon.Wait(ctx)
		if err != nil {
			return err
		}
		if rc != 0 {
			return fmt.Errorf("init container %s exited with code %d", initCon.ID(), rc)
		}
		// If the container is a once init container, we need to remove it
		// after it runs
		if initCon.config.InitContainerType == define.OneShotInitContainer {
			icLock := initCon.lock
			icLock.Lock()
			var time *uint
			opts := ctrRmOpts{
				RemovePod: true,
				Timeout:   time,
			}

			if _, _, err := p.runtime.removeContainer(ctx, initCon, opts); err != nil {
				icLock.Unlock()
				return fmt.Errorf("failed to remove once init container %s: %w", initCon.ID(), err)
			}
			icLock.Unlock()
		}
	}
	return nil
}

// Start starts all containers within a pod.
// It combines the effects of Init() and Start() on a container.
// If a container has already been initialized it will be started,
// otherwise it will be initialized then started.
// Containers that are already running or have been paused are ignored.
// All containers are started independently, in order dictated by their
// dependencies.
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were started.
// If map is not nil, an error was encountered when starting one or more
// containers. The container ID is mapped to the error encountered. The error is
// set to ErrPodPartialFail.
// If both error and the map are nil, all containers were started successfully.
func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	if !p.valid {
		return nil, define.ErrPodRemoved
	}

	if err := p.maybeStartServiceContainer(ctx); err != nil {
		return nil, err
	}

	// Before "regular" containers start in the pod, all init containers
	// must have run and exited successfully.
	if err := p.startInitContainers(ctx); err != nil {
		return nil, err
	}
	allCtrs, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}
	// Build a dependency graph of containers in the pod
	graph, err := BuildContainerGraph(allCtrs)
	if err != nil {
		return nil, fmt.Errorf("generating dependency graph for pod %s: %w", p.ID(), err)
	}
	// If there are no containers without dependencies, we can't start.
	// Error out.
	if len(graph.noDepNodes) == 0 {
		return nil, fmt.Errorf("no containers in pod %s have no dependencies, cannot start pod: %w", p.ID(), define.ErrNoSuchCtr)
	}

	ctrErrors := make(map[string]error)
	ctrsVisited := make(map[string]bool)

	// Traverse the graph beginning at nodes with no dependencies
	for _, node := range graph.noDepNodes {
		startNode(ctx, node, false, ctrErrors, ctrsVisited, false)
	}

	if len(ctrErrors) > 0 {
		return ctrErrors, fmt.Errorf("starting some containers: %w", define.ErrPodPartialFail)
	}
	defer p.newPodEvent(events.Start)
	return nil, nil
}
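// Editorial note, not part of the upstream file: a minimal usage sketch for
// Start(), assuming the caller already holds a *Pod named pod and a context.
// It shows how the ErrPodPartialFail sentinel and the per-container error map
// described above are meant to be consumed together.
//
//	ctrErrs, err := pod.Start(ctx)
//	if err != nil {
//		if errors.Is(err, define.ErrPodPartialFail) {
//			for id, ctrErr := range ctrErrs {
//				logrus.Errorf("container %s failed to start: %v", id, ctrErr)
//			}
//		}
//		return err
//	}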

// Stop stops all containers within a pod without a timeout.  It assumes -1 for
// a timeout.
func (p *Pod) Stop(ctx context.Context, cleanup bool) (map[string]error, error) {
	return p.StopWithTimeout(ctx, cleanup, -1)
}

// StopWithTimeout stops all containers within a pod that are not already stopped.
// Each container will use its own stop timeout.
// Only running containers will be stopped. Paused, stopped, or created
// containers will be ignored.
// If cleanup is true, mounts and network namespaces will be cleaned up after
// the container is stopped.
// All containers are stopped independently. An error stopping one container
// will not prevent other containers being stopped.
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were stopped.
// If map is not nil, an error was encountered when stopping one or more
// containers. The container ID is mapped to the error encountered. The error is
// set to ErrPodPartialFail.
// If both error and the map are nil, all containers were stopped without error.
func (p *Pod) StopWithTimeout(ctx context.Context, cleanup bool, timeout int) (map[string]error, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	return p.stopWithTimeout(ctx, cleanup, timeout)
}
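// Editorial note, not part of the upstream file: a sketch of stopping a pod
// with an explicit timeout (10 seconds here is an arbitrary example value) and
// cleanup enabled, again assuming a *Pod named pod is already in hand.
// Passing -1 as the timeout lets each container use its own stop timeout,
// which is what Stop() does.
//
//	ctrErrs, err := pod.StopWithTimeout(ctx, true, 10)
//	if errors.Is(err, define.ErrPodPartialFail) {
//		for id, ctrErr := range ctrErrs {
//			logrus.Warnf("container %s failed to stop: %v", id, ctrErr)
//		}
//	} else if err != nil {
//		return err
//	}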

func (p *Pod) stopWithTimeout(ctx context.Context, cleanup bool, timeout int) (map[string]error, error) {
	if !p.valid {
		return nil, define.ErrPodRemoved
	}

	allCtrs, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}

	// Stopping pods is not ordered by dependency. We haven't seen any case
	// where this would actually matter.

	ctrErrChan := make(map[string]<-chan error)

	// Enqueue a function for each container with the parallel executor.
	for _, ctr := range allCtrs {
		c := ctr
		logrus.Debugf("Adding parallel job to stop container %s", c.ID())
		retChan := parallel.Enqueue(ctx, func() error {
			// Can't batch these without forcing Stop() to hold the
			// lock for the full duration of the timeout.
			// We probably don't want to do that.
			if timeout > -1 {
				if err := c.StopWithTimeout(uint(timeout)); err != nil {
					return err
				}
			} else {
				if err := c.Stop(); err != nil {
					return err
				}
			}

			if cleanup {
				return c.Cleanup(ctx)
			}

			return nil
		})

		ctrErrChan[c.ID()] = retChan
	}

	p.newPodEvent(events.Stop)

	ctrErrors := make(map[string]error)

	// Get returned error for every container we worked on
	for id, channel := range ctrErrChan {
		if err := <-channel; err != nil {
			if errors.Is(err, define.ErrCtrStateInvalid) || errors.Is(err, define.ErrCtrStopped) {
				continue
			}
			ctrErrors[id] = err
		}
	}

	if len(ctrErrors) > 0 {
		return ctrErrors, fmt.Errorf("stopping some containers: %w", define.ErrPodPartialFail)
	}

	if err := p.maybeStopServiceContainer(); err != nil {
		return nil, err
	}

	if err := p.updatePod(); err != nil {
		return nil, err
	}
	if err := p.removePodCgroup(); err != nil {
		return nil, err
	}

	return nil, nil
}

// Stops the pod if only the infra container remains running.
func (p *Pod) stopIfOnlyInfraRemains(ctx context.Context, ignoreID string) error {
	p.lock.Lock()
	defer p.lock.Unlock()

	infraID := ""

	if p.HasInfraContainer() {
		infra, err := p.infraContainer()
		if err != nil {
			return err
		}
		infraID = infra.ID()
	}

	allCtrs, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return err
	}

	for _, ctr := range allCtrs {
		if ctr.ID() == infraID || ctr.ID() == ignoreID {
			continue
		}

		state, err := ctr.State()
		if err != nil {
			return fmt.Errorf("getting state of container %s: %w", ctr.ID(), err)
		}

		switch state {
		case define.ContainerStateExited,
			define.ContainerStateRemoving,
			define.ContainerStateStopping,
			define.ContainerStateUnknown:
			continue
		default:
			return nil
		}
	}

	_, err = p.stopWithTimeout(ctx, true, -1)
	return err
}

// Cleanup cleans up all containers within a pod that have stopped.
// All containers are cleaned up independently. An error with one container will
// not prevent other containers being cleaned up.
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were cleaned up.
// If map is not nil, an error was encountered when working on one or more
// containers. The container ID is mapped to the error encountered. The error is
// set to ErrPodPartialFail.
// If both error and the map are nil, all containers were cleaned up without error.
func (p *Pod) Cleanup(ctx context.Context) (map[string]error, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	if !p.valid {
		return nil, define.ErrPodRemoved
	}

	allCtrs, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}

	ctrErrChan := make(map[string]<-chan error)

	// Enqueue a function for each container with the parallel executor.
	for _, ctr := range allCtrs {
		c := ctr
		logrus.Debugf("Adding parallel job to clean up container %s", c.ID())
		retChan := parallel.Enqueue(ctx, func() error {
			return c.Cleanup(ctx)
		})

		ctrErrChan[c.ID()] = retChan
	}

	ctrErrors := make(map[string]error)

	// Get returned error for every container we worked on
	for id, channel := range ctrErrChan {
		if err := <-channel; err != nil {
			if errors.Is(err, define.ErrCtrStateInvalid) || errors.Is(err, define.ErrCtrStopped) {
				continue
			}
			ctrErrors[id] = err
		}
	}

	if len(ctrErrors) > 0 {
		return ctrErrors, fmt.Errorf("cleaning up some containers: %w", define.ErrPodPartialFail)
	}

	if err := p.maybeStopServiceContainer(); err != nil {
		return nil, err
	}

	return nil, nil
}
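// Editorial note, not part of the upstream file: a sketch of cleaning up a pod
// after its containers have stopped, assuming a *Pod named pod. Partial
// failures surface per container, as with the other pod-wide operations.
//
//	ctrErrs, err := pod.Cleanup(ctx)
//	if errors.Is(err, define.ErrPodPartialFail) {
//		for id, ctrErr := range ctrErrs {
//			logrus.Warnf("container %s failed to clean up: %v", id, ctrErr)
//		}
//	} else if err != nil {
//		return err
//	}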

// Pause pauses all containers within a pod that are running.
// Only running containers will be paused. Paused, stopped, or created
// containers will be ignored.
// All containers are paused independently. An error pausing one container
// will not prevent other containers being paused.
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were paused.
// If map is not nil, an error was encountered when pausing one or more
// containers. The container ID is mapped to the error encountered. The error is
// set to ErrPodPartialFail.
// If both error and the map are nil, all containers were paused without error.
func (p *Pod) Pause(ctx context.Context) (map[string]error, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	if !p.valid {
		return nil, define.ErrPodRemoved
	}

	if rootless.IsRootless() {
		cgroupv2, err := cgroups.IsCgroup2UnifiedMode()
		if err != nil {
			return nil, fmt.Errorf("failed to determine cgroup version: %w", err)
		}
		if !cgroupv2 {
			return nil, fmt.Errorf("cannot pause pods containing rootless containers with cgroup V1: %w", define.ErrNoCgroups)
		}
	}

	allCtrs, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}

	ctrErrChan := make(map[string]<-chan error)

	// Enqueue a function for each container with the parallel executor.
	for _, ctr := range allCtrs {
		c := ctr
		logrus.Debugf("Adding parallel job to pause container %s", c.ID())
		retChan := parallel.Enqueue(ctx, c.Pause)

		ctrErrChan[c.ID()] = retChan
	}

	p.newPodEvent(events.Pause)

	ctrErrors := make(map[string]error)

	// Get returned error for every container we worked on
	for id, channel := range ctrErrChan {
		if err := <-channel; err != nil {
			if errors.Is(err, define.ErrCtrStateInvalid) || errors.Is(err, define.ErrCtrStopped) {
				continue
			}
			ctrErrors[id] = err
		}
	}

	if len(ctrErrors) > 0 {
		return ctrErrors, fmt.Errorf("pausing some containers: %w", define.ErrPodPartialFail)
	}
	return nil, nil
}
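// Editorial note, not part of the upstream file: a sketch of pausing a pod,
// assuming a *Pod named pod. The ErrNoCgroups check mirrors the rootless
// cgroup v1 guard at the top of Pause() above.
//
//	ctrErrs, err := pod.Pause(ctx)
//	switch {
//	case errors.Is(err, define.ErrNoCgroups):
//		return fmt.Errorf("pausing pod %s is not supported here: %w", pod.ID(), err)
//	case errors.Is(err, define.ErrPodPartialFail):
//		for id, ctrErr := range ctrErrs {
//			logrus.Errorf("container %s failed to pause: %v", id, ctrErr)
//		}
//	case err != nil:
//		return err
//	}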

// Unpause unpauses all containers within a pod that are paused.
// Only paused containers will be unpaused. Running, stopped, or created
// containers will be ignored.
// All containers are unpaused independently. An error unpausing one container
// will not prevent other containers being unpaused.
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were unpaused.
// If map is not nil, an error was encountered when unpausing one or more
// containers. The container ID is mapped to the error encountered. The error is
// set to ErrPodPartialFail.
// If both error and the map are nil, all containers were unpaused without error.
func (p *Pod) Unpause(ctx context.Context) (map[string]error, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	if !p.valid {
		return nil, define.ErrPodRemoved
	}

	allCtrs, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}

	ctrErrChan := make(map[string]<-chan error)

	// Enqueue a function for each container with the parallel executor.
	for _, ctr := range allCtrs {
		c := ctr
		logrus.Debugf("Adding parallel job to unpause container %s", c.ID())
		retChan := parallel.Enqueue(ctx, c.Unpause)

		ctrErrChan[c.ID()] = retChan
	}

	p.newPodEvent(events.Unpause)

	ctrErrors := make(map[string]error)

	// Get returned error for every container we worked on
	for id, channel := range ctrErrChan {
		if err := <-channel; err != nil {
			if errors.Is(err, define.ErrCtrStateInvalid) || errors.Is(err, define.ErrCtrStopped) {
				continue
			}
			ctrErrors[id] = err
		}
	}

	if len(ctrErrors) > 0 {
		return ctrErrors, fmt.Errorf("unpausing some containers: %w", define.ErrPodPartialFail)
	}
	return nil, nil
}
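// Editorial note, not part of the upstream file: a sketch of resuming a paused
// pod, assuming a *Pod named pod; the error contract is the same as Pause().
//
//	ctrErrs, err := pod.Unpause(ctx)
//	if errors.Is(err, define.ErrPodPartialFail) {
//		for id, ctrErr := range ctrErrs {
//			logrus.Errorf("container %s failed to unpause: %v", id, ctrErr)
//		}
//	} else if err != nil {
//		return err
//	}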

// Restart restarts all containers within a pod that are not paused or in an error state.
// It combines the effects of Stop() and Start() on a container.
// Each container will use its own stop timeout.
// All containers are started independently, in order dictated by their
// dependencies. An error restarting one container
// will not prevent other containers being restarted.
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were restarted.
// If map is not nil, an error was encountered when restarting one or more
// containers. The container ID is mapped to the error encountered. The error is
// set to ErrPodPartialFail.
// If both error and the map are nil, all containers were restarted without error.
func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	if !p.valid {
		return nil, define.ErrPodRemoved
	}

	if err := p.maybeStartServiceContainer(ctx); err != nil {
		return nil, err
	}

	allCtrs, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}

	// Build a dependency graph of containers in the pod
	graph, err := BuildContainerGraph(allCtrs)
	if err != nil {
		return nil, fmt.Errorf("generating dependency graph for pod %s: %w", p.ID(), err)
	}

	ctrErrors := make(map[string]error)
	ctrsVisited := make(map[string]bool)

	// If there are no containers without dependencies, we can't start.
	// Error out.
	if len(graph.noDepNodes) == 0 {
		return nil, fmt.Errorf("no containers in pod %s have no dependencies, cannot start pod: %w", p.ID(), define.ErrNoSuchCtr)
	}

	// Traverse the graph beginning at nodes with no dependencies
	for _, node := range graph.noDepNodes {
		startNode(ctx, node, false, ctrErrors, ctrsVisited, true)
	}

	if len(ctrErrors) > 0 {
		return ctrErrors, fmt.Errorf("restarting some containers: %w", define.ErrPodPartialFail)
	}
	p.newPodEvent(events.Stop)
	p.newPodEvent(events.Start)
	return nil, nil
}
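// Editorial note, not part of the upstream file: a sketch of restarting a pod,
// assuming a *Pod named pod. As with Start(), containers are restarted in
// dependency order and failures surface via ErrPodPartialFail.
//
//	ctrErrs, err := pod.Restart(ctx)
//	if errors.Is(err, define.ErrPodPartialFail) {
//		for id, ctrErr := range ctrErrs {
//			logrus.Errorf("container %s failed to restart: %v", id, ctrErr)
//		}
//	} else if err != nil {
//		return err
//	}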

// Kill sends a signal to all running containers within a pod.
// Signals will only be sent to running containers. Containers that are not
// running will be ignored. All signals are sent independently, and sending will
// continue even if some containers encounter errors.
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were signalled.
// If map is not nil, an error was encountered when signalling one or more
// containers. The container ID is mapped to the error encountered. The error is
// set to ErrPodPartialFail.
// If both error and the map are nil, all containers were signalled successfully.
func (p *Pod) Kill(ctx context.Context, signal uint) (map[string]error, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	if !p.valid {
		return nil, define.ErrPodRemoved
	}

	allCtrs, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}

	ctrErrChan := make(map[string]<-chan error)

	// Enqueue a function for each container with the parallel executor.
	for _, ctr := range allCtrs {
		c := ctr
		logrus.Debugf("Adding parallel job to kill container %s", c.ID())
		retChan := parallel.Enqueue(ctx, func() error {
			return c.Kill(signal)
		})

		ctrErrChan[c.ID()] = retChan
	}

	p.newPodEvent(events.Kill)

	ctrErrors := make(map[string]error)

	// Get returned error for every container we worked on
	for id, channel := range ctrErrChan {
		if err := <-channel; err != nil {
			if errors.Is(err, define.ErrCtrStateInvalid) || errors.Is(err, define.ErrCtrStopped) {
				continue
			}
			ctrErrors[id] = err
		}
	}

	if len(ctrErrors) > 0 {
		return ctrErrors, fmt.Errorf("killing some containers: %w", define.ErrPodPartialFail)
	}

	if err := p.maybeStopServiceContainer(); err != nil {
		return nil, err
	}

	return nil, nil
}
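// Editorial note, not part of the upstream file: a sketch of signalling every
// running container in a pod, assuming a *Pod named pod and a syscall import
// for the signal constant. Kill() takes the signal as a plain uint.
//
//	ctrErrs, err := pod.Kill(ctx, uint(syscall.SIGTERM))
//	if errors.Is(err, define.ErrPodPartialFail) {
//		for id, ctrErr := range ctrErrs {
//			logrus.Errorf("container %s could not be signalled: %v", id, ctrErr)
//		}
//	} else if err != nil {
//		return err
//	}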

// Status gets the status of all containers in the pod.
// Returns a map of Container ID to Container Status.
func (p *Pod) Status() (map[string]define.ContainerStatus, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	if !p.valid {
		return nil, define.ErrPodRemoved
	}
	allCtrs, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}
	noInitCtrs := make([]*Container, 0)
	// Do not add init containers into status
	for _, ctr := range allCtrs {
		if ctrType := ctr.config.InitContainerType; len(ctrType) < 1 {
			noInitCtrs = append(noInitCtrs, ctr)
		}
	}
	return containerStatusFromContainers(noInitCtrs)
}
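// Editorial note, not part of the upstream file: a sketch of reading the
// per-container status map, assuming a *Pod named pod. Init containers are
// excluded by Status() itself, as noted above.
//
//	statuses, err := pod.Status()
//	if err != nil {
//		return err
//	}
//	for id, state := range statuses {
//		logrus.Infof("container %s is %s", id, state.String())
//	}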

func containerStatusFromContainers(allCtrs []*Container) (map[string]define.ContainerStatus, error) {
	status := make(map[string]define.ContainerStatus, len(allCtrs))
	for _, ctr := range allCtrs {
		state, err := ctr.State()

		if err != nil {
			return nil, err
		}

		status[ctr.ID()] = state
	}

	return status, nil
}

// Inspect returns a PodInspect struct to describe the pod.
func (p *Pod) Inspect() (*define.InspectPodData, error) {
	p.lock.Lock()
	defer p.lock.Unlock()
	if err := p.updatePod(); err != nil {
		return nil, err
	}

	containers, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}
	ctrs := make([]define.InspectPodContainerInfo, 0, len(containers))
	ctrStatuses := make(map[string]define.ContainerStatus, len(containers))
	for _, c := range containers {
		containerStatus := "unknown"
		// Ignoring possible errors here because we don't want this to be
		// catastrophic in nature
		containerState, err := c.State()
		if err == nil {
			containerStatus = containerState.String()
		}
		ctrs = append(ctrs, define.InspectPodContainerInfo{
			ID:    c.ID(),
			Name:  c.Name(),
			State: containerStatus,
		})
		// Do not add init containers for status
		if len(c.config.InitContainerType) < 1 {
			ctrStatuses[c.ID()] = c.state.State
		}
	}
	podState, err := createPodStatusResults(ctrStatuses)
	if err != nil {
		return nil, err
	}

	namespaces := map[string]bool{
		"pid":    p.config.UsePodPID,
		"ipc":    p.config.UsePodIPC,
		"net":    p.config.UsePodNet,
		"mount":  p.config.UsePodMount,
		"user":   p.config.UsePodUser,
		"uts":    p.config.UsePodUTS,
		"cgroup": p.config.UsePodCgroupNS,
	}

	sharesNS := []string{}
	for nsStr, include := range namespaces {
		if include {
			sharesNS = append(sharesNS, nsStr)
		}
	}

	// Infra config contains detailed information on the pod's infra
	// container.
	var infraConfig *define.InspectPodInfraConfig
	var inspectMounts []define.InspectMount
	var devices []define.InspectDevice
	var infraSecurity []string
	if p.state.InfraContainerID != "" {
		infra, err := p.runtime.GetContainer(p.state.InfraContainerID)
		if err != nil {
			return nil, err
		}
		infraConfig = new(define.InspectPodInfraConfig)
		infraConfig.HostNetwork = p.NetworkMode() == "host"
		infraConfig.StaticIP = infra.config.ContainerNetworkConfig.StaticIP
		infraConfig.NoManageResolvConf = infra.config.UseImageResolvConf
		infraConfig.NoManageHosts = infra.config.UseImageHosts
		infraConfig.CPUPeriod = p.CPUPeriod()
		infraConfig.CPUQuota = p.CPUQuota()
		infraConfig.CPUSetCPUs = p.ResourceLim().CPU.Cpus
		infraConfig.PidNS = p.NamespaceMode(specs.PIDNamespace)
		infraConfig.UserNS = p.NamespaceMode(specs.UserNamespace)
		infraConfig.UtsNS = p.NamespaceMode(specs.UTSNamespace)
		namedVolumes, mounts := infra.SortUserVolumes(infra.config.Spec)
		inspectMounts, err = infra.GetMounts(namedVolumes, infra.config.ImageVolumes, mounts)
		infraSecurity = infra.GetSecurityOptions()
		if err != nil {
			return nil, err
		}

		if len(infra.config.ContainerNetworkConfig.DNSServer) > 0 {
			infraConfig.DNSServer = make([]string, 0, len(infra.config.ContainerNetworkConfig.DNSServer))
			for _, entry := range infra.config.ContainerNetworkConfig.DNSServer {
				infraConfig.DNSServer = append(infraConfig.DNSServer, entry.String())
			}
		}
		if len(infra.config.ContainerNetworkConfig.DNSSearch) > 0 {
			infraConfig.DNSSearch = make([]string, 0, len(infra.config.ContainerNetworkConfig.DNSSearch))
			infraConfig.DNSSearch = append(infraConfig.DNSSearch, infra.config.ContainerNetworkConfig.DNSSearch...)
		}
		if len(infra.config.ContainerNetworkConfig.DNSOption) > 0 {
			infraConfig.DNSOption = make([]string, 0, len(infra.config.ContainerNetworkConfig.DNSOption))
			infraConfig.DNSOption = append(infraConfig.DNSOption, infra.config.ContainerNetworkConfig.DNSOption...)
		}
		if len(infra.config.HostAdd) > 0 {
			infraConfig.HostAdd = make([]string, 0, len(infra.config.HostAdd))
			infraConfig.HostAdd = append(infraConfig.HostAdd, infra.config.HostAdd...)
		}

		networks, err := infra.networks()
		if err != nil {
			return nil, err
		}
		netNames := make([]string, 0, len(networks))
		for name := range networks {
			netNames = append(netNames, name)
		}
		if len(netNames) > 0 {
			infraConfig.Networks = netNames
		}
		infraConfig.NetworkOptions = infra.config.ContainerNetworkConfig.NetworkOptions
		infraConfig.PortBindings = makeInspectPortBindings(infra.config.ContainerNetworkConfig.PortMappings)
	}

	inspectData := define.InspectPodData{
		ID:                  p.ID(),
		Name:                p.Name(),
		Namespace:           p.Namespace(),
		Created:             p.CreatedTime(),
		CreateCommand:       p.config.CreateCommand,
		ExitPolicy:          string(p.config.ExitPolicy),
		State:               podState,
		Hostname:            p.config.Hostname,
		Labels:              p.Labels(),
		CreateCgroup:        p.config.UsePodCgroup,
		CgroupParent:        p.CgroupParent(),
		CgroupPath:          p.state.CgroupPath,
		CreateInfra:         infraConfig != nil,
		InfraContainerID:    p.state.InfraContainerID,
		InfraConfig:         infraConfig,
		SharedNamespaces:    sharesNS,
		NumContainers:       uint(len(containers)),
		Containers:          ctrs,
		CPUSetCPUs:          p.ResourceLim().CPU.Cpus,
		CPUPeriod:           p.CPUPeriod(),
		CPUQuota:            p.CPUQuota(),
		MemoryLimit:         p.MemoryLimit(),
		Mounts:              inspectMounts,
		Devices:             devices,
		BlkioDeviceReadBps:  p.BlkiThrottleReadBps(),
		VolumesFrom:         p.VolumesFrom(),
		SecurityOpts:        infraSecurity,
		MemorySwap:          p.MemorySwap(),
		BlkioWeight:         p.BlkioWeight(),
		CPUSetMems:          p.CPUSetMems(),
		BlkioDeviceWriteBps: p.BlkiThrottleWriteBps(),
		CPUShares:           p.CPUShares(),
		RestartPolicy:       p.config.RestartPolicy,
		LockNumber:          p.lock.ID(),
	}

	return &inspectData, nil
}
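// Editorial note, not part of the upstream file: a sketch of inspecting a pod,
// assuming a *Pod named pod. Only fields that Inspect() populates above are
// read here.
//
//	data, err := pod.Inspect()
//	if err != nil {
//		return err
//	}
//	logrus.Infof("pod %s (%s): state %v, %d containers, shared namespaces %v",
//		data.Name, data.ID, data.State, data.NumContainers, data.SharedNamespaces)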