podman

Форк
0
/
container_inspect.go 
678 строк · 20.8 Кб
1
//go:build !remote
2

3
package libpod
4

5
import (
6
	"errors"
7
	"fmt"
8
	"strings"
9

10
	"github.com/containers/podman/v5/libpod/define"
11
	"github.com/containers/podman/v5/libpod/driver"
12
	"github.com/containers/podman/v5/pkg/signal"
13
	"github.com/containers/podman/v5/pkg/util"
14
	"github.com/containers/storage/types"
15
	"github.com/docker/go-units"
16
	spec "github.com/opencontainers/runtime-spec/specs-go"
17
	"github.com/sirupsen/logrus"
18
)
19

20
// inspectLocked inspects a container for low-level information.
// The caller must hold c.lock; this function does no locking or state
// syncing of its own (see Inspect for the locking wrapper).
func (c *Container) inspectLocked(size bool) (*define.InspectContainerData, error) {
	// Resolve the container's storage entry so we can reach its top layer.
	storeCtr, err := c.runtime.store.Container(c.ID())
	if err != nil {
		return nil, fmt.Errorf("getting container from store %q: %w", c.ID(), err)
	}
	layer, err := c.runtime.store.Layer(storeCtr.LayerID)
	if err != nil {
		return nil, fmt.Errorf("reading information about layer %q: %w", storeCtr.LayerID, err)
	}
	// Graph-driver details (driver name, layer paths) for the top layer.
	driverData, err := driver.GetDriverData(c.runtime.store, layer.ID)
	if err != nil {
		return nil, fmt.Errorf("getting graph driver info %q: %w", c.ID(), err)
	}
	return c.getContainerInspectData(size, driverData)
}
37

38
// Inspect a container for low-level information.
// If size is true, rootfs and read-write layer sizes are also computed
// (potentially expensive). In batched mode the caller already holds the
// lock and has synced state, so both steps are skipped here.
func (c *Container) Inspect(size bool) (*define.InspectContainerData, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		// Refresh in-memory state from the database before inspecting.
		if err := c.syncContainer(); err != nil {
			return nil, err
		}
	}

	return c.inspectLocked(size)
}
51

52
// volumesFrom returns the list of containers this container takes volumes
// from, as stored in the volumes-from annotation of the runtime spec.
// A nil slice is returned when the annotation is absent.
func (c *Container) volumesFrom() ([]string, error) {
	stateSpec, err := c.specFromState()
	if err != nil {
		return nil, err
	}
	annotation, ok := stateSpec.Annotations[define.VolumesFromAnnotation]
	if !ok {
		return nil, nil
	}
	return strings.Split(annotation, ";"), nil
}
62

63
// getContainerInspectData assembles the full InspectContainerData for this
// container from its config, current runtime state, and OCI spec.
// driverData is the graph-driver information already looked up by the
// caller; size controls whether the (potentially expensive) rootfs and
// read-write layer sizes are computed. The caller must hold c.lock.
func (c *Container) getContainerInspectData(size bool, driverData *define.DriverData) (*define.InspectContainerData, error) {
	config := c.config
	runtimeInfo := c.state
	ctrSpec, err := c.specFromState()
	if err != nil {
		return nil, err
	}

	// Process is allowed to be nil in the stateSpec
	args := []string{}
	if config.Spec.Process != nil {
		args = config.Spec.Process.Args
	}
	// Docker-style split of the command line: Path is argv[0], Args the rest.
	var path string
	if len(args) > 0 {
		path = args[0]
	}
	if len(args) > 1 {
		args = args[1:]
	}

	// IDs of all currently tracked exec sessions.
	execIDs := []string{}
	for id := range c.state.ExecSessions {
		execIDs = append(execIDs, id)
	}

	// Host-side paths of the network-related files bind-mounted into the
	// container; empty when the mounts have not been created yet.
	resolvPath := ""
	hostsPath := ""
	hostnamePath := ""
	if c.state.BindMounts != nil {
		if getPath, ok := c.state.BindMounts["/etc/resolv.conf"]; ok {
			resolvPath = getPath
		}
		if getPath, ok := c.state.BindMounts["/etc/hosts"]; ok {
			hostsPath = getPath
		}
		if getPath, ok := c.state.BindMounts["/etc/hostname"]; ok {
			hostnamePath = getPath
		}
	}

	namedVolumes, mounts := c.SortUserVolumes(ctrSpec)
	inspectMounts, err := c.GetMounts(namedVolumes, c.config.ImageVolumes, mounts)
	if err != nil {
		return nil, err
	}

	cgroupPath, err := c.cGroupPath()
	if err != nil {
		// Handle the case where the container is not running or has no cgroup.
		if errors.Is(err, define.ErrNoCgroups) || errors.Is(err, define.ErrCtrStopped) {
			cgroupPath = ""
		} else {
			return nil, err
		}
	}

	data := &define.InspectContainerData{
		ID:      config.ID,
		Created: config.CreatedTime,
		Path:    path,
		Args:    args,
		State: &define.InspectContainerState{
			OciVersion:     ctrSpec.Version,
			Status:         runtimeInfo.State.String(),
			Running:        runtimeInfo.State == define.ContainerStateRunning,
			Paused:         runtimeInfo.State == define.ContainerStatePaused,
			OOMKilled:      runtimeInfo.OOMKilled,
			// "bad state" is the String() form of an invalid/unknown state.
			Dead:           runtimeInfo.State.String() == "bad state",
			Pid:            runtimeInfo.PID,
			ConmonPid:      runtimeInfo.ConmonPID,
			ExitCode:       runtimeInfo.ExitCode,
			Error:          runtimeInfo.Error,
			StartedAt:      runtimeInfo.StartedTime,
			FinishedAt:     runtimeInfo.FinishedTime,
			Checkpointed:   runtimeInfo.Checkpointed,
			CgroupPath:     cgroupPath,
			RestoredAt:     runtimeInfo.RestoredTime,
			CheckpointedAt: runtimeInfo.CheckpointedTime,
			Restored:       runtimeInfo.Restored,
			CheckpointPath: runtimeInfo.CheckpointPath,
			CheckpointLog:  runtimeInfo.CheckpointLog,
			RestoreLog:     runtimeInfo.RestoreLog,
			StoppedByUser:  c.state.StoppedByUser,
		},
		Image:                   config.RootfsImageID,
		ImageName:               config.RootfsImageName,
		Namespace:               config.Namespace,
		Rootfs:                  config.Rootfs,
		Pod:                     config.Pod,
		ResolvConfPath:          resolvPath,
		HostnamePath:            hostnamePath,
		HostsPath:               hostsPath,
		StaticDir:               config.StaticDir,
		OCIRuntime:              config.OCIRuntime,
		ConmonPidFile:           config.ConmonPidFile,
		PidFile:                 config.PidFile,
		Name:                    config.Name,
		RestartCount:            int32(runtimeInfo.RestartCount),
		Driver:                  driverData.Name,
		MountLabel:              config.MountLabel,
		ProcessLabel:            config.ProcessLabel,
		AppArmorProfile:         ctrSpec.Process.ApparmorProfile,
		ExecIDs:                 execIDs,
		GraphDriver:             driverData,
		Mounts:                  inspectMounts,
		Dependencies:            c.Dependencies(),
		IsInfra:                 c.IsInfra(),
		IsService:               c.IsService(),
		KubeExitCodePropagation: config.KubeExitCodePropagation.String(),
		LockNumber:              c.lock.ID(),
	}

	if config.RootfsImageID != "" { // May not be set if the container was created with --rootfs
		image, _, err := c.runtime.libimageRuntime.LookupImage(config.RootfsImageID, nil)
		if err != nil {
			return nil, err
		}
		data.ImageDigest = image.Digest().String()
	}

	if ctrSpec.Process.Capabilities != nil {
		data.EffectiveCaps = ctrSpec.Process.Capabilities.Effective
		data.BoundingCaps = ctrSpec.Process.Capabilities.Bounding
	}

	if c.state.ConfigPath != "" {
		data.OCIConfigPath = c.state.ConfigPath
	}

	// Check if healthcheck is not nil and --no-healthcheck option is not set.
	// If --no-healthcheck is set Test will be always set to `[NONE]`, so the
	// inspect status should be set to nil.
	if c.config.HealthCheckConfig != nil && !(len(c.config.HealthCheckConfig.Test) == 1 && c.config.HealthCheckConfig.Test[0] == "NONE") {
		// This container has a healthcheck defined in it; we need to add its state
		healthCheckState, err := c.getHealthCheckLog()
		if err != nil {
			// An error here is not considered fatal; no health state will be displayed
			logrus.Error(err)
		} else {
			data.State.Health = &healthCheckState
		}
	} else {
		data.State.Health = nil
	}

	networkConfig, err := c.getContainerNetworkInfo()
	if err != nil {
		return nil, err
	}
	data.NetworkSettings = networkConfig

	inspectConfig := c.generateInspectContainerConfig(ctrSpec)
	data.Config = inspectConfig

	hostConfig, err := c.generateInspectContainerHostConfig(ctrSpec, namedVolumes, mounts)
	if err != nil {
		return nil, err
	}
	data.HostConfig = hostConfig

	if size {
		// Size errors are logged but non-fatal; the fields stay at their
		// zero values on failure so inspect output is still produced.
		rootFsSize, err := c.rootFsSize()
		if err != nil {
			logrus.Errorf("Getting rootfs size %q: %v", config.ID, err)
		}
		data.SizeRootFs = rootFsSize

		rwSize, err := c.rwSize()
		if err != nil {
			logrus.Errorf("Getting rw size %q: %v", config.ID, err)
		}
		data.SizeRw = &rwSize
	}
	return data, nil
}
239

240
// Get inspect-formatted mounts list.
241
// Only includes user-specified mounts. Only includes bind mounts and named
242
// volumes, not tmpfs volumes.
243
func (c *Container) GetMounts(namedVolumes []*ContainerNamedVolume, imageVolumes []*ContainerImageVolume, mounts []spec.Mount) ([]define.InspectMount, error) {
244
	inspectMounts := []define.InspectMount{}
245

246
	// No mounts, return early
247
	if len(c.config.UserVolumes) == 0 {
248
		return inspectMounts, nil
249
	}
250

251
	for _, volume := range namedVolumes {
252
		mountStruct := define.InspectMount{}
253
		mountStruct.Type = "volume"
254
		mountStruct.Destination = volume.Dest
255
		mountStruct.Name = volume.Name
256

257
		// For src and driver, we need to look up the named
258
		// volume.
259
		volFromDB, err := c.runtime.state.Volume(volume.Name)
260
		if err != nil {
261
			return nil, fmt.Errorf("looking up volume %s in container %s config: %w", volume.Name, c.ID(), err)
262
		}
263
		mountStruct.Driver = volFromDB.Driver()
264

265
		mountPoint, err := volFromDB.MountPoint()
266
		if err != nil {
267
			return nil, err
268
		}
269
		mountStruct.Source = mountPoint
270

271
		parseMountOptionsForInspect(volume.Options, &mountStruct)
272

273
		inspectMounts = append(inspectMounts, mountStruct)
274
	}
275

276
	for _, volume := range imageVolumes {
277
		mountStruct := define.InspectMount{}
278
		mountStruct.Type = "image"
279
		mountStruct.Destination = volume.Dest
280
		mountStruct.Source = volume.Source
281
		mountStruct.RW = volume.ReadWrite
282

283
		inspectMounts = append(inspectMounts, mountStruct)
284
	}
285

286
	for _, mount := range mounts {
287
		// It's a mount.
288
		// Is it a tmpfs? If so, discard.
289
		if mount.Type == define.TypeTmpfs {
290
			continue
291
		}
292

293
		mountStruct := define.InspectMount{}
294
		mountStruct.Type = define.TypeBind
295
		mountStruct.Source = mount.Source
296
		mountStruct.Destination = mount.Destination
297

298
		parseMountOptionsForInspect(mount.Options, &mountStruct)
299

300
		inspectMounts = append(inspectMounts, mountStruct)
301
	}
302

303
	return inspectMounts, nil
304
}
305

306
// GetSecurityOptions retrieves and returns the security related annotations and process information upon inspection
307
func (c *Container) GetSecurityOptions() []string {
308
	ctrSpec := c.config.Spec
309
	SecurityOpt := []string{}
310
	if ctrSpec.Process != nil {
311
		if ctrSpec.Process.NoNewPrivileges {
312
			SecurityOpt = append(SecurityOpt, "no-new-privileges")
313
		}
314
	}
315
	if label, ok := ctrSpec.Annotations[define.InspectAnnotationLabel]; ok {
316
		SecurityOpt = append(SecurityOpt, fmt.Sprintf("label=%s", label))
317
	}
318
	if seccomp, ok := ctrSpec.Annotations[define.InspectAnnotationSeccomp]; ok {
319
		SecurityOpt = append(SecurityOpt, fmt.Sprintf("seccomp=%s", seccomp))
320
	}
321
	if apparmor, ok := ctrSpec.Annotations[define.InspectAnnotationApparmor]; ok {
322
		SecurityOpt = append(SecurityOpt, fmt.Sprintf("apparmor=%s", apparmor))
323
	}
324
	if c.config.Spec != nil && c.config.Spec.Linux != nil && c.config.Spec.Linux.MaskedPaths == nil {
325
		SecurityOpt = append(SecurityOpt, "unmask=all")
326
	}
327

328
	return SecurityOpt
329
}
330

331
// Parse mount options so we can populate them in the mount structure.
332
// The mount passed in will be modified.
333
func parseMountOptionsForInspect(options []string, mount *define.InspectMount) {
334
	isRW := true
335
	mountProp := ""
336
	zZ := ""
337
	otherOpts := []string{}
338

339
	// Some of these may be overwritten if the user passes us garbage opts
340
	// (for example, [ro,rw])
341
	// We catch these on the Podman side, so not a problem there, but other
342
	// users of libpod who do not properly validate mount options may see
343
	// this.
344
	// Not really worth dealing with on our end - garbage in, garbage out.
345
	for _, opt := range options {
346
		switch opt {
347
		case "ro":
348
			isRW = false
349
		case "rw":
350
			// Do nothing, silently discard
351
		case "shared", "slave", "private", "rshared", "rslave", "rprivate", "unbindable", "runbindable":
352
			mountProp = opt
353
		case "z", "Z":
354
			zZ = opt
355
		default:
356
			otherOpts = append(otherOpts, opt)
357
		}
358
	}
359

360
	mount.RW = isRW
361
	mount.Propagation = mountProp
362
	mount.Mode = zZ
363
	mount.Options = otherOpts
364
}
365

366
// Generate the InspectContainerConfig struct for the Config field of Inspect.
367
func (c *Container) generateInspectContainerConfig(spec *spec.Spec) *define.InspectContainerConfig {
368
	ctrConfig := new(define.InspectContainerConfig)
369

370
	ctrConfig.Hostname = c.Hostname()
371
	ctrConfig.User = c.config.User
372
	if spec.Process != nil {
373
		ctrConfig.Tty = spec.Process.Terminal
374
		ctrConfig.Env = append([]string{}, spec.Process.Env...)
375
		ctrConfig.WorkingDir = spec.Process.Cwd
376
	}
377

378
	ctrConfig.StopTimeout = c.config.StopTimeout
379
	ctrConfig.Timeout = c.config.Timeout
380
	ctrConfig.OpenStdin = c.config.Stdin
381
	ctrConfig.Image = c.config.RootfsImageName
382
	ctrConfig.SystemdMode = c.Systemd()
383

384
	// Leave empty is not explicitly overwritten by user
385
	if len(c.config.Command) != 0 {
386
		ctrConfig.Cmd = []string{}
387
		ctrConfig.Cmd = append(ctrConfig.Cmd, c.config.Command...)
388
	}
389

390
	// Leave empty if not explicitly overwritten by user
391
	if len(c.config.Entrypoint) != 0 {
392
		ctrConfig.Entrypoint = c.config.Entrypoint
393
	}
394

395
	if len(c.config.Labels) != 0 {
396
		ctrConfig.Labels = make(map[string]string)
397
		for k, v := range c.config.Labels {
398
			ctrConfig.Labels[k] = v
399
		}
400
	}
401

402
	if len(spec.Annotations) != 0 {
403
		ctrConfig.Annotations = make(map[string]string)
404
		for k, v := range spec.Annotations {
405
			ctrConfig.Annotations[k] = v
406
		}
407
	}
408
	ctrConfig.StopSignal = signal.ToDockerFormat(c.config.StopSignal)
409
	// TODO: should JSON deep copy this to ensure internal pointers don't
410
	// leak.
411
	ctrConfig.Healthcheck = c.config.HealthCheckConfig
412

413
	ctrConfig.HealthcheckOnFailureAction = c.config.HealthCheckOnFailureAction.String()
414

415
	ctrConfig.CreateCommand = c.config.CreateCommand
416

417
	ctrConfig.Timezone = c.config.Timezone
418
	for _, secret := range c.config.Secrets {
419
		newSec := define.InspectSecret{}
420
		newSec.Name = secret.Name
421
		newSec.ID = secret.ID
422
		newSec.UID = secret.UID
423
		newSec.GID = secret.GID
424
		newSec.Mode = secret.Mode
425
		ctrConfig.Secrets = append(ctrConfig.Secrets, &newSec)
426
	}
427

428
	// Pad Umask to 4 characters
429
	if len(c.config.Umask) < 4 {
430
		pad := strings.Repeat("0", 4-len(c.config.Umask))
431
		ctrConfig.Umask = pad + c.config.Umask
432
	} else {
433
		ctrConfig.Umask = c.config.Umask
434
	}
435

436
	ctrConfig.Passwd = c.config.Passwd
437
	ctrConfig.ChrootDirs = append(ctrConfig.ChrootDirs, c.config.ChrootDirs...)
438

439
	ctrConfig.SdNotifyMode = c.config.SdNotifyMode
440
	ctrConfig.SdNotifySocket = c.config.SdNotifySocket
441
	return ctrConfig
442
}
443

444
// generateIDMappings converts storage ID-mapping options into the
// "containerID:hostID:size" string triplets used in inspect output.
// Empty maps are left nil so the JSON output is unchanged.
func generateIDMappings(idMappings types.IDMappingOptions) *define.InspectIDMappings {
	var out define.InspectIDMappings
	for _, mapping := range idMappings.UIDMap {
		out.UIDMap = append(out.UIDMap, fmt.Sprintf("%d:%d:%d", mapping.ContainerID, mapping.HostID, mapping.Size))
	}
	for _, mapping := range idMappings.GIDMap {
		out.GIDMap = append(out.GIDMap, fmt.Sprintf("%d:%d:%d", mapping.ContainerID, mapping.HostID, mapping.Size))
	}
	return &out
}
454

455
// Generate the InspectContainerHostConfig struct for the HostConfig field of
// Inspect. ctrSpec is the container's OCI spec; namedVolumes and mounts are
// the user-specified volumes already sorted by SortUserVolumes.
func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, namedVolumes []*ContainerNamedVolume, mounts []spec.Mount) (*define.InspectContainerHostConfig, error) {
	hostConfig := new(define.InspectContainerHostConfig)

	logConfig := new(define.InspectLogConfig)
	logConfig.Type = c.config.LogDriver
	logConfig.Path = c.config.LogPath
	logConfig.Size = units.HumanSize(float64(c.config.LogSize))
	logConfig.Tag = c.config.LogTag

	hostConfig.LogConfig = logConfig

	restartPolicy := new(define.InspectRestartPolicy)
	restartPolicy.Name = c.config.RestartPolicy
	// An unset policy is reported as the explicit "no" policy.
	if restartPolicy.Name == "" {
		restartPolicy.Name = define.RestartPolicyNo
	}
	restartPolicy.MaximumRetryCount = c.config.RestartRetries
	hostConfig.RestartPolicy = restartPolicy
	if c.config.NoCgroups {
		hostConfig.Cgroups = "disabled"
	} else {
		hostConfig.Cgroups = "default"
	}

	hostConfig.Dns = make([]string, 0, len(c.config.DNSServer))
	for _, dns := range c.config.DNSServer {
		hostConfig.Dns = append(hostConfig.Dns, dns.String())
	}

	// DNS options/search, extra hosts, and groups are copied into fresh
	// slices so the inspect result does not alias the config.
	hostConfig.DnsOptions = make([]string, 0, len(c.config.DNSOption))
	hostConfig.DnsOptions = append(hostConfig.DnsOptions, c.config.DNSOption...)

	hostConfig.DnsSearch = make([]string, 0, len(c.config.DNSSearch))
	hostConfig.DnsSearch = append(hostConfig.DnsSearch, c.config.DNSSearch...)

	hostConfig.ExtraHosts = make([]string, 0, len(c.config.HostAdd))
	hostConfig.ExtraHosts = append(hostConfig.ExtraHosts, c.config.HostAdd...)

	hostConfig.GroupAdd = make([]string, 0, len(c.config.Groups))
	hostConfig.GroupAdd = append(hostConfig.GroupAdd, c.config.Groups...)

	if ctrSpec.Process != nil {
		if ctrSpec.Process.OOMScoreAdj != nil {
			hostConfig.OomScoreAdj = *ctrSpec.Process.OOMScoreAdj
		}
	}

	hostConfig.SecurityOpt = c.GetSecurityOptions()

	hostConfig.ReadonlyRootfs = ctrSpec.Root.Readonly
	hostConfig.ShmSize = c.config.ShmSize
	hostConfig.Runtime = "oci"

	// Annotations
	if ctrSpec.Annotations != nil {
		if len(ctrSpec.Annotations) != 0 {
			// NOTE: this aliases the spec's map rather than copying it.
			hostConfig.Annotations = ctrSpec.Annotations
		}

		// Several create-time options are recorded as annotations and
		// surfaced here as their original HostConfig fields.
		hostConfig.ContainerIDFile = ctrSpec.Annotations[define.InspectAnnotationCIDFile]
		if ctrSpec.Annotations[define.InspectAnnotationAutoremove] == define.InspectResponseTrue {
			hostConfig.AutoRemove = true
		}
		if ctrs, ok := ctrSpec.Annotations[define.VolumesFromAnnotation]; ok {
			hostConfig.VolumesFrom = strings.Split(ctrs, ";")
		}
		if ctrSpec.Annotations[define.InspectAnnotationPrivileged] == define.InspectResponseTrue {
			hostConfig.Privileged = true
		}
		if ctrSpec.Annotations[define.InspectAnnotationInit] == define.InspectResponseTrue {
			hostConfig.Init = true
		}
		if ctrSpec.Annotations[define.InspectAnnotationPublishAll] == define.InspectResponseTrue {
			hostConfig.PublishAllPorts = true
		}
	}

	// Platform-specific fields (resource limits etc.) are filled in here,
	// including CpuPeriod/CpuQuota used below.
	if err := c.platformInspectContainerHostConfig(ctrSpec, hostConfig); err != nil {
		return nil, err
	}

	// NanoCPUs.
	// This is only calculated if CpuPeriod == 100000.
	// It is given in nanoseconds, versus the microseconds used elsewhere -
	// so multiply by 10000 (not sure why, but 1000 is off by 10).
	if hostConfig.CpuPeriod == 100000 {
		hostConfig.NanoCpus = 10000 * hostConfig.CpuQuota
	}

	// Bind mounts, formatted as src:dst.
	// We'll be appending some options that aren't necessarily in the
	// original command line... but no helping that from inside libpod.
	binds := []string{}
	tmpfs := make(map[string]string)
	for _, namedVol := range namedVolumes {
		if len(namedVol.Options) > 0 {
			binds = append(binds, fmt.Sprintf("%s:%s:%s", namedVol.Name, namedVol.Dest, strings.Join(namedVol.Options, ",")))
		} else {
			binds = append(binds, fmt.Sprintf("%s:%s", namedVol.Name, namedVol.Dest))
		}
	}
	for _, mount := range mounts {
		if mount.Type == define.TypeTmpfs {
			tmpfs[mount.Destination] = strings.Join(mount.Options, ",")
		} else {
			// TODO - maybe we should parse for empty source/destination
			// here. Would be confusing if we print just a bare colon.
			if len(mount.Options) > 0 {
				binds = append(binds, fmt.Sprintf("%s:%s:%s", mount.Source, mount.Destination, strings.Join(mount.Options, ",")))
			} else {
				binds = append(binds, fmt.Sprintf("%s:%s", mount.Source, mount.Destination))
			}
		}
	}
	hostConfig.Binds = binds
	hostConfig.Tmpfs = tmpfs

	// Network mode parsing.
	networkMode := c.NetworkMode()
	hostConfig.NetworkMode = networkMode

	// Port bindings.
	// Only populate if we are creating the network namespace to configure the network.
	if c.config.CreateNetNS {
		hostConfig.PortBindings = makeInspectPortBindings(c.config.PortMappings)
	} else {
		hostConfig.PortBindings = make(map[string][]define.InspectHostPort)
	}

	// Ulimits
	hostConfig.Ulimits = []define.InspectUlimit{}
	if ctrSpec.Process != nil {
		for _, limit := range ctrSpec.Process.Rlimits {
			newLimit := define.InspectUlimit{}
			newLimit.Name = limit.Type
			newLimit.Soft = int64(limit.Soft)
			newLimit.Hard = int64(limit.Hard)
			hostConfig.Ulimits = append(hostConfig.Ulimits, newLimit)
		}
	}

	// Terminal size
	// We can't actually get this for now...
	// So default to something sane.
	// TODO: Populate this.
	hostConfig.ConsoleSize = []uint{0, 0}

	return hostConfig, nil
}
606

607
// Return true if the container is running in the host's PID NS.
608
func (c *Container) inHostPidNS() (bool, error) {
609
	if c.config.PIDNsCtr != "" {
610
		return false, nil
611
	}
612
	ctrSpec, err := c.specFromState()
613
	if err != nil {
614
		return false, err
615
	}
616
	if ctrSpec.Linux != nil {
617
		// Locate the spec's PID namespace.
618
		// If there is none, it's pid=host.
619
		// If there is one and it has a path, it's "ns:".
620
		// If there is no path, it's default - the empty string.
621
		for _, ns := range ctrSpec.Linux.Namespaces {
622
			if ns.Type == spec.PIDNamespace {
623
				return false, nil
624
			}
625
		}
626
	}
627
	return true, nil
628
}
629

630
// GetDevices maps the devices in the container's spec to their host paths.
// Privileged containers (priv == true) report no explicit devices. The
// deviceNodes map (major:minor -> host path) is enumerated lazily on first
// need if the caller passes nil; devices with no matching host node are
// logged and skipped.
func (c *Container) GetDevices(priv bool, ctrSpec spec.Spec, deviceNodes map[string]string) ([]define.InspectDevice, error) {
	devices := []define.InspectDevice{}
	if priv || ctrSpec.Linux == nil {
		return devices, nil
	}
	for _, dev := range ctrSpec.Linux.Devices {
		key := fmt.Sprintf("%d:%d", dev.Major, dev.Minor)
		if deviceNodes == nil {
			nodes, err := util.FindDeviceNodes()
			if err != nil {
				return nil, err
			}
			deviceNodes = nodes
		}
		hostPath, ok := deviceNodes[key]
		if !ok {
			logrus.Warnf("Could not locate device %s on host", key)
			continue
		}
		devices = append(devices, define.InspectDevice{
			PathOnHost:      hostPath,
			PathInContainer: dev.Path,
		})
	}
	return devices, nil
}
655

656
// blkioDeviceThrottle converts spec throttle devices (identified by
// major:minor) into inspect entries carrying the host device path and rate.
// The deviceNodes map is enumerated lazily when nil; devices without a
// matching host node are logged at info level and skipped.
func blkioDeviceThrottle(deviceNodes map[string]string, devs []spec.LinuxThrottleDevice) ([]define.InspectBlkioThrottleDevice, error) {
	out := []define.InspectBlkioThrottleDevice{}
	for _, dev := range devs {
		key := fmt.Sprintf("%d:%d", dev.Major, dev.Minor)
		if deviceNodes == nil {
			nodes, err := util.FindDeviceNodes()
			if err != nil {
				return nil, err
			}
			deviceNodes = nodes
		}
		hostPath, ok := deviceNodes[key]
		if !ok {
			logrus.Infof("Could not locate throttle device %s in system devices", key)
			continue
		}
		out = append(out, define.InspectBlkioThrottleDevice{
			Path: hostPath,
			Rate: dev.Rate,
		})
	}
	return out, nil
}
679

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.