// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

package qemu

import (
	"encoding/json"
	"fmt"
	"io"
	"math"
	"net"
	"os"
	"os/exec"
	"runtime"
	"strconv"
	"strings"
	"syscall"

	"github.com/google/uuid"
	"github.com/hashicorp/go-multierror"
	"github.com/siderolabs/gen/xslices"
	"github.com/siderolabs/go-procfs/procfs"

	"github.com/siderolabs/talos/pkg/machinery/constants"
	"github.com/siderolabs/talos/pkg/machinery/kernel"
	"github.com/siderolabs/talos/pkg/provision"
	"github.com/siderolabs/talos/pkg/provision/providers/vm"
)

//nolint:gocyclo,cyclop
func (p *provisioner) createNode(state *vm.State, clusterReq provision.ClusterRequest, nodeReq provision.NodeRequest, opts *provision.Options) (provision.NodeInfo, error) {
	arch := Arch(opts.TargetArch)
	pidPath := state.GetRelativePath(fmt.Sprintf("%s.pid", nodeReq.Name))

	var pflashImages []string

	if pflashSpec := arch.PFlash(opts.UEFIEnabled, opts.ExtraUEFISearchPaths); pflashSpec != nil {
		var err error

		if pflashImages, err = p.createPFlashImages(state, nodeReq.Name, pflashSpec); err != nil {
			return provision.NodeInfo{}, fmt.Errorf("error creating flash images: %w", err)
		}
	}

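	// nodeReq.NanoCPUs is expressed in billionths of a CPU: convert it to a whole
	// vCPU count (rounding halves to even) and fall back to a single vCPU when the
	// result is below two.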
	vcpuCount := int64(math.RoundToEven(float64(nodeReq.NanoCPUs) / 1000 / 1000 / 1000))
	if vcpuCount < 2 {
		vcpuCount = 1
	}

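	// nodeReq.Memory is in bytes, while LaunchConfig.MemSize is in MiB.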
	memSize := nodeReq.Memory / 1024 / 1024

	diskPaths, err := p.CreateDisks(state, nodeReq)
	if err != nil {
		return provision.NodeInfo{}, err
	}

	err = p.populateSystemDisk(diskPaths, clusterReq)
	if err != nil {
		return provision.NodeInfo{}, err
	}

	logFile, err := os.OpenFile(state.GetRelativePath(fmt.Sprintf("%s.log", nodeReq.Name)), os.O_APPEND|os.O_CREATE|os.O_RDWR, 0o666)
	if err != nil {
		return provision.NodeInfo{}, err
	}

	defer logFile.Close() //nolint:errcheck

	cmdline := procfs.NewCmdline("")

	cmdline.SetAll(kernel.DefaultArgs)

	// required to get kernel console
	cmdline.Append("console", arch.Console())

	// reboot configuration
	cmdline.Append("reboot", "k")
	cmdline.Append("panic", "1")
	cmdline.Append("talos.shutdown", "halt")

	// Talos config
	cmdline.Append("talos.platform", constants.PlatformMetal)

	// add overrides
	if nodeReq.ExtraKernelArgs != nil {
		if err = cmdline.AppendAll(nodeReq.ExtraKernelArgs.Strings()); err != nil {
			return provision.NodeInfo{}, err
		}
	}

	var nodeConfig string

	if !nodeReq.SkipInjectingConfig {
		cmdline.Append("talos.config", "{TALOS_CONFIG_URL}") // to be patched by launcher

		nodeConfig, err = nodeReq.Config.EncodeString()
		if err != nil {
			return provision.NodeInfo{}, err
		}
	}

	nodeUUID := uuid.New()
	if nodeReq.UUID != nil {
		nodeUUID = *nodeReq.UUID
	}

	apiPort, err := p.findBridgeListenPort(clusterReq)
	if err != nil {
		return provision.NodeInfo{}, fmt.Errorf("error finding listen address for the API: %w", err)
	}

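	// QEMU boot order string: "c" boots from the first hard disk, "n" falls back to network (PXE) boot.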
	defaultBootOrder := "cn"
	if nodeReq.DefaultBootOrder != "" {
		defaultBootOrder = nodeReq.DefaultBootOrder
	}

	// backwards compatibility, set Driver if not set
	for i := range nodeReq.Disks {
		if nodeReq.Disks[i].Driver != "" {
			continue
		}

		if i == 0 {
			nodeReq.Disks[i].Driver = "virtio"
		} else {
			nodeReq.Disks[i].Driver = "ide"
		}
	}

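	// everything the VM needs is collected into a LaunchConfig, which is later
	// serialized and handed to the detached "qemu-launch" sub-process via stdin.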
	launchConfig := LaunchConfig{
		QemuExecutable: arch.QemuExecutable(),
		DiskPaths:      diskPaths,
		DiskDrivers: xslices.Map(nodeReq.Disks, func(disk *provision.Disk) string {
			return disk.Driver
		}),
		VCPUCount:         vcpuCount,
		MemSize:           memSize,
		KernelArgs:        cmdline.String(),
		MachineType:       arch.QemuMachine(),
		PFlashImages:      pflashImages,
		MonitorPath:       state.GetRelativePath(fmt.Sprintf("%s.monitor", nodeReq.Name)),
		EnableKVM:         opts.TargetArch == runtime.GOARCH,
		BadRTC:            nodeReq.BadRTC,
		DefaultBootOrder:  defaultBootOrder,
		BootloaderEnabled: opts.BootloaderEnabled,
		NodeUUID:          nodeUUID,
		Config:            nodeConfig,
		BridgeName:        state.BridgeName,
		NetworkConfig:     state.VMCNIConfig,
		CNI:               clusterReq.Network.CNI,
		CIDRs:             clusterReq.Network.CIDRs,
		NoMasqueradeCIDRs: clusterReq.Network.NoMasqueradeCIDRs,
		IPs:               nodeReq.IPs,
		GatewayAddrs:      clusterReq.Network.GatewayAddrs,
		MTU:               clusterReq.Network.MTU,
		Nameservers:       clusterReq.Network.Nameservers,
		TFTPServer:        nodeReq.TFTPServer,
		IPXEBootFileName:  nodeReq.IPXEBootFilename,
		APIPort:           apiPort,
	}

	if clusterReq.IPXEBootScript != "" {
		launchConfig.TFTPServer = clusterReq.Network.GatewayAddrs[0].String()
		launchConfig.IPXEBootFileName = fmt.Sprintf("ipxe/%s/snp.efi", string(arch))
	}

	nodeInfo := provision.NodeInfo{
		ID:   pidPath,
		UUID: nodeUUID,
		Name: nodeReq.Name,
		Type: nodeReq.Type,

		NanoCPUs: nodeReq.NanoCPUs,
		Memory:   nodeReq.Memory,
		DiskSize: nodeReq.Disks[0].Size,

		IPs: nodeReq.IPs,

		APIPort: apiPort,
	}

	if opts.TPM2Enabled {
		tpm2, tpm2Err := p.createVirtualTPM2State(state, nodeReq.Name)
		if tpm2Err != nil {
			return provision.NodeInfo{}, tpm2Err
		}

		launchConfig.TPM2Config = tpm2
		nodeInfo.TPM2StateDir = tpm2.StateDir
	}

	if !clusterReq.Network.DHCPSkipHostname {
		launchConfig.Hostname = nodeReq.Name
	}

	if !(nodeReq.PXEBooted || launchConfig.IPXEBootFileName != "") {
		launchConfig.KernelImagePath = strings.ReplaceAll(clusterReq.KernelPath, constants.ArchVariable, opts.TargetArch)
		launchConfig.InitrdPath = strings.ReplaceAll(clusterReq.InitramfsPath, constants.ArchVariable, opts.TargetArch)
		launchConfig.ISOPath = strings.ReplaceAll(clusterReq.ISOPath, constants.ArchVariable, opts.TargetArch)
	}

	launchConfig.StatePath, err = state.StatePath()
	if err != nil {
		return provision.NodeInfo{}, err
	}

	launchConfigFile, err := os.Create(state.GetRelativePath(fmt.Sprintf("%s.config", nodeReq.Name)))
	if err != nil {
		return provision.NodeInfo{}, err
	}

	if err = json.NewEncoder(launchConfigFile).Encode(&launchConfig); err != nil {
		return provision.NodeInfo{}, err
	}

	if _, err = launchConfigFile.Seek(0, io.SeekStart); err != nil {
		return provision.NodeInfo{}, err
	}

	defer launchConfigFile.Close() //nolint:errcheck

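	// re-exec the current binary with the "qemu-launch" sub-command: it reads the
	// serialized launch config from stdin and logs to the per-node log file.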
	cmd := exec.Command(clusterReq.SelfExecutable, "qemu-launch")
	cmd.Stdout = logFile
	cmd.Stderr = logFile
	cmd.Stdin = launchConfigFile
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Setsid: true, // daemonize
	}

	if err = cmd.Start(); err != nil {
		return provision.NodeInfo{}, err
	}

	if err = os.WriteFile(pidPath, []byte(strconv.Itoa(cmd.Process.Pid)), os.ModePerm); err != nil {
		return provision.NodeInfo{}, fmt.Errorf("error writing PID file: %w", err)
	}

	// no need to wait here, as cmd has all the Stdin/out/err via *os.File

	return nodeInfo, nil
}

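// createNodes starts createNode for every node request concurrently and collects
// the resulting node infos and errors.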
func (p *provisioner) createNodes(state *vm.State, clusterReq provision.ClusterRequest, nodeReqs []provision.NodeRequest, opts *provision.Options) ([]provision.NodeInfo, error) {
	errCh := make(chan error)
	nodeCh := make(chan provision.NodeInfo, len(nodeReqs))

	for _, nodeReq := range nodeReqs {
		go func(nodeReq provision.NodeRequest) {
			nodeInfo, err := p.createNode(state, clusterReq, nodeReq, opts)
			if err == nil {
				nodeCh <- nodeInfo
			}

			errCh <- err
		}(nodeReq)
	}

	var multiErr *multierror.Error

	for range nodeReqs {
		multiErr = multierror.Append(multiErr, <-errCh)
	}

	close(nodeCh)

	nodesInfo := make([]provision.NodeInfo, 0, len(nodeReqs))

	for nodeInfo := range nodeCh {
		nodesInfo = append(nodesInfo, nodeInfo)
	}

	return nodesInfo, multiErr.ErrorOrNil()
}

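// findBridgeListenPort asks the kernel for a free TCP port on the bridge gateway
// address by listening on port 0 and immediately closing the listener.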
func (p *provisioner) findBridgeListenPort(clusterReq provision.ClusterRequest) (int, error) {
	l, err := net.Listen("tcp", net.JoinHostPort(clusterReq.Network.GatewayAddrs[0].String(), "0"))
	if err != nil {
		return 0, err
	}

	port := l.Addr().(*net.TCPAddr).Port

	return port, l.Close()
}

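// populateSystemDisk copies the pre-built disk image (if one was provided) over the
// first (system) disk of the node.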
func (p *provisioner) populateSystemDisk(disks []string, clusterReq provision.ClusterRequest) error {
	if len(disks) > 0 && clusterReq.DiskImagePath != "" {
		disk, err := os.OpenFile(disks[0], os.O_RDWR, 0o755)
		if err != nil {
			return err
		}
		defer disk.Close() //nolint:errcheck

		image, err := os.Open(clusterReq.DiskImagePath)
		if err != nil {
			return err
		}
		defer image.Close() //nolint:errcheck

		_, err = io.Copy(disk, image)

		return err
	}

	return nil
}
