cpu_hotplug.c

/*
 * QEMU ACPI hotplug utilities
 *
 * Copyright (C) 2013 Red Hat Inc
 *
 * Authors:
 *   Igor Mammedov <imammedo@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "hw/acpi/cpu_hotplug.h"
#include "qapi/error.h"
#include "hw/core/cpu.h"
#include "hw/i386/x86.h"
#include "hw/pci/pci_device.h"
#include "qemu/error-report.h"

#define CPU_EJECT_METHOD "CPEJ"
#define CPU_MAT_METHOD "CPMA"
#define CPU_ON_BITMAP "CPON"
#define CPU_STATUS_METHOD "CPST"
#define CPU_STATUS_MAP "PRS"
#define CPU_SCAN_METHOD "PRSC"

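/*
 * Read handler for the legacy CPU hotplug register block: returns one byte
 * of the CPU present bitmap (sts[]) at the given offset.
 */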
static uint64_t cpu_status_read(void *opaque, hwaddr addr, unsigned int size)
{
    AcpiCpuHotplug *cpus = opaque;
    uint64_t val = cpus->sts[addr];

    return val;
}

static void cpu_status_write(void *opaque, hwaddr addr, uint64_t data,
                             unsigned int size)
{
    /* firmware never used to write in CPU present bitmap so use
       this fact as means to switch QEMU into modern CPU hotplug
       mode by writing 0 at the beginning of legacy CPU bitmap
     */
    if (addr == 0 && data == 0) {
        AcpiCpuHotplug *cpus = opaque;
        object_property_set_bool(cpus->device, "cpu-hotplug-legacy", false,
                                 &error_abort);
    }
}

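/*
 * Guest accesses of 1..4 bytes are declared valid, but .impl limits the
 * callbacks to single bytes, so wider accesses are split by the memory core.
 */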
static const MemoryRegionOps AcpiCpuHotplug_ops = {
    .read = cpu_status_read,
    .write = cpu_status_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .impl = {
        .max_access_size = 1,
    },
};

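/*
 * Mark a CPU as present in the legacy bitmap. If its arch (APIC) ID does
 * not fit into the ACPI_GPE_PROC_LEN-byte bitmap, switch the owning device
 * to the modern hotplug interface instead and report that via
 * *swtchd_to_modern.
 */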
static void acpi_set_cpu_present_bit(AcpiCpuHotplug *g, CPUState *cpu,
                                     bool *swtchd_to_modern)
{
    CPUClass *k = CPU_GET_CLASS(cpu);
    int64_t cpu_id;

    cpu_id = k->get_arch_id(cpu);
    if ((cpu_id / 8) >= ACPI_GPE_PROC_LEN) {
        object_property_set_bool(g->device, "cpu-hotplug-legacy", false,
                                 &error_abort);
        *swtchd_to_modern = true;
        return;
    }

    *swtchd_to_modern = false;
    g->sts[cpu_id / 8] |= (1 << (cpu_id % 8));
}

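/*
 * Legacy plug callback: set the present bit and notify the guest with an
 * ACPI CPU hotplug event, or forward the request to the modern interface
 * when the legacy bitmap could not hold this CPU.
 */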
void legacy_acpi_cpu_plug_cb(HotplugHandler *hotplug_dev,
                             AcpiCpuHotplug *g, DeviceState *dev, Error **errp)
{
    bool swtchd_to_modern;
    Error *local_err = NULL;

    acpi_set_cpu_present_bit(g, CPU(dev), &swtchd_to_modern);
    if (swtchd_to_modern) {
        /* propagate the hotplug to the modern interface */
        hotplug_handler_plug(hotplug_dev, dev, &local_err);
    } else {
        acpi_send_event(DEVICE(hotplug_dev), ACPI_CPU_HOTPLUG_STATUS);
    }
}

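/*
 * Map the legacy CPU hotplug register block at 'base' inside 'parent' and
 * seed the present bitmap from the CPUs that already exist at init time.
 */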
void legacy_acpi_cpu_hotplug_init(MemoryRegion *parent, Object *owner,
                                  AcpiCpuHotplug *gpe_cpu, uint16_t base)
{
    CPUState *cpu;
    bool swtchd_to_modern;

    memory_region_init_io(&gpe_cpu->io, owner, &AcpiCpuHotplug_ops,
                          gpe_cpu, "acpi-cpu-hotplug", ACPI_GPE_PROC_LEN);
    memory_region_add_subregion(parent, base, &gpe_cpu->io);
    gpe_cpu->device = owner;

    CPU_FOREACH(cpu) {
        acpi_set_cpu_present_bit(gpe_cpu, cpu, &swtchd_to_modern);
    }
}

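/*
 * Tear down the legacy register block and initialize the modern CPU hotplug
 * interface at 'io_port' in the same I/O address space.
 */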
void acpi_switch_to_modern_cphp(AcpiCpuHotplug *gpe_cpu,
                                CPUHotplugState *cpuhp_state,
                                uint16_t io_port)
{
    MemoryRegion *parent = pci_address_space_io(PCI_DEVICE(gpe_cpu->device));

    memory_region_del_subregion(parent, &gpe_cpu->io);
    cpu_hotplug_hw_init(parent, gpe_cpu->device, cpuhp_state, io_port);
}

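/*
 * Build the legacy CPU hotplug AML: helper methods (CPMA/CPST/CPEJ/PRSC),
 * the CPU hotplug resource device with its _CRS, the PRST operation region
 * and PRS field, per-CPU Processor objects, the NTFY notifier, the CPON
 * present-state package and the \_GPE._E02 handler that triggers a rescan.
 */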
void build_legacy_cpu_hotplug_aml(Aml *ctx, MachineState *machine,
                                  uint16_t io_base)
{
    Aml *dev;
    Aml *crs;
    Aml *pkg;
    Aml *field;
    Aml *method;
    Aml *if_ctx;
    Aml *else_ctx;
    int i, apic_idx;
    Aml *sb_scope = aml_scope("_SB");
    uint8_t madt_tmpl[8] = {0x00, 0x08, 0x00, 0x00, 0x00, 0, 0, 0};
    Aml *cpu_id = aml_arg(1);
    Aml *apic_id = aml_arg(0);
    Aml *cpu_on = aml_local(0);
    Aml *madt = aml_local(1);
    Aml *cpus_map = aml_name(CPU_ON_BITMAP);
    Aml *zero = aml_int(0);
    Aml *one = aml_int(1);
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(machine);
    X86MachineState *x86ms = X86_MACHINE(machine);

    /*
     * _MAT method - creates an madt apic buffer
     * apic_id = Arg0 = Local APIC ID
     * cpu_id  = Arg1 = Processor ID
     * cpu_on = Local0 = CPON flag for this cpu
     * madt = Local1 = Buffer (in madt apic form) to return
     */
    method = aml_method(CPU_MAT_METHOD, 2, AML_NOTSERIALIZED);
    aml_append(method,
        aml_store(aml_derefof(aml_index(cpus_map, apic_id)), cpu_on));
    aml_append(method,
        aml_store(aml_buffer(sizeof(madt_tmpl), madt_tmpl), madt));
    /* Update the processor id, lapic id, and enable/disable status */
    aml_append(method, aml_store(cpu_id, aml_index(madt, aml_int(2))));
    aml_append(method, aml_store(apic_id, aml_index(madt, aml_int(3))));
    aml_append(method, aml_store(cpu_on, aml_index(madt, aml_int(4))));
    aml_append(method, aml_return(madt));
    aml_append(sb_scope, method);

    /*
     * _STA method - return ON status of cpu
     * apic_id = Arg0 = Local APIC ID
     * cpu_on = Local0 = CPON flag for this cpu
     */
    method = aml_method(CPU_STATUS_METHOD, 1, AML_NOTSERIALIZED);
    aml_append(method,
        aml_store(aml_derefof(aml_index(cpus_map, apic_id)), cpu_on));
    if_ctx = aml_if(cpu_on);
    {
        aml_append(if_ctx, aml_return(aml_int(0xF)));
    }
    aml_append(method, if_ctx);
    else_ctx = aml_else();
    {
        aml_append(else_ctx, aml_return(zero));
    }
    aml_append(method, else_ctx);
    aml_append(sb_scope, method);

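    /*
     * CPEJ - called from each CPU's _EJ0; the legacy interface appears to
     * implement no real ejection here, the method only sleeps for 200 ms.
     */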
    method = aml_method(CPU_EJECT_METHOD, 2, AML_NOTSERIALIZED);
    aml_append(method, aml_sleep(200));
    aml_append(sb_scope, method);

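    /*
     * PRSC - scan method: walk the PRS hardware bitmap, compare each bit
     * with the cached CPON package, update CPON on change and notify the
     * matching processor object via NTFY (1 = device check, 3 = eject
     * request).
     */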
    method = aml_method(CPU_SCAN_METHOD, 0, AML_NOTSERIALIZED);
    {
        Aml *while_ctx, *if_ctx2, *else_ctx2;
        Aml *bus_check_evt = aml_int(1);
        Aml *remove_evt = aml_int(3);
        Aml *status_map = aml_local(5); /* Local5 = active cpu bitmap */
        Aml *byte = aml_local(2); /* Local2 = last read byte from bitmap */
        Aml *idx = aml_local(0); /* Processor ID / APIC ID iterator */
        Aml *is_cpu_on = aml_local(1); /* Local1 = CPON flag for cpu */
        Aml *status = aml_local(3); /* Local3 = active state for cpu */

        aml_append(method, aml_store(aml_name(CPU_STATUS_MAP), status_map));
        aml_append(method, aml_store(zero, byte));
        aml_append(method, aml_store(zero, idx));

        /* While (idx < SizeOf(CPON)) */
        while_ctx = aml_while(aml_lless(idx, aml_sizeof(cpus_map)));
        aml_append(while_ctx,
            aml_store(aml_derefof(aml_index(cpus_map, idx)), is_cpu_on));

        if_ctx = aml_if(aml_and(idx, aml_int(0x07), NULL));
        {
            /* Shift down previously read bitmap byte */
            aml_append(if_ctx, aml_shiftright(byte, one, byte));
        }
        aml_append(while_ctx, if_ctx);

        else_ctx = aml_else();
        {
            /* Read next byte from cpu bitmap */
            aml_append(else_ctx, aml_store(aml_derefof(aml_index(status_map,
                       aml_shiftright(idx, aml_int(3), NULL))), byte));
        }
        aml_append(while_ctx, else_ctx);

        aml_append(while_ctx, aml_store(aml_and(byte, one, NULL), status));
        if_ctx = aml_if(aml_lnot(aml_equal(is_cpu_on, status)));
        {
            /* State change - update CPON with new state */
            aml_append(if_ctx, aml_store(status, aml_index(cpus_map, idx)));
            if_ctx2 = aml_if(aml_equal(status, one));
            {
                aml_append(if_ctx2,
                    aml_call2(AML_NOTIFY_METHOD, idx, bus_check_evt));
            }
            aml_append(if_ctx, if_ctx2);
            else_ctx2 = aml_else();
            {
                aml_append(else_ctx2,
                    aml_call2(AML_NOTIFY_METHOD, idx, remove_evt));
            }
        }
        aml_append(if_ctx, else_ctx2);
        aml_append(while_ctx, if_ctx);

        aml_append(while_ctx, aml_increment(idx)); /* go to next cpu */
        aml_append(method, while_ctx);
    }
    aml_append(sb_scope, method);

    /* The current AML generator can cover the APIC ID range [0..255],
     * inclusive, for VCPU hotplug. */
    QEMU_BUILD_BUG_ON(ACPI_CPU_HOTPLUG_ID_LIMIT > 256);
    if (x86ms->apic_id_limit > ACPI_CPU_HOTPLUG_ID_LIMIT) {
        error_report("max_cpus is too large. APIC ID of last CPU is %u",
                     x86ms->apic_id_limit - 1);
        exit(1);
    }

    /* create PCI0.PRES device and its _CRS to reserve CPU hotplug MMIO */
    dev = aml_device("PCI0." stringify(CPU_HOTPLUG_RESOURCE_DEVICE));
    aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A06")));
    aml_append(dev,
        aml_name_decl("_UID", aml_string("CPU Hotplug resources"))
    );
    /* device present, functioning, decoding, not shown in UI */
    aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
    crs = aml_resource_template();
    aml_append(crs,
        aml_io(AML_DECODE16, io_base, io_base, 1, ACPI_GPE_PROC_LEN)
    );
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(sb_scope, dev);
    /* declare CPU hotplug MMIO region and PRS field to access it */
    aml_append(sb_scope, aml_operation_region(
        "PRST", AML_SYSTEM_IO, aml_int(io_base), ACPI_GPE_PROC_LEN));
    field = aml_field("PRST", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE);
    aml_append(field, aml_named_field("PRS", 256));
    aml_append(sb_scope, field);

    /* build Processor object for each processor */
    for (i = 0; i < apic_ids->len; i++) {
        int cpu_apic_id = apic_ids->cpus[i].arch_id;

        assert(cpu_apic_id < ACPI_CPU_HOTPLUG_ID_LIMIT);

        dev = aml_processor(i, 0, 0, "CP%.02X", cpu_apic_id);

        method = aml_method("_MAT", 0, AML_NOTSERIALIZED);
        aml_append(method,
            aml_return(aml_call2(CPU_MAT_METHOD,
                                 aml_int(cpu_apic_id), aml_int(i))
        ));
        aml_append(dev, method);

        method = aml_method("_STA", 0, AML_NOTSERIALIZED);
        aml_append(method,
            aml_return(aml_call1(CPU_STATUS_METHOD, aml_int(cpu_apic_id))));
        aml_append(dev, method);

        method = aml_method("_EJ0", 1, AML_NOTSERIALIZED);
        aml_append(method,
            aml_return(aml_call2(CPU_EJECT_METHOD, aml_int(cpu_apic_id),
                aml_arg(0)))
        );
        aml_append(dev, method);

        aml_append(sb_scope, dev);
    }

    /* build this code:
     *   Method(NTFY, 2) {If (LEqual(Arg0, 0x00)) {Notify(CP00, Arg1)} ...}
     */
    /* Arg0 = APIC ID */
    method = aml_method(AML_NOTIFY_METHOD, 2, AML_NOTSERIALIZED);
    for (i = 0; i < apic_ids->len; i++) {
        int cpu_apic_id = apic_ids->cpus[i].arch_id;

        if_ctx = aml_if(aml_equal(aml_arg(0), aml_int(cpu_apic_id)));
        aml_append(if_ctx,
            aml_notify(aml_name("CP%.02X", cpu_apic_id), aml_arg(1))
        );
        aml_append(method, if_ctx);
    }
    aml_append(sb_scope, method);

    /* build "Name(CPON, Package() { One, One, ..., Zero, Zero, ... })"
     *
     * Note: The ability to create variable-sized packages was first
     * introduced in ACPI 2.0. ACPI 1.0 only allowed fixed-size packages
     * with up to 255 elements. Windows guests up to win2k8 fail when
     * VarPackageOp is used.
     */
    pkg = x86ms->apic_id_limit <= 255 ? aml_package(x86ms->apic_id_limit) :
                                        aml_varpackage(x86ms->apic_id_limit);

    for (i = 0, apic_idx = 0; i < apic_ids->len; i++) {
        int cpu_apic_id = apic_ids->cpus[i].arch_id;

        for (; apic_idx < cpu_apic_id; apic_idx++) {
            aml_append(pkg, aml_int(0));
        }
        aml_append(pkg, aml_int(apic_ids->cpus[i].cpu ? 1 : 0));
        apic_idx = cpu_apic_id + 1;
    }
    aml_append(sb_scope, aml_name_decl(CPU_ON_BITMAP, pkg));
    aml_append(ctx, sb_scope);

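    /* \_GPE._E02 (GPE bit 2): rescan CPUs when the hotplug event fires */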
    method = aml_method("\\_GPE._E02", 0, AML_NOTSERIALIZED);
    aml_append(method, aml_call0("\\_SB." CPU_SCAN_METHOD));
    aml_append(ctx, method);
}