qemu / tpm_spapr.c (430 lines, 12.1 KB)

/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtual TPM
 *
 * Copyright (c) 2015, 2017, 2019 IBM Corporation.
 *
 * Authors:
 *    Stefan Berger <stefanb@linux.vnet.ibm.com>
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

#include "sysemu/tpm_backend.h"
#include "sysemu/tpm_util.h"
#include "tpm_prop.h"

#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "trace.h"
#include "qom/object.h"

#define DEBUG_SPAPR 0

typedef struct SpaprTpmState SpaprTpmState;
DECLARE_INSTANCE_CHECKER(SpaprTpmState, VIO_SPAPR_VTPM,
                         TYPE_TPM_SPAPR)

typedef struct TpmCrq {
    uint8_t valid;  /* 0x80: cmd; 0xc0: init crq */
                    /* 0x81-0x83: CRQ message response */
    uint8_t msg;    /* see below */
    uint16_t len;   /* len of TPM request; len of TPM response */
    uint32_t data;  /* rtce_dma_handle when sending TPM request */
    uint64_t reserved;
} TpmCrq;

#define SPAPR_VTPM_VALID_INIT_CRQ_COMMAND  0xC0
#define SPAPR_VTPM_VALID_COMMAND           0x80
#define SPAPR_VTPM_MSG_RESULT              0x80

/* msg types for valid = SPAPR_VTPM_VALID_INIT_CRQ */
#define SPAPR_VTPM_INIT_CRQ_RESULT           0x1
#define SPAPR_VTPM_INIT_CRQ_COMPLETE_RESULT  0x2

/* msg types for valid = SPAPR_VTPM_VALID_CMD */
#define SPAPR_VTPM_GET_VERSION               0x1
#define SPAPR_VTPM_TPM_COMMAND               0x2
#define SPAPR_VTPM_GET_RTCE_BUFFER_SIZE      0x3
#define SPAPR_VTPM_PREPARE_TO_SUSPEND        0x4

/* response error messages */
#define SPAPR_VTPM_VTPM_ERROR                0xff

/* error codes */
#define SPAPR_VTPM_ERR_COPY_IN_FAILED        0x3
#define SPAPR_VTPM_ERR_COPY_OUT_FAILED       0x4

#define TPM_SPAPR_BUFFER_MAX                 4096

struct SpaprTpmState {
    SpaprVioDevice vdev;

    TpmCrq crq; /* track single TPM command */

    uint8_t state;
#define SPAPR_VTPM_STATE_NONE         0
#define SPAPR_VTPM_STATE_EXECUTION    1
#define SPAPR_VTPM_STATE_COMPLETION   2

    unsigned char *buffer;

    uint32_t numbytes; /* number of bytes to deliver on resume */

    TPMBackendCmd cmd;

    TPMBackend *be_driver;
    TPMVersion be_tpm_version;

    size_t be_buffer_size;
};

/*
 * Send a request to the TPM.
 */
static void tpm_spapr_tpm_send(SpaprTpmState *s)
{
    tpm_util_show_buffer(s->buffer, s->be_buffer_size, "To TPM");

    s->state = SPAPR_VTPM_STATE_EXECUTION;
    s->cmd = (TPMBackendCmd) {
        .locty = 0,
        .in = s->buffer,
        .in_len = MIN(tpm_cmd_get_size(s->buffer), s->be_buffer_size),
        .out = s->buffer,
        .out_len = s->be_buffer_size,
    };

    tpm_backend_deliver_request(s->be_driver, &s->cmd);
}

static int tpm_spapr_process_cmd(SpaprTpmState *s, uint64_t dataptr)
{
    long rc;

    /* a max. of be_buffer_size bytes can be transported */
    rc = spapr_vio_dma_read(&s->vdev, dataptr,
                            s->buffer, s->be_buffer_size);
    if (rc) {
        error_report("tpm_spapr_got_payload: DMA read failure");
    }
    /* let vTPM handle any malformed request */
    tpm_spapr_tpm_send(s);

    return rc;
}

static inline int spapr_tpm_send_crq(struct SpaprVioDevice *dev, TpmCrq *crq)
{
    return spapr_vio_send_crq(dev, (uint8_t *)crq);
}
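
/*
 * Illustrative sketch of the request CRQ a guest driver enqueues for a TPM
 * command, as tpm_spapr_do_crq() below consumes it; cmd_len and dma_handle
 * are placeholders, and multi-byte fields travel big-endian on the CRQ:
 *
 *     TpmCrq req = {
 *         .valid = SPAPR_VTPM_VALID_COMMAND,   // 0x80
 *         .msg   = SPAPR_VTPM_TPM_COMMAND,     // 0x2
 *         .len   = cpu_to_be16(cmd_len),       // length of the marshalled TPM command
 *         .data  = cpu_to_be32(dma_handle),    // RTCE DMA handle of the request buffer
 *     };
 *
 * The device DMA-reads the command from .data, forwards it to the backend,
 * and answers with a response CRQ whose msg has SPAPR_VTPM_MSG_RESULT
 * or'ed in.
 */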

static int tpm_spapr_do_crq(struct SpaprVioDevice *dev, uint8_t *crq_data)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(dev);
    TpmCrq local_crq;
    TpmCrq *crq = &s->crq; /* requests only */
    int rc;
    uint8_t valid = crq_data[0];
    uint8_t msg = crq_data[1];

    trace_tpm_spapr_do_crq(valid, msg);

    switch (valid) {
    case SPAPR_VTPM_VALID_INIT_CRQ_COMMAND: /* Init command/response */

        /* Respond to initialization request */
        switch (msg) {
        case SPAPR_VTPM_INIT_CRQ_RESULT:
            trace_tpm_spapr_do_crq_crq_result();
            memset(&local_crq, 0, sizeof(local_crq));
            local_crq.valid = SPAPR_VTPM_VALID_INIT_CRQ_COMMAND;
            local_crq.msg = SPAPR_VTPM_INIT_CRQ_RESULT;
            spapr_tpm_send_crq(dev, &local_crq);
            break;

        case SPAPR_VTPM_INIT_CRQ_COMPLETE_RESULT:
            trace_tpm_spapr_do_crq_crq_complete_result();
            memset(&local_crq, 0, sizeof(local_crq));
            local_crq.valid = SPAPR_VTPM_VALID_INIT_CRQ_COMMAND;
            local_crq.msg = SPAPR_VTPM_INIT_CRQ_COMPLETE_RESULT;
            spapr_tpm_send_crq(dev, &local_crq);
            break;
        }

        break;
    case SPAPR_VTPM_VALID_COMMAND: /* Payloads */
        switch (msg) {
        case SPAPR_VTPM_TPM_COMMAND:
            trace_tpm_spapr_do_crq_tpm_command();
            if (s->state == SPAPR_VTPM_STATE_EXECUTION) {
                return H_BUSY;
            }
            memcpy(crq, crq_data, sizeof(*crq));

            rc = tpm_spapr_process_cmd(s, be32_to_cpu(crq->data));

            if (rc == H_SUCCESS) {
                crq->valid = be16_to_cpu(0);
            } else {
                local_crq.valid = SPAPR_VTPM_MSG_RESULT;
                local_crq.msg = SPAPR_VTPM_VTPM_ERROR;
                local_crq.len = cpu_to_be16(0);
                local_crq.data = cpu_to_be32(SPAPR_VTPM_ERR_COPY_IN_FAILED);
                spapr_tpm_send_crq(dev, &local_crq);
            }
            break;

        case SPAPR_VTPM_GET_RTCE_BUFFER_SIZE:
            trace_tpm_spapr_do_crq_tpm_get_rtce_buffer_size(s->be_buffer_size);
            local_crq.valid = SPAPR_VTPM_VALID_COMMAND;
            local_crq.msg = SPAPR_VTPM_GET_RTCE_BUFFER_SIZE |
                            SPAPR_VTPM_MSG_RESULT;
            local_crq.len = cpu_to_be16(s->be_buffer_size);
            spapr_tpm_send_crq(dev, &local_crq);
            break;

        case SPAPR_VTPM_GET_VERSION:
            local_crq.valid = SPAPR_VTPM_VALID_COMMAND;
            local_crq.msg = SPAPR_VTPM_GET_VERSION | SPAPR_VTPM_MSG_RESULT;
            local_crq.len = cpu_to_be16(0);
            switch (s->be_tpm_version) {
            case TPM_VERSION_1_2:
                local_crq.data = cpu_to_be32(1);
                break;
            case TPM_VERSION_2_0:
                local_crq.data = cpu_to_be32(2);
                break;
            default:
                g_assert_not_reached();
                break;
            }
            trace_tpm_spapr_do_crq_get_version(be32_to_cpu(local_crq.data));
            spapr_tpm_send_crq(dev, &local_crq);
            break;

        case SPAPR_VTPM_PREPARE_TO_SUSPEND:
            trace_tpm_spapr_do_crq_prepare_to_suspend();
            local_crq.valid = SPAPR_VTPM_VALID_COMMAND;
            local_crq.msg = SPAPR_VTPM_PREPARE_TO_SUSPEND |
                            SPAPR_VTPM_MSG_RESULT;
            spapr_tpm_send_crq(dev, &local_crq);
            break;

        default:
            trace_tpm_spapr_do_crq_unknown_msg_type(crq->msg);
        }
        break;
    default:
        trace_tpm_spapr_do_crq_unknown_crq(valid, msg);
    };

    return H_SUCCESS;
}

static void tpm_spapr_request_completed(TPMIf *ti, int ret)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(ti);
    TpmCrq *crq = &s->crq;
    uint32_t len;
    int rc;

    s->state = SPAPR_VTPM_STATE_COMPLETION;

    /* a max. of be_buffer_size bytes can be transported */
    len = MIN(tpm_cmd_get_size(s->buffer), s->be_buffer_size);

    if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
        trace_tpm_spapr_caught_response(len);
        /* defer delivery of response until .post_load */
        s->numbytes = len;
        return;
    }

    rc = spapr_vio_dma_write(&s->vdev, be32_to_cpu(crq->data),
                             s->buffer, len);

    tpm_util_show_buffer(s->buffer, len, "From TPM");

    crq->valid = SPAPR_VTPM_MSG_RESULT;
    if (rc == H_SUCCESS) {
        crq->msg = SPAPR_VTPM_TPM_COMMAND | SPAPR_VTPM_MSG_RESULT;
        crq->len = cpu_to_be16(len);
    } else {
        error_report("%s: DMA write failure", __func__);
        crq->msg = SPAPR_VTPM_VTPM_ERROR;
        crq->len = cpu_to_be16(0);
        crq->data = cpu_to_be32(SPAPR_VTPM_ERR_COPY_OUT_FAILED);
    }

    rc = spapr_tpm_send_crq(&s->vdev, crq);
    if (rc) {
        error_report("%s: Error sending response", __func__);
    }
}

static int tpm_spapr_do_startup_tpm(SpaprTpmState *s, size_t buffersize)
{
    return tpm_backend_startup_tpm(s->be_driver, buffersize);
}

static const char *tpm_spapr_get_dt_compatible(SpaprVioDevice *dev)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(dev);

    switch (s->be_tpm_version) {
    case TPM_VERSION_1_2:
        return "IBM,vtpm";
    case TPM_VERSION_2_0:
        return "IBM,vtpm20";
    default:
        g_assert_not_reached();
    }
}

static void tpm_spapr_reset(SpaprVioDevice *dev)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(dev);

    s->state = SPAPR_VTPM_STATE_NONE;
    s->numbytes = 0;

    s->be_tpm_version = tpm_backend_get_tpm_version(s->be_driver);

    s->be_buffer_size = MIN(tpm_backend_get_buffer_size(s->be_driver),
                            TPM_SPAPR_BUFFER_MAX);

    tpm_backend_reset(s->be_driver);

    if (tpm_spapr_do_startup_tpm(s, s->be_buffer_size) < 0) {
        exit(1);
    }
}

static enum TPMVersion tpm_spapr_get_version(TPMIf *ti)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(ti);

    if (tpm_backend_had_startup_error(s->be_driver)) {
        return TPM_VERSION_UNSPEC;
    }

    return tpm_backend_get_tpm_version(s->be_driver);
}

/* persistent state handling */

static int tpm_spapr_pre_save(void *opaque)
{
    SpaprTpmState *s = opaque;

    tpm_backend_finish_sync(s->be_driver);
    /*
     * we cannot deliver the results to the VM since DMA would touch VM memory
     */

    return 0;
}

static int tpm_spapr_post_load(void *opaque, int version_id)
{
    SpaprTpmState *s = opaque;

    if (s->numbytes) {
        trace_tpm_spapr_post_load();
        /* deliver the results to the VM via DMA */
        tpm_spapr_request_completed(TPM_IF(s), 0);
        s->numbytes = 0;
    }

    return 0;
}

static const VMStateDescription vmstate_spapr_vtpm = {
    .name = "tpm-spapr",
    .pre_save = tpm_spapr_pre_save,
    .post_load = tpm_spapr_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_SPAPR_VIO(vdev, SpaprTpmState),

        VMSTATE_UINT8(state, SpaprTpmState),
        VMSTATE_UINT32(numbytes, SpaprTpmState),
        VMSTATE_VBUFFER_UINT32(buffer, SpaprTpmState, 0, NULL, numbytes),
        /* remember DMA address */
        VMSTATE_UINT32(crq.data, SpaprTpmState),
        VMSTATE_END_OF_LIST(),
    }
};

static Property tpm_spapr_properties[] = {
    DEFINE_SPAPR_PROPERTIES(SpaprTpmState, vdev),
    DEFINE_PROP_TPMBE("tpmdev", SpaprTpmState, be_driver),
    DEFINE_PROP_END_OF_LIST(),
};

static void tpm_spapr_realizefn(SpaprVioDevice *dev, Error **errp)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(dev);

    if (!tpm_find()) {
        error_setg(errp, "at most one TPM device is permitted");
        return;
    }

    dev->crq.SendFunc = tpm_spapr_do_crq;

    if (!s->be_driver) {
        error_setg(errp, "'tpmdev' property is required");
        return;
    }
    s->buffer = g_malloc(TPM_SPAPR_BUFFER_MAX);
}

static void tpm_spapr_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass);
    TPMIfClass *tc = TPM_IF_CLASS(klass);

    k->realize = tpm_spapr_realizefn;
    k->reset = tpm_spapr_reset;
    k->dt_name = "vtpm";
    k->dt_type = "IBM,vtpm";
    k->get_dt_compatible = tpm_spapr_get_dt_compatible;
    k->signal_mask = 0x00000001;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    device_class_set_props(dc, tpm_spapr_properties);
    k->rtce_window_size = 0x10000000;
    dc->vmsd = &vmstate_spapr_vtpm;

    tc->model = TPM_MODEL_TPM_SPAPR;
    tc->get_version = tpm_spapr_get_version;
    tc->request_completed = tpm_spapr_request_completed;
}

static const TypeInfo tpm_spapr_info = {
    .name          = TYPE_TPM_SPAPR,
    .parent        = TYPE_VIO_SPAPR_DEVICE,
    .instance_size = sizeof(SpaprTpmState),
    .class_init    = tpm_spapr_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_TPM_IF },
        { }
    }
};

static void tpm_spapr_register_types(void)
{
    type_register_static(&tpm_spapr_info);
}

type_init(tpm_spapr_register_types)
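
/*
 * Typical invocation with a swtpm emulator backend on a pSeries guest
 * (illustrative sketch; ids and paths are placeholders, unrelated options
 * elided):
 *
 *     swtpm socket --tpm2 --tpmstate dir=/tmp/mytpm1 \
 *         --ctrl type=unixio,path=/tmp/mytpm1/swtpm-sock
 *
 *     qemu-system-ppc64 -machine pseries ... \
 *         -chardev socket,id=chrtpm,path=/tmp/mytpm1/swtpm-sock \
 *         -tpmdev emulator,id=tpm0,chardev=chrtpm \
 *         -device tpm-spapr,tpmdev=tpm0
 */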