21
#include "qemu/osdep.h"
22
#include "migration/vmstate.h"
23
#include "hw/pci/pci_bridge.h"
24
#include "hw/pci/pcie.h"
25
#include "hw/pci/msix.h"
26
#include "hw/pci/msi.h"
27
#include "hw/pci/pci_bus.h"
28
#include "hw/pci/pcie_regs.h"
29
#include "pci-internal.h"
33
/* Debug tracing: compiled out unless DEBUG_PCIE is defined, so the
 * release build pays no cost for the printf machinery. */
#ifdef DEBUG_PCIE
# define PCIE_DPRINTF(fmt, ...)                                         \
    fprintf(stderr, "%s:%d " fmt, __func__, __LINE__, ## __VA_ARGS__)
#else
# define PCIE_DPRINTF(fmt, ...) do {} while (0)
#endif

#define PCIE_DEV_PRINTF(dev, fmt, ...)                                  \
    PCIE_DPRINTF("%s:%x "fmt, (dev)->name, (dev)->devfn, ## __VA_ARGS__)
41
#define PCI_ERR_SRC_COR_OFFS 0
42
#define PCI_ERR_SRC_UNCOR_OFFS 2
45
static uint32_t pcie_aer_uncor_default_severity(uint32_t status)
48
case PCI_ERR_UNC_INTN:
51
case PCI_ERR_UNC_RX_OVER:
53
case PCI_ERR_UNC_MALF_TLP:
54
return PCI_ERR_ROOT_CMD_FATAL_EN;
55
case PCI_ERR_UNC_POISON_TLP:
56
case PCI_ERR_UNC_ECRC:
57
case PCI_ERR_UNC_UNSUP:
58
case PCI_ERR_UNC_COMP_TIME:
59
case PCI_ERR_UNC_COMP_ABORT:
60
case PCI_ERR_UNC_UNX_COMP:
61
case PCI_ERR_UNC_ACSV:
62
case PCI_ERR_UNC_MCBTLP:
63
case PCI_ERR_UNC_ATOP_EBLOCKED:
64
case PCI_ERR_UNC_TLP_PRF_BLOCKED:
65
return PCI_ERR_ROOT_CMD_NONFATAL_EN;
70
return PCI_ERR_ROOT_CMD_FATAL_EN;
73
/*
 * Append @err to the software AER error queue.
 * Returns 0 on success, -1 when the queue is already full (overflow).
 */
static int aer_log_add_err(PCIEAERLog *aer_log, const PCIEAERErr *err)
{
    if (aer_log->log_num == aer_log->log_max) {
        /* queue full: caller reports a header-log-overflow error */
        return -1;
    }
    memcpy(&aer_log->log[aer_log->log_num], err, sizeof *err);
    aer_log->log_num++;
    return 0;
}
83
/*
 * Pop the oldest entry off the AER error queue into *err.
 * The queue must be non-empty.
 */
static void aer_log_del_err(PCIEAERLog *aer_log, PCIEAERErr *err)
{
    assert(aer_log->log_num);
    *err = aer_log->log[0];
    aer_log->log_num--;
    /* shift the remaining entries down; regions overlap, so memmove */
    memmove(&aer_log->log[0], &aer_log->log[1],
            aer_log->log_num * sizeof *err);
}
92
/* Discard every queued AER error. */
static void aer_log_clear_all_err(PCIEAERLog *aer_log)
{
    aer_log->log_num = 0;
}
97
/*
 * Install the AER extended capability at config-space @offset and set up
 * the register masks (wmask/w1cmask) that implement the RW / RW1C
 * semantics of the AER registers.
 * Returns 0 on success, -EINVAL (with @errp set) for an oversized log.
 */
int pcie_aer_init(PCIDevice *dev, uint8_t cap_ver, uint16_t offset,
                  uint16_t size, Error **errp)
{
    pcie_add_capability(dev, PCI_EXT_CAP_ID_ERR, cap_ver,
                        offset, size);
    dev->exp.aer_cap = offset;

    /* clip the user-supplied queue size to keep memory usage sane */
    if (dev->exp.aer_log.log_max > PCIE_AER_LOG_MAX_LIMIT) {
        error_setg(errp, "Invalid aer_log_max %d. The max number of aer log "
                "is %d", dev->exp.aer_log.log_max, PCIE_AER_LOG_MAX_LIMIT);
        return -EINVAL;
    }
    dev->exp.aer_log.log = g_malloc0(sizeof dev->exp.aer_log.log[0] *
                                     dev->exp.aer_log.log_max);

    /* Uncorrectable Error Status is RW1C for all supported bits */
    pci_set_long(dev->w1cmask + offset + PCI_ERR_UNCOR_STATUS,
                 PCI_ERR_UNC_SUPPORTED);

    if (dev->cap_present & QEMU_PCIE_ERR_UNC_MASK) {
        pci_set_long(dev->config + offset + PCI_ERR_UNCOR_MASK,
                     PCI_ERR_UNC_MASK_DEFAULT);
        pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_MASK,
                     PCI_ERR_UNC_SUPPORTED);
    }

    pci_set_long(dev->config + offset + PCI_ERR_UNCOR_SEVER,
                 PCI_ERR_UNC_SEVERITY_DEFAULT);
    pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_SEVER,
                 PCI_ERR_UNC_SUPPORTED);

    /* Correctable Error Status is RW1C as well */
    pci_long_test_and_set_mask(dev->w1cmask + offset + PCI_ERR_COR_STATUS,
                               PCI_ERR_COR_SUPPORTED);

    pci_set_long(dev->config + offset + PCI_ERR_COR_MASK,
                 PCI_ERR_COR_MASK_DEFAULT);
    pci_set_long(dev->wmask + offset + PCI_ERR_COR_MASK,
                 PCI_ERR_COR_SUPPORTED);

    /* capabilities & control: advertise multiple header recording only
     * when a software log was actually allocated */
    if (dev->exp.aer_log.log_max > 0) {
        pci_set_long(dev->config + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENC | PCI_ERR_CAP_ECRC_CHKC |
                     PCI_ERR_CAP_MHRC);
        pci_set_long(dev->wmask + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE |
                     PCI_ERR_CAP_MHRE);
    } else {
        pci_set_long(dev->config + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENC | PCI_ERR_CAP_ECRC_CHKC);
        pci_set_long(dev->wmask + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
    }

    switch (pcie_cap_get_type(dev)) {
    case PCI_EXP_TYPE_ROOT_PORT:
        /* root port registers are finished by pcie_aer_root_init() */
        /* fallthrough */
    case PCI_EXP_TYPE_DOWNSTREAM:
    case PCI_EXP_TYPE_UPSTREAM:
        pci_word_test_and_set_mask(dev->wmask + PCI_BRIDGE_CONTROL,
                                   PCI_BRIDGE_CTL_SERR);
        pci_long_test_and_set_mask(dev->w1cmask + PCI_STATUS,
                                   PCI_SEC_STATUS_RCV_SYSTEM_ERROR);
        break;
    default:
        /* nothing to do for endpoints */
        break;
    }
    return 0;
}
169
/* Release the software AER error log allocated by pcie_aer_init(). */
void pcie_aer_exit(PCIDevice *dev)
{
    g_free(dev->exp.aer_log.log);
}
174
/*
 * Re-assert the Uncorrectable Error Status bits for every queued error.
 * The status register is emulated as RW1C, so bits belonging to errors
 * still held in the software log must be set again after a guest write.
 */
static void pcie_aer_update_uncor_status(PCIDevice *dev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    PCIEAERLog *aer_log = &dev->exp.aer_log;

    uint16_t i;
    for (i = 0; i < aer_log->log_num; i++) {
        pci_long_test_and_set_mask(aer_cap + PCI_ERR_UNCOR_STATUS,
                                   dev->exp.aer_log.log[i].status);
    }
}
196
/*
 * Per-device handling common to all device types when an AER message
 * arrives.  Returns true when the error message should continue to
 * propagate upstream, false when reporting is disabled for this device.
 */
static bool
pcie_aer_msg_alldev(PCIDevice *dev, const PCIEAERMsg *msg)
{
    uint16_t devctl = pci_get_word(dev->config + dev->exp.exp_cap +
                                   PCI_EXP_DEVCTL);
    /* reporting is enabled either via SERR# Enable or via the
     * per-severity PCIe Device Control bits */
    if (!(pcie_aer_msg_is_uncor(msg) &&
          (pci_get_word(dev->config + PCI_COMMAND) & PCI_COMMAND_SERR)) &&
        !((msg->severity == PCI_ERR_ROOT_CMD_NONFATAL_EN) &&
          (devctl & PCI_EXP_DEVCTL_NFERE)) &&
        !((msg->severity == PCI_ERR_ROOT_CMD_COR_EN) &&
          (devctl & PCI_EXP_DEVCTL_CERE)) &&
        !((msg->severity == PCI_ERR_ROOT_CMD_FATAL_EN) &&
          (devctl & PCI_EXP_DEVCTL_FERE))) {
        return false;
    }

    /* Signaled System Error */
    pci_word_test_and_set_mask(dev->config + PCI_STATUS,
                               PCI_STATUS_SIG_SYSTEM_ERROR);

    if (!(msg->severity &
          pci_get_word(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL))) {
        return false;
    }

    /* send the error message further up */
    return true;
}
243
/*
 * Virtual-bridge handling of an AER message (root/upstream/downstream
 * ports).  Returns true when the message should keep propagating,
 * false when SERR forwarding is disabled on this bridge.
 */
static bool pcie_aer_msg_vbridge(PCIDevice *dev, const PCIEAERMsg *msg)
{
    uint16_t bridge_control = pci_get_word(dev->config + PCI_BRIDGE_CONTROL);

    if (pcie_aer_msg_is_uncor(msg)) {
        /* Received System Error on the secondary side */
        pci_word_test_and_set_mask(dev->config + PCI_SEC_STATUS,
                                   PCI_SEC_STATUS_RCV_SYSTEM_ERROR);
    }

    if (!(bridge_control & PCI_BRIDGE_CTL_SERR)) {
        return false;
    }
    return true;
}
259
/*
 * Record the Advanced Error Interrupt Message Number (the MSI/MSI-X
 * vector used for AER) in the read-only field of Root Error Status.
 */
void pcie_aer_root_set_vector(PCIDevice *dev, unsigned int vector)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    assert(vector < PCI_ERR_ROOT_IRQ_MAX);
    pci_long_test_and_clear_mask(aer_cap + PCI_ERR_ROOT_STATUS,
                                 PCI_ERR_ROOT_IRQ);
    pci_long_test_and_set_mask(aer_cap + PCI_ERR_ROOT_STATUS,
                               vector << PCI_ERR_ROOT_IRQ_SHIFT);
}
269
/* Read back the AER interrupt vector stored in Root Error Status. */
static unsigned int pcie_aer_root_get_vector(PCIDevice *dev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint32_t root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
    return (root_status & PCI_ERR_ROOT_IRQ) >> PCI_ERR_ROOT_IRQ_SHIFT;
}
277
static uint32_t pcie_aer_status_to_cmd(uint32_t status)
280
if (status & PCI_ERR_ROOT_COR_RCV) {
281
cmd |= PCI_ERR_ROOT_CMD_COR_EN;
283
if (status & PCI_ERR_ROOT_NONFATAL_RCV) {
284
cmd |= PCI_ERR_ROOT_CMD_NONFATAL_EN;
286
if (status & PCI_ERR_ROOT_FATAL_RCV) {
287
cmd |= PCI_ERR_ROOT_CMD_FATAL_EN;
292
/* Raise the AER interrupt using whichever mechanism the guest enabled:
 * MSI-X, then MSI, then legacy INTx as a fallback. */
static void pcie_aer_root_notify(PCIDevice *dev)
{
    if (msix_enabled(dev)) {
        msix_notify(dev, pcie_aer_root_get_vector(dev));
    } else if (msi_enabled(dev)) {
        msi_notify(dev, pcie_aer_root_get_vector(dev));
    } else if (pci_intx(dev) != -1) {
        pci_irq_assert(dev);
    }
}
308
/*
 * Root-port handling of an incoming AER message: update the Root Error
 * Status register (including multiple-error and first-fatal bits and
 * the error source IDs) and raise the AER interrupt on a false->true
 * transition of an enabled condition.
 */
static void pcie_aer_msg_root_port(PCIDevice *dev, const PCIEAERMsg *msg)
{
    uint16_t cmd;
    uint8_t *aer_cap;
    uint32_t root_cmd;
    uint32_t root_status, prev_status;

    cmd = pci_get_word(dev->config + PCI_COMMAND);
    aer_cap = dev->config + dev->exp.aer_cap;
    root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND);
    prev_status = root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);

    if (cmd & PCI_COMMAND_SERR) {
        /* System Error: the way to deliver a platform system error is
         * platform specific and not emulated here, so the error is
         * dropped.  AER-aware guests get the error through the native
         * AER path anyway. */
    }

    /* Error Message Received: update Root Error Status */
    switch (msg->severity) {
    case PCI_ERR_ROOT_CMD_COR_EN:
        if (root_status & PCI_ERR_ROOT_COR_RCV) {
            root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
        } else {
            /* first correctable error: latch its source id */
            pci_set_word(aer_cap + PCI_ERR_ROOT_ERR_SRC + PCI_ERR_SRC_COR_OFFS,
                         msg->source_id);
        }
        root_status |= PCI_ERR_ROOT_COR_RCV;
        break;
    case PCI_ERR_ROOT_CMD_NONFATAL_EN:
        root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
        break;
    case PCI_ERR_ROOT_CMD_FATAL_EN:
        if (!(root_status & PCI_ERR_ROOT_UNCOR_RCV)) {
            root_status |= PCI_ERR_ROOT_FIRST_FATAL;
        }
        root_status |= PCI_ERR_ROOT_FATAL_RCV;
        break;
    default:
        abort();
        break;
    }
    if (pcie_aer_msg_is_uncor(msg)) {
        if (root_status & PCI_ERR_ROOT_UNCOR_RCV) {
            root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
        } else {
            /* first uncorrectable error: latch its source id */
            pci_set_word(aer_cap + PCI_ERR_ROOT_ERR_SRC +
                         PCI_ERR_SRC_UNCOR_OFFS, msg->source_id);
        }
        root_status |= PCI_ERR_ROOT_UNCOR_RCV;
    }
    pci_set_long(aer_cap + PCI_ERR_ROOT_STATUS, root_status);

    /* 6.2.4.1.2 Interrupt Generation: notify only when the matching
     * enable bit is set and no enabled condition was already pending */
    if (!(root_cmd & msg->severity) ||
        (pcie_aer_status_to_cmd(prev_status) & root_cmd)) {
        return;
    }
    pcie_aer_root_notify(dev);
}
384
/*
 * Walk an AER message up the hierarchy from @dev toward the root port,
 * giving every bridge/device on the way a chance to record or block it.
 * Stops at the first non-express device, at any device that disables
 * reporting, or at the root port which consumes the message.
 */
static void pcie_aer_msg(PCIDevice *dev, const PCIEAERMsg *msg)
{
    uint8_t type;

    while (dev) {
        if (!pci_is_express(dev)) {
            /* conventional PCI above: just drop the message */
            return;
        }

        type = pcie_cap_get_type(dev);
        if ((type == PCI_EXP_TYPE_ROOT_PORT ||
            type == PCI_EXP_TYPE_UPSTREAM ||
            type == PCI_EXP_TYPE_DOWNSTREAM) &&
            !pcie_aer_msg_vbridge(dev, msg)) {
                return;
        }
        if (!pcie_aer_msg_alldev(dev, msg)) {
            return;
        }
        if (type == PCI_EXP_TYPE_ROOT_PORT) {
            pcie_aer_msg_root_port(dev, msg);
            return;
        }
        /* continue with the bridge device one level up */
        dev = pci_bridge_get_device(pci_get_bus(dev));
    }
}
421
/*
 * Make @err the currently reported error: set the First Error Pointer
 * and fill the Header Log / TLP Prefix Log registers from the error's
 * captured data.  @err->status must have exactly one bit set.
 */
static void pcie_aer_update_log(PCIDevice *dev, const PCIEAERErr *err)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint8_t first_bit = ctz32(err->status);
    uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP);
    int i;

    assert(err->status);
    /* exactly one bit must be set */
    assert(!(err->status & (err->status - 1)));

    errcap &= ~(PCI_ERR_CAP_FEP_MASK | PCI_ERR_CAP_TLP);
    errcap |= PCI_ERR_CAP_FEP(first_bit);

    if (err->flags & PCIE_AER_ERR_HEADER_VALID) {
        for (i = 0; i < ARRAY_SIZE(err->header); ++i) {
            /* 7.10.8 Header Log Register: stored big endian */
            uint8_t *header_log =
                aer_cap + PCI_ERR_HEADER_LOG + i * sizeof err->header[0];
            stl_be_p(header_log, err->header[i]);
        }
    } else {
        assert(!(err->flags & PCIE_AER_ERR_TLP_PREFIX_PRESENT));
        memset(aer_cap + PCI_ERR_HEADER_LOG, 0, PCI_ERR_HEADER_LOG_SIZE);
    }

    if ((err->flags & PCIE_AER_ERR_TLP_PREFIX_PRESENT) &&
        (pci_get_long(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCAP2) &
         PCI_EXP_DEVCAP2_EETLPP)) {
        for (i = 0; i < ARRAY_SIZE(err->prefix); ++i) {
            /* 7.10.12 TLP Prefix Log Register */
            uint8_t *prefix_log =
                aer_cap + PCI_ERR_TLP_PREFIX_LOG + i * sizeof err->prefix[0];
            stl_be_p(prefix_log, err->prefix[i]);
        }
        errcap |= PCI_ERR_CAP_TLP;
    } else {
        memset(aer_cap + PCI_ERR_TLP_PREFIX_LOG, 0,
               PCI_ERR_TLP_PREFIX_LOG_SIZE);
    }
    pci_set_long(aer_cap + PCI_ERR_CAP, errcap);
}
463
/* Clear the First Error Pointer / TLP flag and zero both log registers. */
static void pcie_aer_clear_log(PCIDevice *dev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;

    pci_long_test_and_clear_mask(aer_cap + PCI_ERR_CAP,
                                 PCI_ERR_CAP_FEP_MASK | PCI_ERR_CAP_TLP);

    memset(aer_cap + PCI_ERR_HEADER_LOG, 0, PCI_ERR_HEADER_LOG_SIZE);
    memset(aer_cap + PCI_ERR_TLP_PREFIX_LOG, 0, PCI_ERR_TLP_PREFIX_LOG_SIZE);
}
474
/*
 * The guest cleared the currently reported (first) error.  If multiple
 * header recording is active and more errors are queued, promote the
 * next queued error into the log registers; otherwise clear the log.
 */
static void pcie_aer_clear_error(PCIDevice *dev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP);
    PCIEAERLog *aer_log = &dev->exp.aer_log;
    PCIEAERErr err;

    if (!(errcap & PCI_ERR_CAP_MHRE) || !aer_log->log_num) {
        pcie_aer_clear_log(dev);
        return;
    }

    /*
     * The status register is emulated as RW1C, so re-assert the bits of
     * the still-queued errors before shifting the log.
     * 6.2.4.2 Multiple Error Handling (Advanced Error Reporting)
     */
    pcie_aer_update_uncor_status(dev);

    aer_log_del_err(aer_log, &err);
    pcie_aer_update_log(dev, &err);
}
501
static int pcie_aer_record_error(PCIDevice *dev,
502
const PCIEAERErr *err)
504
uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
505
uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP);
506
int fep = PCI_ERR_CAP_FEP(errcap);
509
assert(!(err->status & (err->status - 1)));
511
if (errcap & PCI_ERR_CAP_MHRE &&
512
(pci_get_long(aer_cap + PCI_ERR_UNCOR_STATUS) & (1U << fep))) {
514
if (aer_log_add_err(&dev->exp.aer_log, err) < 0) {
521
pcie_aer_update_log(dev, err);
525
typedef struct PCIEAERInject {
528
const PCIEAERErr *err;
531
uint32_t error_status;
532
bool unsupported_request;
537
/*
 * Inject a correctable error (or an advisory non-fatal one, where
 * @uncor_status holds the underlying uncorrectable bit).  Returns true
 * when an ERR_COR message should be sent upstream.
 */
static bool pcie_aer_inject_cor_error(PCIEAERInject *inj,
                                      uint32_t uncor_status,
                                      bool is_advisory_nonfatal)
{
    PCIDevice *dev = inj->dev;

    inj->devsta |= PCI_EXP_DEVSTA_CED;
    if (inj->unsupported_request) {
        inj->devsta |= PCI_EXP_DEVSTA_URD;
    }
    pci_set_word(dev->config + dev->exp.exp_cap + PCI_EXP_DEVSTA, inj->devsta);

    if (inj->aer_cap) {
        uint32_t mask;
        pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_COR_STATUS,
                                   inj->error_status);
        mask = pci_get_long(inj->aer_cap + PCI_ERR_COR_MASK);
        if (mask & inj->error_status) {
            /* the error is masked: status is set but nothing is sent */
            return false;
        }
        if (is_advisory_nonfatal) {
            uint32_t uncor_mask =
                pci_get_long(inj->aer_cap + PCI_ERR_UNCOR_MASK);
            if (!(uncor_mask & uncor_status)) {
                inj->log_overflow = !!pcie_aer_record_error(dev, inj->err);
            }
            pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_UNCOR_STATUS,
                                       uncor_status);
        }
    }

    if (inj->unsupported_request && !(inj->devctl & PCI_EXP_DEVCTL_URRE)) {
        return false;
    }
    if (!(inj->devctl & PCI_EXP_DEVCTL_CERE)) {
        return false;
    }

    inj->msg.severity = PCI_ERR_ROOT_CMD_COR_EN;
    return true;
}
579
/*
 * Inject an uncorrectable (fatal or non-fatal) error.  Returns true
 * when an ERR_FATAL/ERR_NONFATAL message should be sent upstream.
 */
static bool pcie_aer_inject_uncor_error(PCIEAERInject *inj, bool is_fatal)
{
    PCIDevice *dev = inj->dev;
    uint16_t cmd;

    if (is_fatal) {
        inj->devsta |= PCI_EXP_DEVSTA_FED;
    } else {
        inj->devsta |= PCI_EXP_DEVSTA_NFED;
    }
    if (inj->unsupported_request) {
        inj->devsta |= PCI_EXP_DEVSTA_URD;
    }
    pci_set_long(dev->config + dev->exp.exp_cap + PCI_EXP_DEVSTA, inj->devsta);

    if (inj->aer_cap) {
        uint32_t mask = pci_get_long(inj->aer_cap + PCI_ERR_UNCOR_MASK);
        if (mask & inj->error_status) {
            /* masked: record the status bit but send nothing */
            pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_UNCOR_STATUS,
                                       inj->error_status);
            return false;
        }

        inj->log_overflow = !!pcie_aer_record_error(dev, inj->err);
        pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_UNCOR_STATUS,
                                   inj->error_status);
    }

    cmd = pci_get_word(dev->config + PCI_COMMAND);
    if (inj->unsupported_request &&
        !(inj->devctl & PCI_EXP_DEVCTL_URRE) && !(cmd & PCI_COMMAND_SERR)) {
        return false;
    }

    if (is_fatal) {
        if (!((cmd & PCI_COMMAND_SERR) ||
              (inj->devctl & PCI_EXP_DEVCTL_FERE))) {
            return false;
        }
        inj->msg.severity = PCI_ERR_ROOT_CMD_FATAL_EN;
    } else {
        if (!((cmd & PCI_COMMAND_SERR) ||
              (inj->devctl & PCI_EXP_DEVCTL_NFERE))) {
            return false;
        }
        inj->msg.severity = PCI_ERR_ROOT_CMD_NONFATAL_EN;
    }
    return true;
}
639
/*
 * Inject an AER error into @dev as if the hardware had detected it:
 * update the device's status/log registers and propagate the resulting
 * error message to the root port.  Returns 0 on success, -ENOSYS for a
 * non-express device, -EINVAL for an unsupported/multi-bit status.
 */
int pcie_aer_inject_error(PCIDevice *dev, const PCIEAERErr *err)
{
    uint8_t *aer_cap = NULL;
    uint16_t devctl = 0;
    uint16_t devsta = 0;
    uint32_t error_status = err->status;
    PCIEAERInject inj;

    if (!pci_is_express(dev)) {
        return -ENOSYS;
    }

    if (err->flags & PCIE_AER_ERR_IS_CORRECTABLE) {
        error_status &= PCI_ERR_COR_SUPPORTED;
    } else {
        error_status &= PCI_ERR_UNC_SUPPORTED;
    }

    /* one and only one status bit must be set */
    if (!error_status || (error_status & (error_status - 1))) {
        return -EINVAL;
    }

    if (dev->exp.aer_cap) {
        uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
        aer_cap = dev->config + dev->exp.aer_cap;
        devctl = pci_get_long(exp_cap + PCI_EXP_DEVCTL);
        devsta = pci_get_long(exp_cap + PCI_EXP_DEVSTA);
    }

    inj.dev = dev;
    inj.aer_cap = aer_cap;
    inj.err = err;
    inj.devctl = devctl;
    inj.devsta = devsta;
    inj.error_status = error_status;
    inj.unsupported_request = !(err->flags & PCIE_AER_ERR_IS_CORRECTABLE) &&
        err->status == PCI_ERR_UNC_UNSUP;
    inj.log_overflow = false;

    if (err->flags & PCIE_AER_ERR_IS_CORRECTABLE) {
        if (!pcie_aer_inject_cor_error(&inj, 0, false)) {
            return 0;
        }
    } else {
        bool is_fatal =
            pcie_aer_uncor_default_severity(error_status) ==
            PCI_ERR_ROOT_CMD_FATAL_EN;
        if (aer_cap) {
            /* the severity register overrides the spec default */
            is_fatal =
                error_status & pci_get_long(aer_cap + PCI_ERR_UNCOR_SEVER);
        }
        if (!is_fatal && (err->flags & PCIE_AER_ERR_MAYBE_ADVISORY)) {
            inj.error_status = PCI_ERR_COR_ADV_NONFATAL;
            if (!pcie_aer_inject_cor_error(&inj, error_status, true)) {
                return 0;
            }
        } else {
            if (!pcie_aer_inject_uncor_error(&inj, is_fatal)) {
                return 0;
            }
        }
    }

    /* send the error message up the hierarchy */
    inj.msg.source_id = err->source_id;
    pcie_aer_msg(dev, &inj.msg);

    if (inj.log_overflow) {
        /* the software log overflowed: report that as a correctable
         * header-log-overflow error in its own right */
        PCIEAERErr header_log_overflow = {
            .status = PCI_ERR_COR_HL_OVERFLOW,
            .flags = PCIE_AER_ERR_IS_CORRECTABLE,
        };
        int ret = pcie_aer_inject_error(dev, &header_log_overflow);
        assert(!ret);
    }
    return 0;
}
718
/*
 * Config-space write hook for AER registers, called after the generic
 * write has been applied.  Handles the RW1C semantics of the
 * Uncorrectable Error Status register and the software error queue.
 *
 * NOTE(review): lines between the MHRE branch and the final queue clear
 * were lost in extraction; reconstructed from the surviving calls —
 * confirm against upstream pcie_aer.c.
 */
void pcie_aer_write_config(PCIDevice *dev,
                           uint32_t addr, uint32_t val, int len)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP);
    uint32_t first_error = 1U << PCI_ERR_CAP_FEP(errcap);
    uint32_t uncorsta = pci_get_long(aer_cap + PCI_ERR_UNCOR_STATUS);

    /* uncorrectable error */
    if (!(uncorsta & first_error)) {
        /* the bit corresponding to the first error was cleared */
        pcie_aer_clear_error(dev);
    } else if (errcap & PCI_ERR_CAP_MHRE) {
        /* multiple header recording: bits of still-queued errors are
         * RW1C from the guest's view, so set them again */
        pcie_aer_update_uncor_status(dev);
    }

    if (!(pci_get_long(aer_cap + PCI_ERR_CAP) & PCI_ERR_CAP_MHRE)) {
        /* multiple header recording was disabled: drop queued errors */
        aer_log_clear_all_err(&dev->exp.aer_log);
    }
}
744
/* Root-port specific AER register setup (command/status write masks). */
void pcie_aer_root_init(PCIDevice *dev)
{
    uint16_t pos = dev->exp.aer_cap;

    pci_set_long(dev->wmask + pos + PCI_ERR_ROOT_COMMAND,
                 PCI_ERR_ROOT_CMD_EN_MASK);
    pci_set_long(dev->w1cmask + pos + PCI_ERR_ROOT_STATUS,
                 PCI_ERR_ROOT_STATUS_REPORT_MASK);
    /* PCI_ERR_ROOT_IRQ is RO, but the device model updates it itself
     * (pcie_aer_root_set_vector), so exclude it from the cmask check */
    pci_set_long(dev->cmask + pos + PCI_ERR_ROOT_STATUS,
                 ~PCI_ERR_ROOT_IRQ);
}
759
/* Reset the root-port AER state: disable all error reporting enables. */
void pcie_aer_root_reset(PCIDevice *dev)
{
    uint8_t* aer_cap = dev->config + dev->exp.aer_cap;

    pci_set_long(aer_cap + PCI_ERR_ROOT_COMMAND, 0);
    /* The Advanced Error Interrupt Message Number is left as set by
     * the device model via pcie_aer_root_set_vector(). */
}
772
/*
 * Config-space write hook for the Root Error Command register.
 * @root_cmd_prev is the command value before the write; an interrupt is
 * generated on a false->true transition of an enabled pending condition
 * (6.2.4.1.2 Interrupt Generation).
 */
void pcie_aer_root_write_config(PCIDevice *dev,
                                uint32_t addr, uint32_t val, int len,
                                uint32_t root_cmd_prev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint32_t root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
    uint32_t enabled_cmd = pcie_aer_status_to_cmd(root_status);
    uint32_t root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND);

    if (!msix_enabled(dev) && !msi_enabled(dev)) {
        /* INTx mode: level follows "enabled condition pending" */
        if (pci_intx(dev) != -1) {
            pci_set_irq(dev, !!(root_cmd & enabled_cmd));
        }
        return;
    }

    if ((root_cmd_prev & enabled_cmd) || !(root_cmd & enabled_cmd)) {
        /* MSI/MSI-X fires only on a false->true transition */
        return;
    }

    pcie_aer_root_notify(dev);
}
796
static const VMStateDescription vmstate_pcie_aer_err = {
797
.name = "PCIE_AER_ERROR",
799
.minimum_version_id = 1,
800
.fields = (const VMStateField[]) {
801
VMSTATE_UINT32(status, PCIEAERErr),
802
VMSTATE_UINT16(source_id, PCIEAERErr),
803
VMSTATE_UINT16(flags, PCIEAERErr),
804
VMSTATE_UINT32_ARRAY(header, PCIEAERErr, 4),
805
VMSTATE_UINT32_ARRAY(prefix, PCIEAERErr, 4),
806
VMSTATE_END_OF_LIST()
810
static bool pcie_aer_state_log_num_valid(void *opaque, int version_id)
812
PCIEAERLog *s = opaque;
814
return s->log_num <= s->log_max;
817
const VMStateDescription vmstate_pcie_aer_log = {
818
.name = "PCIE_AER_ERROR_LOG",
820
.minimum_version_id = 1,
821
.fields = (const VMStateField[]) {
822
VMSTATE_UINT16(log_num, PCIEAERLog),
823
VMSTATE_UINT16_EQUAL(log_max, PCIEAERLog, NULL),
824
VMSTATE_VALIDATE("log_num <= log_max", pcie_aer_state_log_num_valid),
825
VMSTATE_STRUCT_VARRAY_POINTER_UINT16(log, PCIEAERLog, log_num,
826
vmstate_pcie_aer_err, PCIEAERErr),
827
VMSTATE_END_OF_LIST()
831
/* Mapping of a human-readable error name to its status bit, used by
 * pcie_aer_parse_error_string() for error-injection commands. */
typedef struct PCIEAERErrorName {
    const char *name;
    uint32_t val;
    bool correctable;
} PCIEAERErrorName;
841
static const struct PCIEAERErrorName pcie_aer_error_list[] = {
844
.val = PCI_ERR_UNC_DLP,
845
.correctable = false,
848
.val = PCI_ERR_UNC_SDN,
849
.correctable = false,
851
.name = "POISON_TLP",
852
.val = PCI_ERR_UNC_POISON_TLP,
853
.correctable = false,
856
.val = PCI_ERR_UNC_FCP,
857
.correctable = false,
860
.val = PCI_ERR_UNC_COMP_TIME,
861
.correctable = false,
863
.name = "COMP_ABORT",
864
.val = PCI_ERR_UNC_COMP_ABORT,
865
.correctable = false,
868
.val = PCI_ERR_UNC_UNX_COMP,
869
.correctable = false,
872
.val = PCI_ERR_UNC_RX_OVER,
873
.correctable = false,
876
.val = PCI_ERR_UNC_MALF_TLP,
877
.correctable = false,
880
.val = PCI_ERR_UNC_ECRC,
881
.correctable = false,
884
.val = PCI_ERR_UNC_UNSUP,
885
.correctable = false,
888
.val = PCI_ERR_UNC_ACSV,
889
.correctable = false,
892
.val = PCI_ERR_UNC_INTN,
893
.correctable = false,
896
.val = PCI_ERR_UNC_MCBTLP,
897
.correctable = false,
899
.name = "ATOP_EBLOCKED",
900
.val = PCI_ERR_UNC_ATOP_EBLOCKED,
901
.correctable = false,
903
.name = "TLP_PRF_BLOCKED",
904
.val = PCI_ERR_UNC_TLP_PRF_BLOCKED,
905
.correctable = false,
908
.val = PCI_ERR_COR_RCVR,
912
.val = PCI_ERR_COR_BAD_TLP,
916
.val = PCI_ERR_COR_BAD_DLLP,
920
.val = PCI_ERR_COR_REP_ROLL,
924
.val = PCI_ERR_COR_REP_TIMER,
927
.name = "ADV_NONFATAL",
928
.val = PCI_ERR_COR_ADV_NONFATAL,
932
.val = PCI_ERR_COR_INTERNAL,
935
.name = "HL_OVERFLOW",
936
.val = PCI_ERR_COR_HL_OVERFLOW,
941
int pcie_aer_parse_error_string(const char *error_name,
942
uint32_t *status, bool *correctable)
946
for (i = 0; i < ARRAY_SIZE(pcie_aer_error_list); i++) {
947
const PCIEAERErrorName *e = &pcie_aer_error_list[i];
948
if (strcmp(error_name, e->name)) {
953
*correctable = e->correctable;