#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "trace.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/registerfields.h"
#include "hw/irq.h"
#include "hw/misc/tz-mpc.h"
#include "hw/qdev-properties.h"
35
FIELD(CTRL, SEC_RESP, 4, 1)
36
FIELD(CTRL, AUTOINC, 8, 1)
37
FIELD(CTRL, LOCKDOWN, 31, 1)
43
FIELD(INT_STAT, IRQ, 0, 1)
45
FIELD(INT_CLEAR, IRQ, 0, 1)
47
FIELD(INT_EN, IRQ, 0, 1)
50
FIELD(INT_INFO2, HMASTER, 0, 16)
51
FIELD(INT_INFO2, HNONSEC, 16, 1)
52
FIELD(INT_INFO2, CFG_NS, 17, 1)
54
FIELD(INT_SET, IRQ, 0, 1)
68
static const uint8_t tz_mpc_idregs[] = {
69
0x04, 0x00, 0x00, 0x00,
70
0x60, 0xb8, 0x1b, 0x00,
71
0x0d, 0xf0, 0x05, 0xb1,
74
static void tz_mpc_irq_update(TZMPC *s)
76
qemu_set_irq(s->irq, s->int_stat && s->int_en);
79
static void tz_mpc_iommu_notify(TZMPC *s, uint32_t lutidx,
80
uint32_t oldlut, uint32_t newlut)
85
IOMMUTLBEvent event = {
87
.addr_mask = s->blocksize - 1,
90
hwaddr addr = lutidx * s->blocksize * 32;
93
for (i = 0; i < 32; i++, addr += s->blocksize) {
96
if (!((oldlut ^ newlut) & (1 << i))) {
102
block_is_ns = newlut & (1 << i);
104
trace_tz_mpc_iommu_notify(addr);
105
event.entry.iova = addr;
106
event.entry.translated_addr = addr;
108
event.type = IOMMU_NOTIFIER_UNMAP;
109
event.entry.perm = IOMMU_NONE;
110
memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, event);
111
memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, event);
113
event.type = IOMMU_NOTIFIER_MAP;
114
event.entry.perm = IOMMU_RW;
116
event.entry.target_as = &s->blocked_io_as;
118
event.entry.target_as = &s->downstream_as;
120
memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, event);
122
event.entry.target_as = &s->downstream_as;
124
event.entry.target_as = &s->blocked_io_as;
126
memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, event);
130
static void tz_mpc_autoinc_idx(TZMPC *s, unsigned access_size)
133
if (access_size == 4 && (s->ctrl & R_CTRL_AUTOINC_MASK)) {
135
s->blk_idx %= s->blk_max;
139
static MemTxResult tz_mpc_reg_read(void *opaque, hwaddr addr,
141
unsigned size, MemTxAttrs attrs)
143
TZMPC *s = TZ_MPC(opaque);
145
uint32_t offset = addr & ~0x3;
147
if (!attrs.secure && offset < A_PIDR4) {
149
qemu_log_mask(LOG_GUEST_ERROR,
150
"TZ MPC register read: NS access to offset 0x%x\n",
168
r = ctz32(s->blocksize) - 5;
174
r = s->blk_lut[s->blk_idx];
175
tz_mpc_autoinc_idx(s, size);
201
r = tz_mpc_idregs[(offset - A_PIDR4) / 4];
205
qemu_log_mask(LOG_GUEST_ERROR,
206
"TZ MPC register read: write-only offset 0x%x\n",
211
qemu_log_mask(LOG_GUEST_ERROR,
212
"TZ MPC register read: bad offset 0x%x\n", offset);
222
r = extract32(r, (addr & 3) * 8, size * 8);
226
trace_tz_mpc_reg_read(addr, r, size);
231
static MemTxResult tz_mpc_reg_write(void *opaque, hwaddr addr,
233
unsigned size, MemTxAttrs attrs)
235
TZMPC *s = TZ_MPC(opaque);
236
uint32_t offset = addr & ~0x3;
238
trace_tz_mpc_reg_write(addr, value, size);
240
if (!attrs.secure && offset < A_PIDR4) {
242
qemu_log_mask(LOG_GUEST_ERROR,
243
"TZ MPC register write: NS access to offset 0x%x\n",
263
oldval = s->blk_lut[s->blk_idx];
269
value = deposit32(oldval, (addr & 3) * 8, size * 8, value);
272
if ((s->ctrl & R_CTRL_LOCKDOWN_MASK) &&
273
(offset == A_CTRL || offset == A_BLK_LUT || offset == A_INT_EN)) {
277
qemu_log_mask(LOG_GUEST_ERROR, "TZ MPC register write to offset 0x%x "
278
"while MPC is in lockdown mode\n", offset);
287
s->ctrl = value & (R_CTRL_SEC_RESP_MASK |
288
R_CTRL_AUTOINC_MASK |
289
R_CTRL_LOCKDOWN_MASK);
292
s->blk_idx = value % s->blk_max;
295
tz_mpc_iommu_notify(s, s->blk_idx, s->blk_lut[s->blk_idx], value);
296
s->blk_lut[s->blk_idx] = value;
297
tz_mpc_autoinc_idx(s, size);
300
if (value & R_INT_CLEAR_IRQ_MASK) {
302
tz_mpc_irq_update(s);
306
s->int_en = value & R_INT_EN_IRQ_MASK;
307
tz_mpc_irq_update(s);
310
if (value & R_INT_SET_IRQ_MASK) {
311
s->int_stat = R_INT_STAT_IRQ_MASK;
312
tz_mpc_irq_update(s);
327
qemu_log_mask(LOG_GUEST_ERROR,
328
"TZ MPC register write: read-only offset 0x%x\n", offset);
331
qemu_log_mask(LOG_GUEST_ERROR,
332
"TZ MPC register write: bad offset 0x%x\n", offset);
339
static const MemoryRegionOps tz_mpc_reg_ops = {
340
.read_with_attrs = tz_mpc_reg_read,
341
.write_with_attrs = tz_mpc_reg_write,
342
.endianness = DEVICE_LITTLE_ENDIAN,
343
.valid.min_access_size = 1,
344
.valid.max_access_size = 4,
345
.impl.min_access_size = 1,
346
.impl.max_access_size = 4,
349
static inline bool tz_mpc_cfg_ns(TZMPC *s, hwaddr addr)
352
hwaddr blknum = addr / s->blocksize;
353
hwaddr blkword = blknum / 32;
354
uint32_t blkbit = 1U << (blknum % 32);
359
assert(blkword < s->blk_max);
360
return s->blk_lut[blkword] & blkbit;
363
static MemTxResult tz_mpc_handle_block(TZMPC *s, hwaddr addr, MemTxAttrs attrs)
374
s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, HMASTER,
375
attrs.requester_id & 0xffff);
376
s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, HNONSEC,
378
s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, CFG_NS,
379
tz_mpc_cfg_ns(s, addr));
380
s->int_stat |= R_INT_STAT_IRQ_MASK;
381
tz_mpc_irq_update(s);
385
return (s->ctrl & R_CTRL_SEC_RESP_MASK) ? MEMTX_ERROR : MEMTX_OK;
392
static MemTxResult tz_mpc_mem_blocked_read(void *opaque, hwaddr addr,
394
unsigned size, MemTxAttrs attrs)
396
TZMPC *s = TZ_MPC(opaque);
398
trace_tz_mpc_mem_blocked_read(addr, size, attrs.secure);
401
return tz_mpc_handle_block(s, addr, attrs);
404
static MemTxResult tz_mpc_mem_blocked_write(void *opaque, hwaddr addr,
406
unsigned size, MemTxAttrs attrs)
408
TZMPC *s = TZ_MPC(opaque);
410
trace_tz_mpc_mem_blocked_write(addr, value, size, attrs.secure);
412
return tz_mpc_handle_block(s, addr, attrs);
415
static const MemoryRegionOps tz_mpc_mem_blocked_ops = {
416
.read_with_attrs = tz_mpc_mem_blocked_read,
417
.write_with_attrs = tz_mpc_mem_blocked_write,
418
.endianness = DEVICE_LITTLE_ENDIAN,
419
.valid.min_access_size = 1,
420
.valid.max_access_size = 8,
421
.impl.min_access_size = 1,
422
.impl.max_access_size = 8,
425
static IOMMUTLBEntry tz_mpc_translate(IOMMUMemoryRegion *iommu,
426
hwaddr addr, IOMMUAccessFlags flags,
429
TZMPC *s = TZ_MPC(container_of(iommu, TZMPC, upstream));
432
IOMMUTLBEntry ret = {
433
.iova = addr & ~(s->blocksize - 1),
434
.translated_addr = addr & ~(s->blocksize - 1),
435
.addr_mask = s->blocksize - 1,
445
ok = tz_mpc_cfg_ns(s, addr) == (iommu_idx == IOMMU_IDX_NS);
447
trace_tz_mpc_translate(addr, flags,
448
iommu_idx == IOMMU_IDX_S ? "S" : "NS",
449
ok ? "pass" : "block");
451
ret.target_as = ok ? &s->downstream_as : &s->blocked_io_as;
455
static int tz_mpc_attrs_to_index(IOMMUMemoryRegion *iommu, MemTxAttrs attrs)
464
return (attrs.unspecified || attrs.secure) ? IOMMU_IDX_S : IOMMU_IDX_NS;
467
static int tz_mpc_num_indexes(IOMMUMemoryRegion *iommu)
469
return IOMMU_NUM_INDEXES;
472
static void tz_mpc_reset(DeviceState *dev)
474
TZMPC *s = TZ_MPC(dev);
476
s->ctrl = 0x00000100;
483
memset(s->blk_lut, 0, s->blk_max * sizeof(uint32_t));
486
static void tz_mpc_init(Object *obj)
488
DeviceState *dev = DEVICE(obj);
489
TZMPC *s = TZ_MPC(obj);
491
qdev_init_gpio_out_named(dev, &s->irq, "irq", 1);
494
static void tz_mpc_realize(DeviceState *dev, Error **errp)
496
Object *obj = OBJECT(dev);
497
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
498
TZMPC *s = TZ_MPC(dev);
508
if (!s->downstream) {
509
error_setg(errp, "MPC 'downstream' link not set");
513
size = memory_region_size(s->downstream);
515
memory_region_init_iommu(&s->upstream, sizeof(s->upstream),
516
TYPE_TZ_MPC_IOMMU_MEMORY_REGION,
517
obj, "tz-mpc-upstream", size);
525
s->blocksize = memory_region_iommu_get_min_page_size(&s->upstream);
526
if (size % s->blocksize != 0) {
528
"MPC 'downstream' size %" PRId64
529
" is not a multiple of %" HWADDR_PRIx " bytes",
531
object_unref(OBJECT(&s->upstream));
538
s->blk_max = DIV_ROUND_UP(size / s->blocksize, 32);
540
memory_region_init_io(&s->regmr, obj, &tz_mpc_reg_ops,
541
s, "tz-mpc-regs", 0x1000);
542
sysbus_init_mmio(sbd, &s->regmr);
544
sysbus_init_mmio(sbd, MEMORY_REGION(&s->upstream));
550
memory_region_init_io(&s->blocked_io, obj, &tz_mpc_mem_blocked_ops,
551
s, "tz-mpc-blocked-io", size);
553
address_space_init(&s->downstream_as, s->downstream,
554
"tz-mpc-downstream");
555
address_space_init(&s->blocked_io_as, &s->blocked_io,
556
"tz-mpc-blocked-io");
558
s->blk_lut = g_new0(uint32_t, s->blk_max);
561
static int tz_mpc_post_load(void *opaque, int version_id)
563
TZMPC *s = TZ_MPC(opaque);
566
if (s->blk_idx >= s->blk_max) {
572
static const VMStateDescription tz_mpc_vmstate = {
575
.minimum_version_id = 1,
576
.post_load = tz_mpc_post_load,
577
.fields = (const VMStateField[]) {
578
VMSTATE_UINT32(ctrl, TZMPC),
579
VMSTATE_UINT32(blk_idx, TZMPC),
580
VMSTATE_UINT32(int_stat, TZMPC),
581
VMSTATE_UINT32(int_en, TZMPC),
582
VMSTATE_UINT32(int_info1, TZMPC),
583
VMSTATE_UINT32(int_info2, TZMPC),
584
VMSTATE_VARRAY_UINT32(blk_lut, TZMPC, blk_max,
585
0, vmstate_info_uint32, uint32_t),
586
VMSTATE_END_OF_LIST()
590
static Property tz_mpc_properties[] = {
591
DEFINE_PROP_LINK("downstream", TZMPC, downstream,
592
TYPE_MEMORY_REGION, MemoryRegion *),
593
DEFINE_PROP_END_OF_LIST(),
596
static void tz_mpc_class_init(ObjectClass *klass, void *data)
598
DeviceClass *dc = DEVICE_CLASS(klass);
600
dc->realize = tz_mpc_realize;
601
dc->vmsd = &tz_mpc_vmstate;
602
dc->reset = tz_mpc_reset;
603
device_class_set_props(dc, tz_mpc_properties);
606
static const TypeInfo tz_mpc_info = {
608
.parent = TYPE_SYS_BUS_DEVICE,
609
.instance_size = sizeof(TZMPC),
610
.instance_init = tz_mpc_init,
611
.class_init = tz_mpc_class_init,
614
static void tz_mpc_iommu_memory_region_class_init(ObjectClass *klass,
617
IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
619
imrc->translate = tz_mpc_translate;
620
imrc->attrs_to_index = tz_mpc_attrs_to_index;
621
imrc->num_indexes = tz_mpc_num_indexes;
624
static const TypeInfo tz_mpc_iommu_memory_region_info = {
625
.name = TYPE_TZ_MPC_IOMMU_MEMORY_REGION,
626
.parent = TYPE_IOMMU_MEMORY_REGION,
627
.class_init = tz_mpc_iommu_memory_region_class_init,
630
static void tz_mpc_register_types(void)
632
type_register_static(&tz_mpc_info);
633
type_register_static(&tz_mpc_iommu_memory_region_info);
636
type_init(tz_mpc_register_types);