11
#include "qemu/osdep.h"
15
#include "hw/qdev-properties.h"
16
#include "hw/arm/pxa.h"
18
#include "migration/vmstate.h"
19
#include "qapi/error.h"
20
#include "qemu/module.h"
21
#include "qom/object.h"
23
#define PXA255_DMA_NUM_CHANNELS 16
24
#define PXA27X_DMA_NUM_CHANNELS 32
26
#define PXA2XX_DMA_NUM_REQUESTS 75
37
#define TYPE_PXA2XX_DMA "pxa2xx-dma"
38
OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxDMAState, PXA2XX_DMA)
40
struct PXA2xxDMAState {
41
SysBusDevice parent_obj;
56
PXA2xxDMAChannel *chan;
58
uint8_t req[PXA2XX_DMA_NUM_REQUESTS];
85
/* DRCMR: request-to-channel map register fields. */
#define DRCMR_CHLNUM            0x1f
#define DRCMR_MAPVLD            (1 << 7)

/* DDADR: descriptor address register fields. */
#define DDADR_STOP              (1 << 0)
#define DDADR_BREN              (1 << 1)

/* DCMD: command register fields. */
#define DCMD_LEN                0x1fff
#define DCMD_WIDTH(x)           (1 << ((((x) >> 14) & 3) - 1))
#define DCMD_SIZE(x)            (4 << (((x) >> 16) & 3))
#define DCMD_FLYBYT             (1 << 19)
#define DCMD_FLYBYS             (1 << 20)
#define DCMD_ENDIRQEN           (1 << 21)
#define DCMD_STARTIRQEN         (1 << 22)
#define DCMD_CMPEN              (1 << 25)
#define DCMD_FLOWTRG            (1 << 28)
#define DCMD_FLOWSRC            (1 << 29)
#define DCMD_INCTRGADDR         (1 << 30)
/* Unsigned: shifting 1 into the sign bit of int is undefined behavior. */
#define DCMD_INCSRCADDR         (1U << 31)

/* DCSR: channel status/control register fields. */
#define DCSR_BUSERRINTR         (1 << 0)
#define DCSR_STARTINTR          (1 << 1)
#define DCSR_ENDINTR            (1 << 2)
#define DCSR_STOPINTR           (1 << 3)
#define DCSR_RASINTR            (1 << 4)
#define DCSR_REQPEND            (1 << 8)
#define DCSR_EORINT             (1 << 9)
#define DCSR_CMPST              (1 << 10)
#define DCSR_MASKRUN            (1 << 22)
#define DCSR_RASIRQEN           (1 << 23)
#define DCSR_CLRCMPST           (1 << 24)
#define DCSR_SETCMPST           (1 << 25)
#define DCSR_EORSTOPEN          (1 << 26)
#define DCSR_EORJMPEN           (1 << 27)
#define DCSR_EORIRQEN           (1 << 28)
#define DCSR_STOPIRQEN          (1 << 29)
#define DCSR_NODESCFETCH        (1 << 30)
/* Unsigned: shifting 1 into the sign bit of int is undefined behavior. */
#define DCSR_RUN                (1U << 31)
120
/*
 * Recompute the per-cause interrupt summary bits for channel @ch (skip
 * the per-channel part when @ch is negative) and drive the shared IRQ
 * line accordingly.
 *
 * Note: 1U << ch, not 1 << ch — ch can be 31 on PXA27x and shifting a
 * signed 1 into the sign bit is undefined behavior.
 */
static inline void pxa2xx_dma_update(PXA2xxDMAState *s, int ch)
{
    if (ch >= 0) {
        /* Stop interrupt is only visible when DCSR_STOPIRQEN is set. */
        if ((s->chan[ch].state & DCSR_STOPIRQEN) &&
            (s->chan[ch].state & DCSR_STOPINTR)) {
            s->stopintr |= 1U << ch;
        } else {
            s->stopintr &= ~(1U << ch);
        }

        /* End-of-receive interrupt, gated by DCSR_EORIRQEN. */
        if ((s->chan[ch].state & DCSR_EORIRQEN) &&
            (s->chan[ch].state & DCSR_EORINT)) {
            s->eorintr |= 1U << ch;
        } else {
            s->eorintr &= ~(1U << ch);
        }

        /* Request-after-stop interrupt, gated by DCSR_RASIRQEN. */
        if ((s->chan[ch].state & DCSR_RASIRQEN) &&
            (s->chan[ch].state & DCSR_RASINTR)) {
            s->rasintr |= 1U << ch;
        } else {
            s->rasintr &= ~(1U << ch);
        }

        /* Start and end interrupts have no separate enable bit here. */
        if (s->chan[ch].state & DCSR_STARTINTR) {
            s->startintr |= 1U << ch;
        } else {
            s->startintr &= ~(1U << ch);
        }
        if (s->chan[ch].state & DCSR_ENDINTR) {
            s->endintr |= 1U << ch;
        } else {
            s->endintr &= ~(1U << ch);
        }
    }

    /* Raise the shared IRQ if any cause is pending on any channel. */
    if (s->stopintr | s->eorintr | s->rasintr | s->startintr | s->endintr) {
        qemu_irq_raise(s->irq);
    } else {
        qemu_irq_lower(s->irq);
    }
}
158
static inline void pxa2xx_dma_descriptor_fetch(
159
PXA2xxDMAState *s, int ch)
162
hwaddr daddr = s->chan[ch].descr & ~0xf;
163
if ((s->chan[ch].descr & DDADR_BREN) && (s->chan[ch].state & DCSR_CMPST))
166
cpu_physical_memory_read(daddr, desc, 16);
167
s->chan[ch].descr = desc[DDADR];
168
s->chan[ch].src = desc[DSADR];
169
s->chan[ch].dest = desc[DTADR];
170
s->chan[ch].cmd = desc[DCMD];
172
if (s->chan[ch].cmd & DCMD_FLOWSRC)
173
s->chan[ch].src &= ~3;
174
if (s->chan[ch].cmd & DCMD_FLOWTRG)
175
s->chan[ch].dest &= ~3;
177
if (s->chan[ch].cmd & (DCMD_CMPEN | DCMD_FLYBYS | DCMD_FLYBYT))
178
printf("%s: unsupported mode in channel %i\n", __func__, ch);
180
if (s->chan[ch].cmd & DCMD_STARTIRQEN)
181
s->chan[ch].state |= DCSR_STARTINTR;
184
/*
 * Execute pending transfers on all running channels.
 *
 * Transfers are synchronous: data is copied through a small bounce
 * buffer between the source and destination latched in the channel
 * registers.  s->running guards against recursion — a device callback
 * triggered by cpu_physical_memory_write() may raise a new DMA request,
 * which is then serviced by an extra pass of the outer loop instead of
 * a nested call.
 *
 * NOTE(review): the declarations and loop scaffolding were missing from
 * the truncated original and have been reconstructed — confirm against
 * the full source.
 */
static void pxa2xx_dma_run(PXA2xxDMAState *s)
{
    int c, srcinc, destinc;
    uint32_t n, size;
    uint32_t width;
    uint32_t length;
    uint8_t buffer[32];
    PXA2xxDMAChannel *ch;

    if (s->running++) {
        return;
    }

    while (s->running) {
        s->running = 1;
        for (c = 0; c < s->channels; c++) {
            ch = &s->chan[c];

            while ((ch->state & DCSR_RUN) && !(ch->state & DCSR_STOPINTR)) {
                /* Flow-controlled channels wait for a pending request. */
                if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) && !ch->request) {
                    break;
                }

                length = ch->cmd & DCMD_LEN;
                size = DCMD_SIZE(ch->cmd);
                width = DCMD_WIDTH(ch->cmd);

                /* Address increments apply per element moved. */
                srcinc = (ch->cmd & DCMD_INCSRCADDR) ? width : 0;
                destinc = (ch->cmd & DCMD_INCTRGADDR) ? width : 0;

                while (length) {
                    size = MIN(length, size);

                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_read(ch->src, buffer + n, width);
                        ch->src += srcinc;
                    }

                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_write(ch->dest, buffer + n, width);
                        ch->dest += destinc;
                    }

                    length -= size;

                    /* End-of-receive: requests ran out mid-transfer. */
                    if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) &&
                        !ch->request) {
                        ch->state |= DCSR_EORINT;
                        if (ch->state & DCSR_EORSTOPEN) {
                            ch->state |= DCSR_STOPINTR;
                        }
                        if ((ch->state & DCSR_EORJMPEN) &&
                            !(ch->state & DCSR_NODESCFETCH)) {
                            pxa2xx_dma_descriptor_fetch(s, c);
                        }
                        break;
                    }
                }

                /* Write the remaining length back into DCMD. */
                ch->cmd = (ch->cmd & ~DCMD_LEN) | length;

                /* Is the transfer complete now? */
                if (!length) {
                    if (ch->cmd & DCMD_ENDIRQEN) {
                        ch->state |= DCSR_ENDINTR;
                    }

                    if ((ch->state & DCSR_NODESCFETCH) ||
                        (ch->descr & DDADR_STOP) ||
                        (ch->state & DCSR_EORSTOPEN)) {
                        ch->state |= DCSR_STOPINTR;
                        ch->state &= ~DCSR_RUN;

                        break;
                    }

                    /*
                     * NOTE(review): preserved from the original — this
                     * sets STOPINTR before chaining to the next
                     * descriptor, which also terminates the per-channel
                     * loop; confirm intended chaining behavior.
                     */
                    ch->state |= DCSR_STOPINTR;
                    pxa2xx_dma_descriptor_fetch(s, c);
                }
            }
        }

        s->running--;
    }
}
266
/*
 * MMIO read handler.  All registers are 32 bits wide; narrower accesses
 * are rejected with a guest-error log.
 */
static uint64_t pxa2xx_dma_read(void *opaque, hwaddr offset,
                                unsigned size)
{
    PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
    unsigned int channel;

    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad access width %u\n",
                      __func__, size);
        return 0;
    }

    switch (offset) {
    case DRCMR64 ... DRCMR74:
        /* Fold the upper DRCMR bank onto the lower one. */
        offset -= DRCMR64 - DRCMR0 - (64 << 2);
        /* Fall through */
    case DRCMR0 ... DRCMR63:
        channel = (offset - DRCMR0) >> 2;
        return s->req[channel];

    case DRQSR0:
    case DRQSR1:
    case DRQSR2:
        return 0;

    case DCSR0 ... DCSR31:
        channel = offset >> 2;
        /*
         * Fix: the PXA255 flavour only implements 16 channels, so an
         * unguarded DCSR16..DCSR31 read would index s->chan[] out of
         * bounds.
         */
        if (channel >= (unsigned int)s->channels) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad channel %u\n",
                          __func__, channel);
            return 0;
        }
        if (s->chan[channel].request) {
            return s->chan[channel].state | DCSR_REQPEND;
        }
        return s->chan[channel].state;

    case DINT:
        return s->stopintr | s->eorintr | s->rasintr |
               s->startintr | s->endintr;

    case DALGN:
        return s->align;

    case DPCSR:
        return s->pio;
    }

    /* Per-channel descriptor registers: DDADR/DSADR/DTADR/DCMD. */
    if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
        channel = (offset - D_CH0) >> 4;
        switch ((offset & 0x0f) >> 2) {
        case DDADR:
            return s->chan[channel].descr;
        case DSADR:
            return s->chan[channel].src;
        case DTADR:
            return s->chan[channel].dest;
        case DCMD:
            return s->chan[channel].cmd;
        }
    }

    qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
                  __func__, offset);
    return 0;
}
326
static void pxa2xx_dma_write(void *opaque, hwaddr offset,
327
uint64_t value, unsigned size)
329
PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
330
unsigned int channel;
333
qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad access width %u\n",
339
case DRCMR64 ... DRCMR74:
340
offset -= DRCMR64 - DRCMR0 - (64 << 2);
342
case DRCMR0 ... DRCMR63:
343
channel = (offset - DRCMR0) >> 2;
345
if (value & DRCMR_MAPVLD)
346
if ((value & DRCMR_CHLNUM) > s->channels)
347
hw_error("%s: Bad DMA channel %i\n",
348
__func__, (unsigned)value & DRCMR_CHLNUM);
350
s->req[channel] = value;
359
case DCSR0 ... DCSR31:
360
channel = offset >> 2;
361
s->chan[channel].state &= 0x0000071f & ~(value &
362
(DCSR_EORINT | DCSR_ENDINTR |
363
DCSR_STARTINTR | DCSR_BUSERRINTR));
364
s->chan[channel].state |= value & 0xfc800000;
366
if (s->chan[channel].state & DCSR_STOPIRQEN)
367
s->chan[channel].state &= ~DCSR_STOPINTR;
369
if (value & DCSR_NODESCFETCH) {
371
if (value & DCSR_RUN) {
372
s->chan[channel].state &= ~DCSR_STOPINTR;
377
if (value & DCSR_RUN) {
378
s->chan[channel].state &= ~DCSR_STOPINTR;
379
pxa2xx_dma_descriptor_fetch(s, channel);
385
if (!(value & (DCSR_RUN | DCSR_MASKRUN)))
386
s->chan[channel].state |= DCSR_STOPINTR;
388
if (value & DCSR_CLRCMPST)
389
s->chan[channel].state &= ~DCSR_CMPST;
390
if (value & DCSR_SETCMPST)
391
s->chan[channel].state |= DCSR_CMPST;
393
pxa2xx_dma_update(s, channel);
401
s->pio = value & 0x80000001;
405
if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
406
channel = (offset - D_CH0) >> 4;
407
switch ((offset & 0x0f) >> 2) {
409
s->chan[channel].descr = value;
412
s->chan[channel].src = value;
415
s->chan[channel].dest = value;
418
s->chan[channel].cmd = value;
427
qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
432
static const MemoryRegionOps pxa2xx_dma_ops = {
433
.read = pxa2xx_dma_read,
434
.write = pxa2xx_dma_write,
435
.endianness = DEVICE_NATIVE_ENDIAN,
438
static void pxa2xx_dma_request(void *opaque, int req_num, int on)
440
PXA2xxDMAState *s = opaque;
442
if (req_num < 0 || req_num >= PXA2XX_DMA_NUM_REQUESTS)
443
hw_error("%s: Bad DMA request %i\n", __func__, req_num);
445
if (!(s->req[req_num] & DRCMR_MAPVLD))
447
ch = s->req[req_num] & DRCMR_CHLNUM;
449
if (!s->chan[ch].request && on)
450
s->chan[ch].state |= DCSR_RASINTR;
452
s->chan[ch].state &= ~DCSR_RASINTR;
453
if (s->chan[ch].request && !on)
454
s->chan[ch].state |= DCSR_EORINT;
456
s->chan[ch].request = on;
459
pxa2xx_dma_update(s, ch);
463
/* Instance init: set up MMIO region, IRQ line and DMA request inputs. */
static void pxa2xx_dma_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
    PXA2xxDMAState *s = PXA2XX_DMA(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    memset(s->req, 0, sizeof(uint8_t) * PXA2XX_DMA_NUM_REQUESTS);

    qdev_init_gpio_in(dev, pxa2xx_dma_request, PXA2XX_DMA_NUM_REQUESTS);

    memory_region_init_io(&s->iomem, obj, &pxa2xx_dma_ops, s,
                          "pxa2xx.dma", 0x00010000);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq);
}
479
/*
 * Realize: validate the "channels" property (the default of -1 forces
 * the board code to set it) and allocate the channel array, each
 * channel starting in the stopped state.
 */
static void pxa2xx_dma_realize(DeviceState *dev, Error **errp)
{
    PXA2xxDMAState *s = PXA2XX_DMA(dev);
    int i;

    if (s->channels <= 0) {
        error_setg(errp, "channels value invalid");
        return;
    }

    s->chan = g_new0(PXA2xxDMAChannel, s->channels);

    for (i = 0; i < s->channels; i++) {
        s->chan[i].state = DCSR_STOPINTR;
    }
}
495
/* Create and wire a 32-channel PXA27x DMA controller at @base. */
DeviceState *pxa27x_dma_init(hwaddr base, qemu_irq irq)
{
    DeviceState *dev;

    dev = qdev_new("pxa2xx-dma");
    qdev_prop_set_int32(dev, "channels", PXA27X_DMA_NUM_CHANNELS);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);

    return dev;
}
509
/*
 * Create and wire a PXA255 DMA controller at @base.
 *
 * Fix: this used PXA27X_DMA_NUM_CHANNELS (32); the PXA255 controller
 * has only 16 channels (PXA255_DMA_NUM_CHANNELS).
 */
DeviceState *pxa255_dma_init(hwaddr base, qemu_irq irq)
{
    DeviceState *dev;

    dev = qdev_new("pxa2xx-dma");
    qdev_prop_set_int32(dev, "channels", PXA255_DMA_NUM_CHANNELS);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);

    return dev;
}
523
/* Migration helper: true when loading a version-0 stream. */
static bool is_version_0(void *opaque, int version_id)
{
    return version_id == 0;
}
528
static const VMStateDescription vmstate_pxa2xx_dma_chan = {
529
.name = "pxa2xx_dma_chan",
531
.minimum_version_id = 1,
532
.fields = (const VMStateField[]) {
533
VMSTATE_UINT32(descr, PXA2xxDMAChannel),
534
VMSTATE_UINT32(src, PXA2xxDMAChannel),
535
VMSTATE_UINT32(dest, PXA2xxDMAChannel),
536
VMSTATE_UINT32(cmd, PXA2xxDMAChannel),
537
VMSTATE_UINT32(state, PXA2xxDMAChannel),
538
VMSTATE_INT32(request, PXA2xxDMAChannel),
539
VMSTATE_END_OF_LIST(),
543
static const VMStateDescription vmstate_pxa2xx_dma = {
544
.name = "pxa2xx_dma",
546
.minimum_version_id = 0,
547
.fields = (const VMStateField[]) {
548
VMSTATE_UNUSED_TEST(is_version_0, 4),
549
VMSTATE_UINT32(stopintr, PXA2xxDMAState),
550
VMSTATE_UINT32(eorintr, PXA2xxDMAState),
551
VMSTATE_UINT32(rasintr, PXA2xxDMAState),
552
VMSTATE_UINT32(startintr, PXA2xxDMAState),
553
VMSTATE_UINT32(endintr, PXA2xxDMAState),
554
VMSTATE_UINT32(align, PXA2xxDMAState),
555
VMSTATE_UINT32(pio, PXA2xxDMAState),
556
VMSTATE_BUFFER(req, PXA2xxDMAState),
557
VMSTATE_STRUCT_VARRAY_POINTER_INT32(chan, PXA2xxDMAState, channels,
558
vmstate_pxa2xx_dma_chan, PXA2xxDMAChannel),
559
VMSTATE_END_OF_LIST(),
563
static Property pxa2xx_dma_properties[] = {
564
DEFINE_PROP_INT32("channels", PXA2xxDMAState, channels, -1),
565
DEFINE_PROP_END_OF_LIST(),
568
static void pxa2xx_dma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "PXA2xx DMA controller";
    dc->vmsd = &vmstate_pxa2xx_dma;
    device_class_set_props(dc, pxa2xx_dma_properties);
    dc->realize = pxa2xx_dma_realize;
}
578
static const TypeInfo pxa2xx_dma_info = {
579
.name = TYPE_PXA2XX_DMA,
580
.parent = TYPE_SYS_BUS_DEVICE,
581
.instance_size = sizeof(PXA2xxDMAState),
582
.instance_init = pxa2xx_dma_init,
583
.class_init = pxa2xx_dma_class_init,
586
static void pxa2xx_dma_register_types(void)
588
type_register_static(&pxa2xx_dma_info);
591
type_init(pxa2xx_dma_register_types)