/*
 * xlnx_dpdma.c
 *
 * Copyright (C) 2015 : GreenSocs Ltd
 *      http://www.greensocs.com/ , email: info@greensocs.com
 *
 *  Developed by :
 *  Frederic Konrad   <fred.konrad@greensocs.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "hw/dma/xlnx_dpdma.h"
#include "hw/irq.h"
#include "migration/vmstate.h"
#ifndef DEBUG_DPDMA
#define DEBUG_DPDMA 0
#endif

/*
 * Debug logging helper: compiled away (constant-folded) unless DEBUG_DPDMA
 * is defined to a non-zero value.  do/while(0) keeps it statement-safe.
 */
#define DPRINTF(fmt, ...) do {                                                 \
    if (DEBUG_DPDMA) {                                                         \
        qemu_log("xlnx_dpdma: " fmt , ## __VA_ARGS__);                         \
    }                                                                          \
} while (0)
/*
 * Registers offset for DPDMA.
 *
 * All offsets are pre-shifted by 2 so they index the 32-bit word array
 * s->registers[] directly (the MMIO handlers shift the byte offset the
 * same way before dispatching).
 */
#define DPDMA_ERR_CTRL                    (0x0000)
#define DPDMA_ISR                         (0x0004 >> 2)
#define DPDMA_IMR                         (0x0008 >> 2)
#define DPDMA_IEN                         (0x000C >> 2)
#define DPDMA_IDS                         (0x0010 >> 2)
#define DPDMA_EISR                        (0x0014 >> 2)
#define DPDMA_EIMR                        (0x0018 >> 2)
#define DPDMA_EIEN                        (0x001C >> 2)
#define DPDMA_EIDS                        (0x0020 >> 2)
#define DPDMA_CNTL                        (0x0100 >> 2)

#define DPDMA_GBL                         (0x0104 >> 2)
/* Software trigger / retrigger bits, one per channel (retrigger at bit 6). */
#define DPDMA_GBL_TRG_CH(n)               (1 << (n))
#define DPDMA_GBL_RTRG_CH(n)              (1 << 6 << (n))

#define DPDMA_ALC0_CNTL                   (0x0108 >> 2)
#define DPDMA_ALC0_STATUS                 (0x010C >> 2)
#define DPDMA_ALC0_MAX                    (0x0110 >> 2)
#define DPDMA_ALC0_MIN                    (0x0114 >> 2)
#define DPDMA_ALC0_ACC                    (0x0118 >> 2)
#define DPDMA_ALC0_ACC_TRAN               (0x011C >> 2)
#define DPDMA_ALC1_CNTL                   (0x0120 >> 2)
#define DPDMA_ALC1_STATUS                 (0x0124 >> 2)
#define DPDMA_ALC1_MAX                    (0x0128 >> 2)
#define DPDMA_ALC1_MIN                    (0x012C >> 2)
#define DPDMA_ALC1_ACC                    (0x0130 >> 2)
#define DPDMA_ALC1_ACC_TRAN               (0x0134 >> 2)

/* Per-channel register banks are 0x100 bytes apart. */
#define DPDMA_DSCR_STRT_ADDRE_CH(n)       ((0x0200 + (n) * 0x100) >> 2)
#define DPDMA_DSCR_STRT_ADDR_CH(n)        ((0x0204 + (n) * 0x100) >> 2)
#define DPDMA_DSCR_NEXT_ADDRE_CH(n)       ((0x0208 + (n) * 0x100) >> 2)
#define DPDMA_DSCR_NEXT_ADDR_CH(n)        ((0x020C + (n) * 0x100) >> 2)
#define DPDMA_PYLD_CUR_ADDRE_CH(n)        ((0x0210 + (n) * 0x100) >> 2)
#define DPDMA_PYLD_CUR_ADDR_CH(n)         ((0x0214 + (n) * 0x100) >> 2)

#define DPDMA_CNTL_CH(n)                  ((0x0218 + (n) * 0x100) >> 2)
#define DPDMA_CNTL_CH_EN                  (1)
#define DPDMA_CNTL_CH_PAUSED              (1 << 1)

#define DPDMA_STATUS_CH(n)                ((0x021C + (n) * 0x100) >> 2)
#define DPDMA_STATUS_BURST_TYPE           (1 << 4)
#define DPDMA_STATUS_MODE                 (1 << 5)
#define DPDMA_STATUS_EN_CRC               (1 << 6)
#define DPDMA_STATUS_LAST_DSCR            (1 << 7)
#define DPDMA_STATUS_LDSCR_FRAME          (1 << 8)
#define DPDMA_STATUS_IGNR_DONE            (1 << 9)
#define DPDMA_STATUS_DSCR_DONE            (1 << 10)
#define DPDMA_STATUS_EN_DSCR_UP           (1 << 11)
#define DPDMA_STATUS_EN_DSCR_INTR         (1 << 12)
#define DPDMA_STATUS_PREAMBLE_OFF         (13)

#define DPDMA_VDO_CH(n)                   ((0x0220 + (n) * 0x100) >> 2)
#define DPDMA_PYLD_SZ_CH(n)               ((0x0224 + (n) * 0x100) >> 2)
#define DPDMA_DSCR_ID_CH(n)               ((0x0228 + (n) * 0x100) >> 2)

/*
 * Descriptor control field.
 */
#define CONTROL_PREAMBLE_VALUE            0xA5

#define DSCR_CTRL_PREAMBLE                0xFF
#define DSCR_CTRL_EN_DSCR_DONE_INTR       (1 << 8)
#define DSCR_CTRL_EN_DSCR_UPDATE          (1 << 9)
#define DSCR_CTRL_IGNORE_DONE             (1 << 10)
#define DSCR_CTRL_AXI_BURST_TYPE          (1 << 11)
#define DSCR_CTRL_AXCACHE                 (0x0F << 12)
#define DSCR_CTRL_AXPROT                  (0x2 << 16)
#define DSCR_CTRL_DESCRIPTOR_MODE         (1 << 18)
#define DSCR_CTRL_LAST_DESCRIPTOR         (1 << 19)
#define DSCR_CTRL_ENABLE_CRC              (1 << 20)
#define DSCR_CTRL_LAST_DESCRIPTOR_OF_FRAME (1 << 21)

/*
 * Descriptor timestamp field.
 */
/* Unsigned constant: left-shifting 1 into the sign bit is undefined in C. */
#define STATUS_DONE                       (1U << 31)

#define DPDMA_FRAG_MAX_SZ                 (4096)
/*
 * NOTE(review): the two enums below were dropped by the extraction; the
 * typedefs further down prove they exist, but the enumerator names/values
 * are reconstructed — confirm against the original file.
 */
enum DPDMABurstType {
    DPDMA_INCR = 0,
    DPDMA_FIXED = 1
};

enum DPDMAMode {
    DPDMA_RETRIGGER_MODE,
    DPDMA_NORMAL_MODE
};

/*
 * In-memory layout of one DPDMA descriptor: sixteen little-endian 32-bit
 * words.  xlnx_dpdma_desc_check_crc() sums the first fifteen words and
 * compares against the last (crc), so the field count/order matters.
 */
struct DPDMADescriptor {
    uint32_t control;
    uint32_t descriptor_id;
    /* transfer size in byte. */
    uint32_t xfer_size;
    uint32_t line_size_stride;
    uint32_t timestamp_lsb;
    uint32_t timestamp_msb;
    /* contains extension for both descriptor and source. */
    uint32_t address_extension;
    uint32_t next_descriptor;
    uint32_t source_address;
    uint32_t address_extension_23;
    uint32_t address_extension_45;
    uint32_t source_address2;
    uint32_t source_address3;
    uint32_t source_address4;
    uint32_t source_address5;
    uint32_t crc;
};

typedef enum DPDMABurstType DPDMABurstType;
typedef enum DPDMAMode DPDMAMode;
typedef struct DPDMADescriptor DPDMADescriptor;
/* True if this descriptor is flagged as the last of its chain. */
static bool xlnx_dpdma_desc_is_last(DPDMADescriptor *desc)
{
    return ((desc->control & DSCR_CTRL_LAST_DESCRIPTOR) != 0);
}
/* True if this descriptor is flagged as the last one of a frame. */
static bool xlnx_dpdma_desc_is_last_of_frame(DPDMADescriptor *desc)
{
    return ((desc->control & DSCR_CTRL_LAST_DESCRIPTOR_OF_FRAME) != 0);
}
/*
 * Return the 48-bit source address of fragment @frag (0..4).
 *
 * Each fragment stores its low 32 address bits in a source_addressN word
 * and its high 16 bits in half of an address_extension word; the extension
 * is placed at bit 32 of the result.
 */
static uint64_t xlnx_dpdma_desc_get_source_address(DPDMADescriptor *desc,
                                                   uint8_t frag)
{
    uint64_t addr = 0;

    assert(frag < 5);

    switch (frag) {
    case 0:
        addr = (uint64_t)desc->source_address
            + (extract64(desc->address_extension, 16, 16) << 32);
        break;
    case 1:
        addr = (uint64_t)desc->source_address2
            + (extract64(desc->address_extension_23, 0, 16) << 32);
        break;
    case 2:
        addr = (uint64_t)desc->source_address3
            + (extract64(desc->address_extension_23, 16, 16) << 32);
        break;
    case 3:
        addr = (uint64_t)desc->source_address4
            + (extract64(desc->address_extension_45, 0, 16) << 32);
        break;
    case 4:
        addr = (uint64_t)desc->source_address5
            + (extract64(desc->address_extension_45, 16, 16) << 32);
        break;
    }

    return addr;
}
/* Total transfer size of the descriptor, in bytes. */
static uint32_t xlnx_dpdma_desc_get_transfer_size(DPDMADescriptor *desc)
{
    return desc->xfer_size;
}
/* Line size in bytes: low 18 bits of the line_size_stride word. */
static uint32_t xlnx_dpdma_desc_get_line_size(DPDMADescriptor *desc)
{
    return extract32(desc->line_size_stride, 0, 18);
}
/* Line stride in bytes: upper 14 bits, stored in units of 16 bytes. */
static uint32_t xlnx_dpdma_desc_get_line_stride(DPDMADescriptor *desc)
{
    return extract32(desc->line_size_stride, 18, 14) * 16;
}
/* True if the descriptor requests CRC checking. */
static inline bool xlnx_dpdma_desc_crc_enabled(DPDMADescriptor *desc)
{
    return (desc->control & DSCR_CTRL_ENABLE_CRC) != 0;
}
/*
 * Validate the descriptor checksum.
 *
 * CRC is calculated on the whole descriptor except the last 32bits word
 * using 32bits addition.
 */
static inline bool xlnx_dpdma_desc_check_crc(DPDMADescriptor *desc)
{
    uint32_t *p = (uint32_t *)desc;
    uint32_t crc = 0;
    uint8_t i;

    /* 15 words cover every field up to, but not including, desc->crc. */
    for (i = 0; i < 15; i++) {
        crc += p[i];
    }

    return crc == desc->crc;
}
/* True if an interrupt must be raised when this descriptor completes. */
static inline bool xlnx_dpdma_desc_completion_interrupt(DPDMADescriptor *desc)
{
    return (desc->control & DSCR_CTRL_EN_DSCR_DONE_INTR) != 0;
}
/* A descriptor is valid when its preamble byte matches 0xA5. */
static inline bool xlnx_dpdma_desc_is_valid(DPDMADescriptor *desc)
{
    return (desc->control & DSCR_CTRL_PREAMBLE) == CONTROL_PREAMBLE_VALUE;
}
/* True for contiguous payload mode, false for fragmented mode. */
static inline bool xlnx_dpdma_desc_is_contiguous(DPDMADescriptor *desc)
{
    return (desc->control & DSCR_CTRL_DESCRIPTOR_MODE) == 0;
}
/* True if the descriptor must be written back once processed. */
static inline bool xlnx_dpdma_desc_update_enabled(DPDMADescriptor *desc)
{
    return (desc->control & DSCR_CTRL_EN_DSCR_UPDATE) != 0;
}
/* Mark the descriptor as processed (done bit lives in timestamp_msb). */
static inline void xlnx_dpdma_desc_set_done(DPDMADescriptor *desc)
{
    desc->timestamp_msb |= STATUS_DONE;
}
/* True if the descriptor has already been processed. */
static inline bool xlnx_dpdma_desc_is_already_done(DPDMADescriptor *desc)
{
    return (desc->timestamp_msb & STATUS_DONE) != 0;
}
/* True if the done bit must be ignored when (re)processing. */
static inline bool xlnx_dpdma_desc_ignore_done_bit(DPDMADescriptor *desc)
{
    return (desc->control & DSCR_CTRL_IGNORE_DONE) != 0;
}
static const VMStateDescription vmstate_xlnx_dpdma = {
278
.name = TYPE_XLNX_DPDMA,
280
.fields = (const VMStateField[]) {
281
VMSTATE_UINT32_ARRAY(registers, XlnxDPDMAState,
282
XLNX_DPDMA_REG_ARRAY_SIZE),
283
VMSTATE_BOOL_ARRAY(operation_finished, XlnxDPDMAState, 6),
284
VMSTATE_END_OF_LIST()
288
/* Recompute the IRQ line from both interrupt banks, honouring the masks. */
static void xlnx_dpdma_update_irq(XlnxDPDMAState *s)
{
    bool flags;

    flags = ((s->registers[DPDMA_ISR] & (~s->registers[DPDMA_IMR]))
          || (s->registers[DPDMA_EISR] & (~s->registers[DPDMA_EIMR])));
    qemu_set_irq(s->irq, flags);
}
/*
 * Return the address of the first descriptor of @channel.
 *
 * The STRT_ADDRE register holds a 16-bit address extension (the write
 * handler masks it with 0xFFFF) and STRT_ADDR the low 32 bits, so the
 * extension belongs at bit 32 — exactly as in
 * xlnx_dpdma_descriptor_next_address().  The previous "<< 16" both placed
 * the extension in the wrong bits and truncated the sum to 32-bit
 * arithmetic.
 */
static uint64_t xlnx_dpdma_descriptor_start_address(XlnxDPDMAState *s,
                                                    uint8_t channel)
{
    return ((uint64_t)s->registers[DPDMA_DSCR_STRT_ADDRE_CH(channel)] << 32)
          + s->registers[DPDMA_DSCR_STRT_ADDR_CH(channel)];
}
/* Return the address of the next descriptor of @channel (ext at bit 32). */
static uint64_t xlnx_dpdma_descriptor_next_address(XlnxDPDMAState *s,
                                                   uint8_t channel)
{
    return ((uint64_t)s->registers[DPDMA_DSCR_NEXT_ADDRE_CH(channel)] << 32)
          + s->registers[DPDMA_DSCR_NEXT_ADDR_CH(channel)];
}
/* True if @channel has its enable bit set in its control register. */
static bool xlnx_dpdma_is_channel_enabled(XlnxDPDMAState *s,
                                          uint8_t channel)
{
    return (s->registers[DPDMA_CNTL_CH(channel)] & DPDMA_CNTL_CH_EN) != 0;
}
/* True if @channel is paused. */
static bool xlnx_dpdma_is_channel_paused(XlnxDPDMAState *s,
                                         uint8_t channel)
{
    return (s->registers[DPDMA_CNTL_CH(channel)] & DPDMA_CNTL_CH_PAUSED) != 0;
}
/*
 * Test-and-clear the retrigger bit of @channel in the GBL register.
 * Note the read has a side effect: the bit is consumed.
 */
static inline bool xlnx_dpdma_is_channel_retriggered(XlnxDPDMAState *s,
                                                     uint8_t channel)
{
    /* Clear the retriggered bit after reading it. */
    bool channel_is_retriggered = s->registers[DPDMA_GBL]
                                & DPDMA_GBL_RTRG_CH(channel);
    s->registers[DPDMA_GBL] &= ~DPDMA_GBL_RTRG_CH(channel);
    return channel_is_retriggered;
}
/* True if software has triggered @channel via the GBL register. */
static inline bool xlnx_dpdma_is_channel_triggered(XlnxDPDMAState *s,
                                                   uint8_t channel)
{
    return s->registers[DPDMA_GBL] & DPDMA_GBL_TRG_CH(channel);
}
static void xlnx_dpdma_update_desc_info(XlnxDPDMAState *s, uint8_t channel,
340
DPDMADescriptor *desc)
342
s->registers[DPDMA_DSCR_NEXT_ADDRE_CH(channel)] =
343
extract32(desc->address_extension, 0, 16);
344
s->registers[DPDMA_DSCR_NEXT_ADDR_CH(channel)] = desc->next_descriptor;
345
s->registers[DPDMA_PYLD_CUR_ADDRE_CH(channel)] =
346
extract32(desc->address_extension, 16, 16);
347
s->registers[DPDMA_PYLD_CUR_ADDR_CH(channel)] = desc->source_address;
348
s->registers[DPDMA_VDO_CH(channel)] =
349
extract32(desc->line_size_stride, 18, 14)
350
+ (extract32(desc->line_size_stride, 0, 18)
352
s->registers[DPDMA_PYLD_SZ_CH(channel)] = desc->xfer_size;
353
s->registers[DPDMA_DSCR_ID_CH(channel)] = desc->descriptor_id;
355
/* Compute the status register with the descriptor information. */
356
s->registers[DPDMA_STATUS_CH(channel)] =
357
extract32(desc->control, 0, 8) << 13;
358
if ((desc->control & DSCR_CTRL_EN_DSCR_DONE_INTR) != 0) {
359
s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_EN_DSCR_INTR;
361
if ((desc->control & DSCR_CTRL_EN_DSCR_UPDATE) != 0) {
362
s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_EN_DSCR_UP;
364
if ((desc->timestamp_msb & STATUS_DONE) != 0) {
365
s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_DSCR_DONE;
367
if ((desc->control & DSCR_CTRL_IGNORE_DONE) != 0) {
368
s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_IGNR_DONE;
370
if ((desc->control & DSCR_CTRL_LAST_DESCRIPTOR_OF_FRAME) != 0) {
371
s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_LDSCR_FRAME;
373
if ((desc->control & DSCR_CTRL_LAST_DESCRIPTOR) != 0) {
374
s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_LAST_DSCR;
376
if ((desc->control & DSCR_CTRL_ENABLE_CRC) != 0) {
377
s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_EN_CRC;
379
if ((desc->control & DSCR_CTRL_DESCRIPTOR_MODE) != 0) {
380
s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_MODE;
382
if ((desc->control & DSCR_CTRL_AXI_BURST_TYPE) != 0) {
383
s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_BURST_TYPE;
387
/* Debug helper: hexdump the raw descriptor when DEBUG_DPDMA is enabled. */
static void xlnx_dpdma_dump_descriptor(DPDMADescriptor *desc)
{
    if (DEBUG_DPDMA) {
        qemu_log("DUMP DESCRIPTOR:\n");
        qemu_hexdump(stdout, "", desc, sizeof(DPDMADescriptor));
    }
}
/*
 * MMIO read handler.  Converts the byte offset into a word index and
 * returns the backing register, except for write-only registers which
 * read back as zero.
 */
static uint64_t xlnx_dpdma_read(void *opaque, hwaddr offset,
                                unsigned size)
{
    XlnxDPDMAState *s = XLNX_DPDMA(opaque);

    DPRINTF("read @%" HWADDR_PRIx "\n", offset);
    offset = offset >> 2;

    switch (offset) {
    /*
     * Trying to read a write only register.
     */
    case DPDMA_GBL:
        return 0;
    default:
        assert(offset <= (0xFFC >> 2));
        return s->registers[offset];
    }
}
/*
 * MMIO write handler.
 *
 * Interrupt plumbing follows the usual Xilinx scheme: ISR/EISR are
 * write-one-to-clear, IEN/EIEN clear mask bits (enable), IDS/EIDS set
 * mask bits (disable), and IMR/EIMR themselves are read-only.  Writes to
 * a channel CNTL register clear that channel's software trigger bit.
 */
static void xlnx_dpdma_write(void *opaque, hwaddr offset,
                             uint64_t value, unsigned size)
{
    XlnxDPDMAState *s = XLNX_DPDMA(opaque);

    DPRINTF("write @%" HWADDR_PRIx " = %" PRIx64 "\n", offset, value);
    offset = offset >> 2;

    switch (offset) {
    case DPDMA_ISR:
        /* Write-one-to-clear; may lower the IRQ line. */
        s->registers[DPDMA_ISR] &= ~value;
        xlnx_dpdma_update_irq(s);
        break;
    case DPDMA_IEN:
        s->registers[DPDMA_IMR] &= ~value;
        break;
    case DPDMA_IDS:
        s->registers[DPDMA_IMR] |= value;
        break;
    case DPDMA_EISR:
        s->registers[DPDMA_EISR] &= ~value;
        xlnx_dpdma_update_irq(s);
        break;
    case DPDMA_EIEN:
        s->registers[DPDMA_EIMR] &= ~value;
        break;
    case DPDMA_EIDS:
        s->registers[DPDMA_EIMR] |= value;
        break;
    case DPDMA_IMR:
    case DPDMA_EIMR:
    case DPDMA_DSCR_NEXT_ADDRE_CH(0):
    case DPDMA_DSCR_NEXT_ADDRE_CH(1):
    case DPDMA_DSCR_NEXT_ADDRE_CH(2):
    case DPDMA_DSCR_NEXT_ADDRE_CH(3):
    case DPDMA_DSCR_NEXT_ADDRE_CH(4):
    case DPDMA_DSCR_NEXT_ADDRE_CH(5):
    case DPDMA_DSCR_NEXT_ADDR_CH(0):
    case DPDMA_DSCR_NEXT_ADDR_CH(1):
    case DPDMA_DSCR_NEXT_ADDR_CH(2):
    case DPDMA_DSCR_NEXT_ADDR_CH(3):
    case DPDMA_DSCR_NEXT_ADDR_CH(4):
    case DPDMA_DSCR_NEXT_ADDR_CH(5):
    case DPDMA_PYLD_CUR_ADDRE_CH(0):
    case DPDMA_PYLD_CUR_ADDRE_CH(1):
    case DPDMA_PYLD_CUR_ADDRE_CH(2):
    case DPDMA_PYLD_CUR_ADDRE_CH(3):
    case DPDMA_PYLD_CUR_ADDRE_CH(4):
    case DPDMA_PYLD_CUR_ADDRE_CH(5):
    case DPDMA_PYLD_CUR_ADDR_CH(0):
    case DPDMA_PYLD_CUR_ADDR_CH(1):
    case DPDMA_PYLD_CUR_ADDR_CH(2):
    case DPDMA_PYLD_CUR_ADDR_CH(3):
    case DPDMA_PYLD_CUR_ADDR_CH(4):
    case DPDMA_PYLD_CUR_ADDR_CH(5):
    case DPDMA_STATUS_CH(0):
    case DPDMA_STATUS_CH(1):
    case DPDMA_STATUS_CH(2):
    case DPDMA_STATUS_CH(3):
    case DPDMA_STATUS_CH(4):
    case DPDMA_STATUS_CH(5):
    case DPDMA_VDO_CH(0):
    case DPDMA_VDO_CH(1):
    case DPDMA_VDO_CH(2):
    case DPDMA_VDO_CH(3):
    case DPDMA_VDO_CH(4):
    case DPDMA_VDO_CH(5):
    case DPDMA_PYLD_SZ_CH(0):
    case DPDMA_PYLD_SZ_CH(1):
    case DPDMA_PYLD_SZ_CH(2):
    case DPDMA_PYLD_SZ_CH(3):
    case DPDMA_PYLD_SZ_CH(4):
    case DPDMA_PYLD_SZ_CH(5):
    case DPDMA_DSCR_ID_CH(0):
    case DPDMA_DSCR_ID_CH(1):
    case DPDMA_DSCR_ID_CH(2):
    case DPDMA_DSCR_ID_CH(3):
    case DPDMA_DSCR_ID_CH(4):
    case DPDMA_DSCR_ID_CH(5):
        /*
         * Trying to write to a read only register..
         */
        break;
    case DPDMA_GBL:
        /*
         * This is a write only register so it's read as zero in the read
         * callback.
         * We store the value anyway so we can know if the channel is
         * triggered.
         */
        s->registers[offset] |= value & 0x00000FFF;
        break;
    case DPDMA_DSCR_STRT_ADDRE_CH(0):
    case DPDMA_DSCR_STRT_ADDRE_CH(1):
    case DPDMA_DSCR_STRT_ADDRE_CH(2):
    case DPDMA_DSCR_STRT_ADDRE_CH(3):
    case DPDMA_DSCR_STRT_ADDRE_CH(4):
    case DPDMA_DSCR_STRT_ADDRE_CH(5):
        /* Only a 16-bit address extension is implemented. */
        value &= 0x0000FFFF;
        s->registers[offset] = value;
        break;
    case DPDMA_CNTL_CH(0):
        s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(0);
        value &= 0x3FFFFFFF;
        s->registers[offset] = value;
        break;
    case DPDMA_CNTL_CH(1):
        s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(1);
        value &= 0x3FFFFFFF;
        s->registers[offset] = value;
        break;
    case DPDMA_CNTL_CH(2):
        s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(2);
        value &= 0x3FFFFFFF;
        s->registers[offset] = value;
        break;
    case DPDMA_CNTL_CH(3):
        s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(3);
        value &= 0x3FFFFFFF;
        s->registers[offset] = value;
        break;
    case DPDMA_CNTL_CH(4):
        s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(4);
        value &= 0x3FFFFFFF;
        s->registers[offset] = value;
        break;
    case DPDMA_CNTL_CH(5):
        s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(5);
        value &= 0x3FFFFFFF;
        s->registers[offset] = value;
        break;
    default:
        assert(offset <= (0xFFC >> 2));
        s->registers[offset] = value;
        break;
    }
}
static const MemoryRegionOps dma_ops = {
555
.read = xlnx_dpdma_read,
556
.write = xlnx_dpdma_write,
557
.endianness = DEVICE_NATIVE_ENDIAN,
559
.min_access_size = 4,
560
.max_access_size = 4,
563
.min_access_size = 4,
564
.max_access_size = 4,
568
/* Instance init: map the 4KiB register region and export the IRQ line. */
static void xlnx_dpdma_init(Object *obj)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
    XlnxDPDMAState *s = XLNX_DPDMA(obj);

    memory_region_init_io(&s->iomem, obj, &dma_ops, s,
                          TYPE_XLNX_DPDMA, 0x1000);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq);
}
/* Device reset: restore register reset values and idle every channel. */
static void xlnx_dpdma_reset(DeviceState *dev)
{
    XlnxDPDMAState *s = XLNX_DPDMA(dev);
    size_t i;

    memset(s->registers, 0, sizeof(s->registers));
    /* Non-zero reset values: all interrupts masked, ALC MIN saturated. */
    s->registers[DPDMA_IMR] = 0x07FFFFFF;
    s->registers[DPDMA_EIMR] = 0xFFFFFFFF;
    s->registers[DPDMA_ALC0_MIN] = 0x0000FFFF;
    s->registers[DPDMA_ALC1_MIN] = 0x0000FFFF;

    for (i = 0; i < 6; i++) {
        s->data[i] = NULL;
        s->operation_finished[i] = true;
    }
}
/* QOM class init: wire up migration state and reset handler. */
static void xlnx_dpdma_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->vmsd = &vmstate_xlnx_dpdma;
    dc->reset = xlnx_dpdma_reset;
}
static const TypeInfo xlnx_dpdma_info = {
605
.name = TYPE_XLNX_DPDMA,
606
.parent = TYPE_SYS_BUS_DEVICE,
607
.instance_size = sizeof(XlnxDPDMAState),
608
.instance_init = xlnx_dpdma_init,
609
.class_init = xlnx_dpdma_class_init,
612
static void xlnx_dpdma_register_types(void)
614
type_register_static(&xlnx_dpdma_info);
617
static MemTxResult xlnx_dpdma_read_descriptor(XlnxDPDMAState *s,
619
DPDMADescriptor *desc)
621
MemTxResult res = dma_memory_read(&address_space_memory, desc_addr,
622
desc, sizeof(DPDMADescriptor),
623
MEMTXATTRS_UNSPECIFIED);
628
/* Convert from LE into host endianness. */
629
desc->control = le32_to_cpu(desc->control);
630
desc->descriptor_id = le32_to_cpu(desc->descriptor_id);
631
desc->xfer_size = le32_to_cpu(desc->xfer_size);
632
desc->line_size_stride = le32_to_cpu(desc->line_size_stride);
633
desc->timestamp_lsb = le32_to_cpu(desc->timestamp_lsb);
634
desc->timestamp_msb = le32_to_cpu(desc->timestamp_msb);
635
desc->address_extension = le32_to_cpu(desc->address_extension);
636
desc->next_descriptor = le32_to_cpu(desc->next_descriptor);
637
desc->source_address = le32_to_cpu(desc->source_address);
638
desc->address_extension_23 = le32_to_cpu(desc->address_extension_23);
639
desc->address_extension_45 = le32_to_cpu(desc->address_extension_45);
640
desc->source_address2 = le32_to_cpu(desc->source_address2);
641
desc->source_address3 = le32_to_cpu(desc->source_address3);
642
desc->source_address4 = le32_to_cpu(desc->source_address4);
643
desc->source_address5 = le32_to_cpu(desc->source_address5);
644
desc->crc = le32_to_cpu(desc->crc);
649
static MemTxResult xlnx_dpdma_write_descriptor(uint64_t desc_addr,
650
DPDMADescriptor *desc)
652
DPDMADescriptor tmp_desc = *desc;
654
/* Convert from host endianness into LE. */
655
tmp_desc.control = cpu_to_le32(tmp_desc.control);
656
tmp_desc.descriptor_id = cpu_to_le32(tmp_desc.descriptor_id);
657
tmp_desc.xfer_size = cpu_to_le32(tmp_desc.xfer_size);
658
tmp_desc.line_size_stride = cpu_to_le32(tmp_desc.line_size_stride);
659
tmp_desc.timestamp_lsb = cpu_to_le32(tmp_desc.timestamp_lsb);
660
tmp_desc.timestamp_msb = cpu_to_le32(tmp_desc.timestamp_msb);
661
tmp_desc.address_extension = cpu_to_le32(tmp_desc.address_extension);
662
tmp_desc.next_descriptor = cpu_to_le32(tmp_desc.next_descriptor);
663
tmp_desc.source_address = cpu_to_le32(tmp_desc.source_address);
664
tmp_desc.address_extension_23 = cpu_to_le32(tmp_desc.address_extension_23);
665
tmp_desc.address_extension_45 = cpu_to_le32(tmp_desc.address_extension_45);
666
tmp_desc.source_address2 = cpu_to_le32(tmp_desc.source_address2);
667
tmp_desc.source_address3 = cpu_to_le32(tmp_desc.source_address3);
668
tmp_desc.source_address4 = cpu_to_le32(tmp_desc.source_address4);
669
tmp_desc.source_address5 = cpu_to_le32(tmp_desc.source_address5);
670
tmp_desc.crc = cpu_to_le32(tmp_desc.crc);
672
return dma_memory_write(&address_space_memory, desc_addr, &tmp_desc,
673
sizeof(DPDMADescriptor), MEMTXATTRS_UNSPECIFIED);
676
/*
 * Run the DMA engine for @channel: walk the descriptor chain, copy each
 * descriptor's payload into the client buffer s->data[channel], and raise
 * the relevant ISR/EISR bits along the way.
 *
 * @one_desc: stop after a single descriptor instead of walking the chain
 *            until a last / last-of-frame descriptor.
 * Returns the number of payload bytes copied into s->data[channel].
 */
size_t xlnx_dpdma_start_operation(XlnxDPDMAState *s, uint8_t channel,
                                  bool one_desc)
{
    uint64_t desc_addr;
    uint64_t source_addr[6];
    DPDMADescriptor desc;
    bool done = false;
    size_t ptr = 0;

    assert(channel <= 5);

    DPRINTF("start dpdma channel 0x%" PRIX8 "\n", channel);

    if (!xlnx_dpdma_is_channel_triggered(s, channel)) {
        DPRINTF("Channel isn't triggered..\n");
        return 0;
    }

    if (!xlnx_dpdma_is_channel_enabled(s, channel)) {
        DPRINTF("Channel isn't enabled..\n");
        return 0;
    }

    if (xlnx_dpdma_is_channel_paused(s, channel)) {
        DPRINTF("Channel is paused..\n");
        return 0;
    }

    do {
        /* A fresh or retriggered channel restarts from the start address. */
        if ((s->operation_finished[channel])
          || xlnx_dpdma_is_channel_retriggered(s, channel)) {
            desc_addr = xlnx_dpdma_descriptor_start_address(s, channel);
            s->operation_finished[channel] = false;
        } else {
            desc_addr = xlnx_dpdma_descriptor_next_address(s, channel);
        }

        if (xlnx_dpdma_read_descriptor(s, desc_addr, &desc)) {
            s->registers[DPDMA_EISR] |= ((1 << 1) << channel);
            xlnx_dpdma_update_irq(s);
            s->operation_finished[channel] = true;
            DPRINTF("Can't get the descriptor.\n");
            break;
        }

        xlnx_dpdma_update_desc_info(s, channel, &desc);

        xlnx_dpdma_dump_descriptor(&desc);

        DPRINTF("location of the descriptor: %" PRIx64 "\n", desc_addr);
        if (!xlnx_dpdma_desc_is_valid(&desc)) {
            s->registers[DPDMA_EISR] |= ((1 << 7) << channel);
            xlnx_dpdma_update_irq(s);
            s->operation_finished[channel] = true;
            DPRINTF("Invalid descriptor..\n");
            break;
        }

        if (xlnx_dpdma_desc_crc_enabled(&desc)
            && !xlnx_dpdma_desc_check_crc(&desc)) {
            s->registers[DPDMA_EISR] |= ((1 << 13) << channel);
            xlnx_dpdma_update_irq(s);
            s->operation_finished[channel] = true;
            DPRINTF("Bad CRC for descriptor..\n");
            break;
        }

        if (xlnx_dpdma_desc_is_already_done(&desc)
            && !xlnx_dpdma_desc_ignore_done_bit(&desc)) {
            /* We are trying to process an already processed descriptor. */
            s->registers[DPDMA_EISR] |= ((1 << 25) << channel);
            xlnx_dpdma_update_irq(s);
            s->operation_finished[channel] = true;
            DPRINTF("Already processed descriptor..\n");
            break;
        }

        done = xlnx_dpdma_desc_is_last(&desc)
             || xlnx_dpdma_desc_is_last_of_frame(&desc);

        s->operation_finished[channel] = done;
        if (s->data[channel]) {
            int64_t transfer_len = xlnx_dpdma_desc_get_transfer_size(&desc);
            uint32_t line_size = xlnx_dpdma_desc_get_line_size(&desc);
            uint32_t line_stride = xlnx_dpdma_desc_get_line_stride(&desc);

            if (xlnx_dpdma_desc_is_contiguous(&desc)) {
                /* One flat buffer, read line by line honouring the stride. */
                source_addr[0] = xlnx_dpdma_desc_get_source_address(&desc, 0);
                while (transfer_len != 0) {
                    if (dma_memory_read(&address_space_memory,
                                        source_addr[0],
                                        &s->data[channel][ptr],
                                        line_size,
                                        MEMTXATTRS_UNSPECIFIED)) {
                        s->registers[DPDMA_ISR] |= ((1 << 12) << channel);
                        xlnx_dpdma_update_irq(s);
                        DPRINTF("Can't get data.\n");
                        break;
                    }
                    ptr += line_size;
                    transfer_len -= line_size;
                    source_addr[0] += line_stride;
                }
            } else {
                /* Fragmented mode: up to five 4KiB-bounded fragments. */
                uint8_t frag;

                DPRINTF("Source address:\n");
                for (frag = 0; frag < 5; frag++) {
                    source_addr[frag] =
                          xlnx_dpdma_desc_get_source_address(&desc, frag);
                    DPRINTF("Fragment %u: %" PRIx64 "\n", frag + 1,
                            source_addr[frag]);
                }

                frag = 0;
                /*
                 * BUG FIX: the condition previously read "transfer_len < 0",
                 * which is false for any positive remaining length, so no
                 * fragment was ever copied.  Match the contiguous path and
                 * loop while bytes remain.
                 */
                while ((transfer_len != 0) && (frag < 5)) {
                    size_t fragment_len = DPDMA_FRAG_MAX_SZ
                                    - (source_addr[frag] % DPDMA_FRAG_MAX_SZ);

                    if (dma_memory_read(&address_space_memory,
                                        source_addr[frag],
                                        &(s->data[channel][ptr]),
                                        fragment_len,
                                        MEMTXATTRS_UNSPECIFIED)) {
                        s->registers[DPDMA_ISR] |= ((1 << 12) << channel);
                        xlnx_dpdma_update_irq(s);
                        DPRINTF("Can't get data.\n");
                        break;
                    }
                    ptr += fragment_len;
                    transfer_len -= fragment_len;
                    frag += 1;
                }
            }
        }

        if (xlnx_dpdma_desc_update_enabled(&desc)) {
            /* The descriptor need to be updated when it's completed. */
            DPRINTF("update the descriptor with the done flag set.\n");
            xlnx_dpdma_desc_set_done(&desc);
            if (xlnx_dpdma_write_descriptor(desc_addr, &desc)) {
                DPRINTF("Can't write the descriptor.\n");
                /* TODO: check hardware behaviour for memory write failure */
            }
        }

        if (xlnx_dpdma_desc_completion_interrupt(&desc)) {
            DPRINTF("completion interrupt enabled!\n");
            s->registers[DPDMA_ISR] |= (1 << channel);
            xlnx_dpdma_update_irq(s);
        }

    } while (!done && !one_desc);

    return ptr;
}
/*
 * Attach the client's destination buffer @p to @channel; DMA payloads are
 * copied there by xlnx_dpdma_start_operation().  Pass NULL to detach.
 */
void xlnx_dpdma_set_host_data_location(XlnxDPDMAState *s, uint8_t channel,
                                       void *p)
{
    if (!s) {
        qemu_log_mask(LOG_UNIMP, "DPDMA client not attached to valid DPDMA"
                      " instance\n");
        return;
    }

    assert(channel <= 5);
    s->data[channel] = p;
}
/* Raise the VSYNC interrupt (ISR bit 27) on behalf of the display client. */
void xlnx_dpdma_trigger_vsync_irq(XlnxDPDMAState *s)
{
    s->registers[DPDMA_ISR] |= (1 << 27);
    xlnx_dpdma_update_irq(s);
}
/* Register the DPDMA type with QEMU's QOM type system at module load. */
type_init(xlnx_dpdma_register_types)