/*
 * low level and IOMMU backend agnostic helpers used by VFIO devices,
 * related to regions, interrupts, capabilities
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include "hw/vfio/vfio-common.h"
#include "hw/hw.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "monitor/monitor.h"
#include "trace.h"

/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

static inline const char *action_to_str(int action)
{
    switch (action) {
    case VFIO_IRQ_SET_ACTION_MASK:
        return "MASK";
    case VFIO_IRQ_SET_ACTION_UNMASK:
        return "UNMASK";
    case VFIO_IRQ_SET_ACTION_TRIGGER:
        return "TRIGGER";
    default:
        return "UNKNOWN ACTION";
    }
}

static const char *index_to_str(VFIODevice *vbasedev, int index)
{
    if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
        return NULL;
    }

    switch (index) {
    case VFIO_PCI_INTX_IRQ_INDEX:
        return "INTX";
    case VFIO_PCI_MSI_IRQ_INDEX:
        return "MSI";
    case VFIO_PCI_MSIX_IRQ_INDEX:
        return "MSIX";
    case VFIO_PCI_ERR_IRQ_INDEX:
        return "ERR";
    case VFIO_PCI_REQ_IRQ_INDEX:
        return "REQ";
    default:
        return NULL;
    }
}

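/*
 * Wire an eventfd to a device interrupt, or tear the wiring down when
 * fd is -1, with a single VFIO_DEVICE_SET_IRQS call.  On failure the
 * error message is prefixed with a human-readable interrupt name when
 * one is known.
 */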
bool vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
                            int action, int fd, Error **errp)
{
    ERRP_GUARD();
    g_autofree struct vfio_irq_set *irq_set = NULL;
    int argsz;
    const char *name;
    int32_t *pfd;

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action;
    irq_set->index = index;
    irq_set->start = subindex;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = fd;

    if (!ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
        return true;
    }

    error_setg_errno(errp, errno, "VFIO_DEVICE_SET_IRQS failure");

    name = index_to_str(vbasedev, index);
    if (name) {
        error_prepend(errp, "%s-%d: ", name, subindex);
    } else {
        error_prepend(errp, "index %d-%d: ", index, subindex);
    }
    error_prepend(errp,
                  "Failed to %s %s eventfd signaling for interrupt ",
                  fd < 0 ? "tear down" : "set up", action_to_str(action));
    return false;
}

/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    case 8:
        buf.qword = cpu_to_le64(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %u bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}

uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    case 8:
        data = le64_to_cpu(buf.qword);
        break;
    default:
        hw_error("vfio: unsupported read size, %u bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}

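/*
 * .valid describes the access sizes the guest may issue; matching
 * .impl sizes let the memory core hand those accesses to the
 * callbacks unmodified, so one guest access maps to one
 * pread()/pwrite() on the region.
 */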
const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

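/*
 * Size a dirty bitmap: one bit per host page, rounded up so the
 * bitmap occupies a whole number of 64-bit words.
 */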
int vfio_bitmap_alloc(VFIOBitmap *vbmap, hwaddr size)
{
    vbmap->pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size();
    vbmap->size = ROUND_UP(vbmap->pages, sizeof(__u64) * BITS_PER_BYTE) /
                                         BITS_PER_BYTE;
    vbmap->bitmap = g_try_malloc0(vbmap->size);
    if (!vbmap->bitmap) {
        return -ENOMEM;
    }

    return 0;
}

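/*
 * Walk the capability chain hanging off a vfio info structure.  Each
 * header's 'next' field is an offset from the start of the structure;
 * a zero offset (hdr == ptr) terminates the chain.
 */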
struct vfio_info_cap_header *
vfio_get_cap(void *ptr, uint32_t cap_offset, uint16_t id)
{
    struct vfio_info_cap_header *hdr;

    for (hdr = ptr + cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

struct vfio_info_cap_header *
vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}

struct vfio_info_cap_header *
vfio_get_device_info_cap(struct vfio_device_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_DEVICE_FLAGS_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}

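/*
 * Regions advertising VFIO_REGION_INFO_CAP_SPARSE_MMAP are only
 * partially mmap'able; record each non-empty area so that only those
 * ranges are mapped later by vfio_region_mmap().
 */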
static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
                                          struct vfio_region_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_sparse_mmap *sparse;
    int i, j;

    hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
    if (!hdr) {
        return -ENODEV;
    }

    sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);

    trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                         region->nr, sparse->nr_areas);

    region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);

    for (i = 0, j = 0; i < sparse->nr_areas; i++) {
        if (sparse->areas[i].size) {
            trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
                                                sparse->areas[i].offset +
                                                sparse->areas[i].size - 1);
            region->mmaps[j].offset = sparse->areas[i].offset;
            region->mmaps[j].size = sparse->areas[i].size;
            j++;
        }
    }

    region->nr_mmaps = j;
    region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));

    return 0;
}

int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    g_autofree struct vfio_region_info *info = NULL;
    int ret;

    ret = vfio_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP) {

            ret = vfio_setup_region_sparse_mmaps(region, info);

            if (ret) {
                region->nr_mmaps = 1;
                region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
                region->mmaps[0].offset = 0;
                region->mmaps[0].size = region->size;
            }
        }
    }

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}

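/* Tear down one mmap'ed subregion: remove, unmap, and unparent it. */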
static void vfio_subregion_unmap(VFIORegion *region, int index)
{
    trace_vfio_region_unmap(memory_region_name(&region->mmaps[index].mem),
                            region->mmaps[index].offset,
                            region->mmaps[index].offset +
                            region->mmaps[index].size - 1);
    memory_region_del_subregion(region->mem, &region->mmaps[index].mem);
    munmap(region->mmaps[index].mmap, region->mmaps[index].size);
    object_unparent(OBJECT(&region->mmaps[index].mem));
    region->mmaps[index].mmap = NULL;
}

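/*
 * mmap() each mappable area of the region and overlay the mappings on
 * the region's MemoryRegion as RAM-device subregions, so guest
 * accesses bypass the read/write callbacks.  On failure, unwind the
 * areas already mapped, leaving the region on the slow I/O path.
 */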
int vfio_region_mmap(VFIORegion *region)
{
    int i, ret, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            ret = -errno;

            trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                         region->fd_offset +
                                         region->mmaps[i].offset,
                                         region->fd_offset +
                                         region->mmaps[i].offset +
                                         region->mmaps[i].size - 1, ret);

            region->mmaps[i].mmap = NULL;

            for (i--; i >= 0; i--) {
                vfio_subregion_unmap(region, i);
            }

            return ret;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
                                          memory_region_owner(region->mem),
                                          name, region->mmaps[i].size,
                                          region->mmaps[i].mmap);
        g_free(name);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;
}

void vfio_region_unmap(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            vfio_subregion_unmap(region, i);
        }
    }
}

void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}

void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);

    region->mem = NULL;
    region->mmaps = NULL;
    region->nr_mmaps = 0;
}

void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}

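/*
 * Standard VFIO argsz handshake: if the kernel reports that it needs
 * a larger buffer than we passed (argsz grew), reallocate and retry
 * so that any trailing capability chain is captured as well.
 */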
int vfio_get_region_info(VFIODevice *vbasedev, int index,
                         struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);

        goto retry;
    }

    return 0;
}

int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
                             uint32_t subtype, struct vfio_region_info **info)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap_type;

        if (vfio_get_region_info(vbasedev, i, info)) {
            continue;
        }

        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (!hdr) {
            g_free(*info);
            continue;
        }

        cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_get_dev_region(vbasedev->name, i,
                                  cap_type->type, cap_type->subtype);

        if (cap_type->type == type && cap_type->subtype == subtype) {
            return 0;
        }

        g_free(*info);
    }

    *info = NULL;
    return -ENODEV;
}

bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
{
    g_autofree struct vfio_region_info *info = NULL;
    bool ret = false;

    if (!vfio_get_region_info(vbasedev, region, &info)) {
        if (vfio_get_region_info_cap(info, cap_type)) {
            ret = true;
        }
    }

    return ret;
}

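/*
 * Derive vbasedev->name either from the sysfs path or, when the
 * device was handed to us as a file descriptor, from the fd number.
 */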
bool vfio_device_get_name(VFIODevice *vbasedev, Error **errp)
{
    struct stat st;

    if (vbasedev->fd < 0) {
        if (stat(vbasedev->sysfsdev, &st) < 0) {
            error_setg_errno(errp, errno, "no such host device");
            error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->sysfsdev);
            return false;
        }
        /* User may specify a name, e.g: VFIO platform device */
        if (!vbasedev->name) {
            vbasedev->name = g_path_get_basename(vbasedev->sysfsdev);
        }
    } else {
        if (!vbasedev->iommufd) {
            error_setg(errp, "Use FD passing only with iommufd backend");
            return false;
        }
        /*
         * Give a name with fd so any function printing out vbasedev->name
         * will not break.
         */
        if (!vbasedev->name) {
            vbasedev->name = g_strdup_printf("VFIO_FD%d", vbasedev->fd);
        }
    }

    return true;
}

void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp)
{
    ERRP_GUARD();
    int fd = monitor_fd_param(monitor_cur(), str, errp);

    if (fd < 0) {
        error_prepend(errp, "Could not parse remote object fd %s:", str);
        return;
    }
    vbasedev->fd = fd;
}

void vfio_device_init(VFIODevice *vbasedev, int type, VFIODeviceOps *ops,
                      DeviceState *dev, bool ram_discard)
{
    vbasedev->type = type;
    vbasedev->ops = ops;
    vbasedev->dev = dev;
    vbasedev->fd = -1;

    vbasedev->ram_block_discard_allowed = ram_discard;
}

int vfio_device_get_aw_bits(VFIODevice *vdev)
{
    /*
     * iova_ranges is a sorted list.  For old kernels that support
     * VFIO but do not support querying IOVA ranges, iova_ranges is
     * NULL, and HOST_IOMMU_DEVICE_CAP_AW_BITS_MAX (64) is returned.
     */
    GList *l = g_list_last(vdev->bcontainer->iova_ranges);

    if (l) {
        Range *range = l->data;
        return range_get_last_bit(range) + 1;
    }

    return HOST_IOMMU_DEVICE_CAP_AW_BITS_MAX;
}

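/*
 * A mediated device is identified by its sysfs 'subsystem' link
 * resolving to /sys/bus/mdev.
 */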
bool vfio_device_is_mdev(VFIODevice *vbasedev)
{
    g_autofree char *subsys = NULL;
    g_autofree char *tmp = NULL;

    if (!vbasedev->sysfsdev) {
        return false;
    }

    tmp = g_strdup_printf("%s/subsystem", vbasedev->sysfsdev);
    subsys = realpath(tmp, NULL);
    return subsys && (strcmp(subsys, "/sys/bus/mdev") == 0);
}

bool vfio_device_hiod_realize(VFIODevice *vbasedev, Error **errp)
{
    HostIOMMUDevice *hiod = vbasedev->hiod;

    if (!hiod) {
        return true;
    }

    return HOST_IOMMU_DEVICE_GET_CLASS(hiod)->realize(hiod, vbasedev, errp);
}