13
#include "qemu/osdep.h"
14
#include "qapi/error.h"
15
#include "qemu/error-report.h"
16
#include "hw/vfio/vfio-container-base.h"
18
int vfio_container_dma_map(VFIOContainerBase *bcontainer,
19
hwaddr iova, ram_addr_t size,
20
void *vaddr, bool readonly)
22
VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
24
g_assert(vioc->dma_map);
25
return vioc->dma_map(bcontainer, iova, size, vaddr, readonly);
28
int vfio_container_dma_unmap(VFIOContainerBase *bcontainer,
29
hwaddr iova, ram_addr_t size,
32
VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
34
g_assert(vioc->dma_unmap);
35
return vioc->dma_unmap(bcontainer, iova, size, iotlb);
38
bool vfio_container_add_section_window(VFIOContainerBase *bcontainer,
39
MemoryRegionSection *section,
42
VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
44
if (!vioc->add_window) {
48
return vioc->add_window(bcontainer, section, errp);
51
void vfio_container_del_section_window(VFIOContainerBase *bcontainer,
52
MemoryRegionSection *section)
54
VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
56
if (!vioc->del_window) {
60
return vioc->del_window(bcontainer, section);
63
int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer,
64
bool start, Error **errp)
66
VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
68
if (!bcontainer->dirty_pages_supported) {
72
g_assert(vioc->set_dirty_page_tracking);
73
return vioc->set_dirty_page_tracking(bcontainer, start, errp);
76
int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
77
VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp)
79
VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
81
g_assert(vioc->query_dirty_bitmap);
82
return vioc->query_dirty_bitmap(bcontainer, vbmap, iova, size,
86
/*
 * GCopyFunc for g_list_copy_deep(): deep-copy one Range element.
 * @data is the unused user_data argument required by the callback
 * signature. Ownership of the returned Range passes to the caller
 * (freed via g_free by the list owner).
 *
 * Fix: the 'return dest;' was missing — falling off the end of a
 * non-void function is undefined behavior and leaked the allocation.
 */
static gpointer copy_iova_range(gconstpointer src, gpointer data)
{
    Range *source = (Range *)src;
    Range *dest = g_new(Range, 1);

    range_set_bounds(dest, range_lob(source), range_upb(source));
    return dest;
}
GList *vfio_container_get_iova_ranges(const VFIOContainerBase *bcontainer)
98
return g_list_copy_deep(bcontainer->iova_ranges, copy_iova_range, NULL);
101
/*
 * QOM instance_finalize: tear down a VFIOContainerBase.
 *
 * Unlinks the container from the global list, unregisters and frees
 * every guest IOMMU notifier node, and releases the IOVA range list.
 *
 * Fix: the original text unlinked each giommu node but never freed
 * it — 'g_free(giommu)' restored to plug the leak. iova_ranges is
 * also cleared after freeing to avoid a dangling pointer.
 */
static void vfio_container_instance_finalize(Object *obj)
{
    VFIOContainerBase *bcontainer = VFIO_IOMMU(obj);
    VFIOGuestIOMMU *giommu, *tmp;

    QLIST_REMOVE(bcontainer, next);

    QLIST_FOREACH_SAFE(giommu, &bcontainer->giommu_list, giommu_next, tmp) {
        memory_region_unregister_iommu_notifier(
                MEMORY_REGION(giommu->iommu_mr), &giommu->n);
        QLIST_REMOVE(giommu, giommu_next);
        g_free(giommu);
    }

    g_list_free_full(bcontainer->iova_ranges, g_free);
    bcontainer->iova_ranges = NULL;
}
/*
 * QOM instance_init: bring a freshly allocated VFIOContainerBase to a
 * known-empty state before any backend-specific setup runs.
 */
static void vfio_container_instance_init(Object *obj)
{
    VFIOContainerBase *bcontainer = VFIO_IOMMU(obj);

    bcontainer->error = NULL;
    bcontainer->dirty_pages_supported = false;
    bcontainer->dma_max_mappings = 0;
    bcontainer->iova_ranges = NULL;
    QLIST_INIT(&bcontainer->giommu_list);
    QLIST_INIT(&bcontainer->vrdl_list);
}
static const TypeInfo types[] = {
132
.name = TYPE_VFIO_IOMMU,
133
.parent = TYPE_OBJECT,
134
.instance_init = vfio_container_instance_init,
135
.instance_finalize = vfio_container_instance_finalize,
136
.instance_size = sizeof(VFIOContainerBase),
137
.class_size = sizeof(VFIOIOMMUClass),