10
#include <asm-generic/dma-mapping.h>
12
#include "etnaviv_gpu.h"
13
#include "etnaviv_mmu.h"
14
#include "etnaviv_compat.h"
16
#include <etnaviv_xml/state_hi.xml.h>
19
#define PT_ENTRIES (PT_SIZE / sizeof(uint32_t))
21
#define GPU_MEM_START 0x80000000
23
struct etnaviv_iommu_domain_pgtable {
28
struct etnaviv_iommu_domain {
31
dma_addr_t bad_page_dma;
32
struct iommu_ops *ops;
33
struct etnaviv_iommu_domain_pgtable pgtable;
36
/* Convert a generic domain handle to our implementation type. */
static struct etnaviv_iommu_domain *to_etnaviv_domain(void *domain)
{
	return domain;
}
static int pgtable_alloc(struct etnaviv_iommu_domain_pgtable *pgtable,
42
pgtable->pgtable = malloc(size);
43
if (!pgtable->pgtable)
46
pgtable->paddr = (dma_addr_t) pgtable->pgtable;
51
static uint32_t pgtable_read(struct etnaviv_iommu_domain_pgtable *pgtable,
54
unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
57
paddr = pgtable->pgtable[index];
62
/*
 * Store @paddr into the PTE covering @iova.  Like pgtable_read(), the
 * caller is responsible for keeping @iova inside the GPU window.
 */
static void pgtable_write(struct etnaviv_iommu_domain_pgtable *pgtable,
			  unsigned long iova, phys_addr_t paddr)
{
	/* one 32-bit PTE per 4K page */
	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;

	pgtable->pgtable[index] = paddr;
}
static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain) {
74
etnaviv_domain->bad_page_cpu = malloc(SZ_4K);
76
if (!etnaviv_domain->bad_page_cpu)
79
p = etnaviv_domain->bad_page_cpu;
80
for (i = 0; i < SZ_4K / 4; i++)
83
ret = pgtable_alloc(&etnaviv_domain->pgtable, PT_SIZE);
85
free(etnaviv_domain->bad_page_cpu);
89
for (i = 0; i < PT_ENTRIES; i++)
90
etnaviv_domain->pgtable.pgtable[i] =
91
etnaviv_domain->bad_page_dma;
96
static void etnaviv_domain_free(struct iommu_domain *domain) {
97
struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
99
free(etnaviv_domain->pgtable.pgtable);
101
free(etnaviv_domain->bad_page_cpu);
103
kfree(etnaviv_domain);
106
static int etnaviv_iommuv1_map(struct iommu_domain *domain, unsigned long iova,
107
phys_addr_t paddr, size_t size, int prot) {
108
struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
113
pgtable_write(&etnaviv_domain->pgtable, iova, paddr);
118
static size_t etnaviv_iommuv1_unmap(struct iommu_domain *domain,
119
unsigned long iova, size_t size) {
120
struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
125
pgtable_write(&etnaviv_domain->pgtable, iova,
126
etnaviv_domain->bad_page_dma);
131
static phys_addr_t etnaviv_iommu_iova_to_phys(struct iommu_domain *domain,
133
struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
135
return pgtable_read(&etnaviv_domain->pgtable, iova);
138
static size_t etnaviv_iommuv1_dump_size(struct iommu_domain *domain) {
142
static void etnaviv_iommuv1_dump(struct iommu_domain *domain, void *buf) {
143
struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
145
memcpy(buf, etnaviv_domain->pgtable.pgtable, PT_SIZE);
148
struct etnaviv_iommu_ops etnaviv_iommu_ops = {
150
.domain_free = etnaviv_domain_free,
151
.map = etnaviv_iommuv1_map,
152
.unmap = etnaviv_iommuv1_unmap,
153
.iova_to_phys = etnaviv_iommu_iova_to_phys,
154
.pgsize_bitmap = SZ_4K,
156
.dump_size = etnaviv_iommuv1_dump_size,
157
.dump = etnaviv_iommuv1_dump,
160
void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu) {
161
struct etnaviv_iommu_domain *etnaviv_domain =
162
to_etnaviv_domain(gpu->mmu.domain);
166
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
167
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
168
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
169
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
170
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
173
pgtable = (uint32_t) etnaviv_domain->pgtable.paddr;
175
gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
176
gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
177
gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);
178
gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);
179
gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
182
int etnaviv_iommuv1_domain_init(struct etnaviv_gpu *gpu) {
183
struct etnaviv_iommu_domain *etnaviv_domain;
186
etnaviv_domain = (void *) &gpu->mmu.domain;
188
memset(etnaviv_domain, 0, sizeof(*etnaviv_domain));
190
etnaviv_domain->ops = &etnaviv_iommu_ops.ops;
191
gpu->mmu.start_addr = GPU_MEM_START;
192
gpu->mmu.end_addr = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1;
194
if ((ret = __etnaviv_iommu_init(etnaviv_domain))) {