embox

Форк
0
/
etnaviv_buffer.c 
329 строк · 10.4 Кб
1
/*
2
 * Copyright (C) 2014 Etnaviv Project
3
 * Author: Christian Gmeiner <christian.gmeiner@gmail.com>
4
 *
5
 * This program is free software; you can redistribute it and/or modify it
6
 * under the terms of the GNU General Public License version 2 as published by
7
 * the Free Software Foundation.
8
 *
9
 * This program is distributed in the hope that it will be useful, but WITHOUT
10
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12
 * more details.
13
 *
14
 * You should have received a copy of the GNU General Public License along with
15
 * this program.  If not, see <http://www.gnu.org/licenses/>.
16
 */
17

18
#include <inttypes.h>
19

20
#include <../arch/arm/armlib/mem_barriers.h>
21
#include <etnaviv_xml/cmdstream.xml.h>
22
#include <etnaviv_xml/common.xml.h>
23
#include <etnaviv_xml/state.xml.h>
24
#include <etnaviv_xml/state_3d.xml.h>
25
#include <etnaviv_xml/state_hi.xml.h>
26
#include <util/log.h>
27

28
#include "etnaviv_cmdbuf.h"
29
#include "etnaviv_compat.h"
30
#include "etnaviv_drm.h"
31
#include "etnaviv_gem.h"
32
#include "etnaviv_gpu.h"
33

34
/*
35
 * Command Buffer helper:
36
 */
37

38
static inline void OUT(struct etnaviv_cmdbuf *buffer, uint32_t data) {
39
	uint32_t *vaddr = (uint32_t *)buffer->vaddr;
40
	vaddr[buffer->user_size / 4] = data;
41
	buffer->user_size += 4;
42
}
43

44
static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer, uint32_t reg,
45
    uint32_t value) {
46
	uint32_t index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;
47

48
	buffer->user_size = ALIGN(buffer->user_size, 8);
49

50
	/* write a register via cmd stream */
51
	OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE
52
	                | VIV_FE_LOAD_STATE_HEADER_COUNT(1)
53
	                | VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
54
	OUT(buffer, value);
55
}
56

57
static inline void CMD_END(struct etnaviv_cmdbuf *buffer) {
58
	buffer->user_size = ALIGN(buffer->user_size, 8);
59

60
	OUT(buffer, VIV_FE_END_HEADER_OP_END);
61
}
62

63
static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer) {
64
	buffer->user_size = ALIGN(buffer->user_size, 8);
65

66
	OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
67
}
68

69
static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer, uint16_t prefetch,
70
    uint32_t address) {
71
	buffer->user_size = ALIGN(buffer->user_size, 8);
72

73
	OUT(buffer,
74
	    VIV_FE_LINK_HEADER_OP_LINK | VIV_FE_LINK_HEADER_PREFETCH(prefetch));
75
	OUT(buffer, address);
76
}
77

78
static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer, uint32_t from,
79
    uint32_t to) {
80
	buffer->user_size = ALIGN(buffer->user_size, 8);
81

82
	OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
83
	OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
84
}
85

86
static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, uint32_t from,
87
    uint32_t to) {
88
	CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN,
89
	    VIVS_GL_SEMAPHORE_TOKEN_FROM(from) | VIVS_GL_SEMAPHORE_TOKEN_TO(to));
90
}
91

92
static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
93
    struct etnaviv_cmdbuf *buffer, uint8_t pipe) {
94
	uint32_t flush = 0;
95

96
	/*
97
	 * This assumes that if we're switching to 2D, we're switching
98
	 * away from 3D, and vice versa.  Hence, if we're switching to
99
	 * the 2D core, we need to flush the 3D depth and color caches,
100
	 * otherwise we need to flush the 2D pixel engine cache.
101
	 */
102
	if (gpu->exec_state == ETNA_PIPE_2D)
103
		flush = VIVS_GL_FLUSH_CACHE_PE2D;
104
	else if (gpu->exec_state == ETNA_PIPE_3D)
105
		flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;
106

107
	CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
108
	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
109
	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
110

111
	CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT, VIVS_GL_PIPE_SELECT_PIPE(pipe));
112
}
113

114
void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct etnaviv_cmdbuf *buf,
115
    uint32_t off, uint32_t len) {
116
	uint32_t size = buf->size;
117
	uint32_t *ptr = buf->vaddr + off;
118
	int i;
119

120
	log_debug("virt %p phys 0x%08" PRIu32 " free 0x%08" PRIu32 "\n", ptr,
121
	    etnaviv_cmdbuf_get_va(buf) + off, size - len * 4 - off);
122

123
	if (log_level_self() == LOG_NONE) {
124
		return;
125
	}
126

127
	for (i = 0; i < len / 4; i++) {
128
		if (i && !(i % 8))
129
			printk("\n");
130
		if (i % 8 == 0)
131
			printk("\t%p: ", ptr + i);
132
		printk("%08" PRIu32 " ", *(ptr + i));
133
	}
134

135
	printk("\n");
136
}
137

138
/*
 * Safely replace the WAIT of a waitlink with a new command and argument.
 * The GPU may be executing this WAIT while we're modifying it, so we have
 * to write it in a specific order to avoid the GPU branching to somewhere
 * else.  'wl_offset' is the offset to the first byte of the WAIT command.
 */
static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
    unsigned int wl_offset, uint32_t cmd, uint32_t arg) {
	uint32_t *lw = buffer->vaddr + wl_offset;

	/* Write the argument word first, then barrier, then the opcode word:
	 * the GPU must never observe the new opcode with a stale argument. */
	lw[1] = arg;
	data_mem_barrier();
	lw[0] = cmd;
	data_mem_barrier();
}
153
/*
154
 * Ensure that there is space in the command buffer to contiguously write
155
 * 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
156
 */
157
static uint32_t etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
158
    struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords) {
159
	if (buffer->user_size + cmd_dwords * sizeof(uint64_t) > buffer->size)
160
		buffer->user_size = 0;
161

162
	return etnaviv_cmdbuf_get_va(buffer) + buffer->user_size;
163
}
164

165
uint16_t etnaviv_buffer_init(struct etnaviv_gpu *gpu) {
166
	struct etnaviv_cmdbuf *buffer = gpu->buffer;
167

168
	/* initialize buffer */
169
	buffer->user_size = 0;
170

171
	CMD_WAIT(buffer);
172
	CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) + buffer->user_size - 4);
173

174
	return buffer->user_size / 8;
175
}
176

177
uint16_t etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu,
178
    uint32_t mtlb_addr, uint32_t safe_addr) {
179
	struct etnaviv_cmdbuf *buffer = gpu->buffer;
180

181
	buffer->user_size = 0;
182

183
	if (gpu->identity.features & chipFeatures_PIPE_3D) {
184
		CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
185
		    VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_3D));
186
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
187
		    mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
188
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
189
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
190
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
191
	}
192

193
	if (gpu->identity.features & chipFeatures_PIPE_2D) {
194
		CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
195
		    VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_2D));
196
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
197
		    mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
198
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
199
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
200
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
201
	}
202

203
	CMD_END(buffer);
204

205
	buffer->user_size = ALIGN(buffer->user_size, 8);
206

207
	return buffer->user_size / 8;
208
}
209

210
/*
 * Append a command buffer to the ring buffer and kick it off.
 *
 * 'event' is the GL event id signalled when the PE finishes the buffer;
 * 'cmdbuf' is the user command buffer to execute.  The ring is assumed
 * to currently end in a WAIT/LINK pair, which gets atomically replaced
 * by a LINK into the newly appended stream at the very end.
 */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
    struct etnaviv_cmdbuf *cmdbuf) {
	struct etnaviv_cmdbuf *buffer = gpu->buffer;
	/* The ring ends in an 8-byte WAIT followed by an 8-byte LINK. */
	unsigned int waitlink_offset = buffer->user_size - 16;
	uint32_t return_target, return_dwords;
	uint32_t link_target, link_dwords;

	log_debug("exec_state=%d", gpu->exec_state);
	link_target = etnaviv_cmdbuf_get_va(cmdbuf);
	link_dwords = cmdbuf->size / 8;
	/*
	 * If we need maintenance prior to submitting this buffer, we will
	 * need to append a mmu flush load state, followed by a new
	 * link to this buffer - a total of four additional words.
	 */
	if (gpu->mmu.need_flush || gpu->switch_context) {
		uint32_t target, extra_dwords;

		/* link command */
		extra_dwords = 1;

		/* flush command: one dword for MMUv1, three for MMUv2
		 * (config + semaphore + stall) */
		if (gpu->mmu.need_flush) {
			if (gpu->mmu.version == ETNAVIV_IOMMU_V1)
				extra_dwords += 1;
			else
				extra_dwords += 3;
		}

		/* pipe switch commands */
		if (gpu->switch_context) {
			extra_dwords += 4;
		}

		/* Reserve ring space for the maintenance commands; 'target'
		 * is the GPU address where they will be emitted. */
		target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);

		if (gpu->mmu.need_flush) {
			/* Add the MMU flush */
			if (gpu->mmu.version == ETNAVIV_IOMMU_V1) {
				CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
				    VIVS_GL_FLUSH_MMU_FLUSH_FEMMU | VIVS_GL_FLUSH_MMU_FLUSH_UNK1
				        | VIVS_GL_FLUSH_MMU_FLUSH_UNK2
				        | VIVS_GL_FLUSH_MMU_FLUSH_PEMMU
				        | VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
			}
			else {
				CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
				    VIVS_MMUv2_CONFIGURATION_MODE_MASK
				        | VIVS_MMUv2_CONFIGURATION_ADDRESS_MASK
				        | VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH);
				CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
				CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
			}

			gpu->mmu.need_flush = false;
		}

		if (gpu->switch_context) {
			etnaviv_cmd_select_pipe(gpu, buffer, cmdbuf->exec_state);
			gpu->exec_state = cmdbuf->exec_state;
			gpu->switch_context = false;
		}

		/* And the link to the submitted buffer */
		CMD_LINK(buffer, link_dwords, link_target);

		/* Update the link target to point to above instructions */
		link_target = target;
		link_dwords = extra_dwords;
	}

	/*
	 * Append a LINK to the submitted command buffer to return to
	 * the ring buffer.  return_target is the ring target address.
	 * We need at most 7 dwords in the return target: 2 cache flush +
	 * 2 semaphore stall + 1 event + 1 wait + 1 link.
	 */
	return_dwords = 7;
	return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
	CMD_LINK(cmdbuf, return_dwords, return_target);

	/*
	 * Append a cache flush, stall, event, wait and link pointing back to
	 * the wait command to the ring buffer.
	 */
	if (gpu->exec_state == ETNA_PIPE_2D) {
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, VIVS_GL_FLUSH_CACHE_PE2D);
	}
	else {
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
		    VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR);
		CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE, VIVS_TS_FLUSH_CACHE_FLUSH);
	}

	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT,
	    VIVS_GL_EVENT_EVENT_ID(event) | VIVS_GL_EVENT_FROM_PE);
	/* New WAIT/LINK self-loop: the FE parks here after this submission. */
	CMD_WAIT(buffer);
	CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) + buffer->user_size - 4);

	etnaviv_buffer_dump(gpu, buffer, 0, buffer->user_size);
	log_debug("stream link to 0x%08x @ 0x%08x", return_target,
	    etnaviv_cmdbuf_get_va(cmdbuf));
	log_debug("link op: %p", buffer->vaddr + waitlink_offset);
	log_debug("addr: 0x%08x", link_target);
	log_debug("back: 0x%08x", return_target);
	log_debug("event: %d", event);

	/*
	 * Kick off the submitted command by replacing the previous
	 * WAIT with a link to the address in the ring buffer.
	 */
	etnaviv_buffer_replace_wait(buffer, waitlink_offset,
	    VIV_FE_LINK_HEADER_OP_LINK | VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
	    link_target);

	etnaviv_buffer_dump(gpu, buffer, 0, buffer->user_size);
}
330

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.