#include <errno.h>
#include <inttypes.h>
#include <stdint.h>

#include <etnaviv_xml/cmdstream.xml.h>
#include <etnaviv_xml/common.xml.h>
#include <etnaviv_xml/state.xml.h>
#include <etnaviv_xml/state_hi.xml.h>

#include <kernel/panic.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_compat.h"
#include "etnaviv_drm.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, uint32_t param,
28
case ETNAVIV_PARAM_GPU_MODEL:
29
*value = gpu->identity.model;
32
case ETNAVIV_PARAM_GPU_REVISION:
33
*value = gpu->identity.revision;
36
case ETNAVIV_PARAM_GPU_FEATURES_0:
37
*value = gpu->identity.features;
40
case ETNAVIV_PARAM_GPU_FEATURES_1:
41
*value = gpu->identity.minor_features0;
44
case ETNAVIV_PARAM_GPU_FEATURES_2:
45
*value = gpu->identity.minor_features1;
48
case ETNAVIV_PARAM_GPU_FEATURES_3:
49
*value = gpu->identity.minor_features2;
52
case ETNAVIV_PARAM_GPU_FEATURES_4:
53
*value = gpu->identity.minor_features3;
56
case ETNAVIV_PARAM_GPU_FEATURES_5:
57
*value = gpu->identity.minor_features4;
60
case ETNAVIV_PARAM_GPU_FEATURES_6:
61
*value = gpu->identity.minor_features5;
64
case ETNAVIV_PARAM_GPU_STREAM_COUNT:
65
*value = gpu->identity.stream_count;
68
case ETNAVIV_PARAM_GPU_REGISTER_MAX:
69
*value = gpu->identity.register_max;
72
case ETNAVIV_PARAM_GPU_THREAD_COUNT:
73
*value = gpu->identity.thread_count;
76
case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
77
*value = gpu->identity.vertex_cache_size;
80
case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
81
*value = gpu->identity.shader_core_count;
84
case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
85
*value = gpu->identity.pixel_pipes;
88
case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
89
*value = gpu->identity.vertex_output_buffer_size;
92
case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
93
*value = gpu->identity.buffer_size;
96
case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
97
*value = gpu->identity.instruction_count;
100
case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
101
*value = gpu->identity.num_constants;
104
case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
105
*value = gpu->identity.varyings_count;
109
log_debug("invalid param: %u", param);
116
/* True when the GPU matches the given model ('mod' is pasted onto the
 * rulebook-generated chipModel_ prefix) and exact revision. */
#define etnaviv_is_model_rev(gpu, mod, rev) \
	((gpu)->identity.model == chipModel_##mod \
		&& (gpu)->identity.revision == rev)
/* Extract a register bitfield using the generated __MASK/__SHIFT pairs. */
#define etnaviv_field(val, field) (((val)&field##__MASK) >> field##__SHIFT)
static void etnaviv_hw_specs(struct etnaviv_gpu *gpu) {
122
if (gpu->identity.minor_features0
123
& chipMinorFeatures0_MORE_MINOR_FEATURES) {
125
unsigned int streams;
127
specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
128
specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
129
specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
130
specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);
132
gpu->identity.stream_count = etnaviv_field(specs[0],
133
VIVS_HI_CHIP_SPECS_STREAM_COUNT);
134
gpu->identity.register_max = etnaviv_field(specs[0],
135
VIVS_HI_CHIP_SPECS_REGISTER_MAX);
136
gpu->identity.thread_count = etnaviv_field(specs[0],
137
VIVS_HI_CHIP_SPECS_THREAD_COUNT);
138
gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
139
VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
140
gpu->identity.shader_core_count = etnaviv_field(specs[0],
141
VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
142
gpu->identity.pixel_pipes = etnaviv_field(specs[0],
143
VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
144
gpu->identity.vertex_output_buffer_size = etnaviv_field(specs[0],
145
VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);
147
gpu->identity.buffer_size = etnaviv_field(specs[1],
148
VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
149
gpu->identity.instruction_count = etnaviv_field(specs[1],
150
VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
151
gpu->identity.num_constants = etnaviv_field(specs[1],
152
VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);
154
gpu->identity.varyings_count = etnaviv_field(specs[2],
155
VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);
158
streams = etnaviv_field(specs[3], VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
160
gpu->identity.stream_count = streams;
164
if (gpu->identity.stream_count == 0) {
165
if (gpu->identity.model >= 0x1000)
166
gpu->identity.stream_count = 4;
168
gpu->identity.stream_count = 1;
172
if (gpu->identity.register_max)
173
gpu->identity.register_max = 1 << gpu->identity.register_max;
174
else if (gpu->identity.model == chipModel_GC400)
175
gpu->identity.register_max = 32;
177
gpu->identity.register_max = 64;
180
if (gpu->identity.thread_count)
181
gpu->identity.thread_count = 1 << gpu->identity.thread_count;
182
else if (gpu->identity.model == chipModel_GC400)
183
gpu->identity.thread_count = 64;
184
else if (gpu->identity.model == chipModel_GC500
185
|| gpu->identity.model == chipModel_GC530)
186
gpu->identity.thread_count = 128;
188
gpu->identity.thread_count = 256;
190
if (gpu->identity.vertex_cache_size == 0)
191
gpu->identity.vertex_cache_size = 8;
193
if (gpu->identity.shader_core_count == 0) {
194
if (gpu->identity.model >= 0x1000)
195
gpu->identity.shader_core_count = 2;
197
gpu->identity.shader_core_count = 1;
200
if (gpu->identity.pixel_pipes == 0)
201
gpu->identity.pixel_pipes = 1;
204
if (gpu->identity.vertex_output_buffer_size) {
205
gpu->identity.vertex_output_buffer_size =
206
1 << gpu->identity.vertex_output_buffer_size;
208
else if (gpu->identity.model == chipModel_GC400) {
209
if (gpu->identity.revision < 0x4000)
210
gpu->identity.vertex_output_buffer_size = 512;
211
else if (gpu->identity.revision < 0x4200)
212
gpu->identity.vertex_output_buffer_size = 256;
214
gpu->identity.vertex_output_buffer_size = 128;
217
gpu->identity.vertex_output_buffer_size = 512;
220
switch (gpu->identity.instruction_count) {
222
if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)
223
|| gpu->identity.model == chipModel_GC880)
224
gpu->identity.instruction_count = 512;
226
gpu->identity.instruction_count = 256;
230
gpu->identity.instruction_count = 1024;
234
gpu->identity.instruction_count = 2048;
238
gpu->identity.instruction_count = 256;
242
if (gpu->identity.num_constants == 0)
243
gpu->identity.num_constants = 168;
245
if (gpu->identity.varyings_count == 0) {
246
if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
247
gpu->identity.varyings_count = 12;
249
gpu->identity.varyings_count = 8;
256
if (etnaviv_is_model_rev(gpu, GC5000, 0x5434)
257
|| etnaviv_is_model_rev(gpu, GC4000, 0x5222)
258
|| etnaviv_is_model_rev(gpu, GC4000, 0x5245)
259
|| etnaviv_is_model_rev(gpu, GC4000, 0x5208)
260
|| etnaviv_is_model_rev(gpu, GC3000, 0x5435)
261
|| etnaviv_is_model_rev(gpu, GC2200, 0x5244)
262
|| etnaviv_is_model_rev(gpu, GC2100, 0x5108)
263
|| etnaviv_is_model_rev(gpu, GC2000, 0x5108)
264
|| etnaviv_is_model_rev(gpu, GC1500, 0x5246)
265
|| etnaviv_is_model_rev(gpu, GC880, 0x5107)
266
|| etnaviv_is_model_rev(gpu, GC880, 0x5106))
267
gpu->identity.varyings_count -= 1;
270
static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) {
271
uint32_t chipIdentity;
273
chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);
276
if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
277
gpu->identity.model = chipModel_GC500;
278
gpu->identity.revision = etnaviv_field(chipIdentity,
279
VIVS_HI_CHIP_IDENTITY_REVISION);
282
gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
283
gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
291
if ((gpu->identity.model & 0xff00) == 0x0400
292
&& gpu->identity.model != chipModel_GC420) {
293
gpu->identity.model = gpu->identity.model & 0x0400;
297
if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
298
uint32_t chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
299
uint32_t chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);
301
if (chipDate == 0x20080814 && chipTime == 0x12051100) {
306
gpu->identity.revision = 0x1051;
317
if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) {
318
gpu->identity.model = chipModel_GC3000;
319
gpu->identity.revision &= 0xffff;
323
log_info("model: GC%" PRIu32 ", revision: %" PRIu32, gpu->identity.model,
324
gpu->identity.revision);
326
gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);
329
if (gpu->identity.model == chipModel_GC700)
330
gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
332
if ((gpu->identity.model == chipModel_GC500 && gpu->identity.revision < 2)
333
|| (gpu->identity.model == chipModel_GC300
334
&& gpu->identity.revision < 0x2000)) {
339
gpu->identity.minor_features0 = 0;
340
gpu->identity.minor_features1 = 0;
341
gpu->identity.minor_features2 = 0;
342
gpu->identity.minor_features3 = 0;
343
gpu->identity.minor_features4 = 0;
344
gpu->identity.minor_features5 = 0;
347
gpu->identity.minor_features0 = gpu_read(gpu,
348
VIVS_HI_CHIP_MINOR_FEATURE_0);
350
if (gpu->identity.minor_features0
351
& chipMinorFeatures0_MORE_MINOR_FEATURES) {
352
gpu->identity.minor_features1 = gpu_read(gpu,
353
VIVS_HI_CHIP_MINOR_FEATURE_1);
354
gpu->identity.minor_features2 = gpu_read(gpu,
355
VIVS_HI_CHIP_MINOR_FEATURE_2);
356
gpu->identity.minor_features3 = gpu_read(gpu,
357
VIVS_HI_CHIP_MINOR_FEATURE_3);
358
gpu->identity.minor_features4 = gpu_read(gpu,
359
VIVS_HI_CHIP_MINOR_FEATURE_4);
360
gpu->identity.minor_features5 = gpu_read(gpu,
361
VIVS_HI_CHIP_MINOR_FEATURE_5);
365
if (gpu->identity.model == chipModel_GC600) {
366
gpu->idle_mask = VIVS_HI_IDLE_STATE_TX | VIVS_HI_IDLE_STATE_RA
367
| VIVS_HI_IDLE_STATE_SE | VIVS_HI_IDLE_STATE_PA
368
| VIVS_HI_IDLE_STATE_SH | VIVS_HI_IDLE_STATE_PE
369
| VIVS_HI_IDLE_STATE_DE | VIVS_HI_IDLE_STATE_FE;
372
gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
375
etnaviv_hw_specs(gpu);
378
/* Latch a new clock value: write it with the FSCALE load strobe set, then
 * write it again with the strobe clear. */
static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, uint32_t clock) {
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL,
			clock | VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
}
static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu) {
385
unsigned int fscale = 1 << (6 - 1);
386
uint32_t clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS
387
| VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
389
etnaviv_gpu_load_clock(gpu, clock);
392
static int etnaviv_hw_reset(struct etnaviv_gpu *gpu) {
393
uint32_t control, idle;
406
etnaviv_gpu_update_clock(gpu);
408
control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
414
control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
415
gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
418
control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
419
gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
425
control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
426
gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
429
control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
430
gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
433
idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
436
if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
437
log_debug("FE is not idle");
442
control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
445
if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0)
446
|| ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
447
log_debug("GPU is not idle");
456
idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
457
control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
459
log_error("GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle",
460
idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
461
control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
462
control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");
468
etnaviv_gpu_update_clock(gpu);
473
static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu) {
477
ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
478
ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
481
if (gpu->identity.revision == 0x4301 || gpu->identity.revision == 0x4302)
482
ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;
484
gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc);
486
pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);
489
if (gpu->identity.model >= chipModel_GC400
490
&& gpu->identity.model != chipModel_GC420)
491
pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;
497
if (gpu->identity.revision < 0x5000
498
&& gpu->identity.minor_features0 & chipMinorFeatures0_HZ
499
&& !(gpu->identity.minor_features1
500
& chipMinorFeatures1_DISABLE_PE_GATING))
501
pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;
503
if (gpu->identity.revision < 0x5422)
506
pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
507
pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;
509
gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
512
void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, uint32_t address,
514
gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, address);
515
gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
516
VIVS_FE_COMMAND_CONTROL_ENABLE
517
| VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
520
static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu) {
525
uint32_t pulse_eater = 0x01590880;
527
if (etnaviv_is_model_rev(gpu, GC4000, 0x5208)
528
|| etnaviv_is_model_rev(gpu, GC4000, 0x5222)) {
529
pulse_eater |= BIT(23);
532
if (etnaviv_is_model_rev(gpu, GC1000, 0x5039)
533
|| etnaviv_is_model_rev(gpu, GC1000, 0x5040)) {
534
pulse_eater &= ~BIT(16);
535
pulse_eater |= BIT(17);
537
if ((gpu->identity.revision > 0x5420)
538
&& (gpu->identity.features & chipFeatures_PIPE_3D)) {
540
pulse_eater = gpu_read(gpu, VIVS_PM_PULSE_EATER);
541
pulse_eater |= BIT(18);
544
gpu_write(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
547
static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu) {
551
etnaviv_gpu_enable_mlcg(gpu);
557
gpu_write(gpu, VIVS_HI_AXI_CONFIG,
558
VIVS_HI_AXI_CONFIG_AWCACHE(2) | VIVS_HI_AXI_CONFIG_ARCACHE(2));
561
if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
562
uint32_t bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
563
bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK
564
| VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
565
bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1)
566
| VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
567
gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
571
etnaviv_gpu_setup_pulse_eater(gpu);
574
etnaviv_iommu_restore(gpu);
577
gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
579
prefetch = etnaviv_buffer_init(gpu);
581
etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(gpu->buffer), prefetch);
584
int etnaviv_gpu_init(struct etnaviv_gpu *gpu) {
585
etnaviv_hw_identify(gpu);
587
if (gpu->identity.model == 0) {
588
log_error("Unknown GPU model");
593
if (gpu->identity.features & chipFeatures_PIPE_VG
594
&& gpu->identity.features & chipFeatures_FE20) {
595
panic("Wrong GPU register values, try to restart");
608
if (!(gpu->identity.features & chipFeatures_PIPE_3D)
609
|| (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
610
uint32_t dma_mask = (uint32_t)dma_get_required_mask(gpu);
611
if (dma_mask < PHYS_OFFSET + SZ_2G)
612
gpu->memory_base = PHYS_OFFSET;
614
gpu->memory_base = dma_mask - SZ_2G + 1;
616
else if (PHYS_OFFSET >= SZ_2G) {
617
log_info("Need to move linear window on MC1.0, disabling TS");
618
gpu->memory_base = PHYS_OFFSET;
619
gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
622
log_debug("memory base %p\n", (void *)gpu->memory_base);
624
if (etnaviv_hw_reset(gpu)) {
625
log_error("GPU reset failed");
629
if (etnaviv_iommu_init(gpu)) {
630
log_error("Failed to instantiate GPU IOMMU");
634
gpu->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(gpu);
635
if (IS_ERR(gpu->cmdbuf_suballoc)) {
636
log_error("Failed to create cmdbuf suballocator\n");
640
gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, 4096, 0);
642
log_error("could not create command buffer");
646
if (gpu->mmu.version == ETNAVIV_IOMMU_V1
647
&& etnaviv_cmdbuf_get_va(gpu->buffer) > 0x80000000) {
648
log_debug("buffer %p va %p\n", gpu->buffer,
649
etnaviv_cmdbuf_get_va(gpu->buffer));
650
log_error("command buffer outside valid memory window");
654
etnaviv_gpu_hw_init(gpu);
664
static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug) {
667
debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
668
debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
670
for (i = 0; i < 500; i++) {
671
debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
672
debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
674
if (debug->address[0] != debug->address[1])
677
if (debug->state[0] != debug->state[1])
682
/* Minimal seq_file shims: the stream argument is discarded and the text
 * goes to stdout via printf (so the 'm' used by callers never needs to
 * exist as a real object). */
#define seq_puts(a, ...) printf(__VA_ARGS__)
#define seq_printf(a, ...) printf(__VA_ARGS__)
int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, char *s) {
686
struct dma_debug debug;
687
uint32_t dma_lo, dma_hi, axi, idle;
689
int cmdState, cmdDmaState, cmdFetState, dmaReqState, calState, veReqState;
691
static const char *_cmdState[] = {"PAR_IDLE_ST", "PAR_DEC_ST",
692
"PAR_ADR0_ST", "PAR_LOAD0_ST", "PAR_ADR1_ST", "PAR_LOAD1_ST",
693
"PAR_3DADR_ST", "PAR_3DCMD_ST", "PAR_3DCNTL_ST", "PAR_3DIDXCNTL_ST",
694
"PAR_INITREQDMA_ST", "PAR_DRAWIDX_ST", "PAR_DRAW_ST", "PAR_2DRECT0_ST",
695
"PAR_2DRECT1_ST", "PAR_2DDATA0_ST", "PAR_2DDATA1_ST", "PAR_WAITFIFO_ST",
696
"PAR_WAIT_ST", "PAR_LINK_ST", "PAR_END_ST", "PAR_STALL_ST"};
698
static const char *_cmdDmaState[] = {"CMD_IDLE_ST", "CMD_START_ST",
699
"CMD_REQ_ST", "CMD_END_ST"};
701
static const char *_cmdFetState[] = {"FET_IDLE_ST", "FET_RAMVALID_ST",
704
static const char *_reqDmaState[] = {"REQ_IDLE_ST", "REQ_WAITIDX_ST",
707
static const char *_calState[] = {"CAL_IDLE_ST", "CAL_LDADR_ST",
710
static const char *_veReqState[] = {"VER_IDLE_ST", "VER_CKCACHE_ST",
713
seq_printf(m, "%s Status:\n", s);
715
dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
716
dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
717
axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
718
idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
720
verify_dma(gpu, &debug);
722
seq_puts(m, "\tfeatures\n");
723
seq_printf(m, "\t minor_features0: 0x%08" PRIu32 "\n",
724
gpu->identity.minor_features0);
725
seq_printf(m, "\t minor_features1: 0x%08" PRIu32 "\n",
726
gpu->identity.minor_features1);
727
seq_printf(m, "\t minor_features2: 0x%08" PRIu32 "\n",
728
gpu->identity.minor_features2);
729
seq_printf(m, "\t minor_features3: 0x%08" PRIu32 "\n",
730
gpu->identity.minor_features3);
731
seq_printf(m, "\t minor_features4: 0x%08" PRIu32 "\n",
732
gpu->identity.minor_features4);
733
seq_printf(m, "\t minor_features5: 0x%08" PRIu32 "\n",
734
gpu->identity.minor_features5);
736
seq_puts(m, "\tspecs\n");
737
seq_printf(m, "\t stream_count: %" PRIu32 "\n",
738
gpu->identity.stream_count);
739
seq_printf(m, "\t register_max: %" PRIu32 "\n", gpu->identity.register_max);
740
seq_printf(m, "\t thread_count: %" PRIu32 "\n", gpu->identity.thread_count);
741
seq_printf(m, "\t vertex_cache_size: %" PRIu32 "\n",
742
gpu->identity.vertex_cache_size);
743
seq_printf(m, "\t shader_core_count: %" PRIu32 "\n",
744
gpu->identity.shader_core_count);
745
seq_printf(m, "\t pixel_pipes: %" PRIu32 "\n", gpu->identity.pixel_pipes);
746
seq_printf(m, "\t vertex_output_buffer_size: %" PRIu32 "\n",
747
gpu->identity.vertex_output_buffer_size);
748
seq_printf(m, "\t buffer_size: %" PRIu32 "\n", gpu->identity.buffer_size);
749
seq_printf(m, "\t instruction_count: %" PRIu32 "\n",
750
gpu->identity.instruction_count);
751
seq_printf(m, "\t num_constants: %" PRIu32 "\n",
752
gpu->identity.num_constants);
753
seq_printf(m, "\t varyings_count: %" PRIu8 "\n",
754
gpu->identity.varyings_count);
756
seq_printf(m, "\taxi: 0x%08" PRIu32 "\n", axi);
757
seq_printf(m, "\tidle: 0x%08" PRIu32 "\n", idle);
758
idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
759
if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
760
seq_puts(m, "\t FE is not idle\n");
761
if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
762
seq_puts(m, "\t DE is not idle\n");
763
if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
764
seq_puts(m, "\t PE is not idle\n");
765
if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
766
seq_puts(m, "\t SH is not idle\n");
767
if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
768
seq_puts(m, "\t PA is not idle\n");
769
if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
770
seq_puts(m, "\t SE is not idle\n");
771
if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
772
seq_puts(m, "\t RA is not idle\n");
773
if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
774
seq_puts(m, "\t TX is not idle\n");
775
if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
776
seq_puts(m, "\t VG is not idle\n");
777
if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
778
seq_puts(m, "\t IM is not idle\n");
779
if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
780
seq_puts(m, "\t FP is not idle\n");
781
if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
782
seq_puts(m, "\t TS is not idle\n");
783
if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
784
seq_puts(m, "\t AXI low power mode\n");
786
if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
787
uint32_t read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
788
uint32_t read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
789
uint32_t write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);
791
seq_puts(m, "\tMC\n");
792
seq_printf(m, "\t read0: 0x%08" PRIu32 "\n", read0);
793
seq_printf(m, "\t read1: 0x%08" PRIu32 "\n", read1);
794
seq_printf(m, "\t write: 0x%08" PRIu32 "\n", write);
797
seq_puts(m, "\tDMA ");
799
if (debug.address[0] == debug.address[1]
800
&& debug.state[0] == debug.state[1]) {
801
seq_puts(m, "seems to be stuck\n");
803
else if (debug.address[0] == debug.address[1]) {
804
seq_puts(m, "address is constant\n");
807
seq_puts(m, "is running\n");
810
cmdState = debug.state[1] & 0x1F;
811
cmdDmaState = (debug.state[1] >> 8) & 0x03;
812
cmdFetState = (debug.state[1] >> 10) & 0x03;
813
dmaReqState = (debug.state[1] >> 12) & 0x03;
814
calState = (debug.state[1] >> 14) & 0x03;
815
veReqState = (debug.state[1] >> 16) & 0x03;
817
seq_printf(m, "\t address 0: 0x%08" PRIu32 "\n", debug.address[0]);
818
seq_printf(m, "\t address 1: 0x%08" PRIu32 "\n", debug.address[1]);
819
seq_printf(m, "\t state 0: 0x%08" PRIu32 "\n", debug.state[0]);
820
seq_printf(m, "\t state 1: 0x%08" PRIu32 "\n", debug.state[1]);
821
seq_printf(m, "\t command state = %d (%s)\n", cmdState,
822
_cmdState[cmdState]);
823
seq_printf(m, "\t command DMA state = %d (%s)\n", cmdDmaState,
824
_cmdDmaState[cmdDmaState]);
825
seq_printf(m, "\t command fetch state = %d (%s)\n", cmdFetState,
826
_cmdFetState[cmdFetState]);
827
seq_printf(m, "\t DMA request state = %d (%s)\n", dmaReqState,
828
_reqDmaState[dmaReqState]);
829
seq_printf(m, "\t cal state = %d (%s)\n", calState,
830
_calState[calState]);
831
seq_printf(m, "\t VE request state = %d (%s)\n", veReqState,
832
_veReqState[veReqState]);
834
"\t last fetch 64 bit word: 0x%08" PRIu32 " 0x%08" PRIu32 "\n", dma_lo,
842
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
843
uint32_t fence, struct timespec *timeout) {
846
if (fence_after(fence, gpu->next_fence)) {
847
log_error("waiting on invalid fence: %u (of %u)\n", fence,
854
ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
857
unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);
859
ret = wait_event_interruptible_timeout(gpu->fence_event,
860
fence_completed(gpu, fence), remaining);
862
log_debug("timeout waiting for fence: %u (retired: %u completed: "
864
fence, gpu->retired_fence, gpu->completed_fence);
872
extern void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
873
struct etnaviv_cmdbuf *buf, uint32_t off, uint32_t len);
875
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
876
struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf) {
877
unsigned int event = 0;
884
if (gpu->lastctx != cmdbuf->ctx) {
885
gpu->mmu.need_flush = true;
886
gpu->switch_context = true;
887
gpu->lastctx = cmdbuf->ctx;
890
while (gpu->busy) {};
893
etnaviv_buffer_queue(gpu, event, cmdbuf);
894
cmdbuf->nr_bos = submit->nr_bos;
895
etnaviv_buffer_dump(gpu, cmdbuf, 0, cmdbuf->user_size);
896
etnaviv_buffer_dump(gpu, gpu->buffer, 0, gpu->buffer->user_size);
901
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms) {
904
uint32_t idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
906
if ((idle & gpu->idle_mask) == gpu->idle_mask)
911
if (timeout_ms == 0) {
912
log_warning("timed out waiting for idle: idle=0x%x", idle);