// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014-2018 Etnaviv Project
 */

#include <drm/drm_drv.h>
#include <drm/drm_print.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"

#include "common.xml.h"
#include "state.xml.h"
#include "state_blt.xml.h"
#include "state_hi.xml.h"
#include "state_3d.xml.h"
#include "cmdstream.xml.h"

/*
 * Command Buffer helper:
 */

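/* Append a single 32-bit word to the kernel ring buffer. */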
static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
{
	u32 *vaddr = (u32 *)buffer->vaddr;

	BUG_ON(buffer->user_size >= buffer->size);

	vaddr[buffer->user_size / 4] = data;
	buffer->user_size += 4;
}

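/*
 * Emit a two-word LOAD_STATE command: a header encoding the target state
 * offset and a count of one, followed by the value to write.
 */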
static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
	u32 reg, u32 value)
{
	u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;

	buffer->user_size = ALIGN(buffer->user_size, 8);

	/* write a register via cmd stream */
	OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
		    VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
		    VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
	OUT(buffer, value);
}

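/* Emit an END command, which stops the FE from fetching further commands. */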
static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_END_HEADER_OP_END);
}

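/*
 * Emit a WAIT command: the FE busy-waits for 'waitcycles' before fetching
 * the next command. A WAIT followed by a LINK back to it forms the
 * wait-link loop the ring buffer idles in.
 */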
static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer,
			    unsigned int waitcycles)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | waitcycles);
}

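/*
 * Emit a LINK command: the FE jumps to 'address' and prefetches 'prefetch'
 * 64-bit command words from there.
 */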
static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
			    u16 prefetch, u32 address)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
		    VIV_FE_LINK_HEADER_PREFETCH(prefetch));
	OUT(buffer, address);
}

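/*
 * Emit a two-word STALL command: together with a previously armed semaphore
 * token (CMD_SEM) it stalls the FE until the target engine has passed the
 * semaphore.
 */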
static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
			     u32 from, u32 to)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
	OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
}

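/* Arm a semaphore token between two engines; paired with CMD_STALL to wait. */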
static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
{
	CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN,
		       VIVS_GL_SEMAPHORE_TOKEN_FROM(from) |
		       VIVS_GL_SEMAPHORE_TOKEN_TO(to));
}

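/* Flush the caches of the outgoing pipe, stall, then select the new pipe. */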
static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buffer, u8 pipe)
{
	u32 flush = 0;

	lockdep_assert_held(&gpu->lock);

	/*
	 * This assumes that if we're switching to 2D, we're switching
	 * away from 3D, and vice versa. Hence, if we're switching to
	 * the 2D core, we need to flush the 3D depth and color caches,
	 * otherwise we need to flush the 2D pixel engine cache.
	 */
	if (gpu->exec_state == ETNA_PIPE_2D)
		flush = VIVS_GL_FLUSH_CACHE_PE2D;
	else if (gpu->exec_state == ETNA_PIPE_3D)
		flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;

	CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

	CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
		       VIVS_GL_PIPE_SELECT_PIPE(pipe));
}

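/* Debug aid: print buffer addresses and hex-dump 'len' 32-bit words at 'off'. */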
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buf, u32 off, u32 len)
{
	u32 size = buf->size;
	u32 *ptr = buf->vaddr + off;

	dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
			ptr, etnaviv_cmdbuf_get_va(buf,
			&gpu->mmu_context->cmdbuf_mapping) +
			off, size - len * 4 - off);

	print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
			ptr, len * 4, 0);
}

/*
 * Safely replace the WAIT of a waitlink with a new command and argument.
 * The GPU may be executing this WAIT while we're modifying it, so we have
 * to write it in a specific order to avoid the GPU branching to somewhere
 * else. 'wl_offset' is the offset to the first byte of the WAIT command.
 */
static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
	unsigned int wl_offset, u32 cmd, u32 arg)
{
	u32 *lw = buffer->vaddr + wl_offset;

	/*
	 * Publish the argument first and the command word second, with
	 * barriers in between, so the FE either sees the old WAIT or the
	 * complete new command, never a new opcode with a stale argument.
	 */
	lw[1] = arg;
	mb();
	lw[0] = cmd;
	mb();
}

/*
 * Ensure that there is space in the command buffer to contiguously write
 * 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
 */
static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
{
	/* Not enough contiguous space at the tail? Wrap to the start. */
	if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
		buffer->user_size = 0;

	return etnaviv_cmdbuf_get_va(buffer,
				     &gpu->mmu_context->cmdbuf_mapping) +
	       buffer->user_size;
}

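/*
 * Prime the ring buffer with the initial WAIT/LINK loop and return its
 * size in 64-bit command words, which is the prefetch needed to start
 * the FE on this loop.
 */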
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;

	lockdep_assert_held(&gpu->lock);

	/* initialize buffer */
	buffer->user_size = 0;

	CMD_WAIT(buffer, gpu->fe_waitcycles);
	CMD_LINK(buffer, 2,
		 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
		 + buffer->user_size - 4);

	return buffer->user_size / 8;
}

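/*
 * Build a one-shot buffer that programs the MMUv2 master TLB and safe
 * address on each present pipe; returns its length in 64-bit words.
 */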
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;

	lockdep_assert_held(&gpu->lock);

	buffer->user_size = 0;

	if (gpu->identity.features & chipFeatures_PIPE_3D) {
		CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
			       VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_3D));
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
			mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	}

	if (gpu->identity.features & chipFeatures_PIPE_2D) {
		CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
			       VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_2D));
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
			mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	}

	CMD_END(buffer);

	buffer->user_size = ALIGN(buffer->user_size, 8);

	return buffer->user_size / 8;
}

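/*
 * Build a one-shot buffer that selects page table array entry 'id';
 * returns its length in 64-bit words.
 */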
u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;

	lockdep_assert_held(&gpu->lock);

	buffer->user_size = 0;

	CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
		       VIVS_MMUv2_PTA_CONFIG_INDEX(id));

	CMD_END(buffer);

	buffer->user_size = ALIGN(buffer->user_size, 8);

	return buffer->user_size / 8;
}

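/*
 * Terminate the ring buffer: replace the active WAIT/LINK with a link to
 * a cache flush sequence ending in END, or with a plain END when no
 * flush is needed.
 */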
void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 link_target, flush = 0;
	bool has_blt = !!(gpu->identity.minor_features5 &
			  chipMinorFeatures5_BLT_ENGINE);

	lockdep_assert_held(&gpu->lock);

	if (gpu->exec_state == ETNA_PIPE_2D)
		flush = VIVS_GL_FLUSH_CACHE_PE2D;
	else if (gpu->exec_state == ETNA_PIPE_3D)
		flush = VIVS_GL_FLUSH_CACHE_DEPTH |
			VIVS_GL_FLUSH_CACHE_COLOR |
			VIVS_GL_FLUSH_CACHE_TEXTURE |
			VIVS_GL_FLUSH_CACHE_TEXTUREVS |
			VIVS_GL_FLUSH_CACHE_SHADER_L2;

	if (flush) {
		unsigned int dwords = 7;

		if (has_blt)
			dwords += 10;

		link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);

		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		if (has_blt) {
			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
			CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
			CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
		}
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
		if (gpu->exec_state == ETNA_PIPE_3D) {
			if (has_blt) {
				CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
				CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
				CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
			} else {
				CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
					       VIVS_TS_FLUSH_CACHE_FLUSH);
			}
		}
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		if (has_blt) {
			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
			CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
			CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
		}
		CMD_END(buffer);

		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
					    VIV_FE_LINK_HEADER_OP_LINK |
					    VIV_FE_LINK_HEADER_PREFETCH(dwords),
					    link_target);
	} else {
		/* Replace the last link-wait with an "END" command */
		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
					    VIV_FE_END_HEADER_OP_END, 0);
	}
}

/* Append a 'sync point' to the ring buffer. */
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 dwords, target;

	lockdep_assert_held(&gpu->lock);

	/*
	 * We need at most 4 dwords in the return target:
	 * 1 event + 1 end + 1 wait + 1 link.
	 */
	dwords = 4;
	target = etnaviv_buffer_reserve(gpu, buffer, dwords);

	/* Signal sync point event */
	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
		       VIVS_GL_EVENT_FROM_PE);

	/* Stop the FE to 'pause' the GPU */
	CMD_END(buffer);

	/* Append waitlink */
	CMD_WAIT(buffer, gpu->fe_waitcycles);
	CMD_LINK(buffer, 2,
		 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
		 + buffer->user_size - 4);

	/*
	 * Kick off the 'sync point' command by replacing the previous
	 * WAIT with a link to the address in the ring buffer.
	 */
	etnaviv_buffer_replace_wait(buffer, waitlink_offset,
				    VIV_FE_LINK_HEADER_OP_LINK |
				    VIV_FE_LINK_HEADER_PREFETCH(dwords),
				    target);
}

/* Append a command buffer to the ring buffer. */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
	struct etnaviv_iommu_context *mmu_context, unsigned int event,
	struct etnaviv_cmdbuf *cmdbuf)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 return_target, return_dwords;
	u32 link_target, link_dwords;
	bool switch_context = gpu->exec_state != exec_state;
	bool switch_mmu_context = gpu->mmu_context != mmu_context;
	unsigned int new_flush_seq = READ_ONCE(mmu_context->flush_seq);
	bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;
	bool has_blt = !!(gpu->identity.minor_features5 &
			  chipMinorFeatures5_BLT_ENGINE);

	lockdep_assert_held(&gpu->lock);

	if (drm_debug_enabled(DRM_UT_DRIVER))
		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);

	link_target = etnaviv_cmdbuf_get_va(cmdbuf,
					    &gpu->mmu_context->cmdbuf_mapping);
	link_dwords = cmdbuf->size / 8;

	/*
	 * If we need maintenance prior to submitting this buffer, we will
	 * need to append an MMU flush load state and/or pipe switch
	 * commands, followed by a new link to this buffer; the exact
	 * number of extra dwords is accumulated below.
	 */
	if (need_flush || switch_context) {
		u32 target, extra_dwords;

		/* link command */
		extra_dwords = 1;

		/* flush command */
		if (need_flush) {
			if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1)
				extra_dwords += 1;
			else
				extra_dwords += 3;
		}

		/* pipe switch commands */
		if (switch_context)
			extra_dwords += 4;

		/* PTA load command */
		if (switch_mmu_context && gpu->sec_mode == ETNA_SEC_KERNEL)
			extra_dwords += 1;

		target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
		/*
		 * Switch MMU context if necessary. Must be done after the
		 * link target has been calculated, as the jump forward in the
		 * kernel ring still uses the last active MMU context before
		 * the switch.
		 */
		if (switch_mmu_context) {
			struct etnaviv_iommu_context *old_context = gpu->mmu_context;

			gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
			etnaviv_iommu_context_put(old_context);
		}

		if (need_flush) {
			/* Add the MMU flush */
			if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1) {
				CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
					       VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
					       VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
					       VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
					       VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
					       VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
			} else {
				u32 flush = VIVS_MMUv2_CONFIGURATION_MODE_MASK |
					    VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH;

				if (switch_mmu_context &&
				    gpu->sec_mode == ETNA_SEC_KERNEL) {
					unsigned short id =
						etnaviv_iommuv2_get_pta_id(gpu->mmu_context);
					CMD_LOAD_STATE(buffer,
						VIVS_MMUv2_PTA_CONFIG,
						VIVS_MMUv2_PTA_CONFIG_INDEX(id));
				}

				if (gpu->sec_mode == ETNA_SEC_NONE)
					flush |= etnaviv_iommuv2_get_mtlb_addr(gpu->mmu_context);

				CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
					       flush);
				CMD_SEM(buffer, SYNC_RECIPIENT_FE,
					SYNC_RECIPIENT_PE);
				CMD_STALL(buffer, SYNC_RECIPIENT_FE,
					  SYNC_RECIPIENT_PE);
			}

			gpu->flush_seq = new_flush_seq;
		}

		if (switch_context) {
			etnaviv_cmd_select_pipe(gpu, buffer, exec_state);
			gpu->exec_state = exec_state;
		}

		/* And the link to the submitted buffer */
		link_target = etnaviv_cmdbuf_get_va(cmdbuf,
						    &gpu->mmu_context->cmdbuf_mapping);
		CMD_LINK(buffer, link_dwords, link_target);

		/* Update the link target to point to above instructions */
		link_target = target;
		link_dwords = extra_dwords;
	}

	/*
	 * Append a LINK to the submitted command buffer to return to
	 * the ring buffer. return_target is the ring target address.
	 * We need at most 7 dwords in the return target: 2 cache flush +
	 * 2 semaphore stall + 1 event + 1 wait + 1 link.
	 */
	return_dwords = 7;

	/*
	 * When the BLT engine is present we need 6 more dwords in the return
	 * target: 3 enable/flush/disable + 4 enable/semaphore stall/disable,
	 * but we don't need the normal TS flush state.
	 */
	if (has_blt)
		return_dwords += 6;

	return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
	CMD_LINK(cmdbuf, return_dwords, return_target);

	/*
	 * Append a cache flush, stall, event, wait and link pointing back to
	 * the wait command to the ring buffer.
	 */
	if (gpu->exec_state == ETNA_PIPE_2D) {
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
			       VIVS_GL_FLUSH_CACHE_PE2D);
	} else {
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
			       VIVS_GL_FLUSH_CACHE_DEPTH |
			       VIVS_GL_FLUSH_CACHE_COLOR |
			       VIVS_GL_FLUSH_CACHE_SHADER_L1);
		if (has_blt) {
			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
			CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
		} else {
			CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
				       VIVS_TS_FLUSH_CACHE_FLUSH);
		}
	}
	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

	if (has_blt) {
		CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
		CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
	}

	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
		       VIVS_GL_EVENT_FROM_PE);
	CMD_WAIT(buffer, gpu->fe_waitcycles);
	CMD_LINK(buffer, 2,
		 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
		 + buffer->user_size - 4);

	if (drm_debug_enabled(DRM_UT_DRIVER)) {
		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
			return_target,
			etnaviv_cmdbuf_get_va(cmdbuf, &gpu->mmu_context->cmdbuf_mapping),
			cmdbuf->vaddr);

		print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
			       cmdbuf->vaddr, cmdbuf->size, 0);

		pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
		pr_info("addr: 0x%08x\n", link_target);
		pr_info("back: 0x%08x\n", return_target);
		pr_info("event: %d\n", event);
	}

	/*
	 * Kick off the submitted command by replacing the previous
	 * WAIT with a link to the address in the ring buffer.
	 */
	etnaviv_buffer_replace_wait(buffer, waitlink_offset,
				    VIV_FE_LINK_HEADER_OP_LINK |
				    VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
				    link_target);

	if (drm_debug_enabled(DRM_UT_DRIVER))
		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
}