1f6ffbd4fSLucas Stach // SPDX-License-Identifier: GPL-2.0
2a8c21a54SThe etnaviv authors /*
3f6ffbd4fSLucas Stach * Copyright (C) 2014-2018 Etnaviv Project
4a8c21a54SThe etnaviv authors */
5a8c21a54SThe etnaviv authors
66eae41feSSam Ravnborg #include <drm/drm_drv.h>
76eae41feSSam Ravnborg
8ea1f5729SLucas Stach #include "etnaviv_cmdbuf.h"
9a8c21a54SThe etnaviv authors #include "etnaviv_gpu.h"
10a8c21a54SThe etnaviv authors #include "etnaviv_gem.h"
11a8c21a54SThe etnaviv authors #include "etnaviv_mmu.h"
12a8c21a54SThe etnaviv authors
13a8c21a54SThe etnaviv authors #include "common.xml.h"
14a8c21a54SThe etnaviv authors #include "state.xml.h"
15f232d9ecSLucas Stach #include "state_blt.xml.h"
16de08e8efSLucas Stach #include "state_hi.xml.h"
178581d814SRussell King #include "state_3d.xml.h"
18a8c21a54SThe etnaviv authors #include "cmdstream.xml.h"
19a8c21a54SThe etnaviv authors
20a8c21a54SThe etnaviv authors /*
21a8c21a54SThe etnaviv authors * Command Buffer helper:
22a8c21a54SThe etnaviv authors */
23a8c21a54SThe etnaviv authors
24a8c21a54SThe etnaviv authors
/*
 * Append one 32-bit word at the current write position of the command
 * buffer and advance the write position.  The buffer must have room;
 * overrunning it is a kernel bug, hence BUG_ON.
 */
static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
{
	u32 *slot;

	BUG_ON(buffer->user_size >= buffer->size);

	slot = (u32 *)buffer->vaddr + buffer->user_size / 4;
	*slot = data;
	buffer->user_size += 4;
}
34a8c21a54SThe etnaviv authors
/*
 * Emit a LOAD_STATE command that writes 'value' into the state register
 * 'reg' via the command stream.
 */
static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
	u32 reg, u32 value)
{
	u32 state_offset = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;

	/* FE commands must start on a 64-bit boundary */
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
		    VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
		    VIV_FE_LOAD_STATE_HEADER_OFFSET(state_offset));
	OUT(buffer, value);
}
48a8c21a54SThe etnaviv authors
CMD_END(struct etnaviv_cmdbuf * buffer)49a8c21a54SThe etnaviv authors static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
50a8c21a54SThe etnaviv authors {
51a8c21a54SThe etnaviv authors buffer->user_size = ALIGN(buffer->user_size, 8);
52a8c21a54SThe etnaviv authors
53a8c21a54SThe etnaviv authors OUT(buffer, VIV_FE_END_HEADER_OP_END);
54a8c21a54SThe etnaviv authors }
55a8c21a54SThe etnaviv authors
CMD_WAIT(struct etnaviv_cmdbuf * buffer,unsigned int waitcycles)56*295b6c02SLucas Stach static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer,
57*295b6c02SLucas Stach unsigned int waitcycles)
58a8c21a54SThe etnaviv authors {
59a8c21a54SThe etnaviv authors buffer->user_size = ALIGN(buffer->user_size, 8);
60a8c21a54SThe etnaviv authors
61*295b6c02SLucas Stach OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | waitcycles);
62a8c21a54SThe etnaviv authors }
63a8c21a54SThe etnaviv authors
/*
 * Emit a LINK command, redirecting front-end fetch to 'address' with a
 * prefetch of 'prefetch' 64-bit units.
 */
static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
			    u16 prefetch, u32 address)
{
	/* FE commands must start on a 64-bit boundary */
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
		    VIV_FE_LINK_HEADER_PREFETCH(prefetch));
	OUT(buffer, address);
}
73a8c21a54SThe etnaviv authors
/* Emit a front-end STALL command with the given from/to sync tokens. */
static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
			     u32 from, u32 to)
{
	/* FE commands must start on a 64-bit boundary */
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
	OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
}
82a8c21a54SThe etnaviv authors
/*
 * Emit a semaphore token between the 'from' and 'to' sync recipients by
 * loading the GL_SEMAPHORE_TOKEN state.
 */
static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
{
	u32 token = VIVS_GL_SEMAPHORE_TOKEN_FROM(from) |
		    VIVS_GL_SEMAPHORE_TOKEN_TO(to);

	CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN, token);
}
8918060f4dSRussell King
/*
 * Emit the command sequence to switch the GPU execution pipe: flush the
 * caches of the pipe we are leaving, stall until the flush has landed,
 * then select the new pipe.
 */
static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buffer, u8 pipe)
{
	u32 flush;

	lockdep_assert_held(&gpu->lock);

	/*
	 * This assumes that if we're switching to 2D, we're switching
	 * away from 3D, and vice versa.  Hence, if we're switching to
	 * the 2D core, we need to flush the 3D depth and color caches,
	 * otherwise we need to flush the 2D pixel engine cache.
	 */
	switch (gpu->exec_state) {
	case ETNA_PIPE_2D:
		flush = VIVS_GL_FLUSH_CACHE_PE2D;
		break;
	case ETNA_PIPE_3D:
		flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;
		break;
	default:
		flush = 0;
		break;
	}

	CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

	CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
		       VIVS_GL_PIPE_SELECT_PIPE(pipe));
}
115a8c21a54SThe etnaviv authors
/*
 * Debug helper: print the buffer's kernel virtual address, its GPU virtual
 * address (offset by 'off') and the remaining space, then hex-dump
 * 'len' 32-bit words starting at byte offset 'off'.
 */
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buf, u32 off, u32 len)
{
	u32 *ptr = buf->vaddr + off;

	dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
		 ptr,
		 etnaviv_cmdbuf_get_va(buf, &gpu->mmu_context->cmdbuf_mapping) + off,
		 buf->size - len * 4 - off);

	print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
		       ptr, len * 4, 0);
}
130a8c21a54SThe etnaviv authors
131584a13c6SRussell King /*
1326e138f76SRussell King * Safely replace the WAIT of a waitlink with a new command and argument.
1336e138f76SRussell King * The GPU may be executing this WAIT while we're modifying it, so we have
1346e138f76SRussell King * to write it in a specific order to avoid the GPU branching to somewhere
1356e138f76SRussell King * else. 'wl_offset' is the offset to the first byte of the WAIT command.
1366e138f76SRussell King */
static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
	unsigned int wl_offset, u32 cmd, u32 arg)
{
	/* lw[0] is the command/opcode word, lw[1] its argument */
	u32 *lw = buffer->vaddr + wl_offset;

	/*
	 * Write the argument first, then the opcode, with a full barrier
	 * between the stores: the FE may be spinning on this WAIT right
	 * now, and must never observe the new opcode paired with a stale
	 * argument.  The trailing barrier makes the opcode store visible
	 * before the caller proceeds.
	 */
	lw[1] = arg;
	mb();
	lw[0] = cmd;
	mb();
}
1476e138f76SRussell King
1486e138f76SRussell King /*
149584a13c6SRussell King * Ensure that there is space in the command buffer to contiguously write
150584a13c6SRussell King * 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
151584a13c6SRussell King */
etnaviv_buffer_reserve(struct etnaviv_gpu * gpu,struct etnaviv_cmdbuf * buffer,unsigned int cmd_dwords)152584a13c6SRussell King static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
153584a13c6SRussell King struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
154584a13c6SRussell King {
155584a13c6SRussell King if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
156584a13c6SRussell King buffer->user_size = 0;
157584a13c6SRussell King
15817e4660aSLucas Stach return etnaviv_cmdbuf_get_va(buffer,
15917e4660aSLucas Stach &gpu->mmu_context->cmdbuf_mapping) +
160db82a043SLucas Stach buffer->user_size;
161584a13c6SRussell King }
162584a13c6SRussell King
etnaviv_buffer_init(struct etnaviv_gpu * gpu)163a8c21a54SThe etnaviv authors u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
164a8c21a54SThe etnaviv authors {
1652f9225dbSLucas Stach struct etnaviv_cmdbuf *buffer = &gpu->buffer;
166a8c21a54SThe etnaviv authors
167b6d6223fSLucas Stach lockdep_assert_held(&gpu->lock);
168b6d6223fSLucas Stach
169a8c21a54SThe etnaviv authors /* initialize buffer */
170a8c21a54SThe etnaviv authors buffer->user_size = 0;
171a8c21a54SThe etnaviv authors
172*295b6c02SLucas Stach CMD_WAIT(buffer, gpu->fe_waitcycles);
17317e4660aSLucas Stach CMD_LINK(buffer, 2,
17417e4660aSLucas Stach etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
175db82a043SLucas Stach + buffer->user_size - 4);
176a8c21a54SThe etnaviv authors
177a8c21a54SThe etnaviv authors return buffer->user_size / 8;
178a8c21a54SThe etnaviv authors }
179a8c21a54SThe etnaviv authors
etnaviv_buffer_config_mmuv2(struct etnaviv_gpu * gpu,u32 mtlb_addr,u32 safe_addr)180de08e8efSLucas Stach u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
181de08e8efSLucas Stach {
1822f9225dbSLucas Stach struct etnaviv_cmdbuf *buffer = &gpu->buffer;
183de08e8efSLucas Stach
184b6d6223fSLucas Stach lockdep_assert_held(&gpu->lock);
185b6d6223fSLucas Stach
186de08e8efSLucas Stach buffer->user_size = 0;
187de08e8efSLucas Stach
188de08e8efSLucas Stach if (gpu->identity.features & chipFeatures_PIPE_3D) {
189de08e8efSLucas Stach CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
190de08e8efSLucas Stach VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_3D));
191de08e8efSLucas Stach CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
192de08e8efSLucas Stach mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
193de08e8efSLucas Stach CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
194de08e8efSLucas Stach CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
195de08e8efSLucas Stach CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
196de08e8efSLucas Stach }
197de08e8efSLucas Stach
198de08e8efSLucas Stach if (gpu->identity.features & chipFeatures_PIPE_2D) {
199de08e8efSLucas Stach CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
200de08e8efSLucas Stach VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_2D));
201de08e8efSLucas Stach CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
202de08e8efSLucas Stach mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
203de08e8efSLucas Stach CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
204de08e8efSLucas Stach CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
205de08e8efSLucas Stach CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
206de08e8efSLucas Stach }
207de08e8efSLucas Stach
208de08e8efSLucas Stach CMD_END(buffer);
209de08e8efSLucas Stach
210de08e8efSLucas Stach buffer->user_size = ALIGN(buffer->user_size, 8);
211de08e8efSLucas Stach
212de08e8efSLucas Stach return buffer->user_size / 8;
213de08e8efSLucas Stach }
214de08e8efSLucas Stach
/*
 * Build a stand-alone command buffer that selects page table array entry
 * 'id' (used with the secure/kernel MMU mode — see the ETNA_SEC_KERNEL
 * paths elsewhere in this file).  Overwrites the ring buffer from the
 * start.  Returns the buffer length in 64-bit FE fetch units.
 */
u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;

	lockdep_assert_held(&gpu->lock);

	buffer->user_size = 0;

	CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
		       VIVS_MMUv2_PTA_CONFIG_INDEX(id));

	CMD_END(buffer);

	/* pad to a whole 64-bit fetch unit before computing the length */
	buffer->user_size = ALIGN(buffer->user_size, 8);

	return buffer->user_size / 8;
}
2321137bef6SLucas Stach
/*
 * Terminate the ring buffer: emit a final cache-flush sequence for the
 * active pipe (if any) followed by an END, and patch the pending
 * WAIT/LINK so the FE branches into it and stops.  If no pipe is active
 * the WAIT/LINK is replaced by an END directly.
 */
void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	/* the WAIT/LINK pair occupies the last 16 bytes written */
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 link_target, flush = 0;
	bool has_blt = !!(gpu->identity.minor_features5 &
			  chipMinorFeatures5_BLT_ENGINE);

	lockdep_assert_held(&gpu->lock);

	/* pick the cache-flush bits matching the currently active pipe */
	if (gpu->exec_state == ETNA_PIPE_2D)
		flush = VIVS_GL_FLUSH_CACHE_PE2D;
	else if (gpu->exec_state == ETNA_PIPE_3D)
		flush = VIVS_GL_FLUSH_CACHE_DEPTH |
			VIVS_GL_FLUSH_CACHE_COLOR |
			VIVS_GL_FLUSH_CACHE_TEXTURE |
			VIVS_GL_FLUSH_CACHE_TEXTUREVS |
			VIVS_GL_FLUSH_CACHE_SHADER_L2;

	if (flush) {
		/* 7 fetch units for the base sequence, 10 more with BLT */
		unsigned int dwords = 7;

		if (has_blt)
			dwords += 10;

		link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);

		/* wait for the PE to go idle before flushing */
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		if (has_blt) {
			/* BLT must be enabled around its semaphore/stall */
			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
			CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
			CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
		}
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
		if (gpu->exec_state == ETNA_PIPE_3D) {
			/* TS flush goes through the BLT engine when present */
			if (has_blt) {
				CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
				CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
				CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
			} else {
				CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
					       VIVS_TS_FLUSH_CACHE_FLUSH);
			}
		}
		/* wait for the flushes to complete */
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		if (has_blt) {
			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
			CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
			CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
		}
		CMD_END(buffer);

		/* branch the pending WAIT into the flush+END sequence */
		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
					    VIV_FE_LINK_HEADER_OP_LINK |
					    VIV_FE_LINK_HEADER_PREFETCH(dwords),
					    link_target);
	} else {
		/* Replace the last link-wait with an "END" command */
		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
					    VIV_FE_END_HEADER_OP_END, 0);
	}
}
299a8c21a54SThe etnaviv authors
300357713ceSChristian Gmeiner /* Append a 'sync point' to the ring buffer. */
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	/* the WAIT/LINK pair occupies the last 16 bytes written */
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 dwords, target;

	lockdep_assert_held(&gpu->lock);

	/*
	 * We need at most 4 fetch units in the return target:
	 * 1 event + 1 end + 1 wait + 1 link.
	 */
	dwords = 4;
	target = etnaviv_buffer_reserve(gpu, buffer, dwords);

	/* Signal sync point event */
	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
		       VIVS_GL_EVENT_FROM_PE);

	/* Stop the FE to 'pause' the GPU */
	CMD_END(buffer);

	/* Append waitlink so the FE spins here once it is restarted */
	CMD_WAIT(buffer, gpu->fe_waitcycles);
	CMD_LINK(buffer, 2,
		 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
		 + buffer->user_size - 4);

	/*
	 * Kick off the 'sync point' command by replacing the previous
	 * WAIT with a link to the address in the ring buffer.
	 */
	etnaviv_buffer_replace_wait(buffer, waitlink_offset,
				    VIV_FE_LINK_HEADER_OP_LINK |
				    VIV_FE_LINK_HEADER_PREFETCH(dwords),
				    target);
}
338357713ceSChristian Gmeiner
33941db12dfSRussell King /* Append a command buffer to the ring buffer. */
/*
 * Append a user command buffer to the kernel ring buffer and kick it off.
 * Emits any required maintenance first (MMU flush, MMU context switch,
 * pipe switch), links into the user buffer, appends the return sequence
 * (cache flush, stall, event signal, fresh WAIT/LINK), and finally
 * patches the previous WAIT so the FE branches into the new work.
 */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
	struct etnaviv_iommu_context *mmu_context, unsigned int event,
	struct etnaviv_cmdbuf *cmdbuf)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	/* the WAIT/LINK pair occupies the last 16 bytes written */
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 return_target, return_dwords;
	u32 link_target, link_dwords;
	bool switch_context = gpu->exec_state != exec_state;
	bool switch_mmu_context = gpu->mmu_context != mmu_context;
	unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
	bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;
	bool has_blt = !!(gpu->identity.minor_features5 &
			  chipMinorFeatures5_BLT_ENGINE);

	lockdep_assert_held(&gpu->lock);

	if (drm_debug_enabled(DRM_UT_DRIVER))
		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);

	link_target = etnaviv_cmdbuf_get_va(cmdbuf,
					    &gpu->mmu_context->cmdbuf_mapping);
	link_dwords = cmdbuf->size / 8;

	/*
	 * If we need maintenance prior to submitting this buffer, we will
	 * need to append a mmu flush load state, followed by a new
	 * link to this buffer - a total of four additional words.
	 */
	if (need_flush || switch_context) {
		u32 target, extra_dwords;

		/* link command */
		extra_dwords = 1;

		/* flush command */
		if (need_flush) {
			if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1)
				extra_dwords += 1;
			else
				extra_dwords += 3;
		}

		/* pipe switch commands */
		if (switch_context)
			extra_dwords += 4;

		/* PTA load command */
		if (switch_mmu_context && gpu->sec_mode == ETNA_SEC_KERNEL)
			extra_dwords += 1;

		target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
		/*
		 * Switch MMU context if necessary. Must be done after the
		 * link target has been calculated, as the jump forward in the
		 * kernel ring still uses the last active MMU context before
		 * the switch.
		 */
		if (switch_mmu_context) {
			struct etnaviv_iommu_context *old_context = gpu->mmu_context;

			gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
			etnaviv_iommu_context_put(old_context);
		}

		if (need_flush) {
			/* Add the MMU flush */
			if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1) {
				CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
					       VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
					       VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
					       VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
					       VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
					       VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
			} else {
				u32 flush = VIVS_MMUv2_CONFIGURATION_MODE_MASK |
					    VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH;

				/* switch the PTA entry in secure kernel mode */
				if (switch_mmu_context &&
				    gpu->sec_mode == ETNA_SEC_KERNEL) {
					unsigned short id =
						etnaviv_iommuv2_get_pta_id(gpu->mmu_context);
					CMD_LOAD_STATE(buffer,
						       VIVS_MMUv2_PTA_CONFIG,
						       VIVS_MMUv2_PTA_CONFIG_INDEX(id));
				}

				if (gpu->sec_mode == ETNA_SEC_NONE)
					flush |= etnaviv_iommuv2_get_mtlb_addr(gpu->mmu_context);

				CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
					       flush);
				/* wait until the flush has been performed */
				CMD_SEM(buffer, SYNC_RECIPIENT_FE,
					SYNC_RECIPIENT_PE);
				CMD_STALL(buffer, SYNC_RECIPIENT_FE,
					  SYNC_RECIPIENT_PE);
			}

			gpu->flush_seq = new_flush_seq;
		}

		if (switch_context) {
			etnaviv_cmd_select_pipe(gpu, buffer, exec_state);
			gpu->exec_state = exec_state;
		}

		/* And the link to the submitted buffer */
		link_target = etnaviv_cmdbuf_get_va(cmdbuf,
					&gpu->mmu_context->cmdbuf_mapping);
		CMD_LINK(buffer, link_dwords, link_target);

		/* Update the link target to point to above instructions */
		link_target = target;
		link_dwords = extra_dwords;
	}

	/*
	 * Append a LINK to the submitted command buffer to return to
	 * the ring buffer. return_target is the ring target address.
	 * We need at most 7 dwords in the return target: 2 cache flush +
	 * 2 semaphore stall + 1 event + 1 wait + 1 link.
	 */
	return_dwords = 7;

	/*
	 * When the BLT engine is present we need 6 more dwords in the return
	 * target: 3 enable/flush/disable + 4 enable/semaphore stall/disable,
	 * but we don't need the normal TS flush state.
	 */
	if (has_blt)
		return_dwords += 6;

	return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
	CMD_LINK(cmdbuf, return_dwords, return_target);

	/*
	 * Append a cache flush, stall, event, wait and link pointing back to
	 * the wait command to the ring buffer.
	 */
	if (gpu->exec_state == ETNA_PIPE_2D) {
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
				       VIVS_GL_FLUSH_CACHE_PE2D);
	} else {
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
				       VIVS_GL_FLUSH_CACHE_DEPTH |
				       VIVS_GL_FLUSH_CACHE_COLOR);
		/* TS flush goes through the BLT engine when present */
		if (has_blt) {
			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
			CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
		} else {
			CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
					       VIVS_TS_FLUSH_CACHE_FLUSH);
		}
	}
	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

	if (has_blt) {
		/* BLT must be enabled around its semaphore/stall */
		CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
		CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
	}

	/* signal completion of this submit, then park on a fresh WAIT/LINK */
	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
		       VIVS_GL_EVENT_FROM_PE);
	CMD_WAIT(buffer, gpu->fe_waitcycles);
	CMD_LINK(buffer, 2,
		 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
		 + buffer->user_size - 4);

	if (drm_debug_enabled(DRM_UT_DRIVER))
		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
			return_target,
			etnaviv_cmdbuf_get_va(cmdbuf, &gpu->mmu_context->cmdbuf_mapping),
			cmdbuf->vaddr);

	if (drm_debug_enabled(DRM_UT_DRIVER)) {
		print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
			       cmdbuf->vaddr, cmdbuf->size, 0);

		pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
		pr_info("addr: 0x%08x\n", link_target);
		pr_info("back: 0x%08x\n", return_target);
		pr_info("event: %d\n", event);
	}

	/*
	 * Kick off the submitted command by replacing the previous
	 * WAIT with a link to the address in the ring buffer.
	 */
	etnaviv_buffer_replace_wait(buffer, waitlink_offset,
				    VIV_FE_LINK_HEADER_OP_LINK |
				    VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
				    link_target);

	if (drm_debug_enabled(DRM_UT_DRIVER))
		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
}
540