Lines matching `gpu` in drivers/gpu/drm/etnaviv/etnaviv_buffer.c

91 static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu, in etnaviv_cmd_select_pipe()
96 lockdep_assert_held(&gpu->lock); in etnaviv_cmd_select_pipe()
104 if (gpu->exec_state == ETNA_PIPE_2D) in etnaviv_cmd_select_pipe()
106 else if (gpu->exec_state == ETNA_PIPE_3D) in etnaviv_cmd_select_pipe()
117 static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, in etnaviv_buffer_dump()
123 dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n", in etnaviv_buffer_dump()
125 &gpu->mmu_context->cmdbuf_mapping) + in etnaviv_buffer_dump()
134 * The GPU may be executing this WAIT while we're modifying it, so we have
135 * to write it in a specific order to avoid the GPU branching to somewhere   (this write-ordering pattern is sketched after the listing)
153 static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu, in etnaviv_buffer_reserve()
160 &gpu->mmu_context->cmdbuf_mapping) + in etnaviv_buffer_reserve()
164 u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu) in etnaviv_buffer_init()
166 struct etnaviv_cmdbuf *buffer = &gpu->buffer; in etnaviv_buffer_init()
168 lockdep_assert_held(&gpu->lock); in etnaviv_buffer_init()
173 CMD_WAIT(buffer, gpu->fe_waitcycles); in etnaviv_buffer_init()
175 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping) in etnaviv_buffer_init()
181 u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr) in etnaviv_buffer_config_mmuv2()
183 struct etnaviv_cmdbuf *buffer = &gpu->buffer; in etnaviv_buffer_config_mmuv2()
185 lockdep_assert_held(&gpu->lock); in etnaviv_buffer_config_mmuv2()
189 if (gpu->identity.features & chipFeatures_PIPE_3D) { in etnaviv_buffer_config_mmuv2()
199 if (gpu->identity.features & chipFeatures_PIPE_2D) { in etnaviv_buffer_config_mmuv2()
216 u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id) in etnaviv_buffer_config_pta()
218 struct etnaviv_cmdbuf *buffer = &gpu->buffer; in etnaviv_buffer_config_pta()
220 lockdep_assert_held(&gpu->lock); in etnaviv_buffer_config_pta()
234 void etnaviv_buffer_end(struct etnaviv_gpu *gpu) in etnaviv_buffer_end()
236 struct etnaviv_cmdbuf *buffer = &gpu->buffer; in etnaviv_buffer_end()
239 bool has_blt = !!(gpu->identity.minor_features5 & in etnaviv_buffer_end()
242 lockdep_assert_held(&gpu->lock); in etnaviv_buffer_end()
244 if (gpu->exec_state == ETNA_PIPE_2D) in etnaviv_buffer_end()
246 else if (gpu->exec_state == ETNA_PIPE_3D) in etnaviv_buffer_end()
259 link_target = etnaviv_buffer_reserve(gpu, buffer, dwords); in etnaviv_buffer_end()
270 if (gpu->exec_state == ETNA_PIPE_3D) { in etnaviv_buffer_end()
302 void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event) in etnaviv_sync_point_queue()
304 struct etnaviv_cmdbuf *buffer = &gpu->buffer; in etnaviv_sync_point_queue()
308 lockdep_assert_held(&gpu->lock); in etnaviv_sync_point_queue()
315 target = etnaviv_buffer_reserve(gpu, buffer, dwords); in etnaviv_sync_point_queue()
321 /* Stop the FE to 'pause' the GPU */ in etnaviv_sync_point_queue()
325 CMD_WAIT(buffer, gpu->fe_waitcycles); in etnaviv_sync_point_queue()
327 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping) in etnaviv_sync_point_queue()
341 void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, in etnaviv_buffer_queue()
345 struct etnaviv_cmdbuf *buffer = &gpu->buffer; in etnaviv_buffer_queue()
349 bool switch_context = gpu->exec_state != exec_state; in etnaviv_buffer_queue()
350 bool switch_mmu_context = gpu->mmu_context != mmu_context; in etnaviv_buffer_queue()
352 bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq; in etnaviv_buffer_queue()
353 bool has_blt = !!(gpu->identity.minor_features5 & in etnaviv_buffer_queue()
356 lockdep_assert_held(&gpu->lock); in etnaviv_buffer_queue()
359 etnaviv_buffer_dump(gpu, buffer, 0, 0x50); in etnaviv_buffer_queue()
362 &gpu->mmu_context->cmdbuf_mapping); in etnaviv_buffer_queue()
378 if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1) in etnaviv_buffer_queue()
389 if (switch_mmu_context && gpu->sec_mode == ETNA_SEC_KERNEL) in etnaviv_buffer_queue()
392 target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords); in etnaviv_buffer_queue()
400 struct etnaviv_iommu_context *old_context = gpu->mmu_context; in etnaviv_buffer_queue()
402 gpu->mmu_context = etnaviv_iommu_context_get(mmu_context); in etnaviv_buffer_queue()
408 if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1) { in etnaviv_buffer_queue()
420 gpu->sec_mode == ETNA_SEC_KERNEL) { in etnaviv_buffer_queue()
422 etnaviv_iommuv2_get_pta_id(gpu->mmu_context); in etnaviv_buffer_queue()
428 if (gpu->sec_mode == ETNA_SEC_NONE) in etnaviv_buffer_queue()
429 flush |= etnaviv_iommuv2_get_mtlb_addr(gpu->mmu_context); in etnaviv_buffer_queue()
439 gpu->flush_seq = new_flush_seq; in etnaviv_buffer_queue()
443 etnaviv_cmd_select_pipe(gpu, buffer, exec_state); in etnaviv_buffer_queue()
444 gpu->exec_state = exec_state; in etnaviv_buffer_queue()
449 &gpu->mmu_context->cmdbuf_mapping); in etnaviv_buffer_queue()
473 return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords); in etnaviv_buffer_queue()
480 if (gpu->exec_state == ETNA_PIPE_2D) { in etnaviv_buffer_queue()
509 CMD_WAIT(buffer, gpu->fe_waitcycles); in etnaviv_buffer_queue()
511 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping) in etnaviv_buffer_queue()
517 etnaviv_cmdbuf_get_va(cmdbuf, &gpu->mmu_context->cmdbuf_mapping), in etnaviv_buffer_queue()
540 etnaviv_buffer_dump(gpu, buffer, 0, 0x50); in etnaviv_buffer_queue()
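The comment excerpted at source lines 134-135 describes how the terminating WAIT command is rewritten while the GPU front end may still be spinning on it: the operand word is updated first and the command word last, so the FE can never fetch an opcode paired with a stale operand. Below is a minimal sketch of that ordering, assuming a two-dword WAIT/LINK layout and kernel barrier semantics; the helper name, parameters, and buffer layout are illustrative assumptions, not a quote of the driver.

#include <linux/types.h>
#include <asm/barrier.h>

/*
 * Sketch: retarget a WAIT command in place while the FE may be executing it.
 * Write the operand (e.g. the LINK target address) first, then a full memory
 * barrier, then overwrite the opcode, so the FE never sees a command word
 * whose operand has not been updated yet. Names and layout are assumptions.
 */
static void replace_wait_sketch(u32 *vaddr, unsigned int wl_offset_dwords,
				u32 new_cmd, u32 new_arg)
{
	u32 *lw = vaddr + wl_offset_dwords;

	lw[1] = new_arg;	/* operand word first */
	mb();			/* make the operand visible before the opcode */
	lw[0] = new_cmd;	/* replace WAIT with the new command last */
}

In the driver, a rewrite of this kind is what retargets the previous buffer's WAIT/LINK loop at newly queued commands (for example from etnaviv_buffer_queue()), which is why the write order and the barrier matter.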