Lines matching +full:wait +full:- +full:queue

Matching lines from the v3d DRM driver (apparently drivers/gpu/drm/v3d/v3d_gem.c), grouped by the function each hit landed in; non-matching context between hits is elided as "...".

// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

#include <linux/dma-mapping.h>
In v3d_init_core():

	if (v3d->ver < V3D_GEN_41)
		...
In v3d_idle_axi():

	V3D_CORE_WRITE(core, V3D_GMP_CFG(v3d->ver), V3D_GMP_CFG_STOP_REQ);

	if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS(v3d->ver)) &
		      (V3D_GMP_STATUS_RD_COUNT_MASK |
		       V3D_GMP_STATUS_WR_COUNT_MASK |
		       V3D_GMP_STATUS_CFG_BUSY)) == 0, 100)) {
		DRM_ERROR("Failed to wait for safe GMP shutdown\n");
	}
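The wait_for() above is v3d's local poll-until-timeout helper (defined in v3d_drv.h). A minimal sketch of that style of macro, assuming a jiffies-based deadline and 1ms sleeps; the details here are illustrative, not the driver's verbatim definition:

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	/* Evaluate COND repeatedly until it is true or MS milliseconds
	 * pass; returns 0 on success, -ETIMEDOUT otherwise.
	 */
	#define wait_for(COND, MS) ({					\
		unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
		int ret__ = 0;						\
		while (!(COND)) {					\
			if (time_after(jiffies, timeout__)) {		\
				ret__ = -ETIMEDOUT;			\
				break;					\
			}						\
			msleep(1);					\
		}							\
		ret__;							\
	})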
In v3d_idle_gca():

	if (v3d->ver >= V3D_GEN_41)
		return;
	...
		DRM_ERROR("Failed to wait for safe GCA shutdown\n");
In v3d_reset_by_bridge():

	/* GFXH-1383: The SW_INIT may cause a stray write to address 0
	 * of the unit, so reset it to its power-on value here.
	 */
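A hedged sketch of the bridge-reset sequence that comment sits in (register names per v3d_regs.h; treat this as illustrative rather than verbatim):

	/* Pulse the bridge software reset. */
	V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0,
			 V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT);
	V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, 0);
	/* GFXH-1383: SW_INIT may have strayed a write to address 0 of
	 * the unit; restore the AXI config to its power-on value.
	 */
	V3D_WRITE(V3D_HUB_AXICFG, V3D_HUB_AXICFG_MAX_LEN_MASK);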
In v3d_reset_v3d():

	if (v3d->reset)
		reset_control_reset(v3d->reset);
In v3d_reset_sms():

	if (v3d->ver < V3D_GEN_71)
		return;
	...
		DRM_ERROR("Failed to wait for SMS reset\n");
In v3d_reset():

	struct drm_device *dev = &v3d->drm;

	DRM_DEV_ERROR(dev->dev, "Resetting GPU for hang.\n");
	DRM_DEV_ERROR(dev->dev, "V3D_ERR_STAT: 0x%08x\n",
		      V3D_CORE_READ(0, V3D_ERR_STAT));
	...
	v3d_perfmon_stop(v3d, v3d->active_perfmon, false);
In v3d_flush_l3():

	if (v3d->ver < V3D_GEN_41) {
		...
		if (v3d->ver < V3D_GEN_33) {
			...
/* Invalidates the (read-only) L2C cache. This was the L2 cache for
 * uniforms and instructions on V3D 3.2.
 */
In v3d_invalidate_l2c():

	if (v3d->ver >= V3D_GEN_33)
		return;
	...
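On the pre-3.3 parts that still have an L2C, the invalidate itself boils down to a single register write; a hedged sketch (register and bit names per v3d_regs.h, illustrative rather than verbatim):

	/* Clear and re-enable the L2C; no completion wait is needed
	 * since the cache is read-only (no dirty data to lose).
	 */
	V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
		       V3D_L2CACTL_L2CCLR |
		       V3D_L2CACTL_L2CENA);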
In v3d_flush_l2t():

	/* While there is a busy bit (V3D_L2TCACTL_L2TFLS), we don't
	 * need to wait for completion before dispatching the job --
	 * L2T accesses will be stalled until the flush has completed.
	 * However, we do need to make sure we don't try to trigger a
	 * new flush while the L2_CLEAN queue is trying to
	 * synchronously clean after a job.
	 */
	mutex_lock(&v3d->cache_clean_lock);
	...
	mutex_unlock(&v3d->cache_clean_lock);
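A hedged sketch of what sits between that lock/unlock pair, per the comment above: one write that triggers the flush, with no completion poll (field helpers per v3d_regs.h; treat as illustrative):

	mutex_lock(&v3d->cache_clean_lock);
	/* Fire-and-forget flush: L2T accesses stall on the busy bit,
	 * so no poll is needed here; the lock only fences off the
	 * L2_CLEAN queue's synchronous clean.
	 */
	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
		       V3D_L2TCACTL_L2TFLS |
		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM));
	mutex_unlock(&v3d->cache_clean_lock);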
/* Cleans texture L1 and L2 cachelines (writing back dirty data).
 *
 * For cleaning, which happens from the CACHE_CLEAN queue after CSD has
 * executed, we need to make sure that the clean is done before
 * signaling job completion. So, we synchronously wait before
 * returning, and we make sure that L2 invalidates don't happen in the
 * meantime to confuse our are-we-done checks.
 */
In v3d_clean_caches():

	struct drm_device *dev = &v3d->drm;
	...
	mutex_lock(&v3d->cache_clean_lock);
	...
	mutex_unlock(&v3d->cache_clean_lock);
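By contrast with the fire-and-forget flush above, the clean path polls for completion under the same lock; a hedged sketch (names per v3d_regs.h, timeout value illustrative):

	mutex_lock(&v3d->cache_clean_lock);
	/* Request an L2T clean (write-back of dirty lines)... */
	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
		       V3D_L2TCACTL_L2TFLS |
		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAN, V3D_L2TCACTL_FLM));
	/* ...and synchronously wait so the job fence can't signal
	 * before dirty data has actually reached memory.
	 */
	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_L2TFLS), 100))
		DRM_ERROR("Timeout waiting for L2T clean\n");
	mutex_unlock(&v3d->cache_clean_lock);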
/* Invalidates the slice caches. These are read-only caches. */
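The function under that comment (v3d_invalidate_slices() in this file) typically amounts to one write to the slice-cache control register; a hedged sketch with field names per v3d_regs.h, treat as illustrative:

	/* Invalidate the TMU data (TDCCS), TMU config (TVCCS), uniform
	 * (UCC), and instruction (ICC) caches on all four slices.
	 */
	V3D_CORE_WRITE(core, V3D_CTL_SLCACTL,
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TVCCS) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TDCCS) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC));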
In v3d_gem_init():

	struct v3d_queue_state *queue = &v3d->queue[i];

	queue->fence_context = dma_fence_context_alloc(1);
	memset(&queue->stats, 0, sizeof(queue->stats));
	seqcount_init(&queue->stats.lock);
	spin_lock_init(&queue->queue_lock);
	spin_lock_init(&queue->fence_lock);
	spin_lock_init(&v3d->mm_lock);
	ret = drmm_mutex_init(dev, &v3d->bo_lock);
	if (ret)
		return ret;
	ret = drmm_mutex_init(dev, &v3d->reset_lock);
	if (ret)
		return ret;
	ret = drmm_mutex_init(dev, &v3d->sched_lock);
	if (ret)
		return ret;
	ret = drmm_mutex_init(dev, &v3d->cache_clean_lock);
	if (ret)
		return ret;
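drmm_mutex_init() is DRM's managed variant of mutex_init(): the mutex's destruction is tied to the drm_device's release, which is why no lock teardown appears in v3d_gem_destroy() below. A minimal usage sketch; the helper's name is hypothetical, only drmm_mutex_init() and the bo_lock field come from the source:

	#include <drm/drm_managed.h>

	/* Hypothetical helper: the mutex lives exactly as long as
	 * 'dev', so the teardown path needs no mutex_destroy().
	 */
	static int v3d_init_example_lock(struct drm_device *dev,
					 struct v3d_dev *v3d)
	{
		return drmm_mutex_init(dev, &v3d->bo_lock);
	}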
	drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1);

	v3d->pt = dma_alloc_wc(v3d->drm.dev, pt_size,
			       &v3d->pt_paddr,
			       GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
	if (!v3d->pt) {
		drm_mm_takedown(&v3d->mm);
		dev_err(v3d->drm.dev,
			"Failed to allocate page tables. Please ensure you have CMA enabled.\n");
		return -ENOMEM;
	}
	...
	drm_mm_takedown(&v3d->mm);
	dma_free_coherent(v3d->drm.dev, pt_size, (void *)v3d->pt,
			  v3d->pt_paddr);
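The second drm_mm_takedown()/dma_free_coherent() pair above is the later error-unwind in the same function. As a sanity check on the sizing, assuming pt_size is the same 4MB figure that v3d_gem_destroy() frees below:

	/* 4MB of u32 PTEs: (4096 * 1024) / sizeof(u32) = 1M entries,
	 * each mapping one 4KB page, i.e. 4GB of GPU virtual address
	 * space.  drm_mm_init(&v3d->mm, 1, ...) hands out pages
	 * starting at 1, presumably so GPU address 0 stays unmapped
	 * and a NULL address faults instead of hitting valid memory.
	 */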
In v3d_gem_destroy():

	WARN_ON(v3d->queue[q].active_job);
	...
	drm_mm_takedown(&v3d->mm);
	...
	dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
			  v3d->pt_paddr);