Lines Matching +full:software +full:- +full:locked

5  * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
43 * r600_dma_get_rptr - get the current read pointer
55 if (rdev->wb.enabled) in r600_dma_get_rptr()
56 rptr = rdev->wb.wb[ring->rptr_offs/4]; in r600_dma_get_rptr()
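
The two lines above show the two possible read-pointer sources: the CPU-visible writeback slot when writeback is enabled, otherwise the DMA_RB_RPTR register. The hardware value is a byte offset, so it is masked and shifted down to a ring dword index. A standalone sketch of that pattern, with a stand-in register read instead of the driver's RREG32():

    #include <stdint.h>
    #include <stdbool.h>

    struct wb_state { bool enabled; volatile uint32_t *wb; };

    /* Stand-in for RREG32(DMA_RB_RPTR); always 0 here, for illustration only. */
    static uint32_t read_dma_rb_rptr_reg(void) { return 0; }

    static uint32_t dma_get_rptr(const struct wb_state *wb, uint32_t rptr_offs)
    {
        uint32_t rptr;

        if (wb->enabled)
            rptr = wb->wb[rptr_offs / 4];   /* CPU-visible writeback slot, dword indexed */
        else
            rptr = read_dma_rb_rptr_reg();  /* fall back to the MMIO register */

        return (rptr & 0x3fffc) >> 2;       /* byte offset -> ring dword index */
    }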
64 * r600_dma_get_wptr - get the current write pointer
78 * r600_dma_set_wptr - commit the write pointer
88 WREG32(DMA_RB_WPTR, (ring->wptr << 2) & 0x3fffc); in r600_dma_set_wptr()
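
The write pointer goes the other way: the ring keeps it as a dword index, and the register write converts it to a byte offset clipped to the ring aperture. A minimal sketch of just that conversion:

    #include <stdint.h>

    /* Dword write pointer -> byte value expected by DMA_RB_WPTR;
     * 0x3fffc keeps it dword aligned and inside the ring aperture. */
    static inline uint32_t dma_wptr_to_reg(uint32_t wptr_dw)
    {
        return (wptr_dw << 2) & 0x3fffc;
    }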
92 * r600_dma_stop - stop the async dma engine
96 * Stop the async dma engine (r6xx-evergreen).
102 if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) in r600_dma_stop()
103 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); in r600_dma_stop()
108 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; in r600_dma_stop()
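
Stopping the engine is a read-modify-write of the ring-buffer control register that clears the enable bit, after which the ring is marked not ready so no further work is submitted. A sketch under assumed names (the enable-bit position and the mmio helpers are stand-ins, not the driver's DMA_RB_CNTL layout or WREG32/RREG32):

    #include <stdint.h>
    #include <stdbool.h>

    #define DMA_RB_ENABLE  (1u << 0)   /* assumed bit position, for illustration only */

    static uint32_t fake_regs[16];                            /* stand-in MMIO space */
    static uint32_t mmio_read(unsigned int reg)               { return fake_regs[reg]; }
    static void     mmio_write(unsigned int reg, uint32_t v)  { fake_regs[reg] = v; }

    static void dma_stop(unsigned int rb_cntl_reg, bool *ring_ready)
    {
        uint32_t rb_cntl = mmio_read(rb_cntl_reg);

        rb_cntl &= ~DMA_RB_ENABLE;          /* clear the ring-buffer enable bit */
        mmio_write(rb_cntl_reg, rb_cntl);

        *ring_ready = false;                /* refuse further submissions */
    }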
112 * r600_dma_resume - setup and start the async dma engine
116 * Set up the DMA ring buffer and enable it. (r6xx-evergreen).
121 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; in r600_dma_resume()
130 rb_bufsz = order_base_2(ring->ring_size / 4); in r600_dma_resume()
143 upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF); in r600_dma_resume()
145 ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC)); in r600_dma_resume()
147 if (rdev->wb.enabled) in r600_dma_resume()
150 WREG32(DMA_RB_BASE, ring->gpu_addr >> 8); in r600_dma_resume()
163 if (rdev->family >= CHIP_RV770) in r600_dma_resume()
166 ring->wptr = 0; in r600_dma_resume()
167 WREG32(DMA_RB_WPTR, ring->wptr << 2); in r600_dma_resume()
171 ring->ready = true; in r600_dma_resume()
175 ring->ready = false; in r600_dma_resume()
179 if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) in r600_dma_resume()
180 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); in r600_dma_resume()
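
The resume fragments above program the ring in the encodings the hardware expects: the ring size as a power-of-two exponent counted in dwords (hence the / 4), the rptr writeback address split into a dword-aligned low word plus the upper 8 address bits, and the ring base shifted right by 8 (256-byte aligned). A standalone sketch of those conversions; the helper names are illustrative, not the driver's:

    #include <stdint.h>

    /* order_base_2() equivalent: smallest n such that (1u << n) >= x. */
    static unsigned int ring_size_order(uint32_t size_in_dw)
    {
        unsigned int n = 0;
        while ((1u << n) < size_in_dw)
            n++;
        return n;
    }

    /* Split a writeback GPU address the way DMA_RB_RPTR_ADDR_HI/LO expect it. */
    static void split_rptr_addr(uint64_t gpu_addr, uint32_t *hi, uint32_t *lo)
    {
        *hi = (uint32_t)(gpu_addr >> 32) & 0xFF;   /* address bits 39:32 */
        *lo = (uint32_t)gpu_addr & 0xFFFFFFFC;     /* low word, dword aligned */
    }

    /* Example: a 64 KiB ring is 16384 dwords -> exponent 14; the base register
     * takes the ring's GPU address shifted right by 8. */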
186 * r600_dma_fini - tear down the async dma engine
190 * Stop the async dma engine and free the ring (r6xx-evergreen).
195 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); in r600_dma_fini()
199 * r600_dma_is_lockup - Check if the DMA engine is locked up
204 * Check if the async DMA engine is locked up.
205 * Returns true if the engine appears to be locked up, false if not.
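
A simplified sketch of the lockup-check idea (not the driver's implementation, which goes through the soft-reset status and the shared radeon ring-lockup helpers): if the DMA block is not reported busy, refresh the last-known read pointer and report healthy; if it is busy, report a lockup only when the read pointer has made no forward progress since the previous check.

    #include <stdint.h>
    #include <stdbool.h>

    struct lockup_state { uint32_t last_rptr; };

    static bool dma_is_lockup(struct lockup_state *st, bool dma_busy, uint32_t rptr)
    {
        if (!dma_busy) {
            st->last_rptr = rptr;            /* idle or making progress: update baseline */
            return false;
        }
        return rptr == st->last_rptr;        /* busy but rptr has not advanced */
    }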
220 * r600_dma_ring_test - simple async dma engine test
226 * value to memory. (r6xx-SI).
238 if (ring->idx == R600_RING_TYPE_DMA_INDEX) in r600_dma_ring_test()
243 gpu_addr = rdev->wb.gpu_addr + index; in r600_dma_ring_test()
246 rdev->wb.wb[index/4] = cpu_to_le32(tmp); in r600_dma_ring_test()
250 DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r); in r600_dma_ring_test()
259 for (i = 0; i < rdev->usec_timeout; i++) { in r600_dma_ring_test()
260 tmp = le32_to_cpu(rdev->wb.wb[index/4]); in r600_dma_ring_test()
266 if (i < rdev->usec_timeout) { in r600_dma_ring_test()
267 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); in r600_dma_ring_test()
270 ring->idx, tmp); in r600_dma_ring_test()
271 r = -EINVAL; in r600_dma_ring_test()
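
The ring test seeds a writeback scratch slot (0xCAFEDEAD), asks the DMA engine to overwrite it (0xDEADBEEF), then polls with microsecond granularity until the value changes or the timeout expires. A standalone sketch of that polling pattern; nanosleep() stands in for the kernel's udelay(1):

    #include <stdint.h>
    #include <time.h>

    /* Returns the number of ~1 us iterations it took, or -1 on timeout. */
    static int poll_scratch(volatile uint32_t *scratch, uint32_t expected,
                            unsigned int timeout_us)
    {
        struct timespec one_us = { 0, 1000 };
        unsigned int i;

        for (i = 0; i < timeout_us; i++) {
            if (*scratch == expected)
                return (int)i;               /* the engine wrote the test value */
            nanosleep(&one_us, NULL);
        }
        return -1;                           /* engine never wrote the value */
    }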
277 * r600_dma_fence_ring_emit - emit a fence on the DMA ring
284 * an interrupt if needed (r6xx-r7xx).
289 struct radeon_ring *ring = &rdev->ring[fence->ring]; in r600_dma_fence_ring_emit()
290 u64 addr = rdev->fence_drv[fence->ring].gpu_addr; in r600_dma_fence_ring_emit()
296 radeon_ring_write(ring, lower_32_bits(fence->seq)); in r600_dma_fence_ring_emit()
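
The fence packet writes the fence sequence number to the fence driver's GPU address, and the address is emitted in the split form used throughout this file: the dword-aligned low 32 bits in one ring dword, then bits 39:32 in the next. A sketch of that payload; emit_dw() is a stand-in for radeon_ring_write(), and the packet headers themselves are not shown:

    #include <stdint.h>

    static void emit_dw(uint32_t *ring, unsigned int *wptr, uint32_t v)
    {
        ring[(*wptr)++] = v;
    }

    static void emit_fence_payload(uint32_t *ring, unsigned int *wptr,
                                   uint64_t fence_gpu_addr, uint32_t seq)
    {
        emit_dw(ring, wptr, (uint32_t)fence_gpu_addr & 0xfffffffc);    /* addr low, dword aligned */
        emit_dw(ring, wptr, (uint32_t)(fence_gpu_addr >> 32) & 0xff);  /* addr bits 39:32 */
        emit_dw(ring, wptr, seq);                                      /* fence sequence number */
    }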
302 * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
310 * other rings (r6xx-SI).
317 u64 addr = semaphore->gpu_addr; in r600_dma_semaphore_ring_emit()
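
Both the signalling ring and the waiting ring emit the same semaphore packet at the same GPU address; a single select field distinguishes signal from wait (the driver derives it as wait -> 0, signal -> 1). A sketch of that selection; the field positions below are assumptions for illustration, not the DMA_PACKET() macro from r600d.h:

    #include <stdint.h>
    #include <stdbool.h>

    static uint32_t pack_semaphore_header(uint32_t opcode, uint32_t sel)
    {
        return (opcode << 28) | (sel << 22);   /* illustrative field layout */
    }

    static uint32_t semaphore_header(uint32_t opcode, bool emit_wait)
    {
        uint32_t s = emit_wait ? 0 : 1;        /* wait -> 0, signal -> 1 */
        return pack_semaphore_header(opcode, s);
    }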
328 * r600_dma_ib_test - test an IB on the DMA engine
333 * Test a simple IB in the DMA ring (r6xx-SI).
345 if (ring->idx == R600_RING_TYPE_DMA_INDEX) in r600_dma_ib_test()
350 gpu_addr = rdev->wb.gpu_addr + index; in r600_dma_ib_test()
352 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); in r600_dma_ib_test()
377 return -ETIMEDOUT; in r600_dma_ib_test()
380 for (i = 0; i < rdev->usec_timeout; i++) { in r600_dma_ib_test()
381 tmp = le32_to_cpu(rdev->wb.wb[index/4]); in r600_dma_ib_test()
386 if (i < rdev->usec_timeout) { in r600_dma_ib_test()
387 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i); in r600_dma_ib_test()
390 r = -EINVAL; in r600_dma_ib_test()
397 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
402 * Schedule an IB in the DMA ring (r6xx-r7xx).
406 struct radeon_ring *ring = &rdev->ring[ib->ring]; in r600_dma_ring_ib_execute()
408 if (rdev->wb.enabled) { in r600_dma_ring_ib_execute()
409 u32 next_rptr = ring->wptr + 4; in r600_dma_ring_ib_execute()
414 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); in r600_dma_ring_ib_execute()
415 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff); in r600_dma_ring_ib_execute()
422 while ((ring->wptr & 7) != 5) in r600_dma_ring_ib_execute()
425 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); in r600_dma_ring_ib_execute()
426 radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF)); in r600_dma_ring_ib_execute()
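
The loop at line 422 above pads the ring so the 3-dword indirect-buffer packet (header, IB address, length | address high bits) ends on an 8-dword boundary: NOPs are emitted until wptr % 8 == 5, and 5 + 3 == 8. A standalone sketch of that padding step; the NOP encoding is a stand-in, not the real DMA_PACKET(DMA_PACKET_NOP, ...) value:

    #include <stdint.h>

    #define DMA_NOP_PACKET 0u   /* stand-in NOP header, for illustration only */

    static unsigned int pad_for_ib_packet(uint32_t *ring, unsigned int wptr,
                                          unsigned int ring_mask)
    {
        while ((wptr & 7) != 5)
            ring[wptr++ & ring_mask] = DMA_NOP_PACKET;
        return wptr;            /* next three dwords hold the IB packet */
    }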
431 * r600_copy_dma - copy pages using the DMA engine
450 int ring_index = rdev->asic->copy.dma_ring_index; in r600_copy_dma()
451 struct radeon_ring *ring = &rdev->ring[ring_index]; in r600_copy_dma()
468 radeon_sync_rings(rdev, &sync, ring->idx); in r600_copy_dma()
474 size_in_dw -= cur_size_in_dw; in r600_copy_dma()
484 r = radeon_fence_emit(rdev, &fence, ring->idx); in r600_copy_dma()
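
Each r6xx DMA copy packet can move at most 0xFFFE dwords, so a large copy is split into chunks (see the size_in_dw bookkeeping at line 474 above): every iteration emits one copy packet and advances the source and destination GPU addresses by chunk * 4 bytes. A standalone sketch of that chunking, printing the plan instead of emitting packets:

    #include <stdint.h>
    #include <stdio.h>

    static void plan_dma_copy(uint64_t src, uint64_t dst, uint32_t size_in_dw)
    {
        const uint32_t max_dw = 0xFFFE;    /* per-packet limit */

        while (size_in_dw) {
            uint32_t cur = size_in_dw > max_dw ? max_dw : size_in_dw;

            printf("copy %u dwords: 0x%llx -> 0x%llx\n",
                   cur, (unsigned long long)src, (unsigned long long)dst);

            src += (uint64_t)cur * 4;      /* addresses advance in bytes */
            dst += (uint64_t)cur * 4;
            size_in_dw -= cur;
        }
    }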