/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

/*
 * Rings
 * Most engines on the GPU are fed via ring buffers.  Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written.  When the
 * pointers are equal, the ring is idle.  When the host
 * writes commands to the ring buffer, it increments the
 * wptr.  The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);
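
/*
 * Illustrative sketch (not part of the original file): how the rptr/wptr
 * protocol described above turns into an occupancy check.  The ring size
 * is a power of two, so the masked distance between the two pointers is
 * the number of dwords the GPU has yet to fetch; zero means the ring is
 * idle.  The helper name is hypothetical.
 */
static inline uint32_t amdgpu_ring_sketch_pending_dw(uint32_t rptr,
						     uint32_t wptr,
						     uint32_t ptr_mask)
{
	/* unsigned subtraction wraps correctly across the ring boundary */
	return (wptr - rptr) & ptr_mask;
}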

/**
 * amdgpu_ring_alloc - allocate space on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;

	/* Make sure we aren't trying to allocate more space
	 * than the maximum for one submission
	 */
	if (WARN_ON_ONCE(ndw > ring->max_dw))
		return -ENOMEM;

	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}

/**
 * amdgpu_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @count: the number of NOP packets to insert
 *
 * This is the generic insert_nop function for rings except SDMA
 */
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	for (i = 0; i < count; i++)
		amdgpu_ring_write(ring, ring->nop);
}

/**
 * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: IB to add NOP packets to
 *
 * This is the generic pad_ib function for rings except SDMA
 */
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & ring->align_mask)
		ib->ptr[ib->length_dw++] = ring->nop;
}

/**
 * amdgpu_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
	uint32_t count;

	/* We pad to match fetch size */
	count = ring->align_mask + 1 - (ring->wptr & ring->align_mask);
	count %= ring->align_mask + 1;
	ring->funcs->insert_nop(ring, count);

	mb();
	amdgpu_ring_set_wptr(ring);
}

/**
 * amdgpu_ring_undo - reset the wptr
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
	ring->wptr = ring->wptr_old;
}

/**
 * amdgpu_ring_backup - Back up the content of a ring
 *
 * @ring: the ring we want to back up
 * @data: filled with a pointer to the saved ring content
 *
 * Saves all unprocessed commits from a ring, returns the number of dwords saved.
 */
unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
			    uint32_t **data)
{
	unsigned size, ptr, i;

	*data = NULL;

	if (ring->ring_obj == NULL)
		return 0;

	/* it doesn't make sense to save anything if all fences are signaled */
	if (!amdgpu_fence_count_emitted(ring))
		return 0;

	ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);

	size = ring->wptr + (ring->ring_size / 4);
	size -= ptr;
	size &= ring->ptr_mask;
	if (size == 0)
		return 0;

	/* and then save the content of the ring */
	*data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
	if (!*data)
		return 0;
	for (i = 0; i < size; ++i) {
		(*data)[i] = ring->ring[ptr++];
		ptr &= ring->ptr_mask;
	}

	return size;
}
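
/*
 * Minimal usage sketch (not part of the original file): the
 * alloc/write/commit protocol implemented above.  A caller reserves
 * space, emits its dwords, then commits to publish the new wptr; if
 * packet construction failed midway it would call amdgpu_ring_undo()
 * instead of committing.  The helper name is hypothetical; here we
 * simply emit NOPs as placeholder packets.
 */
static inline int amdgpu_ring_sketch_emit_nops(struct amdgpu_ring *ring,
					       unsigned ndw)
{
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, ndw);	/* reserve ndw dwords */
	if (r)
		return r;

	for (i = 0; i < ndw; ++i)
		amdgpu_ring_write(ring, ring->nop);

	amdgpu_ring_commit(ring);		/* pad and publish the wptr */
	return 0;
}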

/**
 * amdgpu_ring_restore - append saved commands to the ring again
 *
 * @ring: ring to append commands to
 * @size: number of dwords we want to write
 * @data: saved commands
 *
 * Allocates space on the ring and restores the previously saved commands.
 */
int amdgpu_ring_restore(struct amdgpu_ring *ring,
			unsigned size, uint32_t *data)
{
	int i, r;

	if (!size || !data)
		return 0;

	/* restore the saved ring content */
	r = amdgpu_ring_alloc(ring, size);
	if (r)
		return r;

	for (i = 0; i < size; ++i)
		amdgpu_ring_write(ring, data[i]);

	amdgpu_ring_commit(ring);
	kfree(data);
	return 0;
}

/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @ring_size: size of the ring
 * @nop: nop packet for this ring
 * @align_mask: alignment mask for ring submissions
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 * @ring_type: type of this ring (GFX, compute, SDMA, UVD, VCE, ...)
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned ring_size, u32 nop, u32 align_mask,
		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
		     enum amdgpu_ring_type ring_type)
{
	u32 rb_bufsz;
	int r;

	if (ring->adev == NULL) {
		if (adev->num_rings >= AMDGPU_MAX_RINGS)
			return -EINVAL;

		ring->adev = adev;
		ring->idx = adev->num_rings++;
		adev->rings[ring->idx] = ring;
		r = amdgpu_fence_driver_init_ring(ring,
			amdgpu_sched_hw_submission);
		if (r)
			return r;
	}

	r = amdgpu_wb_get(adev, &ring->rptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_wb_get(adev, &ring->wptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_wb_get(adev, &ring->fence_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_wb_get(adev, &ring->next_rptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r);
		return r;
	}
	ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4);
	ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];

	r = amdgpu_wb_get(adev, &ring->cond_exe_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
		return r;
	}
	ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
	ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];

	spin_lock_init(&ring->fence_lock);
	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
	if (r) {
		dev_err(adev->dev, "failed initializing fences (%d).\n", r);
		return r;
	}

	/* Align ring size */
	rb_bufsz = order_base_2(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	ring->ring_size = ring_size;
	ring->align_mask = align_mask;
	ring->nop = nop;
	ring->type = ring_type;

	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0,
				     NULL, NULL, &ring->ring_obj);
		if (r) {
			dev_err(adev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = amdgpu_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = amdgpu_bo_pin(ring->ring_obj, AMDGPU_GEM_DOMAIN_GTT,
				  &ring->gpu_addr);
		if (r) {
			amdgpu_bo_unreserve(ring->ring_obj);
			dev_err(adev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = amdgpu_bo_kmap(ring->ring_obj,
				   (void **)&ring->ring);
		amdgpu_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(adev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->max_dw = DIV_ROUND_UP(ring->ring_size / 4,
				    amdgpu_sched_hw_submission);

	if (amdgpu_debugfs_ring_init(adev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings !\n");
	}
	return 0;
}
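
/*
 * Worked example (illustration, not original code) for the "Align ring
 * size" step in amdgpu_ring_init() above: order_base_2() rounds up, so
 * the ring ends up sized to a power of two.  For a requested ring_size
 * of 4096 bytes, ring_size / 8 = 512, order_base_2(512) = 9, and
 * (1 << 10) * 4 = 4096 bytes again.  For 6144 bytes, 6144 / 8 = 768,
 * order_base_2(768) = 10, and (1 << 11) * 4 = 8192 bytes, the next
 * power of two.  This is what makes the ptr_mask wraparound trick work.
 */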

/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
	int r;
	struct amdgpu_bo *ring_obj;

	ring_obj = ring->ring_obj;
	ring->ready = false;
	ring->ring = NULL;
	ring->ring_obj = NULL;

	amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
	amdgpu_wb_free(ring->adev, ring->fence_offs);
	amdgpu_wb_free(ring->adev, ring->rptr_offs);
	amdgpu_wb_free(ring->adev, ring->wptr_offs);
	amdgpu_wb_free(ring->adev, ring->next_rptr_offs);

	if (ring_obj) {
		r = amdgpu_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(ring_obj);
			amdgpu_bo_unpin(ring_obj);
			amdgpu_bo_unreserve(ring_obj);
		}
		amdgpu_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int roffset = *(int*)node->info_ent->data;
	struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset);

	uint32_t rptr, wptr, rptr_next;
	unsigned i;

	wptr = amdgpu_ring_get_wptr(ring);
	seq_printf(m, "wptr: 0x%08x [%5d]\n", wptr, wptr);

	rptr = amdgpu_ring_get_rptr(ring);
	rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr);

	seq_printf(m, "rptr: 0x%08x [%5d]\n", rptr, rptr);

	seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
		   ring->wptr, ring->wptr);

	if (!ring->ready)
		return 0;

	/* print 32 dwords before the current rptr, as often the last
	 * executed packet is the root of the issue
	 */
	i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
	while (i != rptr) {
		seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
		if (i == rptr)
			seq_puts(m, " *");
		if (i == rptr_next)
			seq_puts(m, " #");
		seq_puts(m, "\n");
		i = (i + 1) & ring->ptr_mask;
	}
	while (i != wptr) {
		seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
		if (i == rptr)
			seq_puts(m, " *");
		if (i == rptr_next)
			seq_puts(m, " #");
		seq_puts(m, "\n");
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

/* TODO: clean this up !*/
static int amdgpu_gfx_index = offsetof(struct amdgpu_device, gfx.gfx_ring[0]);
static int cayman_cp1_index = offsetof(struct amdgpu_device, gfx.compute_ring[0]);
static int cayman_cp2_index = offsetof(struct amdgpu_device, gfx.compute_ring[1]);
static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma.instance[0].ring);
static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma.instance[1].ring);
static int r600_uvd_index = offsetof(struct amdgpu_device, uvd.ring);
static int si_vce1_index = offsetof(struct amdgpu_device, vce.ring[0]);
static int si_vce2_index = offsetof(struct amdgpu_device, vce.ring[1]);

static const struct drm_info_list amdgpu_debugfs_ring_info_list[] = {
	{"amdgpu_ring_gfx", amdgpu_debugfs_ring_info, 0, &amdgpu_gfx_index},
	{"amdgpu_ring_cp1", amdgpu_debugfs_ring_info, 0, &cayman_cp1_index},
	{"amdgpu_ring_cp2", amdgpu_debugfs_ring_info, 0, &cayman_cp2_index},
	{"amdgpu_ring_dma1", amdgpu_debugfs_ring_info, 0, &amdgpu_dma1_index},
	{"amdgpu_ring_dma2", amdgpu_debugfs_ring_info, 0, &amdgpu_dma2_index},
	{"amdgpu_ring_uvd", amdgpu_debugfs_ring_info, 0, &r600_uvd_index},
	{"amdgpu_ring_vce1", amdgpu_debugfs_ring_info, 0, &si_vce1_index},
	{"amdgpu_ring_vce2", amdgpu_debugfs_ring_info, 0, &si_vce2_index},
};
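
/*
 * Illustrative sketch (not part of the original file): the table above
 * stores byte offsets into struct amdgpu_device rather than ring
 * pointers, because the rings are embedded in adev and the drm_info_list
 * initializers must be constant.  Recovering the ring at runtime is
 * plain pointer arithmetic, mirroring what amdgpu_debugfs_ring_info()
 * does; the helper name is hypothetical.
 */
static inline struct amdgpu_ring *
amdgpu_ring_sketch_from_offset(struct amdgpu_device *adev, int roffset)
{
	return (struct amdgpu_ring *)((uint8_t *)adev + roffset);
}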

#endif

static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) {
		const struct drm_info_list *info = &amdgpu_debugfs_ring_info_list[i];
		int roffset = *(int*)amdgpu_debugfs_ring_info_list[i].data;
		struct amdgpu_ring *other = (void *)(((uint8_t*)adev) + roffset);
		int r;

		if (other != ring)
			continue;

		r = amdgpu_debugfs_add_files(adev, info, 1);
		if (r)
			return r;
	}
#endif
	return 0;
}