/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_ras_mgr.h"
#include "atom.h"

/*
 * Rings
 * Most engines on the GPU are fed via ring buffers. Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written. When the
 * pointers are equal, the ring is idle. When the host
 * writes commands to the ring buffer, it increments the
 * wptr. The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
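
/*
 * A minimal sketch of the typical submission flow, assuming an already
 * initialized ring ("packet" stands in for real command dwords):
 *
 *   r = amdgpu_ring_alloc(ring, ndw);
 *   if (r)
 *           return r;
 *   amdgpu_ring_write(ring, packet);    // once per command dword
 *   amdgpu_ring_commit(ring);           // bump wptr, GPU starts fetching
 *
 * On error after a successful alloc, amdgpu_ring_undo() rolls the wptr
 * back instead of committing.
 */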

/**
 * amdgpu_ring_max_ibs - Return max IBs that fit in a single submission.
 *
 * @type: ring type for which to return the limit.
 */
unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type)
{
	switch (type) {
	case AMDGPU_RING_TYPE_GFX:
		/* Need to keep at least 192 on GFX9+ for old radv. */
		return 192;
	case AMDGPU_RING_TYPE_COMPUTE:
		return 125;
	case AMDGPU_RING_TYPE_VCN_JPEG:
		return 16;
	default:
		return 49;
	}
}

/**
 * amdgpu_ring_alloc - allocate space on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer. The number of dwords should be the
 * sum of all commands written to the ring.
 *
 * Returns:
 * 0 on success, or -ENOMEM when trying to allocate more than the maximum
 * number of dwords allowed for one submission.
 */
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw)
{
	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

	/* Make sure we aren't trying to allocate more space
	 * than the maximum for one submission. Skip for reemit
	 * since we may be reemitting several submissions.
	 */
	if (!ring->reemit) {
		if (WARN_ON_ONCE(ndw > ring->max_dw))
			return -ENOMEM;
	}

	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;

	if (ring->funcs->begin_use)
		ring->funcs->begin_use(ring);

	return 0;
}

/**
 * amdgpu_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @count: the number of NOP packets to insert
 *
 * This is the generic insert_nop function for rings except SDMA
 */
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	uint32_t occupied, chunk1, chunk2;

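	/*
	 * The write may wrap: chunk1 covers the dwords up to the end of
	 * the buffer, chunk2 the remainder that continues at the start.
	 */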
	occupied = ring->wptr & ring->buf_mask;
	chunk1 = ring->buf_mask + 1 - occupied;
	chunk1 = (chunk1 >= count) ? count : chunk1;
	chunk2 = count - chunk1;

	if (chunk1)
		memset32(&ring->ring[occupied], ring->funcs->nop, chunk1);

	if (chunk2)
		memset32(ring->ring, ring->funcs->nop, chunk2);

	ring->wptr += count;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw -= count;
}

/**
 * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: IB to add NOP packets to
 *
 * This is the generic pad_ib function for rings except SDMA
 */
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	u32 align_mask = ring->funcs->align_mask;
	u32 count = ib->length_dw & align_mask;

	if (count) {
		count = align_mask + 1 - count;

		memset32(&ib->ptr[ib->length_dw], ring->funcs->nop, count);

		ib->length_dw += count;
	}
}

/**
 * amdgpu_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
	uint32_t count;

	if (ring->count_dw < 0)
		drm_err(adev_to_drm(ring->adev), "writing more dwords to the ring than expected!\n");

	/* We pad to match fetch size */
	count = ring->funcs->align_mask + 1 -
		(ring->wptr & ring->funcs->align_mask);
	count &= ring->funcs->align_mask;

	if (count != 0)
		ring->funcs->insert_nop(ring, count);

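	/* Order the ring buffer writes before the wptr update below */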
	mb();
	amdgpu_ring_set_wptr(ring);

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}

/**
 * amdgpu_ring_undo - reset the wptr
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
	ring->wptr = ring->wptr_old;

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}

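/*
 * Helpers translating a writeback slot index (allocated with
 * amdgpu_device_wb_get()) into the GPU and CPU addresses of that slot;
 * each slot is one 32-bit dword.
 */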
#define amdgpu_ring_get_gpu_addr(ring, offset)				\
	(ring->adev->wb.gpu_addr + offset * 4)

#define amdgpu_ring_get_cpu_addr(ring, offset)				\
	(&ring->adev->wb.wb[offset])

/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @max_dw: maximum number of dw for ring alloc
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 * @hw_prio: ring priority (NORMAL/HIGH)
 * @sched_score: optional score atomic shared with other schedulers
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int hw_prio,
		     atomic_t *sched_score)
{
	int r;
	int sched_hw_submission = amdgpu_sched_hw_submission;
	u32 *num_sched;
	u32 hw_ip;
	unsigned int max_ibs_dw;

	/* Set the hw submission limit higher for KIQ because
	 * it's used for a number of gfx/compute tasks by both
	 * KFD and KGD which may have outstanding fences and
	 * it doesn't really use the gpu scheduler anyway;
	 * KIQ tasks get submitted directly to the ring.
	 */
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		sched_hw_submission = max(sched_hw_submission, 256);
	if (ring->funcs->type == AMDGPU_RING_TYPE_MES)
		sched_hw_submission = 8;
	else if (ring == &adev->sdma.instance[0].page)
		sched_hw_submission = 256;

	if (ring->adev == NULL) {
		if (adev->num_rings >= AMDGPU_MAX_RINGS)
			return -EINVAL;

		ring->adev = adev;
		ring->num_hw_submission = sched_hw_submission;
		ring->sched_score = sched_score;
		ring->vmid_wait = dma_fence_get_stub();

		ring->idx = adev->num_rings++;
		adev->rings[ring->idx] = ring;

		r = amdgpu_fence_driver_init_ring(ring);
		if (r)
			return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->fence_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
		return r;
	}

	ring->fence_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->fence_offs);
	ring->fence_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->fence_offs);

	ring->rptr_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->rptr_offs);
	ring->rptr_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->rptr_offs);

	ring->wptr_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->wptr_offs);
	ring->wptr_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->wptr_offs);

	ring->trail_fence_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->trail_fence_offs);
	ring->trail_fence_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->trail_fence_offs);

	ring->cond_exe_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->cond_exe_offs);
	ring->cond_exe_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->cond_exe_offs);

	/* always set cond_exec_polling to CONTINUE */
	*ring->cond_exe_cpu_addr = 1;

	if (ring->funcs->type != AMDGPU_RING_TYPE_CPER) {
		r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
		if (r) {
			dev_err(adev->dev, "failed initializing fences (%d).\n", r);
			return r;
		}

		max_ibs_dw = ring->funcs->emit_frame_size +
			     amdgpu_ring_max_ibs(ring->funcs->type) * ring->funcs->emit_ib_size;
		max_ibs_dw = (max_ibs_dw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

		if (WARN_ON(max_ibs_dw > max_dw))
			max_dw = max_ibs_dw;

		ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);
	} else {
		ring->ring_size = roundup_pow_of_two(max_dw * 4);
		ring->count_dw = (ring->ring_size - 4) >> 2;
		/* ring buffer is empty now */
		ring->wptr = *ring->rptr_cpu_addr = 0;
	}

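	/*
	 * buf_mask wraps an index into the ring buffer; with 64-bit pointer
	 * support ptr_mask is all ones, so wptr/rptr can grow monotonically
	 * and are only masked down with buf_mask when indexing the ring.
	 */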
	ring->buf_mask = (ring->ring_size / 4) - 1;
	ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
		0xffffffffffffffff : ring->buf_mask;
	/* Initialize cached_rptr to 0 */
	ring->cached_rptr = 0;

	if (!ring->ring_backup) {
		ring->ring_backup = kvzalloc(ring->ring_size, GFP_KERNEL);
		if (!ring->ring_backup)
			return -ENOMEM;
	}

	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_bytes,
					    PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_GTT,
					    &ring->ring_obj,
					    &ring->gpu_addr,
					    (void **)&ring->ring);
		if (r) {
			dev_err(adev->dev, "(%d) ring create failed\n", r);
			kvfree(ring->ring_backup);
			return r;
		}
		amdgpu_ring_clear_ring(ring);
	}

	ring->max_dw = max_dw;
	ring->hw_prio = hw_prio;

	if (!ring->no_scheduler && ring->funcs->type < AMDGPU_HW_IP_NUM) {
		hw_ip = ring->funcs->type;
		num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
		adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
			&ring->sched;
	}

	return 0;
}

/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
	/* Don't tear down a ring that was never initialized */
	if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
		return;

	ring->sched.ready = false;

	amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
	amdgpu_device_wb_free(ring->adev, ring->wptr_offs);

	amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
	amdgpu_device_wb_free(ring->adev, ring->fence_offs);

	amdgpu_bo_free_kernel(&ring->ring_obj,
			      &ring->gpu_addr,
			      (void **)&ring->ring);
	kvfree(ring->ring_backup);
	ring->ring_backup = NULL;

	dma_fence_put(ring->vmid_wait);
	ring->vmid_wait = NULL;
	ring->me = 0;
}

/**
 * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper
 *
 * @ring: ring to write to
 * @reg0: register to write
 * @reg1: register to wait on
 * @ref: reference value to write/wait on
 * @mask: mask to wait on
 *
 * Helper for rings that don't support write and wait in a
 * single oneshot packet.
 */
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t reg1,
						uint32_t ref, uint32_t mask)
{
	amdgpu_ring_emit_wreg(ring, reg0, ref);
	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}

/**
 * amdgpu_ring_soft_recovery - try to soft recover a ring lockup
 *
 * @ring: ring to try the recovery on
 * @vmid: VMID we try to get going again
 * @fence: timed-out fence
 *
 * Tries to get a ring proceeding again when it is stuck.
 */
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence)
{
	unsigned long flags;
	ktime_t deadline;
	bool ret;

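	/* Give the engine at most 10 ms of wall time to make progress */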
	deadline = ktime_add_us(ktime_get(), 10000);

	if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
		return false;

	dma_fence_lock_irqsave(fence, flags);
	if (!dma_fence_is_signaled_locked(fence))
		dma_fence_set_error(fence, -ENODATA);
	dma_fence_unlock_irqrestore(fence, flags);

	while (!dma_fence_is_signaled(fence) &&
	       ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
		ring->funcs->soft_recovery(ring, vmid);

	ret = dma_fence_is_signaled(fence);
	/* increment the counter only if soft reset worked */
	if (ret)
		atomic_inc(&ring->adev->gpu_reset_counter);

	return ret;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static ssize_t amdgpu_ras_cper_debugfs_read(struct file *f, char __user *buf,
					    size_t size, loff_t *offset)
{
	const uint8_t ring_header_size = 12;
	struct amdgpu_ring *ring = file_inode(f)->i_private;
	struct ras_cmd_cper_snapshot_req *snapshot_req __free(kfree) =
		kzalloc_obj(struct ras_cmd_cper_snapshot_req);
	struct ras_cmd_cper_snapshot_rsp *snapshot_rsp __free(kfree) =
		kzalloc_obj(struct ras_cmd_cper_snapshot_rsp);
	struct ras_cmd_cper_record_req *record_req __free(kfree) =
		kzalloc_obj(struct ras_cmd_cper_record_req);
	struct ras_cmd_cper_record_rsp *record_rsp __free(kfree) =
		kzalloc_obj(struct ras_cmd_cper_record_rsp);
	uint8_t *ring_header __free(kfree) =
		kzalloc(ring_header_size, GFP_KERNEL);
	uint32_t total_cper_num;
	uint64_t start_cper_id;
	int r;

	if (!snapshot_req || !snapshot_rsp || !record_req || !record_rsp ||
	    !ring_header)
		return -ENOMEM;

	if (!(*offset)) {
		/* Need at least 12 bytes for the header on the first read */
		if (size < ring_header_size)
			return -EINVAL;

		if (copy_to_user(buf, ring_header, ring_header_size))
			return -EFAULT;
		buf += ring_header_size;
		size -= ring_header_size;
	}

	r = amdgpu_ras_mgr_handle_ras_cmd(ring->adev,
					  RAS_CMD__GET_CPER_SNAPSHOT,
					  snapshot_req, sizeof(struct ras_cmd_cper_snapshot_req),
					  snapshot_rsp, sizeof(struct ras_cmd_cper_snapshot_rsp));
	if (r || !snapshot_rsp->total_cper_num)
		return r;

	start_cper_id = snapshot_rsp->start_cper_id;
	total_cper_num = snapshot_rsp->total_cper_num;

	record_req->buf_ptr = (uint64_t)(uintptr_t)buf;
	record_req->buf_size = size;
	record_req->cper_start_id = start_cper_id + *offset;
	record_req->cper_num = total_cper_num;
	r = amdgpu_ras_mgr_handle_ras_cmd(ring->adev, RAS_CMD__GET_CPER_RECORD,
					  record_req, sizeof(struct ras_cmd_cper_record_req),
					  record_rsp, sizeof(struct ras_cmd_cper_record_rsp));
	if (r)
		return r;

	r = *offset ? record_rsp->real_data_size : record_rsp->real_data_size + ring_header_size;
	(*offset) += record_rsp->real_cper_num;

	return r;
}

/* Layout of file is 12 bytes consisting of
 * - rptr
 * - wptr
 * - driver's copy of wptr
 *
 * followed by n-words of ring data
 */
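
/*
 * A hedged userspace sketch of consuming that layout (illustrative only,
 * not part of the driver; decode_packet() is a hypothetical helper):
 *
 *   uint32_t hdr[3], dw;
 *   read(fd, hdr, sizeof(hdr));                     // rptr, wptr, driver wptr
 *   while (read(fd, &dw, sizeof(dw)) == sizeof(dw))
 *           decode_packet(dw);                      // n dwords of ring data
 */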
static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_ring *ring = file_inode(f)->i_private;
	u32 value, result, early[3] = { 0 };
	uint64_t p;
	u32 avail_dw, start_dw, read_dw;
	loff_t i;
	int r;

	if (ring->funcs->type == AMDGPU_RING_TYPE_CPER && amdgpu_uniras_enabled(ring->adev))
		return amdgpu_ras_cper_debugfs_read(f, buf, size, pos);

	if (*pos & 3 || size & 3)
		return -EINVAL;

	result = 0;

	if (ring->funcs->type == AMDGPU_RING_TYPE_CPER)
		mutex_lock(&ring->adev->cper.ring_lock);

	if (*pos < 12) {
		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
		early[2] = ring->wptr & ring->buf_mask;
		for (i = *pos / 4; i < 3 && size; i++) {
			r = put_user(early[i], (uint32_t *)buf);
			if (r) {
				result = r;
				goto out;
			}
			buf += 4;
			result += 4;
			size -= 4;
			*pos += 4;
		}
	}

	if (ring->funcs->type != AMDGPU_RING_TYPE_CPER) {
		while (size) {
			if (*pos >= (ring->ring_size + 12))
				return result;

			value = ring->ring[(*pos - 12)/4];
			r = put_user(value, (uint32_t *)buf);
			if (r)
				return r;
			buf += 4;
			result += 4;
			size -= 4;
			*pos += 4;
		}
	} else {
		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;

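		/*
		 * Dwords available between rptr and wptr; if rptr is ahead
		 * of wptr the valid data wraps around the end of the buffer.
		 */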
		p = early[0];
		if (early[0] <= early[1])
			avail_dw = early[1] - early[0];
		else
			avail_dw = ring->buf_mask + 1 - (early[0] - early[1]);

		start_dw = (*pos > 12) ? ((*pos - 12) >> 2) : 0;
		if (start_dw >= avail_dw)
			goto out;

		p = (p + start_dw) & ring->ptr_mask;
		avail_dw -= start_dw;
		read_dw = min_t(u32, avail_dw, size >> 2);

		while (read_dw) {
			if (p == early[1])
				goto out;

			value = ring->ring[p];
			r = put_user(value, (uint32_t *)buf);
			if (r) {
				result = r;
				goto out;
			}

			buf += 4;
			result += 4;
			read_dw--;
			p++;
			p &= ring->ptr_mask;
			*pos += 4;
		}
	}

out:
	if (ring->funcs->type == AMDGPU_RING_TYPE_CPER)
		mutex_unlock(&ring->adev->cper.ring_lock);

	return result;
}

static ssize_t amdgpu_debugfs_virt_ring_read(struct file *f, char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_ring *ring = file_inode(f)->i_private;

	if (*pos & 3 || size & 3)
		return -EINVAL;

	if (ring->funcs->type == AMDGPU_RING_TYPE_CPER)
		amdgpu_virt_req_ras_cper_dump(ring->adev, false);

	return amdgpu_debugfs_ring_read(f, buf, size, pos);
}

static const struct file_operations amdgpu_debugfs_ring_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_ring_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_virt_ring_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_virt_ring_read,
	.llseek = default_llseek
};

static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
				       size_t size, loff_t *pos)
{
	struct amdgpu_ring *ring = file_inode(f)->i_private;
	ssize_t bytes = min_t(ssize_t, ring->mqd_size - *pos, size);
	void *from = ((u8 *)ring->mqd_ptr) + *pos;

	if (*pos > ring->mqd_size)
		return 0;

	if (copy_to_user(buf, from, bytes))
		return -EFAULT;

	*pos += bytes;
	return bytes;
}

static const struct file_operations amdgpu_debugfs_mqd_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_mqd_read,
	.llseek = default_llseek
};

static int amdgpu_debugfs_ring_error(void *data, u64 val)
{
	struct amdgpu_ring *ring = data;

	amdgpu_fence_driver_set_error(ring, val);
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(amdgpu_debugfs_error_fops, NULL,
				amdgpu_debugfs_ring_error, "%lld\n");

#endif

void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	sprintf(name, "amdgpu_ring_%s", ring->name);
	if (amdgpu_sriov_vf(adev))
		debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
					 &amdgpu_debugfs_virt_ring_fops,
					 ring->ring_size + 12);
	else
		debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
					 &amdgpu_debugfs_ring_fops,
					 ring->ring_size + 12);

	if (ring->mqd_obj) {
		sprintf(name, "amdgpu_mqd_%s", ring->name);
		debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
					 &amdgpu_debugfs_mqd_fops,
					 ring->mqd_size);
	}

	sprintf(name, "amdgpu_error_%s", ring->name);
	debugfs_create_file(name, 0200, root, ring,
			    &amdgpu_debugfs_error_fops);

#endif
}

/**
 * amdgpu_ring_test_helper - test the ring and set sched readiness status
 *
 * @ring: ring to test
 *
 * Tests the ring and sets the scheduler readiness status accordingly.
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	r = amdgpu_ring_test_ring(ring);
	if (r)
		DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
			      ring->name, r);
	else
		DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n",
			      ring->name);

	ring->sched.ready = !r;

	return r;
}

static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring,
				    struct amdgpu_mqd_prop *prop)
{
	struct amdgpu_device *adev = ring->adev;
	bool is_high_prio_compute = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
				    amdgpu_gfx_is_high_priority_compute_queue(adev, ring);
	bool is_high_prio_gfx = ring->funcs->type == AMDGPU_RING_TYPE_GFX &&
				amdgpu_gfx_is_high_priority_graphics_queue(adev, ring);

	memset(prop, 0, sizeof(*prop));

	prop->mqd_gpu_addr = ring->mqd_gpu_addr;
	prop->hqd_base_gpu_addr = ring->gpu_addr;
	prop->rptr_gpu_addr = ring->rptr_gpu_addr;
	prop->wptr_gpu_addr = ring->wptr_gpu_addr;
	prop->queue_size = ring->ring_size;
	prop->eop_gpu_addr = ring->eop_gpu_addr;
	prop->use_doorbell = ring->use_doorbell;
	prop->doorbell_index = ring->doorbell_index;
	prop->kernel_queue = true;

	/* The map_queues packet doesn't need to activate the queue,
	 * so only the KIQ needs to set this field.
	 */
	prop->hqd_active = ring->funcs->type == AMDGPU_RING_TYPE_KIQ;

	prop->allow_tunneling = is_high_prio_compute;
	if (is_high_prio_compute || is_high_prio_gfx) {
		prop->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
		prop->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
	}
}

int amdgpu_ring_init_mqd(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_mqd *mqd_mgr;
	struct amdgpu_mqd_prop prop;

	amdgpu_ring_to_mqd_prop(ring, &prop);

	ring->wptr = 0;

	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		mqd_mgr = &adev->mqds[AMDGPU_HW_IP_COMPUTE];
	else
		mqd_mgr = &adev->mqds[ring->funcs->type];

	return mqd_mgr->init_mqd(adev, ring->mqd_ptr, &prop);
}

void amdgpu_ring_ib_begin(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_begin(ring);
}

void amdgpu_ring_ib_end(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_end(ring);
}

void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CONTROL);
}

void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CE);
}

void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE);
}

bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring)
{
	if (!ring)
		return false;

	if (ring->no_scheduler || !drm_sched_wqueue_ready(&ring->sched))
		return false;

	return true;
}

void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
				    struct amdgpu_fence *guilty_fence)
{
	/* back up the non-guilty commands */
	amdgpu_ring_backup_unprocessed_commands(ring, guilty_fence);
}

int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
				 struct amdgpu_fence *guilty_fence)
{
	int r;

	/* verify that the ring is functional */
	r = amdgpu_ring_test_ring(ring);
	if (r)
		return r;

	/* set an error on all fences from the context and reemit */
	amdgpu_ring_set_fence_errors_and_reemit(ring, guilty_fence);

	return 0;
}

bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring,
					 u32 reset_type)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		if (ring->adev->gfx.gfx_supported_reset & reset_type)
			return true;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		if (ring->adev->gfx.compute_supported_reset & reset_type)
			return true;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		if (ring->adev->sdma.supported_reset & reset_type)
			return true;
		break;
	case AMDGPU_RING_TYPE_VCN_DEC:
	case AMDGPU_RING_TYPE_VCN_ENC:
		if (ring->adev->vcn.supported_reset & reset_type)
			return true;
		break;
	case AMDGPU_RING_TYPE_VCN_JPEG:
		if (ring->adev->jpeg.supported_reset & reset_type)
			return true;
		break;
	default:
		break;
	}
	return false;
}