/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_trace.h"

#define AMDGPU_IB_TEST_TIMEOUT		msecs_to_jiffies(1000)
#define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT	msecs_to_jiffies(2000)

/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU-accessible memory where
 * commands are stored. You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them. Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
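
/*
 * A minimal lifecycle sketch, modeled on the ring IB tests found elsewhere
 * in the driver; the 256-byte size and the packet contents are assumptions
 * for illustration, not requirements:
 *
 *	struct amdgpu_ib ib;
 *	struct dma_fence *f = NULL;
 *	int r;
 *
 *	memset(&ib, 0, sizeof(ib));
 *	r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
 *	if (r)
 *		return r;
 *	// fill ib.ptr[] with packets and set ib.length_dw to match
 *	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 *	// ... wait on f, then release the suballocation and the fence
 *	amdgpu_ib_free(adev, &ib, f);
 *	dma_fence_put(f);
 */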

/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm pointer
 * @size: requested IB size
 * @pool_type: IB pool type (delayed, immediate, direct)
 * @ib: IB object returned
 *
 * Request an IB (all asics). IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned int size, enum amdgpu_ib_pool_type pool_type,
		  struct amdgpu_ib *ib)
{
	int r;

	if (size) {
		r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type],
				     &ib->sa_bo, size);
		if (r) {
			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
			return r;
		}

		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
		/* flush the cache before committing the IB */
		ib->flags = AMDGPU_IB_FLAG_EMIT_MEM_SYNC;

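		/* With no VM, the IB executes at the suballocation's GTT
		 * address; with a VM, the caller is expected to set gpu_addr
		 * from its own VM mapping of the IB.
		 */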
		if (!vm)
			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
	}

	return 0;
}

/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @ib: IB object to free
 * @f: the fence the SA bo needs to wait on before the IB allocation is reused
 *
 * Free an IB (all asics).
 */
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f)
{
	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}

/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @ring: ring the IBs are scheduled on
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @job: job to schedule
 * @f: fence created during this submission
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine). Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed. To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE. If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
 * to SI there was just a DE IB.
 */
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib *ib = &ibs[0];
	struct dma_fence *tmp = NULL;
	bool need_ctx_switch;
	struct amdgpu_vm *vm;
	uint64_t fence_ctx;
	uint32_t status = 0, alloc_size;
	unsigned int fence_flags = 0;
	bool secure, init_shadow;
	u64 shadow_va, csa_va, gds_va;
	int vmid = AMDGPU_JOB_GET_VMID(job);
	bool need_pipe_sync = false;
	unsigned int cond_exec;

	unsigned int i;
	int r = 0;

	if (num_ibs == 0)
		return -EINVAL;

	/* ring tests don't use a job */
	if (job) {
		vm = job->vm;
		fence_ctx = job->base.s_fence ?
			job->base.s_fence->scheduled.context : 0;
		shadow_va = job->shadow_va;
		csa_va = job->csa_va;
		gds_va = job->gds_va;
		init_shadow = job->init_shadow;
	} else {
		vm = NULL;
		fence_ctx = 0;
		shadow_va = 0;
		csa_va = 0;
		gds_va = 0;
		init_shadow = false;
	}

	if (!ring->sched.ready && !ring->is_mes_queue) {
		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
		return -EINVAL;
	}

	if (vm && !job->vmid && !ring->is_mes_queue) {
		dev_err(adev->dev, "VM IB without ID\n");
		return -EINVAL;
	}

	if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) &&
	    (!ring->funcs->secure_submission_supported)) {
		dev_err(adev->dev, "secure submissions not supported on ring <%s>\n", ring->name);
		return -EINVAL;
	}

	alloc_size = ring->funcs->emit_frame_size + num_ibs *
		ring->funcs->emit_ib_size;

	r = amdgpu_ring_alloc(ring, alloc_size);
	if (r) {
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}

	need_ctx_switch = ring->current_ctx != fence_ctx;
	if (ring->funcs->emit_pipeline_sync && job &&
	    ((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) ||
	     (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
	     amdgpu_vm_need_pipeline_sync(ring, job))) {
		need_pipe_sync = true;

		if (tmp)
			trace_amdgpu_ib_pipe_sync(job, tmp);

		dma_fence_put(tmp);
	}

	if ((ib->flags & AMDGPU_IB_FLAG_EMIT_MEM_SYNC) && ring->funcs->emit_mem_sync)
		ring->funcs->emit_mem_sync(ring);

	if (ring->funcs->emit_wave_limit &&
	    ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
		ring->funcs->emit_wave_limit(ring, true);

	if (ring->funcs->insert_start)
		ring->funcs->insert_start(ring);

	if (job) {
		r = amdgpu_vm_flush(ring, job, need_pipe_sync);
		if (r) {
			amdgpu_ring_undo(ring);
			return r;
		}
	}

	amdgpu_ring_ib_begin(ring);

	if (ring->funcs->emit_gfx_shadow)
		amdgpu_ring_emit_gfx_shadow(ring, shadow_va, csa_va, gds_va,
					    init_shadow, vmid);

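	/* Open a conditional-execution section; the packet emitted here is
	 * patched with the real size at the end of the submission, see
	 * amdgpu_ring_patch_cond_exec() below.
	 */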
	if (ring->funcs->init_cond_exec)
		cond_exec = amdgpu_ring_init_cond_exec(ring,
						       ring->cond_exe_gpu_addr);

	amdgpu_device_flush_hdp(adev, ring);

	if (need_ctx_switch)
		status |= AMDGPU_HAVE_CTX_SWITCH;

	if (job && ring->funcs->emit_cntxcntl) {
		status |= job->preamble_status;
		status |= job->preemption_status;
		amdgpu_ring_emit_cntxcntl(ring, status);
	}

	/* Setup initial TMZiness and send it off. */
	secure = false;
	if (job && ring->funcs->emit_frame_cntl) {
		secure = ib->flags & AMDGPU_IB_FLAGS_SECURE;
		amdgpu_ring_emit_frame_cntl(ring, true, secure);
	}

	for (i = 0; i < num_ibs; ++i) {
		ib = &ibs[i];

		if (job && ring->funcs->emit_frame_cntl) {
			if (secure != !!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) {
				amdgpu_ring_emit_frame_cntl(ring, false, secure);
				secure = !secure;
				amdgpu_ring_emit_frame_cntl(ring, true, secure);
			}
		}

		amdgpu_ring_emit_ib(ring, job, ib, status);
		status &= ~AMDGPU_HAVE_CTX_SWITCH;
	}

	if (job && ring->funcs->emit_frame_cntl)
		amdgpu_ring_emit_frame_cntl(ring, false, secure);

	amdgpu_device_invalidate_hdp(adev, ring);

	if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
		fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;

	/* wrap the last IB with a fence */
	if (job && job->uf_addr) {
		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
	}

	if (ring->funcs->emit_gfx_shadow && ring->funcs->init_cond_exec) {
		amdgpu_ring_emit_gfx_shadow(ring, 0, 0, 0, false, 0);
		amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr);
	}

	r = amdgpu_fence_emit(ring, f, job, fence_flags);
	if (r) {
		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
		if (job && job->vmid)
			amdgpu_vmid_reset(adev, ring->vm_hub, job->vmid);
		amdgpu_ring_undo(ring);
		return r;
	}

	if (ring->funcs->insert_end)
		ring->funcs->insert_end(ring);

	amdgpu_ring_patch_cond_exec(ring, cond_exec);

	ring->current_ctx = fence_ctx;
	if (vm && ring->funcs->emit_switch_buffer)
		amdgpu_ring_emit_switch_buffer(ring);

	if (ring->funcs->emit_wave_limit &&
	    ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
		ring->funcs->emit_wave_limit(ring, false);

	amdgpu_ring_ib_end(ring);
	amdgpu_ring_commit(ring);
	return 0;
}
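
/*
 * A hedged sketch of the two-IB CE + DE submission described in the
 * kernel-doc above; AMDGPU_IB_FLAG_CE comes from amdgpu_drm.h, while the
 * surrounding setup (ibs, job, ring, f) is assumed:
 *
 *	struct amdgpu_ib ibs[2];
 *
 *	ibs[0].flags |= AMDGPU_IB_FLAG_CE;	// CONST_IB, goes on the ring first
 *	// ibs[1] is the regular DE IB
 *	r = amdgpu_ib_schedule(ring, 2, ibs, job, &f);
 */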

/**
 * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
	int r, i;

	if (adev->ib_pool_ready)
		return 0;

	for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
		r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
					      AMDGPU_IB_POOL_SIZE, 256,
					      AMDGPU_GEM_DOMAIN_GTT);
		if (r)
			goto error;
	}
	adev->ib_pool_ready = true;

	return 0;

error:
	while (i--)
		amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
	return r;
}
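
/*
 * A hedged usage sketch; the call sites are assumptions, not taken from this
 * file. The pool is created once at device bring-up and torn down on the way
 * out, and the adev->ib_pool_ready flag makes a repeated init call a no-op:
 *
 *	r = amdgpu_ib_pool_init(adev);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_ib_pool_fini(adev);
 */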

/**
 * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
	int i;

	if (!adev->ib_pool_ready)
		return;

	for (i = 0; i < AMDGPU_IB_POOL_MAX; i++)
		amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
	adev->ib_pool_ready = false;
}

/**
 * amdgpu_ib_ring_tests - test IBs on the rings
 *
 * @adev: amdgpu_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
	long tmo_gfx, tmo_mm;
	int r, ret = 0;
	unsigned int i;

	tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
	if (amdgpu_sriov_vf(adev)) {
		/* On the hypervisor side the MM engines are not scheduled
		 * together with the CP and SDMA engines, so even in exclusive
		 * mode an MM engine may still be running on another VF. The
		 * IB test timeout for MM engines under SR-IOV therefore needs
		 * to be long; 8 sec should be enough for the MM engine to
		 * come back to this VF.
		 */
		tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
	}

	if (amdgpu_sriov_runtime(adev)) {
		/* The CP and SDMA engines are scheduled together, so the
		 * timeout needs to be wide enough to cover the time spent
		 * waiting for them to come back under RUNTIME only.
		 */
		tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
	} else if (adev->gmc.xgmi.hive_id) {
		tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
	}

	for (i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		long tmo;

		/* KIQ rings don't have an IB test because we never submit IBs
		 * to them and they have no interrupt support.
		 */
		if (!ring->sched.ready || !ring->funcs->test_ib)
			continue;

		if (adev->enable_mes &&
		    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			continue;

		/* MM engines need more time */
		if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
		    ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			tmo = tmo_mm;
		else
			tmo = tmo_gfx;

		r = amdgpu_ring_test_ib(ring, tmo);
		if (!r) {
			DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
				      ring->name);
			continue;
		}

		ring->sched.ready = false;
		DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d).\n",
			      ring->name, r);

		if (ring == &adev->gfx.gfx_ring[0]) {
			/* oh, oh, that's really bad */
			adev->accel_working = false;
			return r;

		} else {
			ret = r;
		}
	}
	return ret;
}
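
/*
 * A hedged usage sketch; where this runs is an assumption, not taken from
 * this file. A caller typically just logs a failure, since the function
 * already disables failing rings and clears adev->accel_working when the
 * primary GFX ring fails:
 *
 *	r = amdgpu_ib_ring_tests(adev);
 *	if (r)
 *		dev_err(adev->dev, "IB ring tests failed (%d)\n", r);
 */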

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_sa_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;

	seq_puts(m, "--------------------- DELAYED ---------------------\n");
	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
				     m);
	seq_puts(m, "-------------------- IMMEDIATE --------------------\n");
	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_IMMEDIATE],
				     m);
	seq_puts(m, "--------------------- DIRECT ----------------------\n");
	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT], m);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_sa_info);

#endif

void amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_sa_info", 0444, root, adev,
			    &amdgpu_debugfs_sa_info_fops);

#endif
}