// SPDX-License-Identifier: MIT
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <drm/drm_exec.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_umsch_mm.h"
#include "umsch_mm_v4_0.h"

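/*
 * Scratch data used by the UMSCH self-test below: per-process and
 * per-context save areas (CSA), MQDs and ring/IB buffers, one page
 * each, plus the bookkeeping needed to map them into a test VM.
 */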
struct umsch_mm_test_ctx_data {
	uint8_t process_csa[PAGE_SIZE];
	uint8_t vpe_ctx_csa[PAGE_SIZE];
	uint8_t vcn_ctx_csa[PAGE_SIZE];
};

struct umsch_mm_test_mqd_data {
	uint8_t vpe_mqd[PAGE_SIZE];
	uint8_t vcn_mqd[PAGE_SIZE];
};

struct umsch_mm_test_ring_data {
	uint8_t vpe_ring[PAGE_SIZE];
	uint8_t vpe_ib[PAGE_SIZE];
	uint8_t vcn_ring[PAGE_SIZE];
	uint8_t vcn_ib[PAGE_SIZE];
};

struct umsch_mm_test_queue_info {
	uint64_t mqd_addr;
	uint64_t csa_addr;
	uint32_t doorbell_offset_0;
	uint32_t doorbell_offset_1;
	enum UMSCH_SWIP_ENGINE_TYPE engine;
};

struct umsch_mm_test {
	struct amdgpu_bo *ctx_data_obj;
	uint64_t ctx_data_gpu_addr;
	uint32_t *ctx_data_cpu_addr;

	struct amdgpu_bo *mqd_data_obj;
	uint64_t mqd_data_gpu_addr;
	uint32_t *mqd_data_cpu_addr;

	struct amdgpu_bo *ring_data_obj;
	uint64_t ring_data_gpu_addr;
	uint32_t *ring_data_cpu_addr;

	struct amdgpu_vm *vm;
	struct amdgpu_bo_va *bo_va;
	uint32_t pasid;
	uint32_t vm_cntx_cntl;
	uint32_t num_queues;
};

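/*
 * Map @bo at @addr in the test VM: lock the BO and the VM page
 * directory with drm_exec, create a bo_va with RWX PTE flags, then
 * wait for the page-table updates to land before returning.
 */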
static int map_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			 struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
			 uint64_t addr, uint32_t size)
{
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec, &bo->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	*bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!*bo_va) {
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, *bo_va, addr, 0, size,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r)
		goto error_del_bo_va;

	r = amdgpu_vm_bo_update(adev, *bo_va, false);
	if (r)
		goto error_del_bo_va;

	amdgpu_sync_fence(&sync, (*bo_va)->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r)
		goto error_del_bo_va;

	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);

	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, *bo_va);
	amdgpu_sync_free(&sync);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}

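/* Tear down the mapping created by map_ring_data(). */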
static int unmap_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			   struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va,
			   uint64_t addr)
{
	struct drm_exec exec;
	int r;

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec, &bo->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	r = amdgpu_vm_bo_unmap(adev, bo_va, addr);
	if (r)
		goto out_unlock;

	amdgpu_vm_bo_del(adev, bo_va);

out_unlock:
	drm_exec_fini(&exec);

	return r;
}

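/*
 * Fill in a minimal VPE MQD: ring base/size and zeroed read/write
 * pointers.  In collaborate mode the MQD is duplicated into the next
 * slot, presumably for the paired VPE instance.
 */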
static void setup_vpe_queue(struct amdgpu_device *adev,
			    struct umsch_mm_test *test,
			    struct umsch_mm_test_queue_info *qinfo)
{
	struct MQD_INFO *mqd = (struct MQD_INFO *)test->mqd_data_cpu_addr;
	uint64_t ring_gpu_addr = test->ring_data_gpu_addr;

	mqd->rb_base_lo = (ring_gpu_addr >> 8);
	mqd->rb_base_hi = (ring_gpu_addr >> 40);
	mqd->rb_size = PAGE_SIZE / 4;
	mqd->wptr_val = 0;
	mqd->rptr_val = 0;
	mqd->unmapped = 1;

	if (adev->vpe.collaborate_mode)
		memcpy(++mqd, test->mqd_data_cpu_addr, sizeof(struct MQD_INFO));

	qinfo->mqd_addr = test->mqd_data_gpu_addr;
	qinfo->csa_addr = test->ctx_data_gpu_addr +
		offsetof(struct umsch_mm_test_ctx_data, vpe_ctx_csa);
	qinfo->doorbell_offset_0 = 0;
	qinfo->doorbell_offset_1 = 0;
}

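/* VCN queue setup is not implemented; the self-test only exercises VPE. */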
static void setup_vcn_queue(struct amdgpu_device *adev,
			    struct umsch_mm_test *test,
			    struct umsch_mm_test_queue_info *qinfo)
{
}

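/*
 * Describe one queue to the scheduler firmware (CSA addresses, quanta,
 * priorities, doorbells, MQD) and issue ADD_QUEUE under the umsch lock.
 */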
static int add_test_queue(struct amdgpu_device *adev,
			  struct umsch_mm_test *test,
			  struct umsch_mm_test_queue_info *qinfo)
{
	struct umsch_mm_add_queue_input queue_input = {};
	int r;

	queue_input.process_id = test->pasid;
	queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(test->vm->root.bo);

	queue_input.process_va_start = 0;
	queue_input.process_va_end = (adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;

	queue_input.process_quantum = 100000; /* 10ms */
	queue_input.process_csa_addr = test->ctx_data_gpu_addr +
				       offsetof(struct umsch_mm_test_ctx_data, process_csa);

	queue_input.context_quantum = 10000; /* 1ms */
	queue_input.context_csa_addr = qinfo->csa_addr;

	queue_input.inprocess_context_priority = CONTEXT_PRIORITY_LEVEL_NORMAL;
	queue_input.context_global_priority_level = CONTEXT_PRIORITY_LEVEL_NORMAL;
	queue_input.doorbell_offset_0 = qinfo->doorbell_offset_0;
	queue_input.doorbell_offset_1 = qinfo->doorbell_offset_1;

	queue_input.engine_type = qinfo->engine;
	queue_input.mqd_addr = qinfo->mqd_addr;
	queue_input.vm_context_cntl = test->vm_cntx_cntl;

	amdgpu_umsch_mm_lock(&adev->umsch_mm);
	r = adev->umsch_mm.funcs->add_queue(&adev->umsch_mm, &queue_input);
	amdgpu_umsch_mm_unlock(&adev->umsch_mm);
	if (r)
		return r;

	return 0;
}

static int remove_test_queue(struct amdgpu_device *adev,
			     struct umsch_mm_test *test,
			     struct umsch_mm_test_queue_info *qinfo)
{
	struct umsch_mm_remove_queue_input queue_input = {};
	int r;

	queue_input.doorbell_offset_0 = qinfo->doorbell_offset_0;
	queue_input.doorbell_offset_1 = qinfo->doorbell_offset_1;
	queue_input.context_csa_addr = qinfo->csa_addr;

	amdgpu_umsch_mm_lock(&adev->umsch_mm);
	r = adev->umsch_mm.funcs->remove_queue(&adev->umsch_mm, &queue_input);
	amdgpu_umsch_mm_unlock(&adev->umsch_mm);
	if (r)
		return r;

	return 0;
}

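/*
 * Submit one indirect buffer on the VPE test ring and busy-wait for
 * completion.  The IB holds a single FENCE command that writes
 * 0xdeadbeef to a CPU-visible location 2048 bytes into the vpe_ib
 * page; the ring holds the matching INDIRECT command.
 */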
static int submit_vpe_queue(struct amdgpu_device *adev, struct umsch_mm_test *test)
{
	struct MQD_INFO *mqd = (struct MQD_INFO *)test->mqd_data_cpu_addr;
	uint32_t *ring = test->ring_data_cpu_addr +
		offsetof(struct umsch_mm_test_ring_data, vpe_ring) / 4;
	uint32_t *ib = test->ring_data_cpu_addr +
		offsetof(struct umsch_mm_test_ring_data, vpe_ib) / 4;
	uint64_t ib_gpu_addr = test->ring_data_gpu_addr +
		offsetof(struct umsch_mm_test_ring_data, vpe_ib);
	uint32_t *fence = ib + 2048 / 4;
	uint64_t fence_gpu_addr = ib_gpu_addr + 2048;
	const uint32_t test_pattern = 0xdeadbeef;
	int i;

	ib[0] = VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0);
	ib[1] = lower_32_bits(fence_gpu_addr);
	ib[2] = upper_32_bits(fence_gpu_addr);
	ib[3] = test_pattern;

	ring[0] = VPE_CMD_HEADER(VPE_CMD_OPCODE_INDIRECT, 0);
	ring[1] = (ib_gpu_addr & 0xffffffe0);
	ring[2] = upper_32_bits(ib_gpu_addr);
	ring[3] = 4;
	ring[4] = 0;
	ring[5] = 0;

	mqd->wptr_val = (6 << 2);
	if (adev->vpe.collaborate_mode)
		(++mqd)->wptr_val = (6 << 2);

	WDOORBELL32(adev->umsch_mm.agdb_index[CONTEXT_PRIORITY_LEVEL_NORMAL], mqd->wptr_val);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (*fence == test_pattern)
			return 0;
		udelay(1);
	}

	dev_err(adev->dev, "vpe queue submission timeout\n");

	return -ETIMEDOUT;
}

static int submit_vcn_queue(struct amdgpu_device *adev, struct umsch_mm_test *test)
{
	return 0;
}

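/*
 * Build the test environment: a private GPU VM with its own PASID and
 * kernel BOs for the CSA, MQD and ring data, with the ring data mapped
 * into the VM at the bottom of the reserved VA range.
 */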
static int setup_umsch_mm_test(struct amdgpu_device *adev,
			       struct umsch_mm_test *test)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
	int r;

	test->vm_cntx_cntl = hub->vm_cntx_cntl;

	test->vm = kzalloc(sizeof(*test->vm), GFP_KERNEL);
	if (!test->vm) {
		r = -ENOMEM;
		return r;
	}

	r = amdgpu_vm_init(adev, test->vm, -1);
	if (r)
		goto error_free_vm;

	r = amdgpu_pasid_alloc(16);
	if (r < 0)
		goto error_fini_vm;
	test->pasid = r;

	r = amdgpu_bo_create_kernel(adev, sizeof(struct umsch_mm_test_ctx_data),
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &test->ctx_data_obj,
				    &test->ctx_data_gpu_addr,
				    (void **)&test->ctx_data_cpu_addr);
	if (r)
		goto error_free_pasid;

	memset(test->ctx_data_cpu_addr, 0, sizeof(struct umsch_mm_test_ctx_data));

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &test->mqd_data_obj,
				    &test->mqd_data_gpu_addr,
				    (void **)&test->mqd_data_cpu_addr);
	if (r)
		goto error_free_ctx_data_obj;

	memset(test->mqd_data_cpu_addr, 0, PAGE_SIZE);

	r = amdgpu_bo_create_kernel(adev, sizeof(struct umsch_mm_test_ring_data),
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &test->ring_data_obj,
				    NULL,
				    (void **)&test->ring_data_cpu_addr);
	if (r)
		goto error_free_mqd_data_obj;

	memset(test->ring_data_cpu_addr, 0, sizeof(struct umsch_mm_test_ring_data));

	test->ring_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
	r = map_ring_data(adev, test->vm, test->ring_data_obj, &test->bo_va,
			  test->ring_data_gpu_addr, sizeof(struct umsch_mm_test_ring_data));
	if (r)
		goto error_free_ring_data_obj;

	return 0;

error_free_ring_data_obj:
	amdgpu_bo_free_kernel(&test->ring_data_obj, NULL,
			      (void **)&test->ring_data_cpu_addr);
error_free_mqd_data_obj:
	amdgpu_bo_free_kernel(&test->mqd_data_obj, &test->mqd_data_gpu_addr,
			      (void **)&test->mqd_data_cpu_addr);
error_free_ctx_data_obj:
	amdgpu_bo_free_kernel(&test->ctx_data_obj, &test->ctx_data_gpu_addr,
			      (void **)&test->ctx_data_cpu_addr);
error_free_pasid:
	amdgpu_pasid_free(test->pasid);
error_fini_vm:
	amdgpu_vm_fini(adev, test->vm);
error_free_vm:
	kfree(test->vm);

	return r;
}

static void cleanup_umsch_mm_test(struct amdgpu_device *adev,
				  struct umsch_mm_test *test)
{
	unmap_ring_data(adev, test->vm, test->ring_data_obj,
			test->bo_va, test->ring_data_gpu_addr);
	amdgpu_bo_free_kernel(&test->mqd_data_obj, &test->mqd_data_gpu_addr,
			      (void **)&test->mqd_data_cpu_addr);
	amdgpu_bo_free_kernel(&test->ring_data_obj, NULL,
			      (void **)&test->ring_data_cpu_addr);
	amdgpu_bo_free_kernel(&test->ctx_data_obj, &test->ctx_data_gpu_addr,
			      (void **)&test->ctx_data_cpu_addr);
	amdgpu_pasid_free(test->pasid);
	amdgpu_vm_fini(adev, test->vm);
	kfree(test->vm);
}

static int setup_test_queues(struct amdgpu_device *adev,
			     struct umsch_mm_test *test,
			     struct umsch_mm_test_queue_info *qinfo)
{
	int i, r;

	for (i = 0; i < test->num_queues; i++) {
		if (qinfo[i].engine == UMSCH_SWIP_ENGINE_TYPE_VPE)
			setup_vpe_queue(adev, test, &qinfo[i]);
		else
			setup_vcn_queue(adev, test, &qinfo[i]);

		r = add_test_queue(adev, test, &qinfo[i]);
		if (r)
			return r;
	}

	return 0;
}

static int submit_test_queues(struct amdgpu_device *adev,
			      struct umsch_mm_test *test,
			      struct umsch_mm_test_queue_info *qinfo)
{
	int i, r;

	for (i = 0; i < test->num_queues; i++) {
		if (qinfo[i].engine == UMSCH_SWIP_ENGINE_TYPE_VPE)
			r = submit_vpe_queue(adev, test);
		else
			r = submit_vcn_queue(adev, test);
		if (r)
			return r;
	}

	return 0;
}

static void cleanup_test_queues(struct amdgpu_device *adev,
				struct umsch_mm_test *test,
				struct umsch_mm_test_queue_info *qinfo)
{
	int i;

	for (i = 0; i < test->num_queues; i++)
		remove_test_queue(adev, test, &qinfo[i]);
}

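/*
 * End-to-end scheduler self-test, run from late_init: map the test
 * buffers, add a VPE queue, push a fence IB through it, then remove
 * the queue and tear everything down.
 */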
static int umsch_mm_test(struct amdgpu_device *adev)
{
	struct umsch_mm_test_queue_info qinfo[] = {
		{ .engine = UMSCH_SWIP_ENGINE_TYPE_VPE },
	};
	struct umsch_mm_test test = { .num_queues = ARRAY_SIZE(qinfo) };
	int r;

	r = setup_umsch_mm_test(adev, &test);
	if (r)
		return r;

	r = setup_test_queues(adev, &test, qinfo);
	if (r)
		goto cleanup;

	r = submit_test_queues(adev, &test, qinfo);
	if (r)
		goto cleanup;

	cleanup_test_queues(adev, &test, qinfo);
	cleanup_umsch_mm_test(adev, &test);

	return 0;

cleanup:
	cleanup_test_queues(adev, &test, qinfo);
	cleanup_umsch_mm_test(adev, &test);
	return r;
}

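/* Copy a raw packet of @ndws dwords onto the UMSCH ring and kick it off. */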
int amdgpu_umsch_mm_submit_pkt(struct amdgpu_umsch_mm *umsch, void *pkt, int ndws)
{
	struct amdgpu_ring *ring = &umsch->ring;

	if (amdgpu_ring_alloc(ring, ndws))
		return -ENOMEM;

	amdgpu_ring_write_multiple(ring, pkt, ndws);
	amdgpu_ring_commit(ring);

	return 0;
}

int amdgpu_umsch_mm_query_fence(struct amdgpu_umsch_mm *umsch)
{
	struct amdgpu_ring *ring = &umsch->ring;
	struct amdgpu_device *adev = ring->adev;
	int r;

	r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq, adev->usec_timeout);
	if (r < 1) {
		dev_err(adev->dev, "ring umsch timeout, emitted fence %u\n",
			ring->fence_drv.sync_seq);
		return -ETIMEDOUT;
	}

	return 0;
}

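/*
 * Ring backend callbacks.  The write pointer is published through the
 * doorbell when one is assigned, otherwise through the RB wptr
 * register; pointer reads always go through the registers.
 */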
static void umsch_mm_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_umsch_mm *umsch = (struct amdgpu_umsch_mm *)ring;
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		WDOORBELL32(ring->doorbell_index, ring->wptr << 2);
	else
		WREG32(umsch->rb_wptr, ring->wptr << 2);
}

static u64 umsch_mm_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_umsch_mm *umsch = (struct amdgpu_umsch_mm *)ring;
	struct amdgpu_device *adev = ring->adev;

	return RREG32(umsch->rb_rptr);
}

static u64 umsch_mm_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_umsch_mm *umsch = (struct amdgpu_umsch_mm *)ring;
	struct amdgpu_device *adev = ring->adev;

	return RREG32(umsch->rb_wptr);
}

static const struct amdgpu_ring_funcs umsch_v4_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UMSCH_MM,
	.align_mask = 0,
	.nop = 0,
	.support_64bit_ptrs = false,
	.get_rptr = umsch_mm_ring_get_rptr,
	.get_wptr = umsch_mm_ring_get_wptr,
	.set_wptr = umsch_mm_ring_set_wptr,
	.insert_nop = amdgpu_ring_insert_nop,
};

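/* Create the UMSCH kernel ring on MMHUB0 with a dedicated doorbell. */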
int amdgpu_umsch_mm_ring_init(struct amdgpu_umsch_mm *umsch)
{
	struct amdgpu_device *adev = container_of(umsch, struct amdgpu_device, umsch_mm);
	struct amdgpu_ring *ring = &umsch->ring;

	ring->vm_hub = AMDGPU_MMHUB0(0);
	ring->use_doorbell = true;
	ring->no_scheduler = true;
	ring->doorbell_index = (AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1) + 6;

	snprintf(ring->name, sizeof(ring->name), "umsch");

	return amdgpu_ring_init(adev, ring, 1024, NULL, 0, AMDGPU_RING_PRIO_DEFAULT, NULL);
}

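/*
 * Fetch the UMSCH firmware, parse the v1.0 header for the ucode/data
 * sizes and start addresses, and register both sections for PSP
 * loading when the PSP front-door path is in use.
 */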
int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch)
{
	const struct umsch_mm_firmware_header_v1_0 *umsch_mm_hdr;
	struct amdgpu_device *adev = umsch->ring.adev;
	const char *fw_name = NULL;
	int r;

	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
	case IP_VERSION(4, 0, 5):
	case IP_VERSION(4, 0, 6):
		fw_name = "amdgpu/umsch_mm_4_0_0.bin";
		break;
	default:
		break;
	}

	/* Unreachable when early_init() has gated the IP version, but be safe. */
	if (!fw_name)
		return -EINVAL;

	r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, "%s", fw_name);
	if (r) {
		release_firmware(adev->umsch_mm.fw);
		adev->umsch_mm.fw = NULL;
		return r;
	}

	umsch_mm_hdr = (const struct umsch_mm_firmware_header_v1_0 *)adev->umsch_mm.fw->data;

	adev->umsch_mm.ucode_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_size_bytes);
	adev->umsch_mm.data_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_size_bytes);

	adev->umsch_mm.irq_start_addr =
		le32_to_cpu(umsch_mm_hdr->umsch_mm_irq_start_addr_lo) |
		((uint64_t)(le32_to_cpu(umsch_mm_hdr->umsch_mm_irq_start_addr_hi)) << 32);
	adev->umsch_mm.uc_start_addr =
		le32_to_cpu(umsch_mm_hdr->umsch_mm_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(umsch_mm_hdr->umsch_mm_uc_start_addr_hi)) << 32);
	adev->umsch_mm.data_start_addr =
		le32_to_cpu(umsch_mm_hdr->umsch_mm_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(umsch_mm_hdr->umsch_mm_data_start_addr_hi)) << 32);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		struct amdgpu_firmware_info *info;

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_UMSCH_MM_UCODE];
		info->ucode_id = AMDGPU_UCODE_ID_UMSCH_MM_UCODE;
		info->fw = adev->umsch_mm.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_UMSCH_MM_DATA];
		info->ucode_id = AMDGPU_UCODE_ID_UMSCH_MM_DATA;
		info->fw = adev->umsch_mm.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_size_bytes), PAGE_SIZE);
	}

	return 0;
}

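/*
 * Copy the ucode section of the firmware image into a pinned VRAM BO,
 * presumably so the microcontroller can fetch it directly; the helper
 * below does the same for the data section.
 */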
int amdgpu_umsch_mm_allocate_ucode_buffer(struct amdgpu_umsch_mm *umsch)
{
	const struct umsch_mm_firmware_header_v1_0 *umsch_mm_hdr;
	struct amdgpu_device *adev = umsch->ring.adev;
	const __le32 *fw_data;
	uint32_t fw_size;
	int r;

	umsch_mm_hdr = (const struct umsch_mm_firmware_header_v1_0 *)
		       adev->umsch_mm.fw->data;

	fw_data = (const __le32 *)(adev->umsch_mm.fw->data +
		  le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_offset_bytes));
	fw_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, fw_size,
				      4 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->umsch_mm.ucode_fw_obj,
				      &adev->umsch_mm.ucode_fw_gpu_addr,
				      (void **)&adev->umsch_mm.ucode_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create umsch_mm fw ucode bo\n", r);
		return r;
	}

	memcpy(adev->umsch_mm.ucode_fw_ptr, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->umsch_mm.ucode_fw_obj);
	amdgpu_bo_unreserve(adev->umsch_mm.ucode_fw_obj);
	return 0;
}

int amdgpu_umsch_mm_allocate_ucode_data_buffer(struct amdgpu_umsch_mm *umsch)
{
	const struct umsch_mm_firmware_header_v1_0 *umsch_mm_hdr;
	struct amdgpu_device *adev = umsch->ring.adev;
	const __le32 *fw_data;
	uint32_t fw_size;
	int r;

	umsch_mm_hdr = (const struct umsch_mm_firmware_header_v1_0 *)
		       adev->umsch_mm.fw->data;

	fw_data = (const __le32 *)(adev->umsch_mm.fw->data +
		  le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_offset_bytes));
	fw_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_size_bytes);

	r = amdgpu_bo_create_reserved(adev, fw_size,
				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->umsch_mm.data_fw_obj,
				      &adev->umsch_mm.data_fw_gpu_addr,
				      (void **)&adev->umsch_mm.data_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create umsch_mm fw data bo\n", r);
		return r;
	}

	memcpy(adev->umsch_mm.data_fw_ptr, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->umsch_mm.data_fw_obj);
	amdgpu_bo_unreserve(adev->umsch_mm.data_fw_obj);
	return 0;
}

int amdgpu_umsch_mm_psp_execute_cmd_buf(struct amdgpu_umsch_mm *umsch)
{
	struct amdgpu_device *adev = umsch->ring.adev;
	struct amdgpu_firmware_info ucode = {
		.ucode_id = AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER,
		.mc_addr = adev->umsch_mm.cmd_buf_gpu_addr,
		.ucode_size = ((uintptr_t)adev->umsch_mm.cmd_buf_curr_ptr -
			       (uintptr_t)adev->umsch_mm.cmd_buf_ptr),
	};

	return psp_execute_ip_fw_load(&adev->psp, &ucode);
}

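/*
 * Reserve one aggregated doorbell per context priority level, starting
 * just past the highest statically assigned doorbell index, rounded up
 * to a 1024-entry boundary.
 */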
static void umsch_mm_agdb_index_init(struct amdgpu_device *adev)
{
	uint32_t umsch_mm_agdb_start;
	int i;

	umsch_mm_agdb_start = adev->doorbell_index.max_assignment + 1;
	umsch_mm_agdb_start = roundup(umsch_mm_agdb_start, 1024);
	umsch_mm_agdb_start += (AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1);

	for (i = 0; i < CONTEXT_PRIORITY_NUM_LEVELS; i++)
		adev->umsch_mm.agdb_index[i] = umsch_mm_agdb_start + i;
}

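/*
 * One-time software init: resource masks (VPE VMIDs and HQDs) that are
 * later reported to the scheduler firmware via SET_HW_RESOURCES, a
 * writeback slot for the scheduler context, the PSP command buffer and
 * the firmware log BO.
 */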
static int umsch_mm_init(struct amdgpu_device *adev)
{
	int r;

	adev->umsch_mm.vmid_mask_mm_vpe = 0xf00;
	adev->umsch_mm.engine_mask = (1 << UMSCH_SWIP_ENGINE_TYPE_VPE);
	adev->umsch_mm.vpe_hqd_mask = 0xfe;

	r = amdgpu_device_wb_get(adev, &adev->umsch_mm.wb_index);
	if (r) {
		dev_err(adev->dev, "failed to alloc wb for umsch: %d\n", r);
		return r;
	}

	adev->umsch_mm.sch_ctx_gpu_addr = adev->wb.gpu_addr +
					  (adev->umsch_mm.wb_index * 4);

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->umsch_mm.cmd_buf_obj,
				    &adev->umsch_mm.cmd_buf_gpu_addr,
				    (void **)&adev->umsch_mm.cmd_buf_ptr);
	if (r) {
		dev_err(adev->dev, "failed to allocate cmdbuf bo %d\n", r);
		amdgpu_device_wb_free(adev, adev->umsch_mm.wb_index);
		return r;
	}

	r = amdgpu_bo_create_kernel(adev, AMDGPU_UMSCHFW_LOG_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->umsch_mm.dbglog_bo,
				    &adev->umsch_mm.log_gpu_addr,
				    &adev->umsch_mm.log_cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate umsch debug bo\n", r);
		amdgpu_bo_free_kernel(&adev->umsch_mm.cmd_buf_obj,
				      &adev->umsch_mm.cmd_buf_gpu_addr,
				      (void **)&adev->umsch_mm.cmd_buf_ptr);
		amdgpu_device_wb_free(adev, adev->umsch_mm.wb_index);
		return r;
	}

	mutex_init(&adev->umsch_mm.mutex_hidden);

	umsch_mm_agdb_index_init(adev);

	return 0;
}

static int umsch_mm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
	case IP_VERSION(4, 0, 5):
	case IP_VERSION(4, 0, 6):
		umsch_mm_v4_0_set_funcs(&adev->umsch_mm);
		break;
	default:
		return -EINVAL;
	}

	adev->umsch_mm.ring.funcs = &umsch_v4_0_ring_funcs;
	umsch_mm_set_regs(&adev->umsch_mm);

	return 0;
}

static int umsch_mm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_in_reset(adev) || adev->in_s0ix || adev->in_suspend)
		return 0;

	return umsch_mm_test(adev);
}

static int umsch_mm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = umsch_mm_init(adev);
	if (r)
		return r;

	amdgpu_umsch_fwlog_init(&adev->umsch_mm);

	r = umsch_mm_ring_init(&adev->umsch_mm);
	if (r)
		return r;

	r = umsch_mm_init_microcode(&adev->umsch_mm);
	if (r)
		return r;

	return 0;
}

static int umsch_mm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	release_firmware(adev->umsch_mm.fw);
	adev->umsch_mm.fw = NULL;

	amdgpu_ring_fini(&adev->umsch_mm.ring);

	mutex_destroy(&adev->umsch_mm.mutex_hidden);

	amdgpu_bo_free_kernel(&adev->umsch_mm.cmd_buf_obj,
			      &adev->umsch_mm.cmd_buf_gpu_addr,
			      (void **)&adev->umsch_mm.cmd_buf_ptr);

	amdgpu_bo_free_kernel(&adev->umsch_mm.dbglog_bo,
			      &adev->umsch_mm.log_gpu_addr,
			      (void **)&adev->umsch_mm.log_cpu_addr);

	amdgpu_device_wb_free(adev, adev->umsch_mm.wb_index);

	return 0;
}

static int umsch_mm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = umsch_mm_load_microcode(&adev->umsch_mm);
	if (r)
		return r;

	umsch_mm_ring_start(&adev->umsch_mm);

	r = umsch_mm_set_hw_resources(&adev->umsch_mm);
	if (r)
		return r;

	return 0;
}

static int umsch_mm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	umsch_mm_ring_stop(&adev->umsch_mm);

	amdgpu_bo_free_kernel(&adev->umsch_mm.data_fw_obj,
			      &adev->umsch_mm.data_fw_gpu_addr,
			      (void **)&adev->umsch_mm.data_fw_ptr);

	amdgpu_bo_free_kernel(&adev->umsch_mm.ucode_fw_obj,
			      &adev->umsch_mm.ucode_fw_gpu_addr,
			      (void **)&adev->umsch_mm.ucode_fw_ptr);

	return 0;
}

static int umsch_mm_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return umsch_mm_hw_fini(adev);
}

static int umsch_mm_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return umsch_mm_hw_init(adev);
}

void amdgpu_umsch_fwlog_init(struct amdgpu_umsch_mm *umsch_mm)
{
#if defined(CONFIG_DEBUG_FS)
	void *fw_log_cpu_addr = umsch_mm->log_cpu_addr;
	volatile struct amdgpu_umsch_fwlog *log_buf = fw_log_cpu_addr;

	log_buf->header_size = sizeof(struct amdgpu_umsch_fwlog);
	log_buf->buffer_size = AMDGPU_UMSCHFW_LOG_SIZE;
	log_buf->rptr = log_buf->header_size;
	log_buf->wptr = log_buf->header_size;
	log_buf->wrapped = 0;
#endif
}

/*
 * debugfs for mapping umsch firmware log buffer.
 */
#if defined(CONFIG_DEBUG_FS)
static ssize_t amdgpu_debugfs_umsch_fwlog_read(struct file *f, char __user *buf,
					       size_t size, loff_t *pos)
{
	struct amdgpu_umsch_mm *umsch_mm;
	void *log_buf;
	volatile struct amdgpu_umsch_fwlog *plog;
	unsigned int read_pos, write_pos, available, i, read_bytes = 0;
	unsigned int read_num[2] = {0};

	umsch_mm = file_inode(f)->i_private;
	if (!umsch_mm)
		return -ENODEV;

	if (!umsch_mm->log_cpu_addr)
		return -EFAULT;

	log_buf = umsch_mm->log_cpu_addr;

	plog = (volatile struct amdgpu_umsch_fwlog *)log_buf;
	read_pos = plog->rptr;
	write_pos = plog->wptr;

	if (read_pos > AMDGPU_UMSCHFW_LOG_SIZE || write_pos > AMDGPU_UMSCHFW_LOG_SIZE)
		return -EFAULT;

	if (!size || (read_pos == write_pos))
		return 0;

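	/*
	 * The log is a ring buffer: when wptr has wrapped around below
	 * rptr, the copy is split into two chunks, [rptr, buffer end)
	 * followed by [header end, wptr).
	 */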
	if (write_pos > read_pos) {
		available = write_pos - read_pos;
		read_num[0] = min_t(size_t, size, available);
	} else {
		read_num[0] = AMDGPU_UMSCHFW_LOG_SIZE - read_pos;
		available = read_num[0] + write_pos - plog->header_size;
		if (size > available)
			read_num[1] = write_pos - plog->header_size;
		else if (size > read_num[0])
			read_num[1] = size - read_num[0];
		else
			read_num[0] = size;
	}

	for (i = 0; i < 2; i++) {
		if (read_num[i]) {
			if (read_pos == AMDGPU_UMSCHFW_LOG_SIZE)
				read_pos = plog->header_size;
			if (read_num[i] == copy_to_user((buf + read_bytes),
							(log_buf + read_pos), read_num[i]))
				return -EFAULT;

			read_bytes += read_num[i];
			read_pos += read_num[i];
		}
	}

	plog->rptr = read_pos;
	*pos += read_bytes;
	return read_bytes;
}

static const struct file_operations amdgpu_debugfs_umschfwlog_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_umsch_fwlog_read,
	.llseek = default_llseek
};
#endif

void amdgpu_debugfs_umsch_fwlog_init(struct amdgpu_device *adev,
				     struct amdgpu_umsch_mm *umsch_mm)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	sprintf(name, "amdgpu_umsch_fwlog");
	debugfs_create_file_size(name, S_IFREG | 0444, root, umsch_mm,
				 &amdgpu_debugfs_umschfwlog_fops,
				 AMDGPU_UMSCHFW_LOG_SIZE);
#endif
}

static const struct amd_ip_funcs umsch_mm_v4_0_ip_funcs = {
	.name = "umsch_mm_v4_0",
	.early_init = umsch_mm_early_init,
	.late_init = umsch_mm_late_init,
	.sw_init = umsch_mm_sw_init,
	.sw_fini = umsch_mm_sw_fini,
	.hw_init = umsch_mm_hw_init,
	.hw_fini = umsch_mm_hw_fini,
	.suspend = umsch_mm_suspend,
	.resume = umsch_mm_resume,
	.dump_ip_state = NULL,
	.print_ip_state = NULL,
};

const struct amdgpu_ip_block_version umsch_mm_v4_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UMSCH_MM,
	.major = 4,
	.minor = 0,
	.rev = 0,
	.funcs = &umsch_mm_v4_0_ip_funcs,
};