// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_gt_pagefault.h"

#include <linux/bitfield.h>
#include <linux/circ_buf.h>

#include <drm/drm_exec.h>
#include <drm/drm_managed.h>
#include <drm/ttm/ttm_execbuf_util.h>

#include "abi/guc_actions_abi.h"
#include "xe_bo.h"
#include "xe_gt.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_migrate.h"
#include "xe_trace_bo.h"
#include "xe_vm.h"

struct pagefault {
	u64 page_addr;
	u32 asid;
	u16 pdata;
	u8 vfid;
	u8 access_type;
	u8 fault_type;
	u8 fault_level;
	u8 engine_class;
	u8 engine_instance;
	u8 fault_unsuccessful;
	bool trva_fault;
};

enum access_type {
	ACCESS_TYPE_READ = 0,
	ACCESS_TYPE_WRITE = 1,
	ACCESS_TYPE_ATOMIC = 2,
	ACCESS_TYPE_RESERVED = 3,
};

enum fault_type {
	NOT_PRESENT = 0,
	WRITE_ACCESS_VIOLATION = 1,
	ATOMIC_ACCESS_VIOLATION = 2,
};

struct acc {
	u64 va_range_base;
	u32 asid;
	u32 sub_granularity;
	u8 granularity;
	u8 vfid;
	u8 access_type;
	u8 engine_class;
	u8 engine_instance;
};

static bool access_is_atomic(enum access_type access_type)
{
	return access_type == ACCESS_TYPE_ATOMIC;
}

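/*
 * A VMA is considered valid on this tile when it has page-table state here
 * (tile_present bit set) and that state has not been invalidated
 * (tile_invalidated bit clear).
 */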
static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
{
	return BIT(tile->id) & vma->tile_present &&
		!(BIT(tile->id) & vma->tile_invalidated);
}

static bool vma_matches(struct xe_vma *vma, u64 page_addr)
{
	if (page_addr > xe_vma_end(vma) - 1 ||
	    page_addr + SZ_4K - 1 < xe_vma_start(vma))
		return false;

	return true;
}

static struct xe_vma *lookup_vma(struct xe_vm *vm, u64 page_addr)
{
	struct xe_vma *vma = NULL;

	if (vm->usm.last_fault_vma) {	/* Fast lookup */
		if (vma_matches(vm->usm.last_fault_vma, page_addr))
			vma = vm->usm.last_fault_vma;
	}
	if (!vma)
		vma = xe_vm_find_overlapping_vma(vm, page_addr, SZ_4K);

	return vma;
}

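/*
 * Lock the VMA's dma-resv and make sure it has valid backing: on discrete
 * devices an atomic access forces a migration to the faulting tile's VRAM
 * (userptr VMAs can't be migrated, so atomics on them fail with -EACCES),
 * otherwise it is enough to (re)validate the BO's backing store.
 */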
static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
		       bool atomic, unsigned int id)
{
	struct xe_bo *bo = xe_vma_bo(vma);
	struct xe_vm *vm = xe_vma_vm(vma);
	int err;

	err = xe_vm_lock_vma(exec, vma);
	if (err)
		return err;

	if (atomic && IS_DGFX(vm->xe)) {
		if (xe_vma_is_userptr(vma)) {
			err = -EACCES;
			return err;
		}

		/* Migrate to VRAM, move should invalidate the VMA first */
		err = xe_bo_migrate(bo, XE_PL_VRAM0 + id);
		if (err)
			return err;
	} else if (bo) {
		/* Create backing store if needed */
		err = xe_bo_validate(bo, vm, true);
		if (err)
			return err;
	}

	return 0;
}

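/*
 * Service a fault on a single VMA: repin userptr pages if needed, take the
 * VM/BO dma-resv locks via drm_exec, (re)validate or migrate the backing
 * store and rebind the VMA on the faulting tile only, then wait for the
 * bind fence before clearing the tile's invalidated bit. Contention or a
 * userptr invalidation restarts the whole sequence.
 */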
static int handle_vma_pagefault(struct xe_tile *tile, struct pagefault *pf,
				struct xe_vma *vma)
{
	struct xe_vm *vm = xe_vma_vm(vma);
	struct drm_exec exec;
	struct dma_fence *fence;
	ktime_t end = 0;
	int err;
	bool atomic;

	trace_xe_vma_pagefault(vma);
	atomic = access_is_atomic(pf->access_type);

	/* Check if VMA is valid */
	if (vma_is_valid(tile, vma) && !atomic)
		return 0;

retry_userptr:
	if (xe_vma_is_userptr(vma) &&
	    xe_vma_userptr_check_repin(to_userptr_vma(vma))) {
		struct xe_userptr_vma *uvma = to_userptr_vma(vma);

		err = xe_vma_userptr_pin_pages(uvma);
		if (err)
			return err;
	}

	/* Lock VM and BOs dma-resv */
	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		err = xe_pf_begin(&exec, vma, atomic, tile->id);
		drm_exec_retry_on_contention(&exec);
		if (xe_vm_validate_should_retry(&exec, err, &end))
			err = -EAGAIN;
		if (err)
			goto unlock_dma_resv;

		/* Bind VMA only to the GT that has faulted */
		trace_xe_vma_pf_bind(vma);
		fence = xe_vma_rebind(vm, vma, BIT(tile->id));
		if (IS_ERR(fence)) {
			err = PTR_ERR(fence);
			if (xe_vm_validate_should_retry(&exec, err, &end))
				err = -EAGAIN;
			goto unlock_dma_resv;
		}
	}

	dma_fence_wait(fence, false);
	dma_fence_put(fence);
	vma->tile_invalidated &= ~BIT(tile->id);

unlock_dma_resv:
	drm_exec_fini(&exec);
	if (err == -EAGAIN)
		goto retry_userptr;

	return err;
}

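/*
 * Translate the ASID reported by hardware into its VM, taking a reference
 * on success. Only VMs running in fault mode are eligible; anything else
 * yields ERR_PTR(-EINVAL).
 */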
static struct xe_vm *asid_to_vm(struct xe_device *xe, u32 asid)
{
	struct xe_vm *vm;

	down_read(&xe->usm.lock);
	vm = xa_load(&xe->usm.asid_to_vm, asid);
	if (vm && xe_vm_in_fault_mode(vm))
		xe_vm_get(vm);
	else
		vm = ERR_PTR(-EINVAL);
	up_read(&xe->usm.lock);

	return vm;
}

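/*
 * Resolve one decoded page fault: look up the VM by ASID, find the VMA
 * covering the faulted address and let handle_vma_pagefault() fix it up.
 * On success the VMA is cached for the next fault's fast lookup path.
 */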
static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_vm *vm;
	struct xe_vma *vma = NULL;
	int err;

	/* SW isn't expected to handle TRTT faults */
	if (pf->trva_fault)
		return -EFAULT;

	vm = asid_to_vm(xe, pf->asid);
	if (IS_ERR(vm))
		return PTR_ERR(vm);

	/*
	 * TODO: Change to read lock? Using write lock for simplicity.
	 */
	down_write(&vm->lock);

	if (xe_vm_is_closed(vm)) {
		err = -ENOENT;
		goto unlock_vm;
	}

	vma = lookup_vma(vm, pf->page_addr);
	if (!vma) {
		err = -EINVAL;
		goto unlock_vm;
	}

	err = handle_vma_pagefault(tile, pf, vma);

unlock_vm:
	if (!err)
		vm->usm.last_fault_vma = vma;
	up_write(&vm->lock);
	xe_vm_put(vm);

	return err;
}

static int send_pagefault_reply(struct xe_guc *guc,
				struct xe_guc_pagefault_reply *reply)
{
	u32 action[] = {
		XE_GUC_ACTION_PAGE_FAULT_RES_DESC,
		reply->dw0,
		reply->dw1,
	};

	return xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
}

static void print_pagefault(struct xe_device *xe, struct pagefault *pf)
{
	drm_dbg(&xe->drm, "\n\tASID: %d\n"
		"\tVFID: %d\n"
		"\tPDATA: 0x%04x\n"
		"\tFaulted Address: 0x%08x%08x\n"
		"\tFaultType: %d\n"
		"\tAccessType: %d\n"
		"\tFaultLevel: %d\n"
		"\tEngineClass: %d\n"
		"\tEngineInstance: %d\n",
		pf->asid, pf->vfid, pf->pdata, upper_32_bits(pf->page_addr),
		lower_32_bits(pf->page_addr),
		pf->fault_type, pf->access_type, pf->fault_level,
		pf->engine_class, pf->engine_instance);
}

#define PF_MSG_LEN_DW 4

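/*
 * Pop one GuC page fault descriptor off the per-queue circular buffer and
 * decode it into a struct pagefault. Returns false when the queue is empty.
 */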
static bool get_pagefault(struct pf_queue *pf_queue, struct pagefault *pf)
{
	const struct xe_guc_pagefault_desc *desc;
	bool ret = false;

	spin_lock_irq(&pf_queue->lock);
	if (pf_queue->tail != pf_queue->head) {
		desc = (const struct xe_guc_pagefault_desc *)
			(pf_queue->data + pf_queue->tail);

		pf->fault_level = FIELD_GET(PFD_FAULT_LEVEL, desc->dw0);
		pf->trva_fault = FIELD_GET(XE2_PFD_TRVA_FAULT, desc->dw0);
		pf->engine_class = FIELD_GET(PFD_ENG_CLASS, desc->dw0);
		pf->engine_instance = FIELD_GET(PFD_ENG_INSTANCE, desc->dw0);
		pf->pdata = FIELD_GET(PFD_PDATA_HI, desc->dw1) <<
			PFD_PDATA_HI_SHIFT;
		pf->pdata |= FIELD_GET(PFD_PDATA_LO, desc->dw0);
		pf->asid = FIELD_GET(PFD_ASID, desc->dw1);
		pf->vfid = FIELD_GET(PFD_VFID, desc->dw2);
		pf->access_type = FIELD_GET(PFD_ACCESS_TYPE, desc->dw2);
		pf->fault_type = FIELD_GET(PFD_FAULT_TYPE, desc->dw2);
		pf->page_addr = (u64)(FIELD_GET(PFD_VIRTUAL_ADDR_HI, desc->dw3)) <<
			PFD_VIRTUAL_ADDR_HI_SHIFT;
		pf->page_addr |= FIELD_GET(PFD_VIRTUAL_ADDR_LO, desc->dw2) <<
			PFD_VIRTUAL_ADDR_LO_SHIFT;

		pf_queue->tail = (pf_queue->tail + PF_MSG_LEN_DW) %
			pf_queue->num_dw;
		ret = true;
	}
	spin_unlock_irq(&pf_queue->lock);

	return ret;
}

static bool pf_queue_full(struct pf_queue *pf_queue)
{
	lockdep_assert_held(&pf_queue->lock);

	return CIRC_SPACE(pf_queue->head, pf_queue->tail,
			  pf_queue->num_dw) <=
		PF_MSG_LEN_DW;
}

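/**
 * xe_guc_pagefault_handler - G2H page fault notification handler
 * @guc: the &xe_guc from which the message was received
 * @msg: the G2H message payload, one GuC page fault descriptor
 * @len: length of @msg in dwords
 *
 * Copies the raw descriptor into the fault queue selected by its ASID and
 * kicks the worker that will decode and service it; the fault itself is
 * never handled in this path.
 *
 * Return: 0 on success, -EPROTO on a malformed message, -ENOSPC if the
 * selected fault queue is full and the fault had to be dropped.
 */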
int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	struct pf_queue *pf_queue;
	unsigned long flags;
	u32 asid;
	bool full;

	if (unlikely(len != PF_MSG_LEN_DW))
		return -EPROTO;

	asid = FIELD_GET(PFD_ASID, msg[1]);
	pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE);

	/*
	 * The below logic doesn't work unless pf_queue->num_dw % PF_MSG_LEN_DW == 0
	 */
	xe_gt_assert(gt, !(pf_queue->num_dw % PF_MSG_LEN_DW));

	spin_lock_irqsave(&pf_queue->lock, flags);
	full = pf_queue_full(pf_queue);
	if (!full) {
		memcpy(pf_queue->data + pf_queue->head, msg, len * sizeof(u32));
		pf_queue->head = (pf_queue->head + len) %
			pf_queue->num_dw;
		queue_work(gt->usm.pf_wq, &pf_queue->worker);
	} else {
		drm_warn(&xe->drm, "PF Queue full, shouldn't be possible");
	}
	spin_unlock_irqrestore(&pf_queue->lock, flags);

	return full ? -ENOSPC : 0;
}

#define USM_QUEUE_MAX_RUNTIME_MS 20

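/*
 * Worker that drains a fault queue: each decoded fault is handled and then
 * acknowledged to the GuC with a page fault reply. To avoid monopolizing
 * the workqueue the worker requeues itself once it has run for more than
 * USM_QUEUE_MAX_RUNTIME_MS with entries still pending.
 */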
static void pf_queue_work_func(struct work_struct *w)
{
	struct pf_queue *pf_queue = container_of(w, struct pf_queue, worker);
	struct xe_gt *gt = pf_queue->gt;
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_guc_pagefault_reply reply = {};
	struct pagefault pf = {};
	unsigned long threshold;
	int ret;

	threshold = jiffies + msecs_to_jiffies(USM_QUEUE_MAX_RUNTIME_MS);

	while (get_pagefault(pf_queue, &pf)) {
		ret = handle_pagefault(gt, &pf);
		if (unlikely(ret)) {
			print_pagefault(xe, &pf);
			pf.fault_unsuccessful = 1;
			drm_dbg(&xe->drm, "Fault response: Unsuccessful %d\n", ret);
		}

		reply.dw0 = FIELD_PREP(PFR_VALID, 1) |
			FIELD_PREP(PFR_SUCCESS, pf.fault_unsuccessful) |
			FIELD_PREP(PFR_REPLY, PFR_ACCESS) |
			FIELD_PREP(PFR_DESC_TYPE, FAULT_RESPONSE_DESC) |
			FIELD_PREP(PFR_ASID, pf.asid);

		reply.dw1 = FIELD_PREP(PFR_VFID, pf.vfid) |
			FIELD_PREP(PFR_ENG_INSTANCE, pf.engine_instance) |
			FIELD_PREP(PFR_ENG_CLASS, pf.engine_class) |
			FIELD_PREP(PFR_PDATA, pf.pdata);

		send_pagefault_reply(&gt->uc.guc, &reply);

		if (time_after(jiffies, threshold) &&
		    pf_queue->tail != pf_queue->head) {
			queue_work(gt->usm.pf_wq, w);
			break;
		}
	}
}

static void acc_queue_work_func(struct work_struct *w);

static void pagefault_fini(void *arg)
{
	struct xe_gt *gt = arg;
	struct xe_device *xe = gt_to_xe(gt);

	if (!xe->info.has_usm)
		return;

	destroy_workqueue(gt->usm.acc_wq);
	destroy_workqueue(gt->usm.pf_wq);
}

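/*
 * Size a fault queue from the fused-in topology: in the worst case every
 * enabled EU plus every hardware engine can have one page fault outstanding,
 * so reserve one PF_MSG_LEN_DW slot for each of them.
 */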
static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue)
{
	struct xe_device *xe = gt_to_xe(gt);
	xe_dss_mask_t all_dss;
	int num_dss, num_eus;

	bitmap_or(all_dss, gt->fuse_topo.g_dss_mask, gt->fuse_topo.c_dss_mask,
		  XE_MAX_DSS_FUSE_BITS);

	num_dss = bitmap_weight(all_dss, XE_MAX_DSS_FUSE_BITS);
	num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss,
				XE_MAX_EU_FUSE_BITS) * num_dss;

	/* user can issue separate page faults per EU and per CS */
	pf_queue->num_dw =
		(num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW;

	pf_queue->gt = gt;
	pf_queue->data = devm_kcalloc(xe->drm.dev, pf_queue->num_dw,
				      sizeof(u32), GFP_KERNEL);
	if (!pf_queue->data)
		return -ENOMEM;

	spin_lock_init(&pf_queue->lock);
	INIT_WORK(&pf_queue->worker, pf_queue_work_func);

	return 0;
}

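/**
 * xe_gt_pagefault_init - initialize GT page fault and access counter handling
 * @gt: the &xe_gt to initialize
 *
 * Allocates the per-ASID fault queues, sets up the access counter queues and
 * creates the workqueues that service both. A no-op on devices without USM.
 *
 * Return: 0 on success, negative error code on failure.
 */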
int xe_gt_pagefault_init(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int i, ret = 0;

	if (!xe->info.has_usm)
		return 0;

	for (i = 0; i < NUM_PF_QUEUE; ++i) {
		ret = xe_alloc_pf_queue(gt, &gt->usm.pf_queue[i]);
		if (ret)
			return ret;
	}
	for (i = 0; i < NUM_ACC_QUEUE; ++i) {
		gt->usm.acc_queue[i].gt = gt;
		spin_lock_init(&gt->usm.acc_queue[i].lock);
		INIT_WORK(&gt->usm.acc_queue[i].worker, acc_queue_work_func);
	}

	gt->usm.pf_wq = alloc_workqueue("xe_gt_page_fault_work_queue",
					WQ_UNBOUND | WQ_HIGHPRI, NUM_PF_QUEUE);
	if (!gt->usm.pf_wq)
		return -ENOMEM;

	gt->usm.acc_wq = alloc_workqueue("xe_gt_access_counter_work_queue",
					 WQ_UNBOUND | WQ_HIGHPRI,
					 NUM_ACC_QUEUE);
	if (!gt->usm.acc_wq) {
		destroy_workqueue(gt->usm.pf_wq);
		return -ENOMEM;
	}

	return devm_add_action_or_reset(xe->drm.dev, pagefault_fini, gt);
}

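/**
 * xe_gt_pagefault_reset - reset GT page fault and access counter state
 * @gt: the &xe_gt being reset
 *
 * Drops any queued but not yet processed faults and access counter
 * notifications by resetting the circular buffer heads and tails.
 */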
void xe_gt_pagefault_reset(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int i;

	if (!xe->info.has_usm)
		return;

	for (i = 0; i < NUM_PF_QUEUE; ++i) {
		spin_lock_irq(&gt->usm.pf_queue[i].lock);
		gt->usm.pf_queue[i].head = 0;
		gt->usm.pf_queue[i].tail = 0;
		spin_unlock_irq(&gt->usm.pf_queue[i].lock);
	}

	for (i = 0; i < NUM_ACC_QUEUE; ++i) {
		spin_lock(&gt->usm.acc_queue[i].lock);
		gt->usm.acc_queue[i].head = 0;
		gt->usm.acc_queue[i].tail = 0;
		spin_unlock(&gt->usm.acc_queue[i].lock);
	}
}

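/*
 * Access counter granularity encoding: the granularity field selects a
 * 128K, 2M, 16M or 64M VA region, and the sub-granularity bitmask divides
 * that region into 32 equally sized chunks.
 */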
static int granularity_in_byte(int val)
{
	switch (val) {
	case 0:
		return SZ_128K;
	case 1:
		return SZ_2M;
	case 2:
		return SZ_16M;
	case 3:
		return SZ_64M;
	default:
		return 0;
	}
}

static int sub_granularity_in_byte(int val)
{
	return (granularity_in_byte(val) / 32);
}

static void print_acc(struct xe_device *xe, struct acc *acc)
{
	drm_warn(&xe->drm, "Access counter request:\n"
		 "\tType: %s\n"
		 "\tASID: %d\n"
		 "\tVFID: %d\n"
		 "\tEngine: %d:%d\n"
		 "\tGranularity: 0x%x KB Region/ %d KB sub-granularity\n"
		 "\tSub_Granularity Vector: 0x%08x\n"
		 "\tVA Range base: 0x%016llx\n",
		 acc->access_type ? "AC_NTFY_VAL" : "AC_TRIG_VAL",
		 acc->asid, acc->vfid, acc->engine_class, acc->engine_instance,
		 granularity_in_byte(acc->granularity) / SZ_1K,
		 sub_granularity_in_byte(acc->granularity) / SZ_1K,
		 acc->sub_granularity, acc->va_range_base);
}

static struct xe_vma *get_acc_vma(struct xe_vm *vm, struct acc *acc)
{
	u64 page_va = acc->va_range_base + (ffs(acc->sub_granularity) - 1) *
		sub_granularity_in_byte(acc->granularity);

	return xe_vm_find_overlapping_vma(vm, page_va, SZ_4K);
}

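/*
 * Handle one access counter notification: find the VMA covering the first
 * touched sub-granularity chunk and, if it is backed by a BO, use
 * xe_pf_begin() to migrate it to the tile's VRAM (or simply revalidate it
 * on integrated parts). Userptr and NULL mappings have nothing to migrate
 * and are ignored.
 */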
static int handle_acc(struct xe_gt *gt, struct acc *acc)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct drm_exec exec;
	struct xe_vm *vm;
	struct xe_vma *vma;
	int ret = 0;

	/* We only support ACC_TRIGGER at the moment */
	if (acc->access_type != ACC_TRIGGER)
		return -EINVAL;

	vm = asid_to_vm(xe, acc->asid);
	if (IS_ERR(vm))
		return PTR_ERR(vm);

	down_read(&vm->lock);

	/* Lookup VMA */
	vma = get_acc_vma(vm, acc);
	if (!vma) {
		ret = -EINVAL;
		goto unlock_vm;
	}

	trace_xe_vma_acc(vma);

	/* Userptr or null can't be migrated, nothing to do */
	if (xe_vma_has_no_bo(vma))
		goto unlock_vm;

	/* Lock VM and BOs dma-resv */
	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		ret = xe_pf_begin(&exec, vma, true, tile->id);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;
	}

	drm_exec_fini(&exec);
unlock_vm:
	up_read(&vm->lock);
	xe_vm_put(vm);

	return ret;
}

#define make_u64(hi__, low__) ((u64)(hi__) << 32 | (u64)(low__))

#define ACC_MSG_LEN_DW 4

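/*
 * Pop one GuC access counter descriptor off the per-queue circular buffer
 * and decode it into a struct acc. Returns false when the queue is empty.
 */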
static bool get_acc(struct acc_queue *acc_queue, struct acc *acc)
{
	const struct xe_guc_acc_desc *desc;
	bool ret = false;

	spin_lock(&acc_queue->lock);
	if (acc_queue->tail != acc_queue->head) {
		desc = (const struct xe_guc_acc_desc *)
			(acc_queue->data + acc_queue->tail);

		acc->granularity = FIELD_GET(ACC_GRANULARITY, desc->dw2);
		acc->sub_granularity = FIELD_GET(ACC_SUBG_HI, desc->dw1) << 31 |
			FIELD_GET(ACC_SUBG_LO, desc->dw0);
		acc->engine_class = FIELD_GET(ACC_ENG_CLASS, desc->dw1);
		acc->engine_instance = FIELD_GET(ACC_ENG_INSTANCE, desc->dw1);
		acc->asid = FIELD_GET(ACC_ASID, desc->dw1);
		acc->vfid = FIELD_GET(ACC_VFID, desc->dw2);
		acc->access_type = FIELD_GET(ACC_TYPE, desc->dw0);
		acc->va_range_base = make_u64(desc->dw3 & ACC_VIRTUAL_ADDR_RANGE_HI,
					      desc->dw2 & ACC_VIRTUAL_ADDR_RANGE_LO);

		acc_queue->tail = (acc_queue->tail + ACC_MSG_LEN_DW) %
			ACC_QUEUE_NUM_DW;
		ret = true;
	}
	spin_unlock(&acc_queue->lock);

	return ret;
}

static void acc_queue_work_func(struct work_struct *w)
{
	struct acc_queue *acc_queue = container_of(w, struct acc_queue, worker);
	struct xe_gt *gt = acc_queue->gt;
	struct xe_device *xe = gt_to_xe(gt);
	struct acc acc = {};
	unsigned long threshold;
	int ret;

	threshold = jiffies + msecs_to_jiffies(USM_QUEUE_MAX_RUNTIME_MS);

	while (get_acc(acc_queue, &acc)) {
		ret = handle_acc(gt, &acc);
		if (unlikely(ret)) {
			print_acc(xe, &acc);
			drm_warn(&xe->drm, "ACC: Unsuccessful %d\n", ret);
		}

		if (time_after(jiffies, threshold) &&
		    acc_queue->tail != acc_queue->head) {
			queue_work(gt->usm.acc_wq, w);
			break;
		}
	}
}

static bool acc_queue_full(struct acc_queue *acc_queue)
{
	lockdep_assert_held(&acc_queue->lock);

	return CIRC_SPACE(acc_queue->head, acc_queue->tail, ACC_QUEUE_NUM_DW) <=
		ACC_MSG_LEN_DW;
}

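/**
 * xe_guc_access_counter_notify_handler - G2H access counter notification handler
 * @guc: the &xe_guc from which the message was received
 * @msg: the G2H message payload, one GuC access counter descriptor
 * @len: length of @msg in dwords
 *
 * Copies the descriptor into the per-ASID access counter queue and kicks the
 * worker that will service it. Notifications arriving on a full queue are
 * dropped with a warning.
 *
 * Return: 0 on success, -EPROTO on a malformed message, -ENOSPC if the
 * queue was full.
 */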
int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct acc_queue *acc_queue;
	u32 asid;
	bool full;

	/*
	 * The below logic doesn't work unless ACC_QUEUE_NUM_DW % ACC_MSG_LEN_DW == 0
	 */
	BUILD_BUG_ON(ACC_QUEUE_NUM_DW % ACC_MSG_LEN_DW);

	if (unlikely(len != ACC_MSG_LEN_DW))
		return -EPROTO;

	asid = FIELD_GET(ACC_ASID, msg[1]);
	acc_queue = &gt->usm.acc_queue[asid % NUM_ACC_QUEUE];

	spin_lock(&acc_queue->lock);
	full = acc_queue_full(acc_queue);
	if (!full) {
		memcpy(acc_queue->data + acc_queue->head, msg,
		       len * sizeof(u32));
		acc_queue->head = (acc_queue->head + len) % ACC_QUEUE_NUM_DW;
		queue_work(gt->usm.acc_wq, &acc_queue->worker);
	} else {
		drm_warn(&gt_to_xe(gt)->drm, "ACC Queue full, dropping ACC");
	}
	spin_unlock(&acc_queue->lock);

	return full ? -ENOSPC : 0;
}