// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_gt_pagefault.h"

#include <linux/bitfield.h>
#include <linux/circ_buf.h>

#include <drm/drm_exec.h>
#include <drm/drm_managed.h>

#include "abi/guc_actions_abi.h"
#include "xe_bo.h"
#include "xe_gt.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_migrate.h"
#include "xe_trace_bo.h"
#include "xe_vm.h"

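/* Page fault data decoded from the GuC fault descriptor */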
struct pagefault {
	u64 page_addr;
	u32 asid;
	u16 pdata;
	u8 vfid;
	u8 access_type;
	u8 fault_type;
	u8 fault_level;
	u8 engine_class;
	u8 engine_instance;
	u8 fault_unsuccessful;
	bool trva_fault;
};

enum access_type {
	ACCESS_TYPE_READ = 0,
	ACCESS_TYPE_WRITE = 1,
	ACCESS_TYPE_ATOMIC = 2,
	ACCESS_TYPE_RESERVED = 3,
};

enum fault_type {
	NOT_PRESENT = 0,
	WRITE_ACCESS_VIOLATION = 1,
	ATOMIC_ACCESS_VIOLATION = 2,
};

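/* Access counter notification decoded from the GuC descriptor */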
struct acc {
	u64 va_range_base;
	u32 asid;
	u32 sub_granularity;
	u8 granularity;
	u8 vfid;
	u8 access_type;
	u8 engine_class;
	u8 engine_instance;
};

static bool access_is_atomic(enum access_type access_type)
{
	return access_type == ACCESS_TYPE_ATOMIC;
}

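/*
 * A VMA is considered valid on a tile when its bindings are present there
 * and have not been invalidated.
 */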
static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
{
	return BIT(tile->id) & vma->tile_present &&
		!(BIT(tile->id) & vma->tile_invalidated);
}

static bool vma_matches(struct xe_vma *vma, u64 page_addr)
{
	if (page_addr > xe_vma_end(vma) - 1 ||
	    page_addr + SZ_4K - 1 < xe_vma_start(vma))
		return false;

	return true;
}

static struct xe_vma *lookup_vma(struct xe_vm *vm, u64 page_addr)
{
	struct xe_vma *vma = NULL;

	if (vm->usm.last_fault_vma) {	/* Fast lookup */
		if (vma_matches(vm->usm.last_fault_vma, page_addr))
			vma = vm->usm.last_fault_vma;
	}
	if (!vma)
		vma = xe_vm_find_overlapping_vma(vm, page_addr, SZ_4K);

	return vma;
}

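/*
 * Lock the VMA's dma-resv and make sure backing store is in place. Atomic
 * access on discrete devices requires the BO to live in VRAM, which userptr
 * VMAs cannot provide, hence the -EACCES.
 */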
static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
		       bool atomic, unsigned int id)
{
	struct xe_bo *bo = xe_vma_bo(vma);
	struct xe_vm *vm = xe_vma_vm(vma);
	int err;

	err = xe_vm_lock_vma(exec, vma);
	if (err)
		return err;

	if (atomic && IS_DGFX(vm->xe)) {
		if (xe_vma_is_userptr(vma)) {
			err = -EACCES;
			return err;
		}

		/* Migrate to VRAM, move should invalidate the VMA first */
		err = xe_bo_migrate(bo, XE_PL_VRAM0 + id);
		if (err)
			return err;
	} else if (bo) {
		/* Create backing store if needed */
		err = xe_bo_validate(bo, vm, true);
		if (err)
			return err;
	}

	return 0;
}

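/*
 * Service a fault on a single VMA: repin userptr pages if required, lock the
 * dma-resv objects, rebind the VMA on the faulting tile and wait for the
 * bind fence before returning.
 */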
static int handle_vma_pagefault(struct xe_tile *tile, struct pagefault *pf,
				struct xe_vma *vma)
{
	struct xe_vm *vm = xe_vma_vm(vma);
	struct drm_exec exec;
	struct dma_fence *fence;
	ktime_t end = 0;
	int err;
	bool atomic;

	trace_xe_vma_pagefault(vma);
	atomic = access_is_atomic(pf->access_type);

	/* Check if VMA is valid */
	if (vma_is_valid(tile, vma) && !atomic)
		return 0;

retry_userptr:
	if (xe_vma_is_userptr(vma) &&
	    xe_vma_userptr_check_repin(to_userptr_vma(vma))) {
		struct xe_userptr_vma *uvma = to_userptr_vma(vma);

		err = xe_vma_userptr_pin_pages(uvma);
		if (err)
			return err;
	}

	/* Lock VM and BOs dma-resv */
	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		err = xe_pf_begin(&exec, vma, atomic, tile->id);
		drm_exec_retry_on_contention(&exec);
		if (xe_vm_validate_should_retry(&exec, err, &end))
			err = -EAGAIN;
		if (err)
			goto unlock_dma_resv;

		/* Bind VMA only to the GT that has faulted */
		trace_xe_vma_pf_bind(vma);
		fence = xe_vma_rebind(vm, vma, BIT(tile->id));
		if (IS_ERR(fence)) {
			err = PTR_ERR(fence);
			if (xe_vm_validate_should_retry(&exec, err, &end))
				err = -EAGAIN;
			goto unlock_dma_resv;
		}
	}

	dma_fence_wait(fence, false);
	dma_fence_put(fence);
	vma->tile_invalidated &= ~BIT(tile->id);

unlock_dma_resv:
	drm_exec_fini(&exec);
	if (err == -EAGAIN)
		goto retry_userptr;

	return err;
}

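/* Look up the VM for an ASID and take a reference; only fault-mode VMs qualify */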
static struct xe_vm *asid_to_vm(struct xe_device *xe, u32 asid)
{
	struct xe_vm *vm;

	down_read(&xe->usm.lock);
	vm = xa_load(&xe->usm.asid_to_vm, asid);
	if (vm && xe_vm_in_fault_mode(vm))
		xe_vm_get(vm);
	else
		vm = ERR_PTR(-EINVAL);
	up_read(&xe->usm.lock);

	return vm;
}

static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_vm *vm;
	struct xe_vma *vma = NULL;
	int err;

	/* SW isn't expected to handle TRTT faults */
	if (pf->trva_fault)
		return -EFAULT;

	vm = asid_to_vm(xe, pf->asid);
	if (IS_ERR(vm))
		return PTR_ERR(vm);

	/*
	 * TODO: Change to read lock? Using write lock for simplicity.
	 */
	down_write(&vm->lock);

	if (xe_vm_is_closed(vm)) {
		err = -ENOENT;
		goto unlock_vm;
	}

	vma = lookup_vma(vm, pf->page_addr);
	if (!vma) {
		err = -EINVAL;
		goto unlock_vm;
	}

	err = handle_vma_pagefault(tile, pf, vma);

unlock_vm:
	if (!err)
		vm->usm.last_fault_vma = vma;
	up_write(&vm->lock);
	xe_vm_put(vm);

	return err;
}

static int send_pagefault_reply(struct xe_guc *guc,
				struct xe_guc_pagefault_reply *reply)
{
	u32 action[] = {
		XE_GUC_ACTION_PAGE_FAULT_RES_DESC,
		reply->dw0,
		reply->dw1,
	};

	return xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
}

static void print_pagefault(struct xe_device *xe, struct pagefault *pf)
{
	drm_dbg(&xe->drm, "\n\tASID: %d\n"
		"\tVFID: %d\n"
		"\tPDATA: 0x%04x\n"
		"\tFaulted Address: 0x%08x%08x\n"
		"\tFaultType: %d\n"
		"\tAccessType: %d\n"
		"\tFaultLevel: %d\n"
		"\tEngineClass: %d\n"
		"\tEngineInstance: %d\n",
		pf->asid, pf->vfid, pf->pdata, upper_32_bits(pf->page_addr),
		lower_32_bits(pf->page_addr),
		pf->fault_type, pf->access_type, pf->fault_level,
		pf->engine_class, pf->engine_instance);
}

#define PF_MSG_LEN_DW	4

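/* Dequeue and decode one fault descriptor from the circular fault queue */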
static bool get_pagefault(struct pf_queue *pf_queue, struct pagefault *pf)
{
	const struct xe_guc_pagefault_desc *desc;
	bool ret = false;

	spin_lock_irq(&pf_queue->lock);
	if (pf_queue->tail != pf_queue->head) {
		desc = (const struct xe_guc_pagefault_desc *)
			(pf_queue->data + pf_queue->tail);

		pf->fault_level = FIELD_GET(PFD_FAULT_LEVEL, desc->dw0);
		pf->trva_fault = FIELD_GET(XE2_PFD_TRVA_FAULT, desc->dw0);
		pf->engine_class = FIELD_GET(PFD_ENG_CLASS, desc->dw0);
		pf->engine_instance = FIELD_GET(PFD_ENG_INSTANCE, desc->dw0);
		pf->pdata = FIELD_GET(PFD_PDATA_HI, desc->dw1) <<
			PFD_PDATA_HI_SHIFT;
		pf->pdata |= FIELD_GET(PFD_PDATA_LO, desc->dw0);
		pf->asid = FIELD_GET(PFD_ASID, desc->dw1);
		pf->vfid = FIELD_GET(PFD_VFID, desc->dw2);
		pf->access_type = FIELD_GET(PFD_ACCESS_TYPE, desc->dw2);
		pf->fault_type = FIELD_GET(PFD_FAULT_TYPE, desc->dw2);
		pf->page_addr = (u64)(FIELD_GET(PFD_VIRTUAL_ADDR_HI, desc->dw3)) <<
			PFD_VIRTUAL_ADDR_HI_SHIFT;
		pf->page_addr |= FIELD_GET(PFD_VIRTUAL_ADDR_LO, desc->dw2) <<
			PFD_VIRTUAL_ADDR_LO_SHIFT;

		pf_queue->tail = (pf_queue->tail + PF_MSG_LEN_DW) %
			pf_queue->num_dw;
		ret = true;
	}
	spin_unlock_irq(&pf_queue->lock);

	return ret;
}

static bool pf_queue_full(struct pf_queue *pf_queue)
{
	lockdep_assert_held(&pf_queue->lock);

	return CIRC_SPACE(pf_queue->head, pf_queue->tail,
			  pf_queue->num_dw) <=
		PF_MSG_LEN_DW;
}

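/*
 * G2H page fault handler: called from the GuC CT layer with a raw fault
 * descriptor, which is copied into the fault queue selected by ASID and
 * handed off to the worker for processing.
 */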
int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	struct pf_queue *pf_queue;
	unsigned long flags;
	u32 asid;
	bool full;

	if (unlikely(len != PF_MSG_LEN_DW))
		return -EPROTO;

	asid = FIELD_GET(PFD_ASID, msg[1]);
	pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE);

	/*
	 * The below logic doesn't work unless PF_QUEUE_NUM_DW % PF_MSG_LEN_DW == 0
	 */
	xe_gt_assert(gt, !(pf_queue->num_dw % PF_MSG_LEN_DW));

	spin_lock_irqsave(&pf_queue->lock, flags);
	full = pf_queue_full(pf_queue);
	if (!full) {
		memcpy(pf_queue->data + pf_queue->head, msg, len * sizeof(u32));
		pf_queue->head = (pf_queue->head + len) %
			pf_queue->num_dw;
		queue_work(gt->usm.pf_wq, &pf_queue->worker);
	} else {
		drm_warn(&xe->drm, "PF Queue full, shouldn't be possible");
	}
	spin_unlock_irqrestore(&pf_queue->lock, flags);

	return full ? -ENOSPC : 0;
}

#define USM_QUEUE_MAX_RUNTIME_MS	20

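/*
 * Worker that drains a fault queue: each fault is serviced and a reply is
 * sent back to the GuC. Once USM_QUEUE_MAX_RUNTIME_MS has elapsed the
 * remaining work is requeued so a single run stays bounded in time.
 */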
static void pf_queue_work_func(struct work_struct *w)
{
	struct pf_queue *pf_queue = container_of(w, struct pf_queue, worker);
	struct xe_gt *gt = pf_queue->gt;
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_guc_pagefault_reply reply = {};
	struct pagefault pf = {};
	unsigned long threshold;
	int ret;

	threshold = jiffies + msecs_to_jiffies(USM_QUEUE_MAX_RUNTIME_MS);

	while (get_pagefault(pf_queue, &pf)) {
		ret = handle_pagefault(gt, &pf);
		if (unlikely(ret)) {
			print_pagefault(xe, &pf);
			pf.fault_unsuccessful = 1;
			drm_dbg(&xe->drm, "Fault response: Unsuccessful %d\n", ret);
		}

		reply.dw0 = FIELD_PREP(PFR_VALID, 1) |
			FIELD_PREP(PFR_SUCCESS, pf.fault_unsuccessful) |
			FIELD_PREP(PFR_REPLY, PFR_ACCESS) |
			FIELD_PREP(PFR_DESC_TYPE, FAULT_RESPONSE_DESC) |
			FIELD_PREP(PFR_ASID, pf.asid);

		reply.dw1 = FIELD_PREP(PFR_VFID, pf.vfid) |
			FIELD_PREP(PFR_ENG_INSTANCE, pf.engine_instance) |
			FIELD_PREP(PFR_ENG_CLASS, pf.engine_class) |
			FIELD_PREP(PFR_PDATA, pf.pdata);

		send_pagefault_reply(&gt->uc.guc, &reply);

		if (time_after(jiffies, threshold) &&
		    pf_queue->tail != pf_queue->head) {
			queue_work(gt->usm.pf_wq, w);
			break;
		}
	}
}

static void acc_queue_work_func(struct work_struct *w);

static void pagefault_fini(void *arg)
{
	struct xe_gt *gt = arg;
	struct xe_device *xe = gt_to_xe(gt);

	if (!xe->info.has_usm)
		return;

	destroy_workqueue(gt->usm.acc_wq);
	destroy_workqueue(gt->usm.pf_wq);
}

static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue)
{
	struct xe_device *xe = gt_to_xe(gt);
	xe_dss_mask_t all_dss;
	int num_dss, num_eus;

	bitmap_or(all_dss, gt->fuse_topo.g_dss_mask, gt->fuse_topo.c_dss_mask,
		  XE_MAX_DSS_FUSE_BITS);

	num_dss = bitmap_weight(all_dss, XE_MAX_DSS_FUSE_BITS);
	num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss,
				XE_MAX_EU_FUSE_BITS) * num_dss;

	/* user can issue separate page faults per EU and per CS */
	pf_queue->num_dw =
		(num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW;

	pf_queue->gt = gt;
	pf_queue->data = devm_kcalloc(xe->drm.dev, pf_queue->num_dw,
				      sizeof(u32), GFP_KERNEL);
	if (!pf_queue->data)
		return -ENOMEM;

	spin_lock_init(&pf_queue->lock);
	INIT_WORK(&pf_queue->worker, pf_queue_work_func);

	return 0;
}

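/*
 * Set up the per-GT fault and access counter queues and the workqueues that
 * drain them; the workqueues are torn down by pagefault_fini(), registered
 * as a devm action below.
 */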
int xe_gt_pagefault_init(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int i, ret = 0;

	if (!xe->info.has_usm)
		return 0;

	for (i = 0; i < NUM_PF_QUEUE; ++i) {
		ret = xe_alloc_pf_queue(gt, &gt->usm.pf_queue[i]);
		if (ret)
			return ret;
	}
	for (i = 0; i < NUM_ACC_QUEUE; ++i) {
		gt->usm.acc_queue[i].gt = gt;
		spin_lock_init(&gt->usm.acc_queue[i].lock);
		INIT_WORK(&gt->usm.acc_queue[i].worker, acc_queue_work_func);
	}

	gt->usm.pf_wq = alloc_workqueue("xe_gt_page_fault_work_queue",
					WQ_UNBOUND | WQ_HIGHPRI, NUM_PF_QUEUE);
	if (!gt->usm.pf_wq)
		return -ENOMEM;

	gt->usm.acc_wq = alloc_workqueue("xe_gt_access_counter_work_queue",
					 WQ_UNBOUND | WQ_HIGHPRI,
					 NUM_ACC_QUEUE);
	if (!gt->usm.acc_wq) {
		destroy_workqueue(gt->usm.pf_wq);
		return -ENOMEM;
	}

	return devm_add_action_or_reset(xe->drm.dev, pagefault_fini, gt);
}

void xe_gt_pagefault_reset(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int i;

	if (!xe->info.has_usm)
		return;

	for (i = 0; i < NUM_PF_QUEUE; ++i) {
		spin_lock_irq(&gt->usm.pf_queue[i].lock);
		gt->usm.pf_queue[i].head = 0;
		gt->usm.pf_queue[i].tail = 0;
		spin_unlock_irq(&gt->usm.pf_queue[i].lock);
	}

	for (i = 0; i < NUM_ACC_QUEUE; ++i) {
		spin_lock(&gt->usm.acc_queue[i].lock);
		gt->usm.acc_queue[i].head = 0;
		gt->usm.acc_queue[i].tail = 0;
		spin_unlock(&gt->usm.acc_queue[i].lock);
	}
}

static int granularity_in_byte(int val)
{
	switch (val) {
	case 0:
		return SZ_128K;
	case 1:
		return SZ_2M;
	case 2:
		return SZ_16M;
	case 3:
		return SZ_64M;
	default:
		return 0;
	}
}

static int sub_granularity_in_byte(int val)
{
	return (granularity_in_byte(val) / 32);
}

static void print_acc(struct xe_device *xe, struct acc *acc)
{
	drm_warn(&xe->drm, "Access counter request:\n"
		 "\tType: %s\n"
		 "\tASID: %d\n"
		 "\tVFID: %d\n"
		 "\tEngine: %d:%d\n"
		 "\tGranularity: 0x%x KB Region/ %d KB sub-granularity\n"
		 "\tSub_Granularity Vector: 0x%08x\n"
		 "\tVA Range base: 0x%016llx\n",
		 acc->access_type ? "AC_NTFY_VAL" : "AC_TRIG_VAL",
		 acc->asid, acc->vfid, acc->engine_class, acc->engine_instance,
		 granularity_in_byte(acc->granularity) / SZ_1K,
		 sub_granularity_in_byte(acc->granularity) / SZ_1K,
		 acc->sub_granularity, acc->va_range_base);
}

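/*
 * The lowest set bit of the sub-granularity vector selects which sub-range
 * of the VA range was accessed; look up the VMA overlapping that page.
 */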
static struct xe_vma *get_acc_vma(struct xe_vm *vm, struct acc *acc)
{
	u64 page_va = acc->va_range_base + (ffs(acc->sub_granularity) - 1) *
		sub_granularity_in_byte(acc->granularity);

	return xe_vm_find_overlapping_vma(vm, page_va, SZ_4K);
}

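/*
 * Handle an access counter trigger by moving the accessed VMA's backing
 * store to this tile's VRAM (on discrete devices); userptr and NULL
 * mappings have nothing to migrate and are skipped.
 */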
static int handle_acc(struct xe_gt *gt, struct acc *acc)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct drm_exec exec;
	struct xe_vm *vm;
	struct xe_vma *vma;
	int ret = 0;

	/* We only support ACC_TRIGGER at the moment */
	if (acc->access_type != ACC_TRIGGER)
		return -EINVAL;

	vm = asid_to_vm(xe, acc->asid);
	if (IS_ERR(vm))
		return PTR_ERR(vm);

	down_read(&vm->lock);

	/* Lookup VMA */
	vma = get_acc_vma(vm, acc);
	if (!vma) {
		ret = -EINVAL;
		goto unlock_vm;
	}

	trace_xe_vma_acc(vma);

	/* Userptr or null can't be migrated, nothing to do */
	if (xe_vma_has_no_bo(vma))
		goto unlock_vm;

	/* Lock VM and BOs dma-resv */
	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		ret = xe_pf_begin(&exec, vma, true, tile->id);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;
	}

	drm_exec_fini(&exec);
unlock_vm:
	up_read(&vm->lock);
	xe_vm_put(vm);

	return ret;
}

#define make_u64(hi__, low__)	((u64)(hi__) << 32 | (u64)(low__))

#define ACC_MSG_LEN_DW	4

static bool get_acc(struct acc_queue *acc_queue, struct acc *acc)
{
	const struct xe_guc_acc_desc *desc;
	bool ret = false;

	spin_lock(&acc_queue->lock);
	if (acc_queue->tail != acc_queue->head) {
		desc = (const struct xe_guc_acc_desc *)
			(acc_queue->data + acc_queue->tail);

		acc->granularity = FIELD_GET(ACC_GRANULARITY, desc->dw2);
		acc->sub_granularity = FIELD_GET(ACC_SUBG_HI, desc->dw1) << 31 |
			FIELD_GET(ACC_SUBG_LO, desc->dw0);
		acc->engine_class = FIELD_GET(ACC_ENG_CLASS, desc->dw1);
		acc->engine_instance = FIELD_GET(ACC_ENG_INSTANCE, desc->dw1);
		acc->asid = FIELD_GET(ACC_ASID, desc->dw1);
		acc->vfid = FIELD_GET(ACC_VFID, desc->dw2);
		acc->access_type = FIELD_GET(ACC_TYPE, desc->dw0);
		acc->va_range_base = make_u64(desc->dw3 & ACC_VIRTUAL_ADDR_RANGE_HI,
					      desc->dw2 & ACC_VIRTUAL_ADDR_RANGE_LO);

		acc_queue->tail = (acc_queue->tail + ACC_MSG_LEN_DW) %
			ACC_QUEUE_NUM_DW;
		ret = true;
	}
	spin_unlock(&acc_queue->lock);

	return ret;
}

static void acc_queue_work_func(struct work_struct *w)
{
	struct acc_queue *acc_queue = container_of(w, struct acc_queue, worker);
	struct xe_gt *gt = acc_queue->gt;
	struct xe_device *xe = gt_to_xe(gt);
	struct acc acc = {};
	unsigned long threshold;
	int ret;

	threshold = jiffies + msecs_to_jiffies(USM_QUEUE_MAX_RUNTIME_MS);

	while (get_acc(acc_queue, &acc)) {
		ret = handle_acc(gt, &acc);
		if (unlikely(ret)) {
			print_acc(xe, &acc);
			drm_warn(&xe->drm, "ACC: Unsuccessful %d\n", ret);
		}

		if (time_after(jiffies, threshold) &&
		    acc_queue->tail != acc_queue->head) {
			queue_work(gt->usm.acc_wq, w);
			break;
		}
	}
}

static bool acc_queue_full(struct acc_queue *acc_queue)
{
	lockdep_assert_held(&acc_queue->lock);

	return CIRC_SPACE(acc_queue->head, acc_queue->tail, ACC_QUEUE_NUM_DW) <=
		ACC_MSG_LEN_DW;
}

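/*
 * G2H access counter handler: copy the notification into the access counter
 * queue selected by ASID and schedule the worker, mirroring the page fault
 * path above.
 */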
int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct acc_queue *acc_queue;
	u32 asid;
	bool full;

	/*
	 * The below logic doesn't work unless ACC_QUEUE_NUM_DW % ACC_MSG_LEN_DW == 0
	 */
	BUILD_BUG_ON(ACC_QUEUE_NUM_DW % ACC_MSG_LEN_DW);

	if (unlikely(len != ACC_MSG_LEN_DW))
		return -EPROTO;

	asid = FIELD_GET(ACC_ASID, msg[1]);
	acc_queue = &gt->usm.acc_queue[asid % NUM_ACC_QUEUE];

	spin_lock(&acc_queue->lock);
	full = acc_queue_full(acc_queue);
	if (!full) {
		memcpy(acc_queue->data + acc_queue->head, msg,
		       len * sizeof(u32));
		acc_queue->head = (acc_queue->head + len) % ACC_QUEUE_NUM_DW;
		queue_work(gt->usm.acc_wq, &acc_queue->worker);
	} else {
		drm_warn(&gt_to_xe(gt)->drm, "ACC Queue full, dropping ACC");
	}
	spin_unlock(&acc_queue->lock);

	return full ? -ENOSPC : 0;
}