xref: /linux/drivers/gpu/drm/xe/xe_gt_pagefault.c (revision f86ad0ed620cb3c91ec7d5468e93ac68d727539d)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_gt_pagefault.h"

#include <linux/bitfield.h>
#include <linux/circ_buf.h>

#include <drm/drm_exec.h>
#include <drm/drm_managed.h>

#include "abi/guc_actions_abi.h"
#include "xe_bo.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_gt_stats.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_migrate.h"
#include "xe_svm.h"
#include "xe_trace_bo.h"
#include "xe_vm.h"

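/*
 * Page fault descriptor as decoded from the GuC G2H message; see
 * get_pagefault() for how each field maps onto the raw dwords.
 */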
struct pagefault {
	u64 page_addr;
	u32 asid;
	u16 pdata;
	u8 vfid;
	u8 access_type;
	u8 fault_type;
	u8 fault_level;
	u8 engine_class;
	u8 engine_instance;
	u8 fault_unsuccessful;
	bool trva_fault;
};

enum access_type {
	ACCESS_TYPE_READ = 0,
	ACCESS_TYPE_WRITE = 1,
	ACCESS_TYPE_ATOMIC = 2,
	ACCESS_TYPE_RESERVED = 3,
};

enum fault_type {
	NOT_PRESENT = 0,
	WRITE_ACCESS_VIOLATION = 1,
	ATOMIC_ACCESS_VIOLATION = 2,
};

struct acc {
	u64 va_range_base;
	u32 asid;
	u32 sub_granularity;
	u8 granularity;
	u8 vfid;
	u8 access_type;
	u8 engine_class;
	u8 engine_instance;
};

static bool access_is_atomic(enum access_type access_type)
{
	return access_type == ACCESS_TYPE_ATOMIC;
}

static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
{
	return xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
					   vma->tile_invalidated);
}

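/*
 * Lock the VMA's dma-resv and prepare its backing store: an atomic fault on
 * dGPU migrates the BO to this tile's VRAM (userptr can't be migrated, so
 * atomics on it fail with -EACCES); otherwise the BO is merely validated so
 * backing store exists.
 */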
static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
		       bool atomic, unsigned int id)
{
	struct xe_bo *bo = xe_vma_bo(vma);
	struct xe_vm *vm = xe_vma_vm(vma);
	int err;

	err = xe_vm_lock_vma(exec, vma);
	if (err)
		return err;

	if (atomic && IS_DGFX(vm->xe)) {
		if (xe_vma_is_userptr(vma)) {
			err = -EACCES;
			return err;
		}

		/* Migrate to VRAM, move should invalidate the VMA first */
		err = xe_bo_migrate(bo, XE_PL_VRAM0 + id);
		if (err)
			return err;
	} else if (bo) {
		/* Create backing store if needed */
		err = xe_bo_validate(bo, vm, true);
		if (err)
			return err;
	}

	return 0;
}

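/*
 * Service a fault on a single VMA: repin userptr pages if needed, lock the
 * dma-resv objects, validate/migrate the backing store and rebind the VMA on
 * the faulting tile only. -EAGAIN from validation or rebind restarts the
 * whole sequence from the userptr repin.
 */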
static int handle_vma_pagefault(struct xe_gt *gt, struct xe_vma *vma,
				bool atomic)
{
	struct xe_vm *vm = xe_vma_vm(vma);
	struct xe_tile *tile = gt_to_tile(gt);
	struct drm_exec exec;
	struct dma_fence *fence;
	ktime_t end = 0;
	int err;

	lockdep_assert_held_write(&vm->lock);

	xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT, 1);
	xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_KB, xe_vma_size(vma) / 1024);

	trace_xe_vma_pagefault(vma);

	/* Check if VMA is valid, opportunistic check only */
	if (vma_is_valid(tile, vma) && !atomic)
		return 0;

retry_userptr:
	if (xe_vma_is_userptr(vma) &&
	    xe_vma_userptr_check_repin(to_userptr_vma(vma))) {
		struct xe_userptr_vma *uvma = to_userptr_vma(vma);

		err = xe_vma_userptr_pin_pages(uvma);
		if (err)
			return err;
	}

	/* Lock VM and BOs dma-resv */
	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		err = xe_pf_begin(&exec, vma, atomic, tile->id);
		drm_exec_retry_on_contention(&exec);
		if (xe_vm_validate_should_retry(&exec, err, &end))
			err = -EAGAIN;
		if (err)
			goto unlock_dma_resv;

		/* Bind VMA only to the GT that has faulted */
		trace_xe_vma_pf_bind(vma);
		fence = xe_vma_rebind(vm, vma, BIT(tile->id));
		if (IS_ERR(fence)) {
			err = PTR_ERR(fence);
			if (xe_vm_validate_should_retry(&exec, err, &end))
				err = -EAGAIN;
			goto unlock_dma_resv;
		}
	}

	dma_fence_wait(fence, false);
	dma_fence_put(fence);

unlock_dma_resv:
	drm_exec_fini(&exec);
	if (err == -EAGAIN)
		goto retry_userptr;

	return err;
}

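/*
 * Look up the VM for an ASID and take a reference on it; only VMs in fault
 * mode are eligible, anything else resolves to -EINVAL.
 */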
static struct xe_vm *asid_to_vm(struct xe_device *xe, u32 asid)
{
	struct xe_vm *vm;

	down_read(&xe->usm.lock);
	vm = xa_load(&xe->usm.asid_to_vm, asid);
	if (vm && xe_vm_in_fault_mode(vm))
		xe_vm_get(vm);
	else
		vm = ERR_PTR(-EINVAL);
	up_read(&xe->usm.lock);

	return vm;
}

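/*
 * Top-level fault service: resolve the ASID to a VM, find the VMA covering
 * the faulted address and dispatch to either the SVM path (CPU address
 * mirror VMAs) or the regular VMA path above.
 */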
static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_vm *vm;
	struct xe_vma *vma = NULL;
	int err;
	bool atomic;

	/* SW isn't expected to handle TRTT faults */
	if (pf->trva_fault)
		return -EFAULT;

	vm = asid_to_vm(xe, pf->asid);
	if (IS_ERR(vm))
		return PTR_ERR(vm);

	/*
	 * TODO: Change to read lock? Using write lock for simplicity.
	 */
	down_write(&vm->lock);

	if (xe_vm_is_closed(vm)) {
		err = -ENOENT;
		goto unlock_vm;
	}

	vma = xe_vm_find_vma_by_addr(vm, pf->page_addr);
	if (!vma) {
		err = -EINVAL;
		goto unlock_vm;
	}

	atomic = access_is_atomic(pf->access_type);

	if (xe_vma_is_cpu_addr_mirror(vma))
		err = xe_svm_handle_pagefault(vm, vma, gt,
					      pf->page_addr, atomic);
	else
		err = handle_vma_pagefault(gt, vma, atomic);

unlock_vm:
	if (!err)
		vm->usm.last_fault_vma = vma;
	up_write(&vm->lock);
	xe_vm_put(vm);

	return err;
}

static int send_pagefault_reply(struct xe_guc *guc,
				struct xe_guc_pagefault_reply *reply)
{
	u32 action[] = {
		XE_GUC_ACTION_PAGE_FAULT_RES_DESC,
		reply->dw0,
		reply->dw1,
	};

	return xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
}

static void print_pagefault(struct xe_gt *gt, struct pagefault *pf)
{
	xe_gt_dbg(gt, "\n\tASID: %d\n"
		  "\tVFID: %d\n"
		  "\tPDATA: 0x%04x\n"
		  "\tFaulted Address: 0x%08x%08x\n"
		  "\tFaultType: %d\n"
		  "\tAccessType: %d\n"
		  "\tFaultLevel: %d\n"
		  "\tEngineClass: %d %s\n"
		  "\tEngineInstance: %d\n",
		  pf->asid, pf->vfid, pf->pdata, upper_32_bits(pf->page_addr),
		  lower_32_bits(pf->page_addr),
		  pf->fault_type, pf->access_type, pf->fault_level,
		  pf->engine_class, xe_hw_engine_class_to_str(pf->engine_class),
		  pf->engine_instance);
}

#define PF_MSG_LEN_DW	4

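/*
 * Pop one PF_MSG_LEN_DW-sized descriptor off the circular page fault queue
 * and decode it. Returns false when the queue is empty.
 */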
static bool get_pagefault(struct pf_queue *pf_queue, struct pagefault *pf)
{
	const struct xe_guc_pagefault_desc *desc;
	bool ret = false;

	spin_lock_irq(&pf_queue->lock);
	if (pf_queue->tail != pf_queue->head) {
		desc = (const struct xe_guc_pagefault_desc *)
			(pf_queue->data + pf_queue->tail);

		pf->fault_level = FIELD_GET(PFD_FAULT_LEVEL, desc->dw0);
		pf->trva_fault = FIELD_GET(XE2_PFD_TRVA_FAULT, desc->dw0);
		pf->engine_class = FIELD_GET(PFD_ENG_CLASS, desc->dw0);
		pf->engine_instance = FIELD_GET(PFD_ENG_INSTANCE, desc->dw0);
		pf->pdata = FIELD_GET(PFD_PDATA_HI, desc->dw1) <<
			PFD_PDATA_HI_SHIFT;
		pf->pdata |= FIELD_GET(PFD_PDATA_LO, desc->dw0);
		pf->asid = FIELD_GET(PFD_ASID, desc->dw1);
		pf->vfid = FIELD_GET(PFD_VFID, desc->dw2);
		pf->access_type = FIELD_GET(PFD_ACCESS_TYPE, desc->dw2);
		pf->fault_type = FIELD_GET(PFD_FAULT_TYPE, desc->dw2);
		pf->page_addr = (u64)(FIELD_GET(PFD_VIRTUAL_ADDR_HI, desc->dw3)) <<
			PFD_VIRTUAL_ADDR_HI_SHIFT;
		pf->page_addr |= FIELD_GET(PFD_VIRTUAL_ADDR_LO, desc->dw2) <<
			PFD_VIRTUAL_ADDR_LO_SHIFT;

		pf_queue->tail = (pf_queue->tail + PF_MSG_LEN_DW) %
			pf_queue->num_dw;
		ret = true;
	}
	spin_unlock_irq(&pf_queue->lock);

	return ret;
}

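/*
 * The queue counts as full once CIRC_SPACE() can no longer accept one more
 * whole PF_MSG_LEN_DW descriptor.
 */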
static bool pf_queue_full(struct pf_queue *pf_queue)
{
	lockdep_assert_held(&pf_queue->lock);

	return CIRC_SPACE(pf_queue->head, pf_queue->tail,
			  pf_queue->num_dw) <=
		PF_MSG_LEN_DW;
}

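/**
 * xe_guc_pagefault_handler() - G2H page fault handler
 * @guc: the &xe_guc backing this GT
 * @msg: G2H message, one page fault descriptor
 * @len: message length in dwords, must be PF_MSG_LEN_DW
 *
 * Intended to be called from the G2H CT processing path, so it only copies
 * the descriptor into the page fault queue selected by ASID and kicks the
 * worker that services it.
 *
 * Return: 0 on success, -EPROTO on a malformed message, -ENOSPC if the
 * queue is full.
 */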
int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct pf_queue *pf_queue;
	unsigned long flags;
	u32 asid;
	bool full;

	if (unlikely(len != PF_MSG_LEN_DW))
		return -EPROTO;

	asid = FIELD_GET(PFD_ASID, msg[1]);
	pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE);

	/*
	 * The below logic doesn't work unless pf_queue->num_dw %
	 * PF_MSG_LEN_DW == 0
	 */
	xe_gt_assert(gt, !(pf_queue->num_dw % PF_MSG_LEN_DW));

	spin_lock_irqsave(&pf_queue->lock, flags);
	full = pf_queue_full(pf_queue);
	if (!full) {
		memcpy(pf_queue->data + pf_queue->head, msg, len * sizeof(u32));
		pf_queue->head = (pf_queue->head + len) %
			pf_queue->num_dw;
		queue_work(gt->usm.pf_wq, &pf_queue->worker);
	} else {
		xe_gt_warn(gt, "PageFault Queue full, shouldn't be possible\n");
	}
	spin_unlock_irqrestore(&pf_queue->lock, flags);

	return full ? -ENOSPC : 0;
}

#define USM_QUEUE_MAX_RUNTIME_MS	20

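/*
 * Worker that drains the page fault queue: each fault is serviced and a
 * reply (success or failure) is sent back to the GuC. After
 * USM_QUEUE_MAX_RUNTIME_MS of runtime the worker requeues itself,
 * presumably so one busy queue can't monopolize the workqueue.
 */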
static void pf_queue_work_func(struct work_struct *w)
{
	struct pf_queue *pf_queue = container_of(w, struct pf_queue, worker);
	struct xe_gt *gt = pf_queue->gt;
	struct xe_guc_pagefault_reply reply = {};
	struct pagefault pf = {};
	unsigned long threshold;
	int ret;

	threshold = jiffies + msecs_to_jiffies(USM_QUEUE_MAX_RUNTIME_MS);

	while (get_pagefault(pf_queue, &pf)) {
		ret = handle_pagefault(gt, &pf);
		if (unlikely(ret)) {
			print_pagefault(gt, &pf);
			pf.fault_unsuccessful = 1;
			xe_gt_dbg(gt, "Fault response: Unsuccessful %pe\n", ERR_PTR(ret));
		}

		reply.dw0 = FIELD_PREP(PFR_VALID, 1) |
			FIELD_PREP(PFR_SUCCESS, pf.fault_unsuccessful) |
			FIELD_PREP(PFR_REPLY, PFR_ACCESS) |
			FIELD_PREP(PFR_DESC_TYPE, FAULT_RESPONSE_DESC) |
			FIELD_PREP(PFR_ASID, pf.asid);

		reply.dw1 = FIELD_PREP(PFR_VFID, pf.vfid) |
			FIELD_PREP(PFR_ENG_INSTANCE, pf.engine_instance) |
			FIELD_PREP(PFR_ENG_CLASS, pf.engine_class) |
			FIELD_PREP(PFR_PDATA, pf.pdata);

		send_pagefault_reply(&gt->uc.guc, &reply);

		if (time_after(jiffies, threshold) &&
		    pf_queue->tail != pf_queue->head) {
			queue_work(gt->usm.pf_wq, w);
			break;
		}
	}
}

static void acc_queue_work_func(struct work_struct *w);

static void pagefault_fini(void *arg)
{
	struct xe_gt *gt = arg;
	struct xe_device *xe = gt_to_xe(gt);

	if (!xe->info.has_usm)
		return;

	destroy_workqueue(gt->usm.acc_wq);
	destroy_workqueue(gt->usm.pf_wq);
}

static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue)
{
	struct xe_device *xe = gt_to_xe(gt);
	xe_dss_mask_t all_dss;
	int num_dss, num_eus;

	bitmap_or(all_dss, gt->fuse_topo.g_dss_mask, gt->fuse_topo.c_dss_mask,
		  XE_MAX_DSS_FUSE_BITS);

	num_dss = bitmap_weight(all_dss, XE_MAX_DSS_FUSE_BITS);
	num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss,
				XE_MAX_EU_FUSE_BITS) * num_dss;

	/*
	 * Each EU and each CS can issue separate page faults.
	 *
	 * XXX: A multiplier is required as compute UMDs are getting PF queue
	 * errors without it. Follow up on why this multiplier is required.
	 */
#define PF_MULTIPLIER	8
	pf_queue->num_dw =
		(num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW * PF_MULTIPLIER;
#undef PF_MULTIPLIER

	pf_queue->gt = gt;
	pf_queue->data = devm_kcalloc(xe->drm.dev, pf_queue->num_dw,
				      sizeof(u32), GFP_KERNEL);
	if (!pf_queue->data)
		return -ENOMEM;

	spin_lock_init(&pf_queue->lock);
	INIT_WORK(&pf_queue->worker, pf_queue_work_func);

	return 0;
}

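/**
 * xe_gt_pagefault_init() - Initialize GT page fault and access counter queues
 * @gt: the GT structure
 *
 * Allocates the page fault queues sized from the GT topology, initializes
 * the access counter queues and creates the workqueues that service both.
 * A no-op on platforms without USM.
 *
 * Return: 0 on success, negative error code on failure.
 */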
int xe_gt_pagefault_init(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int i, ret = 0;

	if (!xe->info.has_usm)
		return 0;

	for (i = 0; i < NUM_PF_QUEUE; ++i) {
		ret = xe_alloc_pf_queue(gt, &gt->usm.pf_queue[i]);
		if (ret)
			return ret;
	}
	for (i = 0; i < NUM_ACC_QUEUE; ++i) {
		gt->usm.acc_queue[i].gt = gt;
		spin_lock_init(&gt->usm.acc_queue[i].lock);
		INIT_WORK(&gt->usm.acc_queue[i].worker, acc_queue_work_func);
	}

	gt->usm.pf_wq = alloc_workqueue("xe_gt_page_fault_work_queue",
					WQ_UNBOUND | WQ_HIGHPRI, NUM_PF_QUEUE);
	if (!gt->usm.pf_wq)
		return -ENOMEM;

	gt->usm.acc_wq = alloc_workqueue("xe_gt_access_counter_work_queue",
					 WQ_UNBOUND | WQ_HIGHPRI,
					 NUM_ACC_QUEUE);
	if (!gt->usm.acc_wq) {
		destroy_workqueue(gt->usm.pf_wq);
		return -ENOMEM;
	}

	return devm_add_action_or_reset(xe->drm.dev, pagefault_fini, gt);
}

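/**
 * xe_gt_pagefault_reset() - Reset GT page fault and access counter queues
 * @gt: the GT structure
 *
 * Drops all queued but unserviced page faults and access counter requests
 * by resetting the head and tail of each circular queue, e.g. across a GT
 * reset.
 */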
void xe_gt_pagefault_reset(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int i;

	if (!xe->info.has_usm)
		return;

	for (i = 0; i < NUM_PF_QUEUE; ++i) {
		spin_lock_irq(&gt->usm.pf_queue[i].lock);
		gt->usm.pf_queue[i].head = 0;
		gt->usm.pf_queue[i].tail = 0;
		spin_unlock_irq(&gt->usm.pf_queue[i].lock);
	}

	for (i = 0; i < NUM_ACC_QUEUE; ++i) {
		spin_lock(&gt->usm.acc_queue[i].lock);
		gt->usm.acc_queue[i].head = 0;
		gt->usm.acc_queue[i].tail = 0;
		spin_unlock(&gt->usm.acc_queue[i].lock);
	}
}

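/*
 * Access counter granularity: the 2-bit encoding selects the tracked region
 * size (128K/2M/16M/64M); each region is split into 32 sub-granules reported
 * via the sub_granularity bit vector, e.g. granularity == 1 tracks a 2M
 * region in 64K sub-granules.
 */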
static int granularity_in_byte(int val)
{
	switch (val) {
	case 0:
		return SZ_128K;
	case 1:
		return SZ_2M;
	case 2:
		return SZ_16M;
	case 3:
		return SZ_64M;
	default:
		return 0;
	}
}

static int sub_granularity_in_byte(int val)
{
	return (granularity_in_byte(val) / 32);
}

static void print_acc(struct xe_gt *gt, struct acc *acc)
{
	xe_gt_warn(gt, "Access counter request:\n"
		   "\tType: %s\n"
		   "\tASID: %d\n"
		   "\tVFID: %d\n"
		   "\tEngine: %d:%d\n"
		   "\tGranularity: 0x%x KB Region/ %d KB sub-granularity\n"
		   "\tSub_Granularity Vector: 0x%08x\n"
		   "\tVA Range base: 0x%016llx\n",
		   acc->access_type ? "AC_NTFY_VAL" : "AC_TRIG_VAL",
		   acc->asid, acc->vfid, acc->engine_class, acc->engine_instance,
		   granularity_in_byte(acc->granularity) / SZ_1K,
		   sub_granularity_in_byte(acc->granularity) / SZ_1K,
		   acc->sub_granularity, acc->va_range_base);
}

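/*
 * Use ffs() to pick the lowest sub-granule flagged in the vector, compute
 * its base VA and look up a VMA overlapping the first 4K page there.
 */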
static struct xe_vma *get_acc_vma(struct xe_vm *vm, struct acc *acc)
{
	u64 page_va = acc->va_range_base + (ffs(acc->sub_granularity) - 1) *
		sub_granularity_in_byte(acc->granularity);

	return xe_vm_find_overlapping_vma(vm, page_va, SZ_4K);
}

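/*
 * Service an access counter trigger by migrating the touched BO to this
 * tile's VRAM via xe_pf_begin(); userptr and NULL VMAs have no BO to move.
 */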
static int handle_acc(struct xe_gt *gt, struct acc *acc)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct drm_exec exec;
	struct xe_vm *vm;
	struct xe_vma *vma;
	int ret = 0;

	/* We only support ACC_TRIGGER at the moment */
	if (acc->access_type != ACC_TRIGGER)
		return -EINVAL;

	vm = asid_to_vm(xe, acc->asid);
	if (IS_ERR(vm))
		return PTR_ERR(vm);

	down_read(&vm->lock);

	/* Lookup VMA */
	vma = get_acc_vma(vm, acc);
	if (!vma) {
		ret = -EINVAL;
		goto unlock_vm;
	}

	trace_xe_vma_acc(vma);

	/* Userptr or null can't be migrated, nothing to do */
	if (xe_vma_has_no_bo(vma))
		goto unlock_vm;

	/* Lock VM and BOs dma-resv */
	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		ret = xe_pf_begin(&exec, vma, true, tile->id);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;
	}

	drm_exec_fini(&exec);
unlock_vm:
	up_read(&vm->lock);
	xe_vm_put(vm);

	return ret;
}

#define make_u64(hi__, low__)  ((u64)(hi__) << 32 | (u64)(low__))

#define ACC_MSG_LEN_DW        4

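/*
 * Pop one ACC_MSG_LEN_DW-sized descriptor off the circular access counter
 * queue and decode it. Returns false when the queue is empty.
 */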
static bool get_acc(struct acc_queue *acc_queue, struct acc *acc)
{
	const struct xe_guc_acc_desc *desc;
	bool ret = false;

	spin_lock(&acc_queue->lock);
	if (acc_queue->tail != acc_queue->head) {
		desc = (const struct xe_guc_acc_desc *)
			(acc_queue->data + acc_queue->tail);

		acc->granularity = FIELD_GET(ACC_GRANULARITY, desc->dw2);
		acc->sub_granularity = FIELD_GET(ACC_SUBG_HI, desc->dw1) << 31 |
			FIELD_GET(ACC_SUBG_LO, desc->dw0);
		acc->engine_class = FIELD_GET(ACC_ENG_CLASS, desc->dw1);
		acc->engine_instance = FIELD_GET(ACC_ENG_INSTANCE, desc->dw1);
		acc->asid = FIELD_GET(ACC_ASID, desc->dw1);
		acc->vfid = FIELD_GET(ACC_VFID, desc->dw2);
		acc->access_type = FIELD_GET(ACC_TYPE, desc->dw0);
		acc->va_range_base = make_u64(desc->dw3 & ACC_VIRTUAL_ADDR_RANGE_HI,
					      desc->dw2 & ACC_VIRTUAL_ADDR_RANGE_LO);

		acc_queue->tail = (acc_queue->tail + ACC_MSG_LEN_DW) %
				  ACC_QUEUE_NUM_DW;
		ret = true;
	}
	spin_unlock(&acc_queue->lock);

	return ret;
}

static void acc_queue_work_func(struct work_struct *w)
{
	struct acc_queue *acc_queue = container_of(w, struct acc_queue, worker);
	struct xe_gt *gt = acc_queue->gt;
	struct acc acc = {};
	unsigned long threshold;
	int ret;

	threshold = jiffies + msecs_to_jiffies(USM_QUEUE_MAX_RUNTIME_MS);

	while (get_acc(acc_queue, &acc)) {
		ret = handle_acc(gt, &acc);
		if (unlikely(ret)) {
			print_acc(gt, &acc);
			xe_gt_warn(gt, "ACC: Unsuccessful %pe\n", ERR_PTR(ret));
		}

		if (time_after(jiffies, threshold) &&
		    acc_queue->tail != acc_queue->head) {
			queue_work(gt->usm.acc_wq, w);
			break;
		}
	}
}

static bool acc_queue_full(struct acc_queue *acc_queue)
{
	lockdep_assert_held(&acc_queue->lock);

	return CIRC_SPACE(acc_queue->head, acc_queue->tail, ACC_QUEUE_NUM_DW) <=
		ACC_MSG_LEN_DW;
}

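/**
 * xe_guc_access_counter_notify_handler() - G2H access counter handler
 * @guc: the &xe_guc backing this GT
 * @msg: G2H message, one access counter descriptor
 * @len: message length in dwords, must be ACC_MSG_LEN_DW
 *
 * Mirrors xe_guc_pagefault_handler(): copies the descriptor into the access
 * counter queue selected by ASID and kicks the worker that services it.
 *
 * Return: 0 on success, -EPROTO on a malformed message, -ENOSPC if the
 * queue is full (the request is dropped).
 */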
int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct acc_queue *acc_queue;
	u32 asid;
	bool full;

	/*
	 * The below logic doesn't work unless ACC_QUEUE_NUM_DW % ACC_MSG_LEN_DW == 0
	 */
	BUILD_BUG_ON(ACC_QUEUE_NUM_DW % ACC_MSG_LEN_DW);

	if (unlikely(len != ACC_MSG_LEN_DW))
		return -EPROTO;

	asid = FIELD_GET(ACC_ASID, msg[1]);
	acc_queue = &gt->usm.acc_queue[asid % NUM_ACC_QUEUE];

	spin_lock(&acc_queue->lock);
	full = acc_queue_full(acc_queue);
	if (!full) {
		memcpy(acc_queue->data + acc_queue->head, msg,
		       len * sizeof(u32));
		acc_queue->head = (acc_queue->head + len) % ACC_QUEUE_NUM_DW;
		queue_work(gt->usm.acc_wq, &acc_queue->worker);
	} else {
		xe_gt_warn(gt, "ACC Queue full, dropping ACC\n");
	}
	spin_unlock(&acc_queue->lock);

	return full ? -ENOSPC : 0;
}