// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_gt_pagefault.h"

#include <linux/bitfield.h>
#include <linux/circ_buf.h>

#include <drm/drm_exec.h>
#include <drm/drm_managed.h>

#include "abi/guc_actions_abi.h"
#include "xe_bo.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_gt_stats.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_migrate.h"
#include "xe_svm.h"
#include "xe_trace_bo.h"
#include "xe_vm.h"
#include "xe_vram_types.h"

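/*
 * Decoded form of a GuC page fault descriptor, filled in from the raw
 * dwords queued by xe_guc_pagefault_handler() (see get_pagefault()).
 */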
struct pagefault {
	u64 page_addr;
	u32 asid;
	u16 pdata;
	u8 vfid;
	u8 access_type;
	u8 fault_type;
	u8 fault_level;
	u8 engine_class;
	u8 engine_instance;
	u8 fault_unsuccessful;
	bool trva_fault;
};

enum access_type {
	ACCESS_TYPE_READ = 0,
	ACCESS_TYPE_WRITE = 1,
	ACCESS_TYPE_ATOMIC = 2,
	ACCESS_TYPE_RESERVED = 3,
};

enum fault_type {
	NOT_PRESENT = 0,
	WRITE_ACCESS_VIOLATION = 1,
	ATOMIC_ACCESS_VIOLATION = 2,
};

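/*
 * Decoded form of a GuC access counter notification, filled in from the raw
 * dwords queued by xe_guc_access_counter_notify_handler() (see get_acc()).
 */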
struct acc {
	u64 va_range_base;
	u32 asid;
	u32 sub_granularity;
	u8 granularity;
	u8 vfid;
	u8 access_type;
	u8 engine_class;
	u8 engine_instance;
};

static bool access_is_atomic(enum access_type access_type)
{
	return access_type == ACCESS_TYPE_ATOMIC;
}

static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
{
	return xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
					   vma->tile_invalidated);
}

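/*
 * Lock the VMA's dma-resv under @exec and, if the VMA is backed by a BO,
 * either migrate the BO to @vram or validate it in its current placement.
 */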
static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
		       bool need_vram_move, struct xe_vram_region *vram)
{
	struct xe_bo *bo = xe_vma_bo(vma);
	struct xe_vm *vm = xe_vma_vm(vma);
	int err;

	err = xe_vm_lock_vma(exec, vma);
	if (err)
		return err;

	if (!bo)
		return 0;

	err = need_vram_move ? xe_bo_migrate(bo, vram->placement) :
			       xe_bo_validate(bo, vm, true);

	return err;
}

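/*
 * Service a fault on a regular (BO or userptr backed) VMA: pin userptr pages
 * if needed, validate or migrate the backing store, and rebind the VMA on the
 * faulting tile, retrying on eviction races.
 */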
static int handle_vma_pagefault(struct xe_gt *gt, struct xe_vma *vma,
				bool atomic)
{
	struct xe_vm *vm = xe_vma_vm(vma);
	struct xe_tile *tile = gt_to_tile(gt);
	struct drm_exec exec;
	struct dma_fence *fence;
	ktime_t end = 0;
	int err, needs_vram;

	lockdep_assert_held_write(&vm->lock);

	needs_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic);
	if (needs_vram < 0 || (needs_vram && xe_vma_is_userptr(vma)))
		return needs_vram < 0 ? needs_vram : -EACCES;

	xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT, 1);
	xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_KB, xe_vma_size(vma) / 1024);

	trace_xe_vma_pagefault(vma);

	/* Check if VMA is valid, opportunistic check only */
	if (vma_is_valid(tile, vma) && !atomic)
		return 0;

retry_userptr:
	if (xe_vma_is_userptr(vma) &&
	    xe_vma_userptr_check_repin(to_userptr_vma(vma))) {
		struct xe_userptr_vma *uvma = to_userptr_vma(vma);

		err = xe_vma_userptr_pin_pages(uvma);
		if (err)
			return err;
	}

	/* Lock VM and BOs dma-resv */
	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		err = xe_pf_begin(&exec, vma, needs_vram == 1, tile->mem.vram);
		drm_exec_retry_on_contention(&exec);
		if (xe_vm_validate_should_retry(&exec, err, &end))
			err = -EAGAIN;
		if (err)
			goto unlock_dma_resv;

		/* Bind VMA only to the GT that has faulted */
		trace_xe_vma_pf_bind(vma);
		fence = xe_vma_rebind(vm, vma, BIT(tile->id));
		if (IS_ERR(fence)) {
			err = PTR_ERR(fence);
			if (xe_vm_validate_should_retry(&exec, err, &end))
				err = -EAGAIN;
			goto unlock_dma_resv;
		}
	}

	dma_fence_wait(fence, false);
	dma_fence_put(fence);

unlock_dma_resv:
	drm_exec_fini(&exec);
	if (err == -EAGAIN)
		goto retry_userptr;

	return err;
}

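/*
 * Look up the VM registered for @asid and take a reference; only VMs in
 * fault mode are eligible, otherwise ERR_PTR(-EINVAL) is returned.
 */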
static struct xe_vm *asid_to_vm(struct xe_device *xe, u32 asid)
{
	struct xe_vm *vm;

	down_read(&xe->usm.lock);
	vm = xa_load(&xe->usm.asid_to_vm, asid);
	if (vm && xe_vm_in_fault_mode(vm))
		xe_vm_get(vm);
	else
		vm = ERR_PTR(-EINVAL);
	up_read(&xe->usm.lock);

	return vm;
}

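/*
 * Top-level fault service routine: resolve the faulting VM and VMA from the
 * decoded descriptor and dispatch to either the SVM (CPU address mirror) or
 * the VMA fault handler.
 */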
static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_vm *vm;
	struct xe_vma *vma = NULL;
	int err;
	bool atomic;

	/* SW isn't expected to handle TRTT faults */
	if (pf->trva_fault)
		return -EFAULT;

	vm = asid_to_vm(xe, pf->asid);
	if (IS_ERR(vm))
		return PTR_ERR(vm);

	/*
	 * TODO: Change to read lock? Using write lock for simplicity.
	 */
	down_write(&vm->lock);

	if (xe_vm_is_closed(vm)) {
		err = -ENOENT;
		goto unlock_vm;
	}

	vma = xe_vm_find_vma_by_addr(vm, pf->page_addr);
	if (!vma) {
		err = -EINVAL;
		goto unlock_vm;
	}

	atomic = access_is_atomic(pf->access_type);

	if (xe_vma_is_cpu_addr_mirror(vma))
		err = xe_svm_handle_pagefault(vm, vma, gt,
					      pf->page_addr, atomic);
	else
		err = handle_vma_pagefault(gt, vma, atomic);

unlock_vm:
	if (!err)
		vm->usm.last_fault_vma = vma;
	up_write(&vm->lock);
	xe_vm_put(vm);

	return err;
}

static int send_pagefault_reply(struct xe_guc *guc,
				struct xe_guc_pagefault_reply *reply)
{
	u32 action[] = {
		XE_GUC_ACTION_PAGE_FAULT_RES_DESC,
		reply->dw0,
		reply->dw1,
	};

	return xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
}

static void print_pagefault(struct xe_gt *gt, struct pagefault *pf)
{
	xe_gt_dbg(gt, "\n\tASID: %d\n"
		  "\tVFID: %d\n"
		  "\tPDATA: 0x%04x\n"
		  "\tFaulted Address: 0x%08x%08x\n"
		  "\tFaultType: %d\n"
		  "\tAccessType: %d\n"
		  "\tFaultLevel: %d\n"
		  "\tEngineClass: %d %s\n"
		  "\tEngineInstance: %d\n",
		  pf->asid, pf->vfid, pf->pdata, upper_32_bits(pf->page_addr),
		  lower_32_bits(pf->page_addr),
		  pf->fault_type, pf->access_type, pf->fault_level,
		  pf->engine_class, xe_hw_engine_class_to_str(pf->engine_class),
		  pf->engine_instance);
}

#define PF_MSG_LEN_DW	4

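/*
 * Pop one page fault descriptor off the circular queue, if present, and
 * decode it into @pf. Returns true if a fault was dequeued.
 */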
static bool get_pagefault(struct pf_queue *pf_queue, struct pagefault *pf)
{
	const struct xe_guc_pagefault_desc *desc;
	bool ret = false;

	spin_lock_irq(&pf_queue->lock);
	if (pf_queue->tail != pf_queue->head) {
		desc = (const struct xe_guc_pagefault_desc *)
			(pf_queue->data + pf_queue->tail);

		pf->fault_level = FIELD_GET(PFD_FAULT_LEVEL, desc->dw0);
		pf->trva_fault = FIELD_GET(XE2_PFD_TRVA_FAULT, desc->dw0);
		pf->engine_class = FIELD_GET(PFD_ENG_CLASS, desc->dw0);
		pf->engine_instance = FIELD_GET(PFD_ENG_INSTANCE, desc->dw0);
		pf->pdata = FIELD_GET(PFD_PDATA_HI, desc->dw1) <<
			PFD_PDATA_HI_SHIFT;
		pf->pdata |= FIELD_GET(PFD_PDATA_LO, desc->dw0);
		pf->asid = FIELD_GET(PFD_ASID, desc->dw1);
		pf->vfid = FIELD_GET(PFD_VFID, desc->dw2);
		pf->access_type = FIELD_GET(PFD_ACCESS_TYPE, desc->dw2);
		pf->fault_type = FIELD_GET(PFD_FAULT_TYPE, desc->dw2);
		pf->page_addr = (u64)(FIELD_GET(PFD_VIRTUAL_ADDR_HI, desc->dw3)) <<
			PFD_VIRTUAL_ADDR_HI_SHIFT;
		pf->page_addr |= FIELD_GET(PFD_VIRTUAL_ADDR_LO, desc->dw2) <<
			PFD_VIRTUAL_ADDR_LO_SHIFT;

		pf_queue->tail = (pf_queue->tail + PF_MSG_LEN_DW) %
			pf_queue->num_dw;
		ret = true;
	}
	spin_unlock_irq(&pf_queue->lock);

	return ret;
}

static bool pf_queue_full(struct pf_queue *pf_queue)
{
	lockdep_assert_held(&pf_queue->lock);

	return CIRC_SPACE(pf_queue->head, pf_queue->tail,
			  pf_queue->num_dw) <=
		PF_MSG_LEN_DW;
}

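/*
 * G2H handler: copy the raw fault descriptor into the fault queue selected by
 * ASID and kick that queue's worker to service it. Returns -EPROTO on a
 * malformed message and -ENOSPC if the queue is full.
 */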
int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct pf_queue *pf_queue;
	unsigned long flags;
	u32 asid;
	bool full;

	if (unlikely(len != PF_MSG_LEN_DW))
		return -EPROTO;

	asid = FIELD_GET(PFD_ASID, msg[1]);
	pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE);

	/*
	 * The below logic doesn't work unless pf_queue->num_dw % PF_MSG_LEN_DW == 0
	 */
	xe_gt_assert(gt, !(pf_queue->num_dw % PF_MSG_LEN_DW));

	spin_lock_irqsave(&pf_queue->lock, flags);
	full = pf_queue_full(pf_queue);
	if (!full) {
		memcpy(pf_queue->data + pf_queue->head, msg, len * sizeof(u32));
		pf_queue->head = (pf_queue->head + len) %
			pf_queue->num_dw;
		queue_work(gt->usm.pf_wq, &pf_queue->worker);
	} else {
		xe_gt_warn(gt, "PageFault Queue full, shouldn't be possible\n");
	}
	spin_unlock_irqrestore(&pf_queue->lock, flags);

	return full ? -ENOSPC : 0;
}

#define USM_QUEUE_MAX_RUNTIME_MS	20

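/*
 * Worker: drain the fault queue, servicing each fault and sending a reply to
 * the GuC. Reschedules itself if the queue is not empty after
 * USM_QUEUE_MAX_RUNTIME_MS to avoid starving other work.
 */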
static void pf_queue_work_func(struct work_struct *w)
{
	struct pf_queue *pf_queue = container_of(w, struct pf_queue, worker);
	struct xe_gt *gt = pf_queue->gt;
	struct xe_guc_pagefault_reply reply = {};
	struct pagefault pf = {};
	unsigned long threshold;
	int ret;

	threshold = jiffies + msecs_to_jiffies(USM_QUEUE_MAX_RUNTIME_MS);

	while (get_pagefault(pf_queue, &pf)) {
		ret = handle_pagefault(gt, &pf);
		if (unlikely(ret)) {
			print_pagefault(gt, &pf);
			pf.fault_unsuccessful = 1;
			xe_gt_dbg(gt, "Fault response: Unsuccessful %pe\n", ERR_PTR(ret));
		}

		reply.dw0 = FIELD_PREP(PFR_VALID, 1) |
			FIELD_PREP(PFR_SUCCESS, pf.fault_unsuccessful) |
			FIELD_PREP(PFR_REPLY, PFR_ACCESS) |
			FIELD_PREP(PFR_DESC_TYPE, FAULT_RESPONSE_DESC) |
			FIELD_PREP(PFR_ASID, pf.asid);

		reply.dw1 = FIELD_PREP(PFR_VFID, pf.vfid) |
			FIELD_PREP(PFR_ENG_INSTANCE, pf.engine_instance) |
			FIELD_PREP(PFR_ENG_CLASS, pf.engine_class) |
			FIELD_PREP(PFR_PDATA, pf.pdata);

		send_pagefault_reply(&gt->uc.guc, &reply);

		if (time_after(jiffies, threshold) &&
		    pf_queue->tail != pf_queue->head) {
			queue_work(gt->usm.pf_wq, w);
			break;
		}
	}
}

static void acc_queue_work_func(struct work_struct *w);

static void pagefault_fini(void *arg)
{
	struct xe_gt *gt = arg;
	struct xe_device *xe = gt_to_xe(gt);

	if (!xe->info.has_usm)
		return;

	destroy_workqueue(gt->usm.acc_wq);
	destroy_workqueue(gt->usm.pf_wq);
}

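/*
 * Size and allocate a single fault queue. The queue is sized from the fused-in
 * EU and engine counts since each EU and each command streamer can have a
 * fault outstanding.
 */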
static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue)
{
	struct xe_device *xe = gt_to_xe(gt);
	xe_dss_mask_t all_dss;
	int num_dss, num_eus;

	bitmap_or(all_dss, gt->fuse_topo.g_dss_mask, gt->fuse_topo.c_dss_mask,
		  XE_MAX_DSS_FUSE_BITS);

	num_dss = bitmap_weight(all_dss, XE_MAX_DSS_FUSE_BITS);
	num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss,
				XE_MAX_EU_FUSE_BITS) * num_dss;

	/*
	 * The user can issue separate page faults per EU and per CS.
	 *
	 * XXX: Multiplier required as compute UMDs are getting PF queue errors
	 * without it. Follow up on why this multiplier is required.
	 */
#define PF_MULTIPLIER	8
	pf_queue->num_dw =
		(num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW * PF_MULTIPLIER;
	pf_queue->num_dw = roundup_pow_of_two(pf_queue->num_dw);
#undef PF_MULTIPLIER

	pf_queue->gt = gt;
	pf_queue->data = devm_kcalloc(xe->drm.dev, pf_queue->num_dw,
				      sizeof(u32), GFP_KERNEL);
	if (!pf_queue->data)
		return -ENOMEM;

	spin_lock_init(&pf_queue->lock);
	INIT_WORK(&pf_queue->worker, pf_queue_work_func);

	return 0;
}

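/**
 * xe_gt_pagefault_init() - Initialize GT page fault handling
 * @gt: the GT object
 *
 * Allocate the page fault and access counter queues and their workqueues.
 * A no-op on platforms without unified shared memory (USM) support.
 *
 * Return: 0 on success, negative error code on error.
 */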
int xe_gt_pagefault_init(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int i, ret = 0;

	if (!xe->info.has_usm)
		return 0;

	for (i = 0; i < NUM_PF_QUEUE; ++i) {
		ret = xe_alloc_pf_queue(gt, &gt->usm.pf_queue[i]);
		if (ret)
			return ret;
	}
	for (i = 0; i < NUM_ACC_QUEUE; ++i) {
		gt->usm.acc_queue[i].gt = gt;
		spin_lock_init(&gt->usm.acc_queue[i].lock);
		INIT_WORK(&gt->usm.acc_queue[i].worker, acc_queue_work_func);
	}

	gt->usm.pf_wq = alloc_workqueue("xe_gt_page_fault_work_queue",
					WQ_UNBOUND | WQ_HIGHPRI, NUM_PF_QUEUE);
	if (!gt->usm.pf_wq)
		return -ENOMEM;

	gt->usm.acc_wq = alloc_workqueue("xe_gt_access_counter_work_queue",
					 WQ_UNBOUND | WQ_HIGHPRI,
					 NUM_ACC_QUEUE);
	if (!gt->usm.acc_wq) {
		destroy_workqueue(gt->usm.pf_wq);
		return -ENOMEM;
	}

	return devm_add_action_or_reset(xe->drm.dev, pagefault_fini, gt);
}

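/**
 * xe_gt_pagefault_reset() - Reset GT page fault handling
 * @gt: the GT object
 *
 * Drop any queued page faults and access counter requests, e.g. across a GT
 * reset, by resetting the circular queue heads and tails.
 */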
void xe_gt_pagefault_reset(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int i;

	if (!xe->info.has_usm)
		return;

	for (i = 0; i < NUM_PF_QUEUE; ++i) {
		spin_lock_irq(&gt->usm.pf_queue[i].lock);
		gt->usm.pf_queue[i].head = 0;
		gt->usm.pf_queue[i].tail = 0;
		spin_unlock_irq(&gt->usm.pf_queue[i].lock);
	}

	for (i = 0; i < NUM_ACC_QUEUE; ++i) {
		spin_lock(&gt->usm.acc_queue[i].lock);
		gt->usm.acc_queue[i].head = 0;
		gt->usm.acc_queue[i].tail = 0;
		spin_unlock(&gt->usm.acc_queue[i].lock);
	}
}

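/*
 * Access counter granularity encodings map to region sizes of 128K, 2M, 16M
 * or 64M; each region is tracked with a 32-bit sub-granularity vector, so a
 * sub-granule is 1/32nd of the region.
 */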
static int granularity_in_byte(int val)
{
	switch (val) {
	case 0:
		return SZ_128K;
	case 1:
		return SZ_2M;
	case 2:
		return SZ_16M;
	case 3:
		return SZ_64M;
	default:
		return 0;
	}
}

static int sub_granularity_in_byte(int val)
{
	return (granularity_in_byte(val) / 32);
}

static void print_acc(struct xe_gt *gt, struct acc *acc)
{
	xe_gt_warn(gt, "Access counter request:\n"
		   "\tType: %s\n"
		   "\tASID: %d\n"
		   "\tVFID: %d\n"
		   "\tEngine: %d:%d\n"
		   "\tGranularity: 0x%x KB Region/ %d KB sub-granularity\n"
		   "\tSub_Granularity Vector: 0x%08x\n"
		   "\tVA Range base: 0x%016llx\n",
		   acc->access_type ? "AC_NTFY_VAL" : "AC_TRIG_VAL",
		   acc->asid, acc->vfid, acc->engine_class, acc->engine_instance,
		   granularity_in_byte(acc->granularity) / SZ_1K,
		   sub_granularity_in_byte(acc->granularity) / SZ_1K,
		   acc->sub_granularity, acc->va_range_base);
}

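/*
 * Compute the VA of the lowest set bit in the sub-granularity vector and look
 * up the VMA overlapping that page.
 */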
static struct xe_vma *get_acc_vma(struct xe_vm *vm, struct acc *acc)
{
	u64 page_va = acc->va_range_base + (ffs(acc->sub_granularity) - 1) *
		sub_granularity_in_byte(acc->granularity);

	return xe_vm_find_overlapping_vma(vm, page_va, SZ_4K);
}

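/*
 * Service one access counter request: find the VMA for the reported VA range
 * and, on discrete devices, migrate its BO to the faulting tile's VRAM.
 */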
static int handle_acc(struct xe_gt *gt, struct acc *acc)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct drm_exec exec;
	struct xe_vm *vm;
	struct xe_vma *vma;
	int ret = 0;

	/* We only support ACC_TRIGGER at the moment */
	if (acc->access_type != ACC_TRIGGER)
		return -EINVAL;

	vm = asid_to_vm(xe, acc->asid);
	if (IS_ERR(vm))
		return PTR_ERR(vm);

	down_read(&vm->lock);

	/* Lookup VMA */
	vma = get_acc_vma(vm, acc);
	if (!vma) {
		ret = -EINVAL;
		goto unlock_vm;
	}

	trace_xe_vma_acc(vma);

	/* Userptr or null can't be migrated, nothing to do */
	if (xe_vma_has_no_bo(vma))
		goto unlock_vm;

	/* Lock VM and BOs dma-resv */
	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		ret = xe_pf_begin(&exec, vma, IS_DGFX(vm->xe), tile->mem.vram);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;
	}

	drm_exec_fini(&exec);
unlock_vm:
	up_read(&vm->lock);
	xe_vm_put(vm);

	return ret;
}

#define make_u64(hi__, low__)  ((u64)(hi__) << 32 | (u64)(low__))

#define ACC_MSG_LEN_DW        4

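/*
 * Pop one access counter descriptor off the circular queue, if present, and
 * decode it into @acc. Returns true if a request was dequeued.
 */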
static bool get_acc(struct acc_queue *acc_queue, struct acc *acc)
{
	const struct xe_guc_acc_desc *desc;
	bool ret = false;

	spin_lock(&acc_queue->lock);
	if (acc_queue->tail != acc_queue->head) {
		desc = (const struct xe_guc_acc_desc *)
			(acc_queue->data + acc_queue->tail);

		acc->granularity = FIELD_GET(ACC_GRANULARITY, desc->dw2);
		acc->sub_granularity = FIELD_GET(ACC_SUBG_HI, desc->dw1) << 31 |
			FIELD_GET(ACC_SUBG_LO, desc->dw0);
		acc->engine_class = FIELD_GET(ACC_ENG_CLASS, desc->dw1);
		acc->engine_instance = FIELD_GET(ACC_ENG_INSTANCE, desc->dw1);
		acc->asid = FIELD_GET(ACC_ASID, desc->dw1);
		acc->vfid = FIELD_GET(ACC_VFID, desc->dw2);
		acc->access_type = FIELD_GET(ACC_TYPE, desc->dw0);
		acc->va_range_base = make_u64(desc->dw3 & ACC_VIRTUAL_ADDR_RANGE_HI,
					      desc->dw2 & ACC_VIRTUAL_ADDR_RANGE_LO);

		acc_queue->tail = (acc_queue->tail + ACC_MSG_LEN_DW) %
				  ACC_QUEUE_NUM_DW;
		ret = true;
	}
	spin_unlock(&acc_queue->lock);

	return ret;
}

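/*
 * Worker: drain the access counter queue. Like the fault worker, reschedules
 * itself after USM_QUEUE_MAX_RUNTIME_MS if entries remain.
 */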
static void acc_queue_work_func(struct work_struct *w)
{
	struct acc_queue *acc_queue = container_of(w, struct acc_queue, worker);
	struct xe_gt *gt = acc_queue->gt;
	struct acc acc = {};
	unsigned long threshold;
	int ret;

	threshold = jiffies + msecs_to_jiffies(USM_QUEUE_MAX_RUNTIME_MS);

	while (get_acc(acc_queue, &acc)) {
		ret = handle_acc(gt, &acc);
		if (unlikely(ret)) {
			print_acc(gt, &acc);
			xe_gt_warn(gt, "ACC: Unsuccessful %pe\n", ERR_PTR(ret));
		}

		if (time_after(jiffies, threshold) &&
		    acc_queue->tail != acc_queue->head) {
			queue_work(gt->usm.acc_wq, w);
			break;
		}
	}
}

static bool acc_queue_full(struct acc_queue *acc_queue)
{
	lockdep_assert_held(&acc_queue->lock);

	return CIRC_SPACE(acc_queue->head, acc_queue->tail, ACC_QUEUE_NUM_DW) <=
		ACC_MSG_LEN_DW;
}

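/*
 * G2H handler: copy the raw access counter descriptor into the queue selected
 * by ASID and kick that queue's worker to service it. Returns -EPROTO on a
 * malformed message and -ENOSPC if the queue is full.
 */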
int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct acc_queue *acc_queue;
	u32 asid;
	bool full;

	/*
	 * The below logic doesn't work unless ACC_QUEUE_NUM_DW % ACC_MSG_LEN_DW == 0
	 */
	BUILD_BUG_ON(ACC_QUEUE_NUM_DW % ACC_MSG_LEN_DW);

	if (unlikely(len != ACC_MSG_LEN_DW))
		return -EPROTO;

	asid = FIELD_GET(ACC_ASID, msg[1]);
	acc_queue = &gt->usm.acc_queue[asid % NUM_ACC_QUEUE];

	spin_lock(&acc_queue->lock);
	full = acc_queue_full(acc_queue);
	if (!full) {
		memcpy(acc_queue->data + acc_queue->head, msg,
		       len * sizeof(u32));
		acc_queue->head = (acc_queue->head + len) % ACC_QUEUE_NUM_DW;
		queue_work(gt->usm.acc_wq, &acc_queue->worker);
	} else {
		xe_gt_warn(gt, "ACC Queue full, dropping ACC\n");
	}
	spin_unlock(&acc_queue->lock);

	return full ? -ENOSPC : 0;
}