// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_gt_pagefault.h"

#include <linux/bitfield.h>
#include <linux/circ_buf.h>

#include <drm/drm_exec.h>
#include <drm/drm_managed.h>
#include <drm/ttm/ttm_execbuf_util.h>

#include "abi/guc_actions_abi.h"
#include "xe_bo.h"
#include "xe_gt.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_migrate.h"
#include "xe_pt.h"
#include "xe_trace.h"
#include "xe_vm.h"

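/*
 * Software view of a GuC pagefault descriptor, filled in by get_pagefault()
 * from the raw dwords queued by the G2H handler.
 */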
struct pagefault {
	u64 page_addr;
	u32 asid;
	u16 pdata;
	u8 vfid;
	u8 access_type;
	u8 fault_type;
	u8 fault_level;
	u8 engine_class;
	u8 engine_instance;
	u8 fault_unsuccessful;
	bool trva_fault;
};

enum access_type {
	ACCESS_TYPE_READ = 0,
	ACCESS_TYPE_WRITE = 1,
	ACCESS_TYPE_ATOMIC = 2,
	ACCESS_TYPE_RESERVED = 3,
};

enum fault_type {
	NOT_PRESENT = 0,
	WRITE_ACCESS_VIOLATION = 1,
	ATOMIC_ACCESS_VIOLATION = 2,
};

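/*
 * Software view of a GuC access counter descriptor, filled in by get_acc()
 * from the raw dwords queued by the G2H handler.
 */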
struct acc {
	u64 va_range_base;
	u32 asid;
	u32 sub_granularity;
	u8 granularity;
	u8 vfid;
	u8 access_type;
	u8 engine_class;
	u8 engine_instance;
};

static bool access_is_atomic(enum access_type access_type)
{
	return access_type == ACCESS_TYPE_ATOMIC;
}

static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
{
	return BIT(tile->id) & vma->tile_present &&
		!(BIT(tile->id) & vma->usm.tile_invalidated);
}

static bool vma_matches(struct xe_vma *vma, u64 page_addr)
{
	if (page_addr > xe_vma_end(vma) - 1 ||
	    page_addr + SZ_4K - 1 < xe_vma_start(vma))
		return false;

	return true;
}

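/*
 * Find the VMA covering the faulted page, trying the per-VM cache of the last
 * faulted VMA before falling back to a full lookup.
 */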
static struct xe_vma *lookup_vma(struct xe_vm *vm, u64 page_addr)
{
	struct xe_vma *vma = NULL;

	if (vm->usm.last_fault_vma) {   /* Fast lookup */
		if (vma_matches(vm->usm.last_fault_vma, page_addr))
			vma = vm->usm.last_fault_vma;
	}
	if (!vma)
		vma = xe_vm_find_overlapping_vma(vm, page_addr, SZ_4K);

	return vma;
}

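/*
 * Lock the VMA's dma-resv via drm_exec and make sure backing store exists:
 * atomic faults on discrete devices require the BO in the faulting tile's
 * VRAM (userptr can't be migrated and fails with -EACCES), anything else only
 * needs the BO validated. Must be called inside a drm_exec_until_all_locked()
 * loop.
 */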
static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
		       bool atomic, unsigned int id)
{
	struct xe_bo *bo = xe_vma_bo(vma);
	struct xe_vm *vm = xe_vma_vm(vma);
	unsigned int num_shared = 2; /* slots for bind + move */
	int err;

	err = xe_vm_prepare_vma(exec, vma, num_shared);
	if (err)
		return err;

	if (atomic && IS_DGFX(vm->xe)) {
		if (xe_vma_is_userptr(vma)) {
			err = -EACCES;
			return err;
		}

		/* Migrate to VRAM, move should invalidate the VMA first */
		err = xe_bo_migrate(bo, XE_PL_VRAM0 + id);
		if (err)
			return err;
	} else if (bo) {
		/* Create backing store if needed */
		err = xe_bo_validate(bo, vm, true);
		if (err)
			return err;
	}

	return 0;
}

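/*
 * Service a single pagefault: resolve the ASID to a VM, look up the faulted
 * VMA (repinning userptr pages if needed), (re)bind the VMA on the faulting
 * tile and issue a TLB invalidation. The caller replies to the GuC based on
 * the return value.
 */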
static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct drm_exec exec;
	struct xe_vm *vm;
	struct xe_vma *vma = NULL;
	struct dma_fence *fence;
	bool write_locked;
	int ret = 0;
	bool atomic;

	/* SW isn't expected to handle TRTT faults */
	if (pf->trva_fault)
		return -EFAULT;

	/* ASID to VM */
	mutex_lock(&xe->usm.lock);
	vm = xa_load(&xe->usm.asid_to_vm, pf->asid);
	if (vm && xe_vm_in_fault_mode(vm))
		xe_vm_get(vm);
	else
		vm = NULL;
	mutex_unlock(&xe->usm.lock);
	if (!vm)
		return -EINVAL;

retry_userptr:
	/*
	 * TODO: Avoid exclusive lock if VM doesn't have userptrs, or
	 * start out read-locked?
	 */
	down_write(&vm->lock);
	write_locked = true;
	vma = lookup_vma(vm, pf->page_addr);
	if (!vma) {
		ret = -EINVAL;
		goto unlock_vm;
	}

	if (!xe_vma_is_userptr(vma) ||
	    !xe_vma_userptr_check_repin(to_userptr_vma(vma))) {
		downgrade_write(&vm->lock);
		write_locked = false;
	}

	trace_xe_vma_pagefault(vma);

	atomic = access_is_atomic(pf->access_type);

	/* Check if VMA is valid */
	if (vma_is_valid(tile, vma) && !atomic)
		goto unlock_vm;

	/* TODO: Validate fault */

	if (xe_vma_is_userptr(vma) && write_locked) {
		struct xe_userptr_vma *uvma = to_userptr_vma(vma);

		spin_lock(&vm->userptr.invalidated_lock);
		list_del_init(&uvma->userptr.invalidate_link);
		spin_unlock(&vm->userptr.invalidated_lock);

		ret = xe_vma_userptr_pin_pages(uvma);
		if (ret)
			goto unlock_vm;

		downgrade_write(&vm->lock);
		write_locked = false;
	}

	/* Lock VM and BOs dma-resv */
	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		ret = xe_pf_begin(&exec, vma, atomic, tile->id);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			goto unlock_dma_resv;
	}

	/* Bind VMA only to the GT that has faulted */
	trace_xe_vma_pf_bind(vma);
	fence = __xe_pt_bind_vma(tile, vma, xe_tile_migrate_engine(tile), NULL, 0,
				 vma->tile_present & BIT(tile->id));
	if (IS_ERR(fence)) {
		ret = PTR_ERR(fence);
		goto unlock_dma_resv;
	}

	/*
	 * XXX: Should we drop the lock before waiting? This only helps if doing
	 * GPU binds, which are currently only done if we have to wait for more
	 * than 10ms on a move.
	 */
	dma_fence_wait(fence, false);
	dma_fence_put(fence);

	if (xe_vma_is_userptr(vma))
		ret = xe_vma_userptr_check_repin(to_userptr_vma(vma));
	vma->usm.tile_invalidated &= ~BIT(tile->id);

unlock_dma_resv:
	drm_exec_fini(&exec);
unlock_vm:
	if (!ret)
		vm->usm.last_fault_vma = vma;
	if (write_locked)
		up_write(&vm->lock);
	else
		up_read(&vm->lock);
	if (ret == -EAGAIN)
		goto retry_userptr;

	if (!ret) {
		ret = xe_gt_tlb_invalidation_vma(gt, NULL, vma);
		if (ret >= 0)
			ret = 0;
	}
	xe_vm_put(vm);

	return ret;
}

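/* Send a pagefault response descriptor to the GuC over CT. */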
static int send_pagefault_reply(struct xe_guc *guc,
				struct xe_guc_pagefault_reply *reply)
{
	u32 action[] = {
		XE_GUC_ACTION_PAGE_FAULT_RES_DESC,
		reply->dw0,
		reply->dw1,
	};

	return xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
}

static void print_pagefault(struct xe_device *xe, struct pagefault *pf)
{
	drm_dbg(&xe->drm, "\n\tASID: %d\n"
		 "\tVFID: %d\n"
		 "\tPDATA: 0x%04x\n"
		 "\tFaulted Address: 0x%08x%08x\n"
		 "\tFaultType: %d\n"
		 "\tAccessType: %d\n"
		 "\tFaultLevel: %d\n"
		 "\tEngineClass: %d\n"
		 "\tEngineInstance: %d\n",
		 pf->asid, pf->vfid, pf->pdata, upper_32_bits(pf->page_addr),
		 lower_32_bits(pf->page_addr),
		 pf->fault_type, pf->access_type, pf->fault_level,
		 pf->engine_class, pf->engine_instance);
}

#define PF_MSG_LEN_DW	4

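/*
 * Dequeue one pagefault from the circular buffer, decoding the raw descriptor
 * dwords into *pf. Returns true if a fault was dequeued, false if the queue
 * is empty.
 */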
static bool get_pagefault(struct pf_queue *pf_queue, struct pagefault *pf)
{
	const struct xe_guc_pagefault_desc *desc;
	bool ret = false;

	spin_lock_irq(&pf_queue->lock);
	if (pf_queue->tail != pf_queue->head) {
		desc = (const struct xe_guc_pagefault_desc *)
			(pf_queue->data + pf_queue->tail);

		pf->fault_level = FIELD_GET(PFD_FAULT_LEVEL, desc->dw0);
		pf->trva_fault = FIELD_GET(XE2_PFD_TRVA_FAULT, desc->dw0);
		pf->engine_class = FIELD_GET(PFD_ENG_CLASS, desc->dw0);
		pf->engine_instance = FIELD_GET(PFD_ENG_INSTANCE, desc->dw0);
		pf->pdata = FIELD_GET(PFD_PDATA_HI, desc->dw1) <<
			PFD_PDATA_HI_SHIFT;
		pf->pdata |= FIELD_GET(PFD_PDATA_LO, desc->dw0);
		pf->asid = FIELD_GET(PFD_ASID, desc->dw1);
		pf->vfid = FIELD_GET(PFD_VFID, desc->dw2);
		pf->access_type = FIELD_GET(PFD_ACCESS_TYPE, desc->dw2);
		pf->fault_type = FIELD_GET(PFD_FAULT_TYPE, desc->dw2);
		pf->page_addr = (u64)(FIELD_GET(PFD_VIRTUAL_ADDR_HI, desc->dw3)) <<
			PFD_VIRTUAL_ADDR_HI_SHIFT;
		pf->page_addr |= FIELD_GET(PFD_VIRTUAL_ADDR_LO, desc->dw2) <<
			PFD_VIRTUAL_ADDR_LO_SHIFT;

		pf_queue->tail = (pf_queue->tail + PF_MSG_LEN_DW) %
			PF_QUEUE_NUM_DW;
		ret = true;
	}
	spin_unlock_irq(&pf_queue->lock);

	return ret;
}

static bool pf_queue_full(struct pf_queue *pf_queue)
{
	lockdep_assert_held(&pf_queue->lock);

	return CIRC_SPACE(pf_queue->head, pf_queue->tail, PF_QUEUE_NUM_DW) <=
		PF_MSG_LEN_DW;
}

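/*
 * G2H handler: copy the raw pagefault descriptor into the queue selected by
 * ASID and kick the worker; the actual fault servicing is deferred to
 * pf_queue_work_func().
 */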
int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	struct pf_queue *pf_queue;
	unsigned long flags;
	u32 asid;
	bool full;

	/*
	 * The below logic doesn't work unless PF_QUEUE_NUM_DW % PF_MSG_LEN_DW == 0
	 */
	BUILD_BUG_ON(PF_QUEUE_NUM_DW % PF_MSG_LEN_DW);

	if (unlikely(len != PF_MSG_LEN_DW))
		return -EPROTO;

	asid = FIELD_GET(PFD_ASID, msg[1]);
	pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE);

	spin_lock_irqsave(&pf_queue->lock, flags);
	full = pf_queue_full(pf_queue);
	if (!full) {
		memcpy(pf_queue->data + pf_queue->head, msg, len * sizeof(u32));
		pf_queue->head = (pf_queue->head + len) % PF_QUEUE_NUM_DW;
		queue_work(gt->usm.pf_wq, &pf_queue->worker);
	} else {
		drm_warn(&xe->drm, "PF Queue full, shouldn't be possible");
	}
	spin_unlock_irqrestore(&pf_queue->lock, flags);

	return full ? -ENOSPC : 0;
}

#define USM_QUEUE_MAX_RUNTIME_MS	20

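/*
 * Worker: drain the pagefault queue, handling each fault and sending a reply
 * to the GuC. Requeues itself after USM_QUEUE_MAX_RUNTIME_MS so a single
 * queue doesn't run unbounded.
 */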
static void pf_queue_work_func(struct work_struct *w)
{
	struct pf_queue *pf_queue = container_of(w, struct pf_queue, worker);
	struct xe_gt *gt = pf_queue->gt;
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_guc_pagefault_reply reply = {};
	struct pagefault pf = {};
	unsigned long threshold;
	int ret;

	threshold = jiffies + msecs_to_jiffies(USM_QUEUE_MAX_RUNTIME_MS);

	while (get_pagefault(pf_queue, &pf)) {
		ret = handle_pagefault(gt, &pf);
		if (unlikely(ret)) {
			print_pagefault(xe, &pf);
			pf.fault_unsuccessful = 1;
			drm_dbg(&xe->drm, "Fault response: Unsuccessful %d\n", ret);
		}

		reply.dw0 = FIELD_PREP(PFR_VALID, 1) |
			FIELD_PREP(PFR_SUCCESS, pf.fault_unsuccessful) |
			FIELD_PREP(PFR_REPLY, PFR_ACCESS) |
			FIELD_PREP(PFR_DESC_TYPE, FAULT_RESPONSE_DESC) |
			FIELD_PREP(PFR_ASID, pf.asid);

		reply.dw1 = FIELD_PREP(PFR_VFID, pf.vfid) |
			FIELD_PREP(PFR_ENG_INSTANCE, pf.engine_instance) |
			FIELD_PREP(PFR_ENG_CLASS, pf.engine_class) |
			FIELD_PREP(PFR_PDATA, pf.pdata);

		send_pagefault_reply(&gt->uc.guc, &reply);

		if (time_after(jiffies, threshold) &&
		    pf_queue->tail != pf_queue->head) {
			queue_work(gt->usm.pf_wq, w);
			break;
		}
	}
}

static void acc_queue_work_func(struct work_struct *w);

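/*
 * Set up the pagefault and access counter queues and their workqueues for a
 * GT. A no-op on devices without USM support.
 */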
int xe_gt_pagefault_init(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int i;

	if (!xe->info.has_usm)
		return 0;

	for (i = 0; i < NUM_PF_QUEUE; ++i) {
		gt->usm.pf_queue[i].gt = gt;
		spin_lock_init(&gt->usm.pf_queue[i].lock);
		INIT_WORK(&gt->usm.pf_queue[i].worker, pf_queue_work_func);
	}
	for (i = 0; i < NUM_ACC_QUEUE; ++i) {
		gt->usm.acc_queue[i].gt = gt;
		spin_lock_init(&gt->usm.acc_queue[i].lock);
		INIT_WORK(&gt->usm.acc_queue[i].worker, acc_queue_work_func);
	}

	gt->usm.pf_wq = alloc_workqueue("xe_gt_page_fault_work_queue",
					WQ_UNBOUND | WQ_HIGHPRI, NUM_PF_QUEUE);
	if (!gt->usm.pf_wq)
		return -ENOMEM;

	gt->usm.acc_wq = alloc_workqueue("xe_gt_access_counter_work_queue",
					 WQ_UNBOUND | WQ_HIGHPRI,
					 NUM_ACC_QUEUE);
	if (!gt->usm.acc_wq)
		return -ENOMEM;

	return 0;
}

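/*
 * Drop any queued (and now stale) pagefaults and access counter requests,
 * e.g. across a GT reset.
 */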
void xe_gt_pagefault_reset(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int i;

	if (!xe->info.has_usm)
		return;

	for (i = 0; i < NUM_PF_QUEUE; ++i) {
		spin_lock_irq(&gt->usm.pf_queue[i].lock);
		gt->usm.pf_queue[i].head = 0;
		gt->usm.pf_queue[i].tail = 0;
		spin_unlock_irq(&gt->usm.pf_queue[i].lock);
	}

	for (i = 0; i < NUM_ACC_QUEUE; ++i) {
		spin_lock(&gt->usm.acc_queue[i].lock);
		gt->usm.acc_queue[i].head = 0;
		gt->usm.acc_queue[i].tail = 0;
		spin_unlock(&gt->usm.acc_queue[i].lock);
	}
}

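/* Translate the 2-bit access counter granularity encoding into a region size. */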
static int granularity_in_byte(int val)
{
	switch (val) {
	case 0:
		return SZ_128K;
	case 1:
		return SZ_2M;
	case 2:
		return SZ_16M;
	case 3:
		return SZ_64M;
	default:
		return 0;
	}
}

static int sub_granularity_in_byte(int val)
{
	return (granularity_in_byte(val) / 32);
}

static void print_acc(struct xe_device *xe, struct acc *acc)
{
	drm_warn(&xe->drm, "Access counter request:\n"
		 "\tType: %s\n"
		 "\tASID: %d\n"
		 "\tVFID: %d\n"
		 "\tEngine: %d:%d\n"
		 "\tGranularity: 0x%x KB Region/ %d KB sub-granularity\n"
		 "\tSub_Granularity Vector: 0x%08x\n"
		 "\tVA Range base: 0x%016llx\n",
		 acc->access_type ? "AC_NTFY_VAL" : "AC_TRIG_VAL",
		 acc->asid, acc->vfid, acc->engine_class, acc->engine_instance,
		 granularity_in_byte(acc->granularity) / SZ_1K,
		 sub_granularity_in_byte(acc->granularity) / SZ_1K,
		 acc->sub_granularity, acc->va_range_base);
}

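/*
 * Pick the lowest sub-granule flagged in the sub_granularity bitmask and look
 * up the VMA covering it.
 */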
static struct xe_vma *get_acc_vma(struct xe_vm *vm, struct acc *acc)
{
	u64 page_va = acc->va_range_base + (ffs(acc->sub_granularity) - 1) *
		sub_granularity_in_byte(acc->granularity);

	return xe_vm_find_overlapping_vma(vm, page_va, SZ_4K);
}

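/*
 * Service an access counter request: resolve the ASID to a VM, look up the
 * VMA for the flagged sub-granule and migrate/validate its backing store via
 * xe_pf_begin(). Only ACC_TRIGGER requests are handled.
 */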
static int handle_acc(struct xe_gt *gt, struct acc *acc)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct drm_exec exec;
	struct xe_vm *vm;
	struct xe_vma *vma;
	int ret = 0;

	/* We only support ACC_TRIGGER at the moment */
	if (acc->access_type != ACC_TRIGGER)
		return -EINVAL;

	/* ASID to VM */
	mutex_lock(&xe->usm.lock);
	vm = xa_load(&xe->usm.asid_to_vm, acc->asid);
	if (vm && xe_vm_in_fault_mode(vm))
		xe_vm_get(vm);
	else
		vm = NULL;
	mutex_unlock(&xe->usm.lock);
	if (!vm)
		return -EINVAL;

	down_read(&vm->lock);

	/* Lookup VMA */
	vma = get_acc_vma(vm, acc);
	if (!vma) {
		ret = -EINVAL;
		goto unlock_vm;
	}

	trace_xe_vma_acc(vma);

	/* Userptr or null can't be migrated, nothing to do */
	if (xe_vma_has_no_bo(vma))
		goto unlock_vm;

	/* Lock VM and BOs dma-resv */
	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		ret = xe_pf_begin(&exec, vma, true, tile->id);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;
	}

	drm_exec_fini(&exec);
unlock_vm:
	up_read(&vm->lock);
	xe_vm_put(vm);

	return ret;
}

#define make_u64(hi__, low__)  ((u64)(hi__) << 32 | (u64)(low__))

#define ACC_MSG_LEN_DW        4

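/*
 * Dequeue one access counter request from the circular buffer, decoding the
 * raw descriptor dwords into *acc. Returns true if a request was dequeued,
 * false if the queue is empty.
 */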
static bool get_acc(struct acc_queue *acc_queue, struct acc *acc)
{
	const struct xe_guc_acc_desc *desc;
	bool ret = false;

	spin_lock(&acc_queue->lock);
	if (acc_queue->tail != acc_queue->head) {
		desc = (const struct xe_guc_acc_desc *)
			(acc_queue->data + acc_queue->tail);

		acc->granularity = FIELD_GET(ACC_GRANULARITY, desc->dw2);
		acc->sub_granularity = FIELD_GET(ACC_SUBG_HI, desc->dw1) << 31 |
			FIELD_GET(ACC_SUBG_LO, desc->dw0);
		acc->engine_class = FIELD_GET(ACC_ENG_CLASS, desc->dw1);
		acc->engine_instance = FIELD_GET(ACC_ENG_INSTANCE, desc->dw1);
		acc->asid = FIELD_GET(ACC_ASID, desc->dw1);
		acc->vfid = FIELD_GET(ACC_VFID, desc->dw2);
		acc->access_type = FIELD_GET(ACC_TYPE, desc->dw0);
		acc->va_range_base = make_u64(desc->dw3 & ACC_VIRTUAL_ADDR_RANGE_HI,
					      desc->dw2 & ACC_VIRTUAL_ADDR_RANGE_LO);

		acc_queue->tail = (acc_queue->tail + ACC_MSG_LEN_DW) %
				  ACC_QUEUE_NUM_DW;
		ret = true;
	}
	spin_unlock(&acc_queue->lock);

	return ret;
}

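/*
 * Worker: drain the access counter queue. Like pf_queue_work_func() it
 * requeues itself after USM_QUEUE_MAX_RUNTIME_MS; no reply is sent to the
 * GuC, failures are only logged.
 */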
static void acc_queue_work_func(struct work_struct *w)
{
	struct acc_queue *acc_queue = container_of(w, struct acc_queue, worker);
	struct xe_gt *gt = acc_queue->gt;
	struct xe_device *xe = gt_to_xe(gt);
	struct acc acc = {};
	unsigned long threshold;
	int ret;

	threshold = jiffies + msecs_to_jiffies(USM_QUEUE_MAX_RUNTIME_MS);

	while (get_acc(acc_queue, &acc)) {
		ret = handle_acc(gt, &acc);
		if (unlikely(ret)) {
			print_acc(xe, &acc);
			drm_warn(&xe->drm, "ACC: Unsuccessful %d\n", ret);
		}

		if (time_after(jiffies, threshold) &&
		    acc_queue->tail != acc_queue->head) {
			queue_work(gt->usm.acc_wq, w);
			break;
		}
	}
}

static bool acc_queue_full(struct acc_queue *acc_queue)
{
	lockdep_assert_held(&acc_queue->lock);

	return CIRC_SPACE(acc_queue->head, acc_queue->tail, ACC_QUEUE_NUM_DW) <=
		ACC_MSG_LEN_DW;
}

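/*
 * G2H handler: copy the raw access counter descriptor into the queue selected
 * by ASID and kick the worker, mirroring xe_guc_pagefault_handler().
 */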
int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct acc_queue *acc_queue;
	u32 asid;
	bool full;

	/*
	 * The below logic doesn't work unless ACC_QUEUE_NUM_DW % ACC_MSG_LEN_DW == 0
	 */
	BUILD_BUG_ON(ACC_QUEUE_NUM_DW % ACC_MSG_LEN_DW);

	if (unlikely(len != ACC_MSG_LEN_DW))
		return -EPROTO;

	asid = FIELD_GET(ACC_ASID, msg[1]);
	acc_queue = &gt->usm.acc_queue[asid % NUM_ACC_QUEUE];

	spin_lock(&acc_queue->lock);
	full = acc_queue_full(acc_queue);
	if (!full) {
		memcpy(acc_queue->data + acc_queue->head, msg,
		       len * sizeof(u32));
		acc_queue->head = (acc_queue->head + len) % ACC_QUEUE_NUM_DW;
		queue_work(gt->usm.acc_wq, &acc_queue->worker);
	} else {
		drm_warn(&gt_to_xe(gt)->drm, "ACC Queue full, dropping ACC");
	}
	spin_unlock(&acc_queue->lock);

	return full ? -ENOSPC : 0;
}