xref: /linux/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c (revision 5f2b6c5f6b692c696a232d12c43b8e41c0d393b9)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gt_tlb_invalidation.h"

#include "abi/guc_actions_abi.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_gt_stats.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_trace.h"
#include "regs/xe_guc_regs.h"

#define FENCE_STACK_BIT		DMA_FENCE_FLAG_USER_BITS

/*
 * TLB invalidation depends on pending commands in the CT queue and then the
 * real invalidation time. Double the time to process the full CT queue just
 * to be on the safe side.
 */
static long tlb_timeout_jiffies(struct xe_gt *gt)
{
	/* this reflects what HW/GuC needs to process TLB inv request */
	const long hw_tlb_timeout = HZ / 4;

	/* this estimates actual delay caused by the CTB transport */
	long delay = xe_guc_ct_queue_proc_time_jiffies(&gt->uc.guc.ct);

	return hw_tlb_timeout + 2 * delay;
}
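
/*
 * Worked example (numbers are illustrative only): with HZ = 250 the fixed
 * HW/GuC budget is HZ / 4 = 62 jiffies (~250 ms). If the CTB transport
 * estimate reports, say, 20 jiffies of backlog, the resulting timeout is
 * 62 + 2 * 20 = 102 jiffies, i.e. the CT processing estimate is doubled on
 * top of the fixed hardware budget.
 */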

static void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence)
{
	if (WARN_ON_ONCE(!fence->gt))
		return;

	xe_pm_runtime_put(gt_to_xe(fence->gt));
	fence->gt = NULL; /* fini() should be called once */
}

static void
__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
{
	bool stack = test_bit(FENCE_STACK_BIT, &fence->base.flags);

	trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
	xe_gt_tlb_invalidation_fence_fini(fence);
	dma_fence_signal(&fence->base);
	if (!stack)
		dma_fence_put(&fence->base);
}

static void
invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
{
	list_del(&fence->link);
	__invalidation_fence_signal(xe, fence);
}

void xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence)
{
	if (WARN_ON_ONCE(!fence->gt))
		return;

	__invalidation_fence_signal(gt_to_xe(fence->gt), fence);
}

static void xe_gt_tlb_fence_timeout(struct work_struct *work)
{
	struct xe_gt *gt = container_of(work, struct xe_gt,
					tlb_invalidation.fence_tdr.work);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gt_tlb_invalidation_fence *fence, *next;

	LNL_FLUSH_WORK(&gt->uc.guc.ct.g2h_worker);

	spin_lock_irq(&gt->tlb_invalidation.pending_lock);
	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link) {
		s64 since_inval_ms = ktime_ms_delta(ktime_get(),
						    fence->invalidation_time);

		if (msecs_to_jiffies(since_inval_ms) < tlb_timeout_jiffies(gt))
			break;

		trace_xe_gt_tlb_invalidation_fence_timeout(xe, fence);
		xe_gt_err(gt, "TLB invalidation fence timeout, seqno=%d recv=%d",
			  fence->seqno, gt->tlb_invalidation.seqno_recv);

		fence->base.error = -ETIME;
		invalidation_fence_signal(xe, fence);
	}
	if (!list_empty(&gt->tlb_invalidation.pending_fences))
		queue_delayed_work(system_wq,
				   &gt->tlb_invalidation.fence_tdr,
				   tlb_timeout_jiffies(gt));
	spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
}

/**
 * xe_gt_tlb_invalidation_init_early - Initialize GT TLB invalidation state
 * @gt: GT structure
 *
 * Initialize GT TLB invalidation state. This is purely software initialization
 * and should be called once during driver load.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt)
{
	gt->tlb_invalidation.seqno = 1;
	INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
	spin_lock_init(&gt->tlb_invalidation.pending_lock);
	spin_lock_init(&gt->tlb_invalidation.lock);
	INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
			  xe_gt_tlb_fence_timeout);

	return 0;
}

/**
 * xe_gt_tlb_invalidation_reset - GT TLB invalidation reset
 * @gt: GT structure
 *
 * Signal any pending invalidation fences. Should be called during a GT reset.
 */
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
{
	struct xe_gt_tlb_invalidation_fence *fence, *next;
	int pending_seqno;

	/*
	 * We can get here before the CTs are even initialized if we're wedging
	 * very early, in which case there are not going to be any pending
	 * fences so we can bail immediately.
	 */
	if (!xe_guc_ct_initialized(&gt->uc.guc.ct))
		return;

	/*
	 * The CT channel is already disabled at this point. No new TLB requests
	 * can appear.
	 */

	mutex_lock(&gt->uc.guc.ct.lock);
	spin_lock_irq(&gt->tlb_invalidation.pending_lock);
	cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
	/*
	 * We might have various kworkers waiting for TLB flushes to complete
	 * which are not tracked with an explicit TLB fence, however at this
	 * stage that will never happen since the CT is already disabled, so
	 * make sure we signal them here under the assumption that we have
	 * completed a full GT reset.
	 */
	if (gt->tlb_invalidation.seqno == 1)
		pending_seqno = TLB_INVALIDATION_SEQNO_MAX - 1;
	else
		pending_seqno = gt->tlb_invalidation.seqno - 1;
	WRITE_ONCE(gt->tlb_invalidation.seqno_recv, pending_seqno);

	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link)
		invalidation_fence_signal(gt_to_xe(gt), fence);
	spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
	mutex_unlock(&gt->uc.guc.ct.lock);
}

static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
{
	int seqno_recv = READ_ONCE(gt->tlb_invalidation.seqno_recv);

	if (seqno - seqno_recv < -(TLB_INVALIDATION_SEQNO_MAX / 2))
		return false;

	if (seqno - seqno_recv > (TLB_INVALIDATION_SEQNO_MAX / 2))
		return true;

	return seqno_recv >= seqno;
}
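
/*
 * Worked example of the wraparound comparison above, using a hypothetical
 * TLB_INVALIDATION_SEQNO_MAX of 100 purely for illustration:
 *
 *   seqno = 3,  seqno_recv = 95: 3 - 95 = -92 < -50, seqno has wrapped and is
 *                                still in the future -> not past (false)
 *   seqno = 98, seqno_recv = 3:  98 - 3 = 95 > 50, seqno_recv has wrapped past
 *                                it -> already completed (true)
 *   seqno = 3,  seqno_recv = 5:  inside the window, 5 >= 3 -> past (true)
 */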

static int send_tlb_invalidation(struct xe_guc *guc,
				 struct xe_gt_tlb_invalidation_fence *fence,
				 u32 *action, int len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	int seqno;
	int ret;

	xe_gt_assert(gt, fence);

	/*
	 * XXX: The seqno algorithm relies on TLB invalidations being processed
	 * in order, which they currently are. If that changes, the algorithm
	 * will need to be updated.
	 */

	mutex_lock(&guc->ct.lock);
	seqno = gt->tlb_invalidation.seqno;
	fence->seqno = seqno;
	trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
	action[1] = seqno;
	ret = xe_guc_ct_send_locked(&guc->ct, action, len,
				    G2H_LEN_DW_TLB_INVALIDATE, 1);
	if (!ret) {
		spin_lock_irq(&gt->tlb_invalidation.pending_lock);
		/*
		 * We haven't actually published the TLB fence as per
		 * pending_fences, but in theory our seqno could have already
		 * been written as we acquired the pending_lock. In such a case
		 * we can just go ahead and signal the fence here.
		 */
		if (tlb_invalidation_seqno_past(gt, seqno)) {
			__invalidation_fence_signal(xe, fence);
		} else {
			fence->invalidation_time = ktime_get();
			list_add_tail(&fence->link,
				      &gt->tlb_invalidation.pending_fences);

			if (list_is_singular(&gt->tlb_invalidation.pending_fences))
				queue_delayed_work(system_wq,
						   &gt->tlb_invalidation.fence_tdr,
						   tlb_timeout_jiffies(gt));
		}
		spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
	} else {
		__invalidation_fence_signal(xe, fence);
	}
	if (!ret) {
		gt->tlb_invalidation.seqno = (gt->tlb_invalidation.seqno + 1) %
			TLB_INVALIDATION_SEQNO_MAX;
		if (!gt->tlb_invalidation.seqno)
			gt->tlb_invalidation.seqno = 1;
	}
	mutex_unlock(&guc->ct.lock);
	xe_gt_stats_incr(gt, XE_GT_STATS_ID_TLB_INVAL, 1);

	return ret;
}
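
/*
 * Note on the seqno update above: on a successful send the next seqno is
 * (seqno + 1) % TLB_INVALIDATION_SEQNO_MAX with 0 skipped, so valid seqnos
 * cycle through 1 .. TLB_INVALIDATION_SEQNO_MAX - 1.
 */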

#define MAKE_INVAL_OP(type)	((type << XE_GUC_TLB_INVAL_TYPE_SHIFT) | \
		XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
		XE_GUC_TLB_INVAL_FLUSH_CACHE)

/**
 * xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
 * @gt: GT structure
 * @fence: invalidation fence which will be signaled on TLB invalidation
 * completion
 *
 * Issue a TLB invalidation for the GuC. Completion of the TLB invalidation is
 * asynchronous and the caller can use the invalidation fence to wait for
 * completion.
 *
 * Return: 0 on success, negative error code on error
 */
static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
				      struct xe_gt_tlb_invalidation_fence *fence)
{
	u32 action[] = {
		XE_GUC_ACTION_TLB_INVALIDATION,
		0,  /* seqno, replaced in send_tlb_invalidation */
		MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
	};
	int ret;

	ret = send_tlb_invalidation(&gt->uc.guc, fence, action,
				    ARRAY_SIZE(action));
	/*
	 * -ECANCELED indicates the CT is stopped for a GT reset. TLB caches
	 *  should be nuked on a GT reset so this error can be ignored.
	 */
	if (ret == -ECANCELED)
		return 0;

	return ret;
}

/**
 * xe_gt_tlb_invalidation_ggtt - Issue a TLB invalidation on this GT for the GGTT
 * @gt: GT structure
 *
 * Issue a TLB invalidation for the GGTT. Completion of TLB invalidation is
 * synchronous.
 *
 * Return: 0 on success, negative error code on error
 */
int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int fw_ref;

	if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
	    gt->uc.guc.submission_state.enabled) {
		struct xe_gt_tlb_invalidation_fence fence;
		int ret;

		xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
		ret = xe_gt_tlb_invalidation_guc(gt, &fence);
		if (ret)
			return ret;

		xe_gt_tlb_invalidation_fence_wait(&fence);
	} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
		struct xe_mmio *mmio = &gt->mmio;

		if (IS_SRIOV_VF(xe))
			return 0;

		fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
		if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
			xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC1,
					PVC_GUC_TLB_INV_DESC1_INVALIDATE);
			xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC0,
					PVC_GUC_TLB_INV_DESC0_VALID);
		} else {
			xe_mmio_write32(mmio, GUC_TLB_INV_CR,
					GUC_TLB_INV_CR_INVALIDATE);
		}
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
	}

	return 0;
}
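
/*
 * Minimal usage sketch (illustrative only, the surrounding context is
 * assumed): a caller that has just rewritten GGTT entries can simply do
 *
 *	int err = xe_gt_tlb_invalidation_ggtt(gt);
 *
 * and treat a non-zero return as a failed flush; no fence handling is needed
 * since the helper waits internally when the GuC-backed path is used.
 */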

/*
 * Ensure that roundup_pow_of_two(length) doesn't overflow.
 * Note that roundup_pow_of_two() operates on unsigned long,
 * not on u64.
 */
#define MAX_RANGE_TLB_INVALIDATION_LENGTH (rounddown_pow_of_two(ULONG_MAX))
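
/*
 * On a 64-bit kernel rounddown_pow_of_two(ULONG_MAX) is 1UL << 63, so in
 * practice only an absurdly large range (or a platform without range-based
 * invalidation support) takes the full-invalidation fallback below.
 */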

/**
 * xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
 * address range
 *
 * @gt: GT structure
 * @fence: invalidation fence which will be signaled on TLB invalidation
 * completion
 * @start: start address
 * @end: end address
 * @asid: address space id
 *
 * Issue a range-based TLB invalidation if supported; if not, fall back to a
 * full TLB invalidation. Completion of the TLB invalidation is asynchronous
 * and the caller can use the invalidation fence to wait for completion.
 *
 * Return: Negative error code on error, 0 on success
 */
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
				 struct xe_gt_tlb_invalidation_fence *fence,
				 u64 start, u64 end, u32 asid)
{
	struct xe_device *xe = gt_to_xe(gt);
#define MAX_TLB_INVALIDATION_LEN	7
	u32 action[MAX_TLB_INVALIDATION_LEN];
	u64 length = end - start;
	int len = 0;

	xe_gt_assert(gt, fence);

	/* Execlists not supported */
	if (gt_to_xe(gt)->info.force_execlist) {
		__invalidation_fence_signal(xe, fence);
		return 0;
	}

	action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
	action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
	if (!xe->info.has_range_tlb_invalidation ||
	    length > MAX_RANGE_TLB_INVALIDATION_LENGTH) {
		action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
	} else {
		u64 orig_start = start;
		u64 align;

		if (length < SZ_4K)
			length = SZ_4K;

		/*
		 * We need to invalidate at a higher granularity if the start
		 * address is not aligned to the length: find a length large
		 * enough to create an address mask covering the required
		 * range.
		 */
		align = roundup_pow_of_two(length);
		start = ALIGN_DOWN(start, align);
		end = ALIGN(end, align);
		length = align;
		while (start + length < end) {
			length <<= 1;
			start = ALIGN_DOWN(orig_start, length);
		}

		/*
		 * Minimum invalidation size for a 2MB page that the hardware
		 * expects is 16MB
		 */
		if (length >= SZ_2M) {
			length = max_t(u64, SZ_16M, length);
			start = ALIGN_DOWN(orig_start, length);
		}

		xe_gt_assert(gt, length >= SZ_4K);
		xe_gt_assert(gt, is_power_of_2(length));
		xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1,
						    ilog2(SZ_2M) + 1)));
		xe_gt_assert(gt, IS_ALIGNED(start, length));

		action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
		action[len++] = asid;
		action[len++] = lower_32_bits(start);
		action[len++] = upper_32_bits(start);
		action[len++] = ilog2(length) - ilog2(SZ_4K);
	}

	xe_gt_assert(gt, len <= MAX_TLB_INVALIDATION_LEN);

	return send_tlb_invalidation(&gt->uc.guc, fence, action, len);
}
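
/*
 * Worked example of the range alignment above (addresses are hypothetical):
 * start = 0x7000, end = 0xb000 gives length = 0x4000 (16K). The first
 * alignment pass yields start = 0x4000, end = 0xc000, length = 16K, but
 * 0x4000 + 16K does not reach 0xc000, so the loop keeps doubling the length
 * until the mask covers the whole range. The final invalidation is
 * start = 0x0, length = 64K, encoded as ilog2(64K) - ilog2(4K) = 4 in the
 * last action dword.
 */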

/**
 * xe_gt_tlb_invalidation_vm - Issue a TLB invalidation on this GT for a VM
 * @gt: GT structure
 * @vm: VM to invalidate
 *
 * Invalidate the entire VM's address space
 */
void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm)
{
	struct xe_gt_tlb_invalidation_fence fence;
	u64 range = 1ull << vm->xe->info.va_bits;
	int ret;

	xe_gt_tlb_invalidation_fence_init(gt, &fence, true);

	ret = xe_gt_tlb_invalidation_range(gt, &fence, 0, range, vm->usm.asid);
	if (ret < 0)
		return;

	xe_gt_tlb_invalidation_fence_wait(&fence);
}

/**
 * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
 * @gt: GT structure
 * @fence: invalidation fence which will be signaled on TLB invalidation
 * completion
 * @vma: VMA to invalidate
 *
 * Issue a range-based TLB invalidation if supported; if not, fall back to a
 * full TLB invalidation. Completion of the TLB invalidation is asynchronous
 * and the caller can use the invalidation fence to wait for completion.
 *
 * Return: Negative error code on error, 0 on success
 */
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
			       struct xe_gt_tlb_invalidation_fence *fence,
			       struct xe_vma *vma)
{
	xe_gt_assert(gt, vma);

	return xe_gt_tlb_invalidation_range(gt, fence, xe_vma_start(vma),
					    xe_vma_end(vma),
					    xe_vma_vm(vma)->usm.asid);
}
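
/*
 * Minimal usage sketch (illustrative only; gt and vma come from the caller's
 * context): issue the invalidation against a stack fence and wait for it.
 *
 *	struct xe_gt_tlb_invalidation_fence fence;
 *	int err;
 *
 *	xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
 *	err = xe_gt_tlb_invalidation_vma(gt, &fence, vma);
 *	if (err < 0)
 *		return err;
 *	xe_gt_tlb_invalidation_fence_wait(&fence);
 *
 * On error the fence is still signalled internally by send_tlb_invalidation(),
 * so no extra cleanup of the stack fence is required.
 */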

/**
 * xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
 * @guc: guc
 * @msg: message indicating TLB invalidation done
 * @len: length of message
 *
 * Parse seqno of TLB invalidation, wake any waiters for seqno, and signal any
 * invalidation fences for seqno. The algorithm for this depends on seqnos
 * being received in-order and asserts this assumption.
 *
 * Return: 0 on success, -EPROTO for malformed messages.
 */
int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gt_tlb_invalidation_fence *fence, *next;
	unsigned long flags;

	if (unlikely(len != 1))
		return -EPROTO;

	/*
	 * This can be run both directly from the IRQ handler and from
	 * process_g2h_msg(). Only one may process any individual CT message,
	 * however the order they are processed here could result in skipping a
	 * seqno. To handle that we just process all the seqnos from the last
	 * seqno_recv up to and including the one in msg[0]. The delta should be
	 * very small so there shouldn't be many pending_fences we actually
	 * need to iterate over here.
	 *
	 * From the GuC POV we expect the seqnos to always appear in-order, so
	 * if we see something later in the timeline we can be sure that
	 * anything appearing earlier has already signalled; we just have yet to
	 * officially process the CT message, e.g. when racing against
	 * process_g2h_msg().
	 */
	spin_lock_irqsave(&gt->tlb_invalidation.pending_lock, flags);
	if (tlb_invalidation_seqno_past(gt, msg[0])) {
		spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);
		return 0;
	}

	WRITE_ONCE(gt->tlb_invalidation.seqno_recv, msg[0]);

	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link) {
		trace_xe_gt_tlb_invalidation_fence_recv(xe, fence);

		if (!tlb_invalidation_seqno_past(gt, fence->seqno))
			break;

		invalidation_fence_signal(xe, fence);
	}

	if (!list_empty(&gt->tlb_invalidation.pending_fences))
		mod_delayed_work(system_wq,
				 &gt->tlb_invalidation.fence_tdr,
				 tlb_timeout_jiffies(gt));
	else
		cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);

	spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);

	return 0;
}

static const char *
invalidation_fence_get_driver_name(struct dma_fence *dma_fence)
{
	return "xe";
}

static const char *
invalidation_fence_get_timeline_name(struct dma_fence *dma_fence)
{
	return "invalidation_fence";
}

static const struct dma_fence_ops invalidation_fence_ops = {
	.get_driver_name = invalidation_fence_get_driver_name,
	.get_timeline_name = invalidation_fence_get_timeline_name,
};

/**
 * xe_gt_tlb_invalidation_fence_init - Initialize TLB invalidation fence
 * @gt: GT structure
 * @fence: TLB invalidation fence to initialize
 * @stack: fence is a stack variable
 *
 * Initialize TLB invalidation fence for use. xe_gt_tlb_invalidation_fence_fini
 * will be automatically called when the fence is signalled (all fences must
 * signal), even on error.
 */
void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
				       struct xe_gt_tlb_invalidation_fence *fence,
				       bool stack)
{
	xe_pm_runtime_get_noresume(gt_to_xe(gt));

	spin_lock_irq(&gt->tlb_invalidation.lock);
	dma_fence_init(&fence->base, &invalidation_fence_ops,
		       &gt->tlb_invalidation.lock,
		       dma_fence_context_alloc(1), 1);
	spin_unlock_irq(&gt->tlb_invalidation.lock);
	INIT_LIST_HEAD(&fence->link);
	if (stack)
		set_bit(FENCE_STACK_BIT, &fence->base.flags);
	else
		dma_fence_get(&fence->base);
	fence->gt = gt;
}
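
/*
 * Lifetime note for the two fence flavours set up above (illustrative):
 *
 *	// stack fence: caller owns the storage and simply waits on it
 *	struct xe_gt_tlb_invalidation_fence fence;
 *
 *	xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
 *	... issue the invalidation ...
 *	xe_gt_tlb_invalidation_fence_wait(&fence);
 *
 * With stack == false an extra dma_fence_get() is taken at init time and the
 * matching dma_fence_put() is done in __invalidation_fence_signal(), so the
 * reference taken here is dropped once the fence signals.
 */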
587