// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gt_tlb_invalidation.h"

#include "abi/guc_actions_abi.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_gt_stats.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_trace.h"
#include "regs/xe_guc_regs.h"

#define FENCE_STACK_BIT		DMA_FENCE_FLAG_USER_BITS

/*
 * TLB inval depends on pending commands in the CT queue and then the real
 * invalidation time. Double up the time to process full CT queue
 * just to be on the safe side.
 */
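/*
 * Illustrative budget: the base HW/GuC allowance below is HZ / 4, i.e. roughly
 * 250 ms; if the CTB backlog estimate comes back as, say, 50 ms worth of
 * jiffies (an assumed number for illustration), the resulting timeout is about
 * 250 + 2 * 50 = 350 ms.
 */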
static long tlb_timeout_jiffies(struct xe_gt *gt)
{
	/* this reflects what HW/GuC needs to process TLB inv request */
	const long hw_tlb_timeout = HZ / 4;

	/* this estimates actual delay caused by the CTB transport */
	long delay = xe_guc_ct_queue_proc_time_jiffies(&gt->uc.guc.ct);

	return hw_tlb_timeout + 2 * delay;
}

static void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence)
{
	if (WARN_ON_ONCE(!fence->gt))
		return;

	xe_pm_runtime_put(gt_to_xe(fence->gt));
	fence->gt = NULL; /* fini() should be called once */
}

static void
__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
{
	bool stack = test_bit(FENCE_STACK_BIT, &fence->base.flags);

	trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
	xe_gt_tlb_invalidation_fence_fini(fence);
	dma_fence_signal(&fence->base);
	if (!stack)
		dma_fence_put(&fence->base);
}

static void
invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
{
	list_del(&fence->link);
	__invalidation_fence_signal(xe, fence);
}

void xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence)
{
	if (WARN_ON_ONCE(!fence->gt))
		return;

	__invalidation_fence_signal(gt_to_xe(fence->gt), fence);
}

static void xe_gt_tlb_fence_timeout(struct work_struct *work)
{
	struct xe_gt *gt = container_of(work, struct xe_gt,
					tlb_invalidation.fence_tdr.work);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gt_tlb_invalidation_fence *fence, *next;

	LNL_FLUSH_WORK(&gt->uc.guc.ct.g2h_worker);

	spin_lock_irq(&gt->tlb_invalidation.pending_lock);
	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link) {
		s64 since_inval_ms = ktime_ms_delta(ktime_get(),
						    fence->invalidation_time);

		if (msecs_to_jiffies(since_inval_ms) < tlb_timeout_jiffies(gt))
			break;

		trace_xe_gt_tlb_invalidation_fence_timeout(xe, fence);
		xe_gt_err(gt, "TLB invalidation fence timeout, seqno=%d recv=%d",
			  fence->seqno, gt->tlb_invalidation.seqno_recv);

		fence->base.error = -ETIME;
		invalidation_fence_signal(xe, fence);
	}
	if (!list_empty(&gt->tlb_invalidation.pending_fences))
		queue_delayed_work(system_wq,
				   &gt->tlb_invalidation.fence_tdr,
				   tlb_timeout_jiffies(gt));
	spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
}

/**
 * xe_gt_tlb_invalidation_init_early - Initialize GT TLB invalidation state
 * @gt: GT structure
 *
 * Initialize GT TLB invalidation state. This is purely software initialization
 * and should be called once during driver load.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt)
{
	gt->tlb_invalidation.seqno = 1;
	INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
	spin_lock_init(&gt->tlb_invalidation.pending_lock);
	spin_lock_init(&gt->tlb_invalidation.lock);
	INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
			  xe_gt_tlb_fence_timeout);

	return 0;
}
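
/*
 * Illustrative call site (hypothetical caller, per the kernel-doc above):
 * invoked once during early GT setup, before any invalidation can be issued:
 *
 *	err = xe_gt_tlb_invalidation_init_early(gt);
 *	if (err)
 *		return err;
 */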

/**
 * xe_gt_tlb_invalidation_reset - GT TLB invalidation reset
 * @gt: GT structure
 *
 * Signal any pending invalidation fences; should be called during a GT reset
 */
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
{
	struct xe_gt_tlb_invalidation_fence *fence, *next;
	int pending_seqno;

	/*
	 * We can get here before the CTs are even initialized if we're wedging
	 * very early, in which case there are not going to be any pending
	 * fences so we can bail immediately.
	 */
	if (!xe_guc_ct_initialized(&gt->uc.guc.ct))
		return;

	/*
	 * CT channel is already disabled at this point. No new TLB requests can
	 * appear.
	 */

	mutex_lock(&gt->uc.guc.ct.lock);
	spin_lock_irq(&gt->tlb_invalidation.pending_lock);
	cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
	/*
	 * We might have various kworkers waiting for TLB flushes to complete
	 * which are not tracked with an explicit TLB fence, however at this
	 * stage that will never happen since the CT is already disabled, so
	 * make sure we signal them here under the assumption that we have
	 * completed a full GT reset.
	 */
	if (gt->tlb_invalidation.seqno == 1)
		pending_seqno = TLB_INVALIDATION_SEQNO_MAX - 1;
	else
		pending_seqno = gt->tlb_invalidation.seqno - 1;
	WRITE_ONCE(gt->tlb_invalidation.seqno_recv, pending_seqno);

	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link)
		invalidation_fence_signal(gt_to_xe(gt), fence);
	spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
	mutex_unlock(&gt->uc.guc.ct.lock);
}

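/*
 * Seqnos wrap at TLB_INVALIDATION_SEQNO_MAX, so "past" is decided modulo half
 * the seqno space. Illustrative examples: with seqno_recv == 2 just after a
 * wrap, an old seqno of TLB_INVALIDATION_SEQNO_MAX - 2 is treated as past,
 * while with seqno_recv == TLB_INVALIDATION_SEQNO_MAX - 2 a freshly wrapped
 * seqno of 1 is not yet past.
 */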
static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
{
	int seqno_recv = READ_ONCE(gt->tlb_invalidation.seqno_recv);

	if (seqno - seqno_recv < -(TLB_INVALIDATION_SEQNO_MAX / 2))
		return false;

	if (seqno - seqno_recv > (TLB_INVALIDATION_SEQNO_MAX / 2))
		return true;

	return seqno_recv >= seqno;
}

static int send_tlb_invalidation(struct xe_guc *guc,
				 struct xe_gt_tlb_invalidation_fence *fence,
				 u32 *action, int len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	int seqno;
	int ret;

	xe_gt_assert(gt, fence);

	/*
	 * XXX: The seqno algorithm relies on TLB invalidations being processed
	 * in order, which they currently are; if that changes, the algorithm
	 * will need to be updated.
	 */

	mutex_lock(&guc->ct.lock);
	seqno = gt->tlb_invalidation.seqno;
	fence->seqno = seqno;
	trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
	action[1] = seqno;
	ret = xe_guc_ct_send_locked(&guc->ct, action, len,
				    G2H_LEN_DW_TLB_INVALIDATE, 1);
	if (!ret) {
		spin_lock_irq(&gt->tlb_invalidation.pending_lock);
		/*
		 * We haven't actually published the TLB fence as per
		 * pending_fences, but in theory our seqno could have already
		 * been written as we acquired the pending_lock. In such a case
		 * we can just go ahead and signal the fence here.
		 */
		if (tlb_invalidation_seqno_past(gt, seqno)) {
			__invalidation_fence_signal(xe, fence);
		} else {
			fence->invalidation_time = ktime_get();
			list_add_tail(&fence->link,
				      &gt->tlb_invalidation.pending_fences);

			if (list_is_singular(&gt->tlb_invalidation.pending_fences))
				queue_delayed_work(system_wq,
						   &gt->tlb_invalidation.fence_tdr,
						   tlb_timeout_jiffies(gt));
		}
		spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
	} else {
		__invalidation_fence_signal(xe, fence);
	}
	if (!ret) {
		gt->tlb_invalidation.seqno = (gt->tlb_invalidation.seqno + 1) %
			TLB_INVALIDATION_SEQNO_MAX;
		if (!gt->tlb_invalidation.seqno)
			gt->tlb_invalidation.seqno = 1;
	}
	mutex_unlock(&guc->ct.lock);
	xe_gt_stats_incr(gt, XE_GT_STATS_ID_TLB_INVAL, 1);

	return ret;
}

#define MAKE_INVAL_OP(type)	((type << XE_GUC_TLB_INVAL_TYPE_SHIFT) | \
		XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
		XE_GUC_TLB_INVAL_FLUSH_CACHE)
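
/*
 * For example, MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL) packs the invalidation
 * type, the heavy invalidation mode and the cache-flush flag into the single
 * op dword that the callers below place right after the seqno in the action
 * array.
 */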

/**
 * xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
 * @gt: GT structure
 * @fence: invalidation fence which will be signaled on TLB invalidation
 * completion
 *
 * Issue a TLB invalidation for the GuC. Completion of the TLB invalidation is
 * asynchronous and the caller can use the invalidation fence to wait for
 * completion.
 *
 * Return: 0 on success, negative error code on error
 */
static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
				      struct xe_gt_tlb_invalidation_fence *fence)
{
	u32 action[] = {
		XE_GUC_ACTION_TLB_INVALIDATION,
		0, /* seqno, replaced in send_tlb_invalidation */
		MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
	};
	int ret;

	ret = send_tlb_invalidation(&gt->uc.guc, fence, action,
				    ARRAY_SIZE(action));
	/*
	 * -ECANCELED indicates the CT is stopped for a GT reset. TLB caches
	 * should be nuked on a GT reset so this error can be ignored.
	 */
	if (ret == -ECANCELED)
		return 0;

	return ret;
}

/**
 * xe_gt_tlb_invalidation_ggtt - Issue a TLB invalidation on this GT for the GGTT
 * @gt: GT structure
 *
 * Issue a TLB invalidation for the GGTT. Completion of TLB invalidation is
 * synchronous.
 *
 * Return: 0 on success, negative error code on error
 */
int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int fw_ref;

	if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
	    gt->uc.guc.submission_state.enabled) {
		struct xe_gt_tlb_invalidation_fence fence;
		int ret;

		xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
		ret = xe_gt_tlb_invalidation_guc(gt, &fence);
		if (ret)
			return ret;

		xe_gt_tlb_invalidation_fence_wait(&fence);
	} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
		struct xe_mmio *mmio = &gt->mmio;

		if (IS_SRIOV_VF(xe))
			return 0;

		fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
		if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
			xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC1,
					PVC_GUC_TLB_INV_DESC1_INVALIDATE);
			xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC0,
					PVC_GUC_TLB_INV_DESC0_VALID);
		} else {
			xe_mmio_write32(mmio, GUC_TLB_INV_CR,
					GUC_TLB_INV_CR_INVALIDATE);
		}
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
	}

	return 0;
}

static int send_tlb_invalidation_all(struct xe_gt *gt,
				     struct xe_gt_tlb_invalidation_fence *fence)
{
	u32 action[] = {
		XE_GUC_ACTION_TLB_INVALIDATION_ALL,
		0, /* seqno, replaced in send_tlb_invalidation */
		MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL),
	};

	return send_tlb_invalidation(&gt->uc.guc, fence, action, ARRAY_SIZE(action));
}

/**
 * xe_gt_tlb_invalidation_all - Invalidate all TLBs across PF and all VFs.
 * @gt: the &xe_gt structure
 * @fence: the &xe_gt_tlb_invalidation_fence to be signaled on completion
 *
 * Send a request to invalidate all TLBs across PF and all VFs.
 *
 * Return: 0 on success, negative error code on error
 */
int xe_gt_tlb_invalidation_all(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence)
{
	int err;

	xe_gt_assert(gt, gt == fence->gt);

	err = send_tlb_invalidation_all(gt, fence);
	if (err)
		xe_gt_err(gt, "TLB invalidation request failed (%pe)", ERR_PTR(err));

	return err;
}

/*
 * Ensure that roundup_pow_of_two(length) doesn't overflow.
 * Note that roundup_pow_of_two() operates on unsigned long,
 * not on u64.
 */
#define MAX_RANGE_TLB_INVALIDATION_LENGTH (rounddown_pow_of_two(ULONG_MAX))
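/* e.g. 1UL << 63 on a 64-bit kernel, where BITS_PER_LONG == 64 */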

/**
 * xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
 * address range
 *
 * @gt: GT structure
 * @fence: invalidation fence which will be signaled on TLB invalidation
 * completion
 * @start: start address
 * @end: end address
 * @asid: address space id
 *
 * Issue a range-based TLB invalidation if supported; if not, fall back to a
 * full TLB invalidation. Completion of the TLB invalidation is asynchronous
 * and the caller can use the invalidation fence to wait for completion.
 *
 * Return: Negative error code on error, 0 on success
 */
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
				 struct xe_gt_tlb_invalidation_fence *fence,
				 u64 start, u64 end, u32 asid)
{
	struct xe_device *xe = gt_to_xe(gt);
#define MAX_TLB_INVALIDATION_LEN	7
	u32 action[MAX_TLB_INVALIDATION_LEN];
	u64 length = end - start;
	int len = 0;

	xe_gt_assert(gt, fence);

	/* Execlists not supported */
	if (gt_to_xe(gt)->info.force_execlist) {
		__invalidation_fence_signal(xe, fence);
		return 0;
	}

	action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
	action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
	if (!xe->info.has_range_tlb_invalidation ||
	    length > MAX_RANGE_TLB_INVALIDATION_LENGTH) {
		action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
	} else {
		u64 orig_start = start;
		u64 align;

		if (length < SZ_4K)
			length = SZ_4K;

		/*
		 * We need to invalidate at a coarser granularity if the start
		 * address is not aligned to the length. In that case, grow the
		 * length until it is large enough to form an address mask that
		 * covers the whole requested range.
		 */
		align = roundup_pow_of_two(length);
		start = ALIGN_DOWN(start, align);
		end = ALIGN(end, align);
		length = align;
		while (start + length < end) {
			length <<= 1;
			start = ALIGN_DOWN(orig_start, length);
		}
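
		/*
		 * Worked example (illustrative numbers): start = 0x1000 and
		 * end = 0x3000 give length = SZ_8K, but 0x1000 is not 8K
		 * aligned, so the loop above widens the range until an
		 * aligned region covers it: the final request is start = 0x0,
		 * length = SZ_16K, encoded below as
		 * ilog2(SZ_16K) - ilog2(SZ_4K) = 2.
		 */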

		/*
		 * Minimum invalidation size for a 2MB page that the hardware
		 * expects is 16MB
		 */
		if (length >= SZ_2M) {
			length = max_t(u64, SZ_16M, length);
			start = ALIGN_DOWN(orig_start, length);
		}

		xe_gt_assert(gt, length >= SZ_4K);
		xe_gt_assert(gt, is_power_of_2(length));
		xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1,
						    ilog2(SZ_2M) + 1)));
		xe_gt_assert(gt, IS_ALIGNED(start, length));

		action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
		action[len++] = asid;
		action[len++] = lower_32_bits(start);
		action[len++] = upper_32_bits(start);
		action[len++] = ilog2(length) - ilog2(SZ_4K);
	}

	xe_gt_assert(gt, len <= MAX_TLB_INVALIDATION_LEN);

	return send_tlb_invalidation(&gt->uc.guc, fence, action, len);
}

/**
 * xe_gt_tlb_invalidation_vm - Issue a TLB invalidation on this GT for a VM
 * @gt: GT structure
 * @vm: VM to invalidate
 *
 * Invalidate entire VM's address space
 */
void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm)
{
	struct xe_gt_tlb_invalidation_fence fence;
	u64 range = 1ull << vm->xe->info.va_bits;
	int ret;

	xe_gt_tlb_invalidation_fence_init(gt, &fence, true);

	ret = xe_gt_tlb_invalidation_range(gt, &fence, 0, range, vm->usm.asid);
	if (ret < 0)
		return;

	xe_gt_tlb_invalidation_fence_wait(&fence);
}

/**
 * xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
 * @guc: guc
 * @msg: message indicating TLB invalidation done
 * @len: length of message
 *
 * Parse seqno of TLB invalidation, wake any waiters for seqno, and signal any
 * invalidation fences for seqno. Algorithm for this depends on seqno being
 * received in-order and asserts this assumption.
 *
 * Return: 0 on success, -EPROTO for malformed messages.
 */
int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gt_tlb_invalidation_fence *fence, *next;
	unsigned long flags;

	if (unlikely(len != 1))
		return -EPROTO;

	/*
	 * This can be run both directly from the IRQ handler and in
	 * process_g2h_msg(). Only one may process any individual CT message,
	 * however the order they are processed here could result in skipping a
	 * seqno. To handle that we just process all the seqnos from the last
	 * seqno_recv up to and including the one in msg[0]. The delta should be
	 * very small so there shouldn't be many pending_fences we actually
	 * need to iterate over here.
	 *
	 * From GuC POV we expect the seqnos to always appear in-order, so if we
	 * see something later in the timeline we can be sure that anything
	 * appearing earlier has already signalled, just that we have yet to
	 * officially process the CT message, as when racing against
	 * process_g2h_msg().
	 */
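
	/*
	 * Illustrative example: if seqno_recv is currently 10 and msg[0] is
	 * 12, the store below moves seqno_recv to 12 and the loop then
	 * signals the pending fences for both seqno 11 and seqno 12.
	 */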
	spin_lock_irqsave(&gt->tlb_invalidation.pending_lock, flags);
	if (tlb_invalidation_seqno_past(gt, msg[0])) {
		spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);
		return 0;
	}

	WRITE_ONCE(gt->tlb_invalidation.seqno_recv, msg[0]);

	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link) {
		trace_xe_gt_tlb_invalidation_fence_recv(xe, fence);

		if (!tlb_invalidation_seqno_past(gt, fence->seqno))
			break;

		invalidation_fence_signal(xe, fence);
	}

	if (!list_empty(&gt->tlb_invalidation.pending_fences))
		mod_delayed_work(system_wq,
				 &gt->tlb_invalidation.fence_tdr,
				 tlb_timeout_jiffies(gt));
	else
		cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);

	spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);

	return 0;
}

static const char *
invalidation_fence_get_driver_name(struct dma_fence *dma_fence)
{
	return "xe";
}

static const char *
invalidation_fence_get_timeline_name(struct dma_fence *dma_fence)
{
	return "invalidation_fence";
}

static const struct dma_fence_ops invalidation_fence_ops = {
	.get_driver_name = invalidation_fence_get_driver_name,
	.get_timeline_name = invalidation_fence_get_timeline_name,
};

/**
 * xe_gt_tlb_invalidation_fence_init - Initialize TLB invalidation fence
 * @gt: GT
 * @fence: TLB invalidation fence to initialize
 * @stack: fence is stack variable
 *
 * Initialize TLB invalidation fence for use. xe_gt_tlb_invalidation_fence_fini
 * will be automatically called when fence is signalled (all fences must signal),
 * even on error.
 */
void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
				       struct xe_gt_tlb_invalidation_fence *fence,
				       bool stack)
{
	xe_pm_runtime_get_noresume(gt_to_xe(gt));

	spin_lock_irq(&gt->tlb_invalidation.lock);
	dma_fence_init(&fence->base, &invalidation_fence_ops,
		       &gt->tlb_invalidation.lock,
		       dma_fence_context_alloc(1), 1);
	spin_unlock_irq(&gt->tlb_invalidation.lock);
	INIT_LIST_HEAD(&fence->link);
	if (stack)
		set_bit(FENCE_STACK_BIT, &fence->base.flags);
	else
		dma_fence_get(&fence->base);
	fence->gt = gt;
}
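
/*
 * Typical stack-fence usage, mirroring xe_gt_tlb_invalidation_ggtt() above
 * (illustrative sketch only):
 *
 *	struct xe_gt_tlb_invalidation_fence fence;
 *	int err;
 *
 *	xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
 *	err = xe_gt_tlb_invalidation_guc(gt, &fence);
 *	if (err)
 *		return err;
 *	xe_gt_tlb_invalidation_fence_wait(&fence);
 */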