// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gt_tlb_invalidation.h"

#include "abi/guc_actions_abi.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_gt_stats.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_trace.h"
#include "regs/xe_guc_regs.h"

#define FENCE_STACK_BIT		DMA_FENCE_FLAG_USER_BITS

/*
 * TLB inval depends on pending commands in the CT queue and then the real
 * invalidation time. Double up the time to process full CT queue
 * just to be on the safe side.
 */
static long tlb_timeout_jiffies(struct xe_gt *gt)
{
	/* this reflects what HW/GuC needs to process TLB inv request */
	const long hw_tlb_timeout = HZ / 4;

	/* this estimates actual delay caused by the CTB transport */
	long delay = xe_guc_ct_queue_proc_time_jiffies(&gt->uc.guc.ct);

	return hw_tlb_timeout + 2 * delay;
}

static void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence)
{
	if (WARN_ON_ONCE(!fence->gt))
		return;

	xe_pm_runtime_put(gt_to_xe(fence->gt));
	fence->gt = NULL; /* fini() should be called once */
}

static void
__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
{
	bool stack = test_bit(FENCE_STACK_BIT, &fence->base.flags);

	trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
	xe_gt_tlb_invalidation_fence_fini(fence);
	dma_fence_signal(&fence->base);
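	/*
	 * Stack fences never took an extra reference in
	 * xe_gt_tlb_invalidation_fence_init(), so only heap fences drop one
	 * here; a stack fence's storage is owned by the waiting caller.
	 */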
	if (!stack)
		dma_fence_put(&fence->base);
}

static void
invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
{
	list_del(&fence->link);
	__invalidation_fence_signal(xe, fence);
}

static void xe_gt_tlb_fence_timeout(struct work_struct *work)
{
	struct xe_gt *gt = container_of(work, struct xe_gt,
					tlb_invalidation.fence_tdr.work);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gt_tlb_invalidation_fence *fence, *next;

	spin_lock_irq(&gt->tlb_invalidation.pending_lock);
	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link) {
		s64 since_inval_ms = ktime_ms_delta(ktime_get(),
						    fence->invalidation_time);

		if (msecs_to_jiffies(since_inval_ms) < tlb_timeout_jiffies(gt))
			break;

		trace_xe_gt_tlb_invalidation_fence_timeout(xe, fence);
		xe_gt_err(gt, "TLB invalidation fence timeout, seqno=%d recv=%d",
			  fence->seqno, gt->tlb_invalidation.seqno_recv);

		fence->base.error = -ETIME;
		invalidation_fence_signal(xe, fence);
	}
	if (!list_empty(&gt->tlb_invalidation.pending_fences))
		queue_delayed_work(system_wq,
				   &gt->tlb_invalidation.fence_tdr,
				   tlb_timeout_jiffies(gt));
	spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
}

/**
 * xe_gt_tlb_invalidation_init - Initialize GT TLB invalidation state
 * @gt: graphics tile
 *
 * Initialize GT TLB invalidation state. This is purely software initialization
 * and should be called once during driver load.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
{
	gt->tlb_invalidation.seqno = 1;
	INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
	spin_lock_init(&gt->tlb_invalidation.pending_lock);
	spin_lock_init(&gt->tlb_invalidation.lock);
	INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
			  xe_gt_tlb_fence_timeout);

	return 0;
}

/**
 * xe_gt_tlb_invalidation_reset - Reset GT TLB invalidation state
 * @gt: graphics tile
 *
 * Signal any pending invalidation fences; should be called during a GT reset.
 */
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
{
	struct xe_gt_tlb_invalidation_fence *fence, *next;
	int pending_seqno;

	/*
	 * CT channel is already disabled at this point. No new TLB requests can
	 * appear.
	 */

	mutex_lock(&gt->uc.guc.ct.lock);
	spin_lock_irq(&gt->tlb_invalidation.pending_lock);
	cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
	/*
	 * We might have various kworkers waiting for TLB flushes to complete
	 * which are not tracked with an explicit TLB fence; however, at this
	 * stage that will never happen since the CT is already disabled, so
	 * make sure we signal them here under the assumption that we have
	 * completed a full GT reset.
	 */
	if (gt->tlb_invalidation.seqno == 1)
		pending_seqno = TLB_INVALIDATION_SEQNO_MAX - 1;
	else
		pending_seqno = gt->tlb_invalidation.seqno - 1;
	WRITE_ONCE(gt->tlb_invalidation.seqno_recv, pending_seqno);

	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link)
		invalidation_fence_signal(gt_to_xe(gt), fence);
	spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
	mutex_unlock(&gt->uc.guc.ct.lock);
}

static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
{
	int seqno_recv = READ_ONCE(gt->tlb_invalidation.seqno_recv);

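	/*
	 * Seqnos wrap at TLB_INVALIDATION_SEQNO_MAX, so a plain comparison is
	 * not enough: if the two values are more than half the seqno space
	 * apart, the numerically smaller one has wrapped and is therefore the
	 * more recent of the two.
	 */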
	if (seqno - seqno_recv < -(TLB_INVALIDATION_SEQNO_MAX / 2))
		return false;

	if (seqno - seqno_recv > (TLB_INVALIDATION_SEQNO_MAX / 2))
		return true;

	return seqno_recv >= seqno;
}

static int send_tlb_invalidation(struct xe_guc *guc,
				 struct xe_gt_tlb_invalidation_fence *fence,
				 u32 *action, int len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	int seqno;
	int ret;

	xe_gt_assert(gt, fence);

	/*
	 * XXX: The seqno algorithm relies on TLB invalidations being processed
	 * in order, which they currently are; if that changes, the algorithm
	 * will need to be updated.
	 */

	mutex_lock(&guc->ct.lock);
	seqno = gt->tlb_invalidation.seqno;
	fence->seqno = seqno;
	trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
	action[1] = seqno;
	ret = xe_guc_ct_send_locked(&guc->ct, action, len,
				    G2H_LEN_DW_TLB_INVALIDATE, 1);
	if (!ret) {
		spin_lock_irq(&gt->tlb_invalidation.pending_lock);
		/*
		 * We haven't actually published the TLB fence as per
		 * pending_fences, but in theory our seqno could have already
		 * been written as we acquired the pending_lock. In such a case
		 * we can just go ahead and signal the fence here.
		 */
		if (tlb_invalidation_seqno_past(gt, seqno)) {
			__invalidation_fence_signal(xe, fence);
		} else {
			fence->invalidation_time = ktime_get();
			list_add_tail(&fence->link,
				      &gt->tlb_invalidation.pending_fences);

			if (list_is_singular(&gt->tlb_invalidation.pending_fences))
				queue_delayed_work(system_wq,
						   &gt->tlb_invalidation.fence_tdr,
						   tlb_timeout_jiffies(gt));
		}
		spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
	} else {
		__invalidation_fence_signal(xe, fence);
	}
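	/*
	 * Advance to the next seqno on success. Zero is never handed out:
	 * the counter wraps from TLB_INVALIDATION_SEQNO_MAX - 1 back to 1.
	 */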
	if (!ret) {
		gt->tlb_invalidation.seqno = (gt->tlb_invalidation.seqno + 1) %
			TLB_INVALIDATION_SEQNO_MAX;
		if (!gt->tlb_invalidation.seqno)
			gt->tlb_invalidation.seqno = 1;
	}
	mutex_unlock(&guc->ct.lock);
	xe_gt_stats_incr(gt, XE_GT_STATS_ID_TLB_INVAL, 1);

	return ret;
}

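/*
 * Build the invalidation-type dword sent to the GuC: the invalidation type,
 * heavy mode, and a request to also flush the caches.
 */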
#define MAKE_INVAL_OP(type)	((type << XE_GUC_TLB_INVAL_TYPE_SHIFT) | \
		XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
		XE_GUC_TLB_INVAL_FLUSH_CACHE)

/**
 * xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
 * @gt: graphics tile
 * @fence: invalidation fence which will be signalled on TLB invalidation
 * completion
 *
 * Issue a TLB invalidation for the GuC. Completion of TLB invalidation is
 * asynchronous and the caller can use the invalidation fence to wait for
 * completion.
 *
 * Return: 0 on success, negative error code on error
 */
static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
				      struct xe_gt_tlb_invalidation_fence *fence)
{
	u32 action[] = {
		XE_GUC_ACTION_TLB_INVALIDATION,
		0,  /* seqno, replaced in send_tlb_invalidation */
		MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
	};

	return send_tlb_invalidation(&gt->uc.guc, fence, action,
				     ARRAY_SIZE(action));
}

/**
 * xe_gt_tlb_invalidation_ggtt - Issue a TLB invalidation on this GT for the GGTT
 * @gt: graphics tile
 *
 * Issue a TLB invalidation for the GGTT. Completion of TLB invalidation is
 * synchronous.
 *
 * Return: 0 on success, negative error code on error
 */
int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
	    gt->uc.guc.submission_state.enabled) {
		struct xe_gt_tlb_invalidation_fence fence;
		int ret;

		xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
		ret = xe_gt_tlb_invalidation_guc(gt, &fence);
		if (ret)
			return ret;

		xe_gt_tlb_invalidation_fence_wait(&fence);
	} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
		if (IS_SRIOV_VF(xe))
			return 0;

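		/*
		 * No usable GuC CT channel (e.g. GuC submission not yet
		 * enabled): trigger the GuC TLB invalidation directly via
		 * MMIO. PVC and Xe2+ use the two-register descriptor
		 * interface, older platforms the single GUC_TLB_INV_CR
		 * register.
		 */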
		xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
		if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
			xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC1,
					PVC_GUC_TLB_INV_DESC1_INVALIDATE);
			xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC0,
					PVC_GUC_TLB_INV_DESC0_VALID);
		} else {
			xe_mmio_write32(gt, GUC_TLB_INV_CR,
					GUC_TLB_INV_CR_INVALIDATE);
		}
		xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
	}

	return 0;
}

/**
 * xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
 * address range
 *
 * @gt: graphics tile
 * @fence: invalidation fence which will be signalled on TLB invalidation
 * completion
 * @start: start address
 * @end: end address
 * @asid: address space id
 *
 * Issue a range based TLB invalidation if supported; if not, fall back to a
 * full TLB invalidation. Completion of TLB invalidation is asynchronous and
 * the caller can use the invalidation fence to wait for completion.
 *
 * Return: Negative error code on error, 0 on success
 */
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
				 struct xe_gt_tlb_invalidation_fence *fence,
				 u64 start, u64 end, u32 asid)
{
	struct xe_device *xe = gt_to_xe(gt);
#define MAX_TLB_INVALIDATION_LEN	7
	u32 action[MAX_TLB_INVALIDATION_LEN];
	int len = 0;

	xe_gt_assert(gt, fence);

	/* Execlists not supported */
	if (gt_to_xe(gt)->info.force_execlist) {
		__invalidation_fence_signal(xe, fence);
		return 0;
	}

	action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
	action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
	if (!xe->info.has_range_tlb_invalidation) {
		action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
	} else {
		u64 orig_start = start;
		u64 length = end - start;
		u64 align;

		if (length < SZ_4K)
			length = SZ_4K;

		/*
		 * We need to invalidate at a higher granularity if the start
		 * address is not aligned to the length. In that case we need
		 * to find a length large enough to create an address mask
		 * covering the required range.
		 */
		align = roundup_pow_of_two(length);
		start = ALIGN_DOWN(start, align);
		end = ALIGN(end, align);
		length = align;
		while (start + length < end) {
			length <<= 1;
			start = ALIGN_DOWN(orig_start, length);
		}

		/*
		 * Minimum invalidation size for a 2MB page that the hardware
		 * expects is 16MB
		 */
		if (length >= SZ_2M) {
			length = max_t(u64, SZ_16M, length);
			start = ALIGN_DOWN(orig_start, length);
		}

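		/*
		 * At this point length is a power of two >= SZ_4K, and the
		 * 16MB minimum above means it can never be 4MB or 8MB; the
		 * GENMASK assert below checks exactly those two bits.
		 */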
		xe_gt_assert(gt, length >= SZ_4K);
		xe_gt_assert(gt, is_power_of_2(length));
		xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1,
						    ilog2(SZ_2M) + 1)));
		xe_gt_assert(gt, IS_ALIGNED(start, length));

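		/*
		 * Selective invalidation payload: the PAGE_SELECTIVE op, the
		 * ASID, the 64-bit start address split into two dwords, and
		 * the range size encoded as log2(length) - log2(SZ_4K).
		 */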
		action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
		action[len++] = asid;
		action[len++] = lower_32_bits(start);
		action[len++] = upper_32_bits(start);
		action[len++] = ilog2(length) - ilog2(SZ_4K);
	}

	xe_gt_assert(gt, len <= MAX_TLB_INVALIDATION_LEN);

	return send_tlb_invalidation(&gt->uc.guc, fence, action, len);
}

/**
 * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
 * @gt: graphics tile
 * @fence: invalidation fence which will be signalled on TLB invalidation
 * completion
 * @vma: VMA to invalidate
 *
 * Issue a range based TLB invalidation if supported; if not, fall back to a
 * full TLB invalidation. Completion of TLB invalidation is asynchronous and
 * the caller can use the invalidation fence to wait for completion.
 *
 * Return: Negative error code on error, 0 on success
 */
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
			       struct xe_gt_tlb_invalidation_fence *fence,
			       struct xe_vma *vma)
{
	xe_gt_assert(gt, vma);

	return xe_gt_tlb_invalidation_range(gt, fence, xe_vma_start(vma),
					    xe_vma_end(vma),
					    xe_vma_vm(vma)->usm.asid);
}

/**
 * xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
 * @guc: guc
 * @msg: message indicating TLB invalidation done
 * @len: length of message
 *
 * Parse seqno of TLB invalidation, wake any waiters for seqno, and signal any
 * invalidation fences for seqno. Algorithm for this depends on seqno being
 * received in-order and asserts this assumption.
 *
 * Return: 0 on success, -EPROTO for malformed messages.
 */
int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gt_tlb_invalidation_fence *fence, *next;
	unsigned long flags;

	if (unlikely(len != 1))
		return -EPROTO;

	/*
	 * This can be run both directly from the IRQ handler and from
	 * process_g2h_msg(). Only one may process any individual CT message,
	 * however the order they are processed here could result in skipping a
	 * seqno. To handle that we just process all the seqnos from the last
	 * seqno_recv up to and including the one in msg[0]. The delta should be
	 * very small so there shouldn't be many pending_fences we actually
	 * need to iterate over here.
	 *
	 * From GuC POV we expect the seqnos to always appear in-order, so if we
	 * see something later in the timeline we can be sure that anything
	 * appearing earlier has already signalled, just that we have yet to
	 * officially process the CT message like if racing against
	 * process_g2h_msg().
	 */
	spin_lock_irqsave(&gt->tlb_invalidation.pending_lock, flags);
	if (tlb_invalidation_seqno_past(gt, msg[0])) {
		spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);
		return 0;
	}

	WRITE_ONCE(gt->tlb_invalidation.seqno_recv, msg[0]);

	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link) {
		trace_xe_gt_tlb_invalidation_fence_recv(xe, fence);

		if (!tlb_invalidation_seqno_past(gt, fence->seqno))
			break;

		invalidation_fence_signal(xe, fence);
	}

	if (!list_empty(&gt->tlb_invalidation.pending_fences))
		mod_delayed_work(system_wq,
				 &gt->tlb_invalidation.fence_tdr,
				 tlb_timeout_jiffies(gt));
	else
		cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);

	spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);

	return 0;
}

static const char *
invalidation_fence_get_driver_name(struct dma_fence *dma_fence)
{
	return "xe";
}

static const char *
invalidation_fence_get_timeline_name(struct dma_fence *dma_fence)
{
	return "invalidation_fence";
}

static const struct dma_fence_ops invalidation_fence_ops = {
	.get_driver_name = invalidation_fence_get_driver_name,
	.get_timeline_name = invalidation_fence_get_timeline_name,
};

/**
 * xe_gt_tlb_invalidation_fence_init - Initialize TLB invalidation fence
 * @gt: GT
 * @fence: TLB invalidation fence to initialize
 * @stack: fence is stack variable
 *
 * Initialize TLB invalidation fence for use. xe_gt_tlb_invalidation_fence_fini
 * will be automatically called when fence is signalled (all fences must signal),
 * even on error.
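 *
 * A typical synchronous usage pattern (as in xe_gt_tlb_invalidation_ggtt()
 * above; sketched here only for illustration) is roughly:
 *
 *	struct xe_gt_tlb_invalidation_fence fence;
 *
 *	xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
 *	ret = xe_gt_tlb_invalidation_guc(gt, &fence);
 *	if (!ret)
 *		xe_gt_tlb_invalidation_fence_wait(&fence);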
 */
void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
				       struct xe_gt_tlb_invalidation_fence *fence,
				       bool stack)
{
	xe_pm_runtime_get_noresume(gt_to_xe(gt));

	spin_lock_irq(&gt->tlb_invalidation.lock);
	dma_fence_init(&fence->base, &invalidation_fence_ops,
		       &gt->tlb_invalidation.lock,
		       dma_fence_context_alloc(1), 1);
	spin_unlock_irq(&gt->tlb_invalidation.lock);
	INIT_LIST_HEAD(&fence->link);
	if (stack)
		set_bit(FENCE_STACK_BIT, &fence->base.flags);
	else
		dma_fence_get(&fence->base);
	fence->gt = gt;
}