// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gt_tlb_invalidation.h"

#include "abi/guc_actions_abi.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_gt_stats.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_trace.h"
#include "regs/xe_guc_regs.h"

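/* dma_fence flag bit marking a stack-allocated TLB invalidation fence */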
#define FENCE_STACK_BIT		DMA_FENCE_FLAG_USER_BITS

/*
 * TLB inval depends on pending commands in the CT queue and then the real
 * invalidation time. Double up the time to process the full CT queue
 * just to be on the safe side.
 */
static long tlb_timeout_jiffies(struct xe_gt *gt)
{
	/* this reflects what HW/GuC needs to process TLB inv request */
	const long hw_tlb_timeout = HZ / 4;

	/* this estimates actual delay caused by the CTB transport */
	long delay = xe_guc_ct_queue_proc_time_jiffies(&gt->uc.guc.ct);

	return hw_tlb_timeout + 2 * delay;
}

static void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence)
{
	if (WARN_ON_ONCE(!fence->gt))
		return;

	xe_pm_runtime_put(gt_to_xe(fence->gt));
	fence->gt = NULL; /* fini() should be called once */
}

static void
__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
{
	bool stack = test_bit(FENCE_STACK_BIT, &fence->base.flags);

	trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
	xe_gt_tlb_invalidation_fence_fini(fence);
	dma_fence_signal(&fence->base);
	if (!stack)
		dma_fence_put(&fence->base);
}

static void
invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
{
	list_del(&fence->link);
	__invalidation_fence_signal(xe, fence);
}

static void xe_gt_tlb_fence_timeout(struct work_struct *work)
{
	struct xe_gt *gt = container_of(work, struct xe_gt,
					tlb_invalidation.fence_tdr.work);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gt_tlb_invalidation_fence *fence, *next;

	LNL_FLUSH_WORK(&gt->uc.guc.ct.g2h_worker);

	spin_lock_irq(&gt->tlb_invalidation.pending_lock);
	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link) {
		s64 since_inval_ms = ktime_ms_delta(ktime_get(),
						    fence->invalidation_time);

		if (msecs_to_jiffies(since_inval_ms) < tlb_timeout_jiffies(gt))
			break;

		trace_xe_gt_tlb_invalidation_fence_timeout(xe, fence);
		xe_gt_err(gt, "TLB invalidation fence timeout, seqno=%d recv=%d",
			  fence->seqno, gt->tlb_invalidation.seqno_recv);

		fence->base.error = -ETIME;
		invalidation_fence_signal(xe, fence);
	}
	if (!list_empty(&gt->tlb_invalidation.pending_fences))
		queue_delayed_work(system_wq,
				   &gt->tlb_invalidation.fence_tdr,
				   tlb_timeout_jiffies(gt));
	spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
}

/**
 * xe_gt_tlb_invalidation_init - Initialize GT TLB invalidation state
 * @gt: graphics tile
 *
 * Initialize GT TLB invalidation state. This is purely software initialization
 * and should be called once during driver load.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
{
	gt->tlb_invalidation.seqno = 1;
	INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
	spin_lock_init(&gt->tlb_invalidation.pending_lock);
	spin_lock_init(&gt->tlb_invalidation.lock);
	INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
			  xe_gt_tlb_fence_timeout);

	return 0;
}

/**
 * xe_gt_tlb_invalidation_reset - GT TLB invalidation reset
 * @gt: graphics tile
 *
 * Signal any pending invalidation fences; should be called during a GT reset.
 */
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
{
	struct xe_gt_tlb_invalidation_fence *fence, *next;
	int pending_seqno;

	/*
	 * CT channel is already disabled at this point. No new TLB requests can
	 * appear.
	 */

	mutex_lock(&gt->uc.guc.ct.lock);
	spin_lock_irq(&gt->tlb_invalidation.pending_lock);
	cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
	/*
	 * We might have various kworkers waiting for TLB flushes to complete
	 * which are not tracked with an explicit TLB fence; however, at this
	 * stage that will never happen since the CT is already disabled, so
	 * make sure we signal them here under the assumption that we have
	 * completed a full GT reset.
	 */
	if (gt->tlb_invalidation.seqno == 1)
		pending_seqno = TLB_INVALIDATION_SEQNO_MAX - 1;
	else
		pending_seqno = gt->tlb_invalidation.seqno - 1;
	WRITE_ONCE(gt->tlb_invalidation.seqno_recv, pending_seqno);

	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link)
		invalidation_fence_signal(gt_to_xe(gt), fence);
	spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
	mutex_unlock(&gt->uc.guc.ct.lock);
}

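/*
 * Returns true if @seqno has already been received, using a half-range
 * comparison so the check still works across wraparound at
 * TLB_INVALIDATION_SEQNO_MAX.
 */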
static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
{
	int seqno_recv = READ_ONCE(gt->tlb_invalidation.seqno_recv);

	if (seqno - seqno_recv < -(TLB_INVALIDATION_SEQNO_MAX / 2))
		return false;

	if (seqno - seqno_recv > (TLB_INVALIDATION_SEQNO_MAX / 2))
		return true;

	return seqno_recv >= seqno;
}

static int send_tlb_invalidation(struct xe_guc *guc,
				 struct xe_gt_tlb_invalidation_fence *fence,
				 u32 *action, int len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	int seqno;
	int ret;

	xe_gt_assert(gt, fence);

	/*
	 * XXX: The seqno algorithm relies on TLB invalidations being processed
	 * in order, which they currently are; if that changes the algorithm
	 * will need to be updated.
	 */

	mutex_lock(&guc->ct.lock);
	seqno = gt->tlb_invalidation.seqno;
	fence->seqno = seqno;
	trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
	action[1] = seqno;
	ret = xe_guc_ct_send_locked(&guc->ct, action, len,
				    G2H_LEN_DW_TLB_INVALIDATE, 1);
	if (!ret) {
		spin_lock_irq(&gt->tlb_invalidation.pending_lock);
		/*
		 * We haven't actually published the TLB fence as per
		 * pending_fences, but in theory our seqno could have already
		 * been written as we acquired the pending_lock. In such a case
		 * we can just go ahead and signal the fence here.
		 */
		if (tlb_invalidation_seqno_past(gt, seqno)) {
			__invalidation_fence_signal(xe, fence);
		} else {
			fence->invalidation_time = ktime_get();
			list_add_tail(&fence->link,
				      &gt->tlb_invalidation.pending_fences);

			if (list_is_singular(&gt->tlb_invalidation.pending_fences))
				queue_delayed_work(system_wq,
						   &gt->tlb_invalidation.fence_tdr,
						   tlb_timeout_jiffies(gt));
		}
		spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
	} else {
		__invalidation_fence_signal(xe, fence);
	}
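	/* Advance the seqno on success; valid seqnos are 1..TLB_INVALIDATION_SEQNO_MAX - 1, so 0 is skipped on wrap */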
	if (!ret) {
		gt->tlb_invalidation.seqno = (gt->tlb_invalidation.seqno + 1) %
			TLB_INVALIDATION_SEQNO_MAX;
		if (!gt->tlb_invalidation.seqno)
			gt->tlb_invalidation.seqno = 1;
	}
	mutex_unlock(&guc->ct.lock);
	xe_gt_stats_incr(gt, XE_GT_STATS_ID_TLB_INVAL, 1);

	return ret;
}

#define MAKE_INVAL_OP(type)	((type << XE_GUC_TLB_INVAL_TYPE_SHIFT) | \
		XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
		XE_GUC_TLB_INVAL_FLUSH_CACHE)

/**
 * xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
 * @gt: graphics tile
 * @fence: invalidation fence which will be signalled on TLB invalidation
 * completion
 *
 * Issue a TLB invalidation for the GuC. Completion of the TLB invalidation is
 * asynchronous and the caller can use the invalidation fence to wait for
 * completion.
 *
 * Return: 0 on success, negative error code on error
 */
static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
				      struct xe_gt_tlb_invalidation_fence *fence)
{
	u32 action[] = {
		XE_GUC_ACTION_TLB_INVALIDATION,
		0, /* seqno, replaced in send_tlb_invalidation */
		MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
	};

	return send_tlb_invalidation(&gt->uc.guc, fence, action,
				     ARRAY_SIZE(action));
}

/**
 * xe_gt_tlb_invalidation_ggtt - Issue a TLB invalidation on this GT for the GGTT
 * @gt: graphics tile
 *
 * Issue a TLB invalidation for the GGTT. Completion of TLB invalidation is
 * synchronous.
 *
 * Return: 0 on success, negative error code on error
 */
int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
	    gt->uc.guc.submission_state.enabled) {
		struct xe_gt_tlb_invalidation_fence fence;
		int ret;

		xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
		ret = xe_gt_tlb_invalidation_guc(gt, &fence);
		if (ret)
			return ret;

		xe_gt_tlb_invalidation_fence_wait(&fence);
	} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
		if (IS_SRIOV_VF(xe))
			return 0;

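		/* GuC CT unavailable: request the GuC TLB invalidation directly via MMIO */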
		xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
		if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
			xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC1,
					PVC_GUC_TLB_INV_DESC1_INVALIDATE);
			xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC0,
					PVC_GUC_TLB_INV_DESC0_VALID);
		} else {
			xe_mmio_write32(gt, GUC_TLB_INV_CR,
					GUC_TLB_INV_CR_INVALIDATE);
		}
		xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
	}

	return 0;
}

/**
 * xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
 * address range
 *
 * @gt: graphics tile
 * @fence: invalidation fence which will be signalled on TLB invalidation
 * completion
 * @start: start address
 * @end: end address
 * @asid: address space id
 *
 * Issue a range-based TLB invalidation if supported; if not, fall back to a
 * full TLB invalidation. Completion of the TLB invalidation is asynchronous and
 * the caller can use the invalidation fence to wait for completion.
 *
 * Return: Negative error code on error, 0 on success
 */
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
				 struct xe_gt_tlb_invalidation_fence *fence,
				 u64 start, u64 end, u32 asid)
{
	struct xe_device *xe = gt_to_xe(gt);
#define MAX_TLB_INVALIDATION_LEN	7
	u32 action[MAX_TLB_INVALIDATION_LEN];
	int len = 0;

	xe_gt_assert(gt, fence);

	/* Execlists not supported */
	if (gt_to_xe(gt)->info.force_execlist) {
		__invalidation_fence_signal(xe, fence);
		return 0;
	}

	action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
	action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
	if (!xe->info.has_range_tlb_invalidation) {
		action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
	} else {
		u64 orig_start = start;
		u64 length = end - start;
		u64 align;

		if (length < SZ_4K)
			length = SZ_4K;

		/*
		 * We need to invalidate at a higher granularity if the start
		 * address is not aligned to the length. In that case, find a
		 * length large enough to create an address mask covering the
		 * required range.
		 */
		align = roundup_pow_of_two(length);
		start = ALIGN_DOWN(start, align);
		end = ALIGN(end, align);
		length = align;
		while (start + length < end) {
			length <<= 1;
			start = ALIGN_DOWN(orig_start, length);
		}

		/*
		 * The minimum invalidation size the hardware expects for a 2MB
		 * page is 16MB.
		 */
		if (length >= SZ_2M) {
			length = max_t(u64, SZ_16M, length);
			start = ALIGN_DOWN(orig_start, length);
		}

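		/*
		 * Sanity check the computed range: power-of-two length of at
		 * least 4K, no 4M/8M sizes (the GENMASK below), and a start
		 * aligned to the length.
		 */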
		xe_gt_assert(gt, length >= SZ_4K);
		xe_gt_assert(gt, is_power_of_2(length));
		xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1,
						    ilog2(SZ_2M) + 1)));
		xe_gt_assert(gt, IS_ALIGNED(start, length));

		action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
		action[len++] = asid;
		action[len++] = lower_32_bits(start);
		action[len++] = upper_32_bits(start);
		action[len++] = ilog2(length) - ilog2(SZ_4K);
	}

	xe_gt_assert(gt, len <= MAX_TLB_INVALIDATION_LEN);

	return send_tlb_invalidation(&gt->uc.guc, fence, action, len);
}

/**
 * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
 * @gt: graphics tile
 * @fence: invalidation fence which will be signalled on TLB invalidation
 * completion, can be NULL
 * @vma: VMA to invalidate
 *
 * Issue a range-based TLB invalidation if supported; if not, fall back to a
 * full TLB invalidation. Completion of the TLB invalidation is asynchronous and
 * the caller can use the invalidation fence to wait for completion.
 *
 * Return: Negative error code on error, 0 on success
 */
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
			       struct xe_gt_tlb_invalidation_fence *fence,
			       struct xe_vma *vma)
{
	xe_gt_assert(gt, vma);

	return xe_gt_tlb_invalidation_range(gt, fence, xe_vma_start(vma),
					    xe_vma_end(vma),
					    xe_vma_vm(vma)->usm.asid);
}

/**
 * xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
 * @guc: guc
 * @msg: message indicating TLB invalidation done
 * @len: length of message
 *
 * Parse the seqno of the TLB invalidation, wake any waiters for the seqno, and
 * signal any invalidation fences for the seqno. The algorithm for this depends
 * on seqnos being received in order and asserts this assumption.
 *
 * Return: 0 on success, -EPROTO for malformed messages.
 */
int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gt_tlb_invalidation_fence *fence, *next;
	unsigned long flags;

	if (unlikely(len != 1))
		return -EPROTO;

	/*
	 * This can be run both directly from the IRQ handler and from
	 * process_g2h_msg(). Only one may process any individual CT message,
	 * however the order they are processed here could result in skipping a
	 * seqno. To handle that we just process all the seqnos from the last
	 * seqno_recv up to and including the one in msg[0]. The delta should be
	 * very small, so there shouldn't be many pending_fences we actually
	 * need to iterate over here.
	 *
	 * From the GuC's POV we expect the seqnos to always appear in-order, so
	 * if we see something later in the timeline we can be sure that
	 * anything appearing earlier has already signalled, just that we have
	 * yet to officially process the CT message, as when racing against
	 * process_g2h_msg().
	 */
	spin_lock_irqsave(&gt->tlb_invalidation.pending_lock, flags);
	if (tlb_invalidation_seqno_past(gt, msg[0])) {
		spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);
		return 0;
	}

	WRITE_ONCE(gt->tlb_invalidation.seqno_recv, msg[0]);

	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link) {
		trace_xe_gt_tlb_invalidation_fence_recv(xe, fence);

		if (!tlb_invalidation_seqno_past(gt, fence->seqno))
			break;

		invalidation_fence_signal(xe, fence);
	}

	if (!list_empty(&gt->tlb_invalidation.pending_fences))
		mod_delayed_work(system_wq,
				 &gt->tlb_invalidation.fence_tdr,
				 tlb_timeout_jiffies(gt));
	else
		cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);

	spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);

	return 0;
}

static const char *
invalidation_fence_get_driver_name(struct dma_fence *dma_fence)
{
	return "xe";
}

static const char *
invalidation_fence_get_timeline_name(struct dma_fence *dma_fence)
{
	return "invalidation_fence";
}

static const struct dma_fence_ops invalidation_fence_ops = {
	.get_driver_name = invalidation_fence_get_driver_name,
	.get_timeline_name = invalidation_fence_get_timeline_name,
};

/**
 * xe_gt_tlb_invalidation_fence_init - Initialize TLB invalidation fence
 * @gt: GT
 * @fence: TLB invalidation fence to initialize
 * @stack: fence is stack variable
 *
 * Initialize TLB invalidation fence for use. xe_gt_tlb_invalidation_fence_fini
 * will be called automatically when the fence is signalled (all fences must
 * signal), even on error.
 */
void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
				       struct xe_gt_tlb_invalidation_fence *fence,
				       bool stack)
{
	xe_pm_runtime_get_noresume(gt_to_xe(gt));

	spin_lock_irq(&gt->tlb_invalidation.lock);
	dma_fence_init(&fence->base, &invalidation_fence_ops,
		       &gt->tlb_invalidation.lock,
		       dma_fence_context_alloc(1), 1);
	spin_unlock_irq(&gt->tlb_invalidation.lock);
	INIT_LIST_HEAD(&fence->link);
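	/*
	 * Heap fences take an extra reference here which
	 * __invalidation_fence_signal() drops once the fence has signalled;
	 * stack fences are only flagged so that put is skipped.
	 */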
	if (stack)
		set_bit(FENCE_STACK_BIT, &fence->base.flags);
	else
		dma_fence_get(&fence->base);
	fence->gt = gt;
}