// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gt_tlb_invalidation.h"

#include "abi/guc_actions_abi.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_gt_stats.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_trace.h"
#include "regs/xe_guc_regs.h"

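/*
 * Flag stored in the dma-fence flags to mark a fence embedded on the
 * caller's stack, so that signalling skips the final dma_fence_put().
 */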
#define FENCE_STACK_BIT		DMA_FENCE_FLAG_USER_BITS

/*
 * TLB invalidation depends on pending commands in the CT queue and then the
 * real invalidation time. Double the time needed to process the full CT queue
 * just to be on the safe side.
 */
static long tlb_timeout_jiffies(struct xe_gt *gt)
{
	/* this reflects what HW/GuC needs to process TLB inv request */
	const long hw_tlb_timeout = HZ / 4;

	/* this estimates actual delay caused by the CTB transport */
	long delay = xe_guc_ct_queue_proc_time_jiffies(&gt->uc.guc.ct);

	return hw_tlb_timeout + 2 * delay;
}

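/*
 * Signal a TLB invalidation fence: this drops the PM reference taken at fence
 * init and, for non-stack fences, the extra dma-fence reference.
 * invalidation_fence_signal() additionally removes the fence from the pending
 * list and is called with tlb_invalidation.pending_lock held.
 */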
static void
__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
{
	bool stack = test_bit(FENCE_STACK_BIT, &fence->base.flags);

	trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
	xe_gt_tlb_invalidation_fence_fini(fence);
	dma_fence_signal(&fence->base);
	if (!stack)
		dma_fence_put(&fence->base);
}

static void
invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
{
	list_del(&fence->link);
	__invalidation_fence_signal(xe, fence);
}

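/*
 * Timeout worker: fail any fence that is still pending after
 * tlb_timeout_jiffies() with -ETIME and signal it; the worker re-arms itself
 * for as long as fences remain on the pending list.
 */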
static void xe_gt_tlb_fence_timeout(struct work_struct *work)
{
	struct xe_gt *gt = container_of(work, struct xe_gt,
					tlb_invalidation.fence_tdr.work);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gt_tlb_invalidation_fence *fence, *next;

	spin_lock_irq(&gt->tlb_invalidation.pending_lock);
	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link) {
		s64 since_inval_ms = ktime_ms_delta(ktime_get(),
						    fence->invalidation_time);

		if (msecs_to_jiffies(since_inval_ms) < tlb_timeout_jiffies(gt))
			break;

		trace_xe_gt_tlb_invalidation_fence_timeout(xe, fence);
		xe_gt_err(gt, "TLB invalidation fence timeout, seqno=%d recv=%d",
			  fence->seqno, gt->tlb_invalidation.seqno_recv);

		fence->base.error = -ETIME;
		invalidation_fence_signal(xe, fence);
	}
	if (!list_empty(&gt->tlb_invalidation.pending_fences))
		queue_delayed_work(system_wq,
				   &gt->tlb_invalidation.fence_tdr,
				   tlb_timeout_jiffies(gt));
	spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
}

/**
 * xe_gt_tlb_invalidation_init - Initialize GT TLB invalidation state
 * @gt: GT structure
 *
 * Initialize GT TLB invalidation state. This is purely software initialization
 * and should be called once during driver load.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
{
	gt->tlb_invalidation.seqno = 1;
	INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
	spin_lock_init(&gt->tlb_invalidation.pending_lock);
	spin_lock_init(&gt->tlb_invalidation.lock);
	INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
			  xe_gt_tlb_fence_timeout);

	return 0;
}

/**
 * xe_gt_tlb_invalidation_reset - Handle GT TLB invalidation state on reset
 * @gt: GT structure
 *
 * Signal any pending invalidation fences. Should be called during a GT reset.
 */
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
{
	struct xe_gt_tlb_invalidation_fence *fence, *next;
	int pending_seqno;

	/*
	 * CT channel is already disabled at this point. No new TLB requests can
	 * appear.
	 */

	mutex_lock(&gt->uc.guc.ct.lock);
	spin_lock_irq(&gt->tlb_invalidation.pending_lock);
	cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
	/*
	 * We might have various kworkers waiting for TLB flushes to complete
	 * which are not tracked with an explicit TLB fence, however at this
	 * stage that will never happen since the CT is already disabled, so
	 * make sure we signal them here under the assumption that we have
	 * completed a full GT reset.
	 */
	if (gt->tlb_invalidation.seqno == 1)
		pending_seqno = TLB_INVALIDATION_SEQNO_MAX - 1;
	else
		pending_seqno = gt->tlb_invalidation.seqno - 1;
	WRITE_ONCE(gt->tlb_invalidation.seqno_recv, pending_seqno);

	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link)
		invalidation_fence_signal(gt_to_xe(gt), fence);
	spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
	mutex_unlock(&gt->uc.guc.ct.lock);
}

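/*
 * Return true if @seqno has already been acknowledged by the GuC, i.e.
 * seqno_recv has caught up with it, taking wraparound of the seqno space into
 * account: a difference of more than half of TLB_INVALIDATION_SEQNO_MAX is
 * interpreted as a wrap rather than a genuinely old or future seqno.
 */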
static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
{
	int seqno_recv = READ_ONCE(gt->tlb_invalidation.seqno_recv);

	if (seqno - seqno_recv < -(TLB_INVALIDATION_SEQNO_MAX / 2))
		return false;

	if (seqno - seqno_recv > (TLB_INVALIDATION_SEQNO_MAX / 2))
		return true;

	return seqno_recv >= seqno;
}

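/*
 * Assign the next seqno to @fence under the CT lock, patch it into the H2G
 * action and send it. On success the fence is either added to the pending
 * list (arming the timeout worker for the first entry) or signalled
 * immediately if the G2H reply has already been processed; if the send fails
 * the fence is signalled and the error returned to the caller.
 */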
static int send_tlb_invalidation(struct xe_guc *guc,
				 struct xe_gt_tlb_invalidation_fence *fence,
				 u32 *action, int len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	int seqno;
	int ret;

	xe_gt_assert(gt, fence);

	/*
	 * XXX: The seqno algorithm relies on TLB invalidations being processed
	 * in order, which they currently are. If that changes, the algorithm
	 * will need to be updated.
	 */

	mutex_lock(&guc->ct.lock);
	seqno = gt->tlb_invalidation.seqno;
	fence->seqno = seqno;
	trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
	action[1] = seqno;
	ret = xe_guc_ct_send_locked(&guc->ct, action, len,
				    G2H_LEN_DW_TLB_INVALIDATE, 1);
	if (!ret) {
		spin_lock_irq(&gt->tlb_invalidation.pending_lock);
		/*
		 * We haven't actually published the TLB fence on the
		 * pending_fences list yet, but in theory our seqno could have
		 * already been written by the time we acquired the
		 * pending_lock. In such a case we can just go ahead and signal
		 * the fence here.
		 */
		if (tlb_invalidation_seqno_past(gt, seqno)) {
			__invalidation_fence_signal(xe, fence);
		} else {
			fence->invalidation_time = ktime_get();
			list_add_tail(&fence->link,
				      &gt->tlb_invalidation.pending_fences);

			if (list_is_singular(&gt->tlb_invalidation.pending_fences))
				queue_delayed_work(system_wq,
						   &gt->tlb_invalidation.fence_tdr,
						   tlb_timeout_jiffies(gt));
		}
		spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
	} else if (ret < 0) {
		__invalidation_fence_signal(xe, fence);
	}
	if (!ret) {
		/* Advance the seqno, wrapping within [1, TLB_INVALIDATION_SEQNO_MAX) */
		gt->tlb_invalidation.seqno = (gt->tlb_invalidation.seqno + 1) %
			TLB_INVALIDATION_SEQNO_MAX;
		if (!gt->tlb_invalidation.seqno)
			gt->tlb_invalidation.seqno = 1;
	}
	mutex_unlock(&guc->ct.lock);
	xe_gt_stats_incr(gt, XE_GT_STATS_ID_TLB_INVAL, 1);

	return ret;
}

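/*
 * Build the invalidation opcode dword for a given invalidation type, always
 * using heavy mode and requesting a cache flush.
 */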
#define MAKE_INVAL_OP(type)	((type << XE_GUC_TLB_INVAL_TYPE_SHIFT) | \
		XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
		XE_GUC_TLB_INVAL_FLUSH_CACHE)

/**
 * xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
 * @gt: GT structure
 * @fence: invalidation fence which will be signaled on TLB invalidation
 * completion
 *
 * Issue a TLB invalidation for the GuC. Completion of the TLB invalidation is
 * asynchronous and the caller can use the invalidation fence to wait for
 * completion.
 *
 * Return: 0 on success, negative error code on error
 */
static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
				      struct xe_gt_tlb_invalidation_fence *fence)
{
	u32 action[] = {
		XE_GUC_ACTION_TLB_INVALIDATION,
		0,  /* seqno, replaced in send_tlb_invalidation */
		MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
	};

	return send_tlb_invalidation(&gt->uc.guc, fence, action,
				     ARRAY_SIZE(action));
}

/**
 * xe_gt_tlb_invalidation_ggtt - Issue a TLB invalidation on this GT for the GGTT
 * @gt: GT structure
 *
 * Issue a TLB invalidation for the GGTT. Completion of the TLB invalidation is
 * synchronous.
 *
 * Return: 0 on success, negative error code on error
 */
int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
	    gt->uc.guc.submission_state.enabled) {
		struct xe_gt_tlb_invalidation_fence fence;
		int ret;

		xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
		ret = xe_gt_tlb_invalidation_guc(gt, &fence);
		if (ret < 0) {
			xe_gt_tlb_invalidation_fence_fini(&fence);
			return ret;
		}

		xe_gt_tlb_invalidation_fence_wait(&fence);
	} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
		/* GuC CT or submission not available, invalidate via MMIO */
		if (IS_SRIOV_VF(xe))
			return 0;

		xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
		if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
			xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC1,
					PVC_GUC_TLB_INV_DESC1_INVALIDATE);
			xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC0,
					PVC_GUC_TLB_INV_DESC0_VALID);
		} else {
			xe_mmio_write32(gt, GUC_TLB_INV_CR,
					GUC_TLB_INV_CR_INVALIDATE);
		}
		xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
	}

	return 0;
}

/**
 * xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
 * address range
 *
 * @gt: GT structure
 * @fence: invalidation fence which will be signaled on TLB invalidation
 * completion
 * @start: start address
 * @end: end address
 * @asid: address space id
 *
 * Issue a range based TLB invalidation if supported, if not fall back to a
 * full TLB invalidation. Completion of the TLB invalidation is asynchronous
 * and the caller can use the invalidation fence to wait for completion.
 *
 * Return: Negative error code on error, 0 on success
 */
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
				 struct xe_gt_tlb_invalidation_fence *fence,
				 u64 start, u64 end, u32 asid)
{
	struct xe_device *xe = gt_to_xe(gt);
#define MAX_TLB_INVALIDATION_LEN	7
	u32 action[MAX_TLB_INVALIDATION_LEN];
	int len = 0;

	xe_gt_assert(gt, fence);

	/* Execlists not supported */
	if (gt_to_xe(gt)->info.force_execlist) {
		__invalidation_fence_signal(xe, fence);
		return 0;
	}

	action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
	action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
	if (!xe->info.has_range_tlb_invalidation) {
		action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
	} else {
		u64 orig_start = start;
		u64 length = end - start;
		u64 align;

		if (length < SZ_4K)
			length = SZ_4K;

		/*
		 * We need to invalidate at a coarser granularity if the start
		 * address is not aligned to the length: find a length large
		 * enough to create an address mask covering the required
		 * range.
		 */
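		/*
		 * For example (illustrative values only): invalidating
		 * 0x3000-0x6000 gives length = 0x3000, rounded up to
		 * align = 0x4000. The aligned window 0x0-0x8000 is wider than
		 * that, so the loop below doubles length to 0x8000 and the
		 * resulting (start, length) of (0x0, 0x8000) covers the whole
		 * requested range.
		 */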
		align = roundup_pow_of_two(length);
		start = ALIGN_DOWN(start, align);
		end = ALIGN(end, align);
		length = align;
		while (start + length < end) {
			length <<= 1;
			start = ALIGN_DOWN(orig_start, length);
		}

		/*
		 * The minimum invalidation size the hardware expects for a
		 * 2MB page is 16MB.
		 */
		if (length >= SZ_2M) {
			length = max_t(u64, SZ_16M, length);
			start = ALIGN_DOWN(orig_start, length);
		}

		xe_gt_assert(gt, length >= SZ_4K);
		xe_gt_assert(gt, is_power_of_2(length));
		xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1,
						    ilog2(SZ_2M) + 1)));
		xe_gt_assert(gt, IS_ALIGNED(start, length));

		action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
		action[len++] = asid;
		action[len++] = lower_32_bits(start);
		action[len++] = upper_32_bits(start);
		action[len++] = ilog2(length) - ilog2(SZ_4K);
	}

	xe_gt_assert(gt, len <= MAX_TLB_INVALIDATION_LEN);

	return send_tlb_invalidation(&gt->uc.guc, fence, action, len);
}

/**
 * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
 * @gt: GT structure
 * @fence: invalidation fence which will be signaled on TLB invalidation
 * completion
 * @vma: VMA to invalidate
 *
 * Issue a range based TLB invalidation if supported, if not fall back to a
 * full TLB invalidation. Completion of the TLB invalidation is asynchronous
 * and the caller can use the invalidation fence to wait for completion.
 *
 * Return: Negative error code on error, 0 on success
 */
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
			       struct xe_gt_tlb_invalidation_fence *fence,
			       struct xe_vma *vma)
{
	xe_gt_assert(gt, vma);

	return xe_gt_tlb_invalidation_range(gt, fence, xe_vma_start(vma),
					    xe_vma_end(vma),
					    xe_vma_vm(vma)->usm.asid);
}

/**
 * xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
 * @guc: GuC
 * @msg: message indicating TLB invalidation done
 * @len: length of message
 *
 * Parse the seqno of the TLB invalidation, wake any waiters for the seqno, and
 * signal any invalidation fences for the seqno. The algorithm for this depends
 * on seqnos being received in-order and asserts this assumption.
 *
 * Return: 0 on success, -EPROTO for malformed messages.
 */
int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gt_tlb_invalidation_fence *fence, *next;
	unsigned long flags;

	if (unlikely(len != 1))
		return -EPROTO;

	/*
	 * This can be run both directly from the IRQ handler and from
	 * process_g2h_msg(). Only one may process any individual CT message,
	 * however the order in which they are processed here could result in
	 * skipping a seqno. To handle that we just process all the seqnos from
	 * the last seqno_recv up to and including the one in msg[0]. The delta
	 * should be very small, so there shouldn't be many pending_fences we
	 * actually need to iterate over here.
	 *
	 * From the GuC POV we expect the seqnos to always appear in-order, so
	 * if we see something later in the timeline we can be sure that
	 * anything appearing earlier has already signalled, we just have yet
	 * to officially process the CT message, e.g. when racing against
	 * process_g2h_msg().
	 */
	spin_lock_irqsave(&gt->tlb_invalidation.pending_lock, flags);
	if (tlb_invalidation_seqno_past(gt, msg[0])) {
		spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);
		return 0;
	}

	WRITE_ONCE(gt->tlb_invalidation.seqno_recv, msg[0]);

	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link) {
		trace_xe_gt_tlb_invalidation_fence_recv(xe, fence);

		if (!tlb_invalidation_seqno_past(gt, fence->seqno))
			break;

		invalidation_fence_signal(xe, fence);
	}

	if (!list_empty(&gt->tlb_invalidation.pending_fences))
		mod_delayed_work(system_wq,
				 &gt->tlb_invalidation.fence_tdr,
				 tlb_timeout_jiffies(gt));
	else
		cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);

	spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);

	return 0;
}

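/*
 * dma_fence_ops for TLB invalidation fences; only the driver and timeline
 * names are required. The fences are signalled from the G2H done handler,
 * the timeout worker and the GT reset path above.
 */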
static const char *
invalidation_fence_get_driver_name(struct dma_fence *dma_fence)
{
	return "xe";
}

static const char *
invalidation_fence_get_timeline_name(struct dma_fence *dma_fence)
{
	return "invalidation_fence";
}

static const struct dma_fence_ops invalidation_fence_ops = {
	.get_driver_name = invalidation_fence_get_driver_name,
	.get_timeline_name = invalidation_fence_get_timeline_name,
};

/**
 * xe_gt_tlb_invalidation_fence_init - Initialize TLB invalidation fence
 * @gt: GT structure
 * @fence: TLB invalidation fence to initialize
 * @stack: fence is a stack variable
 *
 * Initialize TLB invalidation fence for use. xe_gt_tlb_invalidation_fence_fini
 * must be called if fence is not signaled.
 */
void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
				       struct xe_gt_tlb_invalidation_fence *fence,
				       bool stack)
{
	xe_pm_runtime_get_noresume(gt_to_xe(gt));

	spin_lock_irq(&gt->tlb_invalidation.lock);
	dma_fence_init(&fence->base, &invalidation_fence_ops,
		       &gt->tlb_invalidation.lock,
		       dma_fence_context_alloc(1), 1);
	spin_unlock_irq(&gt->tlb_invalidation.lock);
	INIT_LIST_HEAD(&fence->link);
	if (stack)
		set_bit(FENCE_STACK_BIT, &fence->base.flags);
	else
		dma_fence_get(&fence->base);
	fence->gt = gt;
}

/**
 * xe_gt_tlb_invalidation_fence_fini - Finalize TLB invalidation fence
 * @fence: TLB invalidation fence to finalize
 *
 * Drop the PM reference which the fence took during init.
 */
void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence)
{
	xe_pm_runtime_put(gt_to_xe(fence->gt));
}