xref: /linux/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c (revision e77a8005748547fb1f10645097f13ccdd804d7e5)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 
6 #include "xe_gt_tlb_invalidation.h"
7 
8 #include "abi/guc_actions_abi.h"
9 #include "xe_device.h"
10 #include "xe_force_wake.h"
11 #include "xe_gt.h"
12 #include "xe_gt_printk.h"
13 #include "xe_guc.h"
14 #include "xe_guc_ct.h"
15 #include "xe_gt_stats.h"
16 #include "xe_mmio.h"
17 #include "xe_pm.h"
18 #include "xe_sriov.h"
19 #include "xe_trace.h"
20 #include "regs/xe_guc_regs.h"
21 
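/*
 * Fences allocated on the stack are flagged with this bit so the signalling
 * path knows not to drop a reference on them (see __invalidation_fence_signal()
 * and xe_gt_tlb_invalidation_fence_init()).
 */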
22 #define FENCE_STACK_BIT		DMA_FENCE_FLAG_USER_BITS
23 
24 /*
25  * TLB invalidation depends on the commands pending in the CT queue and then
26  * the real invalidation time. Double the time needed to process a full CT
27  * queue just to be on the safe side.
28  */
29 static long tlb_timeout_jiffies(struct xe_gt *gt)
30 {
31 	/* this reflects what HW/GuC needs to process TLB inv request */
32 	const long hw_tlb_timeout = HZ / 4;
33 
34 	/* this estimates actual delay caused by the CTB transport */
35 	long delay = xe_guc_ct_queue_proc_time_jiffies(&gt->uc.guc.ct);
36 
37 	return hw_tlb_timeout + 2 * delay;
38 }
39 
40 static void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence)
41 {
42 	if (WARN_ON_ONCE(!fence->gt))
43 		return;
44 
45 	xe_pm_runtime_put(gt_to_xe(fence->gt));
46 	fence->gt = NULL; /* fini() should be called once */
47 }
48 
49 static void
50 __invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
51 {
52 	bool stack = test_bit(FENCE_STACK_BIT, &fence->base.flags);
53 
54 	trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
55 	xe_gt_tlb_invalidation_fence_fini(fence);
56 	dma_fence_signal(&fence->base);
57 	if (!stack)
58 		dma_fence_put(&fence->base);
59 }
60 
61 static void
62 invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
63 {
64 	list_del(&fence->link);
65 	__invalidation_fence_signal(xe, fence);
66 }
67 
68 static void xe_gt_tlb_fence_timeout(struct work_struct *work)
69 {
70 	struct xe_gt *gt = container_of(work, struct xe_gt,
71 					tlb_invalidation.fence_tdr.work);
72 	struct xe_device *xe = gt_to_xe(gt);
73 	struct xe_gt_tlb_invalidation_fence *fence, *next;
74 
75 	spin_lock_irq(&gt->tlb_invalidation.pending_lock);
76 	list_for_each_entry_safe(fence, next,
77 				 &gt->tlb_invalidation.pending_fences, link) {
78 		s64 since_inval_ms = ktime_ms_delta(ktime_get(),
79 						    fence->invalidation_time);
80 
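		/*
		 * Fences are added in submission order, so the first fence
		 * that has not yet timed out ends the scan.
		 */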
81 		if (msecs_to_jiffies(since_inval_ms) < tlb_timeout_jiffies(gt))
82 			break;
83 
84 		trace_xe_gt_tlb_invalidation_fence_timeout(xe, fence);
85 		xe_gt_err(gt, "TLB invalidation fence timeout, seqno=%d recv=%d",
86 			  fence->seqno, gt->tlb_invalidation.seqno_recv);
87 
88 		fence->base.error = -ETIME;
89 		invalidation_fence_signal(xe, fence);
90 	}
91 	if (!list_empty(&gt->tlb_invalidation.pending_fences))
92 		queue_delayed_work(system_wq,
93 				   &gt->tlb_invalidation.fence_tdr,
94 				   tlb_timeout_jiffies(gt));
95 	spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
96 }
97 
98 /**
99  * xe_gt_tlb_invalidation_init - Initialize GT TLB invalidation state
100  * @gt: graphics tile
101  *
102  * Initialize GT TLB invalidation state. This is purely software initialization
103  * and should be called once during driver load.
104  *
105  * Return: 0 on success, negative error code on error.
106  */
107 int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
108 {
109 	gt->tlb_invalidation.seqno = 1;
110 	INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
111 	spin_lock_init(&gt->tlb_invalidation.pending_lock);
112 	spin_lock_init(&gt->tlb_invalidation.lock);
113 	INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
114 			  xe_gt_tlb_fence_timeout);
115 
116 	return 0;
117 }
118 
119 /**
120  * xe_gt_tlb_invalidation_reset - Reset GT TLB invalidation state
121  * @gt: graphics tile
122  *
123  * Signal any pending invalidation fences. Should be called during a GT reset.
124  */
125 void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
126 {
127 	struct xe_gt_tlb_invalidation_fence *fence, *next;
128 	int pending_seqno;
129 
130 	/*
131 	 * CT channel is already disabled at this point. No new TLB requests can
132 	 * appear.
133 	 */
134 
135 	mutex_lock(&gt->uc.guc.ct.lock);
136 	spin_lock_irq(&gt->tlb_invalidation.pending_lock);
137 	cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
138 	/*
139 	 * We might have various kworkers waiting for TLB flushes to complete
140 	 * which are not tracked with an explicit TLB fence; however, at this
141 	 * stage that will never happen since the CT is already disabled, so
142 	 * make sure we signal them here under the assumption that we have
143 	 * completed a full GT reset.
144 	 */
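	/*
	 * Treat the most recently assigned seqno as already received so that
	 * every possible outstanding seqno is considered past and the pending
	 * fences below can be signalled.
	 */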
145 	if (gt->tlb_invalidation.seqno == 1)
146 		pending_seqno = TLB_INVALIDATION_SEQNO_MAX - 1;
147 	else
148 		pending_seqno = gt->tlb_invalidation.seqno - 1;
149 	WRITE_ONCE(gt->tlb_invalidation.seqno_recv, pending_seqno);
150 
151 	list_for_each_entry_safe(fence, next,
152 				 &gt->tlb_invalidation.pending_fences, link)
153 		invalidation_fence_signal(gt_to_xe(gt), fence);
154 	spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
155 	mutex_unlock(&gt->uc.guc.ct.lock);
156 }
157 
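/*
 * Seqno wraparound-aware comparison: returns true if @seqno has already been
 * received. Because seqnos wrap at TLB_INVALIDATION_SEQNO_MAX, plain ordering
 * is only meaningful within half the seqno space; deltas larger than half the
 * range are treated as wraparound. For example, with seqno_recv == 2 and
 * seqno == TLB_INVALIDATION_SEQNO_MAX - 1, the delta exceeds half the range,
 * so the seqno is considered already past (received before the wrap).
 */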
158 static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
159 {
160 	int seqno_recv = READ_ONCE(gt->tlb_invalidation.seqno_recv);
161 
162 	if (seqno - seqno_recv < -(TLB_INVALIDATION_SEQNO_MAX / 2))
163 		return false;
164 
165 	if (seqno - seqno_recv > (TLB_INVALIDATION_SEQNO_MAX / 2))
166 		return true;
167 
168 	return seqno_recv >= seqno;
169 }
170 
171 static int send_tlb_invalidation(struct xe_guc *guc,
172 				 struct xe_gt_tlb_invalidation_fence *fence,
173 				 u32 *action, int len)
174 {
175 	struct xe_gt *gt = guc_to_gt(guc);
176 	struct xe_device *xe = gt_to_xe(gt);
177 	int seqno;
178 	int ret;
179 
180 	xe_gt_assert(gt, fence);
181 
182 	/*
183 	 * XXX: The seqno algorithm relies on TLB invalidations being processed
184 	 * in the order in which they are issued (which they currently are); if
185 	 * that changes, the algorithm will need to be updated.
186 	 */
187 
188 	mutex_lock(&guc->ct.lock);
189 	seqno = gt->tlb_invalidation.seqno;
190 	fence->seqno = seqno;
191 	trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
192 	action[1] = seqno;
193 	ret = xe_guc_ct_send_locked(&guc->ct, action, len,
194 				    G2H_LEN_DW_TLB_INVALIDATE, 1);
195 	if (!ret) {
196 		spin_lock_irq(&gt->tlb_invalidation.pending_lock);
197 		/*
198 		 * We haven't actually published the TLB fence as per
199 		 * pending_fences, but in theory our seqno could already have
200 		 * been written by the time we acquired the pending_lock. In such a case
201 		 * we can just go ahead and signal the fence here.
202 		 */
203 		if (tlb_invalidation_seqno_past(gt, seqno)) {
204 			__invalidation_fence_signal(xe, fence);
205 		} else {
206 			fence->invalidation_time = ktime_get();
207 			list_add_tail(&fence->link,
208 				      &gt->tlb_invalidation.pending_fences);
209 
210 			if (list_is_singular(&gt->tlb_invalidation.pending_fences))
211 				queue_delayed_work(system_wq,
212 						   &gt->tlb_invalidation.fence_tdr,
213 						   tlb_timeout_jiffies(gt));
214 		}
215 		spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
216 	} else {
217 		__invalidation_fence_signal(xe, fence);
218 	}
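	/*
	 * Only bump the seqno if the send succeeded. Seqnos wrap at
	 * TLB_INVALIDATION_SEQNO_MAX and skip 0, so valid seqnos are always in
	 * the range [1, TLB_INVALIDATION_SEQNO_MAX).
	 */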
219 	if (!ret) {
220 		gt->tlb_invalidation.seqno = (gt->tlb_invalidation.seqno + 1) %
221 			TLB_INVALIDATION_SEQNO_MAX;
222 		if (!gt->tlb_invalidation.seqno)
223 			gt->tlb_invalidation.seqno = 1;
224 	}
225 	mutex_unlock(&guc->ct.lock);
226 	xe_gt_stats_incr(gt, XE_GT_STATS_ID_TLB_INVAL, 1);
227 
228 	return ret;
229 }
230 
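/*
 * Build the invalidation-descriptor dword for a GuC TLB invalidation action
 * from the invalidation type, using heavy invalidation mode and requesting a
 * cache flush (see the XE_GUC_TLB_INVAL_* definitions in guc_actions_abi.h).
 */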
231 #define MAKE_INVAL_OP(type)	((type << XE_GUC_TLB_INVAL_TYPE_SHIFT) | \
232 		XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
233 		XE_GUC_TLB_INVAL_FLUSH_CACHE)
234 
235 /**
236  * xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
237  * @gt: graphics tile
238  * @fence: invalidation fence which will be signaled on TLB invalidation
239  * completion
240  *
241  * Issue a TLB invalidation for the GuC. Completion of the TLB invalidation is
242  * asynchronous and the caller can use the invalidation fence to wait for completion.
243  *
244  * Return: 0 on success, negative error code on error
245  */
246 static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
247 				      struct xe_gt_tlb_invalidation_fence *fence)
248 {
249 	u32 action[] = {
250 		XE_GUC_ACTION_TLB_INVALIDATION,
251 		0,  /* seqno, replaced in send_tlb_invalidation */
252 		MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
253 	};
254 
255 	return send_tlb_invalidation(&gt->uc.guc, fence, action,
256 				     ARRAY_SIZE(action));
257 }
258 
259 /**
260  * xe_gt_tlb_invalidation_ggtt - Issue a TLB invalidation on this GT for the GGTT
261  * @gt: graphics tile
262  *
263  * Issue a TLB invalidation for the GGTT. Completion of TLB invalidation is
264  * synchronous.
265  *
266  * Return: 0 on success, negative error code on error
267  */
268 int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
269 {
270 	struct xe_device *xe = gt_to_xe(gt);
271 	unsigned int fw_ref;
272 
273 	if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
274 	    gt->uc.guc.submission_state.enabled) {
275 		struct xe_gt_tlb_invalidation_fence fence;
276 		int ret;
277 
278 		xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
279 		ret = xe_gt_tlb_invalidation_guc(gt, &fence);
280 		if (ret)
281 			return ret;
282 
283 		xe_gt_tlb_invalidation_fence_wait(&fence);
284 	} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
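		/*
		 * GuC CT is not usable yet (e.g. during early driver load or a
		 * reset), so fall back to triggering the invalidation directly
		 * through the GuC MMIO interface; VFs skip this fallback (see
		 * the early return below).
		 */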
285 		struct xe_mmio *mmio = &gt->mmio;
286 
287 		if (IS_SRIOV_VF(xe))
288 			return 0;
289 
290 		fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
291 		if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
292 			xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC1,
293 					PVC_GUC_TLB_INV_DESC1_INVALIDATE);
294 			xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC0,
295 					PVC_GUC_TLB_INV_DESC0_VALID);
296 		} else {
297 			xe_mmio_write32(mmio, GUC_TLB_INV_CR,
298 					GUC_TLB_INV_CR_INVALIDATE);
299 		}
300 		xe_force_wake_put(gt_to_fw(gt), fw_ref);
301 	}
302 
303 	return 0;
304 }
305 
306 /**
307  * xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
308  * address range
309  *
310  * @gt: graphics tile
311  * @fence: invalidation fence which will be signaled on TLB invalidation
312  * completion
313  * @start: start address
314  * @end: end address
315  * @asid: address space id
316  *
317  * Issue a range-based TLB invalidation if supported; if not, fall back to a
318  * full TLB invalidation. Completion of the TLB invalidation is asynchronous and
319  * the caller can use the invalidation fence to wait for completion.
320  *
321  * Return: Negative error code on error, 0 on success
322  */
323 int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
324 				 struct xe_gt_tlb_invalidation_fence *fence,
325 				 u64 start, u64 end, u32 asid)
326 {
327 	struct xe_device *xe = gt_to_xe(gt);
328 #define MAX_TLB_INVALIDATION_LEN	7
329 	u32 action[MAX_TLB_INVALIDATION_LEN];
330 	int len = 0;
331 
332 	xe_gt_assert(gt, fence);
333 
334 	/* Execlists not supported */
335 	if (gt_to_xe(gt)->info.force_execlist) {
336 		__invalidation_fence_signal(xe, fence);
337 		return 0;
338 	}
339 
340 	action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
341 	action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
342 	if (!xe->info.has_range_tlb_invalidation) {
343 		action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
344 	} else {
345 		u64 orig_start = start;
346 		u64 length = end - start;
347 		u64 align;
348 
349 		if (length < SZ_4K)
350 			length = SZ_4K;
351 
352 		/*
353 		 * We need to invalidate at a higher granularity if the start
354 		 * address is not aligned to the length: find a power-of-two
355 		 * length large enough that the resulting address mask covers
356 		 * the whole requested range.
357 		 */
358 		align = roundup_pow_of_two(length);
359 		start = ALIGN_DOWN(start, align);
360 		end = ALIGN(end, align);
361 		length = align;
362 		while (start + length < end) {
363 			length <<= 1;
364 			start = ALIGN_DOWN(orig_start, length);
365 		}
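		/*
		 * For example, a request for [0x1000, 0x3000) has length 0x2000
		 * but is not 0x2000-aligned, so the loop above grows it to a
		 * single aligned 0x4000 invalidation starting at 0x0.
		 */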
366 
367 		/*
368 		 * Minimum invalidation size for a 2MB page that the hardware
369 		 * expects is 16MB
370 		 */
371 		if (length >= SZ_2M) {
372 			length = max_t(u64, SZ_16M, length);
373 			start = ALIGN_DOWN(orig_start, length);
374 		}
375 
376 		xe_gt_assert(gt, length >= SZ_4K);
377 		xe_gt_assert(gt, is_power_of_2(length));
378 		xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1,
379 						    ilog2(SZ_2M) + 1)));
380 		xe_gt_assert(gt, IS_ALIGNED(start, length));
381 
382 		action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
383 		action[len++] = asid;
384 		action[len++] = lower_32_bits(start);
385 		action[len++] = upper_32_bits(start);
386 		action[len++] = ilog2(length) - ilog2(SZ_4K);
387 	}
388 
389 	xe_gt_assert(gt, len <= MAX_TLB_INVALIDATION_LEN);
390 
391 	return send_tlb_invalidation(&gt->uc.guc, fence, action, len);
392 }
393 
394 /**
395  * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
396  * @gt: graphics tile
397  * @fence: invalidation fence which will be signaled on TLB invalidation
398  * completion, must not be NULL
399  * @vma: VMA to invalidate
400  *
401  * Issue a range-based TLB invalidation if supported; if not, fall back to a
402  * full TLB invalidation. Completion of the TLB invalidation is asynchronous and
403  * the caller can use the invalidation fence to wait for completion.
404  *
405  * Return: Negative error code on error, 0 on success
406  */
407 int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
408 			       struct xe_gt_tlb_invalidation_fence *fence,
409 			       struct xe_vma *vma)
410 {
411 	xe_gt_assert(gt, vma);
412 
413 	return xe_gt_tlb_invalidation_range(gt, fence, xe_vma_start(vma),
414 					    xe_vma_end(vma),
415 					    xe_vma_vm(vma)->usm.asid);
416 }
417 
418 /**
419  * xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
420  * @guc: guc
421  * @msg: message indicating TLB invalidation done
422  * @len: length of message
423  *
424  * Parse seqno of TLB invalidation, wake any waiters for seqno, and signal any
425  * invalidation fences for the seqno. The algorithm for this depends on seqnos
426  * being received in-order and asserts this assumption.
427  *
428  * Return: 0 on success, -EPROTO for malformed messages.
429  */
430 int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
431 {
432 	struct xe_gt *gt = guc_to_gt(guc);
433 	struct xe_device *xe = gt_to_xe(gt);
434 	struct xe_gt_tlb_invalidation_fence *fence, *next;
435 	unsigned long flags;
436 
437 	if (unlikely(len != 1))
438 		return -EPROTO;
439 
440 	/*
441 	 * This can be run both directly from the IRQ handler and from
442 	 * process_g2h_msg(). Only one of them may process any individual CT
443 	 * message, however the order they are processed in here could result in
444 	 * skipping a seqno. To handle that we just process all the seqnos from
445 	 * the last seqno_recv up to and including the one in msg[0]. The delta
446 	 * should be very small, so there shouldn't be many pending_fences we
447 	 * actually need to iterate over here.
448 	 *
449 	 * From the GuC's POV we expect the seqnos to always appear in-order, so
450 	 * if we see something later in the timeline we can be sure that anything
451 	 * appearing earlier has already signalled; we just have yet to
452 	 * officially process the corresponding CT message, e.g. when racing
453 	 * against process_g2h_msg().
454 	 */
455 	spin_lock_irqsave(&gt->tlb_invalidation.pending_lock, flags);
456 	if (tlb_invalidation_seqno_past(gt, msg[0])) {
457 		spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);
458 		return 0;
459 	}
460 
461 	WRITE_ONCE(gt->tlb_invalidation.seqno_recv, msg[0]);
462 
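	/*
	 * pending_fences is ordered by seqno, so signal fences from the head of
	 * the list until the first one whose seqno has not been received yet.
	 */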
463 	list_for_each_entry_safe(fence, next,
464 				 &gt->tlb_invalidation.pending_fences, link) {
465 		trace_xe_gt_tlb_invalidation_fence_recv(xe, fence);
466 
467 		if (!tlb_invalidation_seqno_past(gt, fence->seqno))
468 			break;
469 
470 		invalidation_fence_signal(xe, fence);
471 	}
472 
473 	if (!list_empty(&gt->tlb_invalidation.pending_fences))
474 		mod_delayed_work(system_wq,
475 				 &gt->tlb_invalidation.fence_tdr,
476 				 tlb_timeout_jiffies(gt));
477 	else
478 		cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
479 
480 	spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);
481 
482 	return 0;
483 }
484 
485 static const char *
486 invalidation_fence_get_driver_name(struct dma_fence *dma_fence)
487 {
488 	return "xe";
489 }
490 
491 static const char *
492 invalidation_fence_get_timeline_name(struct dma_fence *dma_fence)
493 {
494 	return "invalidation_fence";
495 }
496 
497 static const struct dma_fence_ops invalidation_fence_ops = {
498 	.get_driver_name = invalidation_fence_get_driver_name,
499 	.get_timeline_name = invalidation_fence_get_timeline_name,
500 };
501 
502 /**
503  * xe_gt_tlb_invalidation_fence_init - Initialize TLB invalidation fence
504  * @gt: GT
505  * @fence: TLB invalidation fence to initialize
506  * @stack: fence is stack variable
507  *
508  * Initialize TLB invalidation fence for use. xe_gt_tlb_invalidation_fence_fini
509  * will be called automatically when the fence is signalled (all fences must signal),
510  * even on error.
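 *
 * A minimal stack-based usage sketch, modelled on xe_gt_tlb_invalidation_ggtt()
 * (the VMA choice and error handling are up to the caller):
 *
 *	struct xe_gt_tlb_invalidation_fence fence;
 *	int err;
 *
 *	xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
 *	err = xe_gt_tlb_invalidation_vma(gt, &fence, vma);
 *	if (err)
 *		return err;	// fence already signalled on error
 *	xe_gt_tlb_invalidation_fence_wait(&fence);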
511  */
512 void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
513 				       struct xe_gt_tlb_invalidation_fence *fence,
514 				       bool stack)
515 {
516 	xe_pm_runtime_get_noresume(gt_to_xe(gt));
517 
518 	spin_lock_irq(&gt->tlb_invalidation.lock);
519 	dma_fence_init(&fence->base, &invalidation_fence_ops,
520 		       &gt->tlb_invalidation.lock,
521 		       dma_fence_context_alloc(1), 1);
522 	spin_unlock_irq(&gt->tlb_invalidation.lock);
523 	INIT_LIST_HEAD(&fence->link);
524 	if (stack)
525 		set_bit(FENCE_STACK_BIT, &fence->base.flags);
526 	else
527 		dma_fence_get(&fence->base);
528 	fence->gt = gt;
529 }
530