xref: /linux/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c (revision d2a4a07190f42e4f82805daf58e708400b703f1c)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gt_tlb_invalidation.h"

#include "abi/guc_actions_abi.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_mmio.h"
#include "xe_trace.h"
#include "regs/xe_guc_regs.h"

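/* Timeout for a TLB invalidation acknowledgment: HZ / 4 jiffies (250ms). */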
#define TLB_TIMEOUT	(HZ / 4)

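/*
 * Delayed-work timeout handler for TLB invalidation fences: any fence still
 * pending after TLB_TIMEOUT is signalled with -ETIME and dropped, and the work
 * re-arms itself while fences remain on the pending list.
 */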
static void xe_gt_tlb_fence_timeout(struct work_struct *work)
{
	struct xe_gt *gt = container_of(work, struct xe_gt,
					tlb_invalidation.fence_tdr.work);
	struct xe_gt_tlb_invalidation_fence *fence, *next;

	spin_lock_irq(&gt->tlb_invalidation.pending_lock);
	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link) {
		s64 since_inval_ms = ktime_ms_delta(ktime_get(),
						    fence->invalidation_time);

		if (msecs_to_jiffies(since_inval_ms) < TLB_TIMEOUT)
			break;

		trace_xe_gt_tlb_invalidation_fence_timeout(fence);
		xe_gt_err(gt, "TLB invalidation fence timeout, seqno=%d recv=%d",
			  fence->seqno, gt->tlb_invalidation.seqno_recv);

		list_del(&fence->link);
		fence->base.error = -ETIME;
		dma_fence_signal(&fence->base);
		dma_fence_put(&fence->base);
	}
	if (!list_empty(&gt->tlb_invalidation.pending_fences))
		queue_delayed_work(system_wq,
				   &gt->tlb_invalidation.fence_tdr,
				   TLB_TIMEOUT);
	spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
}

/**
 * xe_gt_tlb_invalidation_init - Initialize GT TLB invalidation state
 * @gt: GT structure
 *
 * Initialize GT TLB invalidation state. This is purely software initialization
 * and should be called once during driver load.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
{
	gt->tlb_invalidation.seqno = 1;
	INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
	spin_lock_init(&gt->tlb_invalidation.pending_lock);
	spin_lock_init(&gt->tlb_invalidation.lock);
	INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
			  xe_gt_tlb_fence_timeout);

	return 0;
}

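/*
 * Signal a TLB invalidation fence and put it. The __ variant is for fences
 * that are not (or are no longer) on the pending_fences list;
 * invalidation_fence_signal() also unlinks the fence, so it must be called
 * with pending_lock held.
 */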
static void
__invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence)
{
	trace_xe_gt_tlb_invalidation_fence_signal(fence);
	dma_fence_signal(&fence->base);
	dma_fence_put(&fence->base);
}

static void
invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence)
{
	list_del(&fence->link);
	__invalidation_fence_signal(fence);
}

/**
 * xe_gt_tlb_invalidation_reset - TLB invalidation reset
 * @gt: GT structure
 *
 * Signal any pending invalidation fences; should be called during a GT reset.
 */
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
{
	struct xe_gt_tlb_invalidation_fence *fence, *next;
	struct xe_guc *guc = &gt->uc.guc;
	int pending_seqno;

	/*
	 * The CT channel is already disabled at this point. No new TLB requests
	 * can appear.
	 */

	mutex_lock(&gt->uc.guc.ct.lock);
	spin_lock_irq(&gt->tlb_invalidation.pending_lock);
	cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
	/*
	 * We might have various kworkers waiting for TLB flushes to complete
	 * which are not tracked with an explicit TLB fence, however at this
	 * stage that will never happen since the CT is already disabled, so
	 * make sure we signal them here under the assumption that we have
	 * completed a full GT reset.
	 */
	if (gt->tlb_invalidation.seqno == 1)
		pending_seqno = TLB_INVALIDATION_SEQNO_MAX - 1;
	else
		pending_seqno = gt->tlb_invalidation.seqno - 1;
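
	/*
	 * Fast-forwarding seqno_recv to the last seqno handed out means every
	 * possible waiter, fence based or not, observes its invalidation as
	 * complete.
	 */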
	WRITE_ONCE(gt->tlb_invalidation.seqno_recv, pending_seqno);
	wake_up_all(&guc->ct.wq);

	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link)
		invalidation_fence_signal(fence);
	spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
	mutex_unlock(&gt->uc.guc.ct.lock);
}

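/*
 * Returns true if @seqno has already been acknowledged by the GuC. Seqnos wrap
 * at TLB_INVALIDATION_SEQNO_MAX, so a difference of more than half the seqno
 * space is treated as wraparound rather than a genuinely old or future seqno.
 */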
static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
{
	int seqno_recv = READ_ONCE(gt->tlb_invalidation.seqno_recv);

	if (seqno - seqno_recv < -(TLB_INVALIDATION_SEQNO_MAX / 2))
		return false;

	if (seqno - seqno_recv > (TLB_INVALIDATION_SEQNO_MAX / 2))
		return true;

	return seqno_recv >= seqno;
}

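/*
 * Send a TLB invalidation H2G to the GuC under ct.lock, stamping the request
 * with the next seqno (action[1]) and reserving G2H space for the completion
 * message. If @fence is supplied it is added to pending_fences so that it is
 * signalled either by the G2H done handler or by the timeout worker. Returns
 * the assigned seqno on success, or a negative error code.
 */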
static int send_tlb_invalidation(struct xe_guc *guc,
				 struct xe_gt_tlb_invalidation_fence *fence,
				 u32 *action, int len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	int seqno;
	int ret;

	/*
	 * XXX: The seqno algorithm relies on TLB invalidations being processed
	 * in the order in which they are issued, which they currently are. If
	 * that changes, the algorithm will need to be updated.
	 */

	mutex_lock(&guc->ct.lock);
	seqno = gt->tlb_invalidation.seqno;
	if (fence) {
		fence->seqno = seqno;
		trace_xe_gt_tlb_invalidation_fence_send(fence);
	}
	action[1] = seqno;
	ret = xe_guc_ct_send_locked(&guc->ct, action, len,
				    G2H_LEN_DW_TLB_INVALIDATE, 1);
	if (!ret && fence) {
		spin_lock_irq(&gt->tlb_invalidation.pending_lock);
		/*
		 * We haven't actually published the TLB fence as per
		 * pending_fences, but in theory our seqno could have already
		 * been written as we acquired the pending_lock. In such a case
		 * we can just go ahead and signal the fence here.
		 */
		if (tlb_invalidation_seqno_past(gt, seqno)) {
			__invalidation_fence_signal(fence);
		} else {
			fence->invalidation_time = ktime_get();
			list_add_tail(&fence->link,
				      &gt->tlb_invalidation.pending_fences);

			if (list_is_singular(&gt->tlb_invalidation.pending_fences))
				queue_delayed_work(system_wq,
						   &gt->tlb_invalidation.fence_tdr,
						   TLB_TIMEOUT);
		}
		spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
	} else if (ret < 0 && fence) {
		__invalidation_fence_signal(fence);
	}
	if (!ret) {
		gt->tlb_invalidation.seqno = (gt->tlb_invalidation.seqno + 1) %
			TLB_INVALIDATION_SEQNO_MAX;
		if (!gt->tlb_invalidation.seqno)
			gt->tlb_invalidation.seqno = 1;
		ret = seqno;
	}
	mutex_unlock(&guc->ct.lock);

	return ret;
}

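/*
 * Each TLB invalidation action carries a descriptor dword selecting the
 * invalidation type, the (heavy) invalidation mode and the
 * XE_GUC_TLB_INVAL_FLUSH_CACHE flag.
 */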
#define MAKE_INVAL_OP(type)	((type << XE_GUC_TLB_INVAL_TYPE_SHIFT) | \
		XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
		XE_GUC_TLB_INVAL_FLUSH_CACHE)

/**
 * xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
 * @gt: GT structure
 *
 * Issue a TLB invalidation for the GuC. Completion of the TLB invalidation is
 * asynchronous and the caller can use seqno + xe_gt_tlb_invalidation_wait to
 * wait for completion.
 *
 * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
 * negative error code on error.
 */
static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt)
{
	u32 action[] = {
		XE_GUC_ACTION_TLB_INVALIDATION,
		0,  /* seqno, replaced in send_tlb_invalidation */
		MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
	};

	return send_tlb_invalidation(&gt->uc.guc, NULL, action,
				     ARRAY_SIZE(action));
}
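
/*
 * Illustrative use of the seqno based flow (a sketch mirroring what
 * xe_gt_tlb_invalidation_ggtt() does below, not a prescribed pattern):
 *
 *	int seqno = xe_gt_tlb_invalidation_guc(gt);
 *
 *	if (seqno <= 0)
 *		return seqno;
 *	xe_gt_tlb_invalidation_wait(gt, seqno);
 */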

/**
 * xe_gt_tlb_invalidation_ggtt - Issue a TLB invalidation on this GT for the GGTT
 * @gt: GT structure
 *
 * Issue a TLB invalidation for the GGTT. Completion of TLB invalidation is
 * synchronous.
 *
 * Return: 0 on success, negative error code on error
 */
int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
	    gt->uc.guc.submission_state.enabled) {
		int seqno;

		seqno = xe_gt_tlb_invalidation_guc(gt);
		if (seqno <= 0)
			return seqno;

		xe_gt_tlb_invalidation_wait(gt, seqno);
	} else if (xe_device_uc_enabled(xe)) {
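		/*
		 * The GuC CT channel or GuC submission is not up, so fall back
		 * to poking the GuC TLB invalidation registers directly over
		 * MMIO; no completion is waited for here.
		 */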
		xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
		if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
			xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC1,
					PVC_GUC_TLB_INV_DESC1_INVALIDATE);
			xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC0,
					PVC_GUC_TLB_INV_DESC0_VALID);
		} else {
			xe_mmio_write32(gt, GUC_TLB_INV_CR,
					GUC_TLB_INV_CR_INVALIDATE);
		}
		xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
	}

	return 0;
}

/**
 * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
 * @gt: GT structure
 * @fence: invalidation fence which will be signaled on TLB invalidation
 * completion, can be NULL
 * @vma: VMA to invalidate
 *
 * Issue a range based TLB invalidation if supported; if not, fall back to a
 * full TLB invalidation. Completion of the TLB invalidation is asynchronous
 * and the caller can either use the invalidation fence or seqno +
 * xe_gt_tlb_invalidation_wait to wait for completion.
 *
 * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
 * negative error code on error.
 */
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
			       struct xe_gt_tlb_invalidation_fence *fence,
			       struct xe_vma *vma)
{
	struct xe_device *xe = gt_to_xe(gt);
#define MAX_TLB_INVALIDATION_LEN	7
	u32 action[MAX_TLB_INVALIDATION_LEN];
	int len = 0;

	xe_gt_assert(gt, vma);

	/* Execlists not supported */
	if (gt_to_xe(gt)->info.force_execlist) {
		if (fence)
			__invalidation_fence_signal(fence);

		return 0;
	}

	action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
	action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
	if (!xe->info.has_range_tlb_invalidation) {
		action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
	} else {
		u64 start = xe_vma_start(vma);
		u64 length = xe_vma_size(vma);
		u64 align, end;

		if (length < SZ_4K)
			length = SZ_4K;

		/*
		 * We need to invalidate at a higher granularity if the start
		 * address is not aligned to the length. In that case, find a
		 * length large enough to create an address mask covering the
		 * required range.
		 */
		align = roundup_pow_of_two(length);
		start = ALIGN_DOWN(xe_vma_start(vma), align);
		end = ALIGN(xe_vma_end(vma), align);
		length = align;
		while (start + length < end) {
			length <<= 1;
			start = ALIGN_DOWN(xe_vma_start(vma), length);
		}
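
		/*
		 * Worked example with illustrative values (not from the
		 * source): a VMA at 0x3000 of size 0x2000 gives align =
		 * 0x2000, start = 0x2000 and end = 0x6000. The loop grows
		 * length to 0x8000 with start = 0x0, the smallest naturally
		 * aligned power-of-two range covering [0x3000, 0x5000).
		 */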

		/*
		 * The minimum invalidation size the hardware expects for a 2MB
		 * page is 16MB.
		 */
		if (length >= SZ_2M) {
			length = max_t(u64, SZ_16M, length);
			start = ALIGN_DOWN(xe_vma_start(vma), length);
		}

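		/*
		 * Note the third assert below: because a 2MB page range is
		 * bumped to at least 16MB above, a power-of-two length of 4MB
		 * or 8MB can never be emitted; legal sizes are 4K..2M or
		 * >= 16M.
		 */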
		xe_gt_assert(gt, length >= SZ_4K);
		xe_gt_assert(gt, is_power_of_2(length));
		xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1, ilog2(SZ_2M) + 1)));
		xe_gt_assert(gt, IS_ALIGNED(start, length));

		action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
		action[len++] = xe_vma_vm(vma)->usm.asid;
		action[len++] = lower_32_bits(start);
		action[len++] = upper_32_bits(start);
		action[len++] = ilog2(length) - ilog2(SZ_4K);
	}

	xe_gt_assert(gt, len <= MAX_TLB_INVALIDATION_LEN);

	return send_tlb_invalidation(&gt->uc.guc, fence, action, len);
}
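
/*
 * Illustrative caller pattern (a sketch, not taken from an in-tree caller):
 * with a caller-initialised struct xe_gt_tlb_invalidation_fence the completion
 * can be waited on via dma_fence_wait(&fence->base, ...); alternatively pass
 * fence == NULL and wait on the returned seqno:
 *
 *	int seqno = xe_gt_tlb_invalidation_vma(gt, NULL, vma);
 *
 *	if (seqno > 0)
 *		err = xe_gt_tlb_invalidation_wait(gt, seqno);
 *	else
 *		err = seqno;
 */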

/**
 * xe_gt_tlb_invalidation_wait - Wait for TLB invalidation to complete
 * @gt: GT structure
 * @seqno: seqno to wait on, as returned from an xe_gt_tlb_invalidation_*() call
 *
 * Wait up to 250ms (TLB_TIMEOUT) for a TLB invalidation to complete; in
 * practice the invalidation should always be received well within that time.
 *
 * Return: 0 on success, -ETIME on TLB invalidation timeout
 */
int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
{
	struct xe_guc *guc = &gt->uc.guc;
	int ret;

	/* Execlists not supported */
	if (gt_to_xe(gt)->info.force_execlist)
		return 0;

	/*
	 * XXX: See above, this algorithm only works if seqnos are always
	 * received in order.
	 */
	ret = wait_event_timeout(guc->ct.wq,
				 tlb_invalidation_seqno_past(gt, seqno),
				 TLB_TIMEOUT);
	if (!ret) {
		struct drm_printer p = xe_gt_err_printer(gt);

		xe_gt_err(gt, "TLB invalidation timed out, seqno=%d, recv=%d\n",
			  seqno, gt->tlb_invalidation.seqno_recv);
		xe_guc_ct_print(&guc->ct, &p, true);
		return -ETIME;
	}

	return 0;
}

/**
 * xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
 * @guc: guc
 * @msg: message indicating TLB invalidation done
 * @len: length of message
 *
 * Parse seqno of TLB invalidation, wake any waiters for seqno, and signal any
 * invalidation fences for seqno. The algorithm for this relies on seqnos being
 * received in order.
 *
 * Return: 0 on success, -EPROTO for malformed messages.
 */
int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_gt_tlb_invalidation_fence *fence, *next;
	unsigned long flags;

	if (unlikely(len != 1))
		return -EPROTO;

	/*
	 * This can be run both directly from the IRQ handler and from
	 * process_g2h_msg(). Only one may process any individual CT message,
	 * however the order they are processed here could result in skipping a
	 * seqno. To handle that we just process all the seqnos from the last
	 * seqno_recv up to and including the one in msg[0]. The delta should be
	 * very small so there shouldn't be many pending_fences we actually need
	 * to iterate over here.
	 *
	 * From GuC POV we expect the seqnos to always appear in-order, so if we
	 * see something later in the timeline we can be sure that anything
	 * appearing earlier has already signalled, we just have yet to
	 * officially process the CT message, as when racing against
	 * process_g2h_msg().
	 */
	spin_lock_irqsave(&gt->tlb_invalidation.pending_lock, flags);
	if (tlb_invalidation_seqno_past(gt, msg[0])) {
		spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);
		return 0;
	}

	/*
	 * wake_up_all() and wait_event_timeout() already have the correct
	 * barriers.
	 */
	WRITE_ONCE(gt->tlb_invalidation.seqno_recv, msg[0]);
	wake_up_all(&guc->ct.wq);

	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link) {
		trace_xe_gt_tlb_invalidation_fence_recv(fence);

		if (!tlb_invalidation_seqno_past(gt, fence->seqno))
			break;

		invalidation_fence_signal(fence);
	}

	if (!list_empty(&gt->tlb_invalidation.pending_fences))
		mod_delayed_work(system_wq,
				 &gt->tlb_invalidation.fence_tdr,
				 TLB_TIMEOUT);
	else
		cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);

	spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);

	return 0;
}
460