// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "regs/xe_guc_regs.h"
#include "regs/xe_irq_regs.h"
#include "regs/xe_regs.h"

#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_hw_engine.h"
#include "xe_map.h"
#include "xe_memirq.h"

#define memirq_assert(m, condition)	xe_tile_assert(memirq_to_tile(m), condition)
#define memirq_printk(m, _level, _fmt, ...)			\
	drm_##_level(&memirq_to_xe(m)->drm, "MEMIRQ%u: " _fmt,	\
		     memirq_to_tile(m)->id, ##__VA_ARGS__)

#ifdef CONFIG_DRM_XE_DEBUG_MEMIRQ
#define memirq_debug(m, _fmt, ...)	memirq_printk(m, dbg, _fmt, ##__VA_ARGS__)
#else
#define memirq_debug(...)
#endif

#define memirq_err(m, _fmt, ...)	memirq_printk(m, err, _fmt, ##__VA_ARGS__)
#define memirq_err_ratelimited(m, _fmt, ...)	\
	memirq_printk(m, err_ratelimited, _fmt, ##__VA_ARGS__)

static struct xe_tile *memirq_to_tile(struct xe_memirq *memirq)
{
	return container_of(memirq, struct xe_tile, memirq);
}

static struct xe_device *memirq_to_xe(struct xe_memirq *memirq)
{
	return tile_to_xe(memirq_to_tile(memirq));
}

static const char *guc_name(struct xe_guc *guc)
{
	return xe_gt_is_media_type(guc_to_gt(guc)) ? "media GuC" : "GuC";
}

/**
 * DOC: Memory Based Interrupts
 *
 * The MMIO register based interrupt infrastructure, used in non-virtualized
 * mode or in SRIOV-8 (which supports 8 Virtual Functions), does not scale
 * efficiently to deliver interrupts to a large number of virtual machines or
 * containers. Memory based interrupt status reporting provides an efficient
 * and scalable alternative.
 *
 * For memory based interrupt status reporting, the hardware sequence is:
 *  * The engine writes the interrupt event to memory.
 *    (The pointer to the memory location is provided by SW. This memory
 *    surface must be mapped to system memory and must be marked as
 *    un-cacheable (UC) on the Graphics IP caches.)
 *  * The engine triggers an interrupt to the host.
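 *
 * On the CPU side each event is then visible as a whole byte rather than a
 * bit. Given a &struct iosys_map vector and a byte offset, a minimal sketch
 * of the resulting read-and-acknowledge convention (illustrative only; the
 * driver's actual implementation is memirq_received() below)::
 *
 *   u8 value = iosys_map_rd(vector, offset, u8);
 *   if (value) {
 *           // 0xff means the event fired; ack by clearing the byte
 *           iosys_map_wr(vector, offset, u8, 0x00);
 *   }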
 */

/**
 * DOC: Memory Based Interrupts Page Layout
 *
 * `Memory Based Interrupts`_ requires three different objects, which are
 * called "pages" in the specs, even though they aren't page-sized or aligned.
 *
 * To simplify the code we allocate a single page-sized object and then use
 * offsets to the embedded "pages". The addresses of those "pages" are then
 * programmed into the HW via LRI and LRM in the context image.
 *
 * - _`Interrupt Status Report Page`: this page contains the interrupt
 *   status vectors for each unit. Each bit in the interrupt vectors is
 *   converted to a byte, with the byte being set to 0xFF when an
 *   interrupt is triggered; interrupt vectors are 16 bits wide, so each
 *   unit gets 16 bytes. One such 16-byte slot is reserved for each bit in
 *   one of the GT_INTR_DWx registers, so this object needs a total of
 *   1024 bytes. This object needs to be 4KiB aligned.
 *
 * - _`Interrupt Source Report Page`: this is the equivalent of the
 *   GEN11_GT_INTR_DWx registers, with each bit in those registers being
 *   mapped to a byte here. The offsets are the same, just bytes instead
 *   of bits. This object needs to be cacheline aligned.
 *
 * - Interrupt Mask: the HW needs a location to fetch the interrupt
 *   mask vector to be used by the LRM in the context, so we just use
 *   the next available space in the interrupt page.
 *
 * ::
 *
 *   0x0000   +===========+  <== Interrupt Status Report Page
 *            |           |
 *            |           |     ____ +----+----------------+
 *            |           |    /     |  0 | USER INTERRUPT |
 *            +-----------+ __/      |  1 |                |
 *            |  HWE(n)   | __       |    | CTX SWITCH     |
 *            +-----------+   \      |    | WAIT SEMAPHORE |
 *            |           |    \____ | 15 |                |
 *            |           |          +----+----------------+
 *            |           |
 *   0x0400   +===========+  <== Interrupt Source Report Page
 *            |  HWE(0)   |
 *            |  HWE(1)   |
 *            |           |
 *            |  HWE(x)   |
 *   0x0440   +===========+  <== Interrupt Enable Mask
 *            |           |
 *            |           |
 *            +-----------+
 *
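 * The offsets in the diagram map to the XE_MEMIRQ_*_OFFSET helpers used
 * throughout this file. As a sketch, plausible definitions inferred from
 * the layout above would look like this (illustrative only; the
 * authoritative macros live in xe_memirq.h)::
 *
 *   // illustrative values inferred from the diagram above
 *   #define XE_MEMIRQ_STATUS_OFFSET(inst)  ((inst) * SZ_4K)
 *   #define XE_MEMIRQ_SOURCE_OFFSET(inst)  ((inst) * SZ_4K + 0x400)
 *   #define XE_MEMIRQ_ENABLE_OFFSET        0x440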
 *
 * MSI-X use case
 *
 * When using MSI-X, HW engines report interrupt status and source to the
 * offsets of engine instance 0. In this scenario, in order to differentiate
 * between the engines, we need to pass different status/source pointers in
 * the LRC.
 *
 * The requirements on those pointers are:
 * - Interrupt status should be 4KiB aligned
 * - Interrupt source should be 64-byte aligned
 *
 * To accommodate this, we duplicate the memirq page layout above,
 * allocating a page per engine instance and passing that page in the LRC.
 * Note that the same page can be reused by different engine types.
 * For example, an LRC executing on CCS #x will have pointers to page #x,
 * and an LRC executing on BCS #x will have the same pointers.
 *
 * ::
 *
 *   0x0000   +==============================+  <== page for instance 0 (BCS0, CCS0, etc.)
 *            | Interrupt Status Report Page |
 *   0x0400   +==============================+
 *            | Interrupt Source Report Page |
 *   0x0440   +==============================+
 *            | Interrupt Enable Mask        |
 *            +==============================+
 *            | Not used                     |
 *   0x1000   +==============================+  <== page for instance 1 (BCS1, CCS1, etc.)
 *            | Interrupt Status Report Page |
 *   0x1400   +==============================+
 *            | Interrupt Source Report Page |
 *   0x1440   +==============================+
 *            | Not used                     |
 *   0x2000   +==============================+  <== page for instance 2 (BCS2, CCS2, etc.)
 *            | ...                          |
 *            +==============================+
 *
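 * The per-instance pointers follow directly from the BO's GGTT base
 * address; a minimal sketch, mirroring __memirq_status_page() and
 * __memirq_source_page() below (n stands in for the engine instance when
 * MSI-X is in use, 0 otherwise)::
 *
 *   u32 status = xe_bo_ggtt_addr(bo) + XE_MEMIRQ_STATUS_OFFSET(n);
 *   u32 source = xe_bo_ggtt_addr(bo) + XE_MEMIRQ_SOURCE_OFFSET(n);
 *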
 */

static void __release_xe_bo(struct drm_device *drm, void *arg)
{
	struct xe_bo *bo = arg;

	xe_bo_unpin_map_no_vm(bo);
}

static inline bool hw_reports_to_instance_zero(struct xe_memirq *memirq)
{
	/*
	 * When the HW engines are configured to use MSI-X,
	 * they report interrupt status and source to the offset of
	 * engine instance 0.
	 */
	return xe_device_has_msix(memirq_to_xe(memirq));
}

static int memirq_alloc_pages(struct xe_memirq *memirq)
{
	struct xe_device *xe = memirq_to_xe(memirq);
	struct xe_tile *tile = memirq_to_tile(memirq);
	size_t bo_size = hw_reports_to_instance_zero(memirq) ?
		XE_HW_ENGINE_MAX_INSTANCE * SZ_4K : SZ_4K;
	struct xe_bo *bo;
	int err;

	BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_SOURCE_OFFSET(0), SZ_64));
	BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_STATUS_OFFSET(0), SZ_4K));

	/* XXX: convert to managed bo */
	bo = xe_bo_create_pin_map(xe, tile, NULL, bo_size,
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_SYSTEM |
				  XE_BO_FLAG_GGTT |
				  XE_BO_FLAG_GGTT_INVALIDATE |
				  XE_BO_FLAG_NEEDS_UC |
				  XE_BO_FLAG_NEEDS_CPU_ACCESS);
	if (IS_ERR(bo)) {
		err = PTR_ERR(bo);
		goto out;
	}

	memirq_assert(memirq, !xe_bo_is_vram(bo));
	memirq_assert(memirq, !memirq->bo);

	iosys_map_memset(&bo->vmap, 0, 0, bo_size);

	memirq->bo = bo;
	memirq->source = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_SOURCE_OFFSET(0));
	memirq->status = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_STATUS_OFFSET(0));
	memirq->mask = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_ENABLE_OFFSET);

	memirq_assert(memirq, !memirq->source.is_iomem);
	memirq_assert(memirq, !memirq->status.is_iomem);
	memirq_assert(memirq, !memirq->mask.is_iomem);

	memirq_debug(memirq, "page offsets: bo %#x bo_size %zu source %#x status %#x\n",
		     xe_bo_ggtt_addr(bo), bo_size, XE_MEMIRQ_SOURCE_OFFSET(0),
		     XE_MEMIRQ_STATUS_OFFSET(0));

	return drmm_add_action_or_reset(&xe->drm, __release_xe_bo, memirq->bo);

out:
	memirq_err(memirq, "Failed to allocate memirq page (%pe)\n", ERR_PTR(err));
	return err;
}

static void memirq_set_enable(struct xe_memirq *memirq, bool enable)
{
	iosys_map_wr(&memirq->mask, 0, u32, enable ? GENMASK(15, 0) : 0);

	memirq->enabled = enable;
}

/**
 * xe_memirq_init - Initialize data used by `Memory Based Interrupts`_.
 * @memirq: the &xe_memirq to initialize
 *
 * Allocate `Interrupt Source Report Page`_ and `Interrupt Status Report Page`_
 * used by `Memory Based Interrupts`_.
 *
 * These allocations are managed and will be implicitly released on unload.
 *
 * If this function fails then the driver won't be able to operate correctly.
 * If `Memory Based Interrupts`_ are not used this function will return 0.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_memirq_init(struct xe_memirq *memirq)
{
	struct xe_device *xe = memirq_to_xe(memirq);
	int err;

	if (!xe_device_uses_memirq(xe))
		return 0;

	err = memirq_alloc_pages(memirq);
	if (unlikely(err))
		return err;

	/* we need to start with all irqs enabled */
	memirq_set_enable(memirq, true);

	return 0;
}
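
/*
 * A minimal usage sketch; the caller shown here is hypothetical (in the
 * driver, xe_memirq_init() is invoked once per tile during early init):
 *
 *	static int hypothetical_tile_irq_init(struct xe_tile *tile)
 *	{
 *		return xe_memirq_init(&tile->memirq);
 *	}
 */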

static u32 __memirq_source_page(struct xe_memirq *memirq, u16 instance)
{
	memirq_assert(memirq, instance <= XE_HW_ENGINE_MAX_INSTANCE);
	memirq_assert(memirq, memirq->bo);

	instance = hw_reports_to_instance_zero(memirq) ? instance : 0;
	return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_SOURCE_OFFSET(instance);
}

/**
 * xe_memirq_source_ptr - Get the GGTT offset of the `Interrupt Source Report Page`_.
 * @memirq: the &xe_memirq to query
 * @hwe: the hw engine for which we want the report page
 *
 * Shall be called when `Memory Based Interrupts`_ are used
 * and xe_memirq_init() didn't fail.
 *
 * Return: the GGTT offset of the `Interrupt Source Report Page`_.
 */
u32 xe_memirq_source_ptr(struct xe_memirq *memirq, struct xe_hw_engine *hwe)
{
	memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq)));

	return __memirq_source_page(memirq, hwe->instance);
}

static u32 __memirq_status_page(struct xe_memirq *memirq, u16 instance)
{
	memirq_assert(memirq, instance <= XE_HW_ENGINE_MAX_INSTANCE);
	memirq_assert(memirq, memirq->bo);

	instance = hw_reports_to_instance_zero(memirq) ? instance : 0;
	return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_STATUS_OFFSET(instance);
}

/**
 * xe_memirq_status_ptr - Get the GGTT offset of the `Interrupt Status Report Page`_.
 * @memirq: the &xe_memirq to query
 * @hwe: the hw engine for which we want the report page
 *
 * Shall be called when `Memory Based Interrupts`_ are used
 * and xe_memirq_init() didn't fail.
 *
 * Return: the GGTT offset of the `Interrupt Status Report Page`_.
 */
u32 xe_memirq_status_ptr(struct xe_memirq *memirq, struct xe_hw_engine *hwe)
{
	memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq)));

	return __memirq_status_page(memirq, hwe->instance);
}

/**
 * xe_memirq_enable_ptr - Get the GGTT offset of the Interrupt Enable Mask.
 * @memirq: the &xe_memirq to query
 *
 * Shall be called when `Memory Based Interrupts`_ are used
 * and xe_memirq_init() didn't fail.
 *
 * Return: the GGTT offset of the Interrupt Enable Mask.
 */
u32 xe_memirq_enable_ptr(struct xe_memirq *memirq)
{
	memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq)));
	memirq_assert(memirq, memirq->bo);

	return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_ENABLE_OFFSET;
}
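
/*
 * A sketch of how the *_ptr() getters above are typically consumed,
 * assuming an LRC setup helper; the register-offset names used here are
 * hypothetical (the DOC above notes the addresses are programmed into the
 * context image via LRI/LRM):
 *
 *	regs[CTX_INT_SRC_REPORT_PTR] = xe_memirq_source_ptr(memirq, hwe);
 *	regs[CTX_INT_STATUS_REPORT_PTR] = xe_memirq_status_ptr(memirq, hwe);
 *	regs[CTX_INT_MASK_ENABLE_PTR] = xe_memirq_enable_ptr(memirq);
 */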

/**
 * xe_memirq_init_guc - Prepare GuC for `Memory Based Interrupts`_.
 * @memirq: the &xe_memirq
 * @guc: the &xe_guc to setup
 *
 * Register `Interrupt Source Report Page`_ and `Interrupt Status Report Page`_
 * to be used by the GuC when `Memory Based Interrupts`_ are required.
 *
 * Shall be called when `Memory Based Interrupts`_ are used
 * and xe_memirq_init() didn't fail.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc)
{
	bool is_media = xe_gt_is_media_type(guc_to_gt(guc));
	u32 offset = is_media ? ilog2(INTR_MGUC) : ilog2(INTR_GUC);
	u32 source, status;
	int err;

	memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq)));

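	/*
	 * Each interrupt bit maps to a single byte in the source page and to
	 * a 16-byte vector in the status page, hence the SZ_16 scaling.
	 */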
	source = __memirq_source_page(memirq, 0) + offset;
	status = __memirq_status_page(memirq, 0) + offset * SZ_16;

	err = xe_guc_self_cfg64(guc, GUC_KLV_SELF_CFG_MEMIRQ_SOURCE_ADDR_KEY,
				source);
	if (unlikely(err))
		goto failed;

	err = xe_guc_self_cfg64(guc, GUC_KLV_SELF_CFG_MEMIRQ_STATUS_ADDR_KEY,
				status);
	if (unlikely(err))
		goto failed;

	return 0;

failed:
	memirq_err(memirq, "Failed to setup report pages in %s (%pe)\n",
		   guc_name(guc), ERR_PTR(err));
	return err;
}

/**
 * xe_memirq_reset - Disable processing of `Memory Based Interrupts`_.
 * @memirq: the &xe_memirq
 *
 * This is part of the driver IRQ setup flow.
 *
 * This function shall only be used on platforms that use
 * `Memory Based Interrupts`_.
 */
void xe_memirq_reset(struct xe_memirq *memirq)
{
	memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq)));

	if (memirq->bo)
		memirq_set_enable(memirq, false);
}

/**
 * xe_memirq_postinstall - Enable processing of `Memory Based Interrupts`_.
 * @memirq: the &xe_memirq
 *
 * This is part of the driver IRQ setup flow.
 *
 * This function shall only be used on platforms that use
 * `Memory Based Interrupts`_.
 */
void xe_memirq_postinstall(struct xe_memirq *memirq)
{
	memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq)));

	if (memirq->bo)
		memirq_set_enable(memirq, true);
}

static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector,
			    u16 offset, const char *name)
{
	u8 value;

	value = iosys_map_rd(vector, offset, u8);
	if (value) {
		if (value != 0xff)
			memirq_err_ratelimited(memirq,
					       "Unexpected memirq value %#x from %s at %u\n",
					       value, name, offset);
		iosys_map_wr(vector, offset, u8, 0x00);
	}

	return value;
}

static void memirq_dispatch_engine(struct xe_memirq *memirq, struct iosys_map *status,
				   struct xe_hw_engine *hwe)
{
	memirq_debug(memirq, "STATUS %s %*ph\n", hwe->name, 16, status->vaddr);

	if (memirq_received(memirq, status, ilog2(GT_RENDER_USER_INTERRUPT), hwe->name))
		xe_hw_engine_handle_irq(hwe, GT_RENDER_USER_INTERRUPT);
}

static void memirq_dispatch_guc(struct xe_memirq *memirq, struct iosys_map *status,
				struct xe_guc *guc)
{
	const char *name = guc_name(guc);

	memirq_debug(memirq, "STATUS %s %*ph\n", name, 16, status->vaddr);

	if (memirq_received(memirq, status, ilog2(GUC_INTR_GUC2HOST), name))
		xe_guc_irq_handler(guc, GUC_INTR_GUC2HOST);
}

/**
 * xe_memirq_hwe_handler - Check and process interrupts for a specific HW engine.
 * @memirq: the &xe_memirq
 * @hwe: the hw engine to process
 *
 * This function reads and dispatches `Memory Based Interrupts`_ for the provided HW engine.
 */
void xe_memirq_hwe_handler(struct xe_memirq *memirq, struct xe_hw_engine *hwe)
{
	u16 offset = hwe->irq_offset;
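	/*
	 * With MSI-X the HW reports at the instance-0 offsets, so a separate
	 * page is used per engine instance; without MSI-X all engines share
	 * page 0 and are distinguished by hwe->irq_offset alone.
	 */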
	u16 instance = hw_reports_to_instance_zero(memirq) ? hwe->instance : 0;
	struct iosys_map src_offset = IOSYS_MAP_INIT_OFFSET(&memirq->bo->vmap,
							    XE_MEMIRQ_SOURCE_OFFSET(instance));

	if (memirq_received(memirq, &src_offset, offset, "SRC")) {
		struct iosys_map status_offset =
			IOSYS_MAP_INIT_OFFSET(&memirq->bo->vmap,
					      XE_MEMIRQ_STATUS_OFFSET(instance) + offset * SZ_16);
		memirq_dispatch_engine(memirq, &status_offset, hwe);
	}
}

/**
 * xe_memirq_handler - The `Memory Based Interrupts`_ Handler.
 * @memirq: the &xe_memirq
 *
 * This function reads and dispatches `Memory Based Interrupts`_.
 */
void xe_memirq_handler(struct xe_memirq *memirq)
{
	struct xe_device *xe = memirq_to_xe(memirq);
	struct xe_tile *tile = memirq_to_tile(memirq);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct iosys_map map;
	unsigned int gtid;
	struct xe_gt *gt;

	if (!memirq->bo)
		return;

	memirq_assert(memirq, !memirq->source.is_iomem);
	memirq_debug(memirq, "SOURCE %*ph\n", 32, memirq->source.vaddr);
	memirq_debug(memirq, "SOURCE %*ph\n", 32, memirq->source.vaddr + 32);

	for_each_gt(gt, xe, gtid) {
		if (gt->tile != tile)
			continue;

		for_each_hw_engine(hwe, gt, id)
			xe_memirq_hwe_handler(memirq, hwe);
	}

	/* GuC and media GuC (if present) must be checked separately */

	if (memirq_received(memirq, &memirq->source, ilog2(INTR_GUC), "SRC")) {
		map = IOSYS_MAP_INIT_OFFSET(&memirq->status, ilog2(INTR_GUC) * SZ_16);
		memirq_dispatch_guc(memirq, &map, &tile->primary_gt->uc.guc);
	}

	if (!tile->media_gt)
		return;

	if (memirq_received(memirq, &memirq->source, ilog2(INTR_MGUC), "SRC")) {
		map = IOSYS_MAP_INIT_OFFSET(&memirq->status, ilog2(INTR_MGUC) * SZ_16);
		memirq_dispatch_guc(memirq, &map, &tile->media_gt->uc.guc);
	}
}
515