// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "regs/xe_gt_regs.h"
#include "regs/xe_guc_regs.h"
#include "regs/xe_regs.h"

#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_hw_engine.h"
#include "xe_map.h"
#include "xe_memirq.h"
#include "xe_sriov.h"
#include "xe_sriov_printk.h"

#define memirq_assert(m, condition)	xe_tile_assert(memirq_to_tile(m), condition)
#define memirq_debug(m, msg...)		xe_sriov_dbg_verbose(memirq_to_xe(m), "MEMIRQ: " msg)

static struct xe_tile *memirq_to_tile(struct xe_memirq *memirq)
{
	return container_of(memirq, struct xe_tile, sriov.vf.memirq);
}

static struct xe_device *memirq_to_xe(struct xe_memirq *memirq)
{
	return tile_to_xe(memirq_to_tile(memirq));
}

static const char *guc_name(struct xe_guc *guc)
{
	return xe_gt_is_media_type(guc_to_gt(guc)) ? "media GuC" : "GuC";
}

/**
 * DOC: Memory Based Interrupts
 *
 * The MMIO register based interrupts infrastructure, used in non-virtualized
 * mode or with SRIOV-8 (which supports 8 Virtual Functions), does not scale
 * efficiently to deliver interrupts to a large number of virtual machines or
 * containers. Memory based interrupt status reporting provides an efficient
 * and scalable alternative.
 *
 * For memory based interrupt status reporting, the hardware sequence is:
 * * Engine writes the interrupt event to memory
 *   (the pointer to the memory location is provided by SW; this memory
 *   surface must be mapped to system memory and must be marked as
 *   un-cacheable (UC) on Graphics IP Caches)
 * * Engine triggers an interrupt to host.
 */
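
/*
 * Illustrative sketch, not functional driver code: from the CPU's point of
 * view, a memory based interrupt from the unit at byte offset `n` shows up
 * as the byte at `source + n` turning to 0xff, which software acknowledges
 * by writing it back to zero before consulting the unit's status vector
 * (this mirrors what memirq_received() below does):
 *
 *	u8 pending = iosys_map_rd(&memirq->source, n, u8); // 0xff if raised
 *
 *	if (pending)
 *		iosys_map_wr(&memirq->source, n, u8, 0);   // ack the source
 */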

/**
 * DOC: Memory Based Interrupts Page Layout
 *
 * `Memory Based Interrupts`_ requires three different objects, which are
 * called "page" in the specs, even if they aren't page-sized or aligned.
 *
 * To simplify the code we allocate a single page-sized object and then use
 * offsets to the embedded "pages". The addresses of those "pages" are then
 * programmed in the HW via LRI and LRM in the context image.
 *
 * - _`Interrupt Status Report Page`: this page contains the interrupt
 *   status vectors for each unit. Each bit in the interrupt vectors is
 *   converted to a byte, with the byte being set to 0xFF when an
 *   interrupt is triggered; interrupt vectors are 16 bits wide, so each
 *   unit gets 16 bytes. One slot is reserved for each bit in one of the
 *   GT_INTR_DWx registers, so this object needs a total of 1024 bytes.
 *   This object needs to be 4KiB aligned.
 *
 * - _`Interrupt Source Report Page`: this is the equivalent of the
 *   GEN11_GT_INTR_DWx registers, with each bit in those registers being
 *   mapped to a byte here. The offsets are the same, just bytes instead
 *   of bits. This object needs to be cacheline aligned.
 *
 * - Interrupt Mask: the HW needs a location to fetch the interrupt
 *   mask vector to be used by the LRM in the context, so we just use
 *   the next available space in the interrupt page.
 *
 * ::
 *
 *	0x0000   +===========+  <== Interrupt Status Report Page
 *	         |           |
 *	         |           |     ____ +----+----------------+
 *	         |           |    /     |  0 | USER INTERRUPT |
 *	         +-----------+ __/      |  1 |                |
 *	         |  HWE(n)   | __       |    | CTX SWITCH     |
 *	         +-----------+   \      |    | WAIT SEMAPHORE |
 *	         |           |    \____ | 15 |                |
 *	         |           |          +----+----------------+
 *	         |           |
 *	0x0400   +===========+  <== Interrupt Source Report Page
 *	         |  HWE(0)   |
 *	         |  HWE(1)   |
 *	         |           |
 *	         |  HWE(x)   |
 *	0x0440   +===========+  <== Interrupt Enable Mask
 *	         |           |
 *	         |           |
 *	         +-----------+
 */
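
/*
 * A minimal sketch of the resulting address math, assuming `base` is the
 * GGTT address of the backing object and `n` is the bit position of a unit
 * in the GT_INTR_DWx registers (the names here are illustrative only):
 *
 *	source_byte = base + XE_MEMIRQ_SOURCE_OFFSET + n;         // 1B per bit
 *	status_slot = base + XE_MEMIRQ_STATUS_OFFSET + n * SZ_16; // 16B per bit
 *	enable_mask = base + XE_MEMIRQ_ENABLE_OFFSET;             // shared mask
 *
 * This is the same math used by xe_memirq_init_guc() and by the interrupt
 * handler below.
 */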

static void __release_xe_bo(struct drm_device *drm, void *arg)
{
	struct xe_bo *bo = arg;

	xe_bo_unpin_map_no_vm(bo);
}

static int memirq_alloc_pages(struct xe_memirq *memirq)
{
	struct xe_device *xe = memirq_to_xe(memirq);
	struct xe_tile *tile = memirq_to_tile(memirq);
	struct xe_bo *bo;
	int err;

	BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_SOURCE_OFFSET, SZ_64));
	BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_STATUS_OFFSET, SZ_4K));

	/* XXX: convert to managed bo */
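	/*
	 * Note: the flags below follow the requirements quoted in the DOC
	 * above: the backing store must live in system memory and be mapped
	 * uncached (XE_BO_FLAG_NEEDS_UC), and it needs a GGTT address since
	 * the "page" pointers are programmed into the context image via
	 * LRI/LRM.
	 */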
	bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K,
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_SYSTEM |
				  XE_BO_FLAG_GGTT |
				  XE_BO_FLAG_GGTT_INVALIDATE |
				  XE_BO_FLAG_NEEDS_UC |
				  XE_BO_FLAG_NEEDS_CPU_ACCESS);
	if (IS_ERR(bo)) {
		err = PTR_ERR(bo);
		goto out;
	}

	memirq_assert(memirq, !xe_bo_is_vram(bo));
	memirq_assert(memirq, !memirq->bo);

	iosys_map_memset(&bo->vmap, 0, 0, SZ_4K);

	memirq->bo = bo;
	memirq->source = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_SOURCE_OFFSET);
	memirq->status = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_STATUS_OFFSET);
	memirq->mask = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_ENABLE_OFFSET);

	memirq_assert(memirq, !memirq->source.is_iomem);
	memirq_assert(memirq, !memirq->status.is_iomem);
	memirq_assert(memirq, !memirq->mask.is_iomem);

	memirq_debug(memirq, "page offsets: source %#x status %#x\n",
		     xe_memirq_source_ptr(memirq), xe_memirq_status_ptr(memirq));

	return drmm_add_action_or_reset(&xe->drm, __release_xe_bo, memirq->bo);

out:
	xe_sriov_err(memirq_to_xe(memirq),
		     "Failed to allocate memirq page (%pe)\n", ERR_PTR(err));
	return err;
}

static void memirq_set_enable(struct xe_memirq *memirq, bool enable)
{
	iosys_map_wr(&memirq->mask, 0, u32, enable ? GENMASK(15, 0) : 0);

	memirq->enabled = enable;
}

/**
 * xe_memirq_init - Initialize data used by `Memory Based Interrupts`_.
 * @memirq: the &xe_memirq to initialize
 *
 * Allocate `Interrupt Source Report Page`_ and `Interrupt Status Report Page`_
 * used by `Memory Based Interrupts`_.
 *
 * These allocations are managed and will be implicitly released on unload.
 *
 * Note: This function shall be called only by the VF driver.
 *
 * If this function fails then the VF driver won't be able to operate correctly.
 * If `Memory Based Interrupts`_ are not used this function will return 0.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_memirq_init(struct xe_memirq *memirq)
{
	struct xe_device *xe = memirq_to_xe(memirq);
	int err;

	memirq_assert(memirq, IS_SRIOV_VF(xe));

	if (!xe_device_has_memirq(xe))
		return 0;

	err = memirq_alloc_pages(memirq);
	if (unlikely(err))
		return err;

	/* we need to start with all irqs enabled */
	memirq_set_enable(memirq, true);

	return 0;
}
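
/*
 * A hedged sketch of the expected VF-side call flow (the real call sites
 * live elsewhere in the driver; this only illustrates the ordering implied
 * by the kernel-doc of the functions in this file):
 *
 *	err = xe_memirq_init(memirq);		// allocate pages, enable irqs
 *	err = xe_memirq_init_guc(memirq, guc);	// point each GuC at the pages
 *	...
 *	xe_memirq_reset(memirq);		// IRQ install: mask everything
 *	xe_memirq_postinstall(memirq);		// IRQ install: unmask again
 *
 * with xe_memirq_handler() wired up to run on each interrupt from the
 * hardware.
 */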

/**
 * xe_memirq_source_ptr - Get GGTT's offset of the `Interrupt Source Report Page`_.
 * @memirq: the &xe_memirq to query
 *
 * Shall be called only by the VF driver when `Memory Based Interrupts`_ are
 * used and xe_memirq_init() didn't fail.
 *
 * Return: GGTT's offset of the `Interrupt Source Report Page`_.
 */
u32 xe_memirq_source_ptr(struct xe_memirq *memirq)
{
	memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
	memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
	memirq_assert(memirq, memirq->bo);

	return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_SOURCE_OFFSET;
}

/**
 * xe_memirq_status_ptr - Get GGTT's offset of the `Interrupt Status Report Page`_.
 * @memirq: the &xe_memirq to query
 *
 * Shall be called only by the VF driver when `Memory Based Interrupts`_ are
 * used and xe_memirq_init() didn't fail.
 *
 * Return: GGTT's offset of the `Interrupt Status Report Page`_.
 */
u32 xe_memirq_status_ptr(struct xe_memirq *memirq)
{
	memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
	memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
	memirq_assert(memirq, memirq->bo);

	return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_STATUS_OFFSET;
}

/**
 * xe_memirq_enable_ptr - Get GGTT's offset of the Interrupt Enable Mask.
 * @memirq: the &xe_memirq to query
 *
 * Shall be called only by the VF driver when `Memory Based Interrupts`_ are
 * used and xe_memirq_init() didn't fail.
 *
 * Return: GGTT's offset of the Interrupt Enable Mask.
 */
u32 xe_memirq_enable_ptr(struct xe_memirq *memirq)
{
	memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
	memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
	memirq_assert(memirq, memirq->bo);

	return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_ENABLE_OFFSET;
}

/**
 * xe_memirq_init_guc - Prepare GuC for `Memory Based Interrupts`_.
 * @memirq: the &xe_memirq
 * @guc: the &xe_guc to setup
 *
 * Register `Interrupt Source Report Page`_ and `Interrupt Status Report Page`_
 * to be used by the GuC when `Memory Based Interrupts`_ are required.
 *
 * Shall be called only by the VF driver when `Memory Based Interrupts`_ are
 * used and xe_memirq_init() didn't fail.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc)
{
	bool is_media = xe_gt_is_media_type(guc_to_gt(guc));
	u32 offset = is_media ? ilog2(INTR_MGUC) : ilog2(INTR_GUC);
	u32 source, status;
	int err;

	memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
	memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
	memirq_assert(memirq, memirq->bo);

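	/*
	 * Each interrupt bit owns one byte in the source "page" and one
	 * 16-byte status vector in the status "page" (see the layout DOC
	 * above), hence the plain byte offset vs. offset * SZ_16 below.
	 */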
	source = xe_memirq_source_ptr(memirq) + offset;
	status = xe_memirq_status_ptr(memirq) + offset * SZ_16;

	err = xe_guc_self_cfg64(guc, GUC_KLV_SELF_CFG_MEMIRQ_SOURCE_ADDR_KEY,
				source);
	if (unlikely(err))
		goto failed;

	err = xe_guc_self_cfg64(guc, GUC_KLV_SELF_CFG_MEMIRQ_STATUS_ADDR_KEY,
				status);
	if (unlikely(err))
		goto failed;

	return 0;

failed:
	xe_sriov_err(memirq_to_xe(memirq),
		     "Failed to setup report pages in %s (%pe)\n",
		     guc_name(guc), ERR_PTR(err));
	return err;
}

/**
 * xe_memirq_reset - Disable processing of `Memory Based Interrupts`_.
 * @memirq: the &xe_memirq
 *
 * This is part of the driver IRQ setup flow.
 *
 * This function shall only be used by the VF driver on platforms that use
 * `Memory Based Interrupts`_.
 */
void xe_memirq_reset(struct xe_memirq *memirq)
{
	memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
	memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));

	if (memirq->bo)
		memirq_set_enable(memirq, false);
}

/**
 * xe_memirq_postinstall - Enable processing of `Memory Based Interrupts`_.
 * @memirq: the &xe_memirq
 *
 * This is part of the driver IRQ setup flow.
 *
 * This function shall only be used by the VF driver on platforms that use
 * `Memory Based Interrupts`_.
 */
void xe_memirq_postinstall(struct xe_memirq *memirq)
{
	memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
	memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));

	if (memirq->bo)
		memirq_set_enable(memirq, true);
}

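/*
 * memirq_received() - test-and-clear one byte of an interrupt vector.
 *
 * Hardware sets the whole byte to 0xff when raising the corresponding
 * interrupt, so any other non-zero value is unexpected and only reported
 * (ratelimited). The byte is cleared before returning whether it was set.
 */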
static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector,
			    u16 offset, const char *name)
{
	u8 value;

	value = iosys_map_rd(vector, offset, u8);
	if (value) {
		if (value != 0xff)
			xe_sriov_err_ratelimited(memirq_to_xe(memirq),
						 "Unexpected memirq value %#x from %s at %u\n",
						 value, name, offset);
		iosys_map_wr(vector, offset, u8, 0x00);
	}

	return value;
}

static void memirq_dispatch_engine(struct xe_memirq *memirq, struct iosys_map *status,
				   struct xe_hw_engine *hwe)
{
	memirq_debug(memirq, "STATUS %s %*ph\n", hwe->name, 16, status->vaddr);

	if (memirq_received(memirq, status, ilog2(GT_RENDER_USER_INTERRUPT), hwe->name))
		xe_hw_engine_handle_irq(hwe, GT_RENDER_USER_INTERRUPT);
}

static void memirq_dispatch_guc(struct xe_memirq *memirq, struct iosys_map *status,
				struct xe_guc *guc)
{
	const char *name = guc_name(guc);

	memirq_debug(memirq, "STATUS %s %*ph\n", name, 16, status->vaddr);

	if (memirq_received(memirq, status, ilog2(GUC_INTR_GUC2HOST), name))
		xe_guc_irq_handler(guc, GUC_INTR_GUC2HOST);
}

/**
 * xe_memirq_handler - The `Memory Based Interrupts`_ Handler.
 * @memirq: the &xe_memirq
 *
 * This function reads and dispatches `Memory Based Interrupts`_.
 */
void xe_memirq_handler(struct xe_memirq *memirq)
{
	struct xe_device *xe = memirq_to_xe(memirq);
	struct xe_tile *tile = memirq_to_tile(memirq);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct iosys_map map;
	unsigned int gtid;
	struct xe_gt *gt;

	if (!memirq->bo)
		return;

	memirq_assert(memirq, !memirq->source.is_iomem);
	memirq_debug(memirq, "SOURCE %*ph\n", 32, memirq->source.vaddr);
	memirq_debug(memirq, "SOURCE %*ph\n", 32, memirq->source.vaddr + 32);

	for_each_gt(gt, xe, gtid) {
		if (gt->tile != tile)
			continue;

		for_each_hw_engine(hwe, gt, id) {
			if (memirq_received(memirq, &memirq->source, hwe->irq_offset, "SRC")) {
				map = IOSYS_MAP_INIT_OFFSET(&memirq->status,
							    hwe->irq_offset * SZ_16);
				memirq_dispatch_engine(memirq, &map, hwe);
			}
		}
	}

	/* GuC and media GuC (if present) must be checked separately */

	if (memirq_received(memirq, &memirq->source, ilog2(INTR_GUC), "SRC")) {
		map = IOSYS_MAP_INIT_OFFSET(&memirq->status, ilog2(INTR_GUC) * SZ_16);
		memirq_dispatch_guc(memirq, &map, &tile->primary_gt->uc.guc);
	}

	if (!tile->media_gt)
		return;

	if (memirq_received(memirq, &memirq->source, ilog2(INTR_MGUC), "SRC")) {
		map = IOSYS_MAP_INIT_OFFSET(&memirq->status, ilog2(INTR_MGUC) * SZ_16);
		memirq_dispatch_guc(memirq, &map, &tile->media_gt->uc.guc);
	}
}