// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021-2023 Intel Corporation
 */

#include "xe_mmio.h"

#include <linux/delay.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/minmax.h>
#include <linux/pci.h>

#include <drm/drm_managed.h>
#include <drm/drm_print.h>

#include "regs/xe_bars.h"
#include "regs/xe_regs.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_macros.h"
#include "xe_sriov.h"
#include "xe_trace.h"

static void tiles_fini(void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile;
	int id;

	for_each_remote_tile(tile, xe, id)
		tile->mmio.regs = NULL;
}

/*
 * On multi-tile devices, partition the BAR space for MMIO on each tile,
 * possibly accounting for register override on the number of tiles available.
 * tile_mmio_size contains both the tile's 4MB register space and additional
 * space for the GTT and other (possibly unused) regions.
 * The resulting memory layout is as follows:
 *
 * .----------------------. <- tile_count * tile_mmio_size
 * |         ....         |
 * |----------------------| <- 2 * tile_mmio_size
 * |   tile1 GTT + other  |
 * |----------------------| <- 1 * tile_mmio_size + 4MB
 * |   tile1->mmio.regs   |
 * |----------------------| <- 1 * tile_mmio_size
 * |   tile0 GTT + other  |
 * |----------------------| <- 4MB
 * |   tile0->mmio.regs   |
 * '----------------------' <- 0MB
 */
static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
{
	struct xe_tile *tile;
	u8 id;

	/*
	 * Nothing to be done as tile 0 has already been set up earlier with
	 * the entire BAR mapped - see xe_mmio_probe_early()
	 */
	if (xe->info.tile_count == 1)
		return;

	/* Possibly override the number of tiles based on the configuration register */
	if (!xe->info.skip_mtcfg) {
		struct xe_mmio *mmio = xe_root_tile_mmio(xe);
		u8 tile_count;
		u32 mtcfg;

		/*
		 * Although the per-tile mmio regs are not yet initialized, this
		 * is fine as it's going to the root tile's mmio, which is
		 * guaranteed to have been initialized earlier in
		 * xe_mmio_probe_early()
		 */
		mtcfg = xe_mmio_read32(mmio, XEHP_MTCFG_ADDR);
		tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;

		if (tile_count < xe->info.tile_count) {
			drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n",
				 xe->info.tile_count, tile_count);
			xe->info.tile_count = tile_count;

			/*
			 * FIXME: Needs some work for standalone media, but
			 * should be impossible with multi-tile for now:
			 * multi-tile platform with standalone media doesn't
			 * exist
			 */
			xe->info.gt_count = xe->info.tile_count;
		}
	}

	for_each_remote_tile(tile, xe, id)
		xe_mmio_init(&tile->mmio, tile, xe->mmio.regs + id * tile_mmio_size, SZ_4M);
}
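
/*
 * Worked example of the layout above, assuming a hypothetical two-tile
 * device and the SZ_16M per-tile stride passed in by xe_mmio_probe_tiles():
 *
 *	tile0->mmio.regs = xe->mmio.regs + 0 * SZ_16M;   registers:  0MB -  4MB
 *	                                                 GTT/other:  4MB - 16MB
 *	tile1->mmio.regs = xe->mmio.regs + 1 * SZ_16M;   registers: 16MB - 20MB
 *	                                                 GTT/other: 20MB - 32MB
 *
 * Each per-tile struct xe_mmio is limited to the first SZ_4M of its slice,
 * i.e. only the register portion of the diagram above.
 */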

int xe_mmio_probe_tiles(struct xe_device *xe)
{
	size_t tile_mmio_size = SZ_16M;

	mmio_multi_tile_setup(xe, tile_mmio_size);

	return devm_add_action_or_reset(xe->drm.dev, tiles_fini, xe);
}

static void mmio_fini(void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *root_tile = xe_device_get_root_tile(xe);

	pci_iounmap(to_pci_dev(xe->drm.dev), xe->mmio.regs);
	xe->mmio.regs = NULL;
	root_tile->mmio.regs = NULL;
}

int xe_mmio_probe_early(struct xe_device *xe)
{
	struct xe_tile *root_tile = xe_device_get_root_tile(xe);
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);

	/*
	 * Map the entire BAR.
	 * The first 16MB of the BAR belong to the root tile and include:
	 * registers (0-4MB), reserved space (4MB-8MB) and GGTT (8MB-16MB).
	 */
	xe->mmio.size = pci_resource_len(pdev, GTTMMADR_BAR);
	xe->mmio.regs = pci_iomap(pdev, GTTMMADR_BAR, 0);
	if (!xe->mmio.regs) {
		drm_err(&xe->drm, "failed to map registers\n");
		return -EIO;
	}

	/* Set up the first tile; other tiles (if present) will be set up later. */
	xe_mmio_init(&root_tile->mmio, root_tile, xe->mmio.regs, SZ_4M);

	return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe);
}
ALLOW_ERROR_INJECTION(xe_mmio_probe_early, ERRNO); /* See xe_pci_probe() */

/**
 * xe_mmio_init() - Initialize an MMIO instance
 * @mmio: Pointer to the MMIO instance to initialize
 * @tile: The tile to which the MMIO region belongs
 * @ptr: Pointer to the start of the MMIO region
 * @size: The size of the MMIO region in bytes
 *
 * This is a convenience function for minimal initialization of struct xe_mmio.
 */
void xe_mmio_init(struct xe_mmio *mmio, struct xe_tile *tile, void __iomem *ptr, u32 size)
{
	xe_tile_assert(tile, size <= XE_REG_ADDR_MAX);

	mmio->regs = ptr;
	mmio->regs_size = size;
	mmio->tile = tile;
}

static void mmio_flush_pending_writes(struct xe_mmio *mmio)
{
#define DUMMY_REG_OFFSET	0x130030
	int i;

	if (mmio->tile->xe->info.platform != XE_LUNARLAKE)
		return;

	/* 4 dummy writes */
	for (i = 0; i < 4; i++)
		writel(0, mmio->regs + DUMMY_REG_OFFSET);
}

u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
	u8 val;

	/* Wa_15015404425 */
	mmio_flush_pending_writes(mmio);

	val = readb(mmio->regs + addr);
	trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));

	return val;
}

u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
	u16 val;

	/* Wa_15015404425 */
	mmio_flush_pending_writes(mmio);

	val = readw(mmio->regs + addr);
	trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));

	return val;
}
void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);

	trace_xe_reg_rw(mmio, true, addr, val, sizeof(val));

	if (!reg.vf && IS_SRIOV_VF(mmio->tile->xe))
		xe_gt_sriov_vf_write32(mmio->sriov_vf_gt ?:
				       mmio->tile->primary_gt, reg, val);
	else
		writel(val, mmio->regs + addr);
}

u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
	u32 val;

	/* Wa_15015404425 */
	mmio_flush_pending_writes(mmio);

	if (!reg.vf && IS_SRIOV_VF(mmio->tile->xe))
		val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt ?:
					    mmio->tile->primary_gt, reg);
	else
		val = readl(mmio->regs + addr);

	trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));

	return val;
}

u32 xe_mmio_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set)
{
	u32 old, reg_val;

	old = xe_mmio_read32(mmio, reg);
	reg_val = (old & ~clr) | set;
	xe_mmio_write32(mmio, reg, reg_val);

	return old;
}
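
/*
 * Illustrative use of xe_mmio_rmw32() (the register and bit names below are
 * placeholders, not actual Xe registers): clear one field and set another in
 * a single read-modify-write, keeping the returned old value so the caller
 * can restore it later:
 *
 *	u32 old = xe_mmio_rmw32(mmio, SOME_CTL_REG, SOME_CTL_MASK, SOME_CTL_ENABLE);
 *	...
 *	xe_mmio_write32(mmio, SOME_CTL_REG, old);
 */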

int xe_mmio_write32_and_verify(struct xe_mmio *mmio,
			       struct xe_reg reg, u32 val, u32 mask, u32 eval)
{
	u32 reg_val;

	xe_mmio_write32(mmio, reg, val);
	reg_val = xe_mmio_read32(mmio, reg);

	return (reg_val & mask) != eval ? -EINVAL : 0;
}

bool xe_mmio_in_range(const struct xe_mmio *mmio,
		      const struct xe_mmio_range *range,
		      struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);

	return range && addr >= range->start && addr <= range->end;
}

/**
 * xe_mmio_read64_2x32() - Read a 64-bit register as two 32-bit reads
 * @mmio: MMIO target
 * @reg: register to read value from
 *
 * Although Intel GPUs have some 64-bit registers, the hardware officially
 * only supports GTTMMADR register reads of 32 bits or smaller. Even though
 * a readq operation may return a reasonable value, that violation of the
 * spec shouldn't be relied upon and all 64-bit register reads should be
 * performed as two 32-bit reads of the upper and lower dwords.
 *
 * When reading registers that may be changing (such as
 * counters), a rollover of the lower dword between the two 32-bit reads
 * can be problematic. This function attempts to ensure the upper dword has
 * stabilized before returning the 64-bit value.
 *
 * Note that because this function may re-read the register multiple times
 * while waiting for the value to stabilize it should not be used to read
 * any registers where read operations have side effects.
 *
 * Returns the value of the 64-bit register.
 */
u64 xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg)
{
	struct xe_reg reg_udw = { .addr = reg.addr + 0x4 };
	u32 ldw, udw, oldudw, retries;

	reg.addr = xe_mmio_adjusted_addr(mmio, reg.addr);
	reg_udw.addr = xe_mmio_adjusted_addr(mmio, reg_udw.addr);

	/* we shouldn't adjust just one register address */
	xe_tile_assert(mmio->tile, reg_udw.addr == reg.addr + 0x4);

	oldudw = xe_mmio_read32(mmio, reg_udw);
	for (retries = 5; retries; --retries) {
		ldw = xe_mmio_read32(mmio, reg);
		udw = xe_mmio_read32(mmio, reg_udw);

		if (udw == oldudw)
			break;

		oldudw = udw;
	}

	drm_WARN(&mmio->tile->xe->drm, retries == 0,
		 "64-bit read of %#x did not stabilize\n", reg.addr);

	return (u64)udw << 32 | ldw;
}
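
/*
 * Illustrative call (the register name is a placeholder, not an actual Xe
 * register): pass the register holding the lower dword; the upper dword is
 * read from the next dword address (reg.addr + 0x4), as implemented above:
 *
 *	u64 count = xe_mmio_read64_2x32(mmio, SOME_COUNTER_LDW);
 */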

static int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val,
			    u32 timeout_us, u32 *out_val, bool atomic, bool expect_match)
{
	ktime_t cur = ktime_get_raw();
	const ktime_t end = ktime_add_us(cur, timeout_us);
	int ret = -ETIMEDOUT;
	s64 wait = 10;
	u32 read;
	bool check;

	for (;;) {
		read = xe_mmio_read32(mmio, reg);

		check = (read & mask) == val;
		if (!expect_match)
			check = !check;

		if (check) {
			ret = 0;
			break;
		}

		cur = ktime_get_raw();
		if (!ktime_before(cur, end))
			break;

		if (ktime_after(ktime_add_us(cur, wait), end))
			wait = ktime_us_delta(end, cur);

		if (atomic)
			udelay(wait);
		else
			usleep_range(wait, wait << 1);
		wait <<= 1;
	}

	if (ret != 0) {
		read = xe_mmio_read32(mmio, reg);

		check = (read & mask) == val;
		if (!expect_match)
			check = !check;

		if (check)
			ret = 0;
	}

	if (out_val)
		*out_val = read;

	return ret;
}

/**
 * xe_mmio_wait32() - Wait for a register to match the desired masked value
 * @mmio: MMIO target
 * @reg: register to read value from
 * @mask: mask to be applied to the value read from the register
 * @val: desired value after applying the mask
 * @timeout_us: time out after this period of time. Wait logic tries to be
 * smart, applying an exponential backoff until @timeout_us is reached.
 * @out_val: if not NULL, points where to store the last unmasked value
 * @atomic: needs to be true if calling from an atomic context
 *
 * This function polls for the desired masked value and returns zero on success
 * or -ETIMEDOUT if timed out.
 *
 * Note that @timeout_us represents the minimum amount of time to wait before
 * giving up. The actual time taken by this function can be a little more than
 * @timeout_us for different reasons, especially in non-atomic contexts. Thus,
 * it is possible that this function succeeds even after @timeout_us has passed.
 */
int xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
		   u32 *out_val, bool atomic)
{
	return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, true);
}
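
/*
 * Illustrative poll (register, bit and variable names are placeholders):
 * wait up to 10ms, outside atomic context, for a status bit to become set
 * and capture the last value read for error reporting:
 *
 *	u32 status;
 *	int err = xe_mmio_wait32(mmio, SOME_STATUS_REG, SOME_READY_BIT,
 *				 SOME_READY_BIT, 10000, &status, false);
 *	if (err == -ETIMEDOUT)
 *		drm_err(&xe->drm, "timed out, status: %#x\n", status);
 */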

/**
 * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
 * @mmio: MMIO target
 * @reg: register to read value from
 * @mask: mask to be applied to the value read from the register
 * @val: value not to be matched after applying the mask
 * @timeout_us: time out after this period of time
 * @out_val: if not NULL, points where to store the last unmasked value
 * @atomic: needs to be true if calling from an atomic context
 *
 * This function works exactly like xe_mmio_wait32() with the exception that
 * @val is expected not to be matched.
 */
int xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
		       u32 *out_val, bool atomic)
{
	return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, false);
}