// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021-2023 Intel Corporation
 */

#include "xe_mmio.h"

#include <linux/delay.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/minmax.h>
#include <linux/pci.h>

#include <drm/drm_managed.h>
#include <drm/drm_print.h>

#include "regs/xe_bars.h"
#include "regs/xe_regs.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_macros.h"
#include "xe_sriov.h"
#include "xe_trace.h"

static void tiles_fini(void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile;
	int id;

	for_each_remote_tile(tile, xe, id)
		tile->mmio.regs = NULL;
}

/*
 * On multi-tile devices, partition the BAR space for MMIO on each tile,
 * possibly accounting for a register override of the number of available
 * tiles. The resulting memory layout is as below:
 *
 * .----------------------. <- tile_count * tile_mmio_size
 * |         ....         |
 * |----------------------| <- 2 * tile_mmio_size
 * |   tile1->mmio.regs   |
 * |----------------------| <- 1 * tile_mmio_size
 * |   tile0->mmio.regs   |
 * '----------------------' <- 0MB
 */
static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
{
	struct xe_tile *tile;
	void __iomem *regs;
	u8 id;

	/*
	 * Nothing to be done as tile 0 has already been set up earlier with the
	 * entire BAR mapped - see xe_mmio_init()
	 */
	if (xe->info.tile_count == 1)
		return;

	/* Possibly override the number of tiles based on the configuration register */
	if (!xe->info.skip_mtcfg) {
		struct xe_gt *gt = xe_root_mmio_gt(xe);
		u8 tile_count;
		u32 mtcfg;

		/*
		 * Although the per-tile mmio regs are not yet initialized, this
		 * is fine as it's going to the root gt, which is guaranteed to be
		 * initialized earlier in xe_mmio_init()
		 */
		mtcfg = xe_mmio_read64_2x32(gt, XEHP_MTCFG_ADDR);
		tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;

		if (tile_count < xe->info.tile_count) {
			drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n",
				 xe->info.tile_count, tile_count);
			xe->info.tile_count = tile_count;

			/*
			 * FIXME: Needs some work for standalone media, but
			 * should be impossible with multi-tile for now: a
			 * multi-tile platform with standalone media doesn't
			 * exist
			 */
			xe->info.gt_count = xe->info.tile_count;
		}
	}

	regs = xe->mmio.regs;
	for_each_tile(tile, xe, id) {
		tile->mmio.size = tile_mmio_size;
		tile->mmio.regs = regs;
		regs += tile_mmio_size;
	}
}
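
/*
 * Illustrative sketch (not built): with the SZ_16M per-tile stride passed in
 * from xe_mmio_probe_tiles(), the loop above resolves to per-tile windows on
 * a hypothetical two-tile device as follows:
 *
 *	tile0->mmio.regs = xe->mmio.regs + 0 * SZ_16M;	(offset 0x0000000)
 *	tile1->mmio.regs = xe->mmio.regs + 1 * SZ_16M;	(offset 0x1000000)
 *
 * i.e. each tile simply gets a fixed 16MB window into the single BAR mapping
 * created earlier by xe_mmio_init().
 */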

/*
 * On top of all the multi-tile MMIO space there can be a platform-dependent
 * extension for each tile, resulting in a layout like below:
 *
 * .----------------------. <- ext_base + tile_count * tile_mmio_ext_size
 * |         ....         |
 * |----------------------| <- ext_base + 2 * tile_mmio_ext_size
 * | tile1->mmio_ext.regs |
 * |----------------------| <- ext_base + 1 * tile_mmio_ext_size
 * | tile0->mmio_ext.regs |
 * |======================| <- ext_base = tile_count * tile_mmio_size
 * |                      |
 * |      mmio.regs       |
 * |                      |
 * '----------------------' <- 0MB
 *
 * Set up the tile[]->mmio_ext pointers/sizes.
 */
static void mmio_extension_setup(struct xe_device *xe, size_t tile_mmio_size,
				 size_t tile_mmio_ext_size)
{
	struct xe_tile *tile;
	void __iomem *regs;
	u8 id;

	if (!xe->info.has_mmio_ext)
		return;

	regs = xe->mmio.regs + tile_mmio_size * xe->info.tile_count;
	for_each_tile(tile, xe, id) {
		tile->mmio_ext.size = tile_mmio_ext_size;
		tile->mmio_ext.regs = regs;
		regs += tile_mmio_ext_size;
	}
}
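
/*
 * Illustrative sketch (not built): on a hypothetical two-tile device with
 * has_mmio_ext set, the extension windows start right after the regular
 * per-tile MMIO space:
 *
 *	ext_base             = xe->mmio.regs + 2 * tile_mmio_size;
 *	tile0->mmio_ext.regs = ext_base + 0 * tile_mmio_ext_size;
 *	tile1->mmio_ext.regs = ext_base + 1 * tile_mmio_ext_size;
 *
 * ext_base here is just a local name for the expression in the diagram above.
 */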

int xe_mmio_probe_tiles(struct xe_device *xe)
{
	size_t tile_mmio_size = SZ_16M;
	size_t tile_mmio_ext_size = xe->info.tile_mmio_ext_size;

	mmio_multi_tile_setup(xe, tile_mmio_size);
	mmio_extension_setup(xe, tile_mmio_size, tile_mmio_ext_size);

	return devm_add_action_or_reset(xe->drm.dev, tiles_fini, xe);
}

static void mmio_fini(void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *root_tile = xe_device_get_root_tile(xe);

	pci_iounmap(to_pci_dev(xe->drm.dev), xe->mmio.regs);
	xe->mmio.regs = NULL;
	root_tile->mmio.regs = NULL;
}

int xe_mmio_init(struct xe_device *xe)
{
	struct xe_tile *root_tile = xe_device_get_root_tile(xe);
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	const int mmio_bar = 0;

	/*
	 * Map the entire BAR.
	 * The first 16MB of the BAR belong to the root tile, and include:
	 * registers (0-4MB), reserved space (4MB-8MB) and GGTT (8MB-16MB).
	 */
	xe->mmio.size = pci_resource_len(pdev, mmio_bar);
	xe->mmio.regs = pci_iomap(pdev, mmio_bar, GTTMMADR_BAR);
	if (xe->mmio.regs == NULL) {
		drm_err(&xe->drm, "failed to map registers\n");
		return -EIO;
	}

	/* Set up the first tile; other tiles (if present) will be set up later. */
	root_tile->mmio.size = SZ_16M;
	root_tile->mmio.regs = xe->mmio.regs;

	return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe);
}

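/*
 * Descriptive note (editor's assumption, not taken from the original sources):
 * the register read helpers below call this before reading, per the
 * Wa_15015404425 references at the call sites. On Lunar Lake it issues four
 * writes to a scratch offset so that any MMIO writes still pending are pushed
 * out before the read; on every other platform it is a no-op.
 */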
static void mmio_flush_pending_writes(struct xe_gt *gt)
{
#define DUMMY_REG_OFFSET 0x130030
	struct xe_tile *tile = gt_to_tile(gt);
	int i;

	if (tile->xe->info.platform != XE_LUNARLAKE)
		return;

	/* 4 dummy writes */
	for (i = 0; i < 4; i++)
		writel(0, tile->mmio.regs + DUMMY_REG_OFFSET);
}

u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
{
	struct xe_tile *tile = gt_to_tile(gt);
	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
	u8 val;

	/* Wa_15015404425 */
	mmio_flush_pending_writes(gt);

	val = readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
	trace_xe_reg_rw(gt, false, addr, val, sizeof(val));

	return val;
}

u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg)
{
	struct xe_tile *tile = gt_to_tile(gt);
	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
	u16 val;

	/* Wa_15015404425 */
	mmio_flush_pending_writes(gt);

	val = readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
	trace_xe_reg_rw(gt, false, addr, val, sizeof(val));

	return val;
}

void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
{
	struct xe_tile *tile = gt_to_tile(gt);
	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);

	trace_xe_reg_rw(gt, true, addr, val, sizeof(val));

	if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt)))
		xe_gt_sriov_vf_write32(gt, reg, val);
	else
		writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
}

u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
{
	struct xe_tile *tile = gt_to_tile(gt);
	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
	u32 val;

	/* Wa_15015404425 */
	mmio_flush_pending_writes(gt);

	if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt)))
		val = xe_gt_sriov_vf_read32(gt, reg);
	else
		val = readl((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);

	trace_xe_reg_rw(gt, false, addr, val, sizeof(val));

	return val;
}

u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set)
{
	u32 old, reg_val;

	old = xe_mmio_read32(gt, reg);
	reg_val = (old & ~clr) | set;
	xe_mmio_write32(gt, reg, reg_val);

	return old;
}

int xe_mmio_write32_and_verify(struct xe_gt *gt,
			       struct xe_reg reg, u32 val, u32 mask, u32 eval)
{
	u32 reg_val;

	xe_mmio_write32(gt, reg, val);
	reg_val = xe_mmio_read32(gt, reg);

	return (reg_val & mask) != eval ? -EINVAL : 0;
}

bool xe_mmio_in_range(const struct xe_gt *gt,
		      const struct xe_mmio_range *range,
		      struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);

	return range && addr >= range->start && addr <= range->end;
}

/**
 * xe_mmio_read64_2x32() - Read a 64-bit register as two 32-bit reads
 * @gt: MMIO target GT
 * @reg: register to read value from
 *
 * Although Intel GPUs have some 64-bit registers, the hardware officially
 * only supports GTTMMADR register reads of 32 bits or smaller. Even though
 * a readq operation may return a reasonable value, that violation of the
 * spec shouldn't be relied upon and all 64-bit register reads should be
 * performed as two 32-bit reads of the upper and lower dwords.
 *
 * When reading registers that may be changing (such as counters), a rollover
 * of the lower dword between the two 32-bit reads can be problematic. This
 * function attempts to ensure the upper dword has stabilized before returning
 * the 64-bit value.
 *
 * Note that because this function may re-read the register multiple times
 * while waiting for the value to stabilize it should not be used to read
 * any registers where read operations have side effects.
 *
 * Returns the value of the 64-bit register.
 */
u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg)
{
	struct xe_reg reg_udw = { .addr = reg.addr + 0x4 };
	u32 ldw, udw, oldudw, retries;

	reg.addr = xe_mmio_adjusted_addr(gt, reg.addr);
	reg_udw.addr = xe_mmio_adjusted_addr(gt, reg_udw.addr);

	/* we shouldn't adjust just one register address */
	xe_gt_assert(gt, reg_udw.addr == reg.addr + 0x4);

	oldudw = xe_mmio_read32(gt, reg_udw);
	for (retries = 5; retries; --retries) {
		ldw = xe_mmio_read32(gt, reg);
		udw = xe_mmio_read32(gt, reg_udw);

		if (udw == oldudw)
			break;

		oldudw = udw;
	}

	xe_gt_WARN(gt, retries == 0,
		   "64-bit read of %#x did not stabilize\n", reg.addr);

	return (u64)udw << 32 | ldw;
}
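
/*
 * Usage sketch (illustrative only; the register name below is a placeholder,
 * not one defined by the driver): reading a free-running 64-bit counter with
 * the lo/hi stabilization loop above instead of a single readq:
 *
 *	u64 ts = xe_mmio_read64_2x32(gt, SOME_64BIT_COUNTER_REG);
 *
 * The lower dword may roll over between the two 32-bit reads, which is why
 * the helper re-reads until the upper dword matches its previous value.
 */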

static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
			    u32 *out_val, bool atomic, bool expect_match)
{
	ktime_t cur = ktime_get_raw();
	const ktime_t end = ktime_add_us(cur, timeout_us);
	int ret = -ETIMEDOUT;
	s64 wait = 10;
	u32 read;
	bool check;

	for (;;) {
		read = xe_mmio_read32(gt, reg);

		check = (read & mask) == val;
		if (!expect_match)
			check = !check;

		if (check) {
			ret = 0;
			break;
		}

		cur = ktime_get_raw();
		if (!ktime_before(cur, end))
			break;

		if (ktime_after(ktime_add_us(cur, wait), end))
			wait = ktime_us_delta(end, cur);

		if (atomic)
			udelay(wait);
		else
			usleep_range(wait, wait << 1);
		wait <<= 1;
	}

	if (ret != 0) {
		read = xe_mmio_read32(gt, reg);

		check = (read & mask) == val;
		if (!expect_match)
			check = !check;

		if (check)
			ret = 0;
	}

	if (out_val)
		*out_val = read;

	return ret;
}

/**
 * xe_mmio_wait32() - Wait for a register to match the desired masked value
 * @gt: MMIO target GT
 * @reg: register to read value from
 * @mask: mask to be applied to the value read from the register
 * @val: desired value after applying the mask
 * @timeout_us: time out after this period of time. Wait logic tries to be
 * smart, applying an exponential backoff until @timeout_us is reached.
 * @out_val: if not NULL, points where to store the last unmasked value
 * @atomic: needs to be true if calling from an atomic context
 *
 * This function polls for the desired masked value and returns zero on success
 * or -ETIMEDOUT if timed out.
 *
 * Note that @timeout_us represents the minimum amount of time to wait before
 * giving up. The actual time taken by this function can be a little more than
 * @timeout_us for different reasons, especially in non-atomic contexts. Thus,
 * it is possible that this function succeeds even after @timeout_us has passed.
 */
int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
		   u32 *out_val, bool atomic)
{
	return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, true);
}
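
/*
 * Usage sketch (illustrative only; STATUS_REG and STATUS_READY are placeholder
 * names, not registers defined by the driver): waiting up to 10ms, outside
 * atomic context, for a ready bit to assert:
 *
 *	u32 status;
 *	int err;
 *
 *	err = xe_mmio_wait32(gt, STATUS_REG, STATUS_READY, STATUS_READY,
 *			     10 * USEC_PER_MSEC, &status, false);
 *	if (err == -ETIMEDOUT)
 *		drm_warn(&gt_to_xe(gt)->drm, "timed out, status %#x\n", status);
 */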

/**
 * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
 * @gt: MMIO target GT
 * @reg: register to read value from
 * @mask: mask to be applied to the value read from the register
 * @val: value not to be matched after applying the mask
 * @timeout_us: time out after this period of time
 * @out_val: if not NULL, points where to store the last unmasked value
 * @atomic: needs to be true if calling from an atomic context
 *
 * This function works exactly like xe_mmio_wait32() with the exception that
 * @val is expected not to be matched.
 */
int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
		       u32 *out_val, bool atomic)
{
	return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, false);
}
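
/*
 * Usage sketch (illustrative only; BUSY_REG and BUSY_BIT are placeholder
 * names): waiting for a busy bit to clear is simply the inverted match:
 *
 *	err = xe_mmio_wait32_not(gt, BUSY_REG, BUSY_BIT, BUSY_BIT,
 *				 10 * USEC_PER_MSEC, NULL, false);
 */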