// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021-2023 Intel Corporation
 */

#include "xe_mmio.h"

#include <linux/delay.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/minmax.h>
#include <linux/pci.h>

#include <drm/drm_managed.h>
#include <drm/drm_print.h>

#include "regs/xe_bars.h"
#include "regs/xe_regs.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_macros.h"
#include "xe_sriov.h"
#include "xe_trace.h"

static void tiles_fini(void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile;
	int id;

	for_each_remote_tile(tile, xe, id)
		tile->mmio.regs = NULL;
}

/*
 * On multi-tile devices, partition the BAR space for MMIO on each tile,
 * possibly accounting for a register-based override of the number of tiles
 * available. tile_mmio_size covers both the tile's 4MB register space and
 * the additional space for the GTT and other (possibly unused) regions.
 * The resulting memory layout is as below:
 *
 * .----------------------. <- tile_count * tile_mmio_size
 * |         ....         |
 * |----------------------| <- 2 * tile_mmio_size
 * |   tile1 GTT + other  |
 * |----------------------| <- 1 * tile_mmio_size + 4MB
 * |   tile1->mmio.regs   |
 * |----------------------| <- 1 * tile_mmio_size
 * |   tile0 GTT + other  |
 * |----------------------| <- 4MB
 * |   tile0->mmio.regs   |
 * '----------------------' <- 0MB
 */
static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
{
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;

	/*
	 * Nothing to be done as tile 0 has already been set up earlier with
	 * the entire BAR mapped - see xe_mmio_probe_early()
	 */
	if (xe->info.tile_count == 1)
		return;

	/* Possibly override the number of tiles based on the configuration register */
	if (!xe->info.skip_mtcfg) {
		struct xe_mmio *mmio = xe_root_tile_mmio(xe);
		u8 tile_count, gt_count;
		u32 mtcfg;

		/*
		 * Although the per-tile mmio regs are not yet initialized, this
		 * is fine as it's going to the root tile's mmio, which is
		 * guaranteed to be initialized earlier in xe_mmio_probe_early()
		 */
		mtcfg = xe_mmio_read32(mmio, XEHP_MTCFG_ADDR);
		tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;

		if (tile_count < xe->info.tile_count) {
			drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n",
				 xe->info.tile_count, tile_count);
			xe->info.tile_count = tile_count;

			/*
			 * We've already set up gt_count according to the full
			 * tile count.  Re-calculate it to only include the GTs
			 * that belong to the remaining tile(s).
			 */
			gt_count = 0;
			for_each_gt(gt, xe, id)
				if (gt->info.id < tile_count * xe->info.max_gt_per_tile)
					gt_count++;
			xe->info.gt_count = gt_count;
		}
	}

	for_each_remote_tile(tile, xe, id)
		xe_mmio_init(&tile->mmio, tile, xe->mmio.regs + id * tile_mmio_size, SZ_4M);
}
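
/*
 * Worked example of the partitioning above (illustrative only): with the
 * tile_mmio_size of SZ_16M used by xe_mmio_probe_tiles() below, a
 * hypothetical two-tile device ends up with:
 *
 *	tile0->mmio.regs = xe->mmio.regs            (4MB register space)
 *	tile1->mmio.regs = xe->mmio.regs + SZ_16M   (4MB register space)
 *
 * with each tile's GTT and other regions occupying the remainder of its
 * 16MB slice.
 */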

int xe_mmio_probe_tiles(struct xe_device *xe)
{
	size_t tile_mmio_size = SZ_16M;

	mmio_multi_tile_setup(xe, tile_mmio_size);

	return devm_add_action_or_reset(xe->drm.dev, tiles_fini, xe);
}

static void mmio_fini(void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *root_tile = xe_device_get_root_tile(xe);

	pci_iounmap(to_pci_dev(xe->drm.dev), xe->mmio.regs);
	xe->mmio.regs = NULL;
	root_tile->mmio.regs = NULL;
}

int xe_mmio_probe_early(struct xe_device *xe)
{
	struct xe_tile *root_tile = xe_device_get_root_tile(xe);
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);

	/*
	 * Map the entire BAR.
	 * The first 16MB of the BAR belongs to the root tile and includes:
	 * registers (0-4MB), reserved space (4MB-8MB) and GGTT (8MB-16MB).
	 */
	xe->mmio.size = pci_resource_len(pdev, GTTMMADR_BAR);
	xe->mmio.regs = pci_iomap(pdev, GTTMMADR_BAR, 0);
	if (!xe->mmio.regs) {
		drm_err(&xe->drm, "failed to map registers\n");
		return -EIO;
	}

	/* Set up the first tile; other tiles (if present) will be set up later. */
	xe_mmio_init(&root_tile->mmio, root_tile, xe->mmio.regs, SZ_4M);

	return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe);
}
ALLOW_ERROR_INJECTION(xe_mmio_probe_early, ERRNO); /* See xe_pci_probe() */

/**
 * xe_mmio_init() - Initialize an MMIO instance
 * @mmio: Pointer to the MMIO instance to initialize
 * @tile: The tile to which the MMIO region belongs
 * @ptr: Pointer to the start of the MMIO region
 * @size: The size of the MMIO region in bytes
 *
 * This is a convenience function for minimal initialization of struct xe_mmio.
 */
void xe_mmio_init(struct xe_mmio *mmio, struct xe_tile *tile, void __iomem *ptr, u32 size)
{
	xe_tile_assert(tile, size <= XE_REG_ADDR_MAX);

	mmio->regs = ptr;
	mmio->regs_size = size;
	mmio->tile = tile;
}

/*
 * Wa_15015404425: on Lunar Lake, flush any pending posted MMIO writes with
 * a series of dummy writes before performing a register read.
 */
static void mmio_flush_pending_writes(struct xe_mmio *mmio)
{
#define DUMMY_REG_OFFSET	0x130030
	int i;

	if (mmio->tile->xe->info.platform != XE_LUNARLAKE)
		return;

	/* 4 dummy writes */
	for (i = 0; i < 4; i++)
		writel(0, mmio->regs + DUMMY_REG_OFFSET);
}

u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
	u8 val;

	/* Wa_15015404425 */
	mmio_flush_pending_writes(mmio);

	val = readb(mmio->regs + addr);
	trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));

	return val;
}

u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
	u16 val;

	/* Wa_15015404425 */
	mmio_flush_pending_writes(mmio);

	val = readw(mmio->regs + addr);
	trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));

	return val;
}

void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);

	trace_xe_reg_rw(mmio, true, addr, val, sizeof(val));

	if (!reg.vf && IS_SRIOV_VF(mmio->tile->xe))
		xe_gt_sriov_vf_write32(mmio->sriov_vf_gt ?:
				       mmio->tile->primary_gt, reg, val);
	else
		writel(val, mmio->regs + addr);
}

u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
	u32 val;

	/* Wa_15015404425 */
	mmio_flush_pending_writes(mmio);

	if (!reg.vf && IS_SRIOV_VF(mmio->tile->xe))
		val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt ?:
					    mmio->tile->primary_gt, reg);
	else
		val = readl(mmio->regs + addr);

	trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));

	return val;
}

u32 xe_mmio_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set)
{
	u32 old, reg_val;

	old = xe_mmio_read32(mmio, reg);
	reg_val = (old & ~clr) | set;
	xe_mmio_write32(mmio, reg, reg_val);

	return old;
}
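
/*
 * Example usage of xe_mmio_rmw32() (an illustrative sketch, not driver
 * code): update a single field of a register while preserving all other
 * bits, where CTL_REG, CTL_MODE_MASK and new_mode are hypothetical names:
 *
 *	u32 old = xe_mmio_rmw32(mmio, CTL_REG, CTL_MODE_MASK,
 *				REG_FIELD_PREP(CTL_MODE_MASK, new_mode));
 *
 * The pre-modification value is returned so the caller can restore it
 * later if needed. Note the sequence is a plain read + write, not atomic.
 */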

int xe_mmio_write32_and_verify(struct xe_mmio *mmio,
			       struct xe_reg reg, u32 val, u32 mask, u32 eval)
{
	u32 reg_val;

	xe_mmio_write32(mmio, reg, val);
	reg_val = xe_mmio_read32(mmio, reg);

	return (reg_val & mask) != eval ? -EINVAL : 0;
}

bool xe_mmio_in_range(const struct xe_mmio *mmio,
		      const struct xe_mmio_range *range,
		      struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);

	return range && addr >= range->start && addr <= range->end;
}

/**
 * xe_mmio_read64_2x32() - Read a 64-bit register as two 32-bit reads
 * @mmio: MMIO target
 * @reg: register to read value from
 *
 * Although Intel GPUs have some 64-bit registers, the hardware officially
 * only supports GTTMMADR register reads of 32 bits or smaller.  Even though
 * a readq operation may return a reasonable value, that violation of the
 * spec shouldn't be relied upon and all 64-bit register reads should be
 * performed as two 32-bit reads of the upper and lower dwords.
 *
 * When reading registers that may be changing (such as counters), a
 * rollover of the lower dword between the two 32-bit reads can be
 * problematic.  This function attempts to ensure the upper dword has
 * stabilized before returning the 64-bit value.
 *
 * Note that because this function may re-read the register multiple times
 * while waiting for the value to stabilize it should not be used to read
 * any registers where read operations have side effects.
 *
 * Returns the value of the 64-bit register.
 */
u64 xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg)
{
	struct xe_reg reg_udw = { .addr = reg.addr + 0x4 };
	u32 ldw, udw, oldudw, retries;

	reg.addr = xe_mmio_adjusted_addr(mmio, reg.addr);
	reg_udw.addr = xe_mmio_adjusted_addr(mmio, reg_udw.addr);

	/* we shouldn't adjust just one register address */
	xe_tile_assert(mmio->tile, reg_udw.addr == reg.addr + 0x4);

	oldudw = xe_mmio_read32(mmio, reg_udw);
	for (retries = 5; retries; --retries) {
		ldw = xe_mmio_read32(mmio, reg);
		udw = xe_mmio_read32(mmio, reg_udw);

		if (udw == oldudw)
			break;

		oldudw = udw;
	}

	drm_WARN(&mmio->tile->xe->drm, retries == 0,
		 "64-bit read of %#x did not stabilize\n", reg.addr);

	return (u64)udw << 32 | ldw;
}
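
/*
 * Worked example of the rollover hazard handled above (values are
 * illustrative): suppose a 64-bit counter holds 0x00000001ffffffff. A
 * naive pair of reads could observe ldw = 0xffffffff, then the counter
 * increments to 0x0000000200000000, then udw = 0x00000002, combining into
 * 0x00000002ffffffff - off by nearly 2^32. The retry loop catches this
 * because udw (0x00000002) no longer matches oldudw (0x00000001) and
 * reads again.
 */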

static int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val,
			    u32 timeout_us, u32 *out_val, bool atomic, bool expect_match)
{
	ktime_t cur = ktime_get_raw();
	const ktime_t end = ktime_add_us(cur, timeout_us);
	int ret = -ETIMEDOUT;
	s64 wait = 10;
	u32 read;
	bool check;

	for (;;) {
		read = xe_mmio_read32(mmio, reg);

		check = (read & mask) == val;
		if (!expect_match)
			check = !check;

		if (check) {
			ret = 0;
			break;
		}

		cur = ktime_get_raw();
		if (!ktime_before(cur, end))
			break;

		if (ktime_after(ktime_add_us(cur, wait), end))
			wait = ktime_us_delta(end, cur);

		if (atomic)
			udelay(wait);
		else
			usleep_range(wait, wait << 1);
		wait <<= 1;
	}

	if (ret != 0) {
		read = xe_mmio_read32(mmio, reg);

		check = (read & mask) == val;
		if (!expect_match)
			check = !check;

		if (check)
			ret = 0;
	}

	if (out_val)
		*out_val = read;

	return ret;
}
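
/*
 * Backoff behavior of the loop above, traced for illustration: the wait
 * starts at 10us and doubles on each iteration (10, 20, 40, 80, ... us),
 * with the last wait clamped so the deadline is not overshot. Atomic
 * callers busy-wait with udelay(), while others use usleep_range() to
 * give the scheduler some slack. The final re-read after timing out
 * avoids false negatives when the thread slept past the deadline.
 */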

/**
 * xe_mmio_wait32() - Wait for a register to match the desired masked value
 * @mmio: MMIO target
 * @reg: register to read value from
 * @mask: mask to be applied to the value read from the register
 * @val: desired value after applying the mask
 * @timeout_us: time out after this period of time. Wait logic tries to be
 * smart, applying an exponential backoff until @timeout_us is reached.
 * @out_val: if not NULL, points where to store the last unmasked value
 * @atomic: needs to be true if calling from an atomic context
 *
 * This function polls for the desired masked value and returns zero on success
 * or -ETIMEDOUT if timed out.
 *
 * Note that @timeout_us represents the minimum amount of time to wait before
 * giving up. The actual time taken by this function can be a little more than
 * @timeout_us for different reasons, especially in non-atomic contexts. Thus,
 * it is possible that this function succeeds even after @timeout_us has passed.
 */
int xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
		   u32 *out_val, bool atomic)
{
	return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, true);
}
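
/*
 * Example usage of xe_mmio_wait32() (an illustrative sketch, not driver
 * code): poll until a hypothetical STATUS_REG reports STATUS_READY,
 * giving up after 10ms:
 *
 *	u32 val;
 *
 *	if (xe_mmio_wait32(mmio, STATUS_REG, STATUS_READY, STATUS_READY,
 *			   10 * USEC_PER_MSEC, &val, false))
 *		drm_err(&xe->drm, "not ready, last status %#x\n", val);
 *
 * STATUS_REG and STATUS_READY are hypothetical names; @out_val receives
 * the last raw (unmasked) value read from the register.
 */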

/**
 * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
 * @mmio: MMIO target
 * @reg: register to read value from
 * @mask: mask to be applied to the value read from the register
 * @val: value not to be matched after applying the mask
 * @timeout_us: time out after this period of time
 * @out_val: if not NULL, points where to store the last unmasked value
 * @atomic: needs to be true if calling from an atomic context
 *
 * This function works exactly like xe_mmio_wait32() with the exception that
 * @val is expected not to be matched.
 */
int xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
		       u32 *out_val, bool atomic)
{
	return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, false);
}