xref: /linux/drivers/gpu/drm/xe/xe_mmio.c (revision b8e4b0529d59a3ccd0b25a31d3cfc8b0f3b34068)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021-2023 Intel Corporation
4  */
5 
6 #include "xe_mmio.h"
7 
8 #include <linux/delay.h>
9 #include <linux/io-64-nonatomic-lo-hi.h>
10 #include <linux/minmax.h>
11 #include <linux/pci.h>
12 
13 #include <drm/drm_managed.h>
14 #include <drm/drm_print.h>
15 
16 #include "regs/xe_bars.h"
17 #include "regs/xe_regs.h"
18 #include "xe_device.h"
19 #include "xe_gt.h"
20 #include "xe_gt_printk.h"
21 #include "xe_gt_sriov_vf.h"
22 #include "xe_macros.h"
23 #include "xe_sriov.h"
24 #include "xe_trace.h"
25 
26 static void tiles_fini(void *arg)
27 {
28 	struct xe_device *xe = arg;
29 	struct xe_tile *tile;
30 	int id;
31 
32 	for_each_tile(tile, xe, id)
33 		tile->mmio.regs = NULL;
34 }
35 
/*
 * xe_mmio_probe_tiles() - Assign per-tile MMIO register windows.
 * @xe: the &xe_device
 *
 * Carves the already-mapped BAR (xe->mmio.regs, see xe_mmio_init()) into
 * fixed 16MB slices, one per tile, and — when has_mmio_ext is set — also
 * distributes the MMIO extension space that follows those slices.  May
 * reduce xe->info.tile_count (and gt_count) based on the MTCFG register.
 *
 * Return: 0 on success, negative errno from devm_add_action_or_reset().
 */
int xe_mmio_probe_tiles(struct xe_device *xe)
{
	size_t tile_mmio_size = SZ_16M, tile_mmio_ext_size = xe->info.tile_mmio_ext_size;
	u8 id, tile_count = xe->info.tile_count;
	struct xe_gt *gt = xe_root_mmio_gt(xe);
	struct xe_tile *tile;
	void __iomem *regs;
	u32 mtcfg;

	/* Single-tile: root tile was set up in xe_mmio_init(); nothing to slice. */
	if (tile_count == 1)
		goto add_mmio_ext;

	if (!xe->info.skip_mtcfg) {
		/* Hardware may report fewer tiles than the platform maximum. */
		mtcfg = xe_mmio_read64_2x32(gt, XEHP_MTCFG_ADDR);
		tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;
		if (tile_count < xe->info.tile_count) {
			drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n",
					xe->info.tile_count, tile_count);
			xe->info.tile_count = tile_count;

			/*
			 * FIXME: Needs some work for standalone media, but should be impossible
			 * with multi-tile for now.
			 */
			xe->info.gt_count = xe->info.tile_count;
		}
	}

	/* Hand each tile its 16MB slice of the contiguous register space. */
	regs = xe->mmio.regs;
	for_each_tile(tile, xe, id) {
		tile->mmio.size = tile_mmio_size;
		tile->mmio.regs = regs;
		regs += tile_mmio_size;
	}

add_mmio_ext:
	/*
	 * By design, there's a contiguous multi-tile MMIO space (16MB hard coded per tile).
	 * When supported, there could be an additional contiguous multi-tile MMIO extension
	 * space ON TOP of it, and hence the necessity for distinguished MMIO spaces.
	 */
	if (xe->info.has_mmio_ext) {
		/* Extension space starts right after the last tile's 16MB slice. */
		regs = xe->mmio.regs + tile_mmio_size * tile_count;

		for_each_tile(tile, xe, id) {
			tile->mmio_ext.size = tile_mmio_ext_size;
			tile->mmio_ext.regs = regs;

			regs += tile_mmio_ext_size;
		}
	}

	return devm_add_action_or_reset(xe->drm.dev, tiles_fini, xe);
}
90 
91 static void mmio_fini(void *arg)
92 {
93 	struct xe_device *xe = arg;
94 
95 	pci_iounmap(to_pci_dev(xe->drm.dev), xe->mmio.regs);
96 	xe->mmio.regs = NULL;
97 }
98 
99 int xe_mmio_init(struct xe_device *xe)
100 {
101 	struct xe_tile *root_tile = xe_device_get_root_tile(xe);
102 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
103 	const int mmio_bar = 0;
104 
105 	/*
106 	 * Map the entire BAR.
107 	 * The first 16MB of the BAR, belong to the root tile, and include:
108 	 * registers (0-4MB), reserved space (4MB-8MB) and GGTT (8MB-16MB).
109 	 */
110 	xe->mmio.size = pci_resource_len(pdev, mmio_bar);
111 	xe->mmio.regs = pci_iomap(pdev, mmio_bar, GTTMMADR_BAR);
112 	if (xe->mmio.regs == NULL) {
113 		drm_err(&xe->drm, "failed to map registers\n");
114 		return -EIO;
115 	}
116 
117 	/* Setup first tile; other tiles (if present) will be setup later. */
118 	root_tile->mmio.size = SZ_16M;
119 	root_tile->mmio.regs = xe->mmio.regs;
120 
121 	return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe);
122 }
123 
124 u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
125 {
126 	struct xe_tile *tile = gt_to_tile(gt);
127 	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
128 	u8 val;
129 
130 	val = readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
131 	trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
132 
133 	return val;
134 }
135 
136 u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg)
137 {
138 	struct xe_tile *tile = gt_to_tile(gt);
139 	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
140 	u16 val;
141 
142 	val = readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
143 	trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
144 
145 	return val;
146 }
147 
148 void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
149 {
150 	struct xe_tile *tile = gt_to_tile(gt);
151 	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
152 
153 	trace_xe_reg_rw(gt, true, addr, val, sizeof(val));
154 	writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
155 }
156 
157 u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
158 {
159 	struct xe_tile *tile = gt_to_tile(gt);
160 	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
161 	u32 val;
162 
163 	if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt)))
164 		val = xe_gt_sriov_vf_read32(gt, reg);
165 	else
166 		val = readl((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
167 
168 	trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
169 
170 	return val;
171 }
172 
173 u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set)
174 {
175 	u32 old, reg_val;
176 
177 	old = xe_mmio_read32(gt, reg);
178 	reg_val = (old & ~clr) | set;
179 	xe_mmio_write32(gt, reg, reg_val);
180 
181 	return old;
182 }
183 
184 int xe_mmio_write32_and_verify(struct xe_gt *gt,
185 			       struct xe_reg reg, u32 val, u32 mask, u32 eval)
186 {
187 	u32 reg_val;
188 
189 	xe_mmio_write32(gt, reg, val);
190 	reg_val = xe_mmio_read32(gt, reg);
191 
192 	return (reg_val & mask) != eval ? -EINVAL : 0;
193 }
194 
195 bool xe_mmio_in_range(const struct xe_gt *gt,
196 		      const struct xe_mmio_range *range,
197 		      struct xe_reg reg)
198 {
199 	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
200 
201 	return range && addr >= range->start && addr <= range->end;
202 }
203 
/**
 * xe_mmio_read64_2x32() - Read a 64-bit register as two 32-bit reads
 * @gt: MMIO target GT
 * @reg: register to read value from
 *
 * Although Intel GPUs have some 64-bit registers, the hardware officially
 * only supports GTTMMADR register reads of 32 bits or smaller.  Even if
 * a readq operation may return a reasonable value, that violation of the
 * spec shouldn't be relied upon and all 64-bit register reads should be
 * performed as two 32-bit reads of the upper and lower dwords.
 *
 * When reading registers that may be changing (such as
 * counters), a rollover of the lower dword between the two 32-bit reads
 * can be problematic.  This function attempts to ensure the upper dword has
 * stabilized before returning the 64-bit value.
 *
 * Note that because this function may re-read the register multiple times
 * while waiting for the value to stabilize it should not be used to read
 * any registers where read operations have side effects.
 *
 * Returns the value of the 64-bit register.
 */
u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg)
{
	/* Upper dword lives at the next 32-bit offset. */
	struct xe_reg reg_udw = { .addr = reg.addr + 0x4 };
	u32 ldw, udw, oldudw, retries;

	reg.addr = xe_mmio_adjusted_addr(gt, reg.addr);
	reg_udw.addr = xe_mmio_adjusted_addr(gt, reg_udw.addr);

	/* we shouldn't adjust just one register address */
	xe_gt_assert(gt, reg_udw.addr == reg.addr + 0x4);

	/*
	 * Re-read until the upper dword is unchanged across a full
	 * lower+upper pair, i.e. no rollover occurred mid-read.  Give up
	 * after 5 attempts and warn below.
	 */
	oldudw = xe_mmio_read32(gt, reg_udw);
	for (retries = 5; retries; --retries) {
		ldw = xe_mmio_read32(gt, reg);
		udw = xe_mmio_read32(gt, reg_udw);

		if (udw == oldudw)
			break;

		oldudw = udw;
	}

	xe_gt_WARN(gt, retries == 0,
		   "64-bit read of %#x did not stabilize\n", reg.addr);

	return (u64)udw << 32 | ldw;
}
253 
/**
 * xe_mmio_wait32() - Wait for a register to match the desired masked value
 * @gt: MMIO target GT
 * @reg: register to read value from
 * @mask: mask to be applied to the value read from the register
 * @val: desired value after applying the mask
 * @timeout_us: time out after this period of time. Wait logic tries to be
 * smart, applying an exponential backoff until @timeout_us is reached.
 * @out_val: if not NULL, points where to store the last unmasked value
 * @atomic: needs to be true if calling from an atomic context
 *
 * This function polls for the desired masked value and returns zero on success
 * or -ETIMEDOUT if timed out.
 *
 * Note that @timeout_us represents the minimum amount of time to wait before
 * giving up. The actual time taken by this function can be a little more than
 * @timeout_us for different reasons, specially in non-atomic contexts. Thus,
 * it is possible that this function succeeds even after @timeout_us has passed.
 */
int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
		   u32 *out_val, bool atomic)
{
	ktime_t cur = ktime_get_raw();
	const ktime_t end = ktime_add_us(cur, timeout_us);
	int ret = -ETIMEDOUT;
	s64 wait = 10;	/* initial delay in us; doubled each iteration */
	u32 read;

	for (;;) {
		read = xe_mmio_read32(gt, reg);
		if ((read & mask) == val) {
			ret = 0;
			break;
		}

		cur = ktime_get_raw();
		if (!ktime_before(cur, end))
			break;

		/* Clamp the last delay so we don't sleep past the deadline. */
		if (ktime_after(ktime_add_us(cur, wait), end))
			wait = ktime_us_delta(end, cur);

		if (atomic)
			udelay(wait);
		else
			usleep_range(wait, wait << 1);
		wait <<= 1;	/* exponential backoff */
	}

	/*
	 * One last read after the deadline: the condition may have become
	 * true while we were sleeping, which still counts as success.
	 */
	if (ret != 0) {
		read = xe_mmio_read32(gt, reg);
		if ((read & mask) == val)
			ret = 0;
	}

	if (out_val)
		*out_val = read;

	return ret;
}
314 
/**
 * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
 * @gt: MMIO target GT
 * @reg: register to read value from
 * @mask: mask to be applied to the value read from the register
 * @val: value to match after applying the mask
 * @timeout_us: time out after this period of time. Wait logic tries to be
 * smart, applying an exponential backoff until @timeout_us is reached.
 * @out_val: if not NULL, points where to store the last unmasked value
 * @atomic: needs to be true if calling from an atomic context
 *
 * This function polls for a masked value to change from a given value and
 * returns zero on success or -ETIMEDOUT if timed out.
 *
 * Note that @timeout_us represents the minimum amount of time to wait before
 * giving up. The actual time taken by this function can be a little more than
 * @timeout_us for different reasons, specially in non-atomic contexts. Thus,
 * it is possible that this function succeeds even after @timeout_us has passed.
 */
int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
		       u32 *out_val, bool atomic)
{
	ktime_t cur = ktime_get_raw();
	const ktime_t end = ktime_add_us(cur, timeout_us);
	int ret = -ETIMEDOUT;
	s64 wait = 10;	/* initial delay in us; doubled each iteration */
	u32 read;

	/* Same structure as xe_mmio_wait32(), but with the match condition inverted. */
	for (;;) {
		read = xe_mmio_read32(gt, reg);
		if ((read & mask) != val) {
			ret = 0;
			break;
		}

		cur = ktime_get_raw();
		if (!ktime_before(cur, end))
			break;

		/* Clamp the last delay so we don't sleep past the deadline. */
		if (ktime_after(ktime_add_us(cur, wait), end))
			wait = ktime_us_delta(end, cur);

		if (atomic)
			udelay(wait);
		else
			usleep_range(wait, wait << 1);
		wait <<= 1;	/* exponential backoff */
	}

	/*
	 * One last read after the deadline: the value may have changed
	 * while we were sleeping, which still counts as success.
	 */
	if (ret != 0) {
		read = xe_mmio_read32(gt, reg);
		if ((read & mask) != val)
			ret = 0;
	}

	if (out_val)
		*out_val = read;

	return ret;
}
375