xref: /linux/drivers/gpu/drm/xe/xe_pat.c (revision 257ca10c7317d4a424e48bb95d14ca53a1f1dd6f)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_pat.h"

#include <drm/xe_drm.h>

#include "regs/xe_reg_defs.h"
#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
#include "xe_mmio.h"

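/*
 * The per-index PAT registers are split across two even-spaced ranges:
 * _PICK_EVEN_2RANGES below maps indices 0-7 onto 0x4800-0x481c and
 * indices 8 and up onto 0x4848 onward.  The ATS and PTA_MODE registers
 * sit outside the indexed range and are handled separately.
 */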
#define _PAT_ATS				0x47fc
#define _PAT_INDEX(index)			_PICK_EVEN_2RANGES(index, 8, \
							   0x4800, 0x4804, \
							   0x4848, 0x484c)
#define _PAT_PTA				0x4820

#define XE2_NO_PROMOTE				REG_BIT(10)
#define XE2_COMP_EN				REG_BIT(9)
#define XE2_L3_CLOS				REG_GENMASK(7, 6)
#define XE2_L3_POLICY				REG_GENMASK(5, 4)
#define XE2_L4_POLICY				REG_GENMASK(3, 2)
#define XE2_COH_MODE				REG_GENMASK(1, 0)

#define XELPG_L4_POLICY_MASK			REG_GENMASK(3, 2)
#define XELPG_PAT_3_UC				REG_FIELD_PREP(XELPG_L4_POLICY_MASK, 3)
#define XELPG_PAT_1_WT				REG_FIELD_PREP(XELPG_L4_POLICY_MASK, 1)
#define XELPG_PAT_0_WB				REG_FIELD_PREP(XELPG_L4_POLICY_MASK, 0)
#define XELPG_INDEX_COH_MODE_MASK		REG_GENMASK(1, 0)
#define XELPG_3_COH_2W				REG_FIELD_PREP(XELPG_INDEX_COH_MODE_MASK, 3)
#define XELPG_2_COH_1W				REG_FIELD_PREP(XELPG_INDEX_COH_MODE_MASK, 2)
#define XELPG_0_COH_NON				REG_FIELD_PREP(XELPG_INDEX_COH_MODE_MASK, 0)

#define XEHPC_CLOS_LEVEL_MASK			REG_GENMASK(3, 2)
#define XEHPC_PAT_CLOS(x)			REG_FIELD_PREP(XEHPC_CLOS_LEVEL_MASK, x)

#define XELP_MEM_TYPE_MASK			REG_GENMASK(1, 0)
#define XELP_PAT_WB				REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 3)
#define XELP_PAT_WT				REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 2)
#define XELP_PAT_WC				REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 1)
#define XELP_PAT_UC				REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 0)

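/* Human-readable names for the XELP_MEM_TYPE_MASK field values (0=UC .. 3=WB) */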
static const char *XELP_MEM_TYPE_STR_MAP[] = { "UC", "WC", "WT", "WB" };

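/*
 * Per-platform hooks: how to program the PAT table into the hardware for
 * the primary and (where present) media GT, and how to dump the live
 * register contents for debugging.
 */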
struct xe_pat_ops {
	void (*program_graphics)(struct xe_gt *gt, const struct xe_pat_table_entry table[],
				 int n_entries);
	void (*program_media)(struct xe_gt *gt, const struct xe_pat_table_entry table[],
			      int n_entries);
	void (*dump)(struct xe_gt *gt, struct drm_printer *p);
};

static const struct xe_pat_table_entry xelp_pat_table[] = {
	[0] = { XELP_PAT_WB, XE_COH_AT_LEAST_1WAY },
	[1] = { XELP_PAT_WC, XE_COH_NONE },
	[2] = { XELP_PAT_WT, XE_COH_NONE },
	[3] = { XELP_PAT_UC, XE_COH_NONE },
};

static const struct xe_pat_table_entry xehpc_pat_table[] = {
	[0] = { XELP_PAT_UC, XE_COH_NONE },
	[1] = { XELP_PAT_WC, XE_COH_NONE },
	[2] = { XELP_PAT_WT, XE_COH_NONE },
	[3] = { XELP_PAT_WB, XE_COH_AT_LEAST_1WAY },
	[4] = { XEHPC_PAT_CLOS(1) | XELP_PAT_WT, XE_COH_NONE },
	[5] = { XEHPC_PAT_CLOS(1) | XELP_PAT_WB, XE_COH_AT_LEAST_1WAY },
	[6] = { XEHPC_PAT_CLOS(2) | XELP_PAT_WT, XE_COH_NONE },
	[7] = { XEHPC_PAT_CLOS(2) | XELP_PAT_WB, XE_COH_AT_LEAST_1WAY },
};

static const struct xe_pat_table_entry xelpg_pat_table[] = {
	[0] = { XELPG_PAT_0_WB, XE_COH_NONE },
	[1] = { XELPG_PAT_1_WT, XE_COH_NONE },
	[2] = { XELPG_PAT_3_UC, XE_COH_NONE },
	[3] = { XELPG_PAT_0_WB | XELPG_2_COH_1W, XE_COH_AT_LEAST_1WAY },
	[4] = { XELPG_PAT_0_WB | XELPG_3_COH_2W, XE_COH_AT_LEAST_1WAY },
};

/*
 * The Xe2 table is getting large/complicated so it's easier to review if
 * provided in a form that exactly matches the bspec's formatting.  The
 * meanings of the fields here are:
 *   - no_promote:  0=promotable, 1=no promote
 *   - comp_en:     0=disable, 1=enable
 *   - l3clos:      L3 class of service (0-3)
 *   - l3_policy:   0=WB, 1=XD ("WB - Transient Display"), 3=UC
 *   - l4_policy:   0=WB, 1=WT, 3=UC
 *   - coh_mode:    0=no snoop, 2=1-way coherent, 3=2-way coherent
 *
 * Reserved entries should be programmed with the maximum caching, minimum
 * coherency (which matches an all-0's encoding), so we can just omit them
 * in the table.
 */
#define XE2_PAT(no_promote, comp_en, l3clos, l3_policy, l4_policy, __coh_mode) \
	{ \
		.value = (no_promote ? XE2_NO_PROMOTE : 0) | \
			(comp_en ? XE2_COMP_EN : 0) | \
			REG_FIELD_PREP(XE2_L3_CLOS, l3clos) | \
			REG_FIELD_PREP(XE2_L3_POLICY, l3_policy) | \
			REG_FIELD_PREP(XE2_L4_POLICY, l4_policy) | \
			REG_FIELD_PREP(XE2_COH_MODE, __coh_mode), \
		.coh_mode = __coh_mode ? XE_COH_AT_LEAST_1WAY : XE_COH_NONE \
	}

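/*
 * For example, XE2_PAT( 0, 0, 0, 0, 3, 3 ) (entry [2] below) packs
 * l4_policy=3 into bits 3:2 and coh_mode=3 into bits 1:0, giving
 * .value = 0xf with .coh_mode = XE_COH_AT_LEAST_1WAY.
 */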
static const struct xe_pat_table_entry xe2_pat_table[] = {
	[ 0] = XE2_PAT( 0, 0, 0, 0, 3, 0 ),
	[ 1] = XE2_PAT( 0, 0, 0, 0, 3, 2 ),
	[ 2] = XE2_PAT( 0, 0, 0, 0, 3, 3 ),
	[ 3] = XE2_PAT( 0, 0, 0, 3, 3, 0 ),
	[ 4] = XE2_PAT( 0, 0, 0, 3, 0, 2 ),
	[ 5] = XE2_PAT( 0, 0, 0, 3, 3, 2 ),
	[ 6] = XE2_PAT( 1, 0, 0, 1, 3, 0 ),
	[ 7] = XE2_PAT( 0, 0, 0, 3, 0, 3 ),
	[ 8] = XE2_PAT( 0, 0, 0, 3, 0, 0 ),
	[ 9] = XE2_PAT( 0, 1, 0, 0, 3, 0 ),
	[10] = XE2_PAT( 0, 1, 0, 3, 0, 0 ),
	[11] = XE2_PAT( 1, 1, 0, 1, 3, 0 ),
	[12] = XE2_PAT( 0, 1, 0, 3, 3, 0 ),
	[13] = XE2_PAT( 0, 0, 0, 0, 0, 0 ),
	[14] = XE2_PAT( 0, 1, 0, 0, 0, 0 ),
	[15] = XE2_PAT( 1, 1, 0, 1, 1, 0 ),
	/* 16..19 are reserved; leave set to all 0's */
	[20] = XE2_PAT( 0, 0, 1, 0, 3, 0 ),
	[21] = XE2_PAT( 0, 1, 1, 0, 3, 0 ),
	[22] = XE2_PAT( 0, 0, 1, 0, 3, 2 ),
	[23] = XE2_PAT( 0, 0, 1, 0, 3, 3 ),
	[24] = XE2_PAT( 0, 0, 2, 0, 3, 0 ),
	[25] = XE2_PAT( 0, 1, 2, 0, 3, 0 ),
	[26] = XE2_PAT( 0, 0, 2, 0, 3, 2 ),
	[27] = XE2_PAT( 0, 0, 2, 0, 3, 3 ),
	[28] = XE2_PAT( 0, 0, 3, 0, 3, 0 ),
	[29] = XE2_PAT( 0, 1, 3, 0, 3, 0 ),
	[30] = XE2_PAT( 0, 0, 3, 0, 3, 2 ),
	[31] = XE2_PAT( 0, 0, 3, 0, 3, 3 ),
};

/*
 * Special PAT values programmed outside the main table: the ATS entry is
 * written to the dedicated _PAT_ATS register by the Xe2 programming
 * functions below.
 */
static const struct xe_pat_table_entry xe2_pat_ats = XE2_PAT( 0, 0, 0, 0, 3, 3 );

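/**
 * xe_pat_index_get_coh_mode - Query the coherency mode of a PAT index
 * @xe: xe device
 * @pat_index: the PAT index to query
 *
 * Return: the minimum CPU:GPU coherency (an XE_COH_* value) that the
 * platform's PAT table guarantees for @pat_index.
 */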
u16 xe_pat_index_get_coh_mode(struct xe_device *xe, u16 pat_index)
{
	WARN_ON(pat_index >= xe->pat.n_entries);
	return xe->pat.table[pat_index].coh_mode;
}

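/*
 * Program the PAT table through regular MMIO, for platforms/GTs whose PAT
 * index registers are not multicast/replicated (MCR).
 */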
static void program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
			int n_entries)
{
	for (int i = 0; i < n_entries; i++) {
		struct xe_reg reg = XE_REG(_PAT_INDEX(i));

		xe_mmio_write32(gt, reg, table[i].value);
	}
}

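/*
 * MCR variant: on platforms where the PAT index registers are replicated
 * per hardware instance, write them through the multicast interface so
 * every instance observes the same value.
 */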
static void program_pat_mcr(struct xe_gt *gt, const struct xe_pat_table_entry table[],
			    int n_entries)
{
	for (int i = 0; i < n_entries; i++) {
		struct xe_reg_mcr reg_mcr = XE_REG_MCR(_PAT_INDEX(i));

		xe_gt_mcr_multicast_write(gt, reg_mcr, table[i].value);
	}
}

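/*
 * The dump functions below read back the live registers rather than the
 * cached software table, so they bracket the reads with a memory-access
 * reference and GT forcewake to keep the hardware awake.
 */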
static void xelp_dump(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_device *xe = gt_to_xe(gt);
	int i, err;

	xe_device_mem_access_get(xe);
	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		goto err_fw;

	drm_printf(p, "PAT table:\n");

	for (i = 0; i < xe->pat.n_entries; i++) {
		u32 pat = xe_mmio_read32(gt, XE_REG(_PAT_INDEX(i)));
		u8 mem_type = REG_FIELD_GET(XELP_MEM_TYPE_MASK, pat);

		drm_printf(p, "PAT[%2d] = %s (%#8x)\n", i,
			   XELP_MEM_TYPE_STR_MAP[mem_type], pat);
	}

	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_fw:
	xe_assert(xe, !err);
	xe_device_mem_access_put(xe);
}

static const struct xe_pat_ops xelp_pat_ops = {
	.program_graphics = program_pat,
	.dump = xelp_dump,
};

static void xehp_dump(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_device *xe = gt_to_xe(gt);
	int i, err;

	xe_device_mem_access_get(xe);
	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		goto err_fw;

	drm_printf(p, "PAT table:\n");

	for (i = 0; i < xe->pat.n_entries; i++) {
		u32 pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));
		u8 mem_type;

		mem_type = REG_FIELD_GET(XELP_MEM_TYPE_MASK, pat);

		drm_printf(p, "PAT[%2d] = %s (%#8x)\n", i,
			   XELP_MEM_TYPE_STR_MAP[mem_type], pat);
	}

	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_fw:
	xe_assert(xe, !err);
	xe_device_mem_access_put(xe);
}

static const struct xe_pat_ops xehp_pat_ops = {
	.program_graphics = program_pat_mcr,
	.dump = xehp_dump,
};

static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_device *xe = gt_to_xe(gt);
	int i, err;

	xe_device_mem_access_get(xe);
	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		goto err_fw;

	drm_printf(p, "PAT table:\n");

	for (i = 0; i < xe->pat.n_entries; i++) {
		u32 pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));

		drm_printf(p, "PAT[%2d] = [ %u, %u ] (%#8x)\n", i,
			   REG_FIELD_GET(XELP_MEM_TYPE_MASK, pat),
			   REG_FIELD_GET(XEHPC_CLOS_LEVEL_MASK, pat), pat);
	}

	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_fw:
	xe_assert(xe, !err);
	xe_device_mem_access_put(xe);
}

static const struct xe_pat_ops xehpc_pat_ops = {
	.program_graphics = program_pat_mcr,
	.dump = xehpc_dump,
};

static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_device *xe = gt_to_xe(gt);
	int i, err;

	xe_device_mem_access_get(xe);
	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		goto err_fw;

	drm_printf(p, "PAT table:\n");

	for (i = 0; i < xe->pat.n_entries; i++) {
		u32 pat;

		if (xe_gt_is_media_type(gt))
			pat = xe_mmio_read32(gt, XE_REG(_PAT_INDEX(i)));
		else
			pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));

		drm_printf(p, "PAT[%2d] = [ %u, %u ] (%#8x)\n", i,
			   REG_FIELD_GET(XELPG_L4_POLICY_MASK, pat),
			   REG_FIELD_GET(XELPG_INDEX_COH_MODE_MASK, pat), pat);
	}

	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_fw:
	xe_assert(xe, !err);
	xe_device_mem_access_put(xe);
}

/*
 * SAMedia register offsets are adjusted by the write methods and they target
 * registers that are not MCR, while for normal GT they are MCR
 */
static const struct xe_pat_ops xelpg_pat_ops = {
	.program_graphics = program_pat_mcr,
	.program_media = program_pat,
	.dump = xelpg_dump,
};
306 
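/*
 * Xe2 additionally programs the special ATS entry, which lives outside the
 * indexed PAT range: via MCR writes on the primary GT (Xe2_LPG) and via
 * regular MMIO on the media GT (Xe2_LPM).
 */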
static void xe2lpg_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
			       int n_entries)
{
	program_pat_mcr(gt, table, n_entries);
	xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_ATS), xe2_pat_ats.value);
}

static void xe2lpm_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
			       int n_entries)
{
	program_pat(gt, table, n_entries);
	xe_mmio_write32(gt, XE_REG(_PAT_ATS), xe2_pat_ats.value);
}

static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_device *xe = gt_to_xe(gt);
	int i, err;
	u32 pat;

	xe_device_mem_access_get(xe);
	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		goto err_fw;

	drm_printf(p, "PAT table:\n");

	for (i = 0; i < xe->pat.n_entries; i++) {
		if (xe_gt_is_media_type(gt))
			pat = xe_mmio_read32(gt, XE_REG(_PAT_INDEX(i)));
		else
			pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));

		drm_printf(p, "PAT[%2d] = [ %u, %u, %u, %u, %u, %u ]  (%#8x)\n", i,
			   !!(pat & XE2_NO_PROMOTE),
			   !!(pat & XE2_COMP_EN),
			   REG_FIELD_GET(XE2_L3_CLOS, pat),
			   REG_FIELD_GET(XE2_L3_POLICY, pat),
			   REG_FIELD_GET(XE2_L4_POLICY, pat),
			   REG_FIELD_GET(XE2_COH_MODE, pat),
			   pat);
	}

	/*
	 * Also print PTA_MODE, which describes how the hardware accesses
	 * PPGTT entries.
	 */
	if (xe_gt_is_media_type(gt))
		pat = xe_mmio_read32(gt, XE_REG(_PAT_PTA));
	else
		pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_PTA));

	drm_printf(p, "Page Table Access:\n");
	drm_printf(p, "PTA_MODE= [ %u, %u, %u, %u, %u, %u ]  (%#8x)\n",
		   !!(pat & XE2_NO_PROMOTE),
		   !!(pat & XE2_COMP_EN),
		   REG_FIELD_GET(XE2_L3_CLOS, pat),
		   REG_FIELD_GET(XE2_L3_POLICY, pat),
		   REG_FIELD_GET(XE2_L4_POLICY, pat),
		   REG_FIELD_GET(XE2_COH_MODE, pat),
		   pat);

	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_fw:
	xe_assert(xe, !err);
	xe_device_mem_access_put(xe);
}

static const struct xe_pat_ops xe2_pat_ops = {
	.program_graphics = xe2lpg_program_pat,
	.program_media = xe2lpm_program_pat,
	.dump = xe2_dump,
};

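/**
 * xe_pat_init_early - Select the platform's PAT table and programming hooks
 * @xe: xe device
 *
 * Picks the PAT table, the programming/dump hooks and the commonly used
 * PAT indices (xe->pat.idx[]) based on the platform and graphics IP
 * version.  Expected to run once per device, before the per-GT
 * xe_pat_init().
 */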
void xe_pat_init_early(struct xe_device *xe)
{
	if (GRAPHICS_VER(xe) == 20) {
		xe->pat.ops = &xe2_pat_ops;
		xe->pat.table = xe2_pat_table;
		xe->pat.n_entries = ARRAY_SIZE(xe2_pat_table);
		xe->pat.idx[XE_CACHE_NONE] = 3;
		xe->pat.idx[XE_CACHE_WT] = 15;
		xe->pat.idx[XE_CACHE_WB] = 2;
		xe->pat.idx[XE_CACHE_NONE_COMPRESSION] = 12; /* Applicable on xe2 and beyond */
	} else if (xe->info.platform == XE_METEORLAKE) {
		xe->pat.ops = &xelpg_pat_ops;
		xe->pat.table = xelpg_pat_table;
		xe->pat.n_entries = ARRAY_SIZE(xelpg_pat_table);
		xe->pat.idx[XE_CACHE_NONE] = 2;
		xe->pat.idx[XE_CACHE_WT] = 1;
		xe->pat.idx[XE_CACHE_WB] = 3;
	} else if (xe->info.platform == XE_PVC) {
		xe->pat.ops = &xehpc_pat_ops;
		xe->pat.table = xehpc_pat_table;
		xe->pat.n_entries = ARRAY_SIZE(xehpc_pat_table);
		xe->pat.idx[XE_CACHE_NONE] = 0;
		xe->pat.idx[XE_CACHE_WT] = 2;
		xe->pat.idx[XE_CACHE_WB] = 3;
	} else if (xe->info.platform == XE_DG2) {
		/*
		 * Table is the same as previous platforms, but programming
		 * method has changed.
		 */
		xe->pat.ops = &xehp_pat_ops;
		xe->pat.table = xelp_pat_table;
		xe->pat.n_entries = ARRAY_SIZE(xelp_pat_table);
		xe->pat.idx[XE_CACHE_NONE] = 3;
		xe->pat.idx[XE_CACHE_WT] = 2;
		xe->pat.idx[XE_CACHE_WB] = 0;
	} else if (GRAPHICS_VERx100(xe) <= 1210) {
		WARN_ON_ONCE(!IS_DGFX(xe) && !xe->info.has_llc);
		xe->pat.ops = &xelp_pat_ops;
		xe->pat.table = xelp_pat_table;
		xe->pat.n_entries = ARRAY_SIZE(xelp_pat_table);
		xe->pat.idx[XE_CACHE_NONE] = 3;
		xe->pat.idx[XE_CACHE_WT] = 2;
		xe->pat.idx[XE_CACHE_WB] = 0;
	} else {
		/*
		 * Going forward we expect to need new PAT settings for most
		 * new platforms; failure to provide a new table can easily
		 * lead to subtle, hard-to-debug problems.  If none of the
		 * conditions above match the platform we're running on we'll
		 * raise an error rather than trying to silently inherit the
		 * most recent platform's behavior.
		 */
		drm_err(&xe->drm, "Missing PAT table for platform with graphics version %d.%02d!\n",
			GRAPHICS_VER(xe), GRAPHICS_VERx100(xe) % 100);
	}
}

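/**
 * xe_pat_init - Program the PAT table into the hardware
 * @gt: GT to program
 *
 * Writes the table selected by xe_pat_init_early() into @gt, using the
 * media or graphics programming hook as appropriate for the GT type.
 */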
void xe_pat_init(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	if (!xe->pat.ops)
		return;

	if (xe_gt_is_media_type(gt))
		xe->pat.ops->program_media(gt, xe->pat.table, xe->pat.n_entries);
	else
		xe->pat.ops->program_graphics(gt, xe->pat.table, xe->pat.n_entries);
}

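/**
 * xe_pat_dump - Dump the live PAT register contents
 * @gt: GT to dump
 * @p: drm_printer to emit the decoded entries to (e.g. from debugfs)
 */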
void xe_pat_dump(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_device *xe = gt_to_xe(gt);

	/* Platforms rejected by xe_pat_init_early() have no ops at all */
	if (!xe->pat.ops || !xe->pat.ops->dump)
		return;

	xe->pat.ops->dump(gt, p);
}