xref: /linux/drivers/gpu/drm/xe/xe_pat.c (revision 1b5d39e6672fdee158c3306f5cb2df8975c77e5a)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 
6 #include "xe_pat.h"
7 
8 #include <uapi/drm/xe_drm.h>
9 
10 #include <generated/xe_wa_oob.h>
11 
12 #include "regs/xe_reg_defs.h"
13 #include "xe_assert.h"
14 #include "xe_device.h"
15 #include "xe_force_wake.h"
16 #include "xe_gt.h"
17 #include "xe_gt_mcr.h"
18 #include "xe_mmio.h"
19 #include "xe_sriov.h"
20 #include "xe_wa.h"
21 
/* Stand-alone PAT register for PCIe ATS/PASID accesses */
#define _PAT_ATS				0x47fc
/* Indexed PAT table registers, split across two MMIO ranges of 8 each */
#define _PAT_INDEX(index)			_PICK_EVEN_2RANGES(index, 8, \
								   0x4800, 0x4804, \
								   0x4848, 0x484c)
/* Stand-alone PAT register controlling page-table (PTA) accesses */
#define _PAT_PTA				0x4820

/* Xe2+ PAT entry field layout */
#define XE2_NO_PROMOTE				REG_BIT(10)
#define XE2_COMP_EN				REG_BIT(9)
#define XE2_L3_CLOS				REG_GENMASK(7, 6)
#define XE2_L3_POLICY				REG_GENMASK(5, 4)
#define XE2_L4_POLICY				REG_GENMASK(3, 2)
#define XE2_COH_MODE				REG_GENMASK(1, 0)

/* Xe_LPG (MTL) PAT entry fields: L4 caching policy and coherency mode */
#define XELPG_L4_POLICY_MASK			REG_GENMASK(3, 2)
#define XELPG_PAT_3_UC				REG_FIELD_PREP(XELPG_L4_POLICY_MASK, 3)
#define XELPG_PAT_1_WT				REG_FIELD_PREP(XELPG_L4_POLICY_MASK, 1)
#define XELPG_PAT_0_WB				REG_FIELD_PREP(XELPG_L4_POLICY_MASK, 0)
#define XELPG_INDEX_COH_MODE_MASK		REG_GENMASK(1, 0)
#define XELPG_3_COH_2W				REG_FIELD_PREP(XELPG_INDEX_COH_MODE_MASK, 3)
#define XELPG_2_COH_1W				REG_FIELD_PREP(XELPG_INDEX_COH_MODE_MASK, 2)
#define XELPG_0_COH_NON				REG_FIELD_PREP(XELPG_INDEX_COH_MODE_MASK, 0)

/* Xe_HPC (PVC) class-of-service level */
#define XEHPC_CLOS_LEVEL_MASK			REG_GENMASK(3, 2)
#define XEHPC_PAT_CLOS(x)			REG_FIELD_PREP(XEHPC_CLOS_LEVEL_MASK, x)

/* Xe_LP memory type field; values index XELP_MEM_TYPE_STR_MAP below */
#define XELP_MEM_TYPE_MASK			REG_GENMASK(1, 0)
#define XELP_PAT_WB				REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 3)
#define XELP_PAT_WT				REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 2)
#define XELP_PAT_WC				REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 1)
#define XELP_PAT_UC				REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 0)

/* Buffer size for "PAT[nn] " labels produced by xe_pat_index_label() */
#define PAT_LABEL_LEN 20

/* Human-readable names for XELP_MEM_TYPE_MASK values (index == field value) */
static const char *XELP_MEM_TYPE_STR_MAP[] = { "UC", "WC", "WT", "WB" };
56 
static void xe_pat_index_label(char *label, size_t len, int index)
{
	/* Render the fixed-width "PAT[nn] " prefix used by the dump helpers. */
	(void)snprintf(label, len, "PAT[%2d] ", index);
}
61 
62 static void xelp_pat_entry_dump(struct drm_printer *p, int index, u32 pat)
63 {
64 	u8 mem_type = REG_FIELD_GET(XELP_MEM_TYPE_MASK, pat);
65 
66 	drm_printf(p, "PAT[%2d] = %s (%#8x)\n", index,
67 		   XELP_MEM_TYPE_STR_MAP[mem_type], pat);
68 }
69 
70 static void xehpc_pat_entry_dump(struct drm_printer *p, int index, u32 pat)
71 {
72 	drm_printf(p, "PAT[%2d] = [ %u, %u ] (%#8x)\n", index,
73 		   REG_FIELD_GET(XELP_MEM_TYPE_MASK, pat),
74 		   REG_FIELD_GET(XEHPC_CLOS_LEVEL_MASK, pat), pat);
75 }
76 
77 static void xelpg_pat_entry_dump(struct drm_printer *p, int index, u32 pat)
78 {
79 	drm_printf(p, "PAT[%2d] = [ %u, %u ] (%#8x)\n", index,
80 		   REG_FIELD_GET(XELPG_L4_POLICY_MASK, pat),
81 		   REG_FIELD_GET(XELPG_INDEX_COH_MODE_MASK, pat), pat);
82 }
83 
/* Per-platform hooks for programming and dumping the PAT tables */
struct xe_pat_ops {
	/* Program the PAT table on a (primary) graphics GT */
	void (*program_graphics)(struct xe_gt *gt, const struct xe_pat_table_entry table[],
				 int n_entries);
	/* Program the PAT table on a standalone media GT (may be NULL pre-Xe_LPG) */
	void (*program_media)(struct xe_gt *gt, const struct xe_pat_table_entry table[],
			      int n_entries);
	/* Dump the hardware PAT state; returns 0 or a negative error code */
	int (*dump)(struct xe_gt *gt, struct drm_printer *p);
};
91 
/* Xe_LP (and DG2) PAT table: memory type only, WB entry is coherent */
static const struct xe_pat_table_entry xelp_pat_table[] = {
	[0] = { XELP_PAT_WB, XE_COH_AT_LEAST_1WAY },
	[1] = { XELP_PAT_WC, XE_COH_NONE },
	[2] = { XELP_PAT_WT, XE_COH_NONE },
	[3] = { XELP_PAT_UC, XE_COH_NONE },
};
98 
/* Xe_HPC (PVC) PAT table: memory type plus class-of-service variants */
static const struct xe_pat_table_entry xehpc_pat_table[] = {
	[0] = { XELP_PAT_UC, XE_COH_NONE },
	[1] = { XELP_PAT_WC, XE_COH_NONE },
	[2] = { XELP_PAT_WT, XE_COH_NONE },
	[3] = { XELP_PAT_WB, XE_COH_AT_LEAST_1WAY },
	[4] = { XEHPC_PAT_CLOS(1) | XELP_PAT_WT, XE_COH_NONE },
	[5] = { XEHPC_PAT_CLOS(1) | XELP_PAT_WB, XE_COH_AT_LEAST_1WAY },
	[6] = { XEHPC_PAT_CLOS(2) | XELP_PAT_WT, XE_COH_NONE },
	[7] = { XEHPC_PAT_CLOS(2) | XELP_PAT_WB, XE_COH_AT_LEAST_1WAY },
};
109 
/* Xe_LPG (MTL) PAT table: L4 policy plus optional 1-way/2-way coherency */
static const struct xe_pat_table_entry xelpg_pat_table[] = {
	[0] = { XELPG_PAT_0_WB, XE_COH_NONE },
	[1] = { XELPG_PAT_1_WT, XE_COH_NONE },
	[2] = { XELPG_PAT_3_UC, XE_COH_NONE },
	[3] = { XELPG_PAT_0_WB | XELPG_2_COH_1W, XE_COH_AT_LEAST_1WAY },
	[4] = { XELPG_PAT_0_WB | XELPG_3_COH_2W, XE_COH_AT_LEAST_1WAY },
};
117 
118 /*
119  * The Xe2 table is getting large/complicated so it's easier to review if
120  * provided in a form that exactly matches the bspec's formatting.  The meaning
121  * of the fields here are:
122  *   - no_promote:  0=promotable, 1=no promote
123  *   - comp_en:     0=disable, 1=enable
124  *   - l3clos:      L3 class of service (0-3)
125  *   - l3_policy:   0=WB, 1=XD ("WB - Transient Display"), 3=UC
126  *   - l4_policy:   0=WB, 1=WT, 3=UC
127  *   - coh_mode:    0=no snoop, 2=1-way coherent, 3=2-way coherent
128  *
129  * Reserved entries should be programmed with the maximum caching, minimum
130  * coherency (which matches an all-0's encoding), so we can just omit them
131  * in the table.
132  *
133  * Note: There is an implicit assumption in the driver that compression and
134  * coh_1way+ are mutually exclusive. If this is ever not true then userptr
135  * and imported dma-buf from external device will have uncleared ccs state. See
136  * also xe_bo_needs_ccs_pages().
137  */
/*
 * Build one Xe2+ PAT table entry from its bspec fields.  The
 * BUILD_BUG_ON_ZERO() enforces at compile time that compression and
 * coherency are never enabled together (see the note above).
 */
#define XE2_PAT(no_promote, comp_en, l3clos, l3_policy, l4_policy, __coh_mode) \
	{ \
		.value = (no_promote ? XE2_NO_PROMOTE : 0) | \
			(comp_en ? XE2_COMP_EN : 0) | \
			REG_FIELD_PREP(XE2_L3_CLOS, l3clos) | \
			REG_FIELD_PREP(XE2_L3_POLICY, l3_policy) | \
			REG_FIELD_PREP(XE2_L4_POLICY, l4_policy) | \
			REG_FIELD_PREP(XE2_COH_MODE, __coh_mode), \
		.coh_mode = (BUILD_BUG_ON_ZERO(__coh_mode && comp_en) || __coh_mode) ? \
			XE_COH_AT_LEAST_1WAY : XE_COH_NONE, \
		.valid = 1 \
	}
150 
/* Xe2/Xe3 PAT table; column order matches the XE2_PAT() parameters */
static const struct xe_pat_table_entry xe2_pat_table[] = {
	[ 0] = XE2_PAT( 0, 0, 0, 0, 3, 0 ),
	[ 1] = XE2_PAT( 0, 0, 0, 0, 3, 2 ),
	[ 2] = XE2_PAT( 0, 0, 0, 0, 3, 3 ),
	[ 3] = XE2_PAT( 0, 0, 0, 3, 3, 0 ),
	[ 4] = XE2_PAT( 0, 0, 0, 3, 0, 2 ),
	[ 5] = XE2_PAT( 0, 0, 0, 3, 3, 2 ),
	[ 6] = XE2_PAT( 1, 0, 0, 1, 3, 0 ),
	[ 7] = XE2_PAT( 0, 0, 0, 3, 0, 3 ),
	[ 8] = XE2_PAT( 0, 0, 0, 3, 0, 0 ),
	[ 9] = XE2_PAT( 0, 1, 0, 0, 3, 0 ),
	[10] = XE2_PAT( 0, 1, 0, 3, 0, 0 ),
	[11] = XE2_PAT( 1, 1, 0, 1, 3, 0 ),
	[12] = XE2_PAT( 0, 1, 0, 3, 3, 0 ),
	[13] = XE2_PAT( 0, 0, 0, 0, 0, 0 ),
	[14] = XE2_PAT( 0, 1, 0, 0, 0, 0 ),
	[15] = XE2_PAT( 1, 1, 0, 1, 1, 0 ),
	/* 16..19 are reserved; leave set to all 0's */
	[20] = XE2_PAT( 0, 0, 1, 0, 3, 0 ),
	[21] = XE2_PAT( 0, 1, 1, 0, 3, 0 ),
	[22] = XE2_PAT( 0, 0, 1, 0, 3, 2 ),
	[23] = XE2_PAT( 0, 0, 1, 0, 3, 3 ),
	[24] = XE2_PAT( 0, 0, 2, 0, 3, 0 ),
	[25] = XE2_PAT( 0, 1, 2, 0, 3, 0 ),
	[26] = XE2_PAT( 0, 0, 2, 0, 3, 2 ),
	[27] = XE2_PAT( 0, 0, 2, 0, 3, 3 ),
	[28] = XE2_PAT( 0, 0, 3, 0, 3, 0 ),
	[29] = XE2_PAT( 0, 1, 3, 0, 3, 0 ),
	[30] = XE2_PAT( 0, 0, 3, 0, 3, 2 ),
	[31] = XE2_PAT( 0, 0, 3, 0, 3, 3 ),
};
182 
/*
 * Special PAT values programmed outside the main table: ATS (PCIe
 * address translation service) and PTA (page-table access) registers.
 */
static const struct xe_pat_table_entry xe2_pat_ats = XE2_PAT( 0, 0, 0, 0, 3, 3 );
static const struct xe_pat_table_entry xe2_pat_pta = XE2_PAT( 0, 0, 0, 0, 3, 0 );
186 
/*
 * Xe3p_XPC PAT table uses the same layout as Xe2/Xe3, except that there's no
 * option for compression.  Also note that the "L3" and "L4" register fields
 * actually control L2 and L3 cache respectively on this platform.
 */
#define XE3P_XPC_PAT(no_promote, l3clos, l3_policy, l4_policy, __coh_mode) \
	XE2_PAT(no_promote, 0, l3clos, l3_policy, l4_policy, __coh_mode)

/* Xe3p_XPC out-of-table ATS and PTA register values */
static const struct xe_pat_table_entry xe3p_xpc_pat_ats = XE3P_XPC_PAT( 0, 0, 0, 0, 3 );
static const struct xe_pat_table_entry xe3p_xpc_pat_pta = XE3P_XPC_PAT( 0, 0, 0, 0, 0 );
197 
/* Xe3p_XPC PAT table; column order matches the XE3P_XPC_PAT() parameters */
static const struct xe_pat_table_entry xe3p_xpc_pat_table[] = {
	[ 0] = XE3P_XPC_PAT( 0, 0, 0, 0, 0 ),
	[ 1] = XE3P_XPC_PAT( 0, 0, 0, 0, 2 ),
	[ 2] = XE3P_XPC_PAT( 0, 0, 0, 0, 3 ),
	[ 3] = XE3P_XPC_PAT( 0, 0, 3, 3, 0 ),
	[ 4] = XE3P_XPC_PAT( 0, 0, 3, 3, 2 ),
	[ 5] = XE3P_XPC_PAT( 0, 0, 3, 0, 0 ),
	[ 6] = XE3P_XPC_PAT( 0, 0, 3, 0, 2 ),
	[ 7] = XE3P_XPC_PAT( 0, 0, 3, 0, 3 ),
	[ 8] = XE3P_XPC_PAT( 0, 0, 0, 3, 0 ),
	[ 9] = XE3P_XPC_PAT( 0, 0, 0, 3, 2 ),
	[10] = XE3P_XPC_PAT( 0, 0, 0, 3, 3 ),
	/* 11..22 are reserved; leave set to all 0's */
	[23] = XE3P_XPC_PAT( 0, 1, 0, 0, 0 ),
	[24] = XE3P_XPC_PAT( 0, 1, 0, 0, 2 ),
	[25] = XE3P_XPC_PAT( 0, 1, 0, 0, 3 ),
	[26] = XE3P_XPC_PAT( 0, 2, 0, 0, 0 ),
	[27] = XE3P_XPC_PAT( 0, 2, 0, 0, 2 ),
	[28] = XE3P_XPC_PAT( 0, 2, 0, 0, 3 ),
	[29] = XE3P_XPC_PAT( 0, 3, 0, 0, 0 ),
	[30] = XE3P_XPC_PAT( 0, 3, 0, 0, 2 ),
	[31] = XE3P_XPC_PAT( 0, 3, 0, 0, 3 ),
};
221 
222 u16 xe_pat_index_get_coh_mode(struct xe_device *xe, u16 pat_index)
223 {
224 	WARN_ON(pat_index >= xe->pat.n_entries);
225 	return xe->pat.table[pat_index].coh_mode;
226 }
227 
228 bool xe_pat_index_get_comp_en(struct xe_device *xe, u16 pat_index)
229 {
230 	WARN_ON(pat_index >= xe->pat.n_entries);
231 	return !!(xe->pat.table[pat_index].value & XE2_COMP_EN);
232 }
233 
234 static void program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
235 			int n_entries)
236 {
237 	struct xe_device *xe = gt_to_xe(gt);
238 
239 	for (int i = 0; i < n_entries; i++) {
240 		struct xe_reg reg = XE_REG(_PAT_INDEX(i));
241 
242 		xe_mmio_write32(&gt->mmio, reg, table[i].value);
243 	}
244 
245 	if (xe->pat.pat_ats)
246 		xe_mmio_write32(&gt->mmio, XE_REG(_PAT_ATS), xe->pat.pat_ats->value);
247 	if (xe->pat.pat_pta)
248 		xe_mmio_write32(&gt->mmio, XE_REG(_PAT_PTA), xe->pat.pat_pta->value);
249 }
250 
251 static void program_pat_mcr(struct xe_gt *gt, const struct xe_pat_table_entry table[],
252 			    int n_entries)
253 {
254 	struct xe_device *xe = gt_to_xe(gt);
255 
256 	for (int i = 0; i < n_entries; i++) {
257 		struct xe_reg_mcr reg_mcr = XE_REG_MCR(_PAT_INDEX(i));
258 
259 		xe_gt_mcr_multicast_write(gt, reg_mcr, table[i].value);
260 	}
261 
262 	if (xe->pat.pat_ats)
263 		xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_ATS), xe->pat.pat_ats->value);
264 	if (xe->pat.pat_pta)
265 		xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_PTA), xe->pat.pat_pta->value);
266 }
267 
268 static int xelp_dump(struct xe_gt *gt, struct drm_printer *p)
269 {
270 	struct xe_device *xe = gt_to_xe(gt);
271 	int i;
272 
273 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
274 	if (!fw_ref.domains)
275 		return -ETIMEDOUT;
276 
277 	drm_printf(p, "PAT table:\n");
278 
279 	for (i = 0; i < xe->pat.n_entries; i++) {
280 		u32 pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_INDEX(i)));
281 
282 		xelp_pat_entry_dump(p, i, pat);
283 	}
284 
285 	return 0;
286 }
287 
/* Xe_LP: single GT, non-MCR registers, no separate media programming */
static const struct xe_pat_ops xelp_pat_ops = {
	.program_graphics = program_pat,
	.dump = xelp_dump,
};
292 
293 static int xehp_dump(struct xe_gt *gt, struct drm_printer *p)
294 {
295 	struct xe_device *xe = gt_to_xe(gt);
296 	int i;
297 
298 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
299 	if (!fw_ref.domains)
300 		return -ETIMEDOUT;
301 
302 	drm_printf(p, "PAT table:\n");
303 
304 	for (i = 0; i < xe->pat.n_entries; i++) {
305 		u32 pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));
306 
307 		xelp_pat_entry_dump(p, i, pat);
308 	}
309 
310 	return 0;
311 }
312 
/* Xe_HP (DG2): same table as Xe_LP, but programmed through MCR registers */
static const struct xe_pat_ops xehp_pat_ops = {
	.program_graphics = program_pat_mcr,
	.dump = xehp_dump,
};
317 
318 static int xehpc_dump(struct xe_gt *gt, struct drm_printer *p)
319 {
320 	struct xe_device *xe = gt_to_xe(gt);
321 	int i;
322 
323 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
324 	if (!fw_ref.domains)
325 		return -ETIMEDOUT;
326 
327 	drm_printf(p, "PAT table:\n");
328 
329 	for (i = 0; i < xe->pat.n_entries; i++) {
330 		u32 pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));
331 
332 		xehpc_pat_entry_dump(p, i, pat);
333 	}
334 
335 	return 0;
336 }
337 
/* Xe_HPC (PVC): MCR programming with the CLOS-aware table */
static const struct xe_pat_ops xehpc_pat_ops = {
	.program_graphics = program_pat_mcr,
	.dump = xehpc_dump,
};
342 
343 static int xelpg_dump(struct xe_gt *gt, struct drm_printer *p)
344 {
345 	struct xe_device *xe = gt_to_xe(gt);
346 	int i;
347 
348 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
349 	if (!fw_ref.domains)
350 		return -ETIMEDOUT;
351 
352 	drm_printf(p, "PAT table:\n");
353 
354 	for (i = 0; i < xe->pat.n_entries; i++) {
355 		u32 pat;
356 
357 		if (xe_gt_is_media_type(gt))
358 			pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_INDEX(i)));
359 		else
360 			pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));
361 
362 		xelpg_pat_entry_dump(p, i, pat);
363 	}
364 
365 	return 0;
366 }
367 
/*
 * SAMedia register offsets are adjusted by the write methods and they target
 * registers that are not MCR, while for normal GT they are MCR
 */
static const struct xe_pat_ops xelpg_pat_ops = {
	.program_graphics = program_pat,
	.program_media = program_pat_mcr,
	.dump = xelpg_dump,
};
377 
378 static void xe2_pat_entry_dump(struct drm_printer *p, const char *label, u32 pat, bool rsvd)
379 {
380 	drm_printf(p, "%s= [ %u, %u, %u, %u, %u, %u ]  (%#8x)%s\n", label,
381 		   !!(pat & XE2_NO_PROMOTE),
382 		   !!(pat & XE2_COMP_EN),
383 		   REG_FIELD_GET(XE2_L3_CLOS, pat),
384 		   REG_FIELD_GET(XE2_L3_POLICY, pat),
385 		   REG_FIELD_GET(XE2_L4_POLICY, pat),
386 		   REG_FIELD_GET(XE2_COH_MODE, pat),
387 		   pat, rsvd ? " *" : "");
388 }
389 
390 static void xe3p_xpc_pat_entry_dump(struct drm_printer *p, const char *label, u32 pat, bool rsvd)
391 {
392 	drm_printf(p, "%s= [ %u, %u, %u, %u, %u ]  (%#8x)%s\n", label,
393 		   !!(pat & XE2_NO_PROMOTE),
394 		   REG_FIELD_GET(XE2_L3_CLOS, pat),
395 		   REG_FIELD_GET(XE2_L3_POLICY, pat),
396 		   REG_FIELD_GET(XE2_L4_POLICY, pat),
397 		   REG_FIELD_GET(XE2_COH_MODE, pat),
398 		   pat, rsvd ? " *" : "");
399 }
400 
401 static int xe2_dump(struct xe_gt *gt, struct drm_printer *p)
402 {
403 	struct xe_device *xe = gt_to_xe(gt);
404 	u32 pat;
405 	int i;
406 	char label[PAT_LABEL_LEN];
407 
408 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
409 	if (!fw_ref.domains)
410 		return -ETIMEDOUT;
411 
412 	drm_printf(p, "PAT table: (* = reserved entry)\n");
413 
414 	for (i = 0; i < xe->pat.n_entries; i++) {
415 		if (xe_gt_is_media_type(gt))
416 			pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_INDEX(i)));
417 		else
418 			pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));
419 
420 		xe_pat_index_label(label, sizeof(label), i);
421 		xe2_pat_entry_dump(p, label, pat, !xe->pat.table[i].valid);
422 	}
423 
424 	/*
425 	 * Also print PTA_MODE, which describes how the hardware accesses
426 	 * PPGTT entries.
427 	 */
428 	if (xe_gt_is_media_type(gt))
429 		pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_PTA));
430 	else
431 		pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_PTA));
432 
433 	drm_printf(p, "Page Table Access:\n");
434 	xe2_pat_entry_dump(p, "PTA_MODE", pat, false);
435 
436 	return 0;
437 }
438 
/* Xe2/Xe3: primary GT uses MCR, standalone media GT uses plain MMIO */
static const struct xe_pat_ops xe2_pat_ops = {
	.program_graphics = program_pat_mcr,
	.program_media = program_pat,
	.dump = xe2_dump,
};
444 
445 static int xe3p_xpc_dump(struct xe_gt *gt, struct drm_printer *p)
446 {
447 	struct xe_device *xe = gt_to_xe(gt);
448 	u32 pat;
449 	int i;
450 	char label[PAT_LABEL_LEN];
451 
452 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
453 	if (!fw_ref.domains)
454 		return -ETIMEDOUT;
455 
456 	drm_printf(p, "PAT table: (* = reserved entry)\n");
457 
458 	for (i = 0; i < xe->pat.n_entries; i++) {
459 		pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));
460 
461 		xe_pat_index_label(label, sizeof(label), i);
462 		xe3p_xpc_pat_entry_dump(p, label, pat, !xe->pat.table[i].valid);
463 	}
464 
465 	/*
466 	 * Also print PTA_MODE, which describes how the hardware accesses
467 	 * PPGTT entries.
468 	 */
469 	pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_PTA));
470 
471 	drm_printf(p, "Page Table Access:\n");
472 	xe3p_xpc_pat_entry_dump(p, "PTA_MODE", pat, false);
473 
474 	return 0;
475 }
476 
/* Xe3p_XPC: MCR programming for graphics, plain MMIO for media */
static const struct xe_pat_ops xe3p_xpc_pat_ops = {
	.program_graphics = program_pat_mcr,
	.program_media = program_pat,
	.dump = xe3p_xpc_dump,
};
482 
/*
 * Select the PAT ops, table, and cache-level index mapping for the running
 * platform.  The chain is ordered most-specific first (exact VERx100
 * matches before major-version and platform checks), so the order of the
 * branches below is significant.
 */
void xe_pat_init_early(struct xe_device *xe)
{
	if (GRAPHICS_VERx100(xe) == 3511) {
		xe->pat.ops = &xe3p_xpc_pat_ops;
		xe->pat.table = xe3p_xpc_pat_table;
		xe->pat.pat_ats = &xe3p_xpc_pat_ats;
		xe->pat.pat_pta = &xe3p_xpc_pat_pta;
		xe->pat.n_entries = ARRAY_SIZE(xe3p_xpc_pat_table);
		xe->pat.idx[XE_CACHE_NONE] = 3;
		xe->pat.idx[XE_CACHE_WT] = 3;	/* N/A (no display); use UC */
		xe->pat.idx[XE_CACHE_WB] = 2;
	} else if (GRAPHICS_VER(xe) == 30 || GRAPHICS_VER(xe) == 20) {
		xe->pat.ops = &xe2_pat_ops;
		xe->pat.table = xe2_pat_table;
		xe->pat.pat_ats = &xe2_pat_ats;
		/* PTA_MODE is only programmed explicitly on discrete parts */
		if (IS_DGFX(xe))
			xe->pat.pat_pta = &xe2_pat_pta;

		/* Wa_16023588340. XXX: Should use XE_WA */
		if (GRAPHICS_VERx100(xe) == 2001)
			xe->pat.n_entries = 28; /* Disable CLOS3 */
		else
			xe->pat.n_entries = ARRAY_SIZE(xe2_pat_table);

		xe->pat.idx[XE_CACHE_NONE] = 3;
		xe->pat.idx[XE_CACHE_WT] = 15;
		xe->pat.idx[XE_CACHE_WB] = 2;
		xe->pat.idx[XE_CACHE_NONE_COMPRESSION] = 12; /*Applicable on xe2 and beyond */
	} else if (xe->info.platform == XE_METEORLAKE) {
		xe->pat.ops = &xelpg_pat_ops;
		xe->pat.table = xelpg_pat_table;
		xe->pat.n_entries = ARRAY_SIZE(xelpg_pat_table);
		xe->pat.idx[XE_CACHE_NONE] = 2;
		xe->pat.idx[XE_CACHE_WT] = 1;
		xe->pat.idx[XE_CACHE_WB] = 3;
	} else if (xe->info.platform == XE_PVC) {
		xe->pat.ops = &xehpc_pat_ops;
		xe->pat.table = xehpc_pat_table;
		xe->pat.n_entries = ARRAY_SIZE(xehpc_pat_table);
		xe->pat.idx[XE_CACHE_NONE] = 0;
		xe->pat.idx[XE_CACHE_WT] = 2;
		xe->pat.idx[XE_CACHE_WB] = 3;
	} else if (xe->info.platform == XE_DG2) {
		/*
		 * Table is the same as previous platforms, but programming
		 * method has changed.
		 */
		xe->pat.ops = &xehp_pat_ops;
		xe->pat.table = xelp_pat_table;
		xe->pat.n_entries = ARRAY_SIZE(xelp_pat_table);
		xe->pat.idx[XE_CACHE_NONE] = 3;
		xe->pat.idx[XE_CACHE_WT] = 2;
		xe->pat.idx[XE_CACHE_WB] = 0;
	} else if (GRAPHICS_VERx100(xe) <= 1210) {
		WARN_ON_ONCE(!IS_DGFX(xe) && !xe->info.has_llc);
		xe->pat.ops = &xelp_pat_ops;
		xe->pat.table = xelp_pat_table;
		xe->pat.n_entries = ARRAY_SIZE(xelp_pat_table);
		xe->pat.idx[XE_CACHE_NONE] = 3;
		xe->pat.idx[XE_CACHE_WT] = 2;
		xe->pat.idx[XE_CACHE_WB] = 0;
	} else {
		/*
		 * Going forward we expect to need new PAT settings for most
		 * new platforms; failure to provide a new table can easily
		 * lead to subtle, hard-to-debug problems.  If none of the
		 * conditions above match the platform we're running on we'll
		 * raise an error rather than trying to silently inherit the
		 * most recent platform's behavior.
		 */
		drm_err(&xe->drm, "Missing PAT table for platform with graphics version %d.%02d!\n",
			GRAPHICS_VER(xe), GRAPHICS_VERx100(xe) % 100);
	}

	/* VFs can't program nor dump PAT settings */
	if (IS_SRIOV_VF(xe))
		xe->pat.ops = NULL;

	/* Sanity-check that the selected ops provide everything we'll call */
	xe_assert(xe, !xe->pat.ops || xe->pat.ops->dump);
	xe_assert(xe, !xe->pat.ops || xe->pat.ops->program_graphics);
	xe_assert(xe, !xe->pat.ops || MEDIA_VER(xe) < 13 || xe->pat.ops->program_media);
}
565 
566 void xe_pat_init(struct xe_gt *gt)
567 {
568 	struct xe_device *xe = gt_to_xe(gt);
569 
570 	if (!xe->pat.ops)
571 		return;
572 
573 	if (xe_gt_is_media_type(gt))
574 		xe->pat.ops->program_media(gt, xe->pat.table, xe->pat.n_entries);
575 	else
576 		xe->pat.ops->program_graphics(gt, xe->pat.table, xe->pat.n_entries);
577 }
578 
579 /**
580  * xe_pat_dump() - Dump GT PAT table into a drm printer.
581  * @gt: the &xe_gt
582  * @p: the &drm_printer
583  *
584  * Return: 0 on success or a negative error code on failure.
585  */
586 int xe_pat_dump(struct xe_gt *gt, struct drm_printer *p)
587 {
588 	struct xe_device *xe = gt_to_xe(gt);
589 
590 	if (!xe->pat.ops)
591 		return -EOPNOTSUPP;
592 
593 	return xe->pat.ops->dump(gt, p);
594 }
595 
/**
 * xe_pat_dump_sw_config() - Dump the software-configured GT PAT table into a drm printer.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Unlike xe_pat_dump(), this prints the values the driver intends to
 * program (xe->pat.table) rather than reading registers back, so it needs
 * no forcewake and also works where hardware access is unavailable.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_pat_dump_sw_config(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_device *xe = gt_to_xe(gt);
	char label[PAT_LABEL_LEN];

	if (!xe->pat.table || !xe->pat.n_entries)
		return -EOPNOTSUPP;

	drm_printf(p, "PAT table:%s\n", GRAPHICS_VER(xe) >= 20 ? " (* = reserved entry)" : "");
	/* Pick the dump format matching the platform's PAT entry layout */
	for (u32 i = 0; i < xe->pat.n_entries; i++) {
		u32 pat = xe->pat.table[i].value;

		if (GRAPHICS_VERx100(xe) == 3511) {
			xe_pat_index_label(label, sizeof(label), i);
			xe3p_xpc_pat_entry_dump(p, label, pat, !xe->pat.table[i].valid);
		} else if (GRAPHICS_VER(xe) == 30 || GRAPHICS_VER(xe) == 20) {
			xe_pat_index_label(label, sizeof(label), i);
			xe2_pat_entry_dump(p, label, pat, !xe->pat.table[i].valid);
		} else if (xe->info.platform == XE_METEORLAKE) {
			xelpg_pat_entry_dump(p, i, pat);
		} else if (xe->info.platform == XE_PVC) {
			xehpc_pat_entry_dump(p, i, pat);
		} else if (xe->info.platform == XE_DG2 || GRAPHICS_VERx100(xe) <= 1210) {
			xelp_pat_entry_dump(p, i, pat);
		} else {
			/* Should be unreachable if xe_pat_init_early() matched */
			return -EOPNOTSUPP;
		}
	}

	/* PTA/ATS only exist on platforms that set these pointers (Xe2+) */
	if (xe->pat.pat_pta) {
		u32 pat = xe->pat.pat_pta->value;

		drm_printf(p, "Page Table Access:\n");
		xe2_pat_entry_dump(p, "PTA_MODE", pat, false);
	}

	if (xe->pat.pat_ats) {
		u32 pat = xe->pat.pat_ats->value;

		drm_printf(p, "PCIe ATS/PASID:\n");
		xe2_pat_entry_dump(p, "PAT_ATS ", pat, false);
	}

	/* Mapping from driver cache levels to PAT indices */
	drm_printf(p, "Cache Level:\n");
	drm_printf(p, "IDX[XE_CACHE_NONE] = %d\n", xe->pat.idx[XE_CACHE_NONE]);
	drm_printf(p, "IDX[XE_CACHE_WT] = %d\n", xe->pat.idx[XE_CACHE_WT]);
	drm_printf(p, "IDX[XE_CACHE_WB] = %d\n", xe->pat.idx[XE_CACHE_WB]);
	if (GRAPHICS_VER(xe) >= 20) {
		drm_printf(p, "IDX[XE_CACHE_NONE_COMPRESSION] = %d\n",
			   xe->pat.idx[XE_CACHE_NONE_COMPRESSION]);
	}

	return 0;
}
657