// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_pat.h"

#include <uapi/drm/xe_drm.h>

#include <generated/xe_wa_oob.h>

#include "regs/xe_reg_defs.h"
#include "xe_assert.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
#include "xe_wa.h"

#define _PAT_ATS				0x47fc
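/*
 * The PAT index registers sit at evenly spaced offsets split across two
 * ranges: indices 0..7 map to 0x4800 + 4 * index, and indices 8..31 continue
 * at 0x4848 + 4 * (index - 8).
 */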
#define _PAT_INDEX(index)			_PICK_EVEN_2RANGES(index, 8, \
							   0x4800, 0x4804, \
							   0x4848, 0x484c)
#define _PAT_PTA				0x4820

#define XE2_NO_PROMOTE				REG_BIT(10)
#define XE2_COMP_EN				REG_BIT(9)
#define XE2_L3_CLOS				REG_GENMASK(7, 6)
#define XE2_L3_POLICY				REG_GENMASK(5, 4)
#define XE2_L4_POLICY				REG_GENMASK(3, 2)
#define XE2_COH_MODE				REG_GENMASK(1, 0)

#define XELPG_L4_POLICY_MASK			REG_GENMASK(3, 2)
#define XELPG_PAT_3_UC				REG_FIELD_PREP(XELPG_L4_POLICY_MASK, 3)
#define XELPG_PAT_1_WT				REG_FIELD_PREP(XELPG_L4_POLICY_MASK, 1)
#define XELPG_PAT_0_WB				REG_FIELD_PREP(XELPG_L4_POLICY_MASK, 0)
#define XELPG_INDEX_COH_MODE_MASK		REG_GENMASK(1, 0)
#define XELPG_3_COH_2W				REG_FIELD_PREP(XELPG_INDEX_COH_MODE_MASK, 3)
#define XELPG_2_COH_1W				REG_FIELD_PREP(XELPG_INDEX_COH_MODE_MASK, 2)
#define XELPG_0_COH_NON				REG_FIELD_PREP(XELPG_INDEX_COH_MODE_MASK, 0)

#define XEHPC_CLOS_LEVEL_MASK			REG_GENMASK(3, 2)
#define XEHPC_PAT_CLOS(x)			REG_FIELD_PREP(XEHPC_CLOS_LEVEL_MASK, x)

#define XELP_MEM_TYPE_MASK			REG_GENMASK(1, 0)
#define XELP_PAT_WB				REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 3)
#define XELP_PAT_WT				REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 2)
#define XELP_PAT_WC				REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 1)
#define XELP_PAT_UC				REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 0)

static const char *XELP_MEM_TYPE_STR_MAP[] = { "UC", "WC", "WT", "WB" };

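/*
 * Per-platform hooks for programming and dumping the PAT tables.  The
 * program_media hook is only required on platforms with a standalone media
 * GT (media version 13+); see the asserts at the end of xe_pat_init_early().
 */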
struct xe_pat_ops {
	void (*program_graphics)(struct xe_gt *gt, const struct xe_pat_table_entry table[],
				 int n_entries);
	void (*program_media)(struct xe_gt *gt, const struct xe_pat_table_entry table[],
			      int n_entries);
	int (*dump)(struct xe_gt *gt, struct drm_printer *p);
};

static const struct xe_pat_table_entry xelp_pat_table[] = {
	[0] = { XELP_PAT_WB, XE_COH_AT_LEAST_1WAY },
	[1] = { XELP_PAT_WC, XE_COH_NONE },
	[2] = { XELP_PAT_WT, XE_COH_NONE },
	[3] = { XELP_PAT_UC, XE_COH_NONE },
};

static const struct xe_pat_table_entry xehpc_pat_table[] = {
	[0] = { XELP_PAT_UC, XE_COH_NONE },
	[1] = { XELP_PAT_WC, XE_COH_NONE },
	[2] = { XELP_PAT_WT, XE_COH_NONE },
	[3] = { XELP_PAT_WB, XE_COH_AT_LEAST_1WAY },
	[4] = { XEHPC_PAT_CLOS(1) | XELP_PAT_WT, XE_COH_NONE },
	[5] = { XEHPC_PAT_CLOS(1) | XELP_PAT_WB, XE_COH_AT_LEAST_1WAY },
	[6] = { XEHPC_PAT_CLOS(2) | XELP_PAT_WT, XE_COH_NONE },
	[7] = { XEHPC_PAT_CLOS(2) | XELP_PAT_WB, XE_COH_AT_LEAST_1WAY },
};

static const struct xe_pat_table_entry xelpg_pat_table[] = {
	[0] = { XELPG_PAT_0_WB, XE_COH_NONE },
	[1] = { XELPG_PAT_1_WT, XE_COH_NONE },
	[2] = { XELPG_PAT_3_UC, XE_COH_NONE },
	[3] = { XELPG_PAT_0_WB | XELPG_2_COH_1W, XE_COH_AT_LEAST_1WAY },
	[4] = { XELPG_PAT_0_WB | XELPG_3_COH_2W, XE_COH_AT_LEAST_1WAY },
};

/*
 * The Xe2 table is getting large/complicated so it's easier to review if
 * provided in a form that exactly matches the bspec's formatting.  The
 * meanings of the fields here are:
 *   - no_promote:  0=promotable, 1=no promote
 *   - comp_en:     0=disable, 1=enable
 *   - l3clos:      L3 class of service (0-3)
 *   - l3_policy:   0=WB, 1=XD ("WB - Transient Display"), 3=UC
 *   - l4_policy:   0=WB, 1=WT, 3=UC
 *   - coh_mode:    0=no snoop, 2=1-way coherent, 3=2-way coherent
 *
 * Reserved entries should be programmed with the maximum caching, minimum
 * coherency (which matches an all-0's encoding), so we can just omit them
 * in the table.
 *
 * Note: There is an implicit assumption in the driver that compression and
 * coh_1way+ are mutually exclusive.  If this is ever not true then userptr
 * and imported dma-buf from an external device will have uncleared ccs
 * state.  See also xe_bo_needs_ccs_pages().
 */
#define XE2_PAT(no_promote, comp_en, l3clos, l3_policy, l4_policy, __coh_mode) \
	{ \
		.value = (no_promote ? XE2_NO_PROMOTE : 0) | \
			(comp_en ? XE2_COMP_EN : 0) | \
			REG_FIELD_PREP(XE2_L3_CLOS, l3clos) | \
			REG_FIELD_PREP(XE2_L3_POLICY, l3_policy) | \
			REG_FIELD_PREP(XE2_L4_POLICY, l4_policy) | \
			REG_FIELD_PREP(XE2_COH_MODE, __coh_mode), \
		.coh_mode = (BUILD_BUG_ON_ZERO(__coh_mode && comp_en) || __coh_mode) ? \
			XE_COH_AT_LEAST_1WAY : XE_COH_NONE, \
		.valid = 1 \
	}
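
/*
 * As a worked example: XE2_PAT( 0, 0, 0, 0, 3, 3 ) (index 2 below) sets
 * l4_policy=3 (UC) in bits 3:2 and coh_mode=3 (2-way coherent) in bits 1:0,
 * giving .value = 0xf and .coh_mode = XE_COH_AT_LEAST_1WAY.
 */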

static const struct xe_pat_table_entry xe2_pat_table[] = {
	[ 0] = XE2_PAT( 0, 0, 0, 0, 3, 0 ),
	[ 1] = XE2_PAT( 0, 0, 0, 0, 3, 2 ),
	[ 2] = XE2_PAT( 0, 0, 0, 0, 3, 3 ),
	[ 3] = XE2_PAT( 0, 0, 0, 3, 3, 0 ),
	[ 4] = XE2_PAT( 0, 0, 0, 3, 0, 2 ),
	[ 5] = XE2_PAT( 0, 0, 0, 3, 3, 2 ),
	[ 6] = XE2_PAT( 1, 0, 0, 1, 3, 0 ),
	[ 7] = XE2_PAT( 0, 0, 0, 3, 0, 3 ),
	[ 8] = XE2_PAT( 0, 0, 0, 3, 0, 0 ),
	[ 9] = XE2_PAT( 0, 1, 0, 0, 3, 0 ),
	[10] = XE2_PAT( 0, 1, 0, 3, 0, 0 ),
	[11] = XE2_PAT( 1, 1, 0, 1, 3, 0 ),
	[12] = XE2_PAT( 0, 1, 0, 3, 3, 0 ),
	[13] = XE2_PAT( 0, 0, 0, 0, 0, 0 ),
	[14] = XE2_PAT( 0, 1, 0, 0, 0, 0 ),
	[15] = XE2_PAT( 1, 1, 0, 1, 1, 0 ),
	/* 16..19 are reserved; leave set to all 0's */
	[20] = XE2_PAT( 0, 0, 1, 0, 3, 0 ),
	[21] = XE2_PAT( 0, 1, 1, 0, 3, 0 ),
	[22] = XE2_PAT( 0, 0, 1, 0, 3, 2 ),
	[23] = XE2_PAT( 0, 0, 1, 0, 3, 3 ),
	[24] = XE2_PAT( 0, 0, 2, 0, 3, 0 ),
	[25] = XE2_PAT( 0, 1, 2, 0, 3, 0 ),
	[26] = XE2_PAT( 0, 0, 2, 0, 3, 2 ),
	[27] = XE2_PAT( 0, 0, 2, 0, 3, 3 ),
	[28] = XE2_PAT( 0, 0, 3, 0, 3, 0 ),
	[29] = XE2_PAT( 0, 1, 3, 0, 3, 0 ),
	[30] = XE2_PAT( 0, 0, 3, 0, 3, 2 ),
	[31] = XE2_PAT( 0, 0, 3, 0, 3, 3 ),
};

/* Special PAT values programmed outside the main table */
static const struct xe_pat_table_entry xe2_pat_ats = XE2_PAT( 0, 0, 0, 0, 3, 3 );
static const struct xe_pat_table_entry xe2_pat_pta = XE2_PAT( 0, 0, 0, 0, 3, 0 );

/*
 * Xe3p_XPC PAT table uses the same layout as Xe2/Xe3, except that there's no
 * option for compression.  Also note that the "L3" and "L4" register fields
 * actually control L2 and L3 cache respectively on this platform.
 */
#define XE3P_XPC_PAT(no_promote, l3clos, l3_policy, l4_policy, __coh_mode) \
	XE2_PAT(no_promote, 0, l3clos, l3_policy, l4_policy, __coh_mode)

static const struct xe_pat_table_entry xe3p_xpc_pat_ats = XE3P_XPC_PAT( 0, 0, 0, 0, 3 );
static const struct xe_pat_table_entry xe3p_xpc_pat_pta = XE3P_XPC_PAT( 0, 0, 0, 0, 0 );

static const struct xe_pat_table_entry xe3p_xpc_pat_table[] = {
	[ 0] = XE3P_XPC_PAT( 0, 0, 0, 0, 0 ),
	[ 1] = XE3P_XPC_PAT( 0, 0, 0, 0, 2 ),
	[ 2] = XE3P_XPC_PAT( 0, 0, 0, 0, 3 ),
	[ 3] = XE3P_XPC_PAT( 0, 0, 3, 3, 0 ),
	[ 4] = XE3P_XPC_PAT( 0, 0, 3, 3, 2 ),
	[ 5] = XE3P_XPC_PAT( 0, 0, 3, 0, 0 ),
	[ 6] = XE3P_XPC_PAT( 0, 0, 3, 0, 2 ),
	[ 7] = XE3P_XPC_PAT( 0, 0, 3, 0, 3 ),
	[ 8] = XE3P_XPC_PAT( 0, 0, 0, 3, 0 ),
	[ 9] = XE3P_XPC_PAT( 0, 0, 0, 3, 2 ),
	[10] = XE3P_XPC_PAT( 0, 0, 0, 3, 3 ),
	/* 11..22 are reserved; leave set to all 0's */
	[23] = XE3P_XPC_PAT( 0, 1, 0, 0, 0 ),
	[24] = XE3P_XPC_PAT( 0, 1, 0, 0, 2 ),
	[25] = XE3P_XPC_PAT( 0, 1, 0, 0, 3 ),
	[26] = XE3P_XPC_PAT( 0, 2, 0, 0, 0 ),
	[27] = XE3P_XPC_PAT( 0, 2, 0, 0, 2 ),
	[28] = XE3P_XPC_PAT( 0, 2, 0, 0, 3 ),
	[29] = XE3P_XPC_PAT( 0, 3, 0, 0, 0 ),
	[30] = XE3P_XPC_PAT( 0, 3, 0, 0, 2 ),
	[31] = XE3P_XPC_PAT( 0, 3, 0, 0, 3 ),
};

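/*
 * Return the coherency mode (XE_COH_NONE or XE_COH_AT_LEAST_1WAY) that the
 * platform's PAT table advertises for @pat_index.
 */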
u16 xe_pat_index_get_coh_mode(struct xe_device *xe, u16 pat_index)
{
	WARN_ON(pat_index >= xe->pat.n_entries);
	return xe->pat.table[pat_index].coh_mode;
}

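/*
 * Write the PAT table (plus the special ATS/PTA entries, when the platform
 * defines them) through plain, non-replicated MMIO registers.
 */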
static void program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
			int n_entries)
{
	struct xe_device *xe = gt_to_xe(gt);

	for (int i = 0; i < n_entries; i++) {
		struct xe_reg reg = XE_REG(_PAT_INDEX(i));

		xe_mmio_write32(&gt->mmio, reg, table[i].value);
	}

	if (xe->pat.pat_ats)
		xe_mmio_write32(&gt->mmio, XE_REG(_PAT_ATS), xe->pat.pat_ats->value);
	if (xe->pat.pat_pta)
		xe_mmio_write32(&gt->mmio, XE_REG(_PAT_PTA), xe->pat.pat_pta->value);
}

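/*
 * Same as program_pat(), but for platforms where the PAT registers are
 * multicast/replicated (MCR) and must be written through the GT MCR
 * interface.
 */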
static void program_pat_mcr(struct xe_gt *gt, const struct xe_pat_table_entry table[],
			    int n_entries)
{
	struct xe_device *xe = gt_to_xe(gt);

	for (int i = 0; i < n_entries; i++) {
		struct xe_reg_mcr reg_mcr = XE_REG_MCR(_PAT_INDEX(i));

		xe_gt_mcr_multicast_write(gt, reg_mcr, table[i].value);
	}

	if (xe->pat.pat_ats)
		xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_ATS), xe->pat.pat_ats->value);
	if (xe->pat.pat_pta)
		xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_PTA), xe->pat.pat_pta->value);
}

static int xelp_dump(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int fw_ref;
	int i;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return -ETIMEDOUT;

	drm_printf(p, "PAT table:\n");

	for (i = 0; i < xe->pat.n_entries; i++) {
		u32 pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_INDEX(i)));
		u8 mem_type = REG_FIELD_GET(XELP_MEM_TYPE_MASK, pat);

		drm_printf(p, "PAT[%2d] = %s (%#8x)\n", i,
			   XELP_MEM_TYPE_STR_MAP[mem_type], pat);
	}

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return 0;
}

static const struct xe_pat_ops xelp_pat_ops = {
	.program_graphics = program_pat,
	.dump = xelp_dump,
};

static int xehp_dump(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int fw_ref;
	int i;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return -ETIMEDOUT;

	drm_printf(p, "PAT table:\n");

	for (i = 0; i < xe->pat.n_entries; i++) {
		u32 pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));
		u8 mem_type;

		mem_type = REG_FIELD_GET(XELP_MEM_TYPE_MASK, pat);

		drm_printf(p, "PAT[%2d] = %s (%#8x)\n", i,
			   XELP_MEM_TYPE_STR_MAP[mem_type], pat);
	}

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return 0;
}

static const struct xe_pat_ops xehp_pat_ops = {
	.program_graphics = program_pat_mcr,
	.dump = xehp_dump,
};

static int xehpc_dump(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int fw_ref;
	int i;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return -ETIMEDOUT;

	drm_printf(p, "PAT table:\n");

	for (i = 0; i < xe->pat.n_entries; i++) {
		u32 pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));

		drm_printf(p, "PAT[%2d] = [ %u, %u ] (%#8x)\n", i,
			   REG_FIELD_GET(XELP_MEM_TYPE_MASK, pat),
			   REG_FIELD_GET(XEHPC_CLOS_LEVEL_MASK, pat), pat);
	}

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return 0;
}

static const struct xe_pat_ops xehpc_pat_ops = {
	.program_graphics = program_pat_mcr,
	.dump = xehpc_dump,
};

static int xelpg_dump(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int fw_ref;
	int i;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return -ETIMEDOUT;

	drm_printf(p, "PAT table:\n");

	for (i = 0; i < xe->pat.n_entries; i++) {
		u32 pat;

		if (xe_gt_is_media_type(gt))
			pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_INDEX(i)));
		else
			pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));

		drm_printf(p, "PAT[%2d] = [ %u, %u ] (%#8x)\n", i,
			   REG_FIELD_GET(XELPG_L4_POLICY_MASK, pat),
			   REG_FIELD_GET(XELPG_INDEX_COH_MODE_MASK, pat), pat);
	}

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return 0;
}

/*
 * On the standalone media GT (SAMedia) the PAT registers are not MCR, while
 * on the primary GT they are; the write helpers adjust the register offsets
 * accordingly.
 */
static const struct xe_pat_ops xelpg_pat_ops = {
	.program_graphics = program_pat_mcr,
	.program_media = program_pat,
	.dump = xelpg_dump,
};

static int xe2_dump(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int fw_ref;
	u32 pat;
	int i;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return -ETIMEDOUT;

	drm_printf(p, "PAT table: (* = reserved entry)\n");

	for (i = 0; i < xe->pat.n_entries; i++) {
		if (xe_gt_is_media_type(gt))
			pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_INDEX(i)));
		else
			pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));

		drm_printf(p, "PAT[%2d] = [ %u, %u, %u, %u, %u, %u ]  (%#8x)%s\n", i,
			   !!(pat & XE2_NO_PROMOTE),
			   !!(pat & XE2_COMP_EN),
			   REG_FIELD_GET(XE2_L3_CLOS, pat),
			   REG_FIELD_GET(XE2_L3_POLICY, pat),
			   REG_FIELD_GET(XE2_L4_POLICY, pat),
			   REG_FIELD_GET(XE2_COH_MODE, pat),
			   pat, xe->pat.table[i].valid ? "" : " *");
	}

	/*
	 * Also print PTA_MODE, which describes how the hardware accesses
	 * PPGTT entries.
	 */
	if (xe_gt_is_media_type(gt))
		pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_PTA));
	else
		pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_PTA));

	drm_printf(p, "Page Table Access:\n");
	drm_printf(p, "PTA_MODE= [ %u, %u, %u, %u, %u, %u ]  (%#8x)\n",
		   !!(pat & XE2_NO_PROMOTE),
		   !!(pat & XE2_COMP_EN),
		   REG_FIELD_GET(XE2_L3_CLOS, pat),
		   REG_FIELD_GET(XE2_L3_POLICY, pat),
		   REG_FIELD_GET(XE2_L4_POLICY, pat),
		   REG_FIELD_GET(XE2_COH_MODE, pat),
		   pat);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return 0;
}

static const struct xe_pat_ops xe2_pat_ops = {
	.program_graphics = program_pat_mcr,
	.program_media = program_pat,
	.dump = xe2_dump,
};

static int xe3p_xpc_dump(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int fw_ref;
	u32 pat;
	int i;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return -ETIMEDOUT;

	drm_printf(p, "PAT table: (* = reserved entry)\n");

	for (i = 0; i < xe->pat.n_entries; i++) {
		pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));

		drm_printf(p, "PAT[%2d] = [ %u, %u, %u, %u, %u ]  (%#8x)%s\n", i,
			   !!(pat & XE2_NO_PROMOTE),
			   REG_FIELD_GET(XE2_L3_CLOS, pat),
			   REG_FIELD_GET(XE2_L3_POLICY, pat),
			   REG_FIELD_GET(XE2_L4_POLICY, pat),
			   REG_FIELD_GET(XE2_COH_MODE, pat),
			   pat, xe->pat.table[i].valid ? "" : " *");
	}

	/*
	 * Also print PTA_MODE, which describes how the hardware accesses
	 * PPGTT entries.
	 */
	pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_PTA));

	drm_printf(p, "Page Table Access:\n");
	drm_printf(p, "PTA_MODE= [ %u, %u, %u, %u, %u ]  (%#8x)\n",
		   !!(pat & XE2_NO_PROMOTE),
		   REG_FIELD_GET(XE2_L3_CLOS, pat),
		   REG_FIELD_GET(XE2_L3_POLICY, pat),
		   REG_FIELD_GET(XE2_L4_POLICY, pat),
		   REG_FIELD_GET(XE2_COH_MODE, pat),
		   pat);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return 0;
}

static const struct xe_pat_ops xe3p_xpc_pat_ops = {
	.program_graphics = program_pat_mcr,
	.program_media = program_pat,
	.dump = xe3p_xpc_dump,
};

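/*
 * Pick the PAT table, programming/dump hooks, and the PAT indices backing
 * the driver's internal caching modes for the platform we're running on.
 */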
void xe_pat_init_early(struct xe_device *xe)
{
	if (GRAPHICS_VERx100(xe) == 3511) {
		xe->pat.ops = &xe3p_xpc_pat_ops;
		xe->pat.table = xe3p_xpc_pat_table;
		xe->pat.pat_ats = &xe3p_xpc_pat_ats;
		xe->pat.pat_pta = &xe3p_xpc_pat_pta;
		xe->pat.n_entries = ARRAY_SIZE(xe3p_xpc_pat_table);
		xe->pat.idx[XE_CACHE_NONE] = 3;
		xe->pat.idx[XE_CACHE_WT] = 3;	/* N/A (no display); use UC */
		xe->pat.idx[XE_CACHE_WB] = 2;
	} else if (GRAPHICS_VER(xe) == 30 || GRAPHICS_VER(xe) == 20) {
		xe->pat.ops = &xe2_pat_ops;
		xe->pat.table = xe2_pat_table;
		xe->pat.pat_ats = &xe2_pat_ats;
		if (IS_DGFX(xe))
			xe->pat.pat_pta = &xe2_pat_pta;

		/* Wa_16023588340. XXX: Should use XE_WA */
		if (GRAPHICS_VERx100(xe) == 2001)
			xe->pat.n_entries = 28; /* Disable CLOS3 */
		else
			xe->pat.n_entries = ARRAY_SIZE(xe2_pat_table);

		xe->pat.idx[XE_CACHE_NONE] = 3;
		xe->pat.idx[XE_CACHE_WT] = 15;
		xe->pat.idx[XE_CACHE_WB] = 2;
		xe->pat.idx[XE_CACHE_NONE_COMPRESSION] = 12; /* Applicable on xe2 and beyond */
	} else if (xe->info.platform == XE_METEORLAKE) {
		xe->pat.ops = &xelpg_pat_ops;
		xe->pat.table = xelpg_pat_table;
		xe->pat.n_entries = ARRAY_SIZE(xelpg_pat_table);
		xe->pat.idx[XE_CACHE_NONE] = 2;
		xe->pat.idx[XE_CACHE_WT] = 1;
		xe->pat.idx[XE_CACHE_WB] = 3;
	} else if (xe->info.platform == XE_PVC) {
		xe->pat.ops = &xehpc_pat_ops;
		xe->pat.table = xehpc_pat_table;
		xe->pat.n_entries = ARRAY_SIZE(xehpc_pat_table);
		xe->pat.idx[XE_CACHE_NONE] = 0;
		xe->pat.idx[XE_CACHE_WT] = 2;
		xe->pat.idx[XE_CACHE_WB] = 3;
	} else if (xe->info.platform == XE_DG2) {
		/*
		 * Table is the same as previous platforms, but programming
		 * method has changed.
		 */
		xe->pat.ops = &xehp_pat_ops;
		xe->pat.table = xelp_pat_table;
		xe->pat.n_entries = ARRAY_SIZE(xelp_pat_table);
		xe->pat.idx[XE_CACHE_NONE] = 3;
		xe->pat.idx[XE_CACHE_WT] = 2;
		xe->pat.idx[XE_CACHE_WB] = 0;
	} else if (GRAPHICS_VERx100(xe) <= 1210) {
		WARN_ON_ONCE(!IS_DGFX(xe) && !xe->info.has_llc);
		xe->pat.ops = &xelp_pat_ops;
		xe->pat.table = xelp_pat_table;
		xe->pat.n_entries = ARRAY_SIZE(xelp_pat_table);
		xe->pat.idx[XE_CACHE_NONE] = 3;
		xe->pat.idx[XE_CACHE_WT] = 2;
		xe->pat.idx[XE_CACHE_WB] = 0;
	} else {
		/*
		 * Going forward we expect to need new PAT settings for most
		 * new platforms; failure to provide a new table can easily
		 * lead to subtle, hard-to-debug problems.  If none of the
		 * conditions above match the platform we're running on we'll
		 * raise an error rather than trying to silently inherit the
		 * most recent platform's behavior.
		 */
		drm_err(&xe->drm, "Missing PAT table for platform with graphics version %d.%02d!\n",
			GRAPHICS_VER(xe), GRAPHICS_VERx100(xe) % 100);
	}

	/* VFs can neither program nor dump PAT settings */
	if (IS_SRIOV_VF(xe))
		xe->pat.ops = NULL;

	xe_assert(xe, !xe->pat.ops || xe->pat.ops->dump);
	xe_assert(xe, !xe->pat.ops || xe->pat.ops->program_graphics);
	xe_assert(xe, !xe->pat.ops || MEDIA_VER(xe) < 13 || xe->pat.ops->program_media);
}

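/*
 * Program this GT's PAT table using the graphics or media hook as
 * appropriate.  This is a no-op on VFs, where pat.ops is NULL.
 */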
void xe_pat_init(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	if (!xe->pat.ops)
		return;

	if (xe_gt_is_media_type(gt))
		xe->pat.ops->program_media(gt, xe->pat.table, xe->pat.n_entries);
	else
		xe->pat.ops->program_graphics(gt, xe->pat.table, xe->pat.n_entries);
}

/**
 * xe_pat_dump() - Dump GT PAT table into a drm printer.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_pat_dump(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_device *xe = gt_to_xe(gt);

	if (!xe->pat.ops)
		return -EOPNOTSUPP;

	return xe->pat.ops->dump(gt, p);
}