// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_pat.h"

#include <uapi/drm/xe_drm.h>

#include <generated/xe_wa_oob.h>

#include "regs/xe_reg_defs.h"
#include "xe_assert.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
#include "xe_wa.h"

#define _PAT_ATS				0x47fc
#define _PAT_INDEX(index)			_PICK_EVEN_2RANGES(index, 8, \
								   0x4800, 0x4804, \
								   0x4848, 0x484c)
#define _PAT_PTA				0x4820
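
/*
 * For reference: given the upstream definitions of _PICK_EVEN_2RANGES()
 * and _PICK_EVEN() (offset = base + index * stride), _PAT_INDEX() maps
 * table indices to register offsets as
 *
 *	_PAT_INDEX(0)  = 0x4800		_PAT_INDEX(7)  = 0x481c
 *	_PAT_INDEX(8)  = 0x4848		_PAT_INDEX(31) = 0x48a4
 *
 * i.e. indices 0-7 step by 4 from 0x4800 and indices 8-31 step by 4
 * from 0x4848.
 */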

#define XE2_NO_PROMOTE				REG_BIT(10)
#define XE2_COMP_EN				REG_BIT(9)
#define XE2_L3_CLOS				REG_GENMASK(7, 6)
#define XE2_L3_POLICY				REG_GENMASK(5, 4)
#define XE2_L4_POLICY				REG_GENMASK(3, 2)
#define XE2_COH_MODE				REG_GENMASK(1, 0)

#define XELPG_L4_POLICY_MASK			REG_GENMASK(3, 2)
#define XELPG_PAT_3_UC				REG_FIELD_PREP(XELPG_L4_POLICY_MASK, 3)
#define XELPG_PAT_1_WT				REG_FIELD_PREP(XELPG_L4_POLICY_MASK, 1)
#define XELPG_PAT_0_WB				REG_FIELD_PREP(XELPG_L4_POLICY_MASK, 0)
#define XELPG_INDEX_COH_MODE_MASK		REG_GENMASK(1, 0)
#define XELPG_3_COH_2W				REG_FIELD_PREP(XELPG_INDEX_COH_MODE_MASK, 3)
#define XELPG_2_COH_1W				REG_FIELD_PREP(XELPG_INDEX_COH_MODE_MASK, 2)
#define XELPG_0_COH_NON				REG_FIELD_PREP(XELPG_INDEX_COH_MODE_MASK, 0)

#define XEHPC_CLOS_LEVEL_MASK			REG_GENMASK(3, 2)
#define XEHPC_PAT_CLOS(x)			REG_FIELD_PREP(XEHPC_CLOS_LEVEL_MASK, x)

#define XELP_MEM_TYPE_MASK			REG_GENMASK(1, 0)
#define XELP_PAT_WB				REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 3)
#define XELP_PAT_WT				REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 2)
#define XELP_PAT_WC				REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 1)
#define XELP_PAT_UC				REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 0)

static const char *XELP_MEM_TYPE_STR_MAP[] = { "UC", "WC", "WT", "WB" };

struct xe_pat_ops {
	void (*program_graphics)(struct xe_gt *gt, const struct xe_pat_table_entry table[],
				 int n_entries);
	void (*program_media)(struct xe_gt *gt, const struct xe_pat_table_entry table[],
			      int n_entries);
	void (*dump)(struct xe_gt *gt, struct drm_printer *p);
};

static const struct xe_pat_table_entry xelp_pat_table[] = {
	[0] = { XELP_PAT_WB, XE_COH_AT_LEAST_1WAY },
	[1] = { XELP_PAT_WC, XE_COH_NONE },
	[2] = { XELP_PAT_WT, XE_COH_NONE },
	[3] = { XELP_PAT_UC, XE_COH_NONE },
};

static const struct xe_pat_table_entry xehpc_pat_table[] = {
	[0] = { XELP_PAT_UC, XE_COH_NONE },
	[1] = { XELP_PAT_WC, XE_COH_NONE },
	[2] = { XELP_PAT_WT, XE_COH_NONE },
	[3] = { XELP_PAT_WB, XE_COH_AT_LEAST_1WAY },
	[4] = { XEHPC_PAT_CLOS(1) | XELP_PAT_WT, XE_COH_NONE },
	[5] = { XEHPC_PAT_CLOS(1) | XELP_PAT_WB, XE_COH_AT_LEAST_1WAY },
	[6] = { XEHPC_PAT_CLOS(2) | XELP_PAT_WT, XE_COH_NONE },
	[7] = { XEHPC_PAT_CLOS(2) | XELP_PAT_WB, XE_COH_AT_LEAST_1WAY },
};

static const struct xe_pat_table_entry xelpg_pat_table[] = {
	[0] = { XELPG_PAT_0_WB, XE_COH_NONE },
	[1] = { XELPG_PAT_1_WT, XE_COH_NONE },
	[2] = { XELPG_PAT_3_UC, XE_COH_NONE },
	[3] = { XELPG_PAT_0_WB | XELPG_2_COH_1W, XE_COH_AT_LEAST_1WAY },
	[4] = { XELPG_PAT_0_WB | XELPG_3_COH_2W, XE_COH_AT_LEAST_1WAY },
};

/*
 * The Xe2 table is getting large/complicated, so it's easier to review if
 * provided in a form that exactly matches the bspec's formatting.  The
 * meanings of the fields here are:
 *   - no_promote:  0=promotable, 1=no promote
 *   - comp_en:     0=disable, 1=enable
 *   - l3clos:      L3 class of service (0-3)
 *   - l3_policy:   0=WB, 1=XD ("WB - Transient Display"), 3=UC
 *   - l4_policy:   0=WB, 1=WT, 3=UC
 *   - coh_mode:    0=no snoop, 2=1-way coherent, 3=2-way coherent
 *
 * Reserved entries should be programmed with the maximum caching, minimum
 * coherency (which matches an all-0's encoding), so we can just omit them
 * in the table.
 *
 * Note: There is an implicit assumption in the driver that compression and
 * coh_1way+ are mutually exclusive.  If this ever ceases to hold, userptr
 * and dma-buf objects imported from external devices will have uncleared
 * CCS state.
 */
#define XE2_PAT(no_promote, comp_en, l3clos, l3_policy, l4_policy, __coh_mode) \
	{ \
		.value = (no_promote ? XE2_NO_PROMOTE : 0) | \
			(comp_en ? XE2_COMP_EN : 0) | \
			REG_FIELD_PREP(XE2_L3_CLOS, l3clos) | \
			REG_FIELD_PREP(XE2_L3_POLICY, l3_policy) | \
			REG_FIELD_PREP(XE2_L4_POLICY, l4_policy) | \
			REG_FIELD_PREP(XE2_COH_MODE, __coh_mode), \
		.coh_mode = (BUILD_BUG_ON_ZERO(__coh_mode && comp_en) || __coh_mode) ? \
			XE_COH_AT_LEAST_1WAY : XE_COH_NONE \
	}
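
/*
 * Worked example of the macro above: XE2_PAT( 0, 0, 0, 3, 0, 2 )
 * (index 4 in the table below) expands to
 *
 *	.value = REG_FIELD_PREP(XE2_L3_POLICY, 3) |
 *		 REG_FIELD_PREP(XE2_COH_MODE, 2) = 0x32
 *
 * i.e. L3:UC, L4:WB, 1-way coherent, with .coh_mode = XE_COH_AT_LEAST_1WAY.
 */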

static const struct xe_pat_table_entry xe2_pat_table[] = {
	[ 0] = XE2_PAT( 0, 0, 0, 0, 3, 0 ),
	[ 1] = XE2_PAT( 0, 0, 0, 0, 3, 2 ),
	[ 2] = XE2_PAT( 0, 0, 0, 0, 3, 3 ),
	[ 3] = XE2_PAT( 0, 0, 0, 3, 3, 0 ),
	[ 4] = XE2_PAT( 0, 0, 0, 3, 0, 2 ),
	[ 5] = XE2_PAT( 0, 0, 0, 3, 3, 2 ),
	[ 6] = XE2_PAT( 1, 0, 0, 1, 3, 0 ),
	[ 7] = XE2_PAT( 0, 0, 0, 3, 0, 3 ),
	[ 8] = XE2_PAT( 0, 0, 0, 3, 0, 0 ),
	[ 9] = XE2_PAT( 0, 1, 0, 0, 3, 0 ),
	[10] = XE2_PAT( 0, 1, 0, 3, 0, 0 ),
	[11] = XE2_PAT( 1, 1, 0, 1, 3, 0 ),
	[12] = XE2_PAT( 0, 1, 0, 3, 3, 0 ),
	[13] = XE2_PAT( 0, 0, 0, 0, 0, 0 ),
	[14] = XE2_PAT( 0, 1, 0, 0, 0, 0 ),
	[15] = XE2_PAT( 1, 1, 0, 1, 1, 0 ),
	/* 16..19 are reserved; leave set to all 0's */
	[20] = XE2_PAT( 0, 0, 1, 0, 3, 0 ),
	[21] = XE2_PAT( 0, 1, 1, 0, 3, 0 ),
	[22] = XE2_PAT( 0, 0, 1, 0, 3, 2 ),
	[23] = XE2_PAT( 0, 0, 1, 0, 3, 3 ),
	[24] = XE2_PAT( 0, 0, 2, 0, 3, 0 ),
	[25] = XE2_PAT( 0, 1, 2, 0, 3, 0 ),
	[26] = XE2_PAT( 0, 0, 2, 0, 3, 2 ),
	[27] = XE2_PAT( 0, 0, 2, 0, 3, 3 ),
	[28] = XE2_PAT( 0, 0, 3, 0, 3, 0 ),
	[29] = XE2_PAT( 0, 1, 3, 0, 3, 0 ),
	[30] = XE2_PAT( 0, 0, 3, 0, 3, 2 ),
	[31] = XE2_PAT( 0, 0, 3, 0, 3, 3 ),
};

/* Special PAT values programmed outside the main table */
static const struct xe_pat_table_entry xe2_pat_ats = XE2_PAT( 0, 0, 0, 0, 3, 3 );
static const struct xe_pat_table_entry xe2_pat_pta = XE2_PAT( 0, 0, 0, 0, 3, 0 );

u16 xe_pat_index_get_coh_mode(struct xe_device *xe, u16 pat_index)
{
	WARN_ON(pat_index >= xe->pat.n_entries);
	return xe->pat.table[pat_index].coh_mode;
}
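
/*
 * Illustrative use of the helper above (a sketch, not actual driver code):
 * a caller deciding whether CPU caches must be flushed for a mapping could
 * do something like
 *
 *	if (xe_pat_index_get_coh_mode(xe, pat_index) == XE_COH_NONE)
 *		flush_cpu_caches(bo);
 *
 * where flush_cpu_caches() stands in for whatever flushing mechanism the
 * caller uses.
 */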

static void program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
			int n_entries)
{
	for (int i = 0; i < n_entries; i++) {
		struct xe_reg reg = XE_REG(_PAT_INDEX(i));

		xe_mmio_write32(&gt->mmio, reg, table[i].value);
	}
}

static void program_pat_mcr(struct xe_gt *gt, const struct xe_pat_table_entry table[],
			    int n_entries)
{
	for (int i = 0; i < n_entries; i++) {
		struct xe_reg_mcr reg_mcr = XE_REG_MCR(_PAT_INDEX(i));

		xe_gt_mcr_multicast_write(gt, reg_mcr, table[i].value);
	}
}

static void xelp_dump(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_device *xe = gt_to_xe(gt);
	int i, err;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		goto err_fw;

	drm_printf(p, "PAT table:\n");

	for (i = 0; i < xe->pat.n_entries; i++) {
		u32 pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_INDEX(i)));
		u8 mem_type = REG_FIELD_GET(XELP_MEM_TYPE_MASK, pat);

		drm_printf(p, "PAT[%2d] = %s (%#8x)\n", i,
			   XELP_MEM_TYPE_STR_MAP[mem_type], pat);
	}

	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_fw:
	xe_assert(xe, !err);
}
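
/*
 * For a GT programmed with xelp_pat_table, the dump above produces lines
 * of the form (derived from the format string and the table values):
 *
 *	PAT[ 0] = WB (     0x3)
 *	PAT[ 1] = WC (     0x1)
 */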

static const struct xe_pat_ops xelp_pat_ops = {
	.program_graphics = program_pat,
	.dump = xelp_dump,
};

static void xehp_dump(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_device *xe = gt_to_xe(gt);
	int i, err;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		goto err_fw;

	drm_printf(p, "PAT table:\n");

	for (i = 0; i < xe->pat.n_entries; i++) {
		u32 pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));
		u8 mem_type = REG_FIELD_GET(XELP_MEM_TYPE_MASK, pat);

		drm_printf(p, "PAT[%2d] = %s (%#8x)\n", i,
			   XELP_MEM_TYPE_STR_MAP[mem_type], pat);
	}

	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_fw:
	xe_assert(xe, !err);
}

static const struct xe_pat_ops xehp_pat_ops = {
	.program_graphics = program_pat_mcr,
	.dump = xehp_dump,
};

static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_device *xe = gt_to_xe(gt);
	int i, err;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		goto err_fw;

	drm_printf(p, "PAT table:\n");

	for (i = 0; i < xe->pat.n_entries; i++) {
		u32 pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));

		drm_printf(p, "PAT[%2d] = [ %u, %u ] (%#8x)\n", i,
			   REG_FIELD_GET(XELP_MEM_TYPE_MASK, pat),
			   REG_FIELD_GET(XEHPC_CLOS_LEVEL_MASK, pat), pat);
	}

	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_fw:
	xe_assert(xe, !err);
}

static const struct xe_pat_ops xehpc_pat_ops = {
	.program_graphics = program_pat_mcr,
	.dump = xehpc_dump,
};

static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_device *xe = gt_to_xe(gt);
	int i, err;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		goto err_fw;

	drm_printf(p, "PAT table:\n");

	for (i = 0; i < xe->pat.n_entries; i++) {
		u32 pat;

		if (xe_gt_is_media_type(gt))
			pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_INDEX(i)));
		else
			pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));

		drm_printf(p, "PAT[%2d] = [ %u, %u ] (%#8x)\n", i,
			   REG_FIELD_GET(XELPG_L4_POLICY_MASK, pat),
			   REG_FIELD_GET(XELPG_INDEX_COH_MODE_MASK, pat), pat);
	}

	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_fw:
	xe_assert(xe, !err);
}

/*
 * SAMedia register offsets are adjusted by the write methods; they target
 * registers that are not MCR, while for the normal GT they are MCR.
 */
static const struct xe_pat_ops xelpg_pat_ops = {
	.program_graphics = program_pat,
	.program_media = program_pat_mcr,
	.dump = xelpg_dump,
};

static void xe2lpg_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
			       int n_entries)
{
	program_pat_mcr(gt, table, n_entries);
	xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_ATS), xe2_pat_ats.value);

	if (IS_DGFX(gt_to_xe(gt)))
		xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_PTA), xe2_pat_pta.value);
}

static void xe2lpm_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
			       int n_entries)
{
	program_pat(gt, table, n_entries);
	xe_mmio_write32(&gt->mmio, XE_REG(_PAT_ATS), xe2_pat_ats.value);

	if (IS_DGFX(gt_to_xe(gt)))
		xe_mmio_write32(&gt->mmio, XE_REG(_PAT_PTA), xe2_pat_pta.value);
}

static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_device *xe = gt_to_xe(gt);
	int i, err;
	u32 pat;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		goto err_fw;

	drm_printf(p, "PAT table:\n");

	for (i = 0; i < xe->pat.n_entries; i++) {
		if (xe_gt_is_media_type(gt))
			pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_INDEX(i)));
		else
			pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));

		drm_printf(p, "PAT[%2d] = [ %u, %u, %u, %u, %u, %u ]  (%#8x)\n", i,
			   !!(pat & XE2_NO_PROMOTE),
			   !!(pat & XE2_COMP_EN),
			   REG_FIELD_GET(XE2_L3_CLOS, pat),
			   REG_FIELD_GET(XE2_L3_POLICY, pat),
			   REG_FIELD_GET(XE2_L4_POLICY, pat),
			   REG_FIELD_GET(XE2_COH_MODE, pat),
			   pat);
	}

	/*
	 * Also print PTA_MODE, which describes how the hardware accesses
	 * PPGTT entries.
	 */
	if (xe_gt_is_media_type(gt))
		pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_PTA));
	else
		pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_PTA));

	drm_printf(p, "Page Table Access:\n");
	drm_printf(p, "PTA_MODE= [ %u, %u, %u, %u, %u, %u ]  (%#8x)\n",
		   !!(pat & XE2_NO_PROMOTE),
		   !!(pat & XE2_COMP_EN),
		   REG_FIELD_GET(XE2_L3_CLOS, pat),
		   REG_FIELD_GET(XE2_L3_POLICY, pat),
		   REG_FIELD_GET(XE2_L4_POLICY, pat),
		   REG_FIELD_GET(XE2_COH_MODE, pat),
		   pat);

	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_fw:
	xe_assert(xe, !err);
}

static const struct xe_pat_ops xe2_pat_ops = {
	.program_graphics = xe2lpg_program_pat,
	.program_media = xe2lpm_program_pat,
	.dump = xe2_dump,
};

void xe_pat_init_early(struct xe_device *xe)
{
	if (GRAPHICS_VER(xe) == 30 || GRAPHICS_VER(xe) == 20) {
		xe->pat.ops = &xe2_pat_ops;
		xe->pat.table = xe2_pat_table;

		/* Wa_16023588340. XXX: Should use XE_WA */
		if (GRAPHICS_VERx100(xe) == 2001)
			xe->pat.n_entries = 28; /* Disable CLOS3 */
		else
			xe->pat.n_entries = ARRAY_SIZE(xe2_pat_table);

		xe->pat.idx[XE_CACHE_NONE] = 3;
		xe->pat.idx[XE_CACHE_WT] = 15;
		xe->pat.idx[XE_CACHE_WB] = 2;
		xe->pat.idx[XE_CACHE_NONE_COMPRESSION] = 12; /* Applicable on Xe2 and beyond */
	} else if (xe->info.platform == XE_METEORLAKE) {
		xe->pat.ops = &xelpg_pat_ops;
		xe->pat.table = xelpg_pat_table;
		xe->pat.n_entries = ARRAY_SIZE(xelpg_pat_table);
		xe->pat.idx[XE_CACHE_NONE] = 2;
		xe->pat.idx[XE_CACHE_WT] = 1;
		xe->pat.idx[XE_CACHE_WB] = 3;
	} else if (xe->info.platform == XE_PVC) {
		xe->pat.ops = &xehpc_pat_ops;
		xe->pat.table = xehpc_pat_table;
		xe->pat.n_entries = ARRAY_SIZE(xehpc_pat_table);
		xe->pat.idx[XE_CACHE_NONE] = 0;
		xe->pat.idx[XE_CACHE_WT] = 2;
		xe->pat.idx[XE_CACHE_WB] = 3;
	} else if (xe->info.platform == XE_DG2) {
		/*
		 * The table is the same as on previous platforms, but the
		 * programming method has changed.
		 */
		xe->pat.ops = &xehp_pat_ops;
		xe->pat.table = xelp_pat_table;
		xe->pat.n_entries = ARRAY_SIZE(xelp_pat_table);
		xe->pat.idx[XE_CACHE_NONE] = 3;
		xe->pat.idx[XE_CACHE_WT] = 2;
		xe->pat.idx[XE_CACHE_WB] = 0;
	} else if (GRAPHICS_VERx100(xe) <= 1210) {
		WARN_ON_ONCE(!IS_DGFX(xe) && !xe->info.has_llc);
		xe->pat.ops = &xelp_pat_ops;
		xe->pat.table = xelp_pat_table;
		xe->pat.n_entries = ARRAY_SIZE(xelp_pat_table);
		xe->pat.idx[XE_CACHE_NONE] = 3;
		xe->pat.idx[XE_CACHE_WT] = 2;
		xe->pat.idx[XE_CACHE_WB] = 0;
	} else {
		/*
		 * Going forward we expect to need new PAT settings for most
		 * new platforms; failure to provide a new table can easily
		 * lead to subtle, hard-to-debug problems.  If none of the
		 * conditions above match the platform we're running on we'll
		 * raise an error rather than trying to silently inherit the
		 * most recent platform's behavior.
		 */
		drm_err(&xe->drm, "Missing PAT table for platform with graphics version %d.%02d!\n",
			GRAPHICS_VER(xe), GRAPHICS_VERx100(xe) % 100);
	}

	/* VFs can neither program nor dump PAT settings */
	if (IS_SRIOV_VF(xe))
		xe->pat.ops = NULL;

	xe_assert(xe, !xe->pat.ops || xe->pat.ops->dump);
	xe_assert(xe, !xe->pat.ops || xe->pat.ops->program_graphics);
	xe_assert(xe, !xe->pat.ops || MEDIA_VER(xe) < 13 || xe->pat.ops->program_media);
}
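
/*
 * Sketch of how the idx[] mapping initialized above is consumed elsewhere
 * in the driver (illustrative, not a quote of driver code): a default PAT
 * index for a cached kernel mapping can be looked up as
 *
 *	u16 pat_index = xe->pat.idx[XE_CACHE_WB];
 *
 * and then encoded into the PTEs of the object's GPU mapping.
 */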

void xe_pat_init(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	if (!xe->pat.ops)
		return;

	if (xe_gt_is_media_type(gt))
		xe->pat.ops->program_media(gt, xe->pat.table, xe->pat.n_entries);
	else
		xe->pat.ops->program_graphics(gt, xe->pat.table, xe->pat.n_entries);
}

void xe_pat_dump(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_device *xe = gt_to_xe(gt);

	if (!xe->pat.ops)
		return;

	xe->pat.ops->dump(gt, p);
}