xref: /linux/drivers/gpu/drm/xe/xe_reg_whitelist.c (revision 06bc7ff0a1e0f2b0102e1314e3527a7ec0997851)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 
6 #include "xe_reg_whitelist.h"
7 
8 #include "regs/xe_engine_regs.h"
9 #include "regs/xe_gt_regs.h"
10 #include "regs/xe_oa_regs.h"
11 #include "xe_device.h"
12 #include "xe_gt_types.h"
13 #include "xe_gt_printk.h"
14 #include "xe_platform_types.h"
15 #include "xe_reg_sr.h"
16 #include "xe_rtp.h"
17 #include "xe_step.h"
18 
19 #undef XE_REG_MCR
20 #define XE_REG_MCR(...)     XE_REG(__VA_ARGS__, .mcr = 1)
21 
match_not_render(const struct xe_device * xe,const struct xe_gt * gt,const struct xe_hw_engine * hwe)22 static bool match_not_render(const struct xe_device *xe,
23 			     const struct xe_gt *gt,
24 			     const struct xe_hw_engine *hwe)
25 {
26 	return hwe->class != XE_ENGINE_CLASS_RENDER;
27 }
28 
/* RTP rule helper: matches when the device has a MERT. gt/hwe are unused. */
static bool match_has_mert(const struct xe_device *xe,
			   const struct xe_gt *gt,
			   const struct xe_hw_engine *hwe)
{
	/*
	 * NOTE(review): the const is cast away because xe_device_has_mert()
	 * takes a non-const pointer while the match-callback prototype
	 * const-qualifies @xe — presumably the helper only reads device
	 * state; confirm it has no side effects.
	 */
	return xe_device_has_mert((struct xe_device *)xe);
}
35 
/*
 * Table of registers to expose to userspace via FORCE_TO_NONPRIV
 * whitelisting, processed through the xe RTP machinery.  Each entry names
 * the workaround/feature it serves, the platform/engine rules under which
 * it applies, and the whitelist action(s): register plus access/range flags.
 */
static const struct xe_rtp_entry_sr register_whitelist[] = {
	{ XE_RTP_NAME("WaAllowPMDepthAndInvocationCountAccessFromUMD, 1408556865"),
	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
	  XE_RTP_ACTIONS(WHITELIST(PS_INVOCATION_COUNT,
				   RING_FORCE_TO_NONPRIV_ACCESS_RD |
				   RING_FORCE_TO_NONPRIV_RANGE_4))
	},
	{ XE_RTP_NAME("1508744258, 14012131227, 1808121037"),
	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
	  XE_RTP_ACTIONS(WHITELIST(COMMON_SLICE_CHICKEN1, 0))
	},
	{ XE_RTP_NAME("1806527549"),
	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
	  XE_RTP_ACTIONS(WHITELIST(HIZ_CHICKEN, 0))
	},
	{ XE_RTP_NAME("allow_read_ctx_timestamp"),
	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1260), FUNC(match_not_render)),
	  XE_RTP_ACTIONS(WHITELIST(RING_CTX_TIMESTAMP(0),
				RING_FORCE_TO_NONPRIV_ACCESS_RD,
				XE_RTP_ACTION_FLAG(ENGINE_BASE)))
	},
	{ XE_RTP_NAME("16014440446"),
	  XE_RTP_RULES(PLATFORM(PVC)),
	  XE_RTP_ACTIONS(WHITELIST(XE_REG(0x4400),
				   RING_FORCE_TO_NONPRIV_DENY |
				   RING_FORCE_TO_NONPRIV_RANGE_64),
			 WHITELIST(XE_REG(0x4500),
				   RING_FORCE_TO_NONPRIV_DENY |
				   RING_FORCE_TO_NONPRIV_RANGE_64))
	},
	{ XE_RTP_NAME("16017236439"),
	  XE_RTP_RULES(PLATFORM(PVC), ENGINE_CLASS(COPY)),
	  XE_RTP_ACTIONS(WHITELIST(BCS_SWCTRL(0),
				   RING_FORCE_TO_NONPRIV_DENY,
				   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
	},
	{ XE_RTP_NAME("16020183090"),
	  XE_RTP_RULES(GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0),
		       ENGINE_CLASS(RENDER)),
	  XE_RTP_ACTIONS(WHITELIST(CSBE_DEBUG_STATUS(RENDER_RING_BASE), 0))
	},
	{ XE_RTP_NAME("14024997852"),
	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 3005), ENGINE_CLASS(RENDER)),
	  XE_RTP_ACTIONS(WHITELIST(FF_MODE,
				   RING_FORCE_TO_NONPRIV_ACCESS_RW),
			 WHITELIST(VFLSKPD,
				   RING_FORCE_TO_NONPRIV_ACCESS_RW))
	},
	{ XE_RTP_NAME("14024997852"),
	  XE_RTP_RULES(GRAPHICS_VERSION(3510), GRAPHICS_STEP(A0, B0),
		       ENGINE_CLASS(RENDER)),
	  XE_RTP_ACTIONS(WHITELIST(FF_MODE,
				   RING_FORCE_TO_NONPRIV_ACCESS_RW),
			 WHITELIST(VFLSKPD,
				   RING_FORCE_TO_NONPRIV_ACCESS_RW))
	},

/*
 * Helper for OA (Observation Architecture) MMIO-trigger whitelisting:
 * expose the trigger register read/write, the status register read-only,
 * and the head pointer read-only as a 4-dword range.
 */
#define WHITELIST_OA_MMIO_TRG(trg, status, head) \
	WHITELIST(trg, RING_FORCE_TO_NONPRIV_ACCESS_RW), \
	WHITELIST(status, RING_FORCE_TO_NONPRIV_ACCESS_RD), \
	WHITELIST(head, RING_FORCE_TO_NONPRIV_ACCESS_RD | RING_FORCE_TO_NONPRIV_RANGE_4)

/* OAG (global OA) trigger/status/head registers */
#define WHITELIST_OAG_MMIO_TRG \
	WHITELIST_OA_MMIO_TRG(OAG_MMIOTRIGGER, OAG_OASTATUS, OAG_OAHEADPTR)

/* OAM (media OA) registers for the SAG and both SCMI units */
#define WHITELIST_OAM_MMIO_TRG \
	WHITELIST_OA_MMIO_TRG(OAM_MMIO_TRG(XE_OAM_SAG_BASE_ADJ), \
			      OAM_STATUS(XE_OAM_SAG_BASE_ADJ), \
			      OAM_HEAD_POINTER(XE_OAM_SAG_BASE_ADJ)), \
	WHITELIST_OA_MMIO_TRG(OAM_MMIO_TRG(XE_OAM_SCMI_0_BASE_ADJ), \
			      OAM_STATUS(XE_OAM_SCMI_0_BASE_ADJ), \
			      OAM_HEAD_POINTER(XE_OAM_SCMI_0_BASE_ADJ)), \
	WHITELIST_OA_MMIO_TRG(OAM_MMIO_TRG(XE_OAM_SCMI_1_BASE_ADJ), \
			      OAM_STATUS(XE_OAM_SCMI_1_BASE_ADJ), \
			      OAM_HEAD_POINTER(XE_OAM_SCMI_1_BASE_ADJ))

/* OA MERT trigger/status/head registers (devices with a MERT only) */
#define WHITELIST_OA_MERT_MMIO_TRG \
	WHITELIST_OA_MMIO_TRG(OAMERT_MMIO_TRG, OAMERT_STATUS, OAMERT_HEAD_POINTER)

	{ XE_RTP_NAME("oag_mmio_trg_rcs"),
	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED),
		       ENGINE_CLASS(RENDER)),
	  XE_RTP_ACTIONS(WHITELIST_OAG_MMIO_TRG)
	},
	{ XE_RTP_NAME("oag_mmio_trg_ccs"),
	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED),
		       ENGINE_CLASS(COMPUTE)),
	  XE_RTP_ACTIONS(WHITELIST_OAG_MMIO_TRG)
	},
	{ XE_RTP_NAME("oam_mmio_trg_vcs"),
	  XE_RTP_RULES(MEDIA_VERSION_RANGE(1300, XE_RTP_END_VERSION_UNDEFINED),
		       ENGINE_CLASS(VIDEO_DECODE)),
	  XE_RTP_ACTIONS(WHITELIST_OAM_MMIO_TRG)
	},
	{ XE_RTP_NAME("oam_mmio_trg_vecs"),
	  XE_RTP_RULES(MEDIA_VERSION_RANGE(1300, XE_RTP_END_VERSION_UNDEFINED),
		       ENGINE_CLASS(VIDEO_ENHANCE)),
	  XE_RTP_ACTIONS(WHITELIST_OAM_MMIO_TRG)
	},
	{ XE_RTP_NAME("oa_mert_mmio_trg_ccs"),
	  XE_RTP_RULES(FUNC(match_has_mert), ENGINE_CLASS(COMPUTE)),
	  XE_RTP_ACTIONS(WHITELIST_OA_MERT_MMIO_TRG)
	},
	{ XE_RTP_NAME("oa_mert_mmio_trg_bcs"),
	  XE_RTP_RULES(FUNC(match_has_mert), ENGINE_CLASS(COPY)),
	  XE_RTP_ACTIONS(WHITELIST_OA_MERT_MMIO_TRG)
	},
};
144 
whitelist_apply_to_hwe(struct xe_hw_engine * hwe)145 static void whitelist_apply_to_hwe(struct xe_hw_engine *hwe)
146 {
147 	struct xe_reg_sr *sr = &hwe->reg_whitelist;
148 	struct xe_reg_sr_entry *entry;
149 	struct drm_printer p;
150 	unsigned long reg;
151 	unsigned int slot;
152 
153 	xe_gt_dbg(hwe->gt, "Add %s whitelist to engine\n", sr->name);
154 	p = xe_gt_dbg_printer(hwe->gt);
155 
156 	slot = 0;
157 	xa_for_each(&sr->xa, reg, entry) {
158 		struct xe_reg_sr_entry hwe_entry = {
159 			.reg = RING_FORCE_TO_NONPRIV(hwe->mmio_base, slot),
160 			.set_bits = entry->reg.addr | entry->set_bits,
161 			.clr_bits = ~0u,
162 			.read_mask = entry->read_mask,
163 		};
164 
165 		if (slot == RING_MAX_NONPRIV_SLOTS) {
166 			xe_gt_err(hwe->gt,
167 				  "hwe %s: maximum register whitelist slots (%d) reached, refusing to add more\n",
168 				  hwe->name, RING_MAX_NONPRIV_SLOTS);
169 			break;
170 		}
171 
172 		xe_reg_whitelist_print_entry(&p, 0, reg, entry);
173 		xe_reg_sr_add(&hwe->reg_sr, &hwe_entry, hwe->gt);
174 
175 		slot++;
176 	}
177 }
178 
/**
 * xe_reg_whitelist_process_engine - process table of registers to whitelist
 * @hwe: engine instance to process whitelist for
 *
 * Process whitelist table for this platform, saving in @hwe all the
 * registers that need to be whitelisted by the hardware so they can be
 * accessed by userspace.
 */
void xe_reg_whitelist_process_engine(struct xe_hw_engine *hwe)
{
	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);

	/* Collect the entries matching this platform/engine ... */
	xe_rtp_process_to_sr(&ctx, register_whitelist, ARRAY_SIZE(register_whitelist),
			     &hwe->reg_whitelist, false);
	/* ... then translate them into FORCE_TO_NONPRIV slot programming */
	whitelist_apply_to_hwe(hwe);
}
195 
196 /**
197  * xe_reg_whitelist_print_entry - print one whitelist entry
198  * @p: DRM printer
199  * @indent: indent level
200  * @reg: register allowed/denied
201  * @entry: save-restore entry
202  *
203  * Print details about the entry added to allow/deny access
204  */
xe_reg_whitelist_print_entry(struct drm_printer * p,unsigned int indent,u32 reg,struct xe_reg_sr_entry * entry)205 void xe_reg_whitelist_print_entry(struct drm_printer *p, unsigned int indent,
206 				  u32 reg, struct xe_reg_sr_entry *entry)
207 {
208 	u32 val = entry->set_bits;
209 	const char *access_str = "(invalid)";
210 	unsigned int range_bit = 2;
211 	u32 range_start, range_end;
212 	bool deny;
213 
214 	deny = val & RING_FORCE_TO_NONPRIV_DENY;
215 
216 	switch (val & RING_FORCE_TO_NONPRIV_RANGE_MASK) {
217 	case RING_FORCE_TO_NONPRIV_RANGE_4:
218 		range_bit = 4;
219 		break;
220 	case RING_FORCE_TO_NONPRIV_RANGE_16:
221 		range_bit = 6;
222 		break;
223 	case RING_FORCE_TO_NONPRIV_RANGE_64:
224 		range_bit = 8;
225 		break;
226 	}
227 
228 	range_start = reg & REG_GENMASK(25, range_bit);
229 	range_end = range_start | REG_GENMASK(range_bit - 1, 0);
230 
231 	switch (val & RING_FORCE_TO_NONPRIV_ACCESS_MASK) {
232 	case RING_FORCE_TO_NONPRIV_ACCESS_RW:
233 		access_str = "rw";
234 		break;
235 	case RING_FORCE_TO_NONPRIV_ACCESS_RD:
236 		access_str = "read";
237 		break;
238 	case RING_FORCE_TO_NONPRIV_ACCESS_WR:
239 		access_str = "write";
240 		break;
241 	}
242 
243 	drm_printf_indent(p, indent, "REG[0x%x-0x%x]: %s %s access\n",
244 			  range_start, range_end,
245 			  deny ? "deny" : "allow",
246 			  access_str);
247 }
248 
249 /**
250  * xe_reg_whitelist_dump - print all whitelist entries
251  * @sr: Save/restore entries
252  * @p: DRM printer
253  */
xe_reg_whitelist_dump(struct xe_reg_sr * sr,struct drm_printer * p)254 void xe_reg_whitelist_dump(struct xe_reg_sr *sr, struct drm_printer *p)
255 {
256 	struct xe_reg_sr_entry *entry;
257 	unsigned long reg;
258 
259 	if (!sr->name || xa_empty(&sr->xa))
260 		return;
261 
262 	drm_printf(p, "%s\n", sr->name);
263 	xa_for_each(&sr->xa, reg, entry)
264 		xe_reg_whitelist_print_entry(p, 1, reg, entry);
265 }
266