// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_rtp.h"

#include <kunit/visibility.h>

#include <uapi/drm/xe_drm.h>

#include "xe_gt.h"
#include "xe_gt_topology.h"
#include "xe_macros.h"
#include "xe_reg_sr.h"
#include "xe_sriov.h"

/**
 * DOC: Register Table Processing
 *
 * Internal infrastructure to define how registers should be updated based on
 * rules and actions. This can be used to define tables with multiple entries
 * (one per register) that are walked at a later point in time to apply the
 * values to the registers whose rules match.
 */
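
/*
 * Illustrative sketch of a table consumed by this infrastructure. The
 * workaround name, register and bit used here are hypothetical, not taken
 * from a real table:
 *
 *	static const struct xe_rtp_entry_sr example_entries[] = {
 *		{ XE_RTP_NAME("1234567890"),
 *		  XE_RTP_RULES(PLATFORM(DG2), ENGINE_CLASS(RENDER)),
 *		  XE_RTP_ACTIONS(SET(EXAMPLE_REG, EXAMPLE_ENABLE_BIT))
 *		},
 *		{}
 *	};
 *
 * Such a table would typically be processed with xe_rtp_process_to_sr(),
 * defined below.
 */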

/* Platforms with standalone media have their media IP on a separate GT */
static bool has_samedia(const struct xe_device *xe)
{
	return xe->info.media_verx100 >= 1300;
}

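/*
 * Note on the semantics implemented by rule_matches() below: within an entry,
 * consecutive rules are ANDed together, while an OR rule (XE_RTP_MATCH_OR)
 * separates alternative sets of conditions. A hypothetical rule list like
 *
 *	XE_RTP_RULES(PLATFORM(DG2), OR, PLATFORM(TIGERLAKE))
 *
 * therefore matches on either platform.
 */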
static bool rule_matches(const struct xe_device *xe,
			 struct xe_gt *gt,
			 struct xe_hw_engine *hwe,
			 const struct xe_rtp_rule *rules,
			 unsigned int n_rules)
{
	const struct xe_rtp_rule *r;
	unsigned int i, rcount = 0;
	bool match;

	for (r = rules, i = 0; i < n_rules; r = &rules[++i]) {
		switch (r->match_type) {
		case XE_RTP_MATCH_OR:
			/*
			 * This is only reached if a complete set of
			 * rules passed or none were evaluated. For both cases,
			 * shortcut the other rules and return the proper value.
			 */
			goto done;
		case XE_RTP_MATCH_PLATFORM:
			match = xe->info.platform == r->platform;
			break;
		case XE_RTP_MATCH_SUBPLATFORM:
			match = xe->info.platform == r->platform &&
				xe->info.subplatform == r->subplatform;
			break;
		case XE_RTP_MATCH_GRAPHICS_VERSION:
			match = xe->info.graphics_verx100 == r->ver_start &&
				(!has_samedia(xe) || !xe_gt_is_media_type(gt));
			break;
		case XE_RTP_MATCH_GRAPHICS_VERSION_RANGE:
			match = xe->info.graphics_verx100 >= r->ver_start &&
				xe->info.graphics_verx100 <= r->ver_end &&
				(!has_samedia(xe) || !xe_gt_is_media_type(gt));
			break;
		case XE_RTP_MATCH_GRAPHICS_VERSION_ANY_GT:
			match = xe->info.graphics_verx100 == r->ver_start;
			break;
		case XE_RTP_MATCH_GRAPHICS_STEP:
			match = xe->info.step.graphics >= r->step_start &&
				xe->info.step.graphics < r->step_end &&
				(!has_samedia(xe) || !xe_gt_is_media_type(gt));
			break;
		case XE_RTP_MATCH_MEDIA_VERSION:
			match = xe->info.media_verx100 == r->ver_start &&
				(!has_samedia(xe) || xe_gt_is_media_type(gt));
			break;
		case XE_RTP_MATCH_MEDIA_VERSION_RANGE:
			match = xe->info.media_verx100 >= r->ver_start &&
				xe->info.media_verx100 <= r->ver_end &&
				(!has_samedia(xe) || xe_gt_is_media_type(gt));
			break;
		case XE_RTP_MATCH_MEDIA_STEP:
			match = xe->info.step.media >= r->step_start &&
				xe->info.step.media < r->step_end &&
				(!has_samedia(xe) || xe_gt_is_media_type(gt));
			break;
		case XE_RTP_MATCH_MEDIA_VERSION_ANY_GT:
			match = xe->info.media_verx100 == r->ver_start;
			break;
		case XE_RTP_MATCH_INTEGRATED:
			match = !xe->info.is_dgfx;
			break;
		case XE_RTP_MATCH_DISCRETE:
			match = xe->info.is_dgfx;
			break;
		case XE_RTP_MATCH_ENGINE_CLASS:
			if (drm_WARN_ON(&xe->drm, !hwe))
				return false;

			match = hwe->class == r->engine_class;
			break;
		case XE_RTP_MATCH_NOT_ENGINE_CLASS:
			if (drm_WARN_ON(&xe->drm, !hwe))
				return false;

			match = hwe->class != r->engine_class;
			break;
		case XE_RTP_MATCH_FUNC:
			match = r->match_func(gt, hwe);
			break;
		default:
			drm_warn(&xe->drm, "Invalid RTP match %u\n",
				 r->match_type);
			match = false;
		}

		if (!match) {
			/*
			 * Advance to the next XE_RTP_MATCH_OR, if any, so the
			 * next set of conditions can be evaluated
			 */
			while (++i < n_rules && rules[i].match_type != XE_RTP_MATCH_OR)
				;

			if (i >= n_rules)
				return false;

			rcount = 0;
		} else {
			rcount++;
		}
	}

done:
	if (drm_WARN_ON(&xe->drm, !rcount))
		return false;

	return true;
}

static void rtp_add_sr_entry(const struct xe_rtp_action *action,
			     struct xe_gt *gt,
			     u32 mmio_base,
			     struct xe_reg_sr *sr)
{
	struct xe_reg_sr_entry sr_entry = {
		.reg = action->reg,
		.clr_bits = action->clr_bits,
		.set_bits = action->set_bits,
		.read_mask = action->read_mask,
	};

	sr_entry.reg.addr += mmio_base;
	xe_reg_sr_add(sr, &sr_entry, gt);
}

static bool rtp_process_one_sr(const struct xe_rtp_entry_sr *entry,
			       struct xe_device *xe, struct xe_gt *gt,
			       struct xe_hw_engine *hwe, struct xe_reg_sr *sr)
{
	const struct xe_rtp_action *action;
	u32 mmio_base;
	unsigned int i;

	if (!rule_matches(xe, gt, hwe, entry->rules, entry->n_rules))
		return false;

	for (i = 0, action = &entry->actions[0]; i < entry->n_actions; action++, i++) {
		/* Engine-relative actions need the register offset by mmio_base */
		if ((entry->flags & XE_RTP_ENTRY_FLAG_FOREACH_ENGINE) ||
		    (action->flags & XE_RTP_ACTION_FLAG_ENGINE_BASE))
			mmio_base = hwe->mmio_base;
		else
			mmio_base = 0;

		rtp_add_sr_entry(action, gt, mmio_base, sr);
	}

	return true;
}

static void rtp_get_context(struct xe_rtp_process_ctx *ctx,
			    struct xe_hw_engine **hwe,
			    struct xe_gt **gt,
			    struct xe_device **xe)
{
	switch (ctx->type) {
	case XE_RTP_PROCESS_TYPE_GT:
		*hwe = NULL;
		*gt = ctx->gt;
		*xe = gt_to_xe(*gt);
		break;
	case XE_RTP_PROCESS_TYPE_ENGINE:
		*hwe = ctx->hwe;
		*gt = (*hwe)->gt;
		*xe = gt_to_xe(*gt);
		break;
	}
}

/**
 * xe_rtp_process_ctx_enable_active_tracking - Enable tracking of active entries
 * @ctx: The context for processing the table
 * @active_entries: bitmap to store the active entries
 * @n_entries: number of entries to be processed
 *
 * Set additional metadata to track what entries are considered "active", i.e.
 * their rules match the condition. Bits are never cleared: entries with
 * matching rules set the corresponding bit in the bitmap.
 */
void xe_rtp_process_ctx_enable_active_tracking(struct xe_rtp_process_ctx *ctx,
					       unsigned long *active_entries,
					       size_t n_entries)
{
	ctx->active_entries = active_entries;
	ctx->n_entries = n_entries;
}
EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_ctx_enable_active_tracking);
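
/*
 * Usage sketch for active tracking (illustrative only; the table and entry
 * count below are hypothetical):
 *
 *	DECLARE_BITMAP(active, N_EXAMPLE_ENTRIES);
 *	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
 *
 *	xe_rtp_process_ctx_enable_active_tracking(&ctx, active,
 *						  N_EXAMPLE_ENTRIES);
 *	xe_rtp_process(&ctx, example_entries);
 */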

static void rtp_mark_active(struct xe_device *xe,
			    struct xe_rtp_process_ctx *ctx,
			    unsigned int idx)
{
	if (!ctx->active_entries)
		return;

	if (drm_WARN_ON(&xe->drm, idx >= ctx->n_entries))
		return;

	bitmap_set(ctx->active_entries, idx, 1);
}

/**
 * xe_rtp_process_to_sr - Process all rtp @entries, adding the matching ones to
 *                        the save-restore argument.
 * @ctx: The context for processing the table, with one of device, gt or hwe
 * @entries: Table with RTP definitions
 * @sr: Save-restore struct where matching rules execute the action. This can be
 *      viewed as the "coalesced view" of multiple tables. The bits for each
 *      register set are expected not to collide with previously added entries
 *
 * Walk the table pointed to by @entries (with an empty sentinel) and add all
 * entries with matching rules to @sr. If the context is initialized for a hw
 * engine, its mmio_base is used to calculate the right register offset.
 */
void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
			  const struct xe_rtp_entry_sr *entries,
			  struct xe_reg_sr *sr)
{
	const struct xe_rtp_entry_sr *entry;
	struct xe_hw_engine *hwe = NULL;
	struct xe_gt *gt = NULL;
	struct xe_device *xe = NULL;

	rtp_get_context(ctx, &hwe, &gt, &xe);

	if (IS_SRIOV_VF(xe))
		return;

	for (entry = entries; entry && entry->name; entry++) {
		bool match = false;

		if (entry->flags & XE_RTP_ENTRY_FLAG_FOREACH_ENGINE) {
			struct xe_hw_engine *each_hwe;
			enum xe_hw_engine_id id;

			/* Apply the entry once per hw engine on this GT */
			for_each_hw_engine(each_hwe, gt, id)
				match |= rtp_process_one_sr(entry, xe, gt,
							    each_hwe, sr);
		} else {
			match = rtp_process_one_sr(entry, xe, gt, hwe, sr);
		}

		if (match)
			rtp_mark_active(xe, ctx, entry - entries);
	}
}
EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_to_sr);
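
/*
 * Usage sketch (illustrative; the table name is hypothetical and gt->reg_sr
 * is assumed to be the GT's save-restore table): GT-level entries could be
 * coalesced with
 *
 *	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
 *
 *	xe_rtp_process_to_sr(&ctx, example_entries, &gt->reg_sr);
 *
 * after which the save-restore struct holds the merged register values.
 */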

/**
 * xe_rtp_process - Process all rtp @entries, without running any action
 * @ctx: The context for processing the table, with one of device, gt or hwe
 * @entries: Table with RTP definitions
 *
 * Walk the table pointed to by @entries (with an empty sentinel), executing
 * the rules. One difference from xe_rtp_process_to_sr(): there is no action
 * associated with each entry since this uses struct xe_rtp_entry. Its main use
 * is for marking active workarounds via
 * xe_rtp_process_ctx_enable_active_tracking().
 */
void xe_rtp_process(struct xe_rtp_process_ctx *ctx,
		    const struct xe_rtp_entry *entries)
{
	const struct xe_rtp_entry *entry;
	struct xe_hw_engine *hwe;
	struct xe_gt *gt;
	struct xe_device *xe;

	rtp_get_context(ctx, &hwe, &gt, &xe);

	for (entry = entries; entry && entry->rules; entry++) {
		if (!rule_matches(xe, gt, hwe, entry->rules, entry->n_rules))
			continue;

		rtp_mark_active(xe, ctx, entry - entries);
	}
}
EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process);
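
/*
 * Continuing the active-tracking sketch above (names still hypothetical):
 * once xe_rtp_process() returns, the matched entries can be listed with
 *
 *	unsigned long idx;
 *
 *	for_each_set_bit(idx, active, N_EXAMPLE_ENTRIES)
 *		drm_info(&xe->drm, "entry %s is active\n",
 *			 example_entries[idx].name);
 */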

bool xe_rtp_match_even_instance(const struct xe_gt *gt,
				const struct xe_hw_engine *hwe)
{
	return hwe->instance % 2 == 0;
}

/* Match only the render/compute engine with the lowest engine id on the GT */
bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
					  const struct xe_hw_engine *hwe)
{
	u64 render_compute_mask = gt->info.engine_mask &
		(XE_HW_ENGINE_CCS_MASK | XE_HW_ENGINE_RCS_MASK);

	return render_compute_mask &&
		hwe->engine_id == __ffs(render_compute_mask);
}

/*
 * Match when the first gslice (the first group of 4 DSS in the geometry
 * pipeline) is entirely fused off, i.e. the first enabled geometry DSS lies
 * beyond it
 */
bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt,
					 const struct xe_hw_engine *hwe)
{
	unsigned int dss_per_gslice = 4;
	unsigned int dss;

	if (drm_WARN(&gt_to_xe(gt)->drm, xe_dss_mask_empty(gt->fuse_topo.g_dss_mask),
		     "Checking gslice for platform without geometry pipeline\n"))
		return false;

	dss = xe_dss_mask_group_ffs(gt->fuse_topo.g_dss_mask, 0, 0);

	return dss >= dss_per_gslice;
}