xref: /linux/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c (revision 6dfafbd0299a60bfb5d5e277fdf100037c7ded07)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023-2024 Intel Corporation
4  */
5 
6 #include <drm/drm_managed.h>
7 
8 #include "abi/guc_actions_sriov_abi.h"
9 #include "abi/guc_relay_actions_abi.h"
10 
11 #include "regs/xe_gt_regs.h"
12 #include "regs/xe_guc_regs.h"
13 #include "regs/xe_regs.h"
14 
15 #include "xe_mmio.h"
16 #include "xe_gt_sriov_printk.h"
17 #include "xe_gt_sriov_pf_helpers.h"
18 #include "xe_gt_sriov_pf_service.h"
19 #include "xe_gt_sriov_pf_service_types.h"
20 #include "xe_guc_ct.h"
21 #include "xe_guc_hxg_helpers.h"
22 #include "xe_sriov_pf_service.h"
23 
/* Runtime (fuse) registers shared with VFs on graphics version 12.00 (TGL). */
static const struct xe_reg tgl_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_SLICE_ENABLE,		/* _MMIO(0x9138) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};
33 
/* Runtime registers shared with VFs on graphics version 12.55 (ATS-M). */
static const struct xe_reg ats_m_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	MIRROR_FUSE1,			/* _MMIO(0x911c) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};
44 
/* Runtime registers shared with VFs on graphics version 12.60 (PVC). */
static const struct xe_reg pvc_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};
55 
/* Runtime registers shared with VFs on graphics versions 12.70..19.xx. */
static const struct xe_reg ver_1270_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	XEHP_FUSE4,			/* _MMIO(0x9114) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	MIRROR_FUSE1,			/* _MMIO(0x911c) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};
68 
/* Runtime registers shared with VFs on graphics versions 20.xx..29.xx. */
static const struct xe_reg ver_2000_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	XEHP_FUSE4,			/* _MMIO(0x9114) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	MIRROR_FUSE1,			/* _MMIO(0x911c) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
	XE2_GT_COMPUTE_DSS_2,		/* _MMIO(0x914c) */
	XE2_GT_GEOMETRY_DSS_1,		/* _MMIO(0x9150) */
	XE2_GT_GEOMETRY_DSS_2,		/* _MMIO(0x9154) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};
84 
/* Runtime registers shared with VFs on graphics versions 30.xx..34.xx. */
static const struct xe_reg ver_3000_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	XEHP_FUSE4,			/* _MMIO(0x9114) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	MIRROR_FUSE1,			/* _MMIO(0x911c) */
	MIRROR_L3BANK_ENABLE,		/* _MMIO(0x9130) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
	XE2_GT_COMPUTE_DSS_2,		/* _MMIO(0x914c) */
	XE2_GT_GEOMETRY_DSS_1,		/* _MMIO(0x9150) */
	XE2_GT_GEOMETRY_DSS_2,		/* _MMIO(0x9154) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};
101 
/* Runtime registers shared with VFs on graphics version 35 and newer. */
static const struct xe_reg ver_35_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	XEHP_FUSE4,			/* _MMIO(0x9114) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	MIRROR_L3BANK_ENABLE,		/* _MMIO(0x9130) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
	XE2_GT_COMPUTE_DSS_2,		/* _MMIO(0x914c) */
	XE2_GT_GEOMETRY_DSS_1,		/* _MMIO(0x9150) */
	XE2_GT_GEOMETRY_DSS_2,		/* _MMIO(0x9154) */
	SERVICE_COPY_ENABLE,		/* _MMIO(0x9170) */
};
117 
118 static const struct xe_reg *pick_runtime_regs(struct xe_device *xe, unsigned int *count)
119 {
120 	const struct xe_reg *regs;
121 
122 	if (GRAPHICS_VER(xe) >= 35) {
123 		*count = ARRAY_SIZE(ver_35_runtime_regs);
124 		regs = ver_35_runtime_regs;
125 	} else if (GRAPHICS_VERx100(xe) >= 3000) {
126 		*count = ARRAY_SIZE(ver_3000_runtime_regs);
127 		regs = ver_3000_runtime_regs;
128 	} else if (GRAPHICS_VERx100(xe) >= 2000) {
129 		*count = ARRAY_SIZE(ver_2000_runtime_regs);
130 		regs = ver_2000_runtime_regs;
131 	} else if (GRAPHICS_VERx100(xe) >= 1270) {
132 		*count = ARRAY_SIZE(ver_1270_runtime_regs);
133 		regs = ver_1270_runtime_regs;
134 	} else if (GRAPHICS_VERx100(xe) == 1260) {
135 		*count = ARRAY_SIZE(pvc_runtime_regs);
136 		regs = pvc_runtime_regs;
137 	} else if (GRAPHICS_VERx100(xe) == 1255) {
138 		*count = ARRAY_SIZE(ats_m_runtime_regs);
139 		regs = ats_m_runtime_regs;
140 	} else if (GRAPHICS_VERx100(xe) == 1200) {
141 		*count = ARRAY_SIZE(tgl_runtime_regs);
142 		regs = tgl_runtime_regs;
143 	} else {
144 		regs = ERR_PTR(-ENOPKG);
145 		*count = 0;
146 	}
147 
148 	return regs;
149 }
150 
151 static int pf_alloc_runtime_info(struct xe_gt *gt)
152 {
153 	struct xe_device *xe = gt_to_xe(gt);
154 	const struct xe_reg *regs;
155 	unsigned int size;
156 	u32 *values;
157 
158 	xe_gt_assert(gt, IS_SRIOV_PF(xe));
159 	xe_gt_assert(gt, !gt->sriov.pf.service.runtime.size);
160 	xe_gt_assert(gt, !gt->sriov.pf.service.runtime.regs);
161 	xe_gt_assert(gt, !gt->sriov.pf.service.runtime.values);
162 
163 	regs = pick_runtime_regs(xe, &size);
164 	if (IS_ERR(regs))
165 		return PTR_ERR(regs);
166 
167 	if (unlikely(!size))
168 		return 0;
169 
170 	values = drmm_kcalloc(&xe->drm, size, sizeof(u32), GFP_KERNEL);
171 	if (!values)
172 		return -ENOMEM;
173 
174 	gt->sriov.pf.service.runtime.size = size;
175 	gt->sriov.pf.service.runtime.regs = regs;
176 	gt->sriov.pf.service.runtime.values = values;
177 
178 	return 0;
179 }
180 
181 static void read_many(struct xe_gt *gt, unsigned int count,
182 		      const struct xe_reg *regs, u32 *values)
183 {
184 	while (count--)
185 		*values++ = xe_mmio_read32(&gt->mmio, *regs++);
186 }
187 
188 static void pf_prepare_runtime_info(struct xe_gt *gt)
189 {
190 	const struct xe_reg *regs;
191 	unsigned int size;
192 	u32 *values;
193 
194 	if (!gt->sriov.pf.service.runtime.size)
195 		return;
196 
197 	size = gt->sriov.pf.service.runtime.size;
198 	regs = gt->sriov.pf.service.runtime.regs;
199 	values = gt->sriov.pf.service.runtime.values;
200 
201 	read_many(gt, size, regs, values);
202 
203 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
204 		struct drm_printer p = xe_gt_dbg_printer(gt);
205 
206 		xe_gt_sriov_pf_service_print_runtime(gt, &p);
207 	}
208 }
209 
/**
 * xe_gt_sriov_pf_service_init - Early initialization of the GT SR-IOV PF services.
 * @gt: the &xe_gt to initialize
 *
 * Performs early initialization of the GT SR-IOV PF services, including preparation
 * of the runtime info that will be shared with VFs.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_service_init(struct xe_gt *gt)
{
	int err = pf_alloc_runtime_info(gt);

	if (unlikely(err))
		xe_gt_sriov_err(gt, "Failed to initialize service (%pe)\n", ERR_PTR(err));

	return err;
}
232 
/**
 * xe_gt_sriov_pf_service_update - Update PF SR-IOV services.
 * @gt: the &xe_gt to update
 *
 * Updates runtime data shared with VFs by re-reading the runtime registers.
 *
 * This function can be called more than once.
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_service_update(struct xe_gt *gt)
{
	pf_prepare_runtime_info(gt);
}
246 
247 /* Return: length of the response message or a negative error code on failure. */
248 static int pf_process_handshake_msg(struct xe_gt *gt, u32 origin,
249 				    const u32 *request, u32 len, u32 *response, u32 size)
250 {
251 	u32 wanted_major, wanted_minor;
252 	u32 major, minor;
253 	u32 mbz;
254 	int err;
255 
256 	if (unlikely(len != VF2PF_HANDSHAKE_REQUEST_MSG_LEN))
257 		return -EMSGSIZE;
258 
259 	mbz = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_0_MBZ, request[0]);
260 	if (unlikely(mbz))
261 		return -EPFNOSUPPORT;
262 
263 	wanted_major = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR, request[1]);
264 	wanted_minor = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR, request[1]);
265 
266 	err = xe_sriov_pf_service_handshake_vf(gt_to_xe(gt), origin, wanted_major, wanted_minor,
267 					       &major, &minor);
268 	if (err < 0)
269 		return err;
270 
271 	xe_gt_assert(gt, major || minor);
272 	xe_gt_assert(gt, size >= VF2PF_HANDSHAKE_RESPONSE_MSG_LEN);
273 
274 	response[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
275 		      FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS) |
276 		      FIELD_PREP(GUC_HXG_RESPONSE_MSG_0_DATA0, 0);
277 	response[1] = FIELD_PREP(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MAJOR, major) |
278 		      FIELD_PREP(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MINOR, minor);
279 
280 	return VF2PF_HANDSHAKE_RESPONSE_MSG_LEN;
281 }
282 
/*
 * Wire format of one register entry in the VF2PF_QUERY_RUNTIME response:
 * register offset followed by its value, packed back to back.
 */
struct reg_data {
	u32 offset;
	u32 value;
} __packed;
/* each entry must occupy exactly two message dwords */
static_assert(hxg_sizeof(struct reg_data) == 2);
288 
289 /* Return: number of entries copied or negative error code on failure. */
290 static int pf_service_runtime_query(struct xe_gt *gt, u32 start, u32 limit,
291 				    struct reg_data *data, u32 *remaining)
292 {
293 	struct xe_gt_sriov_pf_service_runtime_regs *runtime;
294 	unsigned int count, i;
295 	u32 addr;
296 
297 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
298 
299 	runtime = &gt->sriov.pf.service.runtime;
300 
301 	if (start > runtime->size)
302 		return -ERANGE;
303 
304 	count = min_t(u32, runtime->size - start, limit);
305 
306 	for (i = 0; i < count; ++i, ++data) {
307 		addr = runtime->regs[start + i].addr;
308 		data->offset = xe_mmio_adjusted_addr(&gt->mmio, addr);
309 		data->value = runtime->values[start + i];
310 	}
311 
312 	*remaining = runtime->size - start - count;
313 	return count;
314 }
315 
/*
 * Handle the VF2PF_QUERY_RUNTIME request: copy a chunk of runtime register
 * data directly into the response buffer, honoring both the VF-requested
 * limit and the space available in the response.
 *
 * Return: length of the response message or a negative error code on failure.
 */
static int pf_process_runtime_query_msg(struct xe_gt *gt, u32 origin,
					const u32 *msg, u32 msg_len, u32 *response, u32 resp_size)
{
	const u32 chunk_size = hxg_sizeof(struct reg_data);
	struct reg_data *reg_data_buf;
	u32 limit, start, max_chunks;
	u32 remaining = 0;
	int ret;

	/* this action is available from ABI 1.0 */
	if (!xe_sriov_pf_service_is_negotiated(gt_to_xe(gt), origin, 1, 0))
		return -EACCES;

	if (unlikely(msg_len > VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN))
		return -EMSGSIZE;
	if (unlikely(msg_len < VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN))
		return -EPROTO;
	if (unlikely(resp_size < VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN))
		return -EINVAL;

	limit = FIELD_GET(VF2PF_QUERY_RUNTIME_REQUEST_MSG_0_LIMIT, msg[0]);
	start = FIELD_GET(VF2PF_QUERY_RUNTIME_REQUEST_MSG_1_START, msg[1]);

	/*
	 * Cap the response to the ABI maximum, then derive how many reg_data
	 * entries fit after the fixed response header; NO_LIMIT means "as
	 * many as fit", otherwise use the smaller of the two bounds.
	 */
	resp_size = min_t(u32, resp_size, VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MAX_LEN);
	max_chunks = (resp_size - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / chunk_size;
	limit = limit == VF2PF_QUERY_RUNTIME_NO_LIMIT ? max_chunks : min_t(u32, max_chunks, limit);
	/* entries are written in place, right after the two header dwords */
	reg_data_buf = (void *)(response + VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN);

	ret = pf_service_runtime_query(gt, start, limit, reg_data_buf, &remaining);
	if (ret < 0)
		return ret;

	response[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		      FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS) |
		      FIELD_PREP(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_0_COUNT, ret);
	response[1] = FIELD_PREP(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_1_REMAINING, remaining);

	return VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + ret * hxg_sizeof(struct reg_data);
}
356 
357 /**
358  * xe_gt_sriov_pf_service_process_request - Service GT level SR-IOV request message from the VF.
359  * @gt: the &xe_gt that provides the service
360  * @origin: VF number that is requesting the service
361  * @msg: request message
362  * @msg_len: length of the request message (in dwords)
363  * @response: placeholder for the response message
364  * @resp_size: length of the response message buffer (in dwords)
365  *
366  * This function processes `Relay Message`_ request from the VF.
367  *
368  * Return: length of the response message or a negative error code on failure.
369  */
370 int xe_gt_sriov_pf_service_process_request(struct xe_gt *gt, u32 origin,
371 					   const u32 *msg, u32 msg_len,
372 					   u32 *response, u32 resp_size)
373 {
374 	u32 action, data __maybe_unused;
375 	int ret;
376 
377 	xe_gt_assert(gt, msg_len >= GUC_HXG_MSG_MIN_LEN);
378 	xe_gt_assert(gt, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) == GUC_HXG_TYPE_REQUEST);
379 
380 	action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);
381 	data = FIELD_GET(GUC_HXG_REQUEST_MSG_0_DATA0, msg[0]);
382 	xe_gt_sriov_dbg_verbose(gt, "service action %#x:%u from VF%u\n",
383 				action, data, origin);
384 
385 	switch (action) {
386 	case GUC_RELAY_ACTION_VF2PF_HANDSHAKE:
387 		ret = pf_process_handshake_msg(gt, origin, msg, msg_len, response, resp_size);
388 		break;
389 	case GUC_RELAY_ACTION_VF2PF_QUERY_RUNTIME:
390 		ret = pf_process_runtime_query_msg(gt, origin, msg, msg_len, response, resp_size);
391 		break;
392 	default:
393 		ret = -EOPNOTSUPP;
394 		break;
395 	}
396 
397 	return ret;
398 }
399 
400 /**
401  * xe_gt_sriov_pf_service_print_runtime - Print PF runtime data shared with VFs.
402  * @gt: the &xe_gt
403  * @p: the &drm_printer
404  *
405  * This function is for PF use only.
406  */
407 int xe_gt_sriov_pf_service_print_runtime(struct xe_gt *gt, struct drm_printer *p)
408 {
409 	const struct xe_reg *regs;
410 	unsigned int size;
411 	u32 *values;
412 
413 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
414 
415 	size = gt->sriov.pf.service.runtime.size;
416 	regs = gt->sriov.pf.service.runtime.regs;
417 	values = gt->sriov.pf.service.runtime.values;
418 
419 	for (; size--; regs++, values++) {
420 		drm_printf(p, "reg[%#x] = %#x\n",
421 			   xe_mmio_adjusted_addr(&gt->mmio, regs->addr), *values);
422 	}
423 
424 	return 0;
425 }
426