// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_relay_actions_abi.h"

#include "regs/xe_gt_regs.h"
#include "regs/xe_guc_regs.h"
#include "regs/xe_regs.h"

#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_gt_sriov_pf_service_types.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc_ct.h"
#include "xe_guc_hxg_helpers.h"
#include "xe_mmio.h"
#include "xe_sriov_pf_service.h"

static const struct xe_reg tgl_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_SLICE_ENABLE,		/* _MMIO(0x9138) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};

static const struct xe_reg ats_m_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	MIRROR_FUSE1,			/* _MMIO(0x911c) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};

static const struct xe_reg pvc_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};

static const struct xe_reg ver_1270_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	XEHP_FUSE4,			/* _MMIO(0x9114) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	MIRROR_FUSE1,			/* _MMIO(0x911c) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};

static const struct xe_reg ver_2000_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	XEHP_FUSE4,			/* _MMIO(0x9114) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	MIRROR_FUSE1,			/* _MMIO(0x911c) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
	XE2_GT_COMPUTE_DSS_2,		/* _MMIO(0x914c) */
	XE2_GT_GEOMETRY_DSS_1,		/* _MMIO(0x9150) */
	XE2_GT_GEOMETRY_DSS_2,		/* _MMIO(0x9154) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};

static const struct xe_reg ver_3000_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	XEHP_FUSE4,			/* _MMIO(0x9114) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	MIRROR_FUSE1,			/* _MMIO(0x911c) */
	MIRROR_L3BANK_ENABLE,		/* _MMIO(0x9130) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
	XE2_GT_COMPUTE_DSS_2,		/* _MMIO(0x914c) */
	XE2_GT_GEOMETRY_DSS_1,		/* _MMIO(0x9150) */
	XE2_GT_GEOMETRY_DSS_2,		/* _MMIO(0x9154) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};

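/*
 * Pick the runtime register table matching the device's graphics IP
 * version: exact matches for the pre-Xe2 platforms, newest-first ranges
 * from 12.70 onwards, and an -ENOPKG error pointer for unknown platforms.
 */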
static const struct xe_reg *pick_runtime_regs(struct xe_device *xe, unsigned int *count)
{
	const struct xe_reg *regs;

	if (GRAPHICS_VERx100(xe) >= 3000) {
		*count = ARRAY_SIZE(ver_3000_runtime_regs);
		regs = ver_3000_runtime_regs;
	} else if (GRAPHICS_VERx100(xe) >= 2000) {
		*count = ARRAY_SIZE(ver_2000_runtime_regs);
		regs = ver_2000_runtime_regs;
	} else if (GRAPHICS_VERx100(xe) >= 1270) {
		*count = ARRAY_SIZE(ver_1270_runtime_regs);
		regs = ver_1270_runtime_regs;
	} else if (GRAPHICS_VERx100(xe) == 1260) {
		*count = ARRAY_SIZE(pvc_runtime_regs);
		regs = pvc_runtime_regs;
	} else if (GRAPHICS_VERx100(xe) == 1255) {
		*count = ARRAY_SIZE(ats_m_runtime_regs);
		regs = ats_m_runtime_regs;
	} else if (GRAPHICS_VERx100(xe) == 1200) {
		*count = ARRAY_SIZE(tgl_runtime_regs);
		regs = tgl_runtime_regs;
	} else {
		regs = ERR_PTR(-ENOPKG);
		*count = 0;
	}

	return regs;
}

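/*
 * Allocate storage for the runtime register snapshot. Only the values
 * need an allocation (drmm-managed, released together with the DRM
 * device); the register list points at one of the static tables above.
 */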
static int pf_alloc_runtime_info(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	const struct xe_reg *regs;
	unsigned int size;
	u32 *values;

	xe_gt_assert(gt, IS_SRIOV_PF(xe));
	xe_gt_assert(gt, !gt->sriov.pf.service.runtime.size);
	xe_gt_assert(gt, !gt->sriov.pf.service.runtime.regs);
	xe_gt_assert(gt, !gt->sriov.pf.service.runtime.values);

	regs = pick_runtime_regs(xe, &size);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	if (unlikely(!size))
		return 0;

	values = drmm_kcalloc(&xe->drm, size, sizeof(u32), GFP_KERNEL);
	if (!values)
		return -ENOMEM;

	gt->sriov.pf.service.runtime.size = size;
	gt->sriov.pf.service.runtime.regs = regs;
	gt->sriov.pf.service.runtime.values = values;

	return 0;
}

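/* Read 'count' registers and store the current value of each into 'values'. */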
static void read_many(struct xe_gt *gt, unsigned int count,
		      const struct xe_reg *regs, u32 *values)
{
	while (count--)
		*values++ = xe_mmio_read32(&gt->mmio, *regs++);
}

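/*
 * Take a fresh snapshot of the runtime register values that will be
 * served to the VFs via VF2PF_QUERY_RUNTIME, and dump it when SR-IOV
 * debugging is enabled.
 */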
static void pf_prepare_runtime_info(struct xe_gt *gt)
{
	const struct xe_reg *regs;
	unsigned int size;
	u32 *values;

	if (!gt->sriov.pf.service.runtime.size)
		return;

	size = gt->sriov.pf.service.runtime.size;
	regs = gt->sriov.pf.service.runtime.regs;
	values = gt->sriov.pf.service.runtime.values;

	read_many(gt, size, regs, values);

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
		struct drm_printer p = xe_gt_dbg_printer(gt);

		xe_gt_sriov_pf_service_print_runtime(gt, &p);
	}
}

/**
 * xe_gt_sriov_pf_service_init - Early initialization of the GT SR-IOV PF services.
 * @gt: the &xe_gt to initialize
 *
 * Performs early initialization of the GT SR-IOV PF services, including preparation
 * of the runtime info that will be shared with VFs.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_service_init(struct xe_gt *gt)
{
	int err;

	err = pf_alloc_runtime_info(gt);
	if (unlikely(err))
		goto failed;

	return 0;
failed:
	xe_gt_sriov_err(gt, "Failed to initialize service (%pe)\n", ERR_PTR(err));
	return err;
}

/**
 * xe_gt_sriov_pf_service_update - Update PF SR-IOV services.
 * @gt: the &xe_gt to update
 *
 * Updates runtime data shared with VFs.
 *
 * This function can be called more than once.
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_service_update(struct xe_gt *gt)
{
	pf_prepare_runtime_info(gt);
}

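/*
 * VF2PF_HANDSHAKE: the VF announces the ABI version it wants to use
 * (wanted major.minor in request[1]) and the PF replies with the version
 * actually negotiated by xe_sriov_pf_service_handshake_vf().
 */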
/* Return: length of the response message or a negative error code on failure. */
static int pf_process_handshake_msg(struct xe_gt *gt, u32 origin,
				    const u32 *request, u32 len, u32 *response, u32 size)
{
	u32 wanted_major, wanted_minor;
	u32 major, minor;
	u32 mbz;
	int err;

	if (unlikely(len != VF2PF_HANDSHAKE_REQUEST_MSG_LEN))
		return -EMSGSIZE;

	mbz = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_0_MBZ, request[0]);
	if (unlikely(mbz))
		return -EPFNOSUPPORT;

	wanted_major = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR, request[1]);
	wanted_minor = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR, request[1]);

	err = xe_sriov_pf_service_handshake_vf(gt_to_xe(gt), origin, wanted_major, wanted_minor,
					       &major, &minor);
	if (err < 0)
		return err;

	xe_gt_assert(gt, major || minor);
	xe_gt_assert(gt, size >= VF2PF_HANDSHAKE_RESPONSE_MSG_LEN);

	response[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		      FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS) |
		      FIELD_PREP(GUC_HXG_RESPONSE_MSG_0_DATA0, 0);
	response[1] = FIELD_PREP(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MAJOR, major) |
		      FIELD_PREP(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MINOR, minor);

	return VF2PF_HANDSHAKE_RESPONSE_MSG_LEN;
}

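/*
 * Single entry of the VF2PF_QUERY_RUNTIME response payload: a register
 * (offset, value) pair packed into two consecutive dwords.
 */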
struct reg_data {
	u32 offset;
	u32 value;
} __packed;
static_assert(hxg_sizeof(struct reg_data) == 2);

/* Return: number of entries copied or negative error code on failure. */
static int pf_service_runtime_query(struct xe_gt *gt, u32 start, u32 limit,
				    struct reg_data *data, u32 *remaining)
{
	struct xe_gt_sriov_pf_service_runtime_regs *runtime;
	unsigned int count, i;
	u32 addr;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	runtime = &gt->sriov.pf.service.runtime;

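	/*
	 * VFs page through the runtime data: 'start' is the index of the
	 * first entry to copy, 'limit' caps how many entries we may copy
	 * into this response, and 'remaining' reports how many entries are
	 * still left for follow-up queries.
	 */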
	if (start > runtime->size)
		return -ERANGE;

	count = min_t(u32, runtime->size - start, limit);

	for (i = 0; i < count; ++i, ++data) {
		addr = runtime->regs[start + i].addr;
		data->offset = xe_mmio_adjusted_addr(&gt->mmio, addr);
		data->value = runtime->values[start + i];
	}

	*remaining = runtime->size - start - count;
	return count;
}

/* Return: length of the response message or a negative error code on failure. */
static int pf_process_runtime_query_msg(struct xe_gt *gt, u32 origin,
					const u32 *msg, u32 msg_len, u32 *response, u32 resp_size)
{
	const u32 chunk_size = hxg_sizeof(struct reg_data);
	struct reg_data *reg_data_buf;
	u32 limit, start, max_chunks;
	u32 remaining = 0;
	int ret;

	/* this action is available from ABI 1.0 */
	if (!xe_sriov_pf_service_is_negotiated(gt_to_xe(gt), origin, 1, 0))
		return -EACCES;

	if (unlikely(msg_len > VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN))
		return -EMSGSIZE;
	if (unlikely(msg_len < VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN))
		return -EPROTO;
	if (unlikely(resp_size < VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN))
		return -EINVAL;

	limit = FIELD_GET(VF2PF_QUERY_RUNTIME_REQUEST_MSG_0_LIMIT, msg[0]);
	start = FIELD_GET(VF2PF_QUERY_RUNTIME_REQUEST_MSG_1_START, msg[1]);

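	/*
	 * Cap the response to what the reply buffer allows: reserve the
	 * fixed response header and fill the rest with reg_data chunks.
	 * A limit of VF2PF_QUERY_RUNTIME_NO_LIMIT means "as many entries
	 * as fit into this response".
	 */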
	resp_size = min_t(u32, resp_size, VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MAX_LEN);
	max_chunks = (resp_size - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / chunk_size;
	limit = limit == VF2PF_QUERY_RUNTIME_NO_LIMIT ? max_chunks : min_t(u32, max_chunks, limit);
	reg_data_buf = (void *)(response + VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN);

	ret = pf_service_runtime_query(gt, start, limit, reg_data_buf, &remaining);
	if (ret < 0)
		return ret;

	response[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		      FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS) |
		      FIELD_PREP(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_0_COUNT, ret);
	response[1] = FIELD_PREP(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_1_REMAINING, remaining);

	return VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + ret * hxg_sizeof(struct reg_data);
}

/**
 * xe_gt_sriov_pf_service_process_request - Service GT level SR-IOV request message from the VF.
 * @gt: the &xe_gt that provides the service
 * @origin: VF number that is requesting the service
 * @msg: request message
 * @msg_len: length of the request message (in dwords)
 * @response: placeholder for the response message
 * @resp_size: length of the response message buffer (in dwords)
 *
 * This function processes a `Relay Message`_ request from the VF.
 *
 * Return: length of the response message or a negative error code on failure.
 */
int xe_gt_sriov_pf_service_process_request(struct xe_gt *gt, u32 origin,
					   const u32 *msg, u32 msg_len,
					   u32 *response, u32 resp_size)
{
	u32 action, data __maybe_unused;
	int ret;

	xe_gt_assert(gt, msg_len >= GUC_HXG_MSG_MIN_LEN);
	xe_gt_assert(gt, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) == GUC_HXG_TYPE_REQUEST);

	action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);
	data = FIELD_GET(GUC_HXG_REQUEST_MSG_0_DATA0, msg[0]);
	xe_gt_sriov_dbg_verbose(gt, "service action %#x:%u from VF%u\n",
				action, data, origin);

	switch (action) {
	case GUC_RELAY_ACTION_VF2PF_HANDSHAKE:
		ret = pf_process_handshake_msg(gt, origin, msg, msg_len, response, resp_size);
		break;
	case GUC_RELAY_ACTION_VF2PF_QUERY_RUNTIME:
		ret = pf_process_runtime_query_msg(gt, origin, msg, msg_len, response, resp_size);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/**
 * xe_gt_sriov_pf_service_print_runtime - Print PF runtime data shared with VFs.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * This function is for PF use only.
 *
 * Return: always 0.
 */
int xe_gt_sriov_pf_service_print_runtime(struct xe_gt *gt, struct drm_printer *p)
{
	const struct xe_reg *regs;
	unsigned int size;
	u32 *values;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	size = gt->sriov.pf.service.runtime.size;
	regs = gt->sriov.pf.service.runtime.regs;
	values = gt->sriov.pf.service.runtime.values;

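	/*
	 * Print addresses after the same xe_mmio_adjusted_addr() translation
	 * that pf_service_runtime_query() applies when building responses,
	 * so the debug output matches what the VFs will receive.
	 */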
	for (; size--; regs++, values++) {
		drm_printf(p, "reg[%#x] = %#x\n",
			   xe_mmio_adjusted_addr(&gt->mmio, regs->addr), *values);
	}

	return 0;
}