xref: /linux/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c (revision a3a02a52bcfcbcc4a637d4b68bf1bc391c9fad02)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include "abi/guc_actions_sriov_abi.h"

#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc_ct.h"
#include "xe_sriov.h"

static const char *control_cmd_to_string(u32 cmd)
{
	switch (cmd) {
	case GUC_PF_TRIGGER_VF_PAUSE:
		return "PAUSE";
	case GUC_PF_TRIGGER_VF_RESUME:
		return "RESUME";
	case GUC_PF_TRIGGER_VF_STOP:
		return "STOP";
	case GUC_PF_TRIGGER_VF_FLR_START:
		return "FLR_START";
	case GUC_PF_TRIGGER_VF_FLR_FINISH:
		return "FLR_FINISH";
	default:
		return "<unknown>";
	}
}

static int guc_action_vf_control_cmd(struct xe_guc *guc, u32 vfid, u32 cmd)
{
	u32 request[PF2GUC_VF_CONTROL_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_PF2GUC_VF_CONTROL),
		FIELD_PREP(PF2GUC_VF_CONTROL_REQUEST_MSG_1_VFID, vfid),
		FIELD_PREP(PF2GUC_VF_CONTROL_REQUEST_MSG_2_COMMAND, cmd),
	};
	int ret;

	/* XXX those two commands are now sent from the G2H handler */
	if (cmd == GUC_PF_TRIGGER_VF_FLR_START || cmd == GUC_PF_TRIGGER_VF_FLR_FINISH)
		return xe_guc_ct_send_g2h_handler(&guc->ct, request, ARRAY_SIZE(request));

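	/* a positive return means unexpected response data from the GuC - treat it as a protocol error */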
	ret = xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
	return ret > 0 ? -EPROTO : ret;
}

static int pf_send_vf_control_cmd(struct xe_gt *gt, unsigned int vfid, u32 cmd)
{
	int err;

	xe_gt_assert(gt, vfid != PFID);

	err = guc_action_vf_control_cmd(&gt->uc.guc, vfid, cmd);
	if (unlikely(err))
		xe_gt_sriov_err(gt, "VF%u control command %s failed (%pe)\n",
				vfid, control_cmd_to_string(cmd), ERR_PTR(err));
	return err;
}

static int pf_send_vf_pause(struct xe_gt *gt, unsigned int vfid)
{
	return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_PAUSE);
}

static int pf_send_vf_resume(struct xe_gt *gt, unsigned int vfid)
{
	return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_RESUME);
}

static int pf_send_vf_stop(struct xe_gt *gt, unsigned int vfid)
{
	return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_STOP);
}

static int pf_send_vf_flr_start(struct xe_gt *gt, unsigned int vfid)
{
	return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_FLR_START);
}

static int pf_send_vf_flr_finish(struct xe_gt *gt, unsigned int vfid)
{
	return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_FLR_FINISH);
}

/**
 * xe_gt_sriov_pf_control_pause_vf - Pause a VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function is for PF only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid)
{
	return pf_send_vf_pause(gt, vfid);
}

/**
 * xe_gt_sriov_pf_control_resume_vf - Resume a VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function is for PF only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid)
{
	return pf_send_vf_resume(gt, vfid);
}

/**
 * xe_gt_sriov_pf_control_stop_vf - Stop a VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function is for PF only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid)
{
	return pf_send_vf_stop(gt, vfid);
}

/**
 * xe_gt_sriov_pf_control_trigger_flr - Start a VF FLR sequence.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function is for PF only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_control_trigger_flr(struct xe_gt *gt, unsigned int vfid)
{
	int err;

	/* XXX pf_send_vf_flr_start() expects ct->lock */
	mutex_lock(&gt->uc.guc.ct.lock);
	err = pf_send_vf_flr_start(gt, vfid);
	mutex_unlock(&gt->uc.guc.ct.lock);

	return err;
}

/**
 * DOC: The VF FLR Flow with GuC
 *
 *          PF                        GUC             PCI
 * ========================================================
 *          |                          |               |
 * (1)      |                         [ ] <----- FLR --|
 *          |                         [ ]              :
 * (2)     [ ] <-------- NOTIFY FLR --[ ]
 *         [ ]                         |
 * (3)     [ ]                         |
 *         [ ]                         |
 *         [ ]-- START FLR ---------> [ ]
 *          |                         [ ]
 * (4)      |                         [ ]
 *          |                         [ ]
 *         [ ] <--------- FLR DONE -- [ ]
 *         [ ]                         |
 * (5)     [ ]                         |
 *         [ ]                         |
 *         [ ]-- FINISH FLR --------> [ ]
 *          |                          |
 *
 * Step 1: PCI HW generates interrupt to the GuC about VF FLR
 * Step 2: GuC FW sends G2H notification to the PF about VF FLR
 * Step 2a: on some platforms G2H is only received from root GuC
 * Step 3: PF sends H2G request to the GuC to start VF FLR sequence
 * Step 3a: on some platforms PF must send H2G to all other GuCs
 * Step 4: GuC FW performs VF FLR cleanups and notifies the PF when done
 * Step 5: PF performs VF FLR cleanups and notifies the GuC FW when finished
 */

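/*
 * Step 3a from the flow above: some platforms (currently only PVC) have
 * more than one GuC, so the FLR start must be dispatched to all of them,
 * not just to the GT that received the G2H notification.
 */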
static bool needs_dispatch_flr(struct xe_device *xe)
{
	return xe->info.platform == XE_PVC;
}

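/* Steps 2 and 3: GuC notified the PF about a VF FLR, trigger the FLR sequence in the GuC(s) */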
static void pf_handle_vf_flr(struct xe_gt *gt, u32 vfid)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gt *gtit;
	unsigned int gtid;

	xe_gt_sriov_info(gt, "VF%u FLR\n", vfid);

	if (needs_dispatch_flr(xe)) {
		for_each_gt(gtit, xe, gtid)
			pf_send_vf_flr_start(gtit, vfid);
	} else {
		pf_send_vf_flr_start(gt, vfid);
	}
}

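/* Steps 4 and 5: GuC reported that its FLR cleanups are done, acknowledge with FINISH FLR */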
static void pf_handle_vf_flr_done(struct xe_gt *gt, u32 vfid)
{
	pf_send_vf_flr_finish(gt, vfid);
}

static int pf_handle_vf_event(struct xe_gt *gt, u32 vfid, u32 eventid)
{
	switch (eventid) {
	case GUC_PF_NOTIFY_VF_FLR:
		pf_handle_vf_flr(gt, vfid);
		break;
	case GUC_PF_NOTIFY_VF_FLR_DONE:
		pf_handle_vf_flr_done(gt, vfid);
		break;
	case GUC_PF_NOTIFY_VF_PAUSE_DONE:
		break;
	case GUC_PF_NOTIFY_VF_FIXUP_DONE:
		break;
	default:
		return -ENOPKG;
	}
	return 0;
}

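/* events with VFID 0 are addressed to the PF itself (currently only the VFs enable/disable notification) */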
static int pf_handle_pf_event(struct xe_gt *gt, u32 eventid)
{
	switch (eventid) {
	case GUC_PF_NOTIFY_VF_ENABLE:
		xe_gt_sriov_dbg_verbose(gt, "VFs %s/%s\n",
					str_enabled_disabled(true),
					str_enabled_disabled(false));
		break;
	default:
		return -ENOPKG;
	}
	return 0;
}

/**
 * xe_gt_sriov_pf_control_process_guc2pf - Handle VF state notification from GuC.
 * @gt: the &xe_gt
 * @msg: the G2H message
 * @len: the length of the G2H message
 *
 * This function is for PF only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_control_process_guc2pf(struct xe_gt *gt, const u32 *msg, u32 len)
{
	u32 vfid;
	u32 eventid;

	xe_gt_assert(gt, len);
	xe_gt_assert(gt, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) == GUC_HXG_ORIGIN_GUC);
	xe_gt_assert(gt, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) == GUC_HXG_TYPE_EVENT);
	xe_gt_assert(gt, FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[0]) ==
		     GUC_ACTION_GUC2PF_VF_STATE_NOTIFY);

	if (unlikely(!xe_device_is_sriov_pf(gt_to_xe(gt))))
		return -EPROTO;

	if (unlikely(FIELD_GET(GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_0_MBZ, msg[0])))
		return -EPFNOSUPPORT;

	if (unlikely(len != GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_LEN))
		return -EPROTO;

	vfid = FIELD_GET(GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_1_VFID, msg[1]);
	eventid = FIELD_GET(GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_2_EVENT, msg[2]);

	return vfid ? pf_handle_vf_event(gt, vfid, eventid) : pf_handle_pf_event(gt, eventid);
}