// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/string_helpers.h>

#include "abi/guc_actions_sriov_abi.h"

#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc_ct.h"
#include "xe_sriov.h"

static const char *control_cmd_to_string(u32 cmd)
{
	switch (cmd) {
	case GUC_PF_TRIGGER_VF_PAUSE:
		return "PAUSE";
	case GUC_PF_TRIGGER_VF_RESUME:
		return "RESUME";
	case GUC_PF_TRIGGER_VF_STOP:
		return "STOP";
	case GUC_PF_TRIGGER_VF_FLR_START:
		return "FLR_START";
	case GUC_PF_TRIGGER_VF_FLR_FINISH:
		return "FLR_FINISH";
	default:
		return "<unknown>";
	}
}

static int guc_action_vf_control_cmd(struct xe_guc *guc, u32 vfid, u32 cmd)
{
	u32 request[PF2GUC_VF_CONTROL_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_PF2GUC_VF_CONTROL),
		FIELD_PREP(PF2GUC_VF_CONTROL_REQUEST_MSG_1_VFID, vfid),
		FIELD_PREP(PF2GUC_VF_CONTROL_REQUEST_MSG_2_COMMAND, cmd),
	};
	int ret;

	/* XXX those two commands are now sent from the G2H handler */
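	/*
	 * Blocking for a reply is not safe within the G2H processing path,
	 * hence the dedicated send variant for these two commands.
	 */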
	if (cmd == GUC_PF_TRIGGER_VF_FLR_START || cmd == GUC_PF_TRIGGER_VF_FLR_FINISH)
		return xe_guc_ct_send_g2h_handler(&guc->ct, request, ARRAY_SIZE(request));

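	/* a positive value means unexpected reply data - treat it as a protocol error */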
	ret = xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
	return ret > 0 ? -EPROTO : ret;
}

static int pf_send_vf_control_cmd(struct xe_gt *gt, unsigned int vfid, u32 cmd)
{
	int err;

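	/* only VFs can be targeted; PFID identifies the PF itself */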
	xe_gt_assert(gt, vfid != PFID);

	err = guc_action_vf_control_cmd(&gt->uc.guc, vfid, cmd);
	if (unlikely(err))
		xe_gt_sriov_err(gt, "VF%u control command %s failed (%pe)\n",
				vfid, control_cmd_to_string(cmd), ERR_PTR(err));
	return err;
}

static int pf_send_vf_pause(struct xe_gt *gt, unsigned int vfid)
{
	return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_PAUSE);
}

static int pf_send_vf_resume(struct xe_gt *gt, unsigned int vfid)
{
	return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_RESUME);
}

static int pf_send_vf_stop(struct xe_gt *gt, unsigned int vfid)
{
	return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_STOP);
}

static int pf_send_vf_flr_start(struct xe_gt *gt, unsigned int vfid)
{
	return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_FLR_START);
}

static int pf_send_vf_flr_finish(struct xe_gt *gt, unsigned int vfid)
{
	return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_FLR_FINISH);
}

/**
 * xe_gt_sriov_pf_control_pause_vf - Pause a VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function is for PF only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid)
{
	return pf_send_vf_pause(gt, vfid);
}

/**
 * xe_gt_sriov_pf_control_resume_vf - Resume a VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function is for PF only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid)
{
	return pf_send_vf_resume(gt, vfid);
}

/**
 * xe_gt_sriov_pf_control_stop_vf - Stop a VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function is for PF only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid)
{
	return pf_send_vf_stop(gt, vfid);
}

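/*
 * Illustrative use only (assumed caller, not part of this file): a PF
 * agent could pause a VF, inspect or save its state, then resume it:
 *
 *	err = xe_gt_sriov_pf_control_pause_vf(gt, vfid);
 *	if (err)
 *		return err;
 *	...
 *	err = xe_gt_sriov_pf_control_resume_vf(gt, vfid);
 */
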
/**
 * DOC: The VF FLR Flow with GuC
 *
 *          PF                        GUC             PCI
 * ========================================================
 *          |                          |               |
 * (1)      |                         [ ] <----- FLR --|
 *          |                         [ ]              :
 * (2)     [ ] <-------- NOTIFY FLR --[ ]
 *         [ ]                         |
 * (3)     [ ]                         |
 *         [ ]                         |
 *         [ ]-- START FLR ---------> [ ]
 *          |                         [ ]
 * (4)      |                         [ ]
 *          |                         [ ]
 *         [ ] <--------- FLR DONE -- [ ]
 *         [ ]                         |
 * (5)     [ ]                         |
 *         [ ]                         |
 *         [ ]-- FINISH FLR --------> [ ]
 *          |                          |
 *
 * Step 1: PCI HW generates interrupt to the GuC about VF FLR
 * Step 2: GuC FW sends G2H notification to the PF about VF FLR
 * Step 2a: on some platforms G2H is only received from root GuC
 * Step 3: PF sends H2G request to the GuC to start VF FLR sequence
 * Step 3a: on some platforms PF must send H2G to all other GuCs
 * Step 4: GuC FW performs VF FLR cleanups and notifies the PF when done
 * Step 5: PF performs VF FLR cleanups and notifies the GuC FW when finished
 */

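/*
 * In the code below: step 2 arrives via xe_gt_sriov_pf_control_process_guc2pf()
 * as a GUC_PF_NOTIFY_VF_FLR event, step 3 is pf_send_vf_flr_start(), step 4
 * arrives as GUC_PF_NOTIFY_VF_FLR_DONE and step 5 ends with
 * pf_send_vf_flr_finish(). Per steps 2a/3a, on PVC the notification is
 * expected only from the root GuC, so the PF dispatches the FLR start to the
 * GuC of every GT.
 */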
static bool needs_dispatch_flr(struct xe_device *xe)
{
	return xe->info.platform == XE_PVC;
}

static void pf_handle_vf_flr(struct xe_gt *gt, u32 vfid)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gt *gtit;
	unsigned int gtid;

	xe_gt_sriov_info(gt, "VF%u FLR\n", vfid);

	if (needs_dispatch_flr(xe)) {
		for_each_gt(gtit, xe, gtid)
			pf_send_vf_flr_start(gtit, vfid);
	} else {
		pf_send_vf_flr_start(gt, vfid);
	}
}

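/* step 5 of the FLR flow: nothing to clean up on the PF side here, just acknowledge the GuC */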
static void pf_handle_vf_flr_done(struct xe_gt *gt, u32 vfid)
{
	pf_send_vf_flr_finish(gt, vfid);
}

static int pf_handle_vf_event(struct xe_gt *gt, u32 vfid, u32 eventid)
{
	switch (eventid) {
	case GUC_PF_NOTIFY_VF_FLR:
		pf_handle_vf_flr(gt, vfid);
		break;
	case GUC_PF_NOTIFY_VF_FLR_DONE:
		pf_handle_vf_flr_done(gt, vfid);
		break;
	case GUC_PF_NOTIFY_VF_PAUSE_DONE:
		break;
	case GUC_PF_NOTIFY_VF_FIXUP_DONE:
		break;
	default:
		return -ENOPKG;
	}
	return 0;
}

static int pf_handle_pf_event(struct xe_gt *gt, u32 eventid)
{
	switch (eventid) {
	case GUC_PF_NOTIFY_VF_ENABLE:
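		/* the event does not tell whether VFs were just enabled or disabled */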
		xe_gt_sriov_dbg_verbose(gt, "VFs %s/%s\n",
					str_enabled_disabled(true),
					str_enabled_disabled(false));
		break;
	default:
		return -ENOPKG;
	}
	return 0;
}

/**
 * xe_gt_sriov_pf_control_process_guc2pf - Handle VF state notification from GuC.
 * @gt: the &xe_gt
 * @msg: the G2H message
 * @len: the length of the G2H message
 *
 * This function is for PF only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_control_process_guc2pf(struct xe_gt *gt, const u32 *msg, u32 len)
{
	u32 vfid;
	u32 eventid;

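	/* the CT layer is expected to route only GUC2PF_VF_STATE_NOTIFY events here */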
	xe_gt_assert(gt, len);
	xe_gt_assert(gt, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) == GUC_HXG_ORIGIN_GUC);
	xe_gt_assert(gt, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) == GUC_HXG_TYPE_EVENT);
	xe_gt_assert(gt, FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[0]) ==
		     GUC_ACTION_GUC2PF_VF_STATE_NOTIFY);

	if (unlikely(!xe_device_is_sriov_pf(gt_to_xe(gt))))
		return -EPROTO;

	if (unlikely(FIELD_GET(GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_0_MBZ, msg[0])))
		return -EPFNOSUPPORT;

	if (unlikely(len != GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_LEN))
		return -EPROTO;

	vfid = FIELD_GET(GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_1_VFID, msg[1]);
	eventid = FIELD_GET(GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_2_EVENT, msg[2]);

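	/* VFID 0 designates the PF itself */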
	return vfid ? pf_handle_vf_event(gt, vfid, eventid) : pf_handle_pf_event(gt, eventid);
}
258