xref: /linux/drivers/vfio/pci/pds/cmds.c (revision bb500dbe2ac622551d98c0bb2735a68f59489c98)
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2023 Advanced Micro Devices, Inc. */

#include <linux/io.h>
#include <linux/types.h>
#include <linux/delay.h>

#include <linux/pds/pds_common.h>
#include <linux/pds/pds_core_if.h>
#include <linux/pds/pds_adminq.h>

#include "vfio_dev.h"
#include "cmds.h"

#define SUSPEND_TIMEOUT_S		5
#define SUSPEND_CHECK_INTERVAL_MS	1

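/*
 * Wrap a live migration client command in a PDS_AQ_CMD_CLIENT_CMD request
 * and post it on the PF core device's adminq on behalf of this client.
 * Errors other than -EAGAIN are logged; -EAGAIN is passed back so the
 * caller can retry (see the suspend status polling loop below).
 */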
static int pds_vfio_client_adminq_cmd(struct pds_vfio_pci_device *pds_vfio,
				      union pds_core_adminq_cmd *req,
				      union pds_core_adminq_comp *resp,
				      bool fast_poll)
{
	struct pci_dev *pdev = pds_vfio_to_pci_dev(pds_vfio);
	union pds_core_adminq_cmd cmd = {};
	struct pdsc *pdsc;
	int err;

	/* Wrap the client request */
	cmd.client_request.opcode = PDS_AQ_CMD_CLIENT_CMD;
	cmd.client_request.client_id = cpu_to_le16(pds_vfio->client_id);
	memcpy(cmd.client_request.client_cmd, req,
	       sizeof(cmd.client_request.client_cmd));

	pdsc = pdsc_get_pf_struct(pdev);
	if (IS_ERR(pdsc))
		return PTR_ERR(pdsc);

	err = pdsc_adminq_post(pdsc, &cmd, resp, fast_poll);
	if (err && err != -EAGAIN)
		dev_err(pds_vfio_to_dev(pds_vfio),
			"client admin cmd failed: %pe\n", ERR_PTR(err));

	return err;
}

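/*
 * Register this VF as a live migration client of the PF core device
 * using a name unique to its PCI domain/bus/devfn; the returned client
 * id tags all subsequent adminq commands issued for this device.
 */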
int pds_vfio_register_client_cmd(struct pds_vfio_pci_device *pds_vfio)
{
	struct pci_dev *pdev = pds_vfio_to_pci_dev(pds_vfio);
	char devname[PDS_DEVNAME_LEN];
	struct pdsc *pdsc;
	int ci;

	snprintf(devname, sizeof(devname), "%s.%d-%u", PDS_VFIO_LM_DEV_NAME,
		 pci_domain_nr(pdev->bus),
		 PCI_DEVID(pdev->bus->number, pdev->devfn));

	pdsc = pdsc_get_pf_struct(pdev);
	if (IS_ERR(pdsc))
		return PTR_ERR(pdsc);

	ci = pds_client_register(pdsc, devname);
	if (ci < 0)
		return ci;

	pds_vfio->client_id = ci;

	return 0;
}

void pds_vfio_unregister_client_cmd(struct pds_vfio_pci_device *pds_vfio)
{
	struct pci_dev *pdev = pds_vfio_to_pci_dev(pds_vfio);
	struct pdsc *pdsc;
	int err;

	pdsc = pdsc_get_pf_struct(pdev);
	if (IS_ERR(pdsc))
		return;

	err = pds_client_unregister(pdsc, pds_vfio->client_id);
	if (err)
		dev_err(&pdev->dev, "unregister from DSC failed: %pe\n",
			ERR_PTR(err));

	pds_vfio->client_id = 0;
}

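/*
 * Poll the SUSPEND_STATUS command until the firmware stops returning
 * -EAGAIN, i.e. until the suspend started by PDS_LM_CMD_SUSPEND has
 * completed, or until the SUSPEND_TIMEOUT_S limit expires.
 */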
static int
pds_vfio_suspend_wait_device_cmd(struct pds_vfio_pci_device *pds_vfio)
{
	union pds_core_adminq_cmd cmd = {
		.lm_suspend_status = {
			.opcode = PDS_LM_CMD_SUSPEND_STATUS,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
		},
	};
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	union pds_core_adminq_comp comp = {};
	unsigned long time_limit;
	unsigned long time_start;
	unsigned long time_done;
	int err;

	time_start = jiffies;
	time_limit = time_start + HZ * SUSPEND_TIMEOUT_S;
	do {
		err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, true);
		if (err != -EAGAIN)
			break;

		msleep(SUSPEND_CHECK_INTERVAL_MS);
	} while (time_before(jiffies, time_limit));

	time_done = jiffies;
	dev_dbg(dev, "%s: vf%u: Suspend comp received in %u msecs\n", __func__,
		pds_vfio->vf_id, jiffies_to_msecs(time_done - time_start));

	/* Check the results */
	if (time_after_eq(time_done, time_limit)) {
		dev_err(dev, "%s: vf%u: Suspend comp timeout\n", __func__,
			pds_vfio->vf_id);
		err = -ETIMEDOUT;
	}

	return err;
}

int pds_vfio_suspend_device_cmd(struct pds_vfio_pci_device *pds_vfio, u8 type)
{
	union pds_core_adminq_cmd cmd = {
		.lm_suspend = {
			.opcode = PDS_LM_CMD_SUSPEND,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
			.type = type,
		},
	};
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	union pds_core_adminq_comp comp = {};
	int err;

	dev_dbg(dev, "vf%u: Suspend device\n", pds_vfio->vf_id);

	/*
	 * The initial suspend request to the firmware starts the device suspend
	 * operation and the firmware returns success if it's started
	 * successfully.
	 */
	err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, true);
	if (err) {
		dev_err(dev, "vf%u: Suspend failed: %pe\n", pds_vfio->vf_id,
			ERR_PTR(err));
		return err;
	}

	/*
	 * The subsequent suspend status request(s) check if the firmware has
	 * completed the device suspend process.
	 */
	return pds_vfio_suspend_wait_device_cmd(pds_vfio);
}

int pds_vfio_resume_device_cmd(struct pds_vfio_pci_device *pds_vfio, u8 type)
{
	union pds_core_adminq_cmd cmd = {
		.lm_resume = {
			.opcode = PDS_LM_CMD_RESUME,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
			.type = type,
		},
	};
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	union pds_core_adminq_comp comp = {};

	dev_dbg(dev, "vf%u: Resume device\n", pds_vfio->vf_id);

	return pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, true);
}

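/*
 * Query the firmware for the size of the device's live migration state;
 * the result is returned to the caller through *size.
 */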
int pds_vfio_get_lm_state_size_cmd(struct pds_vfio_pci_device *pds_vfio, u64 *size)
{
	union pds_core_adminq_cmd cmd = {
		.lm_state_size = {
			.opcode = PDS_LM_CMD_STATE_SIZE,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
		},
	};
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	union pds_core_adminq_comp comp = {};
	int err;

	dev_dbg(dev, "vf%u: Get migration state size\n", pds_vfio->vf_id);

	err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
	if (err)
		return err;

	*size = le64_to_cpu(comp.lm_state_size.size);
	return 0;
}

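/*
 * DMA-map the pages backing a migration file and build a pds_lm_sg_elem
 * scatter-gather list describing them, then map that list itself so the
 * device can read it. On failure everything mapped so far is undone.
 */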
static int pds_vfio_dma_map_lm_file(struct device *dev,
				    enum dma_data_direction dir,
				    struct pds_vfio_lm_file *lm_file)
{
	struct pds_lm_sg_elem *sgl, *sge;
	struct scatterlist *sg;
	dma_addr_t sgl_addr;
	size_t sgl_size;
	int err;
	int i;

	if (!lm_file)
		return -EINVAL;

	/* dma map file pages */
	err = dma_map_sgtable(dev, &lm_file->sg_table, dir, 0);
	if (err)
		return err;

	lm_file->num_sge = lm_file->sg_table.nents;

	/* alloc sgl */
	sgl_size = lm_file->num_sge * sizeof(struct pds_lm_sg_elem);
	sgl = kzalloc(sgl_size, GFP_KERNEL);
	if (!sgl) {
		err = -ENOMEM;
		goto out_unmap_sgtable;
	}

	/* fill sgl */
	sge = sgl;
	for_each_sgtable_dma_sg(&lm_file->sg_table, sg, i) {
		sge->addr = cpu_to_le64(sg_dma_address(sg));
		sge->len = cpu_to_le32(sg_dma_len(sg));
		dev_dbg(dev, "addr = %llx, len = %u\n", sge->addr, sge->len);
		sge++;
	}

	sgl_addr = dma_map_single(dev, sgl, sgl_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, sgl_addr)) {
		err = -EIO;
		goto out_free_sgl;
	}

	lm_file->sgl = sgl;
	lm_file->sgl_addr = sgl_addr;

	return 0;

out_free_sgl:
	kfree(sgl);
out_unmap_sgtable:
	lm_file->num_sge = 0;
	dma_unmap_sgtable(dev, &lm_file->sg_table, dir, 0);
	return err;
}

static void pds_vfio_dma_unmap_lm_file(struct device *dev,
				       enum dma_data_direction dir,
				       struct pds_vfio_lm_file *lm_file)
{
	if (!lm_file)
		return;

	/* free sgl */
	if (lm_file->sgl) {
		dma_unmap_single(dev, lm_file->sgl_addr,
				 lm_file->num_sge * sizeof(*lm_file->sgl),
				 DMA_TO_DEVICE);
		kfree(lm_file->sgl);
		lm_file->sgl = NULL;
		lm_file->sgl_addr = DMA_MAPPING_ERROR;
		lm_file->num_sge = 0;
	}

	/* dma unmap file pages */
	dma_unmap_sgtable(dev, &lm_file->sg_table, dir, 0);
}

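/*
 * Save path: map the save file for device writes (DMA_FROM_DEVICE) and
 * issue PDS_LM_CMD_SAVE so the firmware can DMA the device state into
 * it, then unmap once the command completes.
 */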
int pds_vfio_get_lm_state_cmd(struct pds_vfio_pci_device *pds_vfio)
{
	union pds_core_adminq_cmd cmd = {
		.lm_save = {
			.opcode = PDS_LM_CMD_SAVE,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
		},
	};
	struct pci_dev *pdev = pds_vfio_to_pci_dev(pds_vfio);
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;
	union pds_core_adminq_comp comp = {};
	struct pds_vfio_lm_file *lm_file;
	int err;

	dev_dbg(&pdev->dev, "vf%u: Get migration state\n", pds_vfio->vf_id);

	lm_file = pds_vfio->save_file;

	err = pds_vfio_dma_map_lm_file(pdsc_dev, DMA_FROM_DEVICE, lm_file);
	if (err) {
		dev_err(&pdev->dev, "failed to map save migration file: %pe\n",
			ERR_PTR(err));
		return err;
	}

	cmd.lm_save.sgl_addr = cpu_to_le64(lm_file->sgl_addr);
	cmd.lm_save.num_sge = cpu_to_le32(lm_file->num_sge);

	err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
	if (err)
		dev_err(&pdev->dev, "failed to get migration state: %pe\n",
			ERR_PTR(err));

	pds_vfio_dma_unmap_lm_file(pdsc_dev, DMA_FROM_DEVICE, lm_file);

	return err;
}

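/*
 * Restore path: map the restore file for device reads (DMA_TO_DEVICE)
 * and issue PDS_LM_CMD_RESTORE so the firmware can DMA the saved state
 * back into the device, then unmap once the command completes.
 */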
int pds_vfio_set_lm_state_cmd(struct pds_vfio_pci_device *pds_vfio)
{
	union pds_core_adminq_cmd cmd = {
		.lm_restore = {
			.opcode = PDS_LM_CMD_RESTORE,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
		},
	};
	struct pci_dev *pdev = pds_vfio_to_pci_dev(pds_vfio);
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;
	union pds_core_adminq_comp comp = {};
	struct pds_vfio_lm_file *lm_file;
	int err;

	dev_dbg(&pdev->dev, "vf%u: Set migration state\n", pds_vfio->vf_id);

	lm_file = pds_vfio->restore_file;

	err = pds_vfio_dma_map_lm_file(pdsc_dev, DMA_TO_DEVICE, lm_file);
	if (err) {
		dev_err(&pdev->dev,
			"failed to map restore migration file: %pe\n",
			ERR_PTR(err));
		return err;
	}

	cmd.lm_restore.sgl_addr = cpu_to_le64(lm_file->sgl_addr);
	cmd.lm_restore.num_sge = cpu_to_le32(lm_file->num_sge);

	err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
	if (err)
		dev_err(&pdev->dev, "failed to set migration state: %pe\n",
			ERR_PTR(err));

	pds_vfio_dma_unmap_lm_file(pdsc_dev, DMA_TO_DEVICE, lm_file);

	return err;
}

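/*
 * Tell the firmware whether a migration is in progress for this VF.
 * This is a best-effort notification: an invalid status or a failed
 * adminq command only generates a warning.
 */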
void pds_vfio_send_host_vf_lm_status_cmd(struct pds_vfio_pci_device *pds_vfio,
					 enum pds_lm_host_vf_status vf_status)
{
	union pds_core_adminq_cmd cmd = {
		.lm_host_vf_status = {
			.opcode = PDS_LM_CMD_HOST_VF_STATUS,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
			.status = vf_status,
		},
	};
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	union pds_core_adminq_comp comp = {};
	int err;

	dev_dbg(dev, "vf%u: Set host VF LM status: %u\n", pds_vfio->vf_id,
		vf_status);
	if (vf_status != PDS_LM_STA_IN_PROGRESS &&
	    vf_status != PDS_LM_STA_NONE) {
		dev_warn(dev, "Invalid host VF migration status, %d\n",
			 vf_status);
		return;
	}

	err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
	if (err)
		dev_warn(dev, "failed to send host VF migration status: %pe\n",
			 ERR_PTR(err));
}
385