xref: /linux/drivers/vfio/pci/mlx5/cmd.c (revision 3f0a50f345f78183f6e9b39c2f45ca5dcaa511ca)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved
 */

#include "cmd.h"

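/*
 * Suspend the VHCA backing this VF so that its state can be saved.
 * op_mod selects the suspend variant and is passed through to the
 * device unchanged.
 */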
int mlx5vf_cmd_suspend_vhca(struct pci_dev *pdev, u16 vhca_id, u16 op_mod)
{
	struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
	u32 out[MLX5_ST_SZ_DW(suspend_vhca_out)] = {};
	u32 in[MLX5_ST_SZ_DW(suspend_vhca_in)] = {};
	int ret;

	if (!mdev)
		return -ENOTCONN;

	MLX5_SET(suspend_vhca_in, in, opcode, MLX5_CMD_OP_SUSPEND_VHCA);
	MLX5_SET(suspend_vhca_in, in, vhca_id, vhca_id);
	MLX5_SET(suspend_vhca_in, in, op_mod, op_mod);

	ret = mlx5_cmd_exec_inout(mdev, suspend_vhca, in, out);
	mlx5_vf_put_core_dev(mdev);
	return ret;
}

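/*
 * Resume a previously suspended VHCA; op_mod mirrors the suspend
 * variants and is likewise passed straight through.
 */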
int mlx5vf_cmd_resume_vhca(struct pci_dev *pdev, u16 vhca_id, u16 op_mod)
{
	struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
	u32 out[MLX5_ST_SZ_DW(resume_vhca_out)] = {};
	u32 in[MLX5_ST_SZ_DW(resume_vhca_in)] = {};
	int ret;

	if (!mdev)
		return -ENOTCONN;

	MLX5_SET(resume_vhca_in, in, opcode, MLX5_CMD_OP_RESUME_VHCA);
	MLX5_SET(resume_vhca_in, in, vhca_id, vhca_id);
	MLX5_SET(resume_vhca_in, in, op_mod, op_mod);

	ret = mlx5_cmd_exec_inout(mdev, resume_vhca, in, out);
	mlx5_vf_put_core_dev(mdev);
	return ret;
}

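/*
 * Query how much memory is needed to hold the VHCA's migration state.
 * The firmware reports it as required_umem_size; callers use it to
 * size the save buffer.
 */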
int mlx5vf_cmd_query_vhca_migration_state(struct pci_dev *pdev, u16 vhca_id,
					  size_t *state_size)
{
	struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
	u32 out[MLX5_ST_SZ_DW(query_vhca_migration_state_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_vhca_migration_state_in)] = {};
	int ret;

	if (!mdev)
		return -ENOTCONN;

	MLX5_SET(query_vhca_migration_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE);
	MLX5_SET(query_vhca_migration_state_in, in, vhca_id, vhca_id);
	MLX5_SET(query_vhca_migration_state_in, in, op_mod, 0);

	ret = mlx5_cmd_exec_inout(mdev, query_vhca_migration_state, in, out);
	if (ret)
		goto end;

	*state_size = MLX5_GET(query_vhca_migration_state_out, out,
			       required_umem_size);

end:
	mlx5_vf_put_core_dev(mdev);
	return ret;
}

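/*
 * Translate a function_id into the corresponding vhca_id by querying
 * that function's general device capabilities (other_function = 1 makes
 * the query act on behalf of another function).
 */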
int mlx5vf_cmd_get_vhca_id(struct pci_dev *pdev, u16 function_id, u16 *vhca_id)
{
	struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
	int out_size;
	void *out;
	int ret;

	if (!mdev)
		return -ENOTCONN;

	out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	out = kzalloc(out_size, GFP_KERNEL);
	if (!out) {
		ret = -ENOMEM;
		goto end;
	}

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, other_function, 1);
	MLX5_SET(query_hca_cap_in, in, function_id, function_id);
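	/*
	 * op_mod carries the capability type in its upper bits; bit 0
	 * selects current (rather than maximum) capabilities.
	 */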
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
		 HCA_CAP_OPMOD_GET_CUR);

	ret = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
	if (ret)
		goto err_exec;

	*vhca_id = MLX5_GET(query_hca_cap_out, out,
			    capability.cmd_hca_cap.vhca_id);

err_exec:
	kfree(out);
end:
	mlx5_vf_put_core_dev(mdev);
	return ret;
}

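/*
 * Build an MTT-based mkey over the migration file's scatter-gather
 * table so the device can DMA the state image directly to or from it.
 */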
static int _create_state_mkey(struct mlx5_core_dev *mdev, u32 pdn,
			      struct mlx5_vf_migration_file *migf, u32 *mkey)
{
	size_t npages = DIV_ROUND_UP(migf->total_length, PAGE_SIZE);
	struct sg_dma_page_iter dma_iter;
	int err = 0, inlen;
	__be64 *mtt;
	void *mkc;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
		sizeof(*mtt) * round_up(npages, 2);

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

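	/*
	 * Each 16-byte octword holds two 8-byte MTT entries, hence the
	 * DIV_ROUND_UP(npages, 2) sizing (and the round_up() above).
	 */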
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 DIV_ROUND_UP(npages, 2));
	mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);

	for_each_sgtable_dma_page(&migf->table.sgt, &dma_iter, 0)
		*mtt++ = cpu_to_be64(sg_page_iter_dma_address(&dma_iter));

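	/* MTT mkey with local and remote read/write access on this PD. */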
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, rr, 1);
	MLX5_SET(mkc, mkc, rw, 1);
	MLX5_SET(mkc, mkc, pd, pdn);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
	MLX5_SET(mkc, mkc, translations_octword_size, DIV_ROUND_UP(npages, 2));
	MLX5_SET64(mkc, mkc, len, migf->total_length);
	err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
	kvfree(in);
	return err;
}

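/*
 * Save the VHCA state into the migration file's buffer: allocate a PD,
 * DMA-map the buffer, wrap it in an mkey, and hand that mkey to the
 * SAVE_VHCA_STATE command. All resources are torn down on both the
 * success and error paths.
 */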
int mlx5vf_cmd_save_vhca_state(struct pci_dev *pdev, u16 vhca_id,
			       struct mlx5_vf_migration_file *migf)
{
	struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
	u32 out[MLX5_ST_SZ_DW(save_vhca_state_out)] = {};
	u32 in[MLX5_ST_SZ_DW(save_vhca_state_in)] = {};
	u32 pdn, mkey;
	int err;

	if (!mdev)
		return -ENOTCONN;

	err = mlx5_core_alloc_pd(mdev, &pdn);
	if (err)
		goto end;

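	/* DMA_FROM_DEVICE: the device writes the saved image into the buffer. */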
	err = dma_map_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE,
			      0);
	if (err)
		goto err_dma_map;

	err = _create_state_mkey(mdev, pdn, migf, &mkey);
	if (err)
		goto err_create_mkey;

	MLX5_SET(save_vhca_state_in, in, opcode,
		 MLX5_CMD_OP_SAVE_VHCA_STATE);
	MLX5_SET(save_vhca_state_in, in, op_mod, 0);
	MLX5_SET(save_vhca_state_in, in, vhca_id, vhca_id);
	MLX5_SET(save_vhca_state_in, in, mkey, mkey);
	MLX5_SET(save_vhca_state_in, in, size, migf->total_length);

	err = mlx5_cmd_exec_inout(mdev, save_vhca_state, in, out);
	if (err)
		goto err_exec;

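	/*
	 * The firmware reports how many bytes it actually wrote, which
	 * may be less than the size that was allocated.
	 */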
	migf->total_length =
		MLX5_GET(save_vhca_state_out, out, actual_image_size);

	mlx5_core_destroy_mkey(mdev, mkey);
	mlx5_core_dealloc_pd(mdev, pdn);
	dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0);
	mlx5_vf_put_core_dev(mdev);

	return 0;

err_exec:
	mlx5_core_destroy_mkey(mdev, mkey);
err_create_mkey:
	dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0);
err_dma_map:
	mlx5_core_dealloc_pd(mdev, pdn);
end:
	mlx5_vf_put_core_dev(mdev);
	return err;
}

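/*
 * Load a previously saved image back into the VHCA. Runs under
 * migf->lock so the image cannot change underneath the device while
 * it is being read.
 */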
int mlx5vf_cmd_load_vhca_state(struct pci_dev *pdev, u16 vhca_id,
			       struct mlx5_vf_migration_file *migf)
{
	struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
	u32 out[MLX5_ST_SZ_DW(load_vhca_state_out)] = {};
	u32 in[MLX5_ST_SZ_DW(load_vhca_state_in)] = {};
	u32 pdn, mkey;
	int err;

	if (!mdev)
		return -ENOTCONN;

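	/* Nothing to load if no image data has been written to the file. */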
	mutex_lock(&migf->lock);
	if (!migf->total_length) {
		err = -EINVAL;
		goto end;
	}

	err = mlx5_core_alloc_pd(mdev, &pdn);
	if (err)
		goto end;

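	/* DMA_TO_DEVICE: the device reads the image out of the buffer. */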
	err = dma_map_sgtable(mdev->device, &migf->table.sgt, DMA_TO_DEVICE, 0);
	if (err)
		goto err_reg;

	err = _create_state_mkey(mdev, pdn, migf, &mkey);
	if (err)
		goto err_mkey;

	MLX5_SET(load_vhca_state_in, in, opcode,
		 MLX5_CMD_OP_LOAD_VHCA_STATE);
	MLX5_SET(load_vhca_state_in, in, op_mod, 0);
	MLX5_SET(load_vhca_state_in, in, vhca_id, vhca_id);
	MLX5_SET(load_vhca_state_in, in, mkey, mkey);
	MLX5_SET(load_vhca_state_in, in, size, migf->total_length);

	err = mlx5_cmd_exec_inout(mdev, load_vhca_state, in, out);

	mlx5_core_destroy_mkey(mdev, mkey);
err_mkey:
	dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_TO_DEVICE, 0);
err_reg:
	mlx5_core_dealloc_pd(mdev, pdn);
end:
	mlx5_vf_put_core_dev(mdev);
	mutex_unlock(&migf->lock);
	return err;
}