xref: /linux/drivers/fwctl/mlx5/main.c (revision 6093a688a07da07808f0122f9aa2a3eed250d853)
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/*
 * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/fwctl.h>
#include <linux/auxiliary_bus.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>
#include <uapi/fwctl/mlx5.h>

#define mlx5ctl_err(mcdev, format, ...) \
	dev_err(&mcdev->fwctl.dev, format, ##__VA_ARGS__)

#define mlx5ctl_dbg(mcdev, format, ...)                             \
	dev_dbg(&mcdev->fwctl.dev, "PID %u: " format, current->pid, \
		##__VA_ARGS__)

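/*
 * Per-open-file state: the core fwctl user context plus the device UID
 * allocated for this file and the uctx capabilities it was granted.
 */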
struct mlx5ctl_uctx {
	struct fwctl_uctx uctx;
	u32 uctx_caps;
	u32 uctx_uid;
};

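/*
 * One fwctl device is created per mlx5 core device. DEFINE_FREE() lets
 * probe use __free(mlx5ctl) scope-based cleanup to drop the reference
 * on error paths.
 */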
struct mlx5ctl_dev {
	struct fwctl_device fwctl;
	struct mlx5_core_dev *mdev;
};
DEFINE_FREE(mlx5ctl, struct mlx5ctl_dev *, if (_T) fwctl_put(&_T->fwctl));

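/*
 * Minimal layouts of the common command mailbox headers, used to read
 * the opcode/op_mod from and set the uid in the input message, and to
 * read the status from the output message.
 */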
struct mlx5_ifc_mbox_in_hdr_bits {
	u8 opcode[0x10];
	u8 uid[0x10];

	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];

	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_mbox_out_hdr_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];

	u8 syndrome[0x20];

	u8 reserved_at_40[0x40];
};

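/* uctx capability bit requested at UID allocation when the device reports it */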
enum {
	MLX5_UCTX_OBJECT_CAP_TOOLS_RESOURCES = 0x4,
};

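/* Command opcodes used by the RPC allow-list below that are defined locally */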
enum {
	MLX5_CMD_OP_QUERY_DRIVER_VERSION = 0x10c,
	MLX5_CMD_OP_QUERY_OTHER_HCA_CAP = 0x10e,
	MLX5_CMD_OP_QUERY_RDB = 0x512,
	MLX5_CMD_OP_QUERY_PSV = 0x602,
	MLX5_CMD_OP_QUERY_DC_CNAK_TRACE = 0x716,
	MLX5_CMD_OP_QUERY_NVMF_BACKEND_CONTROLLER = 0x722,
	MLX5_CMD_OP_QUERY_NVMF_NAMESPACE_CONTEXT = 0x728,
	MLX5_CMD_OP_QUERY_ADJACENT_FUNCTIONS_ID = 0x730,
	MLX5_CMD_OP_DELEGATE_VHCA_MANAGEMENT = 0x731,
	MLX5_CMD_OP_QUERY_DELEGATED_VHCA = 0x732,
	MLX5_CMD_OP_QUERY_BURST_SIZE = 0x813,
	MLX5_CMD_OP_QUERY_DIAGNOSTIC_PARAMS = 0x819,
	MLX5_CMD_OP_SET_DIAGNOSTIC_PARAMS = 0x820,
	MLX5_CMD_OP_QUERY_DIAGNOSTIC_COUNTERS = 0x821,
	MLX5_CMD_OP_QUERY_DELAY_DROP_PARAMS = 0x911,
	MLX5_CMD_OP_QUERY_AFU = 0x971,
	MLX5_CMD_OP_QUERY_CAPI_PEC = 0x981,
	MLX5_CMD_OP_QUERY_UCTX = 0xa05,
	MLX5_CMD_OP_QUERY_UMEM = 0xa09,
	MLX5_CMD_OP_QUERY_NVMF_CC_RESPONSE = 0xb02,
	MLX5_CMD_OP_QUERY_EMULATED_FUNCTIONS_INFO = 0xb03,
	MLX5_CMD_OP_QUERY_REGEXP_PARAMS = 0xb05,
	MLX5_CMD_OP_QUERY_REGEXP_REGISTER = 0xb07,
	MLX5_CMD_OP_USER_QUERY_XRQ_DC_PARAMS_ENTRY = 0xb08,
	MLX5_CMD_OP_USER_QUERY_XRQ_ERROR_PARAMS = 0xb0a,
	MLX5_CMD_OP_ACCESS_REGISTER_USER = 0xb0c,
	MLX5_CMD_OP_QUERY_EMULATION_DEVICE_EQ_MSIX_MAPPING = 0xb0f,
	MLX5_CMD_OP_QUERY_MATCH_SAMPLE_INFO = 0xb13,
	MLX5_CMD_OP_QUERY_CRYPTO_STATE = 0xb14,
	MLX5_CMD_OP_QUERY_VUID = 0xb22,
	MLX5_CMD_OP_QUERY_DPA_PARTITION = 0xb28,
	MLX5_CMD_OP_QUERY_DPA_PARTITIONS = 0xb2a,
	MLX5_CMD_OP_POSTPONE_CONNECTED_QP_TIMEOUT = 0xb2e,
	MLX5_CMD_OP_QUERY_EMULATED_RESOURCES_INFO = 0xb2f,
	MLX5_CMD_OP_QUERY_RSV_RESOURCES = 0x8000,
	MLX5_CMD_OP_QUERY_MTT = 0x8001,
	MLX5_CMD_OP_QUERY_SCHED_QUEUE = 0x8006,
};

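/*
 * Allocate a device UID (user context) carrying the requested
 * capabilities. Returns the UID on success or a negative errno.
 */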
static int mlx5ctl_alloc_uid(struct mlx5ctl_dev *mcdev, u32 cap)
{
	u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {};
	void *uctx;
	int ret;
	u16 uid;

	uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);

	mlx5ctl_dbg(mcdev, "%s: caps 0x%x\n", __func__, cap);
	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
	MLX5_SET(uctx, uctx, cap, cap);

	ret = mlx5_cmd_exec(mcdev->mdev, in, sizeof(in), out, sizeof(out));
	if (ret)
		return ret;

	uid = MLX5_GET(create_uctx_out, out, uid);
	mlx5ctl_dbg(mcdev, "allocated uid %u with caps 0x%x\n", uid, cap);
	return uid;
}

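/* Destroy the UID previously allocated by mlx5ctl_alloc_uid() */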
static void mlx5ctl_release_uid(struct mlx5ctl_dev *mcdev, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
	struct mlx5_core_dev *mdev = mcdev->mdev;
	int ret;

	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
	MLX5_SET(destroy_uctx_in, in, uid, uid);

	ret = mlx5_cmd_exec_in(mdev, destroy_uctx, in);
	mlx5ctl_dbg(mcdev, "released uid %u %pe\n", uid, ERR_PTR(ret));
}

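/*
 * fwctl ->open_uctx(): request the TOOLS_RESOURCES capability when the
 * device supports it and allocate a UID for this file.
 */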
static int mlx5ctl_open_uctx(struct fwctl_uctx *uctx)
{
	struct mlx5ctl_uctx *mfd =
		container_of(uctx, struct mlx5ctl_uctx, uctx);
	struct mlx5ctl_dev *mcdev =
		container_of(uctx->fwctl, struct mlx5ctl_dev, fwctl);
	int uid;

	/*
	 * New FW supports the TOOLS_RESOURCES uid security label,
	 * which allows commands to manipulate the global device state.
	 * Otherwise only the basic existing RDMA devx privileges are allowed.
	 */
	if (MLX5_CAP_GEN(mcdev->mdev, uctx_cap) &
	    MLX5_UCTX_OBJECT_CAP_TOOLS_RESOURCES)
		mfd->uctx_caps |= MLX5_UCTX_OBJECT_CAP_TOOLS_RESOURCES;

	uid = mlx5ctl_alloc_uid(mcdev, mfd->uctx_caps);
	if (uid < 0)
		return uid;

	mfd->uctx_uid = uid;
	return 0;
}

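/* fwctl ->close_uctx(): release the UID owned by this file */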
static void mlx5ctl_close_uctx(struct fwctl_uctx *uctx)
{
	struct mlx5ctl_dev *mcdev =
		container_of(uctx->fwctl, struct mlx5ctl_dev, fwctl);
	struct mlx5ctl_uctx *mfd =
		container_of(uctx, struct mlx5ctl_uctx, uctx);

	mlx5ctl_release_uid(mcdev, mfd->uctx_uid);
}

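/* fwctl ->info(): report the allocated UID and uctx capabilities to userspace */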
static void *mlx5ctl_info(struct fwctl_uctx *uctx, size_t *length)
{
	struct mlx5ctl_uctx *mfd =
		container_of(uctx, struct mlx5ctl_uctx, uctx);
	struct fwctl_info_mlx5 *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	info->uid = mfd->uctx_uid;
	info->uctx_caps = mfd->uctx_caps;
	*length = sizeof(*info);
	return info;
}

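/*
 * Allow-list of commands that may be forwarded to the device, gated by
 * the scope the caller requested.
 */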
static bool mlx5ctl_validate_rpc(const void *in, enum fwctl_rpc_scope scope)
{
	u16 opcode = MLX5_GET(mbox_in_hdr, in, opcode);
	u16 op_mod = MLX5_GET(mbox_in_hdr, in, op_mod);

	/*
	 * Currently the driver can't keep track of commands that allocate
	 * objects in the FW; such commands are safe from a security
	 * perspective, but nothing would free the memory when the FD is
	 * closed. For now permit only query commands and set commands that
	 * don't alter objects. Also, the caps for each scope have not been
	 * defined yet, so filter commands manually for now.
	 */
	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_POSTPONE_CONNECTED_QP_TIMEOUT:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_OTHER_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OPCODE_QUERY_VUID:
	case MLX5_CMD_OP_DELEGATE_VHCA_MANAGEMENT:
	/*
	 * FW limits SET_HCA_CAP on the tools UID to the other-function
	 * mode only, which is used for function pre-configuration.
	 */
	case MLX5_CMD_OP_SET_HCA_CAP:
		return true; /* scope >= FWCTL_RPC_CONFIGURATION; */

	case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
	case MLX5_CMD_OP_FPGA_QUERY_QP:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_QUERY_AFU:
	case MLX5_CMD_OP_QUERY_BURST_SIZE:
	case MLX5_CMD_OP_QUERY_CAPI_PEC:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_CRYPTO_STATE:
	case MLX5_CMD_OP_QUERY_DC_CNAK_TRACE:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_DELAY_DROP_PARAMS:
	case MLX5_CMD_OP_QUERY_DIAGNOSTIC_COUNTERS:
	case MLX5_CMD_OP_QUERY_DIAGNOSTIC_PARAMS:
	case MLX5_CMD_OP_QUERY_DPA_PARTITION:
	case MLX5_CMD_OP_QUERY_DPA_PARTITIONS:
	case MLX5_CMD_OP_QUERY_DRIVER_VERSION:
	case MLX5_CMD_OP_QUERY_EMULATED_FUNCTIONS_INFO:
	case MLX5_CMD_OP_QUERY_EMULATED_RESOURCES_INFO:
	case MLX5_CMD_OP_QUERY_EMULATION_DEVICE_EQ_MSIX_MAPPING:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_LAG:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_QUERY_MATCH_SAMPLE_INFO:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_QUERY_MTT:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_NVMF_BACKEND_CONTROLLER:
	case MLX5_CMD_OP_QUERY_NVMF_CC_RESPONSE:
	case MLX5_CMD_OP_QUERY_NVMF_NAMESPACE_CONTEXT:
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_QUERY_PSV:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
	case MLX5_CMD_OP_QUERY_RDB:
	case MLX5_CMD_OP_QUERY_REGEXP_PARAMS:
	case MLX5_CMD_OP_QUERY_REGEXP_REGISTER:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_RSV_RESOURCES:
	case MLX5_CMD_OP_QUERY_SCHED_QUEUE:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_SF_PARTITION:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_UCTX:
	case MLX5_CMD_OP_QUERY_UMEM:
	case MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE:
	case MLX5_CMD_OP_QUERY_VHCA_STATE:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_WOL_ROL:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_USER_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_USER_QUERY_XRQ_ERROR_PARAMS:
	case MLX5_CMD_OP_QUERY_ADJACENT_FUNCTIONS_ID:
	case MLX5_CMD_OP_QUERY_DELEGATED_VHCA:
		return scope >= FWCTL_RPC_DEBUG_READ_ONLY;

	case MLX5_CMD_OP_SET_DIAGNOSTIC_PARAMS:
		return scope >= FWCTL_RPC_DEBUG_WRITE;

	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_ACCESS_REGISTER_USER:
		if (op_mod == 0) /* write */
			return true; /* scope >= FWCTL_RPC_CONFIGURATION; */
		return scope >= FWCTL_RPC_DEBUG_READ_ONLY;
	default:
		return false;
	}
}

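/*
 * fwctl ->fw_rpc(): validate the command against the allow-list, stamp
 * it with this file's UID and execute it. Returns the output mailbox
 * (which may reuse the input buffer) or an ERR_PTR.
 */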
static void *mlx5ctl_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
			    void *rpc_in, size_t in_len, size_t *out_len)
{
	struct mlx5ctl_dev *mcdev =
		container_of(uctx->fwctl, struct mlx5ctl_dev, fwctl);
	struct mlx5ctl_uctx *mfd =
		container_of(uctx, struct mlx5ctl_uctx, uctx);
	void *rpc_out;
	int ret;

	if (in_len < MLX5_ST_SZ_BYTES(mbox_in_hdr) ||
	    *out_len < MLX5_ST_SZ_BYTES(mbox_out_hdr))
		return ERR_PTR(-EMSGSIZE);

	mlx5ctl_dbg(mcdev, "[UID %d] cmdif: opcode 0x%x inlen %zu outlen %zu\n",
		    mfd->uctx_uid, MLX5_GET(mbox_in_hdr, rpc_in, opcode),
		    in_len, *out_len);

	if (!mlx5ctl_validate_rpc(rpc_in, scope))
		return ERR_PTR(-EBADMSG);

	/*
	 * mlx5_cmd_do() copies the input message to its own buffer before
	 * executing it, so we can reuse the allocation for the output.
	 */
	if (*out_len <= in_len) {
		rpc_out = rpc_in;
	} else {
		rpc_out = kvzalloc(*out_len, GFP_KERNEL);
		if (!rpc_out)
			return ERR_PTR(-ENOMEM);
	}

	/* Enforce the user context for the command */
	MLX5_SET(mbox_in_hdr, rpc_in, uid, mfd->uctx_uid);
	ret = mlx5_cmd_do(mcdev->mdev, rpc_in, in_len, rpc_out, *out_len);

	mlx5ctl_dbg(mcdev,
		    "[UID %d] cmdif: opcode 0x%x status 0x%x retval %pe\n",
		    mfd->uctx_uid, MLX5_GET(mbox_in_hdr, rpc_in, opcode),
		    MLX5_GET(mbox_out_hdr, rpc_out, status), ERR_PTR(ret));

	/*
	 * -EREMOTEIO means execution succeeded and rpc_out is valid, but an
	 * error code was returned inside it. Everything else means the RPC
	 * did not make it to the device.
	 */
	if (ret && ret != -EREMOTEIO) {
		if (rpc_out != rpc_in)
			kvfree(rpc_out);
		return ERR_PTR(ret);
	}
	return rpc_out;
}

static const struct fwctl_ops mlx5ctl_ops = {
	.device_type = FWCTL_DEVICE_TYPE_MLX5,
	.uctx_size = sizeof(struct mlx5ctl_uctx),
	.open_uctx = mlx5ctl_open_uctx,
	.close_uctx = mlx5ctl_close_uctx,
	.info = mlx5ctl_info,
	.fw_rpc = mlx5ctl_fw_rpc,
};

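/* Bind to the mlx5 ".fwctl" auxiliary device and register the fwctl device */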
static int mlx5ctl_probe(struct auxiliary_device *adev,
			 const struct auxiliary_device_id *id)
{
	struct mlx5_adev *madev = container_of(adev, struct mlx5_adev, adev);
	struct mlx5_core_dev *mdev = madev->mdev;
	struct mlx5ctl_dev *mcdev __free(mlx5ctl) = fwctl_alloc_device(
		&mdev->pdev->dev, &mlx5ctl_ops, struct mlx5ctl_dev, fwctl);
	int ret;

	if (!mcdev)
		return -ENOMEM;

	mcdev->mdev = mdev;

	ret = fwctl_register(&mcdev->fwctl);
	if (ret)
		return ret;
	auxiliary_set_drvdata(adev, no_free_ptr(mcdev));
	return 0;
}

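/* Unregister the fwctl device and drop the probe's reference */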
static void mlx5ctl_remove(struct auxiliary_device *adev)
{
	struct mlx5ctl_dev *mcdev = auxiliary_get_drvdata(adev);

	fwctl_unregister(&mcdev->fwctl);
	fwctl_put(&mcdev->fwctl);
}

static const struct auxiliary_device_id mlx5ctl_id_table[] = {
	{.name = MLX5_ADEV_NAME ".fwctl",},
	{}
};
MODULE_DEVICE_TABLE(auxiliary, mlx5ctl_id_table);

static struct auxiliary_driver mlx5ctl_driver = {
	.name = "mlx5_fwctl",
	.probe = mlx5ctl_probe,
	.remove = mlx5ctl_remove,
	.id_table = mlx5ctl_id_table,
};

module_auxiliary_driver(mlx5ctl_driver);

MODULE_IMPORT_NS("FWCTL");
MODULE_DESCRIPTION("mlx5 ConnectX fwctl driver");
MODULE_AUTHOR("Saeed Mahameed <saeedm@nvidia.com>");
MODULE_LICENSE("Dual BSD/GPL");