/*
 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/tout.h"
#define CREATE_TRACE_POINTS
#include "diag/cmd_tracepoint.h"

struct mlx5_ifc_mbox_out_bits {
        u8 status[0x8];
        u8 reserved_at_8[0x18];

        u8 syndrome[0x20];

        u8 reserved_at_40[0x40];
};

struct mlx5_ifc_mbox_in_bits {
        u8 opcode[0x10];
        u8 uid[0x10];

        u8 reserved_at_20[0x10];
        u8 op_mod[0x10];

        u8 reserved_at_40[0x40];
};

enum {
        CMD_IF_REV = 5,
};

enum {
        CMD_MODE_POLLING,
        CMD_MODE_EVENTS
};

enum {
        MLX5_CMD_DELIVERY_STAT_OK                       = 0x0,
        MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR               = 0x1,
        MLX5_CMD_DELIVERY_STAT_TOK_ERR                  = 0x2,
        MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR          = 0x3,
        MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR        = 0x4,
        MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR         = 0x5,
        MLX5_CMD_DELIVERY_STAT_FW_ERR                   = 0x6,
        MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR            = 0x7,
        MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR           = 0x8,
        MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR      = 0x9,
        MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR            = 0x10,
};

static u16 in_to_opcode(void *in)
{
        return MLX5_GET(mbox_in, in, opcode);
}

static u16 in_to_uid(void *in)
{
        return MLX5_GET(mbox_in, in, uid);
}

/* Returns true for opcodes that might be triggered very frequently and could
 * throttle the command interface. Such opcodes have their command-slot usage
 * limited.
 */
static bool mlx5_cmd_is_throttle_opcode(u16 op)
{
        switch (op) {
        case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
        case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
        case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
        case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
        case MLX5_CMD_OP_SYNC_CRYPTO:
                return true;
        }
        return false;
}

static struct mlx5_cmd_work_ent *
cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
              struct mlx5_cmd_msg *out, void *uout, int uout_size,
              mlx5_cmd_cbk_t cbk, void *context, int page_queue)
{
        gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
        struct mlx5_cmd_work_ent *ent;

        ent = kzalloc(sizeof(*ent), alloc_flags);
        if (!ent)
                return ERR_PTR(-ENOMEM);

        ent->idx        = -EINVAL;
        ent->in         = in;
        ent->out        = out;
        ent->uout       = uout;
        ent->uout_size  = uout_size;
        ent->callback   = cbk;
        ent->context    = context;
        ent->cmd        = cmd;
        ent->page_queue = page_queue;
        ent->op         = in_to_opcode(in->first.data);
        refcount_set(&ent->refcnt, 1);

        return ent;
}

static void cmd_free_ent(struct mlx5_cmd_work_ent *ent)
{
        kfree(ent);
}

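/* Command tokens cycle through 1..255; zero is skipped, so every issued
 * command carries a non-zero token that is copied into its command layout
 * and mailbox blocks.
 */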
static u8 alloc_token(struct mlx5_cmd *cmd)
{
        u8 token;

        spin_lock(&cmd->token_lock);
        cmd->token++;
        if (cmd->token == 0)
                cmd->token++;
        token = cmd->token;
        spin_unlock(&cmd->token_lock);

        return token;
}

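/* Reserve a free command slot: find a clear bit in the slot bitmask under
 * alloc_lock, claim it and record the entry in ent_arr.
 */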
static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cmd->alloc_lock, flags);
        ret = find_first_bit(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
        if (ret < cmd->vars.max_reg_cmds) {
                clear_bit(ret, &cmd->vars.bitmask);
                ent->idx = ret;
                cmd->ent_arr[ent->idx] = ent;
        }
        spin_unlock_irqrestore(&cmd->alloc_lock, flags);

        return ret < cmd->vars.max_reg_cmds ? ret : -ENOMEM;
}

static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
{
        lockdep_assert_held(&cmd->alloc_lock);
        set_bit(idx, &cmd->vars.bitmask);
}

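/* Command entries are reference counted. The final cmd_ent_put() returns the
 * slot to the bitmask, releases the corresponding semaphore and frees the
 * entry.
 */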
static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
{
        refcount_inc(&ent->refcnt);
}

static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
{
        struct mlx5_cmd *cmd = ent->cmd;
        unsigned long flags;

        spin_lock_irqsave(&cmd->alloc_lock, flags);
        if (!refcount_dec_and_test(&ent->refcnt))
                goto out;

        if (ent->idx >= 0) {
                cmd_free_index(cmd, ent->idx);
                up(ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem);
        }

        cmd_free_ent(ent);
out:
        spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
        return cmd->cmd_buf + (idx << cmd->vars.log_stride);
}

static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg)
{
        int size = msg->len;
        int blen = size - min_t(int, sizeof(msg->first.data), size);

        return DIV_ROUND_UP(blen, MLX5_CMD_DATA_BLOCK_SIZE);
}

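/* Commands and mailbox blocks are protected by 8-bit XOR signatures: the
 * signature byte is written as the complement of the XOR of the covered
 * bytes, so a valid region XORs to 0xff.
 */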
static u8 xor8_buf(void *buf, size_t offset, int len)
{
        u8 *ptr = buf;
        u8 sum = 0;
        int i;
        int end = len + offset;

        for (i = offset; i < end; i++)
                sum ^= ptr[i];

        return sum;
}

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
        size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
        int xor_len = sizeof(*block) - sizeof(block->data) - 1;

        if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
                return -EHWPOISON;

        if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
                return -EHWPOISON;

        return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block)
{
        int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
        size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);

        block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
        block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
}

static void calc_chain_sig(struct mlx5_cmd_msg *msg)
{
        struct mlx5_cmd_mailbox *next = msg->next;
        int n = mlx5_calc_cmd_blocks(msg);
        int i = 0;

        for (i = 0; i < n && next; i++) {
                calc_block_sig(next->buf);
                next = next->next;
        }
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
        ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
        if (csum) {
                calc_chain_sig(ent->in);
                calc_chain_sig(ent->out);
        }
}

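/* Busy-wait (with cond_resched()) until firmware clears the ownership bit or
 * the command timeout, plus one second of grace, expires.
 */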
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
        struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, cmd);
        u64 cmd_to_ms = mlx5_tout_ms(dev, CMD);
        unsigned long poll_end;
        u8 own;

        poll_end = jiffies + msecs_to_jiffies(cmd_to_ms + 1000);

        do {
                own = READ_ONCE(ent->lay->status_own);
                if (!(own & CMD_OWNER_HW)) {
                        ent->ret = 0;
                        return;
                }
                cond_resched();
        } while (time_before(jiffies, poll_end));

        ent->ret = -ETIMEDOUT;
}

static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
        struct mlx5_cmd_mailbox *next = ent->out->next;
        int n = mlx5_calc_cmd_blocks(ent->out);
        int err;
        u8 sig;
        int i = 0;

        sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
        if (sig != 0xff)
                return -EHWPOISON;

        for (i = 0; i < n && next; i++) {
                err = verify_block_sig(next->buf);
                if (err)
                        return -EHWPOISON;

                next = next->next;
        }

        return 0;
}

static void dump_buf(void *buf, int size, int data_only, int offset, int idx)
{
        __be32 *p = buf;
        int i;

        for (i = 0; i < size; i += 16) {
                pr_debug("cmd[%d]: %03x: %08x %08x %08x %08x\n", idx, offset,
                         be32_to_cpu(p[0]), be32_to_cpu(p[1]),
                         be32_to_cpu(p[2]), be32_to_cpu(p[3]));
                p += 4;
                offset += 16;
        }
        if (!data_only)
                pr_debug("\n");
}

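/* Emulate a command return value while the device is in an internal error
 * state: destroy/cleanup opcodes report success so teardown flows can make
 * progress, everything else is reported as aborted by the driver.
 */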
static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
                                       u32 *synd, u8 *status)
{
        *synd = 0;
        *status = 0;

        switch (op) {
        case MLX5_CMD_OP_TEARDOWN_HCA:
        case MLX5_CMD_OP_DISABLE_HCA:
        case MLX5_CMD_OP_MANAGE_PAGES:
        case MLX5_CMD_OP_DESTROY_MKEY:
        case MLX5_CMD_OP_DESTROY_EQ:
        case MLX5_CMD_OP_DESTROY_CQ:
        case MLX5_CMD_OP_DESTROY_QP:
        case MLX5_CMD_OP_DESTROY_PSV:
        case MLX5_CMD_OP_DESTROY_SRQ:
        case MLX5_CMD_OP_DESTROY_XRC_SRQ:
        case MLX5_CMD_OP_DESTROY_XRQ:
        case MLX5_CMD_OP_DESTROY_DCT:
        case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
        case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
        case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
        case MLX5_CMD_OP_DEALLOC_PD:
        case MLX5_CMD_OP_DEALLOC_UAR:
        case MLX5_CMD_OP_DETACH_FROM_MCG:
        case MLX5_CMD_OP_DEALLOC_XRCD:
        case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
        case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
        case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
        case MLX5_CMD_OP_DESTROY_LAG:
        case MLX5_CMD_OP_DESTROY_VPORT_LAG:
        case MLX5_CMD_OP_DESTROY_TIR:
        case MLX5_CMD_OP_DESTROY_SQ:
        case MLX5_CMD_OP_DESTROY_RQ:
        case MLX5_CMD_OP_DESTROY_RMP:
        case MLX5_CMD_OP_DESTROY_TIS:
        case MLX5_CMD_OP_DESTROY_RQT:
        case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
        case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
        case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
        case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
        case MLX5_CMD_OP_2ERR_QP:
        case MLX5_CMD_OP_2RST_QP:
        case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
        case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
        case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
        case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
        case MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT:
        case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
        case MLX5_CMD_OP_FPGA_DESTROY_QP:
        case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
        case MLX5_CMD_OP_DEALLOC_MEMIC:
        case MLX5_CMD_OP_PAGE_FAULT_RESUME:
        case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
        case MLX5_CMD_OP_DEALLOC_SF:
        case MLX5_CMD_OP_DESTROY_UCTX:
        case MLX5_CMD_OP_DESTROY_UMEM:
        case MLX5_CMD_OP_MODIFY_RQT:
                return MLX5_CMD_STAT_OK;

        case MLX5_CMD_OP_QUERY_HCA_CAP:
        case MLX5_CMD_OP_QUERY_ADAPTER:
        case MLX5_CMD_OP_INIT_HCA:
        case MLX5_CMD_OP_ENABLE_HCA:
        case MLX5_CMD_OP_QUERY_PAGES:
        case MLX5_CMD_OP_SET_HCA_CAP:
        case MLX5_CMD_OP_QUERY_ISSI:
        case MLX5_CMD_OP_SET_ISSI:
        case MLX5_CMD_OP_CREATE_MKEY:
        case MLX5_CMD_OP_QUERY_MKEY:
        case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
        case MLX5_CMD_OP_CREATE_EQ:
        case MLX5_CMD_OP_QUERY_EQ:
        case MLX5_CMD_OP_GEN_EQE:
        case MLX5_CMD_OP_CREATE_CQ:
        case MLX5_CMD_OP_QUERY_CQ:
        case MLX5_CMD_OP_MODIFY_CQ:
        case MLX5_CMD_OP_CREATE_QP:
        case MLX5_CMD_OP_RST2INIT_QP:
        case MLX5_CMD_OP_INIT2RTR_QP:
        case MLX5_CMD_OP_RTR2RTS_QP:
        case MLX5_CMD_OP_RTS2RTS_QP:
        case MLX5_CMD_OP_SQERR2RTS_QP:
        case MLX5_CMD_OP_QUERY_QP:
        case MLX5_CMD_OP_SQD_RTS_QP:
        case MLX5_CMD_OP_INIT2INIT_QP:
        case MLX5_CMD_OP_CREATE_PSV:
        case MLX5_CMD_OP_CREATE_SRQ:
        case MLX5_CMD_OP_QUERY_SRQ:
        case MLX5_CMD_OP_ARM_RQ:
        case MLX5_CMD_OP_CREATE_XRC_SRQ:
        case MLX5_CMD_OP_QUERY_XRC_SRQ:
        case MLX5_CMD_OP_ARM_XRC_SRQ:
        case MLX5_CMD_OP_CREATE_XRQ:
        case MLX5_CMD_OP_QUERY_XRQ:
        case MLX5_CMD_OP_ARM_XRQ:
        case MLX5_CMD_OP_CREATE_DCT:
        case MLX5_CMD_OP_DRAIN_DCT:
        case MLX5_CMD_OP_QUERY_DCT:
        case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
        case MLX5_CMD_OP_QUERY_VPORT_STATE:
        case MLX5_CMD_OP_MODIFY_VPORT_STATE:
        case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
        case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
        case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
        case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
        case MLX5_CMD_OP_SET_ROCE_ADDRESS:
        case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
        case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
        case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
        case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
        case MLX5_CMD_OP_QUERY_VNIC_ENV:
        case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
        case MLX5_CMD_OP_ALLOC_Q_COUNTER:
        case MLX5_CMD_OP_QUERY_Q_COUNTER:
        case MLX5_CMD_OP_SET_MONITOR_COUNTER:
        case MLX5_CMD_OP_ARM_MONITOR_COUNTER:
        case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
        case MLX5_CMD_OP_QUERY_RATE_LIMIT:
        case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
        case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
        case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
        case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
        case MLX5_CMD_OP_ALLOC_PD:
        case MLX5_CMD_OP_ALLOC_UAR:
        case MLX5_CMD_OP_CONFIG_INT_MODERATION:
        case MLX5_CMD_OP_ACCESS_REG:
        case MLX5_CMD_OP_ATTACH_TO_MCG:
        case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
        case MLX5_CMD_OP_MAD_IFC:
        case MLX5_CMD_OP_QUERY_MAD_DEMUX:
        case MLX5_CMD_OP_SET_MAD_DEMUX:
        case MLX5_CMD_OP_NOP:
        case MLX5_CMD_OP_ALLOC_XRCD:
        case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
        case MLX5_CMD_OP_QUERY_CONG_STATUS:
        case MLX5_CMD_OP_MODIFY_CONG_STATUS:
        case MLX5_CMD_OP_QUERY_CONG_PARAMS:
        case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
        case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
        case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
        case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
        case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
        case MLX5_CMD_OP_CREATE_LAG:
        case MLX5_CMD_OP_MODIFY_LAG:
        case MLX5_CMD_OP_QUERY_LAG:
        case MLX5_CMD_OP_CREATE_VPORT_LAG:
        case MLX5_CMD_OP_CREATE_TIR:
        case MLX5_CMD_OP_MODIFY_TIR:
        case MLX5_CMD_OP_QUERY_TIR:
        case MLX5_CMD_OP_CREATE_SQ:
        case MLX5_CMD_OP_MODIFY_SQ:
        case MLX5_CMD_OP_QUERY_SQ:
        case MLX5_CMD_OP_CREATE_RQ:
        case MLX5_CMD_OP_MODIFY_RQ:
        case MLX5_CMD_OP_QUERY_RQ:
        case MLX5_CMD_OP_CREATE_RMP:
        case MLX5_CMD_OP_MODIFY_RMP:
        case MLX5_CMD_OP_QUERY_RMP:
        case MLX5_CMD_OP_CREATE_TIS:
        case MLX5_CMD_OP_MODIFY_TIS:
        case MLX5_CMD_OP_QUERY_TIS:
        case MLX5_CMD_OP_CREATE_RQT:
        case MLX5_CMD_OP_QUERY_RQT:

        case MLX5_CMD_OP_CREATE_FLOW_TABLE:
        case MLX5_CMD_OP_QUERY_FLOW_TABLE:
        case MLX5_CMD_OP_CREATE_FLOW_GROUP:
        case MLX5_CMD_OP_QUERY_FLOW_GROUP:
        case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
        case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
        case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
        case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
        case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
        case MLX5_CMD_OP_FPGA_CREATE_QP:
        case MLX5_CMD_OP_FPGA_MODIFY_QP:
        case MLX5_CMD_OP_FPGA_QUERY_QP:
        case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
        case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
        case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
        case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
        case MLX5_CMD_OP_CREATE_UCTX:
        case MLX5_CMD_OP_CREATE_UMEM:
        case MLX5_CMD_OP_ALLOC_MEMIC:
        case MLX5_CMD_OP_MODIFY_XRQ:
        case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
        case MLX5_CMD_OP_QUERY_VHCA_STATE:
        case MLX5_CMD_OP_MODIFY_VHCA_STATE:
        case MLX5_CMD_OP_ALLOC_SF:
        case MLX5_CMD_OP_SUSPEND_VHCA:
        case MLX5_CMD_OP_RESUME_VHCA:
        case MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE:
        case MLX5_CMD_OP_SAVE_VHCA_STATE:
        case MLX5_CMD_OP_LOAD_VHCA_STATE:
        case MLX5_CMD_OP_SYNC_CRYPTO:
        case MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS:
                *status = MLX5_DRIVER_STATUS_ABORTED;
                *synd = MLX5_DRIVER_SYND;
                return -ENOLINK;
        default:
                mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
                return -EINVAL;
        }
}

const char *mlx5_command_str(int command)
{
#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

        switch (command) {
        MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
        MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
        MLX5_COMMAND_STR_CASE(INIT_HCA);
        MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
        MLX5_COMMAND_STR_CASE(ENABLE_HCA);
        MLX5_COMMAND_STR_CASE(DISABLE_HCA);
        MLX5_COMMAND_STR_CASE(QUERY_PAGES);
        MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
        MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
        MLX5_COMMAND_STR_CASE(QUERY_ISSI);
        MLX5_COMMAND_STR_CASE(SET_ISSI);
        MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION);
        MLX5_COMMAND_STR_CASE(CREATE_MKEY);
        MLX5_COMMAND_STR_CASE(QUERY_MKEY);
        MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
        MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
        MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
        MLX5_COMMAND_STR_CASE(CREATE_EQ);
        MLX5_COMMAND_STR_CASE(DESTROY_EQ);
        MLX5_COMMAND_STR_CASE(QUERY_EQ);
        MLX5_COMMAND_STR_CASE(GEN_EQE);
        MLX5_COMMAND_STR_CASE(CREATE_CQ);
        MLX5_COMMAND_STR_CASE(DESTROY_CQ);
        MLX5_COMMAND_STR_CASE(QUERY_CQ);
        MLX5_COMMAND_STR_CASE(MODIFY_CQ);
        MLX5_COMMAND_STR_CASE(CREATE_QP);
        MLX5_COMMAND_STR_CASE(DESTROY_QP);
        MLX5_COMMAND_STR_CASE(RST2INIT_QP);
        MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
        MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
        MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
        MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
        MLX5_COMMAND_STR_CASE(2ERR_QP);
        MLX5_COMMAND_STR_CASE(2RST_QP);
        MLX5_COMMAND_STR_CASE(QUERY_QP);
        MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
        MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
        MLX5_COMMAND_STR_CASE(CREATE_PSV);
        MLX5_COMMAND_STR_CASE(DESTROY_PSV);
        MLX5_COMMAND_STR_CASE(CREATE_SRQ);
        MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
        MLX5_COMMAND_STR_CASE(QUERY_SRQ);
        MLX5_COMMAND_STR_CASE(ARM_RQ);
        MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
        MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
        MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
        MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
        MLX5_COMMAND_STR_CASE(CREATE_DCT);
        MLX5_COMMAND_STR_CASE(DESTROY_DCT);
        MLX5_COMMAND_STR_CASE(DRAIN_DCT);
        MLX5_COMMAND_STR_CASE(QUERY_DCT);
        MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
        MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
        MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
        MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
        MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
        MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
        MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
        MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
        MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
        MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
        MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
        MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
        MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
        MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV);
        MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
        MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
        MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
        MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
        MLX5_COMMAND_STR_CASE(SET_MONITOR_COUNTER);
        MLX5_COMMAND_STR_CASE(ARM_MONITOR_COUNTER);
        MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
        MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
        MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
        MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
        MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
        MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
        MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
        MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
        MLX5_COMMAND_STR_CASE(ALLOC_PD);
        MLX5_COMMAND_STR_CASE(DEALLOC_PD);
        MLX5_COMMAND_STR_CASE(ALLOC_UAR);
        MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
        MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
        MLX5_COMMAND_STR_CASE(ACCESS_REG);
        MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
        MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
        MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
        MLX5_COMMAND_STR_CASE(MAD_IFC);
        MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
        MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
        MLX5_COMMAND_STR_CASE(NOP);
        MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
        MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
        MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
        MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
        MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
        MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
        MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
        MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
        MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
        MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
        MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
        MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
        MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
        MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
        MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
        MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
        MLX5_COMMAND_STR_CASE(CREATE_LAG);
        MLX5_COMMAND_STR_CASE(MODIFY_LAG);
        MLX5_COMMAND_STR_CASE(QUERY_LAG);
        MLX5_COMMAND_STR_CASE(DESTROY_LAG);
        MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG);
        MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG);
        MLX5_COMMAND_STR_CASE(CREATE_TIR);
        MLX5_COMMAND_STR_CASE(MODIFY_TIR);
        MLX5_COMMAND_STR_CASE(DESTROY_TIR);
        MLX5_COMMAND_STR_CASE(QUERY_TIR);
        MLX5_COMMAND_STR_CASE(CREATE_SQ);
        MLX5_COMMAND_STR_CASE(MODIFY_SQ);
        MLX5_COMMAND_STR_CASE(DESTROY_SQ);
        MLX5_COMMAND_STR_CASE(QUERY_SQ);
        MLX5_COMMAND_STR_CASE(CREATE_RQ);
        MLX5_COMMAND_STR_CASE(MODIFY_RQ);
        MLX5_COMMAND_STR_CASE(DESTROY_RQ);
        MLX5_COMMAND_STR_CASE(QUERY_RQ);
        MLX5_COMMAND_STR_CASE(CREATE_RMP);
        MLX5_COMMAND_STR_CASE(MODIFY_RMP);
        MLX5_COMMAND_STR_CASE(DESTROY_RMP);
        MLX5_COMMAND_STR_CASE(QUERY_RMP);
        MLX5_COMMAND_STR_CASE(CREATE_TIS);
        MLX5_COMMAND_STR_CASE(MODIFY_TIS);
        MLX5_COMMAND_STR_CASE(DESTROY_TIS);
        MLX5_COMMAND_STR_CASE(QUERY_TIS);
        MLX5_COMMAND_STR_CASE(CREATE_RQT);
        MLX5_COMMAND_STR_CASE(MODIFY_RQT);
        MLX5_COMMAND_STR_CASE(DESTROY_RQT);
        MLX5_COMMAND_STR_CASE(QUERY_RQT);
        MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
        MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
        MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
        MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
        MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
        MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
        MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
        MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
        MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
        MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
        MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
        MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
        MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
        MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
        MLX5_COMMAND_STR_CASE(ALLOC_PACKET_REFORMAT_CONTEXT);
        MLX5_COMMAND_STR_CASE(DEALLOC_PACKET_REFORMAT_CONTEXT);
        MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
        MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
        MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP);
        MLX5_COMMAND_STR_CASE(FPGA_MODIFY_QP);
        MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP);
        MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS);
        MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP);
        MLX5_COMMAND_STR_CASE(CREATE_XRQ);
        MLX5_COMMAND_STR_CASE(DESTROY_XRQ);
        MLX5_COMMAND_STR_CASE(QUERY_XRQ);
        MLX5_COMMAND_STR_CASE(ARM_XRQ);
        MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJECT);
        MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJECT);
        MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT);
        MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT);
        MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
        MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
        MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
        MLX5_COMMAND_STR_CASE(QUERY_ESW_FUNCTIONS);
        MLX5_COMMAND_STR_CASE(CREATE_UCTX);
        MLX5_COMMAND_STR_CASE(DESTROY_UCTX);
        MLX5_COMMAND_STR_CASE(CREATE_UMEM);
        MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
        MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR);
        MLX5_COMMAND_STR_CASE(MODIFY_XRQ);
        MLX5_COMMAND_STR_CASE(QUERY_VHCA_STATE);
        MLX5_COMMAND_STR_CASE(MODIFY_VHCA_STATE);
        MLX5_COMMAND_STR_CASE(ALLOC_SF);
        MLX5_COMMAND_STR_CASE(DEALLOC_SF);
        MLX5_COMMAND_STR_CASE(SUSPEND_VHCA);
        MLX5_COMMAND_STR_CASE(RESUME_VHCA);
        MLX5_COMMAND_STR_CASE(QUERY_VHCA_MIGRATION_STATE);
        MLX5_COMMAND_STR_CASE(SAVE_VHCA_STATE);
        MLX5_COMMAND_STR_CASE(LOAD_VHCA_STATE);
        MLX5_COMMAND_STR_CASE(SYNC_CRYPTO);
        MLX5_COMMAND_STR_CASE(ALLOW_OTHER_VHCA_ACCESS);
        default: return "unknown command opcode";
        }
}

static const char *cmd_status_str(u8 status)
{
        switch (status) {
        case MLX5_CMD_STAT_OK:
                return "OK";
        case MLX5_CMD_STAT_INT_ERR:
                return "internal error";
        case MLX5_CMD_STAT_BAD_OP_ERR:
                return "bad operation";
        case MLX5_CMD_STAT_BAD_PARAM_ERR:
                return "bad parameter";
        case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
                return "bad system state";
        case MLX5_CMD_STAT_BAD_RES_ERR:
                return "bad resource";
        case MLX5_CMD_STAT_RES_BUSY:
                return "resource busy";
        case MLX5_CMD_STAT_NOT_READY:
                return "FW not ready";
        case MLX5_CMD_STAT_LIM_ERR:
                return "limits exceeded";
        case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
                return "bad resource state";
        case MLX5_CMD_STAT_IX_ERR:
                return "bad index";
        case MLX5_CMD_STAT_NO_RES_ERR:
                return "no resources";
        case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
                return "bad input length";
        case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
                return "bad output length";
        case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
                return "bad QP state";
        case MLX5_CMD_STAT_BAD_PKT_ERR:
                return "bad packet (discarded)";
        case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
                return "bad size too many outstanding CQEs";
        default:
                return "unknown status";
        }
}

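/* Translate a firmware command status into a negative errno. */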
static int cmd_status_to_err(u8 status)
{
        switch (status) {
        case MLX5_CMD_STAT_OK:                          return 0;
        case MLX5_CMD_STAT_INT_ERR:                     return -EIO;
        case MLX5_CMD_STAT_BAD_OP_ERR:                  return -EINVAL;
        case MLX5_CMD_STAT_BAD_PARAM_ERR:               return -EINVAL;
        case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:           return -EIO;
        case MLX5_CMD_STAT_BAD_RES_ERR:                 return -EINVAL;
        case MLX5_CMD_STAT_RES_BUSY:                    return -EBUSY;
        case MLX5_CMD_STAT_NOT_READY:                   return -EAGAIN;
        case MLX5_CMD_STAT_LIM_ERR:                     return -ENOMEM;
        case MLX5_CMD_STAT_BAD_RES_STATE_ERR:           return -EINVAL;
        case MLX5_CMD_STAT_IX_ERR:                      return -EINVAL;
        case MLX5_CMD_STAT_NO_RES_ERR:                  return -EAGAIN;
        case MLX5_CMD_STAT_BAD_INP_LEN_ERR:             return -EIO;
        case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:            return -EIO;
        case MLX5_CMD_STAT_BAD_QP_STATE_ERR:            return -EINVAL;
        case MLX5_CMD_STAT_BAD_PKT_ERR:                 return -EINVAL;
        case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:      return -EINVAL;
        default:                                        return -EIO;
        }
}

void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
{
        u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
        u8 status = MLX5_GET(mbox_out, out, status);

        mlx5_core_err_rl(dev,
                         "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n",
                         mlx5_command_str(opcode), opcode, op_mod,
                         cmd_status_str(status), status, syndrome, cmd_status_to_err(status));
}
EXPORT_SYMBOL(mlx5_cmd_out_err);

static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
{
        u16 opcode, op_mod;
        u8 status;
        u16 uid;

        opcode = in_to_opcode(in);
        op_mod = MLX5_GET(mbox_in, in, op_mod);
        uid    = in_to_uid(in);
        status = MLX5_GET(mbox_out, out, status);

        if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY &&
            opcode != MLX5_CMD_OP_CREATE_UCTX && status != MLX5_CMD_STAT_NOT_READY)
                mlx5_cmd_out_err(dev, opcode, op_mod, out);
}

int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out)
{
        /* aborted due to PCI error or via reset flow mlx5_cmd_trigger_completions() */
        if (err == -ENXIO) {
                u16 opcode = in_to_opcode(in);
                u32 syndrome;
                u8 status;

                /* PCI Error, emulate command return status, for smooth reset */
                err = mlx5_internal_err_ret_value(dev, opcode, &syndrome, &status);
                MLX5_SET(mbox_out, out, status, status);
                MLX5_SET(mbox_out, out, syndrome, syndrome);
                if (!err)
                        return 0;
        }

        /* driver or FW delivery error */
        if (err != -EREMOTEIO && err)
                return err;

        /* check outbox status */
        err = cmd_status_to_err(MLX5_GET(mbox_out, out, status));
        if (err)
                cmd_status_print(dev, in, out);

        return err;
}
EXPORT_SYMBOL(mlx5_cmd_check);

static void dump_command(struct mlx5_core_dev *dev,
                         struct mlx5_cmd_work_ent *ent, int input)
{
        struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
        struct mlx5_cmd_mailbox *next = msg->next;
        int n = mlx5_calc_cmd_blocks(msg);
        u16 op = ent->op;
        int data_only;
        u32 offset = 0;
        int dump_len;
        int i;

        mlx5_core_dbg(dev, "cmd[%d]: start dump\n", ent->idx);
        data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

        if (data_only)
                mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
                                   "cmd[%d]: dump command data %s(0x%x) %s\n",
                                   ent->idx, mlx5_command_str(op), op,
                                   input ? "INPUT" : "OUTPUT");
        else
                mlx5_core_dbg(dev, "cmd[%d]: dump command %s(0x%x) %s\n",
                              ent->idx, mlx5_command_str(op), op,
                              input ? "INPUT" : "OUTPUT");

        if (data_only) {
                if (input) {
                        dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset, ent->idx);
                        offset += sizeof(ent->lay->in);
                } else {
                        dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset, ent->idx);
                        offset += sizeof(ent->lay->out);
                }
        } else {
                dump_buf(ent->lay, sizeof(*ent->lay), 0, offset, ent->idx);
                offset += sizeof(*ent->lay);
        }

        for (i = 0; i < n && next; i++) {
                if (data_only) {
                        dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
                        dump_buf(next->buf, dump_len, 1, offset, ent->idx);
                        offset += MLX5_CMD_DATA_BLOCK_SIZE;
                } else {
                        mlx5_core_dbg(dev, "cmd[%d]: command block:\n", ent->idx);
                        dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset,
                                 ent->idx);
                        offset += sizeof(struct mlx5_cmd_prot_block);
                }
                next = next->next;
        }

        if (data_only)
                pr_debug("\n");

        mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx);
}

static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);

static void cb_timeout_handler(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct mlx5_cmd_work_ent *ent = container_of(dwork,
                                                     struct mlx5_cmd_work_ent,
                                                     cb_timeout_work);
        struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
                                                 cmd);

        mlx5_cmd_eq_recover(dev);

        /* Was the command already handled by EQ recovery? */
        if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) {
                mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx,
                               mlx5_command_str(ent->op), ent->op);
                goto out; /* phew, already handled */
        }

        ent->ret = -ETIMEDOUT;
        mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n",
                       ent->idx, mlx5_command_str(ent->op), ent->op);
        mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);

out:
        cmd_ent_put(ent); /* pairs with the cmd_ent_get() taken when scheduling the delayed work */
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
                              struct mlx5_cmd_msg *msg);

static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode)
{
        if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL)
                return true;

        return cmd->allowed_opcode == opcode;
}

bool mlx5_cmd_is_down(struct mlx5_core_dev *dev)
{
        return pci_channel_offline(dev->pdev) ||
               dev->cmd.state != MLX5_CMDIF_STATE_UP ||
               dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR;
}

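/* Runs on the command workqueue: acquire a slot (regular commands take the
 * semaphore-protected bitmask, page queue commands use the dedicated last
 * slot), build the HW command layout and ring the doorbell. In polling mode
 * the completion is handled here as well.
 */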
static void cmd_work_handler(struct work_struct *work)
{
        struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
        struct mlx5_cmd *cmd = ent->cmd;
        bool poll_cmd = ent->polling;
        struct mlx5_cmd_layout *lay;
        struct mlx5_core_dev *dev;
        unsigned long timeout;
        unsigned long flags;
        int alloc_ret;
        int cmd_mode;

        complete(&ent->handling);

        dev = container_of(cmd, struct mlx5_core_dev, cmd);
        timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));

        if (!ent->page_queue) {
                if (down_timeout(&cmd->vars.sem, timeout)) {
                        mlx5_core_warn(dev, "%s(0x%x) timed out while waiting for a slot.\n",
                                       mlx5_command_str(ent->op), ent->op);
                        if (ent->callback) {
                                ent->callback(-EBUSY, ent->context);
                                mlx5_free_cmd_msg(dev, ent->out);
                                free_msg(dev, ent->in);
                                cmd_ent_put(ent);
                        } else {
                                ent->ret = -EBUSY;
                                complete(&ent->done);
                        }
                        complete(&ent->slotted);
                        return;
                }
                alloc_ret = cmd_alloc_index(cmd, ent);
                if (alloc_ret < 0) {
                        mlx5_core_err_rl(dev, "failed to allocate command entry\n");
                        if (ent->callback) {
                                ent->callback(-EAGAIN, ent->context);
                                mlx5_free_cmd_msg(dev, ent->out);
                                free_msg(dev, ent->in);
                                cmd_ent_put(ent);
                        } else {
                                ent->ret = -EAGAIN;
                                complete(&ent->done);
                        }
                        up(&cmd->vars.sem);
                        complete(&ent->slotted);
                        return;
                }
        } else {
                down(&cmd->vars.pages_sem);
                ent->idx = cmd->vars.max_reg_cmds;
                spin_lock_irqsave(&cmd->alloc_lock, flags);
                clear_bit(ent->idx, &cmd->vars.bitmask);
                cmd->ent_arr[ent->idx] = ent;
                spin_unlock_irqrestore(&cmd->alloc_lock, flags);
        }

        complete(&ent->slotted);

        lay = get_inst(cmd, ent->idx);
        ent->lay = lay;
        memset(lay, 0, sizeof(*lay));
        memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
        if (ent->in->next)
                lay->in_ptr = cpu_to_be64(ent->in->next->dma);
        lay->inlen = cpu_to_be32(ent->in->len);
        if (ent->out->next)
                lay->out_ptr = cpu_to_be64(ent->out->next->dma);
        lay->outlen = cpu_to_be32(ent->out->len);
        lay->type = MLX5_PCI_CMD_XPORT;
        lay->token = ent->token;
        lay->status_own = CMD_OWNER_HW;
        set_signature(ent, !cmd->checksum_disabled);
        dump_command(dev, ent, 1);
        ent->ts1 = ktime_get_ns();
        cmd_mode = cmd->mode;

        if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, timeout))
                cmd_ent_get(ent);
        set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);

        cmd_ent_get(ent); /* for the _real_ FW event on completion */
        /* Skip sending command to fw if internal error */
        if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {
                ent->ret = -ENXIO;
                mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
                return;
        }

        /* ring doorbell after the descriptor is valid */
        mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
        wmb();
        iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
        /* if not in polling don't use ent after this point */
        if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
                poll_timeout(ent);
                /* make sure we read the descriptor after ownership is SW */
                rmb();
                mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT));
        }
}

static int deliv_status_to_err(u8 status)
{
        switch (status) {
        case MLX5_CMD_DELIVERY_STAT_OK:
        case MLX5_DRIVER_STATUS_ABORTED:
                return 0;
        case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
        case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
                return -EBADR;
        case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
        case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
        case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
                return -EFAULT; /* Bad address */
        case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
        case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
        case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
        case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
                return -ENOMSG;
        case MLX5_CMD_DELIVERY_STAT_FW_ERR:
                return -EIO;
        default:
                return -EINVAL;
        }
}

static const char *deliv_status_to_str(u8 status)
{
        switch (status) {
        case MLX5_CMD_DELIVERY_STAT_OK:
                return "no errors";
        case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
                return "signature error";
        case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
                return "token error";
        case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
                return "bad block number";
        case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
                return "output pointer not aligned to block size";
        case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
                return "input pointer not aligned to block size";
        case MLX5_CMD_DELIVERY_STAT_FW_ERR:
                return "firmware internal error";
        case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
                return "command input length error";
        case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
                return "command output length error";
        case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
                return "reserved fields not cleared";
        case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
                return "bad command descriptor type";
        default:
                return "unknown status code";
        }
}

enum {
        MLX5_CMD_TIMEOUT_RECOVER_MSEC   = 5 * 1000,
};

static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
                                          struct mlx5_cmd_work_ent *ent)
{
        unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_RECOVER_MSEC);

        mlx5_cmd_eq_recover(dev);

        /* Re-wait on the ent->done after executing the recovery flow. If the
         * recovery flow (or any other recovery flow running simultaneously)
         * has recovered an EQE, it should cause the entry to be completed by
         * the command interface.
         */
        if (wait_for_completion_timeout(&ent->done, timeout)) {
                mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx,
                               mlx5_command_str(ent->op), ent->op);
                return;
        }

        mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx,
                       mlx5_command_str(ent->op), ent->op);

        ent->ret = -ETIMEDOUT;
        mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
}

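/* Blocking wait for command completion. Triggers EQ recovery if the
 * completion EQE appears to have been lost, and translates timeout and
 * cancellation into errnos for the caller.
 */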
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
        unsigned long timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
        struct mlx5_cmd *cmd = &dev->cmd;
        int err;

        if (!wait_for_completion_timeout(&ent->handling, timeout) &&
            cancel_work_sync(&ent->work)) {
                ent->ret = -ECANCELED;
                goto out_err;
        }

        wait_for_completion(&ent->slotted);

        if (cmd->mode == CMD_MODE_POLLING || ent->polling)
                wait_for_completion(&ent->done);
        else if (!wait_for_completion_timeout(&ent->done, timeout))
                wait_func_handle_exec_timeout(dev, ent);

out_err:
        err = ent->ret;

        if (err == -ETIMEDOUT) {
                mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
                               mlx5_command_str(ent->op), ent->op);
        } else if (err == -ECANCELED) {
                mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
                               mlx5_command_str(ent->op), ent->op);
        } else if (err == -EBUSY) {
                mlx5_core_warn(dev, "%s(0x%x) timeout while waiting for command semaphore.\n",
                               mlx5_command_str(ent->op), ent->op);
        }
        mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
                      err, deliv_status_to_str(ent->status), ent->status);

        return err;
}

/* Notes:
 * 1. Callback functions may not sleep
 * 2. Page queue commands do not support asynchronous completion
 *
 * return value in case (!callback):
 *      ret < 0 : Command execution couldn't be submitted by driver
 *      ret > 0 : Command execution couldn't be performed by firmware
 *      ret == 0: Command was executed by FW, Caller must check FW outbox status.
 *
 * return value in case (callback):
 *      ret < 0 : Command execution couldn't be submitted by driver
 *      ret == 0: Command will be submitted to FW for execution
 *                and the callback will be called for further status updates
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
                           struct mlx5_cmd_msg *out, void *uout, int uout_size,
                           mlx5_cmd_cbk_t callback,
                           void *context, int page_queue,
                           u8 token, bool force_polling)
{
        struct mlx5_cmd *cmd = &dev->cmd;
        struct mlx5_cmd_work_ent *ent;
        struct mlx5_cmd_stats *stats;
        u8 status = 0;
        int err = 0;
        s64 ds;

        if (callback && page_queue)
                return -EINVAL;

        ent = cmd_alloc_ent(cmd, in, out, uout, uout_size,
                            callback, context, page_queue);
        if (IS_ERR(ent))
                return PTR_ERR(ent);

        /* put for this ent is when consumed, depending on the use case
         * 1) (!callback) blocking flow: by caller after wait_func completes
         * 2) (callback) flow: by mlx5_cmd_comp_handler() when ent is handled
         */

        ent->token = token;
        ent->polling = force_polling;

        init_completion(&ent->handling);
        init_completion(&ent->slotted);
        if (!callback)
                init_completion(&ent->done);

        INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
        INIT_WORK(&ent->work, cmd_work_handler);
        if (page_queue) {
                cmd_work_handler(&ent->work);
        } else if (!queue_work(cmd->wq, &ent->work)) {
                mlx5_core_warn(dev, "failed to queue work\n");
                err = -EALREADY;
                goto out_free;
        }

        if (callback)
                return 0; /* mlx5_cmd_comp_handler() will put(ent) */

        err = wait_func(dev, ent);
        if (err == -ETIMEDOUT || err == -ECANCELED || err == -EBUSY)
                goto out_free;

        ds = ent->ts2 - ent->ts1;
        stats = xa_load(&cmd->stats, ent->op);
        if (stats) {
                spin_lock_irq(&stats->lock);
                stats->sum += ds;
                ++stats->n;
                spin_unlock_irq(&stats->lock);
        }
        mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
                           "fw exec time for %s is %lld nsec\n",
                           mlx5_command_str(ent->op), ds);

out_free:
        status = ent->status;
        cmd_ent_put(ent);
        return err ? : status;
}

static ssize_t dbg_write(struct file *filp, const char __user *buf,
                         size_t count, loff_t *pos)
{
        struct mlx5_core_dev *dev = filp->private_data;
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
        char lbuf[3];
        int err;

        if (!dbg->in_msg || !dbg->out_msg)
                return -ENOMEM;

        if (count < sizeof(lbuf) - 1)
                return -EINVAL;

        if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1))
                return -EFAULT;

        lbuf[sizeof(lbuf) - 1] = 0;

        if (strcmp(lbuf, "go"))
                return -EINVAL;

        err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

        return err ? err : count;
}

static const struct file_operations fops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .write  = dbg_write,
};

static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
                            u8 token)
{
        struct mlx5_cmd_prot_block *block;
        struct mlx5_cmd_mailbox *next;
        int copy;

        if (!to || !from)
                return -ENOMEM;

        copy = min_t(int, size, sizeof(to->first.data));
        memcpy(to->first.data, from, copy);
        size -= copy;
        from += copy;

        next = to->next;
        while (size) {
                if (!next) {
                        /* this is a BUG */
                        return -ENOMEM;
                }

                copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
                block = next->buf;
                memcpy(block->data, from, copy);
                from += copy;
                size -= copy;
                block->token = token;
                next = next->next;
        }

        return 0;
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
        struct mlx5_cmd_prot_block *block;
        struct mlx5_cmd_mailbox *next;
        int copy;

        if (!to || !from)
                return -ENOMEM;

        copy = min_t(int, size, sizeof(from->first.data));
        memcpy(to, from->first.data, copy);
        size -= copy;
        to += copy;

        next = from->next;
        while (size) {
                if (!next) {
                        /* this is a BUG */
                        return -ENOMEM;
                }

                copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
                block = next->buf;

                memcpy(to, block->data, copy);
                to += copy;
                size -= copy;
                next = next->next;
        }

        return 0;
}

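/* Allocate a mailbox whose data buffer comes from the device's DMA-coherent
 * command pool.
 */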
static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
                                              gfp_t flags)
{
        struct mlx5_cmd_mailbox *mailbox;

        mailbox = kmalloc(sizeof(*mailbox), flags);
        if (!mailbox)
                return ERR_PTR(-ENOMEM);

        mailbox->buf = dma_pool_zalloc(dev->cmd.pool, flags,
                                       &mailbox->dma);
        if (!mailbox->buf) {
                mlx5_core_dbg(dev, "failed allocation\n");
                kfree(mailbox);
                return ERR_PTR(-ENOMEM);
        }
        mailbox->next = NULL;

        return mailbox;
}

static void free_cmd_box(struct mlx5_core_dev *dev,
                         struct mlx5_cmd_mailbox *mailbox)
{
        dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
        kfree(mailbox);
}

static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
                                               gfp_t flags, int size,
                                               u8 token)
{
        struct mlx5_cmd_mailbox *tmp, *head = NULL;
        struct mlx5_cmd_prot_block *block;
        struct mlx5_cmd_msg *msg;
        int err;
        int n;
        int i;

        msg = kzalloc(sizeof(*msg), flags);
        if (!msg)
                return ERR_PTR(-ENOMEM);

        msg->len = size;
        n = mlx5_calc_cmd_blocks(msg);

        for (i = 0; i < n; i++) {
                tmp = alloc_cmd_box(dev, flags);
                if (IS_ERR(tmp)) {
                        mlx5_core_warn(dev, "failed allocating block\n");
                        err = PTR_ERR(tmp);
                        goto err_alloc;
                }

                block = tmp->buf;
                tmp->next = head;
                block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
                block->block_num = cpu_to_be32(n - i - 1);
                block->token = token;
                head = tmp;
        }
        msg->next = head;
        return msg;

err_alloc:
        while (head) {
                tmp = head->next;
                free_cmd_box(dev, head);
                head = tmp;
        }
        kfree(msg);

        return ERR_PTR(err);
}

static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
                              struct mlx5_cmd_msg *msg)
{
        struct mlx5_cmd_mailbox *head = msg->next;
        struct mlx5_cmd_mailbox *next;

        while (head) {
                next = head->next;
                free_cmd_box(dev, head);
                head = next;
        }
        kfree(msg);
}

static ssize_t data_write(struct file *filp, const char __user *buf,
                          size_t count, loff_t *pos)
{
        struct mlx5_core_dev *dev = filp->private_data;
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
        void *ptr;

        if (*pos != 0)
                return -EINVAL;

        kfree(dbg->in_msg);
        dbg->in_msg = NULL;
        dbg->inlen = 0;
        ptr = memdup_user(buf, count);
        if (IS_ERR(ptr))
                return PTR_ERR(ptr);
        dbg->in_msg = ptr;
        dbg->inlen = count;

        *pos = count;

        return count;
}

static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
                         loff_t *pos)
{
        struct mlx5_core_dev *dev = filp->private_data;
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

        if (!dbg->out_msg)
                return -ENOMEM;

        return simple_read_from_buffer(buf, count, pos, dbg->out_msg,
                                       dbg->outlen);
}

static const struct file_operations dfops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .write  = data_write,
        .read   = data_read,
};

static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
                           loff_t *pos)
{
        struct mlx5_core_dev *dev = filp->private_data;
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
        char outlen[8];
        int err;

        err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
        if (err < 0)
                return err;

        return simple_read_from_buffer(buf, count, pos, outlen, err);
}

static ssize_t outlen_write(struct file *filp, const char __user *buf,
                            size_t count, loff_t *pos)
{
        struct mlx5_core_dev *dev = filp->private_data;
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
        char outlen_str[8] = {0};
        int outlen;
        void *ptr;
        int err;

        if (*pos != 0 || count > 6)
                return -EINVAL;

        kfree(dbg->out_msg);
        dbg->out_msg = NULL;
        dbg->outlen = 0;

        if (copy_from_user(outlen_str, buf, count))
                return -EFAULT;

        err = sscanf(outlen_str, "%d", &outlen);
        if (err != 1)
                return -EINVAL;

        ptr = kzalloc(outlen, GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        dbg->out_msg = ptr;
        dbg->outlen = outlen;

        *pos = count;

        return count;
}

static const struct file_operations olfops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .write  = outlen_write,
        .read   = outlen_read,
};

static void set_wqname(struct mlx5_core_dev *dev)
{
        struct mlx5_cmd *cmd = &dev->cmd;

        snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
                 dev_name(dev->device));
}

static void clean_debug_files(struct mlx5_core_dev *dev)
{
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

        if (!mlx5_debugfs_root)
                return;

        debugfs_remove_recursive(dbg->dbg_root);
}

static void create_debugfs_files(struct mlx5_core_dev *dev)
{
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

        dbg->dbg_root = debugfs_create_dir("cmd", mlx5_debugfs_get_dev_root(dev));

        debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops);
        debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops);
        debugfs_create_file("out_len", 0600, dbg->dbg_root, dev, &olfops);
        debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status);
        debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
}

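/* Quiesce the command interface by taking every regular command semaphore
 * plus the page queue semaphore, so the allowed opcode (or the command mode
 * below) is never changed while commands are in flight.
 */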
void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
{
        struct mlx5_cmd *cmd = &dev->cmd;
        int i;

        for (i = 0; i < cmd->vars.max_reg_cmds; i++)
                down(&cmd->vars.sem);
        down(&cmd->vars.pages_sem);

        cmd->allowed_opcode = opcode;

        up(&cmd->vars.pages_sem);
        for (i = 0; i < cmd->vars.max_reg_cmds; i++)
                up(&cmd->vars.sem);
}

static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
        struct mlx5_cmd *cmd = &dev->cmd;
        int i;

        for (i = 0; i < cmd->vars.max_reg_cmds; i++)
                down(&cmd->vars.sem);
        down(&cmd->vars.pages_sem);

        cmd->mode = mode;

        up(&cmd->vars.pages_sem);
        for (i = 0; i < cmd->vars.max_reg_cmds; i++)
                up(&cmd->vars.sem);
}

static int cmd_comp_notifier(struct notifier_block *nb,
                             unsigned long type, void *data)
{
        struct mlx5_core_dev *dev;
        struct mlx5_cmd *cmd;
        struct mlx5_eqe *eqe;

        cmd = mlx5_nb_cof(nb, struct mlx5_cmd, nb);
        dev = container_of(cmd, struct mlx5_core_dev, cmd);
        eqe = data;

        if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
                return NOTIFY_DONE;

        mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);

        return NOTIFY_OK;
}

void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
        MLX5_NB_INIT(&dev->cmd.nb, cmd_comp_notifier, CMD);
        mlx5_eq_notifier_register(dev, &dev->cmd.nb);
        mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
        mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
        mlx5_eq_notifier_unregister(dev, &dev->cmd.nb);
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
        unsigned long flags;

        if (msg->parent) {
                spin_lock_irqsave(&msg->parent->lock, flags);
                list_add_tail(&msg->list, &msg->parent->head);
                spin_unlock_irqrestore(&msg->parent->lock, flags);
        } else {
                mlx5_free_cmd_msg(dev, msg);
        }
}

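/* Completion handler shared by the EQ and the polling/forced paths. @vec is
 * a bitmask of completed command slots; @forced marks completions triggered
 * by the driver (e.g. the reset flow) rather than by real FW EQEs.
 */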
static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;
	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;
	unsigned long vector;

	/* there can be at most 32 command queues */
	vector = vec & 0xffffffff;
	for (i = 0; i < (1 << cmd->vars.log_sz); i++) {
		if (test_bit(i, &vector)) {
			ent = cmd->ent_arr[i];

			/* if we already completed the command, ignore it */
			if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
						&ent->state)) {
				/* only real completion can free the cmd slot */
				if (!forced) {
					mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
						      ent->idx);
					cmd_ent_put(ent);
				}
				continue;
			}

			if (ent->callback && cancel_delayed_work(&ent->cb_timeout_work))
				cmd_ent_put(ent); /* timeout work was canceled */

			if (!forced || /* Real FW completion */
			    mlx5_cmd_is_down(dev) || /* No real FW completion is expected */
			    !opcode_allowed(cmd, ent->op))
				cmd_ent_put(ent);

			ent->ts2 = ktime_get_ns();
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);

			if (vec & MLX5_TRIGGERED_CMD_COMP)
				ent->ret = -ENXIO;

			if (!ent->ret) { /* Command completed by FW */
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);

				ent->status = ent->lay->status_own >> 1;

				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}

			if (ent->callback) {
				ds = ent->ts2 - ent->ts1;
				stats = xa_load(&cmd->stats, ent->op);
				if (stats) {
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					++stats->n;
					spin_unlock_irqrestore(&stats->lock, flags);
				}

				callback = ent->callback;
				context = ent->context;
				err = ent->ret ? : ent->status;
				if (err > 0) /* Failed in FW, command didn't execute */
					err = deliv_status_to_err(err);

				if (!err)
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);

				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);

				/* final consumer is done, release ent */
				cmd_ent_put(ent);
				callback(err, context);
			} else {
				/* release wait_func() so mlx5_cmd_invoke()
				 * can make the final ent_put()
				 */
				complete(&ent->done);
			}
		}
	}
}

#define MLX5_MAX_MANAGE_PAGES_CMD_ENT 1
#define MLX5_CMD_MASK ((1UL << (cmd->vars.max_reg_cmds + \
				MLX5_MAX_MANAGE_PAGES_CMD_ENT)) - 1)

static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	unsigned long bitmask;
	unsigned long flags;
	u64 vector;
	int i;

	/* wait for pending handlers to complete */
	mlx5_eq_synchronize_cmd_irq(dev);
	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
	vector = ~dev->cmd.vars.bitmask & MLX5_CMD_MASK;
	if (!vector)
		goto no_trig;

	bitmask = vector;
	/* we must increment the allocated entries refcount before triggering
	 * the completions to guarantee pending commands will not get freed in
	 * the meantime. For that reason, it also has to be done inside the
	 * alloc_lock.
	 */
	for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz))
		cmd_ent_get(cmd->ent_arr[i]);
	vector |= MLX5_TRIGGERED_CMD_COMP;
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);

	mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
	mlx5_cmd_comp_handler(dev, vector, true);
	for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz))
		cmd_ent_put(cmd->ent_arr[i]);
	return;

no_trig:
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
}

void mlx5_cmd_flush(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->vars.max_reg_cmds; i++) {
		while (down_trylock(&cmd->vars.sem)) {
			mlx5_cmd_trigger_completions(dev);
			cond_resched();
		}
	}

	while (down_trylock(&cmd->vars.pages_sem)) {
		mlx5_cmd_trigger_completions(dev);
		cond_resched();
	}

	/* Unlock cmdif */
	up(&cmd->vars.pages_sem);
	for (i = 0; i < cmd->vars.max_reg_cmds; i++)
		up(&cmd->vars.sem);
}

static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct cmd_msg_cache *ch = NULL;
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	if (in_size <= 16)
		goto cache_miss;

	for (i = 0; i < dev->profile.num_cmd_caches; i++) {
		ch = &cmd->cache[i];
		if (in_size > ch->max_inbox_size)
			continue;
		spin_lock_irq(&ch->lock);
		if (list_empty(&ch->head)) {
			spin_unlock_irq(&ch->lock);
			continue;
		}
		msg = list_entry(ch->head.next, typeof(*msg), list);
		/* Cached messages keep their allocation size, so we must
		 * explicitly record the real request size.
		 */
		msg->len = in_size;
		list_del(&msg->list);
		spin_unlock_irq(&ch->lock);
		break;
	}

	if (!IS_ERR(msg))
		return msg;

cache_miss:
	msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
	return msg;
}

static int is_manage_pages(void *in)
{
	return in_to_opcode(in) == MLX5_CMD_OP_MANAGE_PAGES;
}

static bool mlx5_has_privileged_uid(struct mlx5_core_dev *dev)
{
	return !xa_empty(&dev->cmd.vars.privileged_uids);
}

static bool mlx5_cmd_is_privileged_uid(struct mlx5_core_dev *dev,
				       u16 uid)
{
	return !!xa_load(&dev->cmd.vars.privileged_uids, uid);
}

/* Notes:
 * 1. Callback functions may not sleep
 * 2. Page queue commands do not support asynchronous completion
 */
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context,
		    bool force_polling)
{
	struct mlx5_cmd_msg *inb, *outb;
	u16 opcode = in_to_opcode(in);
	bool throttle_locked = false;
	bool unpriv_locked = false;
	u16 uid = in_to_uid(in);
	int pages_queue;
	gfp_t gfp;
	u8 token;
	int err;

	if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
		return -ENXIO;

	if (!callback) {
		/* The semaphore is already held for callback commands. It was
		 * acquired in mlx5_cmd_exec_cb()
		 */
		if (uid && mlx5_has_privileged_uid(dev)) {
			if (!mlx5_cmd_is_privileged_uid(dev, uid)) {
				unpriv_locked = true;
				down(&dev->cmd.vars.unprivileged_sem);
			}
		} else if (mlx5_cmd_is_throttle_opcode(opcode)) {
			throttle_locked = true;
			down(&dev->cmd.vars.throttle_sem);
		}
	}

	pages_queue = is_manage_pages(in);
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		goto out_up;
	}

	token = alloc_token(&dev->cmd);

	err = mlx5_copy_to_msg(inb, in, in_size, token);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, token, force_polling);
	if (callback && !err)
		return 0;

	if (err > 0) /* Failed in FW, command didn't execute */
		err = deliv_status_to_err(err);

	if (err)
		goto out_out;

	/* command completed by FW */
	err = mlx5_copy_from_msg(out, outb, out_size);
out_out:
	mlx5_free_cmd_msg(dev, outb);
out_in:
	free_msg(dev, inb);
out_up:
	if (throttle_locked)
		up(&dev->cmd.vars.throttle_sem);
	if (unpriv_locked)
		up(&dev->cmd.vars.unprivileged_sem);

	return err;
}

static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
{
	u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
	u8 status = MLX5_GET(mbox_out, out, status);

	trace_mlx5_cmd(mlx5_command_str(opcode), opcode, op_mod,
		       cmd_status_str(status), status, syndrome,
		       cmd_status_to_err(status));
}

static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
			   u32 syndrome, int err)
{
	const char *namep = mlx5_command_str(opcode);
	struct mlx5_cmd_stats *stats;
	unsigned long flags;

	if (!err || !(strcmp(namep, "unknown command opcode")))
		return;

	stats = xa_load(&dev->cmd.stats, opcode);
	if (!stats)
		return;
	spin_lock_irqsave(&stats->lock, flags);
	stats->failed++;
	if (err < 0)
		stats->last_failed_errno = -err;
	if (err == -EREMOTEIO) {
		stats->failed_mbox_status++;
		stats->last_failed_mbox_status = status;
		stats->last_failed_syndrome = syndrome;
	}
	spin_unlock_irqrestore(&stats->lock, flags);
}

/* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, u16 op_mod, void *out)
{
	u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
	u8 status = MLX5_GET(mbox_out, out, status);

	if (err == -EREMOTEIO) /* -EREMOTEIO is reserved for outbox.status != OK */
		err = -EIO;

	if (!err && status != MLX5_CMD_STAT_OK) {
		err = -EREMOTEIO;
		mlx5_cmd_err_trace(dev, opcode, op_mod, out);
	}

	cmd_status_log(dev, opcode, status, syndrome, err);
	return err;
}

/**
 * mlx5_cmd_do - Executes a FW command and waits for completion.
 * Unlike mlx5_cmd_exec(), this function does not translate or intercept
 * outbox.status; it returns -EREMOTEIO when
 * outbox.status != MLX5_CMD_STAT_OK.
 *
 * @dev: mlx5 core device
 * @in: inbox mlx5_ifc command buffer
 * @in_size: inbox buffer size
 * @out: outbox mlx5_ifc buffer
 * @out_size: outbox size
 *
 * @return:
 * -EREMOTEIO : Command executed by FW, outbox.status != MLX5_CMD_STAT_OK.
 *		Caller must check FW outbox status.
 * 0 : Command execution successful, outbox.status == MLX5_CMD_STAT_OK.
 * < 0 : Command execution couldn't be performed by firmware or driver
 */
int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size)
{
	int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
	u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
	u16 opcode = in_to_opcode(in);

	return cmd_status_err(dev, err, opcode, op_mod, out);
}
EXPORT_SYMBOL(mlx5_cmd_do);
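
/*
 * Usage sketch (hypothetical caller): mlx5_cmd_do() preserves the raw FW
 * status, so -EREMOTEIO tells the caller to inspect the outbox itself.
 * Buffer sizes come from the mlx5_ifc layout of the chosen command:
 *
 *	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
 *	int err;
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_do(dev, in, sizeof(in), out, sizeof(out));
 *	if (err == -EREMOTEIO) {
 *		u8 status = MLX5_GET(mbox_out, out, status);
 *		u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
 *		...
 *	}
 */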

/**
 * mlx5_cmd_exec - Executes a FW command and waits for completion
 *
 * @dev: mlx5 core device
 * @in: inbox mlx5_ifc command buffer
 * @in_size: inbox buffer size
 * @out: outbox mlx5_ifc buffer
 * @out_size: outbox size
 *
 * @return: 0 if no error, FW command execution was successful
 *	    and outbox status is ok.
 */
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	int err = mlx5_cmd_do(dev, in, in_size, out, out_size);

	return mlx5_cmd_check(dev, err, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);
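
/*
 * Usage sketch (hypothetical caller): with mlx5_cmd_exec() a non-OK outbox
 * status has already been translated into a negative errno, so a single
 * error check suffices:
 *
 *	u32 out[MLX5_ST_SZ_DW(init_hca_out)] = {};
 *	u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {};
 *
 *	MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 *	if (err)
 *		return err;
 */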

/**
 * mlx5_cmd_exec_polling - Executes a FW command, polling for completion.
 * Needed for driver force teardown, when the command completion EQ
 * is not available to complete the command
 *
 * @dev: mlx5 core device
 * @in: inbox mlx5_ifc command buffer
 * @in_size: inbox buffer size
 * @out: outbox mlx5_ifc buffer
 * @out_size: outbox size
 *
 * @return: 0 if no error, FW command execution was successful
 *	    and outbox status is ok.
 */
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size)
{
	int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
	u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
	u16 opcode = in_to_opcode(in);

	err = cmd_status_err(dev, err, opcode, op_mod, out);
	return mlx5_cmd_check(dev, err, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec_polling);

void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
			     struct mlx5_async_ctx *ctx)
{
	ctx->dev = dev;
	/* Starts at 1 to avoid doing wake_up if we are not cleaning up */
	atomic_set(&ctx->num_inflight, 1);
	init_completion(&ctx->inflight_done);
}
EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);

/**
 * mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx
 * @ctx: The ctx to clean
 *
 * Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The
 * caller must ensure that mlx5_cmd_exec_cb() is not called during or after
 * the call to mlx5_cmd_cleanup_async_ctx().
 */
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
{
	if (!atomic_dec_and_test(&ctx->num_inflight))
		wait_for_completion(&ctx->inflight_done);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);
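
/*
 * Lifecycle sketch (illustrative): an async context brackets a batch of
 * callback commands; cleanup blocks until every callback has run:
 *
 *	struct mlx5_async_ctx ctx;
 *
 *	mlx5_cmd_init_async_ctx(dev, &ctx);
 *	... mlx5_cmd_exec_cb(&ctx, ...) any number of times ...
 *	mlx5_cmd_cleanup_async_ctx(&ctx);
 */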

static void mlx5_cmd_exec_cb_handler(int status, void *_work)
{
	struct mlx5_async_work *work = _work;
	struct mlx5_async_ctx *ctx;
	struct mlx5_core_dev *dev;
	bool throttle_locked;
	bool unpriv_locked;

	ctx = work->ctx;
	dev = ctx->dev;
	throttle_locked = work->throttle_locked;
	unpriv_locked = work->unpriv_locked;
	status = cmd_status_err(dev, status, work->opcode, work->op_mod, work->out);
	work->user_callback(status, work);
	/* Can't access "work" from this point on. It could have been freed in
	 * the callback.
	 */
	if (throttle_locked)
		up(&dev->cmd.vars.throttle_sem);
	if (unpriv_locked)
		up(&dev->cmd.vars.unprivileged_sem);
	if (atomic_dec_and_test(&ctx->num_inflight))
		complete(&ctx->inflight_done);
}

int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
		     void *out, int out_size, mlx5_async_cbk_t callback,
		     struct mlx5_async_work *work)
{
	struct mlx5_core_dev *dev = ctx->dev;
	u16 uid;
	int ret;

	work->ctx = ctx;
	work->user_callback = callback;
	work->opcode = in_to_opcode(in);
	work->op_mod = MLX5_GET(mbox_in, in, op_mod);
	work->out = out;
	work->throttle_locked = false;
	work->unpriv_locked = false;
	uid = in_to_uid(in);

	if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
		return -EIO;

	if (uid && mlx5_has_privileged_uid(dev)) {
		if (!mlx5_cmd_is_privileged_uid(dev, uid)) {
			if (down_trylock(&dev->cmd.vars.unprivileged_sem)) {
				ret = -EBUSY;
				goto dec_num_inflight;
			}
			work->unpriv_locked = true;
		}
	} else if (mlx5_cmd_is_throttle_opcode(in_to_opcode(in))) {
		if (down_trylock(&dev->cmd.vars.throttle_sem)) {
			ret = -EBUSY;
			goto dec_num_inflight;
		}
		work->throttle_locked = true;
	}

	ret = cmd_exec(dev, in, in_size, out, out_size,
		       mlx5_cmd_exec_cb_handler, work, false);
	if (ret)
		goto sem_up;

	return 0;

sem_up:
	if (work->throttle_locked)
		up(&dev->cmd.vars.throttle_sem);
	if (work->unpriv_locked)
		up(&dev->cmd.vars.unprivileged_sem);
dec_num_inflight:
	if (atomic_dec_and_test(&ctx->num_inflight))
		complete(&ctx->inflight_done);

	return ret;
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);
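
/*
 * Callback sketch (hypothetical names: my_async_req, my_done): the
 * mlx5_async_work must stay alive until the callback runs, so callers
 * typically embed it in their own request struct and recover it with
 * container_of().  Remember that callbacks may not sleep.
 *
 *	struct my_async_req {
 *		struct mlx5_async_work cb_work;
 *		u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)];
 *	};
 *
 *	static void my_done(int status, struct mlx5_async_work *work)
 *	{
 *		struct my_async_req *req =
 *			container_of(work, struct my_async_req, cb_work);
 *		...
 *	}
 *
 *	err = mlx5_cmd_exec_cb(&ctx, in, sizeof(in), req->out,
 *			       sizeof(req->out), my_done, &req->cb_work);
 */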

int mlx5_cmd_allow_other_vhca_access(struct mlx5_core_dev *dev,
				     struct mlx5_cmd_allow_other_vhca_access_attr *attr)
{
	u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {};
	u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {};
	void *key;

	MLX5_SET(allow_other_vhca_access_in,
		 in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
	MLX5_SET(allow_other_vhca_access_in,
		 in, object_type_to_be_accessed, attr->obj_type);
	MLX5_SET(allow_other_vhca_access_in,
		 in, object_id_to_be_accessed, attr->obj_id);

	key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
	memcpy(key, attr->access_key, sizeof(attr->access_key));

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_cmd_alias_obj_create(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_alias_obj_create_attr *alias_attr,
			      u32 *obj_id)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	u32 in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {};
	void *param;
	void *attr;
	void *key;
	int ret;

	attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, obj_type, alias_attr->obj_type);
	param = MLX5_ADDR_OF(general_obj_in_cmd_hdr, in, op_param);
	MLX5_SET(general_obj_create_param, param, alias_object, 1);

	attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
	MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
	MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);

	key = MLX5_ADDR_OF(alias_context, attr, access_key);
	memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));

	ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (ret)
		return ret;

	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return 0;
}

int mlx5_cmd_alias_obj_destroy(struct mlx5_core_dev *dev, u32 obj_id,
			       u16 obj_type)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, obj_type);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct cmd_msg_cache *ch;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;
	int i;

	for (i = 0; i < dev->profile.num_cmd_caches; i++) {
		ch = &dev->cmd.cache[i];
		list_for_each_entry_safe(msg, n, &ch->head, list) {
			list_del(&msg->list);
			mlx5_free_cmd_msg(dev, msg);
		}
	}
}

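/* Inbox message cache buckets: entry counts and maximum inbox sizes per
 * bucket.  Each size is a 16-byte inline payload plus a fixed number of
 * MLX5_CMD_DATA_BLOCK_SIZE mailbox blocks; the smallest (most common)
 * message sizes get the most pre-allocated entries.
 */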
static unsigned int cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = {
	512, 32, 16, 8, 2
};

static unsigned int cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = {
	16 + MLX5_CMD_DATA_BLOCK_SIZE,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 2,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 16,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 256,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 512,
};

static void create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cmd_msg_cache *ch;
	struct mlx5_cmd_msg *msg;
	int i;
	int k;

	/* Initialize and fill the caches with initial entries */
	for (k = 0; k < dev->profile.num_cmd_caches; k++) {
		ch = &cmd->cache[k];
		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->head);
		ch->num_ent = cmd_cache_num_ent[k];
		ch->max_inbox_size = cmd_cache_ent_size[k];
		for (i = 0; i < ch->num_ent; i++) {
			msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN,
						 ch->max_inbox_size, 0);
			if (IS_ERR(msg))
				break;
			msg->parent = ch;
			list_add_tail(&msg->list, &ch->head);
		}
	}
}
2329
alloc_cmd_page(struct mlx5_core_dev * dev,struct mlx5_cmd * cmd)2330 static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
2331 {
2332 cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE,
2333 &cmd->alloc_dma, GFP_KERNEL);
2334 if (!cmd->cmd_alloc_buf)
2335 return -ENOMEM;
2336
2337 /* make sure it is aligned to 4K */
2338 if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
2339 cmd->cmd_buf = cmd->cmd_alloc_buf;
2340 cmd->dma = cmd->alloc_dma;
2341 cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
2342 return 0;
2343 }
2344
2345 dma_free_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
2346 cmd->alloc_dma);
2347 cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev),
2348 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
2349 &cmd->alloc_dma, GFP_KERNEL);
2350 if (!cmd->cmd_alloc_buf)
2351 return -ENOMEM;
2352
2353 cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
2354 cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
2355 cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
2356 return 0;
2357 }
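
/*
 * Worked example for the fallback path above (MLX5_ADAPTER_PAGE_SIZE == 4096):
 * over-allocating 2 * 4096 - 1 bytes guarantees that one fully aligned 4K
 * window fits inside the buffer wherever it lands, e.g.:
 *
 *	alloc_dma = 0x1200
 *	dma	  = ALIGN(0x1200, 4096) = 0x2000
 *	cmd_buf	  = PTR_ALIGN(cmd_alloc_buf, 4096), the matching CPU address
 *
 * so [0x2000, 0x3000) still lies within [0x1200, 0x31ff].
 */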

static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	dma_free_coherent(mlx5_core_dma_dev(dev), cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
}

static u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}

int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	cmd->checksum_disabled = 1;

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		mlx5_core_err(dev, "failed to create command workqueue\n");
		return -ENOMEM;
	}

	mlx5_cmdif_debugfs_init(dev);

	return 0;
}

void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	mlx5_cmdif_debugfs_cleanup(dev);
	destroy_workqueue(cmd->wq);
}

int mlx5_cmd_enable(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	int err;

	memset(&cmd->vars, 0, sizeof(cmd->vars));
	cmd->vars.cmdif_rev = cmdif_rev(dev);
	if (cmd->vars.cmdif_rev != CMD_IF_REV) {
		mlx5_core_err(dev,
			      "Driver cmdif rev(%d) differs from firmware's(%d)\n",
			      CMD_IF_REV, cmd->vars.cmdif_rev);
		return -EINVAL;
	}

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->vars.log_sz = cmd_l >> 4 & 0xf;
	cmd->vars.log_stride = cmd_l & 0xf;
	if (1 << cmd->vars.log_sz > MLX5_MAX_COMMANDS) {
		mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
			      1 << cmd->vars.log_sz);
		return -EINVAL;
	}

	if (cmd->vars.log_sz + cmd->vars.log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		mlx5_core_err(dev, "command queue size overflow\n");
		return -EINVAL;
	}

	cmd->state = MLX5_CMDIF_STATE_DOWN;
	cmd->vars.max_reg_cmds = (1 << cmd->vars.log_sz) - 1;
	cmd->vars.bitmask = MLX5_CMD_MASK;

	sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds);
	sema_init(&cmd->vars.pages_sem, 1);
	sema_init(&cmd->vars.throttle_sem, DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));
	sema_init(&cmd->vars.unprivileged_sem,
		  DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));

	xa_init(&cmd->vars.privileged_uids);

	cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
	if (!cmd->pool) {
		err = -ENOMEM;
		goto err_destroy_xa;
	}

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		mlx5_core_err(dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_cmd_page;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;
	cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL;

	create_msg_cache(dev);
	create_debugfs_files(dev);

	return 0;

err_cmd_page:
	free_cmd_page(dev, cmd);
err_free_pool:
	dma_pool_destroy(cmd->pool);
err_destroy_xa:
	xa_destroy(&dev->cmd.vars.privileged_uids);
	return err;
}

void mlx5_cmd_disable(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	flush_workqueue(cmd->wq);
	clean_debug_files(dev);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
	dma_pool_destroy(cmd->pool);
	xa_destroy(&dev->cmd.vars.privileged_uids);
}

void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
			enum mlx5_cmdif_state cmdif_state)
{
	dev->cmd.state = cmdif_state;
}

int mlx5_cmd_add_privileged_uid(struct mlx5_core_dev *dev, u16 uid)
{
	return xa_insert(&dev->cmd.vars.privileged_uids, uid,
			 xa_mk_value(uid), GFP_KERNEL);
}
EXPORT_SYMBOL(mlx5_cmd_add_privileged_uid);

void mlx5_cmd_remove_privileged_uid(struct mlx5_core_dev *dev, u16 uid)
{
	void *data = xa_erase(&dev->cmd.vars.privileged_uids, uid);

	WARN(!data, "Privileged UID %u does not exist\n", uid);
}
EXPORT_SYMBOL(mlx5_cmd_remove_privileged_uid);
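
/*
 * Usage sketch (illustrative): a user context granted elevated command
 * privileges registers its UID so its commands bypass the
 * unprivileged-command semaphore in cmd_exec(), and must unregister it on
 * teardown:
 *
 *	err = mlx5_cmd_add_privileged_uid(dev, uid);
 *	if (err)
 *		return err;
 *	...
 *	mlx5_cmd_remove_privileged_uid(dev, uid);
 */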