/*
 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/tout.h"
#define CREATE_TRACE_POINTS
#include "diag/cmd_tracepoint.h"

struct mlx5_ifc_mbox_out_bits {
	u8         status[0x8];
	u8         reserved_at_8[0x18];

	u8         syndrome[0x20];

	u8         reserved_at_40[0x40];
};

struct mlx5_ifc_mbox_in_bits {
	u8         opcode[0x10];
	u8         uid[0x10];

	u8         reserved_at_20[0x10];
	u8         op_mod[0x10];

	u8         reserved_at_40[0x40];
};

enum {
	CMD_IF_REV = 5,
};

enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS
};

enum {
	MLX5_CMD_DELIVERY_STAT_OK = 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
};

static u16 in_to_opcode(void *in)
{
	return MLX5_GET(mbox_in, in, opcode);
}

static u16 in_to_uid(void *in)
{
	return MLX5_GET(mbox_in, in, uid);
}

/* Returns true for opcodes that might be triggered very frequently and could
 * throttle the command interface. Limit their command slot usage.
 */
static bool mlx5_cmd_is_throttle_opcode(u16 op)
{
	switch (op) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_SYNC_CRYPTO:
		return true;
	}
	return false;
}

static struct mlx5_cmd_work_ent *
cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
	      struct mlx5_cmd_msg *out, void *uout, int uout_size,
	      mlx5_cmd_cbk_t cbk, void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->idx = -EINVAL;
	ent->in = in;
	ent->out = out;
	ent->uout = uout;
	ent->uout_size = uout_size;
	ent->callback = cbk;
	ent->context = context;
	ent->cmd = cmd;
	ent->page_queue = page_queue;
	ent->op = in_to_opcode(in->first.data);
	refcount_set(&ent->refcnt, 1);

	return ent;
}

static void cmd_free_ent(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}

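/* Allocate a rolling 8-bit token for the next command. Tokens cycle
 * through 1..255 (zero is skipped) and are stamped into the command
 * layout and into every mailbox block of the command's messages.
 */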
static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	cmd->token++;
	if (cmd->token == 0)
		cmd->token++;
	token = cmd->token;
	spin_unlock(&cmd->token_lock);

	return token;
}

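/* Reserve a free command slot for @ent. Slot ownership is tracked by
 * cmd->vars.bitmask (a set bit means the slot is free); the search and
 * the ent_arr[] assignment happen under alloc_lock so a slot is never
 * handed to two entries. Returns the slot index, or -ENOMEM when all
 * max_reg_cmds slots are busy.
 */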
static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	ret = find_first_bit(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
	if (ret < cmd->vars.max_reg_cmds) {
		clear_bit(ret, &cmd->vars.bitmask);
		ent->idx = ret;
		cmd->ent_arr[ent->idx] = ent;
	}
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret < cmd->vars.max_reg_cmds ? ret : -ENOMEM;
}

static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
{
	lockdep_assert_held(&cmd->alloc_lock);
	set_bit(idx, &cmd->vars.bitmask);
}

static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
{
	refcount_inc(&ent->refcnt);
}

static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd *cmd = ent->cmd;
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	if (!refcount_dec_and_test(&ent->refcnt))
		goto out;

	if (ent->idx >= 0) {
		cmd_free_index(cmd, ent->idx);
		up(ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem);
	}

	cmd_free_ent(ent);
out:
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->vars.log_stride);
}

static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg)
{
	int size = msg->len;
	int blen = size - min_t(int, sizeof(msg->first.data), size);

	return DIV_ROUND_UP(blen, MLX5_CMD_DATA_BLOCK_SIZE);
}

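/* Command integrity is protected by simple 8-bit XOR signatures: the
 * signature byte is written as the complement of the XOR of the other
 * covered bytes, so XOR-ing the whole covered range (signature byte
 * included) must yield 0xff on verification. xor8_buf() computes that
 * XOR over @len bytes starting at @offset.
 */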
static u8 xor8_buf(void *buf, size_t offset, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;
	int end = len + offset;

	for (i = offset; i < end; i++)
		sum ^= ptr[i];

	return sum;
}

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
	int xor_len = sizeof(*block) - sizeof(block->data) - 1;

	if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
		return -EHWPOISON;

	if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
		return -EHWPOISON;

	return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block)
{
	int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);

	block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
	block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
}

static void calc_chain_sig(struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *next = msg->next;
	int n = mlx5_calc_cmd_blocks(msg);
	int i = 0;

	for (i = 0; i < n && next; i++) {
		calc_block_sig(next->buf);
		next = next->next;
	}
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
	if (csum) {
		calc_chain_sig(ent->in);
		calc_chain_sig(ent->out);
	}
}

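/* Busy-wait for firmware to return ownership of the command entry.
 * The ownership bit in status_own is polled until it clears or until
 * the command timeout plus a one second grace period expires, in
 * which case ent->ret is set to -ETIMEDOUT.
 */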
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, cmd);
	u64 cmd_to_ms = mlx5_tout_ms(dev, CMD);
	unsigned long poll_end;
	u8 own;

	poll_end = jiffies + msecs_to_jiffies(cmd_to_ms + 1000);

	do {
		own = READ_ONCE(ent->lay->status_own);
		if (!(own & CMD_OWNER_HW)) {
			ent->ret = 0;
			return;
		}
		cond_resched();
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}

static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int n = mlx5_calc_cmd_blocks(ent->out);
	int err;
	u8 sig;
	int i = 0;

	sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EHWPOISON;

	for (i = 0; i < n && next; i++) {
		err = verify_block_sig(next->buf);
		if (err)
			return -EHWPOISON;

		next = next->next;
	}

	return 0;
}

static void dump_buf(void *buf, int size, int data_only, int offset, int idx)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("cmd[%d]: %03x: %08x %08x %08x %08x\n", idx, offset,
			 be32_to_cpu(p[0]), be32_to_cpu(p[1]),
			 be32_to_cpu(p[2]), be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}

static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
				       u32 *synd, u8 *status)
{
	*synd = 0;
	*status = 0;

	switch (op) {
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_XRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_LAG:
	case MLX5_CMD_OP_DESTROY_VPORT_LAG:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
	case MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_FPGA_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
	case MLX5_CMD_OP_DEALLOC_MEMIC:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
	case MLX5_CMD_OP_DEALLOC_SF:
	case MLX5_CMD_OP_DESTROY_UCTX:
	case MLX5_CMD_OP_DESTROY_UMEM:
	case MLX5_CMD_OP_MODIFY_RQT:
		return MLX5_CMD_STAT_OK;

	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_SET_MONITOR_COUNTER:
	case MLX5_CMD_OP_ARM_MONITOR_COUNTER:
	case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_LAG:
	case MLX5_CMD_OP_MODIFY_LAG:
	case MLX5_CMD_OP_QUERY_LAG:
	case MLX5_CMD_OP_CREATE_VPORT_LAG:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_QUERY_RQT:

	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_FPGA_CREATE_QP:
	case MLX5_CMD_OP_FPGA_MODIFY_QP:
	case MLX5_CMD_OP_FPGA_QUERY_QP:
	case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_UCTX:
	case MLX5_CMD_OP_CREATE_UMEM:
	case MLX5_CMD_OP_ALLOC_MEMIC:
	case MLX5_CMD_OP_MODIFY_XRQ:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
	case MLX5_CMD_OP_QUERY_VHCA_STATE:
	case MLX5_CMD_OP_MODIFY_VHCA_STATE:
	case MLX5_CMD_OP_ALLOC_SF:
	case MLX5_CMD_OP_SUSPEND_VHCA:
	case MLX5_CMD_OP_RESUME_VHCA:
	case MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE:
	case MLX5_CMD_OP_SAVE_VHCA_STATE:
	case MLX5_CMD_OP_LOAD_VHCA_STATE:
	case MLX5_CMD_OP_SYNC_CRYPTO:
	case MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS:
		*status = MLX5_DRIVER_STATUS_ABORTED;
		*synd = MLX5_DRIVER_SYND;
		return -ENOLINK;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
		return -EINVAL;
	}
}

const char *mlx5_command_str(int command)
{
#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

	switch (command) {
	MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
	MLX5_COMMAND_STR_CASE(INIT_HCA);
	MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
	MLX5_COMMAND_STR_CASE(ENABLE_HCA);
	MLX5_COMMAND_STR_CASE(DISABLE_HCA);
	MLX5_COMMAND_STR_CASE(QUERY_PAGES);
	MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
	MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ISSI);
	MLX5_COMMAND_STR_CASE(SET_ISSI);
	MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION);
	MLX5_COMMAND_STR_CASE(CREATE_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_MKEY);
	MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
	MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
	MLX5_COMMAND_STR_CASE(CREATE_EQ);
	MLX5_COMMAND_STR_CASE(DESTROY_EQ);
	MLX5_COMMAND_STR_CASE(QUERY_EQ);
	MLX5_COMMAND_STR_CASE(GEN_EQE);
	MLX5_COMMAND_STR_CASE(CREATE_CQ);
	MLX5_COMMAND_STR_CASE(DESTROY_CQ);
	MLX5_COMMAND_STR_CASE(QUERY_CQ);
	MLX5_COMMAND_STR_CASE(MODIFY_CQ);
	MLX5_COMMAND_STR_CASE(CREATE_QP);
	MLX5_COMMAND_STR_CASE(DESTROY_QP);
	MLX5_COMMAND_STR_CASE(RST2INIT_QP);
	MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
	MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
	MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
	MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
	MLX5_COMMAND_STR_CASE(2ERR_QP);
	MLX5_COMMAND_STR_CASE(2RST_QP);
	MLX5_COMMAND_STR_CASE(QUERY_QP);
	MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
	MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
	MLX5_COMMAND_STR_CASE(CREATE_PSV);
	MLX5_COMMAND_STR_CASE(DESTROY_PSV);
	MLX5_COMMAND_STR_CASE(CREATE_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(CREATE_DCT);
	MLX5_COMMAND_STR_CASE(DESTROY_DCT);
	MLX5_COMMAND_STR_CASE(DRAIN_DCT);
	MLX5_COMMAND_STR_CASE(QUERY_DCT);
	MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
	MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(SET_MONITOR_COUNTER);
	MLX5_COMMAND_STR_CASE(ARM_MONITOR_COUNTER);
	MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
	MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
	MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
	MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
	MLX5_COMMAND_STR_CASE(ALLOC_PD);
	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
	MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
	MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
	MLX5_COMMAND_STR_CASE(ACCESS_REG);
	MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
	MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
	MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
	MLX5_COMMAND_STR_CASE(MAD_IFC);
	MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(NOP);
	MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
	MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
	MLX5_COMMAND_STR_CASE(CREATE_LAG);
	MLX5_COMMAND_STR_CASE(MODIFY_LAG);
	MLX5_COMMAND_STR_CASE(QUERY_LAG);
	MLX5_COMMAND_STR_CASE(DESTROY_LAG);
	MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG);
	MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG);
	MLX5_COMMAND_STR_CASE(CREATE_TIR);
	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
	MLX5_COMMAND_STR_CASE(QUERY_TIR);
	MLX5_COMMAND_STR_CASE(CREATE_SQ);
	MLX5_COMMAND_STR_CASE(MODIFY_SQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SQ);
	MLX5_COMMAND_STR_CASE(QUERY_SQ);
	MLX5_COMMAND_STR_CASE(CREATE_RQ);
	MLX5_COMMAND_STR_CASE(MODIFY_RQ);
	MLX5_COMMAND_STR_CASE(DESTROY_RQ);
	MLX5_COMMAND_STR_CASE(QUERY_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_RMP);
	MLX5_COMMAND_STR_CASE(MODIFY_RMP);
	MLX5_COMMAND_STR_CASE(DESTROY_RMP);
	MLX5_COMMAND_STR_CASE(QUERY_RMP);
	MLX5_COMMAND_STR_CASE(CREATE_TIS);
	MLX5_COMMAND_STR_CASE(MODIFY_TIS);
	MLX5_COMMAND_STR_CASE(DESTROY_TIS);
	MLX5_COMMAND_STR_CASE(QUERY_TIS);
	MLX5_COMMAND_STR_CASE(CREATE_RQT);
	MLX5_COMMAND_STR_CASE(MODIFY_RQT);
	MLX5_COMMAND_STR_CASE(DESTROY_RQT);
	MLX5_COMMAND_STR_CASE(QUERY_RQT);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(ALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_COMMAND_STR_CASE(DEALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP);
	MLX5_COMMAND_STR_CASE(FPGA_MODIFY_QP);
	MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP);
	MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS);
	MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP);
	MLX5_COMMAND_STR_CASE(CREATE_XRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRQ);
	MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
	MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
	MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_FUNCTIONS);
	MLX5_COMMAND_STR_CASE(CREATE_UCTX);
	MLX5_COMMAND_STR_CASE(DESTROY_UCTX);
	MLX5_COMMAND_STR_CASE(CREATE_UMEM);
	MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
	MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR);
	MLX5_COMMAND_STR_CASE(MODIFY_XRQ);
	MLX5_COMMAND_STR_CASE(QUERY_VHCA_STATE);
	MLX5_COMMAND_STR_CASE(MODIFY_VHCA_STATE);
	MLX5_COMMAND_STR_CASE(ALLOC_SF);
	MLX5_COMMAND_STR_CASE(DEALLOC_SF);
	MLX5_COMMAND_STR_CASE(SUSPEND_VHCA);
	MLX5_COMMAND_STR_CASE(RESUME_VHCA);
	MLX5_COMMAND_STR_CASE(QUERY_VHCA_MIGRATION_STATE);
	MLX5_COMMAND_STR_CASE(SAVE_VHCA_STATE);
	MLX5_COMMAND_STR_CASE(LOAD_VHCA_STATE);
	MLX5_COMMAND_STR_CASE(SYNC_CRYPTO);
	MLX5_COMMAND_STR_CASE(ALLOW_OTHER_VHCA_ACCESS);
	default: return "unknown command opcode";
	}
}

static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_NOT_READY:
		return "FW not ready";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}

static int cmd_status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK: return 0;
	case MLX5_CMD_STAT_INT_ERR: return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
	case MLX5_CMD_STAT_NOT_READY: return -EAGAIN;
	case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL;
	default: return -EIO;
	}
}

void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
{
	u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
	u8 status = MLX5_GET(mbox_out, out, status);

	mlx5_core_err_rl(dev,
			 "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n",
			 mlx5_command_str(opcode), opcode, op_mod,
			 cmd_status_str(status), status, syndrome, cmd_status_to_err(status));
}
EXPORT_SYMBOL(mlx5_cmd_out_err);

static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
{
	u16 opcode, op_mod;
	u8 status;
	u16 uid;

	opcode = in_to_opcode(in);
	op_mod = MLX5_GET(mbox_in, in, op_mod);
	uid = in_to_uid(in);
	status = MLX5_GET(mbox_out, out, status);

	if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY &&
	    opcode != MLX5_CMD_OP_CREATE_UCTX && status != MLX5_CMD_STAT_NOT_READY)
		mlx5_cmd_out_err(dev, opcode, op_mod, out);
}

int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out)
{
	/* aborted due to PCI error or via reset flow mlx5_cmd_trigger_completions() */
	if (err == -ENXIO) {
		u16 opcode = in_to_opcode(in);
		u32 syndrome;
		u8 status;

		/* PCI Error, emulate command return status, for smooth reset */
		err = mlx5_internal_err_ret_value(dev, opcode, &syndrome, &status);
		MLX5_SET(mbox_out, out, status, status);
		MLX5_SET(mbox_out, out, syndrome, syndrome);
		if (!err)
			return 0;
	}

	/* driver or FW delivery error */
	if (err != -EREMOTEIO && err)
		return err;

	/* check outbox status */
	err = cmd_status_to_err(MLX5_GET(mbox_out, out, status));
	if (err)
		cmd_status_print(dev, in, out);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_check);

static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	struct mlx5_cmd_mailbox *next = msg->next;
	int n = mlx5_calc_cmd_blocks(msg);
	u16 op = ent->op;
	int data_only;
	u32 offset = 0;
	int dump_len;
	int i;

	mlx5_core_dbg(dev, "cmd[%d]: start dump\n", ent->idx);
	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "cmd[%d]: dump command data %s(0x%x) %s\n",
				   ent->idx, mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "cmd[%d]: dump command %s(0x%x) %s\n",
			      ent->idx, mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset, ent->idx);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset, ent->idx);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset, ent->idx);
		offset += sizeof(*ent->lay);
	}

	for (i = 0; i < n && next; i++) {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset, ent->idx);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "cmd[%d]: command block:\n", ent->idx);
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset,
				 ent->idx);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");

	mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx);
}

static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);

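/* Timeout handler for commands issued with a completion callback.
 * First try to recover lost completion EQEs; if the entry is still
 * pending afterwards, force its completion with -ETIMEDOUT. The
 * forced path leaks the command slot on purpose, since firmware may
 * still write to it later.
 */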
static void cb_timeout_handler(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work,
						  work);
	struct mlx5_cmd_work_ent *ent = container_of(dwork,
						     struct mlx5_cmd_work_ent,
						     cb_timeout_work);
	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
						 cmd);

	mlx5_cmd_eq_recover(dev);

	/* Maybe the command was already handled by EQ recovery? */
	if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) {
		mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx,
			       mlx5_command_str(ent->op), ent->op);
		goto out; /* already handled */
	}

	ent->ret = -ETIMEDOUT;
	mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n",
		       ent->idx, mlx5_command_str(ent->op), ent->op);
	mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);

out:
	cmd_ent_put(ent); /* for the cmd_ent_get() taken when the delayed work was scheduled */
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg);

static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode)
{
	if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL)
		return true;

	return cmd->allowed_opcode == opcode;
}

bool mlx5_cmd_is_down(struct mlx5_core_dev *dev)
{
	return pci_channel_offline(dev->pdev) ||
	       dev->cmd.state != MLX5_CMDIF_STATE_UP ||
	       dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR;
}

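/* Worker that actually issues a command to firmware: acquire a slot
 * (page-queue commands use a dedicated slot at index max_reg_cmds),
 * build and sign the command layout, then ring the doorbell. In
 * polling mode the completion is also collected here.
 */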
static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	bool poll_cmd = ent->polling;
	struct mlx5_cmd_layout *lay;
	struct mlx5_core_dev *dev;
	unsigned long timeout;
	unsigned long flags;
	int alloc_ret;
	int cmd_mode;

	complete(&ent->handling);

	dev = container_of(cmd, struct mlx5_core_dev, cmd);
	timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));

	if (!ent->page_queue) {
		if (down_timeout(&cmd->vars.sem, timeout)) {
			mlx5_core_warn(dev, "%s(0x%x) timed out while waiting for a slot.\n",
				       mlx5_command_str(ent->op), ent->op);
			if (ent->callback) {
				ent->callback(-EBUSY, ent->context);
				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);
				cmd_ent_put(ent);
			} else {
				ent->ret = -EBUSY;
				complete(&ent->done);
			}
			complete(&ent->slotted);
			return;
		}
		alloc_ret = cmd_alloc_index(cmd, ent);
		if (alloc_ret < 0) {
			mlx5_core_err_rl(dev, "failed to allocate command entry\n");
			if (ent->callback) {
				ent->callback(-EAGAIN, ent->context);
				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);
				cmd_ent_put(ent);
			} else {
				ent->ret = -EAGAIN;
				complete(&ent->done);
			}
			up(&cmd->vars.sem);
			complete(&ent->slotted);
			return;
		}
	} else {
		down(&cmd->vars.pages_sem);
		ent->idx = cmd->vars.max_reg_cmds;
		spin_lock_irqsave(&cmd->alloc_lock, flags);
		clear_bit(ent->idx, &cmd->vars.bitmask);
		cmd->ent_arr[ent->idx] = ent;
		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
	}

	complete(&ent->slotted);

	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();
	cmd_mode = cmd->mode;

	if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, timeout))
		cmd_ent_get(ent);
	set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);

	cmd_ent_get(ent); /* for the _real_ FW event on completion */
	/* Skip sending command to fw if internal error */
	if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {
		ent->ret = -ENXIO;
		mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
		return;
	}

	/* ring doorbell after the descriptor is valid */
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
	wmb();
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	/* if not in polling don't use ent after this point */
	if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		rmb();
		mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT));
	}
}

static int deliv_status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
	case MLX5_DRIVER_STATUS_ABORTED:
		return 0;
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return -EBADR;
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return -EFAULT; /* Bad address */
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return -ENOMSG;
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return -EIO;
	default:
		return -EINVAL;
	}
}

static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}

enum {
	MLX5_CMD_TIMEOUT_RECOVER_MSEC = 5 * 1000,
};

static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
					  struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_RECOVER_MSEC);

	mlx5_cmd_eq_recover(dev);

	/* Re-wait on the ent->done after executing the recovery flow. If the
	 * recovery flow (or any other recovery flow running simultaneously)
	 * has recovered an EQE, it should cause the entry to be completed by
	 * the command interface.
	 */
	if (wait_for_completion_timeout(&ent->done, timeout)) {
		mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx,
			       mlx5_command_str(ent->op), ent->op);
		return;
	}

	mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx,
		       mlx5_command_str(ent->op), ent->op);

	ent->ret = -ETIMEDOUT;
	mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
}

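/* Block until @ent completes. Waits first for the work handler to
 * start and for a slot to be assigned, then for the command itself;
 * on timeout in events mode, try EQE recovery before forcing a
 * timeout completion.
 */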
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (!wait_for_completion_timeout(&ent->handling, timeout) &&
	    cancel_work_sync(&ent->work)) {
		ent->ret = -ECANCELED;
		goto out_err;
	}

	wait_for_completion(&ent->slotted);

	if (cmd->mode == CMD_MODE_POLLING || ent->polling)
		wait_for_completion(&ent->done);
	else if (!wait_for_completion_timeout(&ent->done, timeout))
		wait_func_handle_exec_timeout(dev, ent);

out_err:
	err = ent->ret;

	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(ent->op), ent->op);
	} else if (err == -ECANCELED) {
		mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
			       mlx5_command_str(ent->op), ent->op);
	} else if (err == -EBUSY) {
		mlx5_core_warn(dev, "%s(0x%x) timeout while waiting for command semaphore.\n",
			       mlx5_command_str(ent->op), ent->op);
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
		      err, deliv_status_to_str(ent->status), ent->status);

	return err;
}

/* Notes:
 * 1. Callback functions may not sleep
 * 2. Page queue commands do not support asynchronous completion
 *
 * return value in case (!callback):
 *	ret < 0 : Command execution couldn't be submitted by driver
 *	ret > 0 : Command execution couldn't be performed by firmware
 *	ret == 0: Command was executed by FW, Caller must check FW outbox status.
 *
 * return value in case (callback):
 *	ret < 0 : Command execution couldn't be submitted by driver
 *	ret == 0: Command will be submitted to FW for execution
 *		  and the callback will be called for further status updates
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue,
			   u8 token, bool force_polling)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	u8 status = 0;
	int err = 0;
	s64 ds;

	if (callback && page_queue)
		return -EINVAL;

	ent = cmd_alloc_ent(cmd, in, out, uout, uout_size,
			    callback, context, page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	/* put for this ent is when consumed, depending on the use case
	 * 1) (!callback) blocking flow: by caller after wait_func completes
	 * 2) (callback) flow: by mlx5_cmd_comp_handler() when ent is handled
	 */

	ent->token = token;
	ent->polling = force_polling;

	init_completion(&ent->handling);
	init_completion(&ent->slotted);
	if (!callback)
		init_completion(&ent->done);

	INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -EALREADY;
		goto out_free;
	}

	if (callback)
		return 0; /* mlx5_cmd_comp_handler() will put(ent) */

	err = wait_func(dev, ent);
	if (err == -ETIMEDOUT || err == -ECANCELED || err == -EBUSY)
		goto out_free;

	ds = ent->ts2 - ent->ts1;
	stats = xa_load(&cmd->stats, ent->op);
	if (stats) {
		spin_lock_irq(&stats->lock);
		stats->sum += ds;
		++stats->n;
		spin_unlock_irq(&stats->lock);
	}
	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
			   "fw exec time for %s is %lld nsec\n",
			   mlx5_command_str(ent->op), ds);

out_free:
	status = ent->status;
	cmd_ent_put(ent);
	return err ? : status;
}

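/* debugfs "run" hook: writing the string "go" executes the command
 * currently staged in the "in" buffer and stores the result in the
 * "out" buffer for later reading.
 */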
static ssize_t dbg_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char lbuf[3];
	int err;

	if (!dbg->in_msg || !dbg->out_msg)
		return -ENOMEM;

	if (count < sizeof(lbuf) - 1)
		return -EINVAL;

	if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1))
		return -EFAULT;

	lbuf[sizeof(lbuf) - 1] = 0;

	if (strcmp(lbuf, "go"))
		return -EINVAL;

	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

	return err ? err : count;
}

static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};

static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
			    u8 token)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(to->first.data));
	memcpy(to->first.data, from, copy);
	size -= copy;
	from += copy;

	next = to->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(block->data, from, copy);
		from += copy;
		size -= copy;
		block->token = token;
		next = next->next;
	}

	return 0;
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(from->first.data));
	memcpy(to, from->first.data, copy);
	size -= copy;
	to += copy;

	next = from->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;

		memcpy(to, block->data, copy);
		to += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}

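/* Allocate a single mailbox backed by the command DMA pool; the
 * mailbox records the DMA address that gets chained into the
 * command's block list.
 */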
static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
					      gfp_t flags)
{
	struct mlx5_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), flags);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = dma_pool_zalloc(dev->cmd.pool, flags,
				       &mailbox->dma);
	if (!mailbox->buf) {
		mlx5_core_dbg(dev, "failed allocation\n");
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}
	mailbox->next = NULL;

	return mailbox;
}

static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}

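/* Allocate a command message large enough for @size bytes. Data
 * beyond the inline portion is carried in a chain of mailbox blocks,
 * built here in reverse so that the chain starts at block_num 0 and
 * each block's "next" DMA pointer leads to the following block.
 */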
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size,
					       u8 token)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), flags);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	msg->len = size;
	n = mlx5_calc_cmd_blocks(msg);

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		block->token = token;
		head = tmp;
	}
	msg->next = head;
	return msg;

err_alloc:
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}

static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *head = msg->next;
	struct mlx5_cmd_mailbox *next;

	while (head) {
		next = head->next;
		free_cmd_box(dev, head);
		head = next;
	}
	kfree(msg);
}

static ssize_t data_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	void *ptr;

	if (*pos != 0)
		return -EINVAL;

	kfree(dbg->in_msg);
	dbg->in_msg = NULL;
	dbg->inlen = 0;
	ptr = memdup_user(buf, count);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	dbg->in_msg = ptr;
	dbg->inlen = count;

	*pos = count;

	return count;
}

static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!dbg->out_msg)
		return -ENOMEM;

	return simple_read_from_buffer(buf, count, pos, dbg->out_msg,
				       dbg->outlen);
}

static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};

static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
			   loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen[8];
	int err;

	err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, outlen, err);
}

static ssize_t outlen_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen_str[8] = {0};
	int outlen;
	void *ptr;
	int err;

	if (*pos != 0 || count > 6)
		return -EINVAL;

	kfree(dbg->out_msg);
	dbg->out_msg = NULL;
	dbg->outlen = 0;

	if (copy_from_user(outlen_str, buf, count))
		return -EFAULT;

	err = sscanf(outlen_str, "%d", &outlen);
	if (err != 1)
		return -EINVAL;

	ptr = kzalloc(outlen, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	dbg->out_msg = ptr;
	dbg->outlen = outlen;

	*pos = count;

	return count;
}

static const struct file_operations olfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= outlen_write,
	.read	= outlen_read,
};

static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(dev->device));
}

static void clean_debug_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!mlx5_debugfs_root)
		return;

	debugfs_remove_recursive(dbg->dbg_root);
}

static void create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	dbg->dbg_root = debugfs_create_dir("cmd", mlx5_debugfs_get_dev_root(dev));

	debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops);
	debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops);
	debugfs_create_file("out_len", 0600, dbg->dbg_root, dev, &olfops);
	debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status);
	debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
}

void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->vars.max_reg_cmds; i++)
		down(&cmd->vars.sem);
	down(&cmd->vars.pages_sem);

	cmd->allowed_opcode = opcode;

	up(&cmd->vars.pages_sem);
	for (i = 0; i < cmd->vars.max_reg_cmds; i++)
		up(&cmd->vars.sem);
}

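/* Switch between polling and events mode. Taking every regular slot
 * semaphore plus the page-queue semaphore quiesces the command
 * interface, so the mode flips while no command is in flight.
 */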
static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->vars.max_reg_cmds; i++)
		down(&cmd->vars.sem);
	down(&cmd->vars.pages_sem);

	cmd->mode = mode;

	up(&cmd->vars.pages_sem);
	for (i = 0; i < cmd->vars.max_reg_cmds; i++)
		up(&cmd->vars.sem);
}

static int cmd_comp_notifier(struct notifier_block *nb,
			     unsigned long type, void *data)
{
	struct mlx5_core_dev *dev;
	struct mlx5_cmd *cmd;
	struct mlx5_eqe *eqe;

	cmd = mlx5_nb_cof(nb, struct mlx5_cmd, nb);
	dev = container_of(cmd, struct mlx5_core_dev, cmd);
	eqe = data;

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return NOTIFY_DONE;

	mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);

	return NOTIFY_OK;
}

void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	MLX5_NB_INIT(&dev->cmd.nb, cmd_comp_notifier, CMD);
	mlx5_eq_notifier_register(dev, &dev->cmd.nb);
	mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
	mlx5_eq_notifier_unregister(dev, &dev->cmd.nb);
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	unsigned long flags;

	if (msg->parent) {
		spin_lock_irqsave(&msg->parent->lock, flags);
		list_add_tail(&msg->list, &msg->parent->head);
		spin_unlock_irqrestore(&msg->parent->lock, flags);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}

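/* Completion handler shared by the EQ notifier, the polling path and
 * the forced-teardown path. @vec is a bitmap of completed command
 * slots; @forced marks completions triggered by the driver (reset or
 * PCI error) rather than by real firmware EQEs.
 */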
static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;
	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;
	unsigned long vector;

	/* there can be at most 32 command queues */
	vector = vec & 0xffffffff;
	for (i = 0; i < (1 << cmd->vars.log_sz); i++) {
		if (test_bit(i, &vector)) {
			ent = cmd->ent_arr[i];

			/* if we already completed the command, ignore it */
			if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
						&ent->state)) {
				/* only real completion can free the cmd slot */
				if (!forced) {
					mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
						      ent->idx);
					cmd_ent_put(ent);
				}
				continue;
			}

			if (ent->callback && cancel_delayed_work(&ent->cb_timeout_work))
				cmd_ent_put(ent); /* timeout work was canceled */

			if (!forced || /* Real FW completion */
			    mlx5_cmd_is_down(dev) || /* No real FW completion is expected */
			    !opcode_allowed(cmd, ent->op))
				cmd_ent_put(ent);

			ent->ts2 = ktime_get_ns();
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);

			if (vec & MLX5_TRIGGERED_CMD_COMP)
				ent->ret = -ENXIO;

			if (!ent->ret) { /* Command completed by FW */
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);

				ent->status = ent->lay->status_own >> 1;

				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}

			if (ent->callback) {
				ds = ent->ts2 - ent->ts1;
				stats = xa_load(&cmd->stats, ent->op);
				if (stats) {
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					++stats->n;
					spin_unlock_irqrestore(&stats->lock, flags);
				}

				callback = ent->callback;
				context = ent->context;
				err = ent->ret ? : ent->status;
				if (err > 0) /* Failed in FW, command didn't execute */
					err = deliv_status_to_err(err);

				if (!err)
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);

				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);

				/* final consumer is done, release ent */
				cmd_ent_put(ent);
				callback(err, context);
			} else {
				/* release wait_func() so mlx5_cmd_invoke()
				 * can make the final ent_put()
				 */
				complete(&ent->done);
			}
		}
	}
}
1773
1774 #define MLX5_MAX_MANAGE_PAGES_CMD_ENT 1
1775 #define MLX5_CMD_MASK ((1UL << (cmd->vars.max_reg_cmds + \
1776 MLX5_MAX_MANAGE_PAGES_CMD_ENT)) - 1)
1777
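/* Force-complete every currently allocated command entry, used when the
 * device can no longer deliver real completions (teardown or internal error).
 */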
static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	unsigned long bitmask;
	unsigned long flags;
	u64 vector;
	int i;

	/* wait for pending handlers to complete */
	mlx5_eq_synchronize_cmd_irq(dev);
	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
	vector = ~dev->cmd.vars.bitmask & MLX5_CMD_MASK;
	if (!vector)
		goto no_trig;

	bitmask = vector;
	/* We must increment the allocated entries' refcount before triggering
	 * the completions to guarantee pending commands are not freed in the
	 * meantime. For that reason, it also has to be done inside the
	 * alloc_lock.
	 */
	for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz))
		cmd_ent_get(cmd->ent_arr[i]);
	vector |= MLX5_TRIGGERED_CMD_COMP;
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);

	mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
	mlx5_cmd_comp_handler(dev, vector, true);
	for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz))
		cmd_ent_put(cmd->ent_arr[i]);
	return;

no_trig:
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
}

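/* Drain the command interface: repeatedly force completions until every
 * regular command slot and the pages slot can be taken, then release them
 * all, leaving the interface idle and unlocked.
 */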
void mlx5_cmd_flush(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->vars.max_reg_cmds; i++) {
		while (down_trylock(&cmd->vars.sem)) {
			mlx5_cmd_trigger_completions(dev);
			cond_resched();
		}
	}

	while (down_trylock(&cmd->vars.pages_sem)) {
		mlx5_cmd_trigger_completions(dev);
		cond_resched();
	}

	/* Unlock cmdif */
	up(&cmd->vars.pages_sem);
	for (i = 0; i < cmd->vars.max_reg_cmds; i++)
		up(&cmd->vars.sem);
}

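/* Pick the smallest cache channel whose inbox size fits @in_size; fall back
 * to a fresh allocation when the request is tiny (<= 16 bytes, which
 * presumably fits in the inline part of the command layout) or every
 * suitable channel is empty.
 */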
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct cmd_msg_cache *ch = NULL;
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	if (in_size <= 16)
		goto cache_miss;

	for (i = 0; i < dev->profile.num_cmd_caches; i++) {
		ch = &cmd->cache[i];
		if (in_size > ch->max_inbox_size)
			continue;
		spin_lock_irq(&ch->lock);
		if (list_empty(&ch->head)) {
			spin_unlock_irq(&ch->lock);
			continue;
		}
		msg = list_entry(ch->head.next, typeof(*msg), list);
		/* Cached entries are sized for the channel's maximum inbox,
		 * so the real request size must be recorded explicitly.
		 */
		msg->len = in_size;
		list_del(&msg->list);
		spin_unlock_irq(&ch->lock);
		break;
	}

	if (!IS_ERR(msg))
		return msg;

cache_miss:
	msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
	return msg;
}

static int is_manage_pages(void *in)
{
	return in_to_opcode(in) == MLX5_CMD_OP_MANAGE_PAGES;
}

static bool mlx5_has_privileged_uid(struct mlx5_core_dev *dev)
{
	return !xa_empty(&dev->cmd.vars.privileged_uids);
}

static bool mlx5_cmd_is_privileged_uid(struct mlx5_core_dev *dev,
				       u16 uid)
{
	return !!xa_load(&dev->cmd.vars.privileged_uids, uid);
}

/* Notes:
 * 1. Callback functions may not sleep
 * 2. Page queue commands do not support asynchronous completion
 */
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context,
		    bool force_polling)
{
	struct mlx5_cmd_msg *inb, *outb;
	u16 opcode = in_to_opcode(in);
	bool throttle_locked = false;
	bool unpriv_locked = false;
	u16 uid = in_to_uid(in);
	int pages_queue;
	gfp_t gfp;
	u8 token;
	int err;

	if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
		return -ENXIO;

	if (!callback) {
		/* The semaphore is already held for callback commands. It was
		 * acquired in mlx5_cmd_exec_cb()
		 */
		if (uid && mlx5_has_privileged_uid(dev)) {
			if (!mlx5_cmd_is_privileged_uid(dev, uid)) {
				unpriv_locked = true;
				down(&dev->cmd.vars.unprivileged_sem);
			}
		} else if (mlx5_cmd_is_throttle_opcode(opcode)) {
			throttle_locked = true;
			down(&dev->cmd.vars.throttle_sem);
		}
	}

	pages_queue = is_manage_pages(in);
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		goto out_up;
	}

	token = alloc_token(&dev->cmd);

	err = mlx5_copy_to_msg(inb, in, in_size, token);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, token, force_polling);
	if (callback)
		return err;

	if (err > 0) /* Failed in FW, command didn't execute */
		err = deliv_status_to_err(err);

	if (err)
		goto out_out;

	/* command completed by FW */
	err = mlx5_copy_from_msg(out, outb, out_size);
out_out:
	mlx5_free_cmd_msg(dev, outb);
out_in:
	free_msg(dev, inb);
out_up:
	if (throttle_locked)
		up(&dev->cmd.vars.throttle_sem);
	if (unpriv_locked)
		up(&dev->cmd.vars.unprivileged_sem);

	return err;
}

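/* Emit a tracepoint describing a command that failed in FW, including the
 * decoded status string and syndrome.
 */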
static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
{
	u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
	u8 status = MLX5_GET(mbox_out, out, status);

	trace_mlx5_cmd(mlx5_command_str(opcode), opcode, op_mod,
		       cmd_status_str(status), status, syndrome,
		       cmd_status_to_err(status));
}

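/* Record a command failure in the per-opcode debugfs stats; mailbox-level
 * failures (-EREMOTEIO) additionally capture the last FW status and syndrome.
 */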
static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
			   u32 syndrome, int err)
{
	const char *namep = mlx5_command_str(opcode);
	struct mlx5_cmd_stats *stats;
	unsigned long flags;

	if (!err || !(strcmp(namep, "unknown command opcode")))
		return;

	stats = xa_load(&dev->cmd.stats, opcode);
	if (!stats)
		return;
	spin_lock_irqsave(&stats->lock, flags);
	stats->failed++;
	if (err < 0)
		stats->last_failed_errno = -err;
	if (err == -EREMOTEIO) {
		stats->failed_mbox_status++;
		stats->last_failed_mbox_status = status;
		stats->last_failed_syndrome = syndrome;
	}
	spin_unlock_irqrestore(&stats->lock, flags);
}

/* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, u16 op_mod, void *out)
{
	u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
	u8 status = MLX5_GET(mbox_out, out, status);

	if (err == -EREMOTEIO) /* -EREMOTEIO is reserved for a bad outbox status */
		err = -EIO;

	if (!err && status != MLX5_CMD_STAT_OK) {
		err = -EREMOTEIO;
		mlx5_cmd_err_trace(dev, opcode, op_mod, out);
	}

	cmd_status_log(dev, opcode, status, syndrome, err);
	return err;
}


/**
 * mlx5_cmd_do - Execute a FW command and wait for completion.
 * Unlike mlx5_cmd_exec(), this function does not translate or intercept
 * outbox.status; it returns -EREMOTEIO whenever
 * outbox.status != MLX5_CMD_STAT_OK
 *
 * @dev: mlx5 core device
 * @in: inbox mlx5_ifc command buffer
 * @in_size: inbox buffer size
 * @out: outbox mlx5_ifc buffer
 * @out_size: outbox size
 *
 * @return:
 * -EREMOTEIO : Command executed by FW, outbox.status != MLX5_CMD_STAT_OK.
 *              Caller must check FW outbox status.
 * 0 : Command execution successful, outbox.status == MLX5_CMD_STAT_OK.
 * < 0 : Command execution couldn't be performed by firmware or driver
 */
int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size)
{
	int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
	u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
	u16 opcode = in_to_opcode(in);

	return cmd_status_err(dev, err, opcode, op_mod, out);
}
EXPORT_SYMBOL(mlx5_cmd_do);

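/* A minimal usage sketch (hypothetical caller, not part of this file):
 * consumers that care about the raw FW status use mlx5_cmd_do() and decide
 * themselves how to react to -EREMOTEIO, e.g.:
 *
 *	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
 *	int err;
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_do(dev, in, sizeof(in), out, sizeof(out));
 *	if (err == -EREMOTEIO)
 *		err = mlx5_cmd_check(dev, err, in, out);
 */
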
/**
 * mlx5_cmd_exec - Execute a FW command and wait for completion
 *
 * @dev: mlx5 core device
 * @in: inbox mlx5_ifc command buffer
 * @in_size: inbox buffer size
 * @out: outbox mlx5_ifc buffer
 * @out_size: outbox size
 *
 * @return: 0 if no error, FW command execution was successful
 * and outbox status is ok.
 */
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	int err = mlx5_cmd_do(dev, in, in_size, out, out_size);

	return mlx5_cmd_check(dev, err, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);

/**
 * mlx5_cmd_exec_polling - Execute a FW command, polling for completion.
 * Needed for driver force teardown, when the command completion EQ
 * is not available to complete the command
 *
 * @dev: mlx5 core device
 * @in: inbox mlx5_ifc command buffer
 * @in_size: inbox buffer size
 * @out: outbox mlx5_ifc buffer
 * @out_size: outbox size
 *
 * @return: 0 if no error, FW command execution was successful
 * and outbox status is ok.
 */
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size)
{
	int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
	u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
	u16 opcode = in_to_opcode(in);

	err = cmd_status_err(dev, err, opcode, op_mod, out);
	return mlx5_cmd_check(dev, err, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec_polling);

void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
			     struct mlx5_async_ctx *ctx)
{
	ctx->dev = dev;
	/* Starts at 1 to avoid doing wake_up if we are not cleaning up */
	atomic_set(&ctx->num_inflight, 1);
	init_completion(&ctx->inflight_done);
}
EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);

/**
 * mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx
 * @ctx: The ctx to clean
 *
 * Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The
 * caller must ensure that mlx5_cmd_exec_cb() is not called during or after
 * the call to mlx5_cmd_cleanup_async_ctx().
 */
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
{
	if (!atomic_dec_and_test(&ctx->num_inflight))
		wait_for_completion(&ctx->inflight_done);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);

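/* Completion trampoline for async commands: map the raw status through
 * cmd_status_err(), invoke the user callback, then release any semaphore
 * taken in mlx5_cmd_exec_cb() and drop the in-flight count.
 */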
static void mlx5_cmd_exec_cb_handler(int status, void *_work)
{
	struct mlx5_async_work *work = _work;
	struct mlx5_async_ctx *ctx;
	struct mlx5_core_dev *dev;
	bool throttle_locked;
	bool unpriv_locked;

	ctx = work->ctx;
	dev = ctx->dev;
	throttle_locked = work->throttle_locked;
	unpriv_locked = work->unpriv_locked;
	status = cmd_status_err(dev, status, work->opcode, work->op_mod, work->out);
	work->user_callback(status, work);
	/* Can't access "work" from this point on. It could have been freed in
	 * the callback.
	 */
	if (throttle_locked)
		up(&dev->cmd.vars.throttle_sem);
	if (unpriv_locked)
		up(&dev->cmd.vars.unprivileged_sem);
	if (atomic_dec_and_test(&ctx->num_inflight))
		complete(&ctx->inflight_done);
}

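/* Issue a FW command asynchronously; @callback runs from the completion
 * path. Unprivileged-UID and throttled opcodes only try their semaphore here
 * and return -EBUSY instead of sleeping.
 */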
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
		     void *out, int out_size, mlx5_async_cbk_t callback,
		     struct mlx5_async_work *work)
{
	struct mlx5_core_dev *dev = ctx->dev;
	u16 uid;
	int ret;

	work->ctx = ctx;
	work->user_callback = callback;
	work->opcode = in_to_opcode(in);
	work->op_mod = MLX5_GET(mbox_in, in, op_mod);
	work->out = out;
	work->throttle_locked = false;
	work->unpriv_locked = false;
	uid = in_to_uid(in);

	if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
		return -EIO;

	if (uid && mlx5_has_privileged_uid(dev)) {
		if (!mlx5_cmd_is_privileged_uid(dev, uid)) {
			if (down_trylock(&dev->cmd.vars.unprivileged_sem)) {
				ret = -EBUSY;
				goto dec_num_inflight;
			}
			work->unpriv_locked = true;
		}
	} else if (mlx5_cmd_is_throttle_opcode(in_to_opcode(in))) {
		if (down_trylock(&dev->cmd.vars.throttle_sem)) {
			ret = -EBUSY;
			goto dec_num_inflight;
		}
		work->throttle_locked = true;
	}

	ret = cmd_exec(dev, in, in_size, out, out_size,
		       mlx5_cmd_exec_cb_handler, work, false);
	if (ret)
		goto sem_up;

	return 0;

sem_up:
	if (work->throttle_locked)
		up(&dev->cmd.vars.throttle_sem);
	if (work->unpriv_locked)
		up(&dev->cmd.vars.unprivileged_sem);
dec_num_inflight:
	if (atomic_dec_and_test(&ctx->num_inflight))
		complete(&ctx->inflight_done);

	return ret;
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);

int mlx5_cmd_allow_other_vhca_access(struct mlx5_core_dev *dev,
				     struct mlx5_cmd_allow_other_vhca_access_attr *attr)
{
	u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {};
	u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {};
	void *key;

	MLX5_SET(allow_other_vhca_access_in,
		 in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
	MLX5_SET(allow_other_vhca_access_in,
		 in, object_type_to_be_accessed, attr->obj_type);
	MLX5_SET(allow_other_vhca_access_in,
		 in, object_id_to_be_accessed, attr->obj_id);

	key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
	memcpy(key, attr->access_key, sizeof(attr->access_key));

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_cmd_alias_obj_create(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_alias_obj_create_attr *alias_attr,
			      u32 *obj_id)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	u32 in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {};
	void *param;
	void *attr;
	void *key;
	int ret;

	attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, obj_type, alias_attr->obj_type);
	param = MLX5_ADDR_OF(general_obj_in_cmd_hdr, in, op_param);
	MLX5_SET(general_obj_create_param, param, alias_object, 1);

	attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
	MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
	MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);

	key = MLX5_ADDR_OF(alias_context, attr, access_key);
	memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));

	ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (ret)
		return ret;

	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return 0;
}

int mlx5_cmd_alias_obj_destroy(struct mlx5_core_dev *dev, u32 obj_id,
			       u16 obj_type)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, obj_type);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct cmd_msg_cache *ch;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;
	int i;

	for (i = 0; i < dev->profile.num_cmd_caches; i++) {
		ch = &dev->cmd.cache[i];
		list_for_each_entry_safe(msg, n, &ch->head, list) {
			list_del(&msg->list);
			mlx5_free_cmd_msg(dev, msg);
		}
	}
}

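/* Per-channel entry counts and inbox sizes for the message cache: many small
 * buffers, progressively fewer of the larger ones. Each size follows the
 * pattern 16 + n * MLX5_CMD_DATA_BLOCK_SIZE.
 */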
static unsigned cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = {
	512, 32, 16, 8, 2
};

static unsigned cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = {
	16 + MLX5_CMD_DATA_BLOCK_SIZE,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 2,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 16,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 256,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 512,
};

static void create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cmd_msg_cache *ch;
	struct mlx5_cmd_msg *msg;
	int i;
	int k;

	/* Initialize and fill the caches with initial entries */
	for (k = 0; k < dev->profile.num_cmd_caches; k++) {
		ch = &cmd->cache[k];
		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->head);
		ch->num_ent = cmd_cache_num_ent[k];
		ch->max_inbox_size = cmd_cache_ent_size[k];
		for (i = 0; i < ch->num_ent; i++) {
			msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN,
						 ch->max_inbox_size, 0);
			if (IS_ERR(msg))
				break;
			msg->parent = ch;
			list_add_tail(&msg->list, &ch->head);
		}
	}
}

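/* Allocate the 4K command queue page. If the coherent allocation happens not
 * to be 4K-aligned, retry with twice the page size minus one byte and align
 * the CPU and DMA addresses manually.
 */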
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE,
						&cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	/* make sure it is aligned to 4K */
	if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
		cmd->cmd_buf = cmd->cmd_alloc_buf;
		cmd->dma = cmd->alloc_dma;
		cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
		return 0;
	}

	dma_free_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
	cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev),
						2 * MLX5_ADAPTER_PAGE_SIZE - 1,
						&cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
	cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
	cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
	return 0;
}

static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	dma_free_coherent(mlx5_core_dma_dev(dev), cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
}

static u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}

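/* One-time setup of the command-interface software state (locks, workqueue,
 * debugfs); the hardware-facing resources are set up later in
 * mlx5_cmd_enable().
 */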
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	cmd->checksum_disabled = 1;

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		mlx5_core_err(dev, "failed to create command workqueue\n");
		return -ENOMEM;
	}

	mlx5_cmdif_debugfs_init(dev);

	return 0;
}

void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	mlx5_cmdif_debugfs_cleanup(dev);
	destroy_workqueue(cmd->wq);
}

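/* Bring the command interface up against the hardware: validate the cmdif
 * revision and queue geometry advertised in the initialization segment,
 * allocate the DMA resources and hand the 4K-aligned queue address to FW.
 */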
int mlx5_cmd_enable(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	int err;

	memset(&cmd->vars, 0, sizeof(cmd->vars));
	cmd->vars.cmdif_rev = cmdif_rev(dev);
	if (cmd->vars.cmdif_rev != CMD_IF_REV) {
		mlx5_core_err(dev,
			      "Driver cmdif rev(%d) differs from firmware's(%d)\n",
			      CMD_IF_REV, cmd->vars.cmdif_rev);
		return -EINVAL;
	}

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->vars.log_sz = cmd_l >> 4 & 0xf;
	cmd->vars.log_stride = cmd_l & 0xf;
	if (1 << cmd->vars.log_sz > MLX5_MAX_COMMANDS) {
		mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
			      1 << cmd->vars.log_sz);
		return -EINVAL;
	}

	if (cmd->vars.log_sz + cmd->vars.log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		mlx5_core_err(dev, "command queue size overflow\n");
		return -EINVAL;
	}

	cmd->state = MLX5_CMDIF_STATE_DOWN;
	cmd->vars.max_reg_cmds = (1 << cmd->vars.log_sz) - 1;
	cmd->vars.bitmask = MLX5_CMD_MASK;

	sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds);
	sema_init(&cmd->vars.pages_sem, 1);
	sema_init(&cmd->vars.throttle_sem, DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));
	sema_init(&cmd->vars.unprivileged_sem,
		  DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));

	xa_init(&cmd->vars.privileged_uids);

	cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
	if (!cmd->pool) {
		err = -ENOMEM;
		goto err_destroy_xa;
	}

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		mlx5_core_err(dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_cmd_page;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;
	cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL;

	create_msg_cache(dev);
	create_debugfs_files(dev);

	return 0;

err_cmd_page:
	free_cmd_page(dev, cmd);
err_free_pool:
	dma_pool_destroy(cmd->pool);
err_destroy_xa:
	xa_destroy(&dev->cmd.vars.privileged_uids);
	return err;
}

void mlx5_cmd_disable(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	flush_workqueue(cmd->wq);
	clean_debug_files(dev);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
	dma_pool_destroy(cmd->pool);
	xa_destroy(&dev->cmd.vars.privileged_uids);
}

void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
			enum mlx5_cmdif_state cmdif_state)
{
	dev->cmd.state = cmdif_state;
}

int mlx5_cmd_add_privileged_uid(struct mlx5_core_dev *dev, u16 uid)
{
	return xa_insert(&dev->cmd.vars.privileged_uids, uid,
			 xa_mk_value(uid), GFP_KERNEL);
}
EXPORT_SYMBOL(mlx5_cmd_add_privileged_uid);

void mlx5_cmd_remove_privileged_uid(struct mlx5_core_dev *dev, u16 uid)
{
	void *data = xa_erase(&dev->cmd.vars.privileged_uids, uid);

	WARN(!data, "Privileged UID %u does not exist\n", uid);
}
EXPORT_SYMBOL(mlx5_cmd_remove_privileged_uid);