/*-
 * Copyright (c) 2013-2020, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <dev/mlx5/driver.h>
#include <linux/module.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>

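/*
 * Execute the QUERY_ADAPTER command and return the firmware's response
 * in the caller-supplied output buffer.
 */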
static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
				  int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_adapter_in)];
	int err;

	memset(in, 0, sizeof(in));

	MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
	return err;
}

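/*
 * Read the adapter's PSID (board ID) from the QUERY_ADAPTER output and
 * cache it in dev->board_id.
 */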
int mlx5_query_board_id(struct mlx5_core_dev *dev)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_cmd_query_adapter(dev, out, outlen);
	if (err)
		goto out_out;

	memcpy(dev->board_id,
	       MLX5_ADDR_OF(query_adapter_out, out,
			    query_adapter_struct.vsd_contd_psid),
	       MLX5_FLD_SZ_BYTES(query_adapter_out,
				 query_adapter_struct.vsd_contd_psid));

out_out:
	kfree(out);

	return err;
}

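/* Return the IEEE vendor ID reported by the QUERY_ADAPTER command. */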
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_cmd_query_adapter(mdev, out, outlen);
	if (err)
		goto out_out;

	*vendor_id = MLX5_GET(query_adapter_out, out,
			      query_adapter_struct.ieee_vendor_id);

out_out:
	kfree(out);

	return err;
}
EXPORT_SYMBOL(mlx5_core_query_vendor_id);

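/*
 * Query the special contexts and cache the reserved lkey reported by
 * the firmware.
 */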
static int mlx5_core_query_special_contexts(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)];
	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)];
	int err;

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(query_special_contexts_in, in, opcode,
		 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	dev->special_contexts.resd_lkey = MLX5_GET(query_special_contexts_out,
						   out, resd_lkey);

	return err;
}

static int mlx5_get_qcam_reg(struct mlx5_core_dev *dev)
{
	return mlx5_query_qcam_reg(dev, dev->caps.qcam,
				   MLX5_QCAM_FEATURE_ENHANCED_FEATURES,
				   MLX5_QCAM_REGS_FIRST_128);
}

static int mlx5_get_pcam_reg(struct mlx5_core_dev *dev)
{
	return mlx5_query_pcam_reg(dev, dev->caps.pcam,
				   MLX5_PCAM_FEATURE_ENHANCED_FEATURES,
				   MLX5_PCAM_REGS_5000_TO_507F);
}

static int mlx5_get_mcam_reg(struct mlx5_core_dev *dev)
{
	return mlx5_query_mcam_reg(dev, dev->caps.mcam,
				   MLX5_MCAM_FEATURE_ENHANCED_FEATURES,
				   MLX5_MCAM_REGS_FIRST_128);
}

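/*
 * Query the general HCA capabilities, then pull in each optional
 * capability group (offloads, ODP, atomics, RoCE, flow tables, eswitch,
 * QoS, TLS, IPsec, ...) and each capability-mask register (QCAM, MCAM,
 * PCAM) that the general capabilities advertise as supported.
 */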
int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
	int err;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
	if (err)
		return err;

	if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, pg)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, atomic)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, roce)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
		if (err)
			return err;
	}

	if ((MLX5_CAP_GEN(dev, port_type) ==
	    MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET &&
	    MLX5_CAP_GEN(dev, nic_flow_table)) ||
	    (MLX5_CAP_GEN(dev, port_type) == MLX5_CMD_HCA_CAP_PORT_TYPE_IB &&
	    MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, vport_group_manager)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, snapshot)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_SNAPSHOT);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_EOIB_OFFLOADS);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, debug)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_DEBUG);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, qos)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_QOS);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, qcam_reg)) {
		err = mlx5_get_qcam_reg(dev);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, mcam_reg)) {
		err = mlx5_get_mcam_reg(dev);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, pcam_reg)) {
		err = mlx5_get_pcam_reg(dev);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, tls_tx)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, event_cap)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_EVENT);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, ipsec_offload)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_IPSEC);
		if (err)
			return err;
	}

	err = mlx5_core_query_special_contexts(dev);
	if (err)
		return err;

	return 0;
}

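/* Issue the INIT_HCA command to initialize the device. */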
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(init_hca_out)] = {0};

	MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

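/*
 * TEARDOWN_HCA supports three profiles: the default graceful close
 * (below), a forced close, and a prepare-for-fast-teardown handshake;
 * the latter two are implemented by the helpers that follow.
 */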
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};

	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
	int force_state;
	int ret;

	if (!MLX5_CAP_GEN(dev, force_teardown)) {
		mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
		return -EOPNOTSUPP;
	}

	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
	MLX5_SET(teardown_hca_in, in, profile, MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE);

	ret = mlx5_cmd_exec_polling(dev, in, sizeof(in), out, sizeof(out));
	if (ret)
		return ret;

	force_state = MLX5_GET(teardown_hca_out, out, state);
	if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
		mlx5_core_err(dev, "teardown with force mode failed\n");
		return -EIO;
	}

	return 0;
}

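/*
 * Ask the firmware to prepare for a fast teardown, then wait up to
 * MLX5_FAST_TEARDOWN_WAIT_MS for the NIC interface to report disabled.
 */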
#define MLX5_FAST_TEARDOWN_WAIT_MS 3000
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
{
	unsigned long end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS;
	u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {};
	u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {};
	int state;
	int ret;

	if (!MLX5_CAP_GEN(dev, fast_teardown)) {
		mlx5_core_dbg(dev, "fast teardown is not supported in the firmware\n");
		return -EOPNOTSUPP;
	}

	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
	MLX5_SET(teardown_hca_in, in, profile,
		 MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN);

	ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (ret)
		return ret;

	state = MLX5_GET(teardown_hca_out, out, state);
	if (state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
		mlx5_core_warn(dev, "teardown with fast mode failed\n");
		return -EIO;
	}

	mlx5_set_nic_state(dev, MLX5_NIC_IFC_DISABLED);

	/* Loop until the device state turns to disabled. */
	end = jiffies + msecs_to_jiffies(delay_ms);
	do {
		if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
			break;

		pause("W", 1);
	} while (!time_after(jiffies, end));

	if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
		mlx5_core_err(dev, "NIC IFC still %d after %lums.\n",
			      mlx5_get_nic_state(dev), delay_ms);
		return -EIO;
	}
	return 0;
}

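/*
 * Enable or disable DC CNAK tracing, passing the physical address of
 * the trace buffer to the firmware.
 */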
int mlx5_core_set_dc_cnak_trace(struct mlx5_core_dev *dev, int enable,
				u64 addr)
{
	u32 in[MLX5_ST_SZ_DW(set_dc_cnak_trace_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(set_dc_cnak_trace_out)] = {0};
	__be64 be_addr;
	void *pas;

	MLX5_SET(set_dc_cnak_trace_in, in, opcode, MLX5_CMD_OP_SET_DC_CNAK_TRACE);
	MLX5_SET(set_dc_cnak_trace_in, in, enable, enable);
	pas = MLX5_ADDR_OF(set_dc_cnak_trace_in, in, pas);
	be_addr = cpu_to_be64(addr);
	memcpy(MLX5_ADDR_OF(cmd_pas, pas, pa_h), &be_addr, sizeof(be_addr));

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

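/*
 * Instruction codes for the MCC (Management Component Control) register
 * used by the firmware-flash state machine below.
 */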
enum mlxsw_reg_mcc_instruction {
	MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01,
	MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02,
	MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT = 0x03,
	MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT = 0x04,
	MLX5_REG_MCC_INSTRUCTION_ACTIVATE = 0x06,
	MLX5_REG_MCC_INSTRUCTION_CANCEL = 0x08,
};

static int mlx5_reg_mcc_set(struct mlx5_core_dev *dev,
			    enum mlxsw_reg_mcc_instruction instr,
			    u16 component_index, u32 update_handle,
			    u32 component_size)
{
	u32 out[MLX5_ST_SZ_DW(mcc_reg)];
	u32 in[MLX5_ST_SZ_DW(mcc_reg)];

	memset(in, 0, sizeof(in));

	MLX5_SET(mcc_reg, in, instruction, instr);
	MLX5_SET(mcc_reg, in, component_index, component_index);
	MLX5_SET(mcc_reg, in, update_handle, update_handle);
	MLX5_SET(mcc_reg, in, component_size, component_size);

	return mlx5_core_access_reg(dev, in, sizeof(in), out,
				    sizeof(out), MLX5_REG_MCC, 0, 1);
}

static int mlx5_reg_mcc_query(struct mlx5_core_dev *dev,
			      u32 *update_handle, u8 *error_code,
			      u8 *control_state)
{
	u32 out[MLX5_ST_SZ_DW(mcc_reg)];
	u32 in[MLX5_ST_SZ_DW(mcc_reg)];
	int err;

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));
	MLX5_SET(mcc_reg, in, update_handle, *update_handle);

	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
				   sizeof(out), MLX5_REG_MCC, 0, 0);
	if (err)
		goto out;

	*update_handle = MLX5_GET(mcc_reg, out, update_handle);
	*error_code = MLX5_GET(mcc_reg, out, error_code);
	*control_state = MLX5_GET(mcc_reg, out, control_state);

out:
	return err;
}

static int mlx5_reg_mcda_set(struct mlx5_core_dev *dev,
			     u32 update_handle,
			     u32 offset, u16 size,
			     u8 *data)
{
	int err, in_size = MLX5_ST_SZ_BYTES(mcda_reg) + size;
	u32 out[MLX5_ST_SZ_DW(mcda_reg)];
	int i, j, dw_size = size >> 2;
	__be32 data_element;
	u32 *in;

	in = kzalloc(in_size, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(mcda_reg, in, update_handle, update_handle);
	MLX5_SET(mcda_reg, in, offset, offset);
	MLX5_SET(mcda_reg, in, size, size);

	/* Copy the payload into the register data area as big-endian dwords. */
	for (i = 0; i < dw_size; i++) {
		j = i * 4;
		data_element = htonl(*(u32 *)&data[j]);
		memcpy(MLX5_ADDR_OF(mcda_reg, in, data) + j, &data_element, 4);
	}

	err = mlx5_core_access_reg(dev, in, in_size, out,
				   sizeof(out), MLX5_REG_MCDA, 0, 1);
	kfree(in);
	return err;
}

static int mlx5_reg_mcqi_query(struct mlx5_core_dev *dev,
			       u16 component_index,
			       u32 *max_component_size,
			       u8 *log_mcda_word_size,
			       u16 *mcda_max_write_size)
{
	u32 out[MLX5_ST_SZ_DW(mcqi_reg) + MLX5_ST_SZ_DW(mcqi_cap)];
	int offset = MLX5_ST_SZ_DW(mcqi_reg);
	u32 in[MLX5_ST_SZ_DW(mcqi_reg)];
	int err;

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(mcqi_reg, in, component_index, component_index);
	MLX5_SET(mcqi_reg, in, data_size, MLX5_ST_SZ_BYTES(mcqi_cap));

	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
				   sizeof(out), MLX5_REG_MCQI, 0, 0);
	if (err)
		goto out;

	*max_component_size = MLX5_GET(mcqi_cap, out + offset, max_component_size);
	*log_mcda_word_size = MLX5_GET(mcqi_cap, out + offset, log_mcda_word_size);
	*mcda_max_write_size = MLX5_GET(mcqi_cap, out + offset, mcda_max_write_size);

out:
	return err;
}

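/*
 * Glue between the generic mlxfw flashing framework and mlx5: each
 * callback recovers the mlx5_core_dev from the embedded mlxfw_dev and
 * issues the corresponding MCQI/MCC/MCDA register access.
 */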
struct mlx5_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlx5_core_dev *mlx5_core_dev;
};

static int mlx5_component_query(struct mlxfw_dev *mlxfw_dev,
				u16 component_index, u32 *p_max_size,
				u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	return mlx5_reg_mcqi_query(dev, component_index, p_max_size,
				   p_align_bits, p_max_write_size);
}

static int mlx5_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
	u8 control_state, error_code;
	int err;

	*fwhandle = 0;
	err = mlx5_reg_mcc_query(dev, fwhandle, &error_code, &control_state);
	if (err)
		return err;

	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
				0, *fwhandle, 0);
}

static int mlx5_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				     u16 component_index, u32 component_size)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
				component_index, fwhandle, component_size);
}

static int mlx5_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				   u8 *data, u16 size, u32 offset)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	return mlx5_reg_mcda_set(dev, fwhandle, offset, size, data);
}

static int mlx5_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				     u16 component_index)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
				component_index, fwhandle, 0);
}

static int mlx5_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_ACTIVATE, 0,
				fwhandle, 0);
}

static int mlx5_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				enum mlxfw_fsm_state *fsm_state,
				enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
	u8 control_state, error_code;
	int err;

	err = mlx5_reg_mcc_query(dev, &fwhandle, &error_code, &control_state);
	if (err)
		return err;

	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

static void mlx5_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_CANCEL, 0, fwhandle, 0);
}

static void mlx5_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			 fwhandle, 0);
}

static const struct mlxfw_dev_ops mlx5_mlxfw_dev_ops = {
	.component_query = mlx5_component_query,
	.fsm_lock = mlx5_fsm_lock,
	.fsm_component_update = mlx5_fsm_component_update,
	.fsm_block_download = mlx5_fsm_block_download,
	.fsm_component_verify = mlx5_fsm_component_verify,
	.fsm_activate = mlx5_fsm_activate,
	.fsm_query_state = mlx5_fsm_query_state,
	.fsm_cancel = mlx5_fsm_cancel,
	.fsm_release = mlx5_fsm_release
};

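/*
 * Flash a firmware image, provided the running firmware exposes the
 * MCQI, MCC and MCDA registers required by the mlxfw state machine.
 */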
int mlx5_firmware_flash(struct mlx5_core_dev *dev,
			const struct firmware *firmware)
{
	struct mlx5_mlxfw_dev mlx5_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlx5_mlxfw_dev_ops,
			.psid = dev->board_id,
			.psid_size = strlen(dev->board_id),
		},
		.mlx5_core_dev = dev
	};

	if (!MLX5_CAP_GEN(dev, mcam_reg) ||
	    !MLX5_CAP_MCAM_REG(dev, mcqi) ||
	    !MLX5_CAP_MCAM_REG(dev, mcc) ||
	    !MLX5_CAP_MCAM_REG(dev, mcda)) {
		pr_info("%s flashing isn't supported by the running FW\n", __func__);
		return -EOPNOTSUPP;
	}

	return mlxfw_firmware_flash(&mlx5_mlxfw_dev.mlxfw_dev, firmware);
}