/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, v.1, (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2014-2017 Cavium, Inc.
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License, v.1, (the "License").
 *
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License
 * at http://opensource.org/licenses/CDDL-1.0
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright 2018 Joyent, Inc.
 */

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
#include "nvm_map.h"
#include "nvm_cfg.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "reg_addr.h"
#include "ecore_hw.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_iov_api.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"
#include "ecore_sp_commands.h"

#define CHIP_MCP_RESP_ITER_US	10
#define EMUL_MCP_RESP_ITER_US	(1000 * 1000)

#define ECORE_DRV_MB_MAX_RETRIES	(500 * 1000) /* Account for 5 sec */
#define ECORE_MCP_RESET_RETRIES		(50 * 1000) /* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
	ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
		 _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_SHIFT)

#define MCP_BYTES_PER_MBIT_SHIFT	17

#ifndef ASIC_ONLY
static int loaded;
static int loaded_port[MAX_NUM_PORTS] = { 0 };
#endif

bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;
	return true;
}

void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

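/*
 * Copy the MFW mailbox from shared memory into the driver's current-view
 * buffer (mfw_mb_cur), converting each dword from big-endian shmem order
 * to CPU order. The first dword at mfw_mb_addr holds the mailbox length,
 * so the copy starts one dword past it.
 */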
void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	OSAL_BE32 tmp;
	u32 i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
		return;
#endif

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = ecore_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->mfw_mb_addr +
			       (i << 2) + sizeof(u32));

		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			OSAL_BE32_TO_CPU(tmp);
	}
}

enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
#ifdef CONFIG_ECORE_LOCK_ALLOC
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
#endif
	}
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
	p_hwfn->mcp_info = OSAL_NULL;

	return ECORE_SUCCESS;
}

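/*
 * Read the shared-memory "offsize" words to locate the per-PF driver
 * mailbox and the MFW mailbox, and capture the initial driver mailbox
 * sequence, FW pulse sequence, and MCP reset (POR) history counter.
 */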
enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
		p_info->public_base = 0;
		return ECORE_INVAL;
	}
#endif

	p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return ECORE_INVAL;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address */
	mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
					      p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
					 MISCS_REG_GENERIC_POR_0);

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(*p_hwfn->mcp_info));
	if (!p_hwfn->mcp_info)
		goto err;
	p_info = p_hwfn->mcp_info;

	if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates
		 * that the MCP is not initialized
		 */
		return ECORE_SUCCESS;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_cur)
		goto err;

	/* Initialize the MFW spinlock */
#ifdef CONFIG_ECORE_LOCK_ALLOC
	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock);
#endif
	OSAL_SPIN_LOCK_INIT(&p_info->lock);
	OSAL_SPIN_LOCK_INIT(&p_info->link_lock);

	return ECORE_SUCCESS;

err:
	DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
	ecore_mcp_free(p_hwfn);
	return ECORE_NOMEM;
}

/* Locks the MFW mailbox of a PF to ensure a single access.
 * The lock is achieved in most cases by holding a spinlock, causing other
 * threads to wait till a previous access is done.
 * In some cases (currently when the [UN]LOAD_REQ commands are sent), the
 * single access is achieved by setting a blocking flag, which causes the
 * competing contexts' attempts to send a mailbox command to fail.
 */
static enum _ecore_status_t ecore_mcp_mb_lock(struct ecore_hwfn *p_hwfn,
					      u32 cmd)
{
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);

	/* The spinlock shouldn't be acquired when the mailbox command is
	 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
	 * pending [UN]LOAD_REQ command of another PF together with a spinlock
	 * (i.e. interrupts are disabled) can lead to a deadlock.
	 * It is assumed that for a single PF, no other mailbox commands can be
	 * sent from another context while sending LOAD_REQ, and that any
	 * parallel commands to UNLOAD_REQ can be cancelled.
	 */
	if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
		p_hwfn->mcp_info->block_mb_sending = false;

	/* There's at least a single command that is sent by ecore during the
	 * load sequence [expectation of MFW].
	 */
	if ((p_hwfn->mcp_info->block_mb_sending) &&
	    (cmd != DRV_MSG_CODE_FEATURE_SUPPORT)) {
		DP_NOTICE(p_hwfn, false,
			  "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
			  cmd);
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
		return ECORE_BUSY;
	}

	if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
		p_hwfn->mcp_info->block_mb_sending = true;
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
	}

	return ECORE_SUCCESS;
}

static void ecore_mcp_mb_unlock(struct ecore_hwfn *p_hwfn, u32 cmd)
{
	if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
}

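/*
 * Ask the MFW to reset itself: write MCP_RESET with a fresh sequence
 * number to the driver mailbox, then poll the POR history counter
 * (MISCS_REG_GENERIC_POR_0) until it changes, which indicates that the
 * reset actually took place.
 */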
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
	u32 delay = CHIP_MCP_RESP_ITER_US;
	u32 org_mcp_reset_seq, cnt = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
#endif

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = ecore_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Set drv command along with the updated sequence */
	org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		/* Give the MFW up to 500 msec (50 * 1000 * 10 usec) */
	} while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
						MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < ECORE_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = ECORE_AGAIN;
	}

	ecore_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);

	return rc;
}

void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;

	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
	cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
	cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
	cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);

	DP_NOTICE(p_hwfn, false,
		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
}

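/*
 * Perform a single mailbox transaction with the MFW: detect an MCP reset
 * via the POR history counter (re-reading the shmem offsets if one
 * occurred), write the param and the command tagged with a fresh sequence
 * number, then poll fw_mb_header until the echoed sequence matches or the
 * retry budget is exhausted.
 */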
static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 cmd, u32 param,
					     u32 *o_mcp_resp, u32 *o_mcp_param)
{
	u32 delay = CHIP_MCP_RESP_ITER_US;
	u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
	u32 seq, cnt = 1, actual_mb_seq __unused;
	enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
	/* There is a built-in delay of 100usec in each MFW response read */
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		max_retries /= 10;
#endif

	/* Get actual driver mailbox sequence */
	actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			DRV_MSG_SEQ_NUMBER_MASK;

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
		ecore_load_mcp_offsets(p_hwfn, p_ptt);
		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
	seq = ++p_hwfn->mcp_info->drv_mb_seq;

	/* Set drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);

	/* Set drv command along with the updated sequence */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "wrote command (%x) to MFW MB param 0x%08x\n",
		   (cmd | seq), param);

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		*o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);

		/* Give the MFW up to 5 seconds (500 * 1000 * 10 usec) */
	} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
		 (cnt++ < max_retries));

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "[after %d usec] read (%x) seq is (%x) from FW MB\n",
		   cnt * delay, *o_mcp_resp, seq);

	/* Is this a reply to our command? */
	if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
		*o_mcp_resp &= FW_MSG_CODE_MASK;
		/* Get the MCP param */
		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
	} else {
		/* FW BUG! */
		DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
		       cmd, param);
		ecore_mcp_print_cpu_info(p_hwfn, p_ptt);
		*o_mcp_resp = 0;
		rc = ECORE_AGAIN;
		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
	}
	return rc;
}

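/*
 * Extended mailbox transaction: in addition to the cmd/param pair, copy
 * an optional payload into the union_data area of the driver mailbox
 * before sending the command, and copy an optional response payload out
 * of it afterwards, all under the mailbox lock.
 */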
static enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt,
						    struct ecore_mcp_mb_params *p_mb_params)
{
	union drv_union_data union_data;
	u32 union_data_addr;
	enum _ecore_status_t rc;

	/* MCP not initialized */
	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}

	if (p_mb_params->data_src_size > sizeof(union_data) ||
	    p_mb_params->data_dst_size > sizeof(union_data)) {
		DP_ERR(p_hwfn,
		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
		       p_mb_params->data_src_size, p_mb_params->data_dst_size,
		       sizeof(union_data));
		return ECORE_INVAL;
	}

	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  offsetof(struct public_drv_mb, union_data);

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = ecore_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
	if (rc != ECORE_SUCCESS)
		return rc;

	OSAL_MEM_ZERO(&union_data, sizeof(union_data));
	if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
		OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
			    p_mb_params->data_src_size);
	ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
			sizeof(union_data));

	rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
			      p_mb_params->param, &p_mb_params->mcp_resp,
			      &p_mb_params->mcp_param);

	if (p_mb_params->p_data_dst != OSAL_NULL &&
	    p_mb_params->data_dst_size)
		ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				  union_data_addr, p_mb_params->data_dst_size);

	ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);

	return rc;
}

enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt, u32 cmd, u32 param,
				   u32 *o_mcp_resp, u32 *o_mcp_param)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
			loaded--;
			loaded_port[p_hwfn->port_id]--;
			DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
				   loaded);
		}
		return ECORE_SUCCESS;
	}
#endif

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}

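/*
 * Send an NVM write command: the caller's buffer (bounded by the
 * union_data size) is passed as the mailbox payload along with cmd/param.
 */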
enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 i_txn_size,
					  u32 *i_buf)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_src = i_buf;
	mb_params.data_src_size = (u8)i_txn_size;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}

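/*
 * Send an NVM read command: always request the maximal buffer
 * (MCP_DRV_NVM_BUF_LEN) since the actual transaction size is only known
 * from the response, then copy that many bytes to the caller's buffer.
 */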
enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 *o_txn_size,
					  u32 *o_buf)
{
	struct ecore_mcp_mb_params mb_params;
	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = raw_data;

	/* Use the maximal value since the actual one is part of the response */
	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	OSAL_MEMCPY(o_buf, raw_data, *o_txn_size);

	return ECORE_SUCCESS;
}

#ifndef ASIC_ONLY
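/*
 * Emulation-only stand-in for a LOAD_REQ response: derive the load phase
 * (engine/port/function) from the static load counters instead of asking
 * the (absent) MFW.
 */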
static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
				    u32 *p_load_code)
{
	static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	if (!loaded) {
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
	} else if (!loaded_port[p_hwfn->port_id]) {
		load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
	} else {
		load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	/* On CMT, always report an engine load phase */
	if (p_hwfn->p_dev->num_hwfns > 1)
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	*p_load_code = load_phase;
	loaded++;
	loaded_port[p_hwfn->port_id]++;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
		   *p_load_code, loaded, p_hwfn->port_id,
		   loaded_port[p_hwfn->port_id]);
}
#endif

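/*
 * Decide whether a force load is allowed. Unless overridden, it is
 * permitted only when an OS driver replaces a preboot driver, or a kdump
 * driver replaces an OS driver.
 */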
static bool
ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
			 enum ecore_override_force_load override_force_load)
{
	bool can_force_load = false;

	switch (override_force_load) {
	case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
		can_force_load = true;
		break;
	case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
		can_force_load = false;
		break;
	default:
		can_force_load = (drv_role == DRV_ROLE_OS &&
				  exist_drv_role == DRV_ROLE_PREBOOT) ||
				 (drv_role == DRV_ROLE_KDUMP &&
				  exist_drv_role == DRV_ROLE_OS);
		break;
	}

	return can_force_load;
}

static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
						      struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			   &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false,
			  "Failed to send cancel load request, rc = %d\n", rc);

	return rc;
}

#define CONFIG_ECORE_L2_BITMAP_IDX	(0x1 << 0)
#define CONFIG_ECORE_SRIOV_BITMAP_IDX	(0x1 << 1)
#define CONFIG_ECORE_ROCE_BITMAP_IDX	(0x1 << 2)
#define CONFIG_ECORE_IWARP_BITMAP_IDX	(0x1 << 3)
#define CONFIG_ECORE_FCOE_BITMAP_IDX	(0x1 << 4)
#define CONFIG_ECORE_ISCSI_BITMAP_IDX	(0x1 << 5)
#define CONFIG_ECORE_LL2_BITMAP_IDX	(0x1 << 6)

static u32 ecore_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

#ifdef CONFIG_ECORE_L2
	config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_SRIOV
	config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ROCE
	config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_IWARP
	config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_FCOE
	config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ISCSI
	config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_LL2
	config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
#endif

	return config_bitmap;
}

struct ecore_load_req_in_params {
	u8 hsi_ver;
#define ECORE_LOAD_REQ_HSI_VER_DEFAULT	0
#define ECORE_LOAD_REQ_HSI_VER_1	1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct ecore_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};

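/*
 * Build and send a single LOAD_REQ mailbox: pack the in-params into a
 * load_req_stc payload, and, unless the old HSI (version 1) is used,
 * parse the load_rsp_stc payload of the response into the out-params.
 */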
static enum _ecore_status_t
__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		     struct ecore_load_req_in_params *p_in_params,
		     struct ecore_load_req_out_params *p_out_params)
{
	struct ecore_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&load_req, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE,
			    p_in_params->drv_role);
	ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
			    p_in_params->timeout_val);
	ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
			    p_in_params->force_cmd);
	ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
			    p_in_params->avoid_eng_reset);

	hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0, load_req.drv_ver_1,
			   load_req.fw_ver, load_req.misc0,
			   ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   ECORE_MFW_GET_FIELD(load_req.misc0,
					       LOAD_REQ_LOCK_TO),
			   ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   ECORE_MFW_GET_FIELD(load_req.misc0,
					       LOAD_REQ_FLAGS0));

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0, load_rsp.drv_ver_1,
			   load_rsp.fw_ver, load_rsp.misc0,
			   ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   ECORE_MFW_GET_FIELD(load_rsp.misc0,
					       LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
			ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
			ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
			ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
			LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
						   enum ecore_drv_role drv_role,
						   u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case ECORE_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case ECORE_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

enum ecore_load_req_force {
	ECORE_LOAD_REQ_FORCE_NONE,
	ECORE_LOAD_REQ_FORCE_PF,
	ECORE_LOAD_REQ_FORCE_ALL,
};

static enum _ecore_status_t
ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
			enum ecore_load_req_force force_cmd,
			u8 *p_mfw_force_cmd)
{
	switch (force_cmd) {
	case ECORE_LOAD_REQ_FORCE_NONE:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
		break;
	case ECORE_LOAD_REQ_FORCE_PF:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
		break;
	case ECORE_LOAD_REQ_FORCE_ALL:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected force value %d\n", force_cmd);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_load_req_params *p_params)
{
	struct ecore_load_req_out_params out_params;
	struct ecore_load_req_in_params in_params;
	u8 mfw_drv_role, mfw_force_cmd;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
		return ECORE_SUCCESS;
	}
#endif

	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = ECORE_VERSION;
	in_params.drv_ver_1 = ecore_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	rc = ecore_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
	if (rc != ECORE_SUCCESS)
		return rc;

	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	rc = ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
				     &mfw_force_cmd);
	if (rc != ECORE_SUCCESS)
		return rc;

	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");

		/* The previous load request set the mailbox blocking */
		p_hwfn->mcp_info->block_mb_sending = false;

		in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
		OSAL_MEM_ZERO(&out_params, sizeof(out_params));
		rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
					  &out_params);
		if (rc != ECORE_SUCCESS)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		/* The previous load request set the mailbox blocking */
		p_hwfn->mcp_info->block_mb_sending = false;

		if (ecore_mcp_can_force_load(in_params.drv_role,
					     out_params.exist_drv_role,
					     p_params->override_force_load)) {
			DP_INFO(p_hwfn,
				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}]\n",
				in_params.drv_role, in_params.fw_ver,
				in_params.drv_ver_1, in_params.drv_ver_0,
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_1,
				out_params.exist_drv_ver_0);
			DP_INFO(p_hwfn, "Sending a force load request\n");

			rc = ecore_get_mfw_force_cmd(p_hwfn,
						     ECORE_LOAD_REQ_FORCE_ALL,
						     &mfw_force_cmd);
			if (rc != ECORE_SUCCESS)
				return rc;

			in_params.force_cmd = mfw_force_cmd;
			OSAL_MEM_ZERO(&out_params, sizeof(out_params));
			rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
						  &out_params);
			if (rc != ECORE_SUCCESS)
				return rc;
		} else {
			DP_NOTICE(p_hwfn, false,
				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
				  in_params.drv_role, in_params.fw_ver,
				  in_params.drv_ver_0, in_params.drv_ver_1,
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);
			DP_NOTICE(p_hwfn, false,
				  "Avoid sending a force load request to prevent disruption of active PFs\n");

			ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
			return ECORE_BUSY;
		}
	}

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 * This is unexpected since a quasi-FLR request was
			 * previously sent as part of ecore_hw_prepare().
			 */
			DP_NOTICE(p_hwfn, false,
				  "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
			return ECORE_INVAL;
		}
		break;
	case FW_MSG_CODE_DRV_LOAD_REFUSED_PDA:
	case FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG:
	case FW_MSG_CODE_DRV_LOAD_REFUSED_HSI:
	case FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT:
		DP_NOTICE(p_hwfn, false,
			  "MFW refused a load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return ECORE_BUSY;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected response to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return ECORE_BUSY;
	}

	p_params->load_code = out_params.load_code;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	u32 wol_param, mcp_resp, mcp_param;

	switch (p_hwfn->p_dev->wol_config) {
	case ECORE_OV_WOL_DISABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
		break;
	case ECORE_OV_WOL_ENABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
		break;
	default:
		DP_NOTICE(p_hwfn, true,
			  "Unknown WoL configuration %02x\n",
			  p_hwfn->p_dev->wol_config);
		/* Fallthrough */
	case ECORE_OV_WOL_DEFAULT:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
	}

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
			     &mcp_resp, &mcp_param);
}

enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_mb_params mb_params;
	struct mcp_mac wol_mac;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

	/* Set the primary MAC if WoL is enabled */
	if (p_hwfn->p_dev->wol_config == ECORE_OV_WOL_ENABLED) {
		u8 *p_mac = p_hwfn->p_dev->wol_mac;

		OSAL_MEM_ZERO(&wol_mac, sizeof(wol_mac));
		wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
		wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
				    p_mac[4] << 8 | p_mac[5];

		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFDOWN),
			   "Setting WoL MAC: %02x:%02x:%02x:%02x:%02x:%02x --> [%08x,%08x]\n",
			   p_mac[0], p_mac[1], p_mac[2], p_mac[3], p_mac[4],
			   p_mac[5], wol_mac.mac_upper, wol_mac.mac_lower);

		mb_params.p_data_src = &wol_mac;
		mb_params.data_src_size = sizeof(wol_mac);
	}

	return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

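/*
 * Read the mcp_vf_disabled bitmap from the path section of shared memory
 * to learn which VFs the MFW has FLR-ed, and notify the IOV code so it
 * can schedule the cleanup.
 */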
static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     ECORE_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
					   path_addr +
					   offsetof(struct public_path,
						    mcp_vf_disabled) +
					   sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		OSAL_VF_FLR_UPDATE(p_hwfn);
}

enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to pass ACK for VF flr to MFW\n");
		return ECORE_TIMEOUT;
	}

	/* TMP - clear the ACK bits; should be done by MFW */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		ecore_wr(p_hwfn, p_ptt,
			 func_addr +
			 offsetof(struct public_func, drv_ack_vf_disabled) +
			 i * sizeof(u32), 0);

	return rc;
}

static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = ecore_rd(p_hwfn, p_ptt,
				     p_hwfn->mcp_info->port_addr +
				     offsetof(struct public_port,
					      transceiver_data));

	DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
					    offsetof(struct public_port,
						     transceiver_data)));

	transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
}

static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_mcp_link_state *p_link)
{
	u32 eee_status, val;

	p_link->eee_adv_caps = 0;
	p_link->eee_lp_adv_caps = 0;
	eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			      offsetof(struct public_port, eee_status));
	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_SHIFT;
	if (val & EEE_1G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_SHIFT;
	if (val & EEE_1G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
}

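/*
 * Refresh the cached link state from the port's link_status word in
 * shared memory (or zero it on reset), derive speed/duplex, re-apply the
 * min/max bandwidth configuration, decode partner abilities and flow
 * control, and notify the upper layer. Runs under link_lock to serialize
 * against attention handling.
 */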
static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 bool b_reset)
{
	struct ecore_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = ecore_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  offsetof(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status, (u32)(p_hwfn->mcp_info->port_addr +
					 offsetof(struct public_port,
						  link_status)));
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}

	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	/* Keep the total line speed in line_speed, since p_link->speed is
	 * subsequently adjusted according to the bandwidth allocation.
	 */
	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);

	/* Min bandwidth configuration */
	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
	ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
					      p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
		ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

	OSAL_LINK_UPDATE(p_hwfn);
out:
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
}

enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					bool b_up)
{
	struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct ecore_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cmd;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		return ECORE_SUCCESS;
#endif

	/* Set the shmem configuration according to params */
	OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;
	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
		if (params->eee.enable)
			phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
		if (params->eee.tx_lpi_enable)
			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
		if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
		if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
				    EEE_TX_TIMER_USEC_SHIFT) &
				   EEE_TX_TIMER_USEC_MASK;
	}

	p_hwfn->b_drv_link_init = b_up;

	if (b_up)
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
			   phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
			   phy_cfg.loopback_mode);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Mimic link-change attention, done for several reasons:
	 * - On reset, there's no guarantee MFW would trigger
	 *   an attention.
	 * - On initialization, older MFWs might not indicate link change
	 *   during LFA, so we'll never get an UP indication.
	 */
	ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return rc;
}

u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
						 PUBLIC_PATH);
	path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
	path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));

	proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
				 path_addr +
				 offsetof(struct public_path, process_kill)) &
			PROCESS_KILL_COUNTER_MASK;

	return proc_kill_cnt;
}

static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 proc_kill_cnt;

	/* Prevent possible attentions/interrupts during the recovery handling
	 * and till its load phase, during which they will be re-enabled.
	 */
	ecore_int_igu_disable_int(p_hwfn, p_ptt);

	DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");

	/* The following operations should be done once, and thus in CMT mode
	 * are carried out by only the first HW function.
	 */
	if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
		return;

	if (p_dev->recov_in_prog) {
		DP_NOTICE(p_hwfn, false,
			  "Ignoring the indication since a recovery process is already in progress\n");
		return;
	}

	p_dev->recov_in_prog = true;

	proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
	DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);

	OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
}

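/*
 * Handle an MFW request for protocol statistics: map the MFW message
 * type to the matching HSI stats type, gather the statistics from the
 * OS layer, and send them back via the GET_STATS mailbox.
 */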
static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  enum MFW_DRV_MSG_TYPE type)
{
	enum ecore_mcp_protocol_type stats_type __unused;
	union ecore_mcp_protocol_stats stats;
	struct ecore_mcp_mb_params mb_params;
	u32 hsi_param;
	enum _ecore_status_t rc;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = ECORE_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	case MFW_DRV_MSG_GET_FCOE_STATS:
		stats_type = ECORE_MCP_FCOE_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
		break;
	case MFW_DRV_MSG_GET_ISCSI_STATS:
		stats_type = ECORE_MCP_ISCSI_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
		break;
	case MFW_DRV_MSG_GET_RDMA_STATS:
		stats_type = ECORE_MCP_RDMA_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
		break;
	default:
		DP_NOTICE(p_hwfn, false, "Invalid protocol type %d\n", type);
		return;
	}

	OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	mb_params.p_data_src = &stats;
	mb_params.data_src_size = sizeof(stats);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
}

static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
				    struct public_func *p_shmem_info)
{
	struct ecore_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	/* TODO - bandwidth min/max should have valid values of 1-100,
	 * as well as some indication that the feature is disabled.
	 * Until MFW/qlediag enforce those limitations, assume there is
	 * always a limit, and clamp the value to min `1' and max `100'
	 * if it isn't in range.
	 */
	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}

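/*
 * Copy this PF's public_func section out of shared memory, dword by
 * dword, bounded by the smaller of the structure size and the section
 * size reported by the offsize word; returns the number of bytes read.
 */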
static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct public_func *p_data,
				    int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	OSAL_MEM_ZERO(p_data, sizeof(*p_data));

	size = OSAL_MIN_T(u32, sizeof(*p_data),
			  SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
					      func_addr + (i << 2));

	return size;
}

#if 0
/* This was introduced with FW 8.10.5.0; Hopefully this is only temp. */
enum _ecore_status_t ecore_hw_init_first_eth(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u8 *p_pf)
{
	struct public_func shmem_info;
	int i;

	/* Find first Ethernet interface in port */
	for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->p_dev);
	     i += p_hwfn->p_dev->num_ports_in_engine) {
		ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
					 MCP_PF_ID_BY_REL(p_hwfn, i));

		if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) ==
		    FUNC_MF_CFG_PROTOCOL_ETHERNET) {
			*p_pf = (u8)i;
			return ECORE_SUCCESS;
		}
	}

	/* This might actually be valid somewhere in the future but for now
	 * it's highly unlikely.
	 */
	DP_NOTICE(p_hwfn, false,
		  "Failed to find on port an ethernet interface in MF_SI mode\n");

	return ECORE_INVAL;
}
#endif
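/*
 * Handle a bandwidth-update notification from the MFW: re-read the
 * min/max bandwidth from the function's shmem config, re-apply it, and
 * ack the MFW.
 */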
static void
ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
				 MCP_PF_ID(p_hwfn));

	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);

	ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		      &param);
}

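/*
 * Handle an S-tag update notification from the MFW: re-read the outer
 * VLAN from the function's shmem config, program it into the NIG LLH
 * register in MF SD mode, propagate it via a PF-update ramrod, and ack
 * the MFW.
 */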
ecore_mcp_update_stag(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt)1592 static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn,
1593 struct ecore_ptt *p_ptt)
1594 {
1595 struct public_func shmem_info;
1596 u32 resp = 0, param = 0;
1597
1598 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1599 MCP_PF_ID(p_hwfn));
1600
1601 p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
1602 FUNC_MF_CFG_OV_STAG_MASK;
1603 p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
1604 if ((p_hwfn->hw_info.hw_mode & (1 << MODE_MF_SD)) &&
1605 (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET)) {
1606 ecore_wr(p_hwfn, p_ptt,
1607 NIG_REG_LLH_FUNC_TAG_VALUE,
1608 p_hwfn->hw_info.ovlan);
1609 ecore_sp_pf_update_stag(p_hwfn);
1610 }
1611
1612 OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN);
1613
1614 /* Acknowledge the MFW */
1615 ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
1616 &resp, ¶m);
1617 }
1618
ecore_mcp_handle_fan_failure(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt)1619 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
1620 struct ecore_ptt *p_ptt)
1621 {
1622 /* A single notification should be sent to the upper driver in CMT mode */
1623 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1624 return;
1625
1626 DP_NOTICE(p_hwfn, false,
1627 "Fan failure was detected on the network interface card and it's going to be shut down.\n");
1628
1629 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
1630 }
1631
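/* Descriptor for a single mdump sub-command sent to the MFW mailbox. */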
1632 struct ecore_mdump_cmd_params {
1633 u32 cmd;
1634 void *p_data_src;
1635 u8 data_src_size;
1636 void *p_data_dst;
1637 u8 data_dst_size;
1638 u32 mcp_resp;
1639 };
1640
1641 static enum _ecore_status_t
1642 ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1643 struct ecore_mdump_cmd_params *p_mdump_cmd_params)
1644 {
1645 struct ecore_mcp_mb_params mb_params;
1646 enum _ecore_status_t rc;
1647
1648 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1649 mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1650 mb_params.param = p_mdump_cmd_params->cmd;
1651 mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
1652 mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
1653 mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
1654 mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
1655 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1656 if (rc != ECORE_SUCCESS)
1657 return rc;
1658
1659 p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
1660
1661 if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1662 DP_INFO(p_hwfn,
1663 "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
1664 p_mdump_cmd_params->cmd);
1665 rc = ECORE_NOTIMPL;
1666 } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
1667 DP_INFO(p_hwfn,
1668 "The mdump command is not supported by the MFW\n");
1669 rc = ECORE_NOTIMPL;
1670 }
1671
1672 return rc;
1673 }
1674
1675 static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
1676 struct ecore_ptt *p_ptt)
1677 {
1678 struct ecore_mdump_cmd_params mdump_cmd_params;
1679
1680 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1681 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
1682
1683 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1684 }
1685
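/* Provide the MFW with the epoch value to be recorded with mdump logs. */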
1686 enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
1687 struct ecore_ptt *p_ptt,
1688 u32 epoch)
1689 {
1690 struct ecore_mdump_cmd_params mdump_cmd_params;
1691
1692 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1693 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
1694 mdump_cmd_params.p_data_src = &epoch;
1695 mdump_cmd_params.data_src_size = sizeof(epoch);
1696
1697 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1698 }
1699
1700 enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
1701 struct ecore_ptt *p_ptt)
1702 {
1703 struct ecore_mdump_cmd_params mdump_cmd_params;
1704
1705 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1706 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
1707
1708 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1709 }
1710
1711 static enum _ecore_status_t
1712 ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1713 struct mdump_config_stc *p_mdump_config)
1714 {
1715 struct ecore_mdump_cmd_params mdump_cmd_params;
1716 enum _ecore_status_t rc;
1717
1718 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1719 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
1720 mdump_cmd_params.p_data_dst = p_mdump_config;
1721 mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);
1722
1723 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1724 if (rc != ECORE_SUCCESS)
1725 return rc;
1726
1727 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1728 DP_INFO(p_hwfn,
1729 "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
1730 mdump_cmd_params.mcp_resp);
1731 rc = ECORE_UNKNOWN_ERROR;
1732 }
1733
1734 return rc;
1735 }
1736
1737 enum _ecore_status_t
1738 ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1739 struct ecore_mdump_info *p_mdump_info)
1740 {
1741 u32 addr, global_offsize, global_addr;
1742 struct mdump_config_stc mdump_config;
1743 enum _ecore_status_t rc;
1744
1745 OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
1746
1747 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1748 PUBLIC_GLOBAL);
1749 global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1750 global_addr = SECTION_ADDR(global_offsize, 0);
1751 p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
1752 global_addr +
1753 offsetof(struct public_global,
1754 mdump_reason));
1755
1756 if (p_mdump_info->reason) {
1757 rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
1758 if (rc != ECORE_SUCCESS)
1759 return rc;
1760
1761 p_mdump_info->version = mdump_config.version;
1762 p_mdump_info->config = mdump_config.config;
1763 p_mdump_info->epoch = mdump_config.epoc;
1764 p_mdump_info->num_of_logs = mdump_config.num_of_logs;
1765 p_mdump_info->valid_logs = mdump_config.valid_logs;
1766
1767 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1768 "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
1769 p_mdump_info->reason, p_mdump_info->version,
1770 p_mdump_info->config, p_mdump_info->epoch,
1771 p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
1772 } else {
1773 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1774 "MFW mdump info: reason %d\n", p_mdump_info->reason);
1775 }
1776
1777 return ECORE_SUCCESS;
1778 }
1779
1780 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
1781 struct ecore_ptt *p_ptt)
1782 {
1783 struct ecore_mdump_cmd_params mdump_cmd_params;
1784
1785 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1786 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;
1787
1788 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1789 }
1790
1791 enum _ecore_status_t
1792 ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1793 struct ecore_mdump_retain_data *p_mdump_retain)
1794 {
1795 struct ecore_mdump_cmd_params mdump_cmd_params;
1796 struct mdump_retain_data_stc mfw_mdump_retain;
1797 enum _ecore_status_t rc;
1798
1799 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1800 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
1801 mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
1802 mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);
1803
1804 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1805 if (rc != ECORE_SUCCESS)
1806 return rc;
1807
1808 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1809 DP_INFO(p_hwfn,
1810 "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
1811 mdump_cmd_params.mcp_resp);
1812 return ECORE_UNKNOWN_ERROR;
1813 }
1814
1815 p_mdump_retain->valid = mfw_mdump_retain.valid;
1816 p_mdump_retain->epoch = mfw_mdump_retain.epoch;
1817 p_mdump_retain->pf = mfw_mdump_retain.pf;
1818 p_mdump_retain->status = mfw_mdump_retain.status;
1819
1820 return ECORE_SUCCESS;
1821 }
1822
1823 enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
1824 struct ecore_ptt *p_ptt)
1825 {
1826 struct ecore_mdump_cmd_params mdump_cmd_params;
1827
1828 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1829 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN;
1830
1831 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1832 }
1833
1834 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
1835 struct ecore_ptt *p_ptt)
1836 {
1837 struct ecore_mdump_retain_data mdump_retain;
1838 enum _ecore_status_t rc;
1839
1840 /* In CMT mode - no need for more than a single acknowledgement to the
1841 * MFW, and no more than a single notification to the upper driver.
1842 */
1843 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1844 return;
1845
1846 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
1847 if (rc == ECORE_SUCCESS && mdump_retain.valid) {
1848 DP_NOTICE(p_hwfn, false,
1849 "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
1850 mdump_retain.epoch, mdump_retain.pf,
1851 mdump_retain.status);
1852 } else {
1853 DP_NOTICE(p_hwfn, false,
1854 "The MFW notified that a critical error occurred in the device\n");
1855 }
1856
1857 if (p_hwfn->p_dev->allow_mdump) {
1858 DP_NOTICE(p_hwfn, false,
1859 "Not acknowledging the notification to allow the MFW crash dump\n");
1860 return;
1861 }
1862
1863 DP_NOTICE(p_hwfn, false,
1864 "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
1865 ecore_mcp_mdump_ack(p_hwfn, p_ptt);
1866 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
1867 }
1868
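/*
 * Top-level MFW event handler: read the MFW mailbox, dispatch every
 * message that changed since the last read to its specific handler, and
 * then acknowledge all messages back to the MFW.
 */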
1869 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
1870 struct ecore_ptt *p_ptt)
1871 {
1872 struct ecore_mcp_info *info = p_hwfn->mcp_info;
1873 enum _ecore_status_t rc = ECORE_SUCCESS;
1874 bool found = false;
1875 u16 i;
1876
1877 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
1878
1879 /* Read Messages from MFW */
1880 ecore_mcp_read_mb(p_hwfn, p_ptt);
1881
1882 /* Compare current messages to old ones */
1883 for (i = 0; i < info->mfw_mb_length; i++) {
1884 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
1885 continue;
1886
1887 found = true;
1888
1889 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1890 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
1891 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
1892
1893 switch (i) {
1894 case MFW_DRV_MSG_LINK_CHANGE:
1895 ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
1896 break;
1897 case MFW_DRV_MSG_VF_DISABLED:
1898 ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
1899 break;
1900 case MFW_DRV_MSG_LLDP_DATA_UPDATED:
1901 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1902 ECORE_DCBX_REMOTE_LLDP_MIB);
1903 break;
1904 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
1905 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1906 ECORE_DCBX_REMOTE_MIB);
1907 break;
1908 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
1909 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1910 ECORE_DCBX_OPERATIONAL_MIB);
1911 break;
1912 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
1913 ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
1914 break;
1915 case MFW_DRV_MSG_ERROR_RECOVERY:
1916 ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
1917 break;
1918 case MFW_DRV_MSG_GET_LAN_STATS:
1919 case MFW_DRV_MSG_GET_FCOE_STATS:
1920 case MFW_DRV_MSG_GET_ISCSI_STATS:
1921 case MFW_DRV_MSG_GET_RDMA_STATS:
1922 ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
1923 break;
1924 case MFW_DRV_MSG_BW_UPDATE:
1925 ecore_mcp_update_bw(p_hwfn, p_ptt);
1926 break;
1927 case MFW_DRV_MSG_S_TAG_UPDATE:
1928 ecore_mcp_update_stag(p_hwfn, p_ptt);
1929 break;
1930 case MFW_DRV_MSG_FAILURE_DETECTED:
1931 ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
1932 break;
1933 case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
1934 ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
1935 break;
1936 case MFW_DRV_MSG_GET_TLV_REQ:
1937 OSAL_MFW_TLV_REQ(p_hwfn);
1938 break;
1939 default:
1940 DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
1941 rc = ECORE_INVAL;
1942 }
1943 }
1944
1945 /* ACK everything */
1946 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
1947 OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
1948
1949 /* The MFW expects the answer in BE, so we force the write in that format */
1950 ecore_wr(p_hwfn, p_ptt,
1951 info->mfw_mb_addr + sizeof(u32) +
1952 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
1953 sizeof(u32) + i * sizeof(u32), val);
1954 }
1955
1956 if (!found) {
1957 DP_NOTICE(p_hwfn, false,
1958 "Received an MFW message indication but no new message!\n");
1959 rc = ECORE_INVAL;
1960 }
1961
1962 /* Copy the new mfw messages into the shadow */
1963 OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
1964
1965 return rc;
1966 }
1967
1968 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
1969 struct ecore_ptt *p_ptt,
1970 u32 *p_mfw_ver,
1971 u32 *p_running_bundle_id)
1972 {
1973 u32 global_offsize;
1974
1975 #ifndef ASIC_ONLY
1976 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1977 DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
1978 return ECORE_SUCCESS;
1979 }
1980 #endif
1981
1982 if (IS_VF(p_hwfn->p_dev)) {
1983 if (p_hwfn->vf_iov_info) {
1984 struct pfvf_acquire_resp_tlv *p_resp;
1985
1986 p_resp = &p_hwfn->vf_iov_info->acquire_resp;
1987 *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
1988 return ECORE_SUCCESS;
1989 } else {
1990 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1991 "VF requested MFW version prior to ACQUIRE\n");
1992 return ECORE_INVAL;
1993 }
1994 }
1995
1996 global_offsize = ecore_rd(p_hwfn, p_ptt,
1997 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1998 PUBLIC_GLOBAL));
1999 *p_mfw_ver = ecore_rd(p_hwfn, p_ptt,
2000 SECTION_ADDR(global_offsize, 0) +
2001 offsetof(struct public_global, mfw_ver));
2002
2003 if (p_running_bundle_id != OSAL_NULL) {
2004 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
2005 SECTION_ADDR(global_offsize, 0) +
2006 offsetof(struct public_global,
2007 running_bundle_id));
2008 }
2009
2010 return ECORE_SUCCESS;
2011 }
2012
2013 enum _ecore_status_t ecore_mcp_get_mbi_ver(struct ecore_hwfn *p_hwfn,
2014 struct ecore_ptt *p_ptt,
2015 u32 *p_mbi_ver)
2016 {
2017 u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;
2018
2019 #ifndef ASIC_ONLY
2020 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2021 DP_NOTICE(p_hwfn, false, "Emulation - can't get MBI version\n");
2022 return ECORE_SUCCESS;
2023 }
2024 #endif
2025
2026 if (IS_VF(p_hwfn->p_dev))
2027 return ECORE_INVAL;
2028
2029 /* Read the address of the nvm_cfg */
2030 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
2031 if (!nvm_cfg_addr) {
2032 DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
2033 return ECORE_INVAL;
2034 }
2035
2036 /* Read the offset of nvm_cfg1 */
2037 nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
2038
2039 mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2040 offsetof(struct nvm_cfg1, glob) +
2041 offsetof(struct nvm_cfg1_glob, mbi_version);
2042 *p_mbi_ver = ecore_rd(p_hwfn, p_ptt, mbi_ver_addr) &
2043 (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
2044 NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
2045 NVM_CFG1_GLOB_MBI_VERSION_2_MASK);
2046
2047 return ECORE_SUCCESS;
2048 }
2049
2050 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
2051 u32 *p_media_type)
2052 {
2053 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
2054 struct ecore_ptt *p_ptt;
2055
2056 /* TODO - Add support for VFs */
2057 if (IS_VF(p_dev))
2058 return ECORE_INVAL;
2059
2060 if (!ecore_mcp_is_init(p_hwfn)) {
2061 DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
2062 return ECORE_BUSY;
2063 }
2064
2065 *p_media_type = MEDIA_UNSPECIFIED;
2066
2067 p_ptt = ecore_ptt_acquire(p_hwfn);
2068 if (!p_ptt)
2069 return ECORE_BUSY;
2070
2071 *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
2072 offsetof(struct public_port, media_type));
2073
2074 ecore_ptt_release(p_hwfn, p_ptt);
2075
2076 return ECORE_SUCCESS;
2077 }
2078
2079 /* Old MFW has a global configuration for all PFs regarding RDMA support */
2080 static void
2081 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
2082 enum ecore_pci_personality *p_proto)
2083 {
2084 /* There was never a legacy MFW that published iWARP.
2085 * So at this point, this is either plain L2 or RoCE.
2086 */
2087 if (OSAL_TEST_BIT(ECORE_DEV_CAP_ROCE,
2088 &p_hwfn->hw_info.device_capabilities))
2089 *p_proto = ECORE_PCI_ETH_ROCE;
2090 else
2091 *p_proto = ECORE_PCI_ETH;
2092
2093 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2094 "According to Legacy capabilities, L2 personality is %08x\n",
2095 (u32) *p_proto);
2096 }
2097
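/* Newer MFWs publish the per-PF RDMA protocol; query it via the mailbox. */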
2098 static enum _ecore_status_t
2099 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
2100 struct ecore_ptt *p_ptt,
2101 enum ecore_pci_personality *p_proto)
2102 {
2103 u32 resp = 0, param = 0;
2104 enum _ecore_status_t rc;
2105
2106 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2107 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
2108 if (rc != ECORE_SUCCESS)
2109 return rc;
2110 if (resp != FW_MSG_CODE_OK) {
2111 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2112 "MFW lacks support for command; Returns %08x\n",
2113 resp);
2114 return ECORE_INVAL;
2115 }
2116
2117 switch (param) {
2118 case FW_MB_PARAM_GET_PF_RDMA_NONE:
2119 *p_proto = ECORE_PCI_ETH;
2120 break;
2121 case FW_MB_PARAM_GET_PF_RDMA_ROCE:
2122 *p_proto = ECORE_PCI_ETH_ROCE;
2123 break;
2124 case FW_MB_PARAM_GET_PF_RDMA_IWARP:
2125 *p_proto = ECORE_PCI_ETH_IWARP;
2126 break;
2127 case FW_MB_PARAM_GET_PF_RDMA_BOTH:
2128 *p_proto = ECORE_PCI_ETH_RDMA;
2129 break;
2130 default:
2131 DP_NOTICE(p_hwfn, true,
2132 "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
2133 param);
2134 return ECORE_INVAL;
2135 }
2136
2137 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2138 "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2139 (u32) *p_proto, resp, param);
2140 return ECORE_SUCCESS;
2141 }
2142
2143 static enum _ecore_status_t
2144 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
2145 struct public_func *p_info,
2146 struct ecore_ptt *p_ptt,
2147 enum ecore_pci_personality *p_proto)
2148 {
2149 enum _ecore_status_t rc = ECORE_SUCCESS;
2150
2151 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2152 case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2153 if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
2154 ECORE_SUCCESS)
2155 ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2156 break;
2157 case FUNC_MF_CFG_PROTOCOL_ISCSI:
2158 *p_proto = ECORE_PCI_ISCSI;
2159 break;
2160 case FUNC_MF_CFG_PROTOCOL_FCOE:
2161 *p_proto = ECORE_PCI_FCOE;
2162 break;
2163 case FUNC_MF_CFG_PROTOCOL_ROCE:
2164 DP_NOTICE(p_hwfn, true, "RoCE personality is not a valid value!\n");
2165 rc = ECORE_INVAL;
2166 break;
2167 default:
2168 rc = ECORE_INVAL;
2169 }
2170
2171 return rc;
2172 }
2173
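/*
 * Populate the hwfn's function-info cache (personality, bandwidth, MAC,
 * FCoE WWNs, ovlan, MTU and WoL support) from the function's shmem data.
 */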
2174 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
2175 struct ecore_ptt *p_ptt)
2176 {
2177 struct ecore_mcp_function_info *info;
2178 struct public_func shmem_info;
2179
2180 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2181 MCP_PF_ID(p_hwfn));
2182 info = &p_hwfn->mcp_info->func_info;
2183
2184 info->pause_on_host = (shmem_info.config &
2185 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2186
2187 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2188 &info->protocol)) {
2189 DP_ERR(p_hwfn, "Unknown personality %08x\n",
2190 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2191 return ECORE_INVAL;
2192 }
2193
2194 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
2195
2196 if (shmem_info.mac_upper || shmem_info.mac_lower) {
2197 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2198 info->mac[1] = (u8)(shmem_info.mac_upper);
2199 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2200 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2201 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2202 info->mac[5] = (u8)(shmem_info.mac_lower);
2203
2204 /* Store primary MAC for later possible WoL */
2205 OSAL_MEMCPY(&p_hwfn->p_dev->wol_mac, info->mac, ETH_ALEN);
2206
2207 } else {
2208 /* TODO - are there protocols for which there's no MAC? */
2209 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
2210 }
2211
2212 /* TODO - are these calculations true for a BE machine? */
2213 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
2214 (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
2215 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
2216 (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);
2217
2218 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2219
2220 info->mtu = (u16)shmem_info.mtu_size;
2221
2222 p_hwfn->hw_info.b_wol_support = ECORE_WOL_SUPPORT_NONE;
2223 if (ecore_mcp_is_init(p_hwfn)) {
2224 u32 resp = 0, param = 0;
2225 enum _ecore_status_t rc;
2226
2227 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2228 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
2229 if (rc != ECORE_SUCCESS)
2230 return rc;
2231 if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
2232 p_hwfn->hw_info.b_wol_support = ECORE_WOL_SUPPORT_PME;
2233 }
2234 p_hwfn->p_dev->wol_config = (u8)ECORE_OV_WOL_DEFAULT;
2235
2236 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
2237 "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
2238 info->pause_on_host, info->protocol,
2239 info->bandwidth_min, info->bandwidth_max,
2240 info->mac[0], info->mac[1], info->mac[2],
2241 info->mac[3], info->mac[4], info->mac[5],
2242 info->wwn_port, info->wwn_node, info->ovlan,
2243 (u8)p_hwfn->hw_info.b_wol_support);
2244
2245 return ECORE_SUCCESS;
2246 }
2247
2248 struct ecore_mcp_link_params
2249 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
2250 {
2251 if (!p_hwfn || !p_hwfn->mcp_info)
2252 return OSAL_NULL;
2253 return &p_hwfn->mcp_info->link_input;
2254 }
2255
2256 struct ecore_mcp_link_state
2257 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
2258 {
2259 if (!p_hwfn || !p_hwfn->mcp_info)
2260 return OSAL_NULL;
2261
2262 #ifndef ASIC_ONLY
2263 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
2264 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
2265 p_hwfn->mcp_info->link_output.link_up = true;
2266 }
2267 #endif
2268
2269 return &p_hwfn->mcp_info->link_output;
2270 }
2271
2272 struct ecore_mcp_link_capabilities
2273 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
2274 {
2275 if (!p_hwfn || !p_hwfn->mcp_info)
2276 return OSAL_NULL;
2277 return &p_hwfn->mcp_info->link_capabilities;
2278 }
2279
2280 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
2281 struct ecore_ptt *p_ptt)
2282 {
2283 u32 resp = 0, param = 0;
2284 enum _ecore_status_t rc;
2285
2286 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2287 DRV_MSG_CODE_NIG_DRAIN, 1000,
2288 &resp, &param);
2289
2290 /* Wait for the drain to complete before returning */
2291 OSAL_MSLEEP(1020);
2292
2293 return rc;
2294 }
2295
2296 #ifndef LINUX_REMOVE
2297 const struct ecore_mcp_function_info
2298 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
2299 {
2300 if (!p_hwfn || !p_hwfn->mcp_info)
2301 return OSAL_NULL;
2302 return &p_hwfn->mcp_info->func_info;
2303 }
2304 #endif
2305
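/* Dispatch an NVM request to the matching mailbox helper based on its type. */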
2306 enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
2307 struct ecore_ptt *p_ptt,
2308 struct ecore_mcp_nvm_params *params)
2309 {
2310 enum _ecore_status_t rc;
2311
2312 switch (params->type) {
2313 case ECORE_MCP_NVM_RD:
2314 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
2315 params->nvm_common.offset,
2316 &params->nvm_common.resp,
2317 &params->nvm_common.param,
2318 params->nvm_rd.buf_size,
2319 params->nvm_rd.buf);
2320 break;
2321 case ECORE_MCP_CMD:
2322 rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
2323 params->nvm_common.offset,
2324 &params->nvm_common.resp,
2325 &params->nvm_common.param);
2326 break;
2327 case ECORE_MCP_NVM_WR:
2328 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
2329 params->nvm_common.offset,
2330 &params->nvm_common.resp,
2331 &params->nvm_common.param,
2332 params->nvm_wr.buf_size,
2333 params->nvm_wr.buf);
2334 break;
2335 default:
2336 rc = ECORE_NOTIMPL;
2337 break;
2338 }
2339 return rc;
2340 }
2341
2342 #ifndef LINUX_REMOVE
2343 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
2344 struct ecore_ptt *p_ptt,
2345 u32 personalities)
2346 {
2347 enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
2348 struct public_func shmem_info;
2349 int i, count = 0, num_pfs;
2350
2351 num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
2352
2353 for (i = 0; i < num_pfs; i++) {
2354 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2355 MCP_PF_ID_BY_REL(p_hwfn, i));
2356 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
2357 continue;
2358
2359 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2360 &protocol) !=
2361 ECORE_SUCCESS)
2362 continue;
2363
2364 if ((1 << ((u32)protocol)) & personalities)
2365 count++;
2366 }
2367
2368 return count;
2369 }
2370 #endif
2371
2372 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
2373 struct ecore_ptt *p_ptt,
2374 u32 *p_flash_size)
2375 {
2376 u32 flash_size;
2377
2378 #ifndef ASIC_ONLY
2379 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2380 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
2381 return ECORE_INVAL;
2382 }
2383 #endif
2384
2385 if (IS_VF(p_hwfn->p_dev))
2386 return ECORE_INVAL;
2387
2388 flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2389 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2390 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
2391 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
2392
2393 *p_flash_size = flash_size;
2394
2395 return ECORE_SUCCESS;
2396 }
2397
2398 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
2399 struct ecore_ptt *p_ptt)
2400 {
2401 struct ecore_dev *p_dev = p_hwfn->p_dev;
2402
2403 if (p_dev->recov_in_prog) {
2404 DP_NOTICE(p_hwfn, false,
2405 "Avoid triggering a recovery since such a process is already in progress\n");
2406 return ECORE_AGAIN;
2407 }
2408
2409 DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
2410 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2411
2412 return ECORE_SUCCESS;
2413 }
2414
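/*
 * On BB, MSI-X is configured per VF; the request encodes the VF ID and
 * the vector count is scaled to account for CMT.
 */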
2415 static enum _ecore_status_t
2416 ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
2417 struct ecore_ptt *p_ptt,
2418 u8 vf_id, u8 num)
2419 {
2420 u32 resp = 0, param = 0, rc_param = 0;
2421 enum _ecore_status_t rc;
2422
2423 /* Only the leader can configure MSI-X, and it must take CMT into account */
2424 if (!IS_LEAD_HWFN(p_hwfn))
2425 return ECORE_SUCCESS;
2426 num *= p_hwfn->p_dev->num_hwfns;
2427
2428 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
2429 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2430 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
2431 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2432
2433 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2434 &resp, &rc_param);
2435
2436 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2437 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
2438 vf_id);
2439 rc = ECORE_INVAL;
2440 } else {
2441 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2442 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
2443 num, vf_id);
2444 }
2445
2446 return rc;
2447 }
2448
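/* On AH, a single request configures the MSI-X count for all of the PF's VFs. */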
2449 static enum _ecore_status_t
2450 ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
2451 struct ecore_ptt *p_ptt,
2452 u8 num)
2453 {
2454 u32 resp = 0, param = num, rc_param = 0;
2455 enum _ecore_status_t rc;
2456
2457 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2458 param, &resp, &rc_param);
2459
2460 if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2461 DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
2462 rc = ECORE_INVAL;
2463 } else {
2464 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2465 "Requested 0x%02x MSI-x interrupts for VFs\n",
2466 num);
2467 }
2468
2469 return rc;
2470 }
2471
2472 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
2473 struct ecore_ptt *p_ptt,
2474 u8 vf_id, u8 num)
2475 {
2476 if (ECORE_IS_BB(p_hwfn->p_dev))
2477 return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2478 else
2479 return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2480 }
2481
2482 enum _ecore_status_t
2483 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2484 struct ecore_mcp_drv_version *p_ver)
2485 {
2486 struct ecore_mcp_mb_params mb_params;
2487 struct drv_version_stc drv_version;
2488 u32 num_words, i;
2489 void *p_name;
2490 OSAL_BE32 val;
2491 enum _ecore_status_t rc;
2492
2493 #ifndef ASIC_ONLY
2494 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
2495 return ECORE_SUCCESS;
2496 #endif
2497
2498 OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
2499 drv_version.version = p_ver->version;
2500 num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
2501 for (i = 0; i < num_words; i++) {
2502 /* The driver name is expected to be in a big-endian format */
2503 p_name = &p_ver->name[i * sizeof(u32)];
2504 val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
2505 *(u32 *)&drv_version.name[i * sizeof(u32)] = val;
2506 }
2507
2508 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2509 mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2510 mb_params.p_data_src = &drv_version;
2511 mb_params.data_src_size = sizeof(drv_version);
2512 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2513 if (rc != ECORE_SUCCESS)
2514 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2515
2516 return rc;
2517 }
2518
2519 /* Wait at most 100 msec for the MCP to halt */
2520 #define ECORE_MCP_HALT_SLEEP_MS 10
2521 #define ECORE_MCP_HALT_MAX_RETRIES 10
2522
2523 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
2524 struct ecore_ptt *p_ptt)
2525 {
2526 u32 resp = 0, param = 0, cpu_state, cnt = 0;
2527 enum _ecore_status_t rc;
2528
2529 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2530 &param);
2531 if (rc != ECORE_SUCCESS) {
2532 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2533 return rc;
2534 }
2535
2536 do {
2537 OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
2538 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2539 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
2540 break;
2541 } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);
2542
2543 if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
2544 DP_NOTICE(p_hwfn, false,
2545 "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2546 ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
2547 return ECORE_BUSY;
2548 }
2549
2550 return ECORE_SUCCESS;
2551 }
2552
2553 #define ECORE_MCP_RESUME_SLEEP_MS 10
2554
2555 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
2556 struct ecore_ptt *p_ptt)
2557 {
2558 u32 cpu_mode, cpu_state;
2559
2560 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2561
2562 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2563 cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2564 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
2565
2566 OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS);
2567 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2568
2569 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
2570 DP_NOTICE(p_hwfn, false,
2571 "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2572 cpu_mode, cpu_state);
2573 return ECORE_BUSY;
2574 }
2575
2576 return ECORE_SUCCESS;
2577 }
2578
2579 enum _ecore_status_t
2580 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
2581 struct ecore_ptt *p_ptt,
2582 enum ecore_ov_client client)
2583 {
2584 enum _ecore_status_t rc;
2585 u32 resp = 0, param = 0;
2586 u32 drv_mb_param;
2587
2588 switch (client) {
2589 case ECORE_OV_CLIENT_DRV:
2590 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2591 break;
2592 case ECORE_OV_CLIENT_USER:
2593 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2594 break;
2595 case ECORE_OV_CLIENT_VENDOR_SPEC:
2596 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2597 break;
2598 default:
2599 DP_NOTICE(p_hwfn, true,
2600 "Invalid client type %d\n", client);
2601 return ECORE_INVAL;
2602 }
2603
2604 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2605 drv_mb_param, &resp, &param);
2606 if (rc != ECORE_SUCCESS)
2607 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2608
2609 return rc;
2610 }
2611
2612 enum _ecore_status_t
2613 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
2614 struct ecore_ptt *p_ptt,
2615 enum ecore_ov_driver_state drv_state)
2616 {
2617 enum _ecore_status_t rc;
2618 u32 resp = 0, param = 0;
2619 u32 drv_mb_param;
2620
2621 switch (drv_state) {
2622 case ECORE_OV_DRIVER_STATE_NOT_LOADED:
2623 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2624 break;
2625 case ECORE_OV_DRIVER_STATE_DISABLED:
2626 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2627 break;
2628 case ECORE_OV_DRIVER_STATE_ACTIVE:
2629 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2630 break;
2631 default:
2632 DP_NOTICE(p_hwfn, true,
2633 "Invalid driver state %d\n", drv_state);
2634 return ECORE_INVAL;
2635 }
2636
2637 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2638 drv_mb_param, &resp, &param);
2639 if (rc != ECORE_SUCCESS)
2640 DP_ERR(p_hwfn, "Failed to send driver state\n");
2641
2642 return rc;
2643 }
2644
2645 enum _ecore_status_t
2646 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2647 struct ecore_fc_npiv_tbl *p_table)
2648 {
2649 enum _ecore_status_t rc = ECORE_SUCCESS;
2650 struct dci_fc_npiv_tbl *p_npiv_table;
2651 u8 *p_buf = OSAL_NULL;
2652 u32 addr, size, i;
2653
2654 p_table->num_wwpn = 0;
2655 p_table->num_wwnn = 0;
2656 addr = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
2657 offsetof(struct public_port, fc_npiv_nvram_tbl_addr));
2658 if (addr == NPIV_TBL_INVALID_ADDR) {
2659 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "NPIV table doesn't exist\n");
2660 return rc;
2661 }
2662
2663 size = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
2664 offsetof(struct public_port, fc_npiv_nvram_tbl_size));
2665 if (!size) {
2666 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "NPIV table is empty\n");
2667 return rc;
2668 }
2669
2670 p_buf = OSAL_VZALLOC(p_hwfn->p_dev, size);
2671 if (!p_buf) {
2672 DP_ERR(p_hwfn, "Buffer allocation failed\n");
2673 return ECORE_NOMEM;
2674 }
2675
2676 rc = ecore_mcp_nvm_read(p_hwfn->p_dev, addr, p_buf, size);
2677 if (rc != ECORE_SUCCESS) {
2678 OSAL_VFREE(p_hwfn->p_dev, p_buf);
2679 return rc;
2680 }
2681
2682 p_npiv_table = (struct dci_fc_npiv_tbl *)p_buf;
2683 p_table->num_wwpn = (u16)p_npiv_table->fc_npiv_cfg.num_of_npiv;
2684 p_table->num_wwnn = (u16)p_npiv_table->fc_npiv_cfg.num_of_npiv;
2685 for (i = 0; i < p_table->num_wwpn; i++) {
2686 OSAL_MEMCPY(p_table->wwpn, p_npiv_table->settings[i].npiv_wwpn,
2687 ECORE_WWN_SIZE);
2688 OSAL_MEMCPY(p_table->wwnn, p_npiv_table->settings[i].npiv_wwnn,
2689 ECORE_WWN_SIZE);
2690 }
2691
2692 OSAL_VFREE(p_hwfn->p_dev, p_buf);
2693
2694 return ECORE_SUCCESS;
2695 }
2696
2697 enum _ecore_status_t
2698 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2699 u16 mtu)
2700 {
2701 enum _ecore_status_t rc;
2702 u32 resp = 0, param = 0;
2703 u32 drv_mb_param;
2704
2705 drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
2706 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
2707 drv_mb_param, &resp, &param);
2708 if (rc != ECORE_SUCCESS)
2709 DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
2710
2711 return rc;
2712 }
2713
2714 enum _ecore_status_t
2715 ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2716 u8 *mac)
2717 {
2718 struct ecore_mcp_mb_params mb_params;
2719 enum _ecore_status_t rc;
2720 u32 mfw_mac[2];
2721
2722 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2723 mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
2724 mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
2725 DRV_MSG_CODE_VMAC_TYPE_SHIFT;
2726 mb_params.param |= MCP_PF_ID(p_hwfn);
2727
2728 /* The MCP is BE, and on LE platforms PCI swaps SHMEM accesses
2729 * with 32-bit granularity. The MAC therefore has to be written in
2730 * native order [and not byte order]; otherwise the MFW would read
2731 * it incorrectly after the swap.
2732 */
2733 mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
2734 mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
2735
2736 mb_params.p_data_src = (u8 *)mfw_mac;
2737 mb_params.data_src_size = 8;
2738 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2739 if (rc != ECORE_SUCCESS)
2740 DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
2741
2742 /* Store primary MAC for later possible WoL */
2743 OSAL_MEMCPY(p_hwfn->p_dev->wol_mac, mac, ETH_ALEN);
2744
2745 return rc;
2746 }
2747
2748 enum _ecore_status_t
2749 ecore_mcp_ov_update_wol(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2750 enum ecore_ov_wol wol)
2751 {
2752 enum _ecore_status_t rc;
2753 u32 resp = 0, param = 0;
2754 u32 drv_mb_param;
2755
2756 if (p_hwfn->hw_info.b_wol_support == ECORE_WOL_SUPPORT_NONE) {
2757 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2758 "Can't change WoL configuration when WoL isn't supported\n");
2759 return ECORE_INVAL;
2760 }
2761
2762 switch (wol) {
2763 case ECORE_OV_WOL_DEFAULT:
2764 drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
2765 break;
2766 case ECORE_OV_WOL_DISABLED:
2767 drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
2768 break;
2769 case ECORE_OV_WOL_ENABLED:
2770 drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
2771 break;
2772 default:
2773 DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
2774 return ECORE_INVAL;
2775 }
2776
2777 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
2778 drv_mb_param, &resp, &param);
2779 if (rc != ECORE_SUCCESS)
2780 DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);
2781
2782 /* Store the WoL update for a future unload */
2783 p_hwfn->p_dev->wol_config = (u8)wol;
2784
2785 return rc;
2786 }
2787
2788 enum _ecore_status_t
2789 ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2790 enum ecore_ov_eswitch eswitch)
2791 {
2792 enum _ecore_status_t rc;
2793 u32 resp = 0, param = 0;
2794 u32 drv_mb_param;
2795
2796 switch (eswitch) {
2797 case ECORE_OV_ESWITCH_NONE:
2798 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
2799 break;
2800 case ECORE_OV_ESWITCH_VEB:
2801 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
2802 break;
2803 case ECORE_OV_ESWITCH_VEPA:
2804 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
2805 break;
2806 default:
2807 DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
2808 return ECORE_INVAL;
2809 }
2810
2811 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
2812 drv_mb_param, &resp, &param);
2813 if (rc != ECORE_SUCCESS)
2814 DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
2815
2816 return rc;
2817 }
2818
2819 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
2820 struct ecore_ptt *p_ptt,
2821 enum ecore_led_mode mode)
2822 {
2823 u32 resp = 0, param = 0, drv_mb_param;
2824 enum _ecore_status_t rc;
2825
2826 switch (mode) {
2827 case ECORE_LED_MODE_ON:
2828 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2829 break;
2830 case ECORE_LED_MODE_OFF:
2831 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2832 break;
2833 case ECORE_LED_MODE_RESTORE:
2834 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2835 break;
2836 default:
2837 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
2838 return ECORE_INVAL;
2839 }
2840
2841 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2842 drv_mb_param, &resp, &param);
2843 if (rc != ECORE_SUCCESS)
2844 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2845
2846 return rc;
2847 }
2848
2849 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
2850 struct ecore_ptt *p_ptt,
2851 u32 mask_parities)
2852 {
2853 enum _ecore_status_t rc;
2854 u32 resp = 0, param = 0;
2855
2856 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2857 mask_parities, &resp, &param);
2858
2859 if (rc != ECORE_SUCCESS) {
2860 DP_ERR(p_hwfn, "MCP response failure for mask parities, aborting\n");
2861 } else if (resp != FW_MSG_CODE_OK) {
2862 DP_ERR(p_hwfn, "MCP did not acknowledge mask parity request. Old MFW?\n");
2863 rc = ECORE_INVAL;
2864 }
2865
2866 return rc;
2867 }
2868
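/*
 * Read 'len' bytes of NVM starting at 'addr' into p_buf, splitting the
 * request into chunks of at most MCP_DRV_NVM_BUF_LEN bytes per mailbox
 * command. A minimal usage sketch (offset and size are hypothetical):
 *
 *	u8 buf[64];
 *	if (ecore_mcp_nvm_read(p_dev, 0x0, buf, sizeof(buf)) != ECORE_SUCCESS)
 *		(handle the failure)
 */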
2869 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
2870 u8 *p_buf, u32 len)
2871 {
2872 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2873 u32 bytes_left, offset, bytes_to_copy, buf_size;
2874 struct ecore_mcp_nvm_params params;
2875 struct ecore_ptt *p_ptt;
2876 enum _ecore_status_t rc = ECORE_SUCCESS;
2877
2878 p_ptt = ecore_ptt_acquire(p_hwfn);
2879 if (!p_ptt)
2880 return ECORE_BUSY;
2881
2882 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2883 bytes_left = len;
2884 offset = 0;
2885 params.type = ECORE_MCP_NVM_RD;
2886 params.nvm_rd.buf_size = &buf_size;
2887 params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
2888 while (bytes_left > 0) {
2889 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2890 MCP_DRV_NVM_BUF_LEN);
2891 params.nvm_common.offset = (addr + offset) |
2892 (bytes_to_copy <<
2893 DRV_MB_PARAM_NVM_LEN_SHIFT);
2894 params.nvm_rd.buf = (u32 *)(p_buf + offset);
2895 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2896 if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
2897 FW_MSG_CODE_NVM_OK)) {
2898 DP_NOTICE(p_dev, false, "MCP command rc = %d\n",
2899 rc);
2900 break;
2901 }
2902
2903 /* This can be a lengthy process, and it's possible the scheduler
2904 * isn't preemptible. Sleep a bit to prevent CPU hogging.
2905 */
2906 if (bytes_left % 0x1000 <
2907 (bytes_left - *params.nvm_rd.buf_size) % 0x1000)
2908 OSAL_MSLEEP(1);
2909
2910 offset += *params.nvm_rd.buf_size;
2911 bytes_left -= *params.nvm_rd.buf_size;
2912 }
2913
2914 p_dev->mcp_nvm_resp = params.nvm_common.resp;
2915 ecore_ptt_release(p_hwfn, p_ptt);
2916
2917 return rc;
2918 }
2919
2920 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
2921 u32 addr, u8 *p_buf, u32 len)
2922 {
2923 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2924 struct ecore_mcp_nvm_params params;
2925 struct ecore_ptt *p_ptt;
2926 enum _ecore_status_t rc;
2927
2928 p_ptt = ecore_ptt_acquire(p_hwfn);
2929 if (!p_ptt)
2930 return ECORE_BUSY;
2931
2932 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2933 params.type = ECORE_MCP_NVM_RD;
2934 params.nvm_rd.buf_size = &len;
2935 params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
2936 DRV_MSG_CODE_PHY_CORE_READ :
2937 DRV_MSG_CODE_PHY_RAW_READ;
2938 params.nvm_common.offset = addr;
2939 params.nvm_rd.buf = (u32 *)p_buf;
2940 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2941 if (rc != ECORE_SUCCESS)
2942 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2943
2944 p_dev->mcp_nvm_resp = params.nvm_common.resp;
2945 ecore_ptt_release(p_hwfn, p_ptt);
2946
2947 return rc;
2948 }
2949
2950 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
2951 {
2952 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2953 struct ecore_mcp_nvm_params params;
2954 struct ecore_ptt *p_ptt;
2955
2956 p_ptt = ecore_ptt_acquire(p_hwfn);
2957 if (!p_ptt)
2958 return ECORE_BUSY;
2959
2960 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2961 OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
2962 ecore_ptt_release(p_hwfn, p_ptt);
2963
2964 return ECORE_SUCCESS;
2965 }
2966
2967 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev,
2968 u32 addr)
2969 {
2970 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2971 struct ecore_mcp_nvm_params params;
2972 struct ecore_ptt *p_ptt;
2973 enum _ecore_status_t rc;
2974
2975 p_ptt = ecore_ptt_acquire(p_hwfn);
2976 if (!p_ptt)
2977 return ECORE_BUSY;
2978 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2979 params.type = ECORE_MCP_CMD;
2980 params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
2981 params.nvm_common.offset = addr;
2982 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2983 p_dev->mcp_nvm_resp = params.nvm_common.resp;
2984 ecore_ptt_release(p_hwfn, p_ptt);
2985
2986 return rc;
2987 }
2988
2989 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
2990 u32 addr)
2991 {
2992 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2993 struct ecore_mcp_nvm_params params;
2994 struct ecore_ptt *p_ptt;
2995 enum _ecore_status_t rc;
2996
2997 p_ptt = ecore_ptt_acquire(p_hwfn);
2998 if (!p_ptt)
2999 return ECORE_BUSY;
3000 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
3001 params.type = ECORE_MCP_CMD;
3002 params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
3003 params.nvm_common.offset = addr;
3004 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3005 p_dev->mcp_nvm_resp = params.nvm_common.resp;
3006 ecore_ptt_release(p_hwfn, p_ptt);
3007
3008 return rc;
3009 }
3010
3011 /* rc receives ECORE_INVAL as its default value because the while
3012 * loop below might not be entered if len is 0
3013 */
3014 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
3015 u32 addr, u8 *p_buf, u32 len)
3016 {
3017 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3018 enum _ecore_status_t rc = ECORE_INVAL;
3019 struct ecore_mcp_nvm_params params;
3020 struct ecore_ptt *p_ptt;
3021 u32 buf_idx, buf_size;
3022
3023 p_ptt = ecore_ptt_acquire(p_hwfn);
3024 if (!p_ptt)
3025 return ECORE_BUSY;
3026
3027 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
3028 params.type = ECORE_MCP_NVM_WR;
3029 switch (cmd) {
3030 case ECORE_PUT_FILE_DATA:
3031 params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
3032 break;
3033 case ECORE_NVM_WRITE_NVRAM:
3034 params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
3035 break;
3036 case ECORE_EXT_PHY_FW_UPGRADE:
3037 params.nvm_common.cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
3038 break;
3039 default:
3040 DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
3041 cmd);
3042 return ECORE_INVAL;
3043 }
3044
3045 buf_idx = 0;
3046 while (buf_idx < len) {
3047 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3048 MCP_DRV_NVM_BUF_LEN);
3049 params.nvm_common.offset = ((buf_size <<
3050 DRV_MB_PARAM_NVM_LEN_SHIFT)
3051 | addr) + buf_idx;
3052 params.nvm_wr.buf_size = buf_size;
3053 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
3054 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3055 if (rc != ECORE_SUCCESS ||
3056 ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
3057 (params.nvm_common.resp !=
3058 FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
3059 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
3060
3061 /* This can be a lengthy process, and it's possible the scheduler
3062 * isn't preemptible. Sleep a bit to prevent CPU hogging.
3063 */
3064 if (buf_idx % 0x1000 >
3065 (buf_idx + buf_size) % 0x1000)
3066 OSAL_MSLEEP(1);
3067
3068 buf_idx += buf_size;
3069 }
3070
3071 p_dev->mcp_nvm_resp = params.nvm_common.resp;
3072 ecore_ptt_release(p_hwfn, p_ptt);
3073
3074 return rc;
3075 }
3076
3077 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
3078 u32 addr, u8 *p_buf, u32 len)
3079 {
3080 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3081 struct ecore_mcp_nvm_params params;
3082 struct ecore_ptt *p_ptt;
3083 enum _ecore_status_t rc;
3084
3085 p_ptt = ecore_ptt_acquire(p_hwfn);
3086 if (!p_ptt)
3087 return ECORE_BUSY;
3088
3089 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
3090 params.type = ECORE_MCP_NVM_WR;
3091 params.nvm_wr.buf_size = len;
3092 params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
3093 DRV_MSG_CODE_PHY_CORE_WRITE :
3094 DRV_MSG_CODE_PHY_RAW_WRITE;
3095 params.nvm_common.offset = addr;
3096 params.nvm_wr.buf = (u32 *)p_buf;
3097 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3098 if (rc != ECORE_SUCCESS)
3099 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
3100 p_dev->mcp_nvm_resp = params.nvm_common.resp;
3101 ecore_ptt_release(p_hwfn, p_ptt);
3102
3103 return rc;
3104 }
3105
3106 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
3107 u32 addr)
3108 {
3109 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3110 struct ecore_mcp_nvm_params params;
3111 struct ecore_ptt *p_ptt;
3112 enum _ecore_status_t rc;
3113
3114 p_ptt = ecore_ptt_acquire(p_hwfn);
3115 if (!p_ptt)
3116 return ECORE_BUSY;
3117
3118 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
3119 params.type = ECORE_MCP_CMD;
3120 params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
3121 params.nvm_common.offset = addr;
3122 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3123 p_dev->mcp_nvm_resp = params.nvm_common.resp;
3124 ecore_ptt_release(p_hwfn, p_ptt);
3125
3126 return rc;
3127 }
3128
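/*
 * Read from a transceiver's I2C interface, splitting the request into
 * chunks of at most MAX_I2C_TRANSACTION_SIZE bytes.
 */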
3129 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
3130 struct ecore_ptt *p_ptt,
3131 u32 port, u32 addr, u32 offset,
3132 u32 len, u8 *p_buf)
3133 {
3134 struct ecore_mcp_nvm_params params;
3135 u32 bytes_left, bytes_to_copy, buf_size;
3136
3137 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
3138 params.nvm_common.offset =
3139 (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
3140 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
3141 addr = offset;
3142 offset = 0;
3143 bytes_left = len;
3144 params.type = ECORE_MCP_NVM_RD;
3145 params.nvm_rd.buf_size = &buf_size;
3146 params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
3147 while (bytes_left > 0) {
3148 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
3149 MAX_I2C_TRANSACTION_SIZE);
3150 params.nvm_rd.buf = (u32 *)(p_buf + offset);
3151 params.nvm_common.offset &=
3152 (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3153 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3154 params.nvm_common.offset |=
3155 ((addr + offset) <<
3156 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
3157 params.nvm_common.offset |=
3158 (bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
3159 (void) ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3160 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
3161 FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
3162 return ECORE_NODEV;
3163 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
3164 FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3165 return ECORE_UNKNOWN_ERROR;
3166
3167 offset += *params.nvm_rd.buf_size;
3168 bytes_left -= *params.nvm_rd.buf_size;
3169 }
3170
3171 return ECORE_SUCCESS;
3172 }
3173
3174 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
3175 struct ecore_ptt *p_ptt,
3176 u32 port, u32 addr, u32 offset,
3177 u32 len, u8 *p_buf)
3178 {
3179 struct ecore_mcp_nvm_params params;
3180 u32 buf_idx, buf_size;
3181
3182 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
3183 params.nvm_common.offset =
3184 (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
3185 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
3186 params.type = ECORE_MCP_NVM_WR;
3187 params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
3188 buf_idx = 0;
3189 while (buf_idx < len) {
3190 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3191 MAX_I2C_TRANSACTION_SIZE);
3192 params.nvm_common.offset &=
3193 (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3194 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3195 params.nvm_common.offset |=
3196 ((offset + buf_idx) <<
3197 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
3198 params.nvm_common.offset |=
3199 (buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
3200 params.nvm_wr.buf_size = buf_size;
3201 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
3202 (void) ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3203 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
3204 FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
3205 return ECORE_NODEV;
3206 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
3207 FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3208 return ECORE_UNKNOWN_ERROR;
3209
3210 buf_idx += buf_size;
3211 }
3212
3213 return ECORE_SUCCESS;
3214 }
3215
3216 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
3217 struct ecore_ptt *p_ptt,
3218 u16 gpio, u32 *gpio_val)
3219 {
3220 enum _ecore_status_t rc = ECORE_SUCCESS;
3221 u32 drv_mb_param = 0, rsp;
3222
3223 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT);
3224
3225 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
3226 drv_mb_param, &rsp, gpio_val);
3227
3228 if (rc != ECORE_SUCCESS)
3229 return rc;
3230
3231 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3232 return ECORE_UNKNOWN_ERROR;
3233
3234 return ECORE_SUCCESS;
3235 }
3236
3237 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
3238 struct ecore_ptt *p_ptt,
3239 u16 gpio, u16 gpio_val)
3240 {
3241 enum _ecore_status_t rc = ECORE_SUCCESS;
3242 u32 drv_mb_param = 0, param, rsp;
3243
3244 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT) |
3245 (gpio_val << DRV_MB_PARAM_GPIO_VALUE_SHIFT);
3246
3247 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
3248 drv_mb_param, &rsp, &param);
3249
3250 if (rc != ECORE_SUCCESS)
3251 return rc;
3252
3253 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3254 return ECORE_UNKNOWN_ERROR;
3255
3256 return ECORE_SUCCESS;
3257 }
3258
3259 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
3260 struct ecore_ptt *p_ptt,
3261 u16 gpio, u32 *gpio_direction,
3262 u32 *gpio_ctrl)
3263 {
3264 u32 drv_mb_param = 0, rsp, val = 0;
3265 enum _ecore_status_t rc = ECORE_SUCCESS;
3266
3267 drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT;
3268
3269 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
3270 drv_mb_param, &rsp, &val);
3271 if (rc != ECORE_SUCCESS)
3272 return rc;
3273
3274 *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
3275 DRV_MB_PARAM_GPIO_DIRECTION_SHIFT;
3276 *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
3277 DRV_MB_PARAM_GPIO_CTRL_SHIFT;
3278
3279 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3280 return ECORE_UNKNOWN_ERROR;
3281
3282 return ECORE_SUCCESS;
3283 }
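
/*
 * Illustrative sketch (not from the original driver): querying a GPIO's
 * direction and control attributes before driving it.  The pin number is a
 * placeholder.
 *
 *	u32 dir, ctrl;
 *
 *	if (ecore_mcp_gpio_info(p_hwfn, p_ptt, 0x10, &dir, &ctrl) ==
 *	    ECORE_SUCCESS)
 *		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
 *			   "gpio 0x10: direction %u, ctrl %u\n", dir, ctrl);
 */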
3284
3285 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
3286 struct ecore_ptt *p_ptt)
3287 {
3288 u32 drv_mb_param = 0, rsp, param;
3289 enum _ecore_status_t rc = ECORE_SUCCESS;
3290
3291 drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
3292 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3293
3294 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3295 			   drv_mb_param, &rsp, &param);
3296
3297 if (rc != ECORE_SUCCESS)
3298 return rc;
3299
3300 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3301 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3302 rc = ECORE_UNKNOWN_ERROR;
3303
3304 return rc;
3305 }
3306
3307 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
3308 struct ecore_ptt *p_ptt)
3309 {
3310 u32 drv_mb_param, rsp, param;
3311 enum _ecore_status_t rc = ECORE_SUCCESS;
3312
3313 drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
3314 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3315
3316 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3317 			   drv_mb_param, &rsp, &param);
3318
3319 if (rc != ECORE_SUCCESS)
3320 return rc;
3321
3322 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3323 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3324 rc = ECORE_UNKNOWN_ERROR;
3325
3326 return rc;
3327 }
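
/*
 * Illustrative sketch (not from the original driver): running both MFW
 * built-in self tests back to back; each helper already folds the
 * pass/fail result into its return status.
 *
 *	if (ecore_mcp_bist_register_test(p_hwfn, p_ptt) != ECORE_SUCCESS ||
 *	    ecore_mcp_bist_clock_test(p_hwfn, p_ptt) != ECORE_SUCCESS)
 *		DP_NOTICE(p_hwfn, false, "MFW BIST failed\n");
 */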
3328
3329 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
3330 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
3331 {
3332 u32 drv_mb_param = 0, rsp;
3333 enum _ecore_status_t rc = ECORE_SUCCESS;
3334
3335 drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
3336 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3337
3338 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3339 drv_mb_param, &rsp, num_images);
3340
3341 if (rc != ECORE_SUCCESS)
3342 return rc;
3343
3344 	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
3345 rc = ECORE_UNKNOWN_ERROR;
3346
3347 return rc;
3348 }
3349
3350 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
3351 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3352 struct bist_nvm_image_att *p_image_att, u32 image_index)
3353 {
3354 struct ecore_mcp_nvm_params params;
3355 enum _ecore_status_t rc;
3356 u32 buf_size;
3357
3358 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
3359 params.nvm_common.offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
3360 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3361 params.nvm_common.offset |= (image_index <<
3362 DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT);
3363
3364 params.type = ECORE_MCP_NVM_RD;
3365 params.nvm_rd.buf_size = &buf_size;
3366 params.nvm_common.cmd = DRV_MSG_CODE_BIST_TEST;
3367 params.nvm_rd.buf = (u32 *)p_image_att;
3368
3369 	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3370 if (rc != ECORE_SUCCESS)
3371 return rc;
3372
3373 if (((params.nvm_common.resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3374 (p_image_att->return_code != 1))
3375 rc = ECORE_UNKNOWN_ERROR;
3376
3377 return rc;
3378 }
3379
3380 enum _ecore_status_t
3381 ecore_mcp_get_nvm_image_att(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3382 enum ecore_nvm_images image_id,
3383 struct ecore_nvm_image_att *p_image_att)
3384 {
3385 struct bist_nvm_image_att mfw_image_att;
3386 enum nvm_image_type type;
3387 u32 num_images, i;
3388 enum _ecore_status_t rc;
3389
3390 /* Translate image_id into MFW definitions */
3391 switch (image_id) {
3392 case ECORE_NVM_IMAGE_ISCSI_CFG:
3393 type = NVM_TYPE_ISCSI_CFG;
3394 break;
3395 case ECORE_NVM_IMAGE_FCOE_CFG:
3396 type = NVM_TYPE_FCOE_CFG;
3397 break;
3398 case ECORE_NVM_IMAGE_MDUMP:
3399 type = NVM_TYPE_MDUMP;
3400 break;
3401 default:
3402 DP_NOTICE(p_hwfn, false, "Unknown request of image_id %08x\n",
3403 image_id);
3404 return ECORE_INVAL;
3405 }
3406
3407 /* Learn number of images, then traverse and see if one fits */
3408 rc = ecore_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images);
3409 if (rc != ECORE_SUCCESS || !num_images)
3410 return ECORE_INVAL;
3411
3412 for (i = 0; i < num_images; i++) {
3413 rc = ecore_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
3414 &mfw_image_att, i);
3415 if (rc != ECORE_SUCCESS)
3416 return rc;
3417
3418 if (type == mfw_image_att.image_type)
3419 break;
3420 }
3421 if (i == num_images) {
3422 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3423 "Failed to find nvram image of type %08x\n",
3424 image_id);
3425 return ECORE_INVAL;
3426 }
3427
3428 p_image_att->start_addr = mfw_image_att.nvm_start_addr;
3429 p_image_att->length = mfw_image_att.len;
3430
3431 return ECORE_SUCCESS;
3432 }
3433
3434 enum _ecore_status_t ecore_mcp_get_nvm_image(struct ecore_hwfn *p_hwfn,
3435 struct ecore_ptt *p_ptt,
3436 enum ecore_nvm_images image_id,
3437 u8 *p_buffer, u32 buffer_len)
3438 {
3439 struct ecore_nvm_image_att image_att;
3440 enum _ecore_status_t rc;
3441
3442 OSAL_MEM_ZERO(p_buffer, buffer_len);
3443
3444 rc = ecore_mcp_get_nvm_image_att(p_hwfn, p_ptt, image_id, &image_att);
3445 if (rc != ECORE_SUCCESS)
3446 return rc;
3447
3448 /* Validate sizes - both the image's and the supplied buffer's */
3449 if (image_att.length <= 4) {
3450 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3451 "Image [%d] is too small - only %d bytes\n",
3452 image_id, image_att.length);
3453 return ECORE_INVAL;
3454 }
3455
3456 	/* Each NVM image is suffixed by a CRC; the upper layer has no need for it */
3457 image_att.length -= 4;
3458
3459 if (image_att.length > buffer_len) {
3460 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3461 "Image [%d] is too big - %08x bytes where only %08x are available\n",
3462 image_id, image_att.length, buffer_len);
3463 return ECORE_NOMEM;
3464 }
3465
3466 return ecore_mcp_nvm_read(p_hwfn->p_dev, image_att.start_addr,
3467 p_buffer, image_att.length);
3468 }
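
/*
 * Illustrative sketch (not from the original driver): reading the iSCSI
 * configuration image into a caller-supplied buffer.  The buffer size is a
 * placeholder and must be at least the image length minus its CRC.
 *
 *	u8 cfg_buf[4096];
 *
 *	if (ecore_mcp_get_nvm_image(p_hwfn, p_ptt, ECORE_NVM_IMAGE_ISCSI_CFG,
 *	    cfg_buf, sizeof(cfg_buf)) != ECORE_SUCCESS)
 *		DP_NOTICE(p_hwfn, false, "Failed to read iSCSI cfg image\n");
 */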
3469
3470 enum _ecore_status_t
3471 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
3472 struct ecore_ptt *p_ptt,
3473 struct ecore_temperature_info *p_temp_info)
3474 {
3475 struct ecore_temperature_sensor *p_temp_sensor;
3476 struct temperature_status_stc mfw_temp_info;
3477 struct ecore_mcp_mb_params mb_params;
3478 u32 val;
3479 enum _ecore_status_t rc;
3480 u8 i;
3481
3482 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3483 mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
3484 mb_params.p_data_dst = &mfw_temp_info;
3485 mb_params.data_dst_size = sizeof(mfw_temp_info);
3486 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3487 if (rc != ECORE_SUCCESS)
3488 return rc;
3489
3490 OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
3491 p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
3492 ECORE_MAX_NUM_OF_SENSORS);
3493 for (i = 0; i < p_temp_info->num_sensors; i++) {
3494 val = mfw_temp_info.sensor[i];
3495 p_temp_sensor = &p_temp_info->sensors[i];
3496 p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
3497 SENSOR_LOCATION_SHIFT;
3498 p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
3499 THRESHOLD_HIGH_SHIFT;
3500 p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
3501 CRITICAL_TEMPERATURE_SHIFT;
3502 p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
3503 CURRENT_TEMP_SHIFT;
3504 }
3505
3506 return ECORE_SUCCESS;
3507 }
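
/*
 * Illustrative sketch (not from the original driver): dumping the current
 * reading of every sensor the MFW reports.
 *
 *	struct ecore_temperature_info info;
 *	u8 i;
 *
 *	if (ecore_mcp_get_temperature_info(p_hwfn, p_ptt, &info) ==
 *	    ECORE_SUCCESS)
 *		for (i = 0; i < info.num_sensors; i++)
 *			DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
 *				   "sensor %d: %u degrees\n", i,
 *				   info.sensors[i].current_temp);
 */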
3508
3509 enum _ecore_status_t ecore_mcp_get_mba_versions(
3510 struct ecore_hwfn *p_hwfn,
3511 struct ecore_ptt *p_ptt,
3512 struct ecore_mba_vers *p_mba_vers)
3513 {
3514 struct ecore_mcp_nvm_params params;
3515 enum _ecore_status_t rc;
3516 u32 buf_size;
3517
3518 	OSAL_MEM_ZERO(&params, sizeof(params));
3519 params.type = ECORE_MCP_NVM_RD;
3520 params.nvm_common.cmd = DRV_MSG_CODE_GET_MBA_VERSION;
3521 params.nvm_common.offset = 0;
3522 params.nvm_rd.buf = &(p_mba_vers->mba_vers[0]);
3523 params.nvm_rd.buf_size = &buf_size;
3524 	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3525
3526 if (rc != ECORE_SUCCESS)
3527 return rc;
3528
3529 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
3530 FW_MSG_CODE_NVM_OK)
3531 rc = ECORE_UNKNOWN_ERROR;
3532
3533 if (buf_size != MCP_DRV_NVM_BUF_LEN)
3534 rc = ECORE_UNKNOWN_ERROR;
3535
3536 return rc;
3537 }
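
/*
 * Illustrative sketch (not from the original driver): fetching the MBA
 * (boot agent) version words; on success the helper guarantees a full
 * MCP_DRV_NVM_BUF_LEN worth of data in mba_vers[].
 *
 *	struct ecore_mba_vers mba;
 *
 *	if (ecore_mcp_get_mba_versions(p_hwfn, p_ptt, &mba) == ECORE_SUCCESS)
 *		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "MBA version word 0: 0x%x\n",
 *			   mba.mba_vers[0]);
 */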
3538
3539 enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
3540 struct ecore_ptt *p_ptt,
3541 u64 *num_events)
3542 {
3543 u32 rsp;
3544
3545 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
3546 0, &rsp, (u32 *)num_events);
3547 }
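
/*
 * Note that ecore_mcp_cmd() returns its result through a u32 out parameter,
 * so on little-endian hosts only the low 32 bits of *num_events are
 * populated.  Illustrative sketch (not from the original driver):
 *
 *	u64 ecc_events = 0;
 *
 *	if (ecore_mcp_mem_ecc_events(p_hwfn, p_ptt, &ecc_events) ==
 *	    ECORE_SUCCESS)
 *		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "ECC events: %llu\n",
 *			   (unsigned long long)ecc_events);
 */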
3548
3549 static enum resource_id_enum
3550 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
3551 {
3552 enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
3553
3554 switch (res_id) {
3555 case ECORE_SB:
3556 mfw_res_id = RESOURCE_NUM_SB_E;
3557 break;
3558 case ECORE_L2_QUEUE:
3559 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
3560 break;
3561 case ECORE_VPORT:
3562 mfw_res_id = RESOURCE_NUM_VPORT_E;
3563 break;
3564 case ECORE_RSS_ENG:
3565 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
3566 break;
3567 case ECORE_PQ:
3568 mfw_res_id = RESOURCE_NUM_PQ_E;
3569 break;
3570 case ECORE_RL:
3571 mfw_res_id = RESOURCE_NUM_RL_E;
3572 break;
3573 case ECORE_MAC:
3574 case ECORE_VLAN:
3575 /* Each VFC resource can accommodate both a MAC and a VLAN */
3576 mfw_res_id = RESOURCE_VFC_FILTER_E;
3577 break;
3578 case ECORE_ILT:
3579 mfw_res_id = RESOURCE_ILT_E;
3580 break;
3581 case ECORE_LL2_QUEUE:
3582 mfw_res_id = RESOURCE_LL2_QUEUE_E;
3583 break;
3584 case ECORE_RDMA_CNQ_RAM:
3585 case ECORE_CMDQS_CQS:
3586 /* CNQ/CMDQS are the same resource */
3587 mfw_res_id = RESOURCE_CQS_E;
3588 break;
3589 case ECORE_RDMA_STATS_QUEUE:
3590 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
3591 break;
3592 case ECORE_BDQ:
3593 mfw_res_id = RESOURCE_BDQ_E;
3594 break;
3595 default:
3596 break;
3597 }
3598
3599 return mfw_res_id;
3600 }
3601
3602 #define ECORE_RESC_ALLOC_VERSION_MAJOR 2
3603 #define ECORE_RESC_ALLOC_VERSION_MINOR 0
3604 #define ECORE_RESC_ALLOC_VERSION \
3605 ((ECORE_RESC_ALLOC_VERSION_MAJOR << \
3606 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
3607 (ECORE_RESC_ALLOC_VERSION_MINOR << \
3608 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
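
/*
 * With the 2.0 values above, ECORE_RESC_ALLOC_VERSION expands to the
 * mailbox param
 *
 *	(2 << DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) |
 *	(0 << DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT)
 *
 * i.e. the driver-side HSI version is packed into the same two fields the
 * MFW echoes back via FW_MB_PARAM_RESOURCE_ALLOC_VERSION_{MAJOR,MINOR}.
 */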
3609
3610 struct ecore_resc_alloc_in_params {
3611 u32 cmd;
3612 enum ecore_resources res_id;
3613 u32 resc_max_val;
3614 };
3615
3616 struct ecore_resc_alloc_out_params {
3617 u32 mcp_resp;
3618 u32 mcp_param;
3619 u32 resc_num;
3620 u32 resc_start;
3621 u32 vf_resc_num;
3622 u32 vf_resc_start;
3623 u32 flags;
3624 };
3625
3626 static enum _ecore_status_t
3627 ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
3628 struct ecore_ptt *p_ptt,
3629 struct ecore_resc_alloc_in_params *p_in_params,
3630 struct ecore_resc_alloc_out_params *p_out_params)
3631 {
3632 struct ecore_mcp_mb_params mb_params;
3633 struct resource_info mfw_resc_info;
3634 enum _ecore_status_t rc;
3635
3636 OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));
3637
3638 mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
3639 if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
3640 DP_ERR(p_hwfn,
3641 "Failed to match resource %d [%s] with the MFW resources\n",
3642 p_in_params->res_id,
3643 ecore_hw_get_resc_name(p_in_params->res_id));
3644 return ECORE_INVAL;
3645 }
3646
3647 switch (p_in_params->cmd) {
3648 case DRV_MSG_SET_RESOURCE_VALUE_MSG:
3649 mfw_resc_info.size = p_in_params->resc_max_val;
3650 /* Fallthrough */
3651 case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
3652 break;
3653 default:
3654 DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
3655 p_in_params->cmd);
3656 return ECORE_INVAL;
3657 }
3658
3659 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3660 mb_params.cmd = p_in_params->cmd;
3661 mb_params.param = ECORE_RESC_ALLOC_VERSION;
3662 mb_params.p_data_src = &mfw_resc_info;
3663 mb_params.data_src_size = sizeof(mfw_resc_info);
3664 mb_params.p_data_dst = mb_params.p_data_src;
3665 mb_params.data_dst_size = mb_params.data_src_size;
3666
3667 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3668 "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
3669 p_in_params->cmd, p_in_params->res_id,
3670 ecore_hw_get_resc_name(p_in_params->res_id),
3671 ECORE_MFW_GET_FIELD(mb_params.param,
3672 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3673 ECORE_MFW_GET_FIELD(mb_params.param,
3674 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3675 p_in_params->resc_max_val);
3676
3677 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3678 if (rc != ECORE_SUCCESS)
3679 return rc;
3680
3681 p_out_params->mcp_resp = mb_params.mcp_resp;
3682 p_out_params->mcp_param = mb_params.mcp_param;
3683 p_out_params->resc_num = mfw_resc_info.size;
3684 p_out_params->resc_start = mfw_resc_info.offset;
3685 p_out_params->vf_resc_num = mfw_resc_info.vf_size;
3686 p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
3687 p_out_params->flags = mfw_resc_info.flags;
3688
3689 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3690 "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
3691 ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
3692 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3693 ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
3694 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3695 p_out_params->resc_num, p_out_params->resc_start,
3696 p_out_params->vf_resc_num, p_out_params->vf_resc_start,
3697 p_out_params->flags);
3698
3699 return ECORE_SUCCESS;
3700 }
3701
3702 enum _ecore_status_t
3703 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3704 enum ecore_resources res_id, u32 resc_max_val,
3705 u32 *p_mcp_resp)
3706 {
3707 struct ecore_resc_alloc_out_params out_params;
3708 struct ecore_resc_alloc_in_params in_params;
3709 enum _ecore_status_t rc;
3710
3711 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3712 in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
3713 in_params.res_id = res_id;
3714 in_params.resc_max_val = resc_max_val;
3715 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3716 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3717 &out_params);
3718 if (rc != ECORE_SUCCESS)
3719 return rc;
3720
3721 *p_mcp_resp = out_params.mcp_resp;
3722
3723 return ECORE_SUCCESS;
3724 }
3725
3726 enum _ecore_status_t
3727 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3728 enum ecore_resources res_id, u32 *p_mcp_resp,
3729 u32 *p_resc_num, u32 *p_resc_start)
3730 {
3731 struct ecore_resc_alloc_out_params out_params;
3732 struct ecore_resc_alloc_in_params in_params;
3733 enum _ecore_status_t rc;
3734
3735 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3736 in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3737 in_params.res_id = res_id;
3738 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3739 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3740 &out_params);
3741 if (rc != ECORE_SUCCESS)
3742 return rc;
3743
3744 *p_mcp_resp = out_params.mcp_resp;
3745
3746 if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3747 *p_resc_num = out_params.resc_num;
3748 *p_resc_start = out_params.resc_start;
3749 }
3750
3751 return ECORE_SUCCESS;
3752 }
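
/*
 * Illustrative sketch (not from the original driver): querying how many
 * vports the MFW assigned to this function and where they start.
 *
 *	u32 resp, num, start;
 *
 *	if (ecore_mcp_get_resc_info(p_hwfn, p_ptt, ECORE_VPORT, &resp,
 *	    &num, &start) == ECORE_SUCCESS &&
 *	    resp == FW_MSG_CODE_RESOURCE_ALLOC_OK)
 *		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
 *			   "%u vports starting at %u\n", num, start);
 */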
3753
3754 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
3755 struct ecore_ptt *p_ptt)
3756 {
3757 u32 mcp_resp, mcp_param;
3758
3759 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3760 &mcp_resp, &mcp_param);
3761 }
3762
3763 enum _ecore_status_t ecore_mcp_get_lldp_mac(struct ecore_hwfn *p_hwfn,
3764 struct ecore_ptt *p_ptt,
3765 u8 lldp_mac_addr[ETH_ALEN])
3766 {
3767 struct ecore_mcp_mb_params mb_params;
3768 struct mcp_mac lldp_mac;
3769 enum _ecore_status_t rc;
3770
3771 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3772 mb_params.cmd = DRV_MSG_CODE_GET_LLDP_MAC;
3773 mb_params.p_data_dst = &lldp_mac;
3774 mb_params.data_dst_size = sizeof(lldp_mac);
3775 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3776 if (rc != ECORE_SUCCESS)
3777 return rc;
3778
3779 if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
3780 DP_NOTICE(p_hwfn, false,
3781 "MFW lacks support for the GET_LLDP_MAC command [resp 0x%08x]\n",
3782 mb_params.mcp_resp);
3783 return ECORE_INVAL;
3784 }
3785
3786 *(u16 *)lldp_mac_addr = *(u16 *)&lldp_mac.mac_upper;
3787 *(u32 *)(lldp_mac_addr + 2) = lldp_mac.mac_lower;
3788
3789 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3790 "LLDP MAC address is %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n",
3791 lldp_mac_addr[0], lldp_mac_addr[1], lldp_mac_addr[2],
3792 lldp_mac_addr[3], lldp_mac_addr[4], lldp_mac_addr[5]);
3793
3794 return ECORE_SUCCESS;
3795 }
3796
3797 enum _ecore_status_t ecore_mcp_set_lldp_mac(struct ecore_hwfn *p_hwfn,
3798 struct ecore_ptt *p_ptt,
3799 u8 lldp_mac_addr[ETH_ALEN])
3800 {
3801 struct ecore_mcp_mb_params mb_params;
3802 struct mcp_mac lldp_mac;
3803 enum _ecore_status_t rc;
3804
3805 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3806 "Configuring LLDP MAC address to %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n",
3807 lldp_mac_addr[0], lldp_mac_addr[1], lldp_mac_addr[2],
3808 lldp_mac_addr[3], lldp_mac_addr[4], lldp_mac_addr[5]);
3809
3810 OSAL_MEM_ZERO(&lldp_mac, sizeof(lldp_mac));
3811 lldp_mac.mac_upper = *(u16 *)lldp_mac_addr;
3812 lldp_mac.mac_lower = *(u32 *)(lldp_mac_addr + 2);
3813
3814 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3815 mb_params.cmd = DRV_MSG_CODE_SET_LLDP_MAC;
3816 mb_params.p_data_src = &lldp_mac;
3817 mb_params.data_src_size = sizeof(lldp_mac);
3818 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3819 if (rc != ECORE_SUCCESS)
3820 return rc;
3821
3822 if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
3823 DP_NOTICE(p_hwfn, false,
3824 "MFW lacks support for the SET_LLDP_MAC command [resp 0x%08x]\n",
3825 mb_params.mcp_resp);
3826 return ECORE_INVAL;
3827 }
3828
3829 return ECORE_SUCCESS;
3830 }
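
/*
 * Illustrative sketch (not from the original driver): reading the current
 * LLDP MAC address and writing it back unchanged; a real caller would
 * substitute its own address between the two calls.
 *
 *	u8 mac[ETH_ALEN];
 *
 *	if (ecore_mcp_get_lldp_mac(p_hwfn, p_ptt, mac) == ECORE_SUCCESS)
 *		(void) ecore_mcp_set_lldp_mac(p_hwfn, p_ptt, mac);
 */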
3831
3832 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
3833 struct ecore_ptt *p_ptt,
3834 u32 param, u32 *p_mcp_resp,
3835 u32 *p_mcp_param)
3836 {
3837 enum _ecore_status_t rc;
3838
3839 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3840 p_mcp_resp, p_mcp_param);
3841 if (rc != ECORE_SUCCESS)
3842 return rc;
3843
3844 if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3845 DP_INFO(p_hwfn,
3846 "The resource command is unsupported by the MFW\n");
3847 return ECORE_NOTIMPL;
3848 }
3849
3850 if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3851 u8 opcode = ECORE_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3852
3853 DP_NOTICE(p_hwfn, false,
3854 "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3855 param, opcode);
3856 return ECORE_INVAL;
3857 }
3858
3859 return rc;
3860 }
3861
3862 enum _ecore_status_t
3863 __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3864 struct ecore_resc_lock_params *p_params)
3865 {
3866 u32 param = 0, mcp_resp, mcp_param;
3867 u8 opcode;
3868 enum _ecore_status_t rc;
3869
3870 switch (p_params->timeout) {
3871 case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
3872 opcode = RESOURCE_OPCODE_REQ;
3873 p_params->timeout = 0;
3874 break;
3875 case ECORE_MCP_RESC_LOCK_TO_NONE:
3876 opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3877 p_params->timeout = 0;
3878 break;
3879 default:
3880 opcode = RESOURCE_OPCODE_REQ_W_AGING;
3881 break;
3882 }
3883
3884 ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3885 ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3886 ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3887
3888 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3889 "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3890 param, p_params->timeout, opcode, p_params->resource);
3891
3892 /* Attempt to acquire the resource */
3893 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3894 &mcp_param);
3895 if (rc != ECORE_SUCCESS)
3896 return rc;
3897
3898 /* Analyze the response */
3899 p_params->owner = ECORE_MFW_GET_FIELD(mcp_param,
3900 RESOURCE_CMD_RSP_OWNER);
3901 opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3902
3903 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3904 "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3905 mcp_param, opcode, p_params->owner);
3906
3907 switch (opcode) {
3908 case RESOURCE_OPCODE_GNT:
3909 p_params->b_granted = true;
3910 break;
3911 case RESOURCE_OPCODE_BUSY:
3912 p_params->b_granted = false;
3913 break;
3914 default:
3915 DP_NOTICE(p_hwfn, false,
3916 "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3917 mcp_param, opcode);
3918 return ECORE_INVAL;
3919 }
3920
3921 return ECORE_SUCCESS;
3922 }
3923
3924 enum _ecore_status_t
3925 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3926 struct ecore_resc_lock_params *p_params)
3927 {
3928 u32 retry_cnt = 0;
3929 enum _ecore_status_t rc;
3930
3931 do {
3932 /* No need for an interval before the first iteration */
3933 if (retry_cnt) {
3934 if (p_params->sleep_b4_retry) {
3935 u16 retry_interval_in_ms =
3936 DIV_ROUND_UP(p_params->retry_interval,
3937 1000);
3938
3939 OSAL_MSLEEP(retry_interval_in_ms);
3940 } else {
3941 OSAL_UDELAY(p_params->retry_interval);
3942 }
3943 }
3944
3945 rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3946 if (rc != ECORE_SUCCESS)
3947 return rc;
3948
3949 if (p_params->b_granted)
3950 break;
3951 } while (retry_cnt++ < p_params->retry_num);
3952
3953 return ECORE_SUCCESS;
3954 }
3955
3956 enum _ecore_status_t
3957 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3958 struct ecore_resc_unlock_params *p_params)
3959 {
3960 u32 param = 0, mcp_resp, mcp_param;
3961 u8 opcode;
3962 enum _ecore_status_t rc;
3963
3964 opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3965 : RESOURCE_OPCODE_RELEASE;
3966 ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3967 ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3968
3969 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3970 "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3971 param, opcode, p_params->resource);
3972
3973 /* Attempt to release the resource */
3974 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3975 &mcp_param);
3976 if (rc != ECORE_SUCCESS)
3977 return rc;
3978
3979 /* Analyze the response */
3980 opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3981
3982 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3983 "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3984 mcp_param, opcode);
3985
3986 switch (opcode) {
3987 case RESOURCE_OPCODE_RELEASED_PREVIOUS:
3988 DP_INFO(p_hwfn,
3989 "Resource unlock request for an already released resource [%d]\n",
3990 p_params->resource);
3991 /* Fallthrough */
3992 case RESOURCE_OPCODE_RELEASED:
3993 p_params->b_released = true;
3994 break;
3995 case RESOURCE_OPCODE_WRONG_OWNER:
3996 p_params->b_released = false;
3997 break;
3998 default:
3999 DP_NOTICE(p_hwfn, false,
4000 "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
4001 mcp_param, opcode);
4002 return ECORE_INVAL;
4003 }
4004
4005 return ECORE_SUCCESS;
4006 }
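
/*
 * Illustrative sketch (not from the original driver): taking and releasing
 * an MFW resource lock with the default aging timeout.  'resc' stands in
 * for a caller-chosen resource id.
 *
 *	struct ecore_resc_lock_params lock_params;
 *	struct ecore_resc_unlock_params unlock_params;
 *
 *	OSAL_MEM_ZERO(&lock_params, sizeof(lock_params));
 *	lock_params.resource = resc;
 *	lock_params.timeout = ECORE_MCP_RESC_LOCK_TO_DEFAULT;
 *	lock_params.retry_num = 10;
 *	lock_params.retry_interval = 10000;	(microseconds between retries)
 *	lock_params.sleep_b4_retry = true;
 *	if (ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock_params) ==
 *	    ECORE_SUCCESS && lock_params.b_granted) {
 *		... critical section ...
 *		OSAL_MEM_ZERO(&unlock_params, sizeof(unlock_params));
 *		unlock_params.resource = resc;
 *		(void) ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
 *	}
 */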
4007
4008 enum _ecore_status_t
4009 ecore_mcp_update_fcoe_cvid(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4010 u16 vlan)
4011 {
4012 u32 resp = 0, param = 0;
4013 enum _ecore_status_t rc;
4014
4015 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OEM_UPDATE_FCOE_CVID,
4016 (u32)vlan << DRV_MB_PARAM_FCOE_CVID_SHIFT,
4017 			   &resp, &param);
4018 if (rc != ECORE_SUCCESS)
4019 DP_ERR(p_hwfn, "Failed to update fcoe vlan, rc = %d\n", rc);
4020
4021 return rc;
4022 }
4023
4024 enum _ecore_status_t
4025 ecore_mcp_update_fcoe_fabric_name(struct ecore_hwfn *p_hwfn,
4026 struct ecore_ptt *p_ptt, u8 *wwn)
4027 {
4028 struct ecore_mcp_mb_params mb_params;
4029 struct mcp_wwn fabric_name;
4030 enum _ecore_status_t rc;
4031
4032 OSAL_MEM_ZERO(&fabric_name, sizeof(fabric_name));
4033 fabric_name.wwn_upper = *(u32 *)wwn;
4034 fabric_name.wwn_lower = *(u32 *)(wwn + 4);
4035
4036 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
4037 mb_params.cmd = DRV_MSG_CODE_OEM_UPDATE_FCOE_FABRIC_NAME;
4038 mb_params.p_data_src = &fabric_name;
4039 mb_params.data_src_size = sizeof(fabric_name);
4040 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4041 if (rc != ECORE_SUCCESS)
4042 DP_ERR(p_hwfn, "Failed to update fcoe wwn, rc = %d\n", rc);
4043
4044 return rc;
4045 }
4046
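/*
 * Best-effort write of a WOL register through the MFW mailbox.  Failures
 * are only logged; the function is void, so the local rc value computed
 * below is never returned to the caller.
 */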
4047 void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4048 u32 offset, u32 val)
4049 {
4050 struct ecore_mcp_mb_params mb_params = {0};
4051 enum _ecore_status_t rc = ECORE_SUCCESS;
4052 u32 dword = val;
4053
4054 mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
4055 mb_params.param = offset;
4056 mb_params.p_data_src = &dword;
4057 mb_params.data_src_size = sizeof(dword);
4058
4059 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4060 if (rc != ECORE_SUCCESS) {
4061 DP_NOTICE(p_hwfn, false,
4062 "Failed to wol write request, rc = %d\n", rc);
4063 }
4064
4065 if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) {
4066 DP_NOTICE(p_hwfn, false,
4067 "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
4068 val, offset, mb_params.mcp_resp);
4069 rc = ECORE_UNKNOWN_ERROR;
4070 }
4071 }
4072
4073 enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
4074 struct ecore_ptt *p_ptt)
4075 {
4076 u32 mcp_resp;
4077 enum _ecore_status_t rc;
4078
4079 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
4080 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
4081 if (rc == ECORE_SUCCESS)
4082 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
4083 "MFW supported features: %08x\n",
4084 p_hwfn->mcp_info->capabilities);
4085
4086 return rc;
4087 }
4088
4089 enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
4090 struct ecore_ptt *p_ptt)
4091 {
4092 u32 mcp_resp, mcp_param, features;
4093
4094 features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
4095 DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE;
4096
4097 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
4098 features, &mcp_resp, &mcp_param);
4099 }
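
/*
 * Illustrative sketch (not from the original driver): advertising the
 * driver's supported features and then caching what the MFW supports.
 *
 *	if (ecore_mcp_set_capabilities(p_hwfn, p_ptt) == ECORE_SUCCESS &&
 *	    ecore_mcp_get_capabilities(p_hwfn, p_ptt) == ECORE_SUCCESS)
 *		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "capabilities 0x%08x\n",
 *			   p_hwfn->mcp_info->capabilities);
 */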
4100