/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * File : ecore_mcp.c
 */
#include <sys/cdefs.h>
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
#include "nvm_map.h"
#include "nvm_cfg.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "reg_addr.h"
#include "ecore_hw.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_iov_api.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"
#include "ecore_sp_commands.h"
#include "ecore_cxt.h"

#define CHIP_MCP_RESP_ITER_US 10
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)

#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
#define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
	ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
		 _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_OFFSET)

#define MCP_BYTES_PER_MBIT_OFFSET 17

#ifdef _NTDDK_
#pragma warning(push)
#pragma warning(disable : 28167)
#pragma warning(disable : 28123)
#endif

#ifndef ASIC_ONLY
static int loaded;
static int loaded_port[MAX_NUM_PORTS] = { 0 };
#endif

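/* Returns true only after ecore_load_mcp_offsets() has located the MFW's
 * public data section in shared memory.
 */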
bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;
	return true;
}

void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

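/* Copy the MFW mailbox (big-endian in shared memory) into the driver's
 * CPU-order shadow buffer; the leading length dword is skipped.
 */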
void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	OSAL_BE32 tmp;
	u32 i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
		return;
#endif

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = ecore_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->mfw_mb_addr +
			       (i << 2) + sizeof(u32));

		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			OSAL_BE32_TO_CPU(tmp);
	}
}

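/* A single in-flight mailbox command, tracked on mcp_info->cmd_list and
 * matched to the MFW response by its sequence number.
 */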
struct ecore_mcp_cmd_elem {
	osal_list_entry_t list;
	struct ecore_mcp_mb_params *p_mb_params;
	u16 expected_seq_num;
	bool b_is_completed;
};

/* Must be called while cmd_lock is acquired */
static struct ecore_mcp_cmd_elem *
ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
		       struct ecore_mcp_mb_params *p_mb_params,
		       u16 expected_seq_num)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
				 sizeof(*p_cmd_elem));
	if (!p_cmd_elem) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct ecore_mcp_cmd_elem'\n");
		goto out;
	}

	p_cmd_elem->p_mb_params = p_mb_params;
	p_cmd_elem->expected_seq_num = expected_seq_num;
	OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
out:
	return p_cmd_elem;
}

/* Must be called while cmd_lock is acquired */
static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
				   struct ecore_mcp_cmd_elem *p_cmd_elem)
{
	OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
	OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);
}

/* Must be called while cmd_lock is acquired */
static struct ecore_mcp_cmd_elem *
ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
				 struct ecore_mcp_cmd_elem) {
		if (p_cmd_elem->expected_seq_num == seq_num)
			return p_cmd_elem;
	}

	return OSAL_NULL;
}

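/* Release all MCP-related resources: the mailbox shadow buffers, any
 * commands still pending on cmd_list, and (when lock allocation is
 * configured) the cmd/link mutexes themselves.
 */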
enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;

		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);

		OSAL_MUTEX_ACQUIRE(&p_hwfn->mcp_info->cmd_lock);
		OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
					      &p_hwfn->mcp_info->cmd_list, list,
					      struct ecore_mcp_cmd_elem) {
			ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		}
		OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->cmd_lock);

#ifdef CONFIG_ECORE_LOCK_ALLOC
		OSAL_MUTEX_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
		OSAL_MUTEX_DEALLOC(&p_hwfn->mcp_info->link_lock);
#endif
	}

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
	p_hwfn->mcp_info = OSAL_NULL;

	return ECORE_SUCCESS;
}

/* Maximum of 1 sec to wait for the SHMEM ready indication */
#define ECORE_MCP_SHMEM_RDY_MAX_RETRIES 20
#define ECORE_MCP_SHMEM_RDY_ITER_MS 50

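/* Locate the MFW's public data in shared memory and derive the per-PF
 * mailbox addresses, the initial driver/pulse sequence numbers and the MCP
 * history counter.
 */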
enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
	u8 cnt = ECORE_MCP_SHMEM_RDY_MAX_RETRIES;
	u8 msec = ECORE_MCP_SHMEM_RDY_ITER_MS;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
		p_info->public_base = 0;
		return ECORE_INVAL;
	}
#endif

	p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return ECORE_INVAL;

	p_info->public_base |= GRCBASE_MCP;

	/* Get the MFW MB address and number of supported messages */
	mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
					      p_info->mfw_mb_addr);

	/* @@@TBD:
	 * The driver can notify that there was an MCP reset, and read the SHMEM
	 * values before the MFW has completed initializing them.
	 * As a temporary solution, the "sup_msgs" field is used as a data ready
	 * indication.
	 * This should be replaced with an actual indication when it is provided
	 * by the MFW.
	 */
	while (!p_info->mfw_mb_length && --cnt) {
		OSAL_MSLEEP(msec);
		p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
						      p_info->mfw_mb_addr);
	}

	if (!cnt) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to get the SHMEM ready notification after %d msec\n",
			  ECORE_MCP_SHMEM_RDY_MAX_RETRIES * msec);
		return ECORE_TIMEOUT;
	}

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return ECORE_SUCCESS;
}

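/* Allocate and initialize the per-hwfn MCP info: locks, the pending command
 * list, and the MFW mailbox shadow buffers. Returns success even when no
 * MFW is present; public_base is left at zero so ecore_mcp_is_init()
 * reports it.
 */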
enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(*p_hwfn->mcp_info));
	if (!p_hwfn->mcp_info) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate mcp_info\n");
		return ECORE_NOMEM;
	}
	p_info = p_hwfn->mcp_info;

	/* Initialize the MFW spinlocks */
#ifdef CONFIG_ECORE_LOCK_ALLOC
	if (OSAL_MUTEX_ALLOC(p_hwfn, &p_info->cmd_lock)) {
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
		return ECORE_NOMEM;
	}
	if (OSAL_MUTEX_ALLOC(p_hwfn, &p_info->link_lock)) {
		OSAL_MUTEX_DEALLOC(&p_info->cmd_lock);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
		return ECORE_NOMEM;
	}
#endif
	OSAL_MUTEX_INIT(&p_info->cmd_lock);
	OSAL_MUTEX_INIT(&p_info->link_lock);

	OSAL_LIST_INIT(&p_info->cmd_list);

	if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates that
		 * the MCP is not initialized
		 */
		return ECORE_SUCCESS;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	if (p_info->mfw_mb_cur == OSAL_NULL || p_info->mfw_mb_shadow == OSAL_NULL)
		goto err;

	return ECORE_SUCCESS;

err:
	DP_NOTICE(p_hwfn, false, "Failed to allocate mcp memory\n");
	ecore_mcp_free(p_hwfn);
	return ECORE_NOMEM;
}

static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Use MCP history register to check if MCP reset occurred between init
	 * time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
			   p_hwfn->mcp_info->mcp_hist, generic_por_0);

		ecore_load_mcp_offsets(p_hwfn, p_ptt);
		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
}

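/* Request an MCP reset via the mailbox and poll the MCP history register
 * (MISCS_REG_GENERIC_POR_0) until its value changes, indicating the reset
 * has taken place.
 */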
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
#endif

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
		return ECORE_ABORTED;
	}

	/* Ensure that only a single thread is accessing the mailbox */
	OSAL_MUTEX_ACQUIRE(&p_hwfn->mcp_info->cmd_lock);

	org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Set drv command along with the updated sequence */
	ecore_mcp_reread_offsets(p_hwfn, p_ptt);
	seq = ++p_hwfn->mcp_info->drv_mb_seq;
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		/* Give the FW up to 500 msec (50*1000*10usec) */
	} while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
						MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < ECORE_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = ECORE_AGAIN;
	}

	OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->cmd_lock);

	return rc;
}

/* Must be called while cmd_lock is acquired */
static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	/* There is at most one pending command at a certain time, and if it
	 * exists - it is placed at the HEAD of the list.
	 */
	if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
		p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
						   struct ecore_mcp_cmd_elem,
						   list);
		return !p_cmd_elem->b_is_completed;
	}

	return false;
}

/* Must be called while cmd_lock is acquired */
static enum _ecore_status_t
ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_mb_params *p_mb_params;
	struct ecore_mcp_cmd_elem *p_cmd_elem;
	u32 mcp_resp;
	u16 seq_num;

	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

	/* Return if no new non-handled response has been received */
	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
		return ECORE_AGAIN;

	p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
	if (!p_cmd_elem) {
		DP_ERR(p_hwfn,
		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
		       seq_num);
		return ECORE_UNKNOWN_ERROR;
	}

	p_mb_params = p_cmd_elem->p_mb_params;

	/* Get the MFW response along with the sequence number */
	p_mb_params->mcp_resp = mcp_resp;

	/* Get the MFW param */
	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

	/* Get the union data */
	if (p_mb_params->p_data_dst != OSAL_NULL &&
	    p_mb_params->data_dst_size) {
		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
				      OFFSETOF(struct public_drv_mb,
					       union_data);
		ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				  union_data_addr, p_mb_params->data_dst_size);
	}

	p_cmd_elem->b_is_completed = true;

	return ECORE_SUCCESS;
}

/* Must be called while cmd_lock is acquired */
static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_mcp_mb_params *p_mb_params,
				      u16 seq_num)
{
	union drv_union_data union_data;
	u32 union_data_addr;

	/* Set the union data */
	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  OFFSETOF(struct public_drv_mb, union_data);
	OSAL_MEM_ZERO(&union_data, sizeof(union_data));
	if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
		OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
			    p_mb_params->data_src_size);
	ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
			sizeof(union_data));

	/* Set the drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

	/* Set the drv command along with the sequence number */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "MFW mailbox: command 0x%08x param 0x%08x\n",
		   (p_mb_params->cmd | seq_num), p_mb_params->param);
}

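/* Once the MFW is deemed unresponsive, block all further mailbox commands
 * to avoid piling up requests; ecore_mcp_cmd_and_union() checks this flag.
 */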
static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
				       bool block_cmd)
{
	p_hwfn->mcp_info->b_block_cmd = block_cmd;

	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
		block_cmd ? "Block" : "Unblock");
}

static void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;

	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
	cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
	cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
	cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);

	DP_NOTICE(p_hwfn, false,
		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
}

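/* Core mailbox flow: wait for the mailbox to become free, queue a command
 * element with the next sequence number, write the command to shmem, and
 * then poll (sleeping or busy-waiting per the CAN_SLEEP flag) for the MFW
 * response. On timeout the command element is dropped and, unless
 * AVOID_BLOCK is set, further mailbox commands are blocked.
 */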
static enum _ecore_status_t
_ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_mcp_mb_params *p_mb_params,
			 u32 max_retries, u32 usecs)
{
	u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
	struct ecore_mcp_cmd_elem *p_cmd_elem;
	u16 seq_num;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Wait until the mailbox is non-occupied */
	do {
		/* Exit the loop if there is no pending command, or if the
		 * pending command is completed during this iteration.
		 * The cmd_lock stays acquired until the command is sent.
		 */

		OSAL_MUTEX_ACQUIRE(&p_hwfn->mcp_info->cmd_lock);

		if (!ecore_mcp_has_pending_cmd(p_hwfn))
			break;

		rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (rc == ECORE_SUCCESS)
			break;
		else if (rc != ECORE_AGAIN)
			goto err;

		OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->cmd_lock);
		if (ECORE_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
			OSAL_MSLEEP(msecs);
		} else {
			OSAL_UDELAY(usecs);
		}
		OSAL_MFW_CMD_PREEMPT(p_hwfn);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return ECORE_AGAIN;
	}

	/* Send the mailbox command */
	ecore_mcp_reread_offsets(p_hwfn, p_ptt);
	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
	p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
	if (!p_cmd_elem) {
		rc = ECORE_NOMEM;
		goto err;
	}

	__ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
	OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->cmd_lock);

	/* Wait for the MFW response */
	do {
		/* Exit the loop if the command is already completed, or if the
		 * command is completed during this iteration.
		 * The cmd_lock stays acquired until the list element is
		 * removed.
		 */

		if (ECORE_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
			OSAL_MSLEEP(msecs);
		} else {
			OSAL_UDELAY(usecs);
		}
		OSAL_MUTEX_ACQUIRE(&p_hwfn->mcp_info->cmd_lock);

		if (p_cmd_elem->b_is_completed)
			break;

		rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (rc == ECORE_SUCCESS)
			break;
		else if (rc != ECORE_AGAIN)
			goto err;

		OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->cmd_lock);
		OSAL_MFW_CMD_PREEMPT(p_hwfn);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		ecore_mcp_print_cpu_info(p_hwfn, p_ptt);

		OSAL_MUTEX_ACQUIRE(&p_hwfn->mcp_info->cmd_lock);
		ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->cmd_lock);

		if (!ECORE_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
			ecore_mcp_cmd_set_blocking(p_hwfn, true);
		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
		return ECORE_AGAIN;
	}

	ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
	OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->cmd_lock);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
		   p_mb_params->mcp_resp, p_mb_params->mcp_param,
		   (cnt * usecs) / 1000, (cnt * usecs) % 1000);

	/* Clear the sequence number from the MFW response */
	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

	return ECORE_SUCCESS;

err:
	OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->cmd_lock);
	return rc;
}

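/* Validate the request, pick polling parameters for the platform, and hand
 * off to _ecore_mcp_cmd_and_union(). Rejects commands when the MFW is not
 * initialized, when the payload exceeds the union size, or when mailbox
 * commands are currently blocked.
 */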
static enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt,
						    struct ecore_mcp_mb_params *p_mb_params)
{
	osal_size_t union_data_size = sizeof(union drv_union_data);
	u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
	u32 usecs = CHIP_MCP_RESP_ITER_US;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		usecs = EMUL_MCP_RESP_ITER_US;
	/* There is a built-in delay of 100usec in each MFW response read */
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		max_retries /= 10;
#endif
	if (ECORE_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
		max_retries = DIV_ROUND_UP(max_retries, 1000);
		usecs *= 1000;
	}

	/* MCP not initialized */
	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}

	if (p_mb_params->data_src_size > union_data_size ||
	    p_mb_params->data_dst_size > union_data_size) {
		DP_ERR(p_hwfn,
		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
		       p_mb_params->data_src_size, p_mb_params->data_dst_size,
		       union_data_size);
		return ECORE_INVAL;
	}

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return ECORE_ABORTED;
	}

	return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
					usecs);
}

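/* Simple mailbox command without union data: send cmd/param and return the
 * MFW's response and parameter dwords.
 */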
enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt, u32 cmd, u32 param,
				   u32 *o_mcp_resp, u32 *o_mcp_param)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
			loaded--;
			loaded_port[p_hwfn->port_id]--;
			DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
				   loaded);
		}
		return ECORE_SUCCESS;
	}
#endif

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 i_txn_size,
					  u32 *i_buf)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_src = i_buf;
	mb_params.data_src_size = (u8)i_txn_size;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}

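/* NVM read through the mailbox: the response payload is copied out of the
 * union data area, and its actual size is returned by the MFW in mcp_param.
 */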
enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 *o_txn_size,
					  u32 *o_buf)
{
	struct ecore_mcp_mb_params mb_params;
	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = raw_data;

	/* Use the maximal value since the actual one is part of the response */
	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	OSAL_MEMCPY(o_buf, raw_data, *o_txn_size);

	return ECORE_SUCCESS;
}

#ifndef ASIC_ONLY
static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
				    u32 *p_load_code)
{
	static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	if (!loaded) {
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
	} else if (!loaded_port[p_hwfn->port_id]) {
		load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
	} else {
		load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	/* On CMT, always tell that it's engine */
	if (ECORE_IS_CMT(p_hwfn->p_dev))
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	*p_load_code = load_phase;
	loaded++;
	loaded_port[p_hwfn->port_id]++;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
		   *p_load_code, loaded, p_hwfn->port_id,
		   loaded_port[p_hwfn->port_id]);
}
#endif

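/* Decide whether the driver may force-load when another driver instance is
 * already registered with the MFW, based on the roles of both drivers and
 * the user's override setting.
 */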
static bool
ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
			 enum ecore_override_force_load override_force_load)
{
	bool can_force_load = false;

	switch (override_force_load) {
	case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
		can_force_load = true;
		break;
	case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
		can_force_load = false;
		break;
	default:
		can_force_load = (drv_role == DRV_ROLE_OS &&
				  exist_drv_role == DRV_ROLE_PREBOOT) ||
				 (drv_role == DRV_ROLE_KDUMP &&
				  exist_drv_role == DRV_ROLE_OS);
		break;
	}

	return can_force_load;
}

enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			   &resp, &param);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send cancel load request, rc = %d\n", rc);
		return rc;
	}

	if (resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The cancel load command is unsupported by the MFW\n");
		return ECORE_NOTIMPL;
	}

	return ECORE_SUCCESS;
}

#define CONFIG_ECORE_L2_BITMAP_IDX	(0x1 << 0)
#define CONFIG_ECORE_SRIOV_BITMAP_IDX	(0x1 << 1)
#define CONFIG_ECORE_ROCE_BITMAP_IDX	(0x1 << 2)
#define CONFIG_ECORE_IWARP_BITMAP_IDX	(0x1 << 3)
#define CONFIG_ECORE_FCOE_BITMAP_IDX	(0x1 << 4)
#define CONFIG_ECORE_ISCSI_BITMAP_IDX	(0x1 << 5)
#define CONFIG_ECORE_LL2_BITMAP_IDX	(0x1 << 6)

static u32 ecore_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

#ifdef CONFIG_ECORE_L2
	config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_SRIOV
	config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ROCE
	config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_IWARP
	config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_FCOE
	config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ISCSI
	config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_LL2
	config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
#endif

	return config_bitmap;
}

struct ecore_load_req_in_params {
	u8 hsi_ver;
#define ECORE_LOAD_REQ_HSI_VER_DEFAULT	0
#define ECORE_LOAD_REQ_HSI_VER_1	1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct ecore_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};

static enum _ecore_status_t
__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		     struct ecore_load_req_in_params *p_in_params,
		     struct ecore_load_req_out_params *p_out_params)
{
	struct ecore_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&load_req, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
		      p_in_params->timeout_val);
	SET_MFW_FIELD(load_req.misc0, (u64)LOAD_REQ_FORCE,
		      p_in_params->force_cmd);
	SET_MFW_FIELD(load_req.misc0, (u64)LOAD_REQ_FLAGS0,
		      p_in_params->avoid_eng_reset);

	hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);
	mb_params.flags = ECORE_MB_FLAG_CAN_SLEEP | ECORE_MB_FLAG_AVOID_BLOCK;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0, load_req.drv_ver_1,
			   load_req.fw_ver, load_req.misc0,
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0, load_rsp.drv_ver_1,
			   load_rsp.fw_ver, load_rsp.misc0,
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
			LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return ECORE_SUCCESS;
}

static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role,
				   u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case ECORE_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case ECORE_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	}
}

enum ecore_load_req_force {
	ECORE_LOAD_REQ_FORCE_NONE,
	ECORE_LOAD_REQ_FORCE_PF,
	ECORE_LOAD_REQ_FORCE_ALL,
};

static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd,
				    u8 *p_mfw_force_cmd)
{
	switch (force_cmd) {
	case ECORE_LOAD_REQ_FORCE_NONE:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
		break;
	case ECORE_LOAD_REQ_FORCE_PF:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
		break;
	case ECORE_LOAD_REQ_FORCE_ALL:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
		break;
	}
}

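/* Negotiate a load with the MFW: send a LOAD_REQ, fall back to the old HSI
 * if the MFW refuses the current one, and retry with a force command when
 * the MFW reports that another driver holds the device and forcing is
 * permitted. On success, p_params->load_code reports the load phase
 * (engine/port/function) granted to this PF.
 */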
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_load_req_params *p_params)
{
	struct ecore_load_req_out_params out_params;
	struct ecore_load_req_in_params in_params;
	u8 mfw_drv_role = 0, mfw_force_cmd;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
		return ECORE_SUCCESS;
	}
#endif

	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = ECORE_VERSION;
	in_params.drv_ver_1 = ecore_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role);
	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");

		in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
		OSAL_MEM_ZERO(&out_params, sizeof(out_params));
		rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
					  &out_params);
		if (rc != ECORE_SUCCESS)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		if (ecore_mcp_can_force_load(in_params.drv_role,
					     out_params.exist_drv_role,
					     p_params->override_force_load)) {
			DP_INFO(p_hwfn,
				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}]\n",
				in_params.drv_role, in_params.fw_ver,
				in_params.drv_ver_0, in_params.drv_ver_1,
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_0,
				out_params.exist_drv_ver_1);

			ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL,
						&mfw_force_cmd);

			in_params.force_cmd = mfw_force_cmd;
			OSAL_MEM_ZERO(&out_params, sizeof(out_params));
			rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
						  &out_params);
			if (rc != ECORE_SUCCESS)
				return rc;
		} else {
			DP_NOTICE(p_hwfn, false,
				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}] - Avoid\n",
				  in_params.drv_role, in_params.fw_ver,
				  in_params.drv_ver_0, in_params.drv_ver_1,
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);

			ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
			return ECORE_BUSY;
		}
	}

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 * This is unexpected since a quasi-FLR request was
			 * previously sent as part of ecore_hw_prepare().
			 */
			DP_NOTICE(p_hwfn, false,
				  "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
			return ECORE_INVAL;
		}
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return ECORE_BUSY;
	}

	p_params->load_code = out_params.load_code;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
			   &param);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send a LOAD_DONE command, rc = %d\n", rc);
		return rc;
	}

	if (resp == FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT) {
		DP_NOTICE(p_hwfn, false,
			  "Received a LOAD_REFUSED_REJECT response from the mfw\n");
		return ECORE_ABORTED;
	}

	/* Check if there is a DID mismatch between nvm-cfg/efuse */
	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
		DP_NOTICE(p_hwfn, false,
			  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_mb_params mb_params;
	u32 wol_param;

	switch (p_hwfn->p_dev->wol_config) {
	case ECORE_OV_WOL_DISABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
		break;
	case ECORE_OV_WOL_ENABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
		break;
	default:
		DP_NOTICE(p_hwfn, true,
			  "Unknown WoL configuration %02x\n",
			  p_hwfn->p_dev->wol_config);
		/* Fallthrough */
	case ECORE_OV_WOL_DEFAULT:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
	mb_params.param = wol_param;
	mb_params.flags = ECORE_MB_FLAG_CAN_SLEEP | ECORE_MB_FLAG_AVOID_BLOCK;

	return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_mb_params mb_params;
	struct mcp_mac wol_mac;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

	/* Set the primary MAC if WoL is enabled */
	if (p_hwfn->p_dev->wol_config == ECORE_OV_WOL_ENABLED) {
		u8 *p_mac = p_hwfn->p_dev->wol_mac;

		OSAL_MEM_ZERO(&wol_mac, sizeof(wol_mac));
		wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
		wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
				    p_mac[4] << 8 | p_mac[5];

		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFDOWN),
			   "Setting WoL MAC: %02x:%02x:%02x:%02x:%02x:%02x --> [%08x,%08x]\n",
			   p_mac[0], p_mac[1], p_mac[2], p_mac[3], p_mac[4],
			   p_mac[5], wol_mac.mac_upper, wol_mac.mac_lower);

		mb_params.p_data_src = &wol_mac;
		mb_params.data_src_size = sizeof(wol_mac);
	}

	return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

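/* Read the MFW's per-path bitmap of VFs that went through FLR, and kick off
 * the IOV FLR handling for any VFs that are marked.
 */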
static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     ECORE_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
					   path_addr +
					   OFFSETOF(struct public_path,
						    mcp_vf_disabled) +
					   sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		OSAL_VF_FLR_UPDATE(p_hwfn);
}

enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to pass ACK for VF flr to MFW\n");
		return ECORE_TIMEOUT;
	}

	/* TMP - clear the ACK bits; should be done by MFW */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		ecore_wr(p_hwfn, p_ptt,
			 func_addr +
			 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
			 i * sizeof(u32), 0);

	return rc;
}

static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = ecore_rd(p_hwfn, p_ptt,
				     p_hwfn->mcp_info->port_addr +
				     OFFSETOF(struct public_port,
					      transceiver_data));

	DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
					    OFFSETOF(struct public_port,
						     transceiver_data)));

	transceiver_state = GET_MFW_FIELD(transceiver_state,
					  ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");

	OSAL_TRANSCEIVER_UPDATE(p_hwfn);
}

static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_mcp_link_state *p_link)
{
	u32 eee_status, val;

	p_link->eee_adv_caps = 0;
	p_link->eee_lp_adv_caps = 0;
	eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			      OFFSETOF(struct public_port, eee_status));
	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
}

static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct public_func *p_data,
				    int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	OSAL_MEM_ZERO(p_data, sizeof(*p_data));

	size = OSAL_MIN_T(u32, sizeof(*p_data),
			  SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
					      func_addr + (i << 2));

	return size;
}

static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
				    struct public_func *p_shmem_info)
{
	struct ecore_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	/* TODO - bandwidth min/max should have valid values of 1-100, as well
	 * as some indication that the feature is disabled. Until MFW/qlediag
	 * enforce those limitations, assume there is always a limit, and clamp
	 * out-of-range values to min `1' and max `100'.
	 */
	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_OFFSET;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_OFFSET;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}

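/* Process a link-change attention from the MFW: read the link status (or
 * the per-PF virtual link when supported), derive speed/duplex, apply the
 * min/max bandwidth configuration, decode the partner's advertised
 * abilities, and notify the upper layer.
 */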
static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 bool b_reset)
{
	struct ecore_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	OSAL_MUTEX_ACQUIRE(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = ecore_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  OFFSETOF(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status, (u32)(p_hwfn->mcp_info->port_addr +
					 OFFSETOF(struct public_port,
						  link_status)));
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}

	if (p_hwfn->b_drv_link_init) {
		/* Link indication with modern MFW arrives as per-PF
		 * indication.
		 */
		if (p_hwfn->mcp_info->capabilities &
		    FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
			struct public_func shmem_info;

			ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
						 MCP_PF_ID(p_hwfn));
			p_link->link_up = !!(shmem_info.status &
					     FUNC_STATUS_VIRTUAL_LINK_UP);
			ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
		} else {
			p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
		}
	} else {
		p_link->link_up = false;
	}

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
		p_link->link_up = 0;
	}

	/* Cache the total line speed in line_speed, since p_link->speed is
	 * later changed according to the bandwidth allocation.
	 */
	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);

	/* Min bandwidth configuration */
	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
	ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
					      p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
		ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

	OSAL_LINK_UPDATE(p_hwfn, p_ptt);
out:
	OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->link_lock);
}

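/* Translate the cached link_input parameters into an eth_phy_cfg and send
 * INIT_PHY (or LINK_RESET when bringing the link down) to the MFW, then
 * mimic a link-change attention so the driver state is updated immediately.
 */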
ecore_mcp_set_link(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,bool b_up)1591 enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
1592 struct ecore_ptt *p_ptt,
1593 bool b_up)
1594 {
1595 struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
1596 struct ecore_mcp_mb_params mb_params;
1597 struct eth_phy_cfg phy_cfg;
1598 enum _ecore_status_t rc = ECORE_SUCCESS;
1599 u32 cmd;
1600
1601 #ifndef ASIC_ONLY
1602 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
1603 return ECORE_SUCCESS;
1604 #endif
1605
1606 /* Set the shmem configuration according to params */
1607 OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
1608 cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
1609 if (!params->speed.autoneg)
1610 phy_cfg.speed = params->speed.forced_speed;
1611 phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
1612 phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
1613 phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
1614 phy_cfg.adv_speed = params->speed.advertised_speeds;
1615 phy_cfg.loopback_mode = params->loopback_mode;
1616
1617 /* Some MFWs advertise this capability regardless of whether it is
1618 * actually feasible. Given that at the very least adv_caps would be
1619 * set internally by ecore, we want to make sure LFA would still
1620 * work.
1621 */
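/* Illustrative example (derived from the packing below, not from a spec):
 * with tx_lpi_enable set, 1G/10G advertised and tx_lpi_timer == 8, eee_cfg
 * would carry EEE_CFG_EEE_ENABLED | EEE_CFG_TX_LPI | EEE_CFG_ADV_SPEED_1G |
 * EEE_CFG_ADV_SPEED_10G, plus (8 << EEE_TX_TIMER_USEC_OFFSET) masked by
 * EEE_TX_TIMER_USEC_MASK in the timer field.
 */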
1622 if ((p_hwfn->mcp_info->capabilities &
1623 FW_MB_PARAM_FEATURE_SUPPORT_EEE) &&
1624 params->eee.enable) {
1625 phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
1626 if (params->eee.tx_lpi_enable)
1627 phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
1628 if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
1629 phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
1630 if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
1631 phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
1632 phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
1633 EEE_TX_TIMER_USEC_OFFSET) &
1634 EEE_TX_TIMER_USEC_MASK;
1635 }
1636
1637 p_hwfn->b_drv_link_init = b_up;
1638
1639 if (b_up)
1640 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1641 "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
1642 phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
1643 phy_cfg.loopback_mode);
1644 else
1645 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
1646
1647 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1648 mb_params.cmd = cmd;
1649 mb_params.p_data_src = &phy_cfg;
1650 mb_params.data_src_size = sizeof(phy_cfg);
1651 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1652
1653 /* if mcp fails to respond we must abort */
1654 if (rc != ECORE_SUCCESS) {
1655 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1656 return rc;
1657 }
1658
1659 /* Mimic link-change attention, done for several reasons:
1660 * - On reset, there's no guarantee MFW would trigger
1661 * an attention.
1662 * - On initialization, older MFWs might not indicate link change
1663 * during LFA, so we'll never get an UP indication.
1664 */
1665 ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
1666
1667 return ECORE_SUCCESS;
1668 }
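/*
 * Usage sketch (illustrative only; assumes the caller already acquired a
 * valid PTT window and that link parameters were filled in link_input):
 *
 *	struct ecore_mcp_link_params *params;
 *
 *	params = ecore_mcp_get_link_params(p_hwfn);
 *	params->speed.autoneg = true;
 *	rc = ecore_mcp_set_link(p_hwfn, p_ptt, true);
 */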
1669
1670 u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
1671 struct ecore_ptt *p_ptt)
1672 {
1673 u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
1674
1675 /* TODO - Add support for VFs */
1676 if (IS_VF(p_hwfn->p_dev))
1677 return ECORE_INVAL;
1678
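/* The public shmem is addressed indirectly: read the offsize word of the
 * PATH section, derive this path's section address from it, and only then
 * read the process_kill field within that section.
 */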
1679 path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1680 PUBLIC_PATH);
1681 path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
1682 path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));
1683
1684 proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
1685 path_addr +
1686 OFFSETOF(struct public_path, process_kill)) &
1687 PROCESS_KILL_COUNTER_MASK;
1688
1689 return proc_kill_cnt;
1690 }
1691
1692 static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
1693 struct ecore_ptt *p_ptt)
1694 {
1695 struct ecore_dev *p_dev = p_hwfn->p_dev;
1696 u32 proc_kill_cnt;
1697
1698 /* Prevent possible attentions/interrupts during the recovery handling
1699 * and until the subsequent load phase, in which they are re-enabled.
1700 */
1701 ecore_int_igu_disable_int(p_hwfn, p_ptt);
1702
1703 DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
1704
1705 /* The following operations should be done once, and thus in CMT mode
1706 * are carried out by only the first HW function.
1707 */
1708 if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
1709 return;
1710
1711 if (p_dev->recov_in_prog) {
1712 DP_NOTICE(p_hwfn, false,
1713 "Ignoring the indication since a recovery process is already in progress\n");
1714 return;
1715 }
1716
1717 p_dev->recov_in_prog = true;
1718
1719 proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
1720 DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
1721
1722 OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
1723 }
1724
1725 static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
1726 struct ecore_ptt *p_ptt,
1727 enum MFW_DRV_MSG_TYPE type)
1728 {
1729 enum ecore_mcp_protocol_type stats_type;
1730 union ecore_mcp_protocol_stats stats;
1731 struct ecore_mcp_mb_params mb_params;
1732 u32 hsi_param;
1733 enum _ecore_status_t rc;
1734
1735 switch (type) {
1736 case MFW_DRV_MSG_GET_LAN_STATS:
1737 stats_type = ECORE_MCP_LAN_STATS;
1738 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1739 break;
1740 case MFW_DRV_MSG_GET_FCOE_STATS:
1741 stats_type = ECORE_MCP_FCOE_STATS;
1742 hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
1743 break;
1744 case MFW_DRV_MSG_GET_ISCSI_STATS:
1745 stats_type = ECORE_MCP_ISCSI_STATS;
1746 hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
1747 break;
1748 case MFW_DRV_MSG_GET_RDMA_STATS:
1749 stats_type = ECORE_MCP_RDMA_STATS;
1750 hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
1751 break;
1752 default:
1753 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1754 "Invalid protocol type %d\n", type);
1755 return;
1756 }
1757
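/* Collect the requested statistics from the OS layer and forward them to
 * the MFW through the mailbox, together with the matching HSI stats type.
 */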
1758 OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
1759
1760 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1761 mb_params.cmd = DRV_MSG_CODE_GET_STATS;
1762 mb_params.param = hsi_param;
1763 mb_params.p_data_src = &stats;
1764 mb_params.data_src_size = sizeof(stats);
1765 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1766 if (rc != ECORE_SUCCESS)
1767 DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
1768 }
1769
1770 static void
1771 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1772 {
1773 struct ecore_mcp_function_info *p_info;
1774 struct public_func shmem_info;
1775 u32 resp = 0, param = 0;
1776
1777 OSAL_MUTEX_ACQUIRE(&p_hwfn->mcp_info->link_lock);
1778
1779 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1780 MCP_PF_ID(p_hwfn));
1781
1782 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1783
1784 p_info = &p_hwfn->mcp_info->func_info;
1785
1786 ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
1787
1788 ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
1789
1790 OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->link_lock);
1791
1792 /* Acknowledge the MFW */
1793 ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1794 &param);
1795 }
1796
1797 static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn,
1798 struct ecore_ptt *p_ptt)
1799 {
1800 struct public_func shmem_info;
1801 u32 resp = 0, param = 0;
1802
1803 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1804 MCP_PF_ID(p_hwfn));
1805
1806 p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
1807 FUNC_MF_CFG_OV_STAG_MASK;
1808 p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
1809 if ((p_hwfn->hw_info.hw_mode & (1 << MODE_MF_SD)) &&
1810 (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET)) {
1811 ecore_wr(p_hwfn, p_ptt,
1812 NIG_REG_LLH_FUNC_TAG_VALUE,
1813 p_hwfn->hw_info.ovlan);
1814 ecore_sp_pf_update_stag(p_hwfn);
1815 /* Configure doorbell to add external vlan to EDPM packets */
1816 ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
1817 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
1818 p_hwfn->hw_info.ovlan);
1819 }
1820
1821 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
1822 p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
1823 OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN);
1824
1825 /* Acknowledge the MFW */
1826 ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
1827 &resp, &param);
1828 }
1829
1830 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
1831 {
1832 /* A single notification should be sent to the upper driver in CMT mode */
1833 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1834 return;
1835
1836 DP_NOTICE(p_hwfn, false,
1837 "Fan failure was detected on the network interface card and it's going to be shut down.\n");
1838
1839 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
1840 }
1841
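/* Parameters of a single mdump mailbox transaction; 'cmd' carries one of
 * the DRV_MSG_CODE_MDUMP_* sub-commands and is passed in the mailbox param.
 */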
1842 struct ecore_mdump_cmd_params {
1843 u32 cmd;
1844 void *p_data_src;
1845 u8 data_src_size;
1846 void *p_data_dst;
1847 u8 data_dst_size;
1848 u32 mcp_resp;
1849 };
1850
1851 static enum _ecore_status_t
1852 ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1853 struct ecore_mdump_cmd_params *p_mdump_cmd_params)
1854 {
1855 struct ecore_mcp_mb_params mb_params;
1856 enum _ecore_status_t rc;
1857
1858 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1859 mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1860 mb_params.param = p_mdump_cmd_params->cmd;
1861 mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
1862 mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
1863 mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
1864 mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
1865 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1866 if (rc != ECORE_SUCCESS)
1867 return rc;
1868
1869 p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
1870
1871 if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1872 DP_INFO(p_hwfn,
1873 "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
1874 p_mdump_cmd_params->cmd);
1875 rc = ECORE_NOTIMPL;
1876 } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
1877 DP_INFO(p_hwfn,
1878 "The mdump command is not supported by the MFW\n");
1879 rc = ECORE_NOTIMPL;
1880 }
1881
1882 return rc;
1883 }
1884
1885 static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
1886 struct ecore_ptt *p_ptt)
1887 {
1888 struct ecore_mdump_cmd_params mdump_cmd_params;
1889
1890 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1891 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
1892
1893 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1894 }
1895
1896 enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
1897 struct ecore_ptt *p_ptt,
1898 u32 epoch)
1899 {
1900 struct ecore_mdump_cmd_params mdump_cmd_params;
1901
1902 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1903 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
1904 mdump_cmd_params.p_data_src = &epoch;
1905 mdump_cmd_params.data_src_size = sizeof(epoch);
1906
1907 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1908 }
1909
1910 enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
1911 struct ecore_ptt *p_ptt)
1912 {
1913 struct ecore_mdump_cmd_params mdump_cmd_params;
1914
1915 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1916 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
1917
1918 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1919 }
1920
1921 static enum _ecore_status_t
1922 ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1923 struct mdump_config_stc *p_mdump_config)
1924 {
1925 struct ecore_mdump_cmd_params mdump_cmd_params;
1926 enum _ecore_status_t rc;
1927
1928 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1929 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
1930 mdump_cmd_params.p_data_dst = p_mdump_config;
1931 mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);
1932
1933 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1934 if (rc != ECORE_SUCCESS)
1935 return rc;
1936
1937 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1938 DP_INFO(p_hwfn,
1939 "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
1940 mdump_cmd_params.mcp_resp);
1941 rc = ECORE_UNKNOWN_ERROR;
1942 }
1943
1944 return rc;
1945 }
1946
1947 enum _ecore_status_t
1948 ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1949 struct ecore_mdump_info *p_mdump_info)
1950 {
1951 u32 addr, global_offsize, global_addr;
1952 struct mdump_config_stc mdump_config;
1953 enum _ecore_status_t rc;
1954
1955 OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
1956
1957 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1958 PUBLIC_GLOBAL);
1959 global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1960 global_addr = SECTION_ADDR(global_offsize, 0);
1961 p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
1962 global_addr +
1963 OFFSETOF(struct public_global,
1964 mdump_reason));
1965
1966 if (p_mdump_info->reason) {
1967 rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
1968 if (rc != ECORE_SUCCESS)
1969 return rc;
1970
1971 p_mdump_info->version = mdump_config.version;
1972 p_mdump_info->config = mdump_config.config;
1973 p_mdump_info->epoch = mdump_config.epoc;
1974 p_mdump_info->num_of_logs = mdump_config.num_of_logs;
1975 p_mdump_info->valid_logs = mdump_config.valid_logs;
1976
1977 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1978 "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
1979 p_mdump_info->reason, p_mdump_info->version,
1980 p_mdump_info->config, p_mdump_info->epoch,
1981 p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
1982 } else {
1983 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1984 "MFW mdump info: reason %d\n", p_mdump_info->reason);
1985 }
1986
1987 return ECORE_SUCCESS;
1988 }
1989
1990 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
1991 struct ecore_ptt *p_ptt)
1992 {
1993 struct ecore_mdump_cmd_params mdump_cmd_params;
1994
1995 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1996 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;
1997
1998 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1999 }
2000
2001 enum _ecore_status_t
2002 ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2003 struct ecore_mdump_retain_data *p_mdump_retain)
2004 {
2005 struct ecore_mdump_cmd_params mdump_cmd_params;
2006 struct mdump_retain_data_stc mfw_mdump_retain;
2007 enum _ecore_status_t rc;
2008
2009 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
2010 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
2011 mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
2012 mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);
2013
2014 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
2015 if (rc != ECORE_SUCCESS)
2016 return rc;
2017
2018 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
2019 DP_INFO(p_hwfn,
2020 "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
2021 mdump_cmd_params.mcp_resp);
2022 return ECORE_UNKNOWN_ERROR;
2023 }
2024
2025 p_mdump_retain->valid = mfw_mdump_retain.valid;
2026 p_mdump_retain->epoch = mfw_mdump_retain.epoch;
2027 p_mdump_retain->pf = mfw_mdump_retain.pf;
2028 p_mdump_retain->status = mfw_mdump_retain.status;
2029
2030 return ECORE_SUCCESS;
2031 }
2032
2033 enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
2034 struct ecore_ptt *p_ptt)
2035 {
2036 struct ecore_mdump_cmd_params mdump_cmd_params;
2037
2038 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
2039 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN;
2040
2041 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
2042 }
2043
2044 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
2045 struct ecore_ptt *p_ptt)
2046 {
2047 struct ecore_mdump_retain_data mdump_retain;
2048 enum _ecore_status_t rc;
2049
2050 /* In CMT mode - no need for more than a single acknowledgement to the
2051 * MFW, and no more than a single notification to the upper driver.
2052 */
2053 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
2054 return;
2055
2056 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
2057 if (rc == ECORE_SUCCESS && mdump_retain.valid) {
2058 DP_NOTICE(p_hwfn, false,
2059 "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
2060 mdump_retain.epoch, mdump_retain.pf,
2061 mdump_retain.status);
2062 } else {
2063 DP_NOTICE(p_hwfn, false,
2064 "The MFW notified that a critical error occurred in the device\n");
2065 }
2066
2067 if (p_hwfn->p_dev->allow_mdump) {
2068 DP_NOTICE(p_hwfn, false,
2069 "Not acknowledging the notification to allow the MFW crash dump\n");
2070 return;
2071 }
2072
2073 DP_NOTICE(p_hwfn, false,
2074 "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
2075 ecore_mcp_mdump_ack(p_hwfn, p_ptt);
2076 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
2077 }
2078
2079 void
2080 ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
2081 {
2082 struct public_func shmem_info;
2083 u32 port_cfg, val;
2084
2085 if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
2086 return;
2087
2088 OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
2089 port_cfg = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
2090 OFFSETOF(struct public_port, oem_cfg_port));
2091 val = GET_MFW_FIELD(port_cfg, OEM_CFG_CHANNEL_TYPE);
2092 if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
2093 DP_NOTICE(p_hwfn, false, "Incorrect UFP Channel type %d\n",
2094 val);
2095
2096 val = GET_MFW_FIELD(port_cfg, OEM_CFG_SCHED_TYPE);
2097 if (val == OEM_CFG_SCHED_TYPE_ETS)
2098 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_ETS;
2099 else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW)
2100 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_VNIC_BW;
2101 else {
2102 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_UNKNOWN;
2103 DP_NOTICE(p_hwfn, false, "Unknown UFP scheduling mode %d\n",
2104 val);
2105 }
2106
2107 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2108 MCP_PF_ID(p_hwfn));
2109 val = GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_TC);
2110 p_hwfn->ufp_info.tc = (u8)val;
2111 val = GET_MFW_FIELD(shmem_info.oem_cfg_func,
2112 OEM_CFG_FUNC_HOST_PRI_CTRL);
2113 if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC)
2114 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_VNIC;
2115 else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS)
2116 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_OS;
2117 else {
2118 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_UNKNOWN;
2119 DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n",
2120 val);
2121 }
2122
2123 DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
2124 "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
2125 p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
2126 p_hwfn->ufp_info.pri_type);
2127 }
2128
2129 static enum _ecore_status_t
2130 ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
2131 {
2132 ecore_mcp_read_ufp_config(p_hwfn, p_ptt);
2133
2134 if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_VNIC_BW) {
2135 p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
2136 p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;
2137
2138 ecore_qm_reconf(p_hwfn, p_ptt);
2139 } else if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_ETS) {
2140 /* Merge UFP TC with the dcbx TC data */
2141 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2142 ECORE_DCBX_OPERATIONAL_MIB);
2143 } else {
2144 DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n");
2145 return ECORE_INVAL;
2146 }
2147
2148 /* update storm FW with negotiation results */
2149 ecore_sp_pf_update_ufp(p_hwfn);
2150
2151 return ECORE_SUCCESS;
2152 }
2153
2154 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
2155 struct ecore_ptt *p_ptt)
2156 {
2157 struct ecore_mcp_info *info = p_hwfn->mcp_info;
2158 enum _ecore_status_t rc = ECORE_SUCCESS;
2159 bool found = false;
2160 u16 i;
2161
2162 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
2163
2164 /* Read Messages from MFW */
2165 ecore_mcp_read_mb(p_hwfn, p_ptt);
2166
2167 /* Compare current messages to old ones */
2168 for (i = 0; i < info->mfw_mb_length; i++) {
2169 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
2170 continue;
2171
2172 found = true;
2173
2174 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
2175 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
2176 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
2177
2178 switch (i) {
2179 case MFW_DRV_MSG_LINK_CHANGE:
2180 ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
2181 break;
2182 case MFW_DRV_MSG_VF_DISABLED:
2183 ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
2184 break;
2185 case MFW_DRV_MSG_LLDP_DATA_UPDATED:
2186 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2187 ECORE_DCBX_REMOTE_LLDP_MIB);
2188 break;
2189 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
2190 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2191 ECORE_DCBX_REMOTE_MIB);
2192 break;
2193 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
2194 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2195 ECORE_DCBX_OPERATIONAL_MIB);
2196 /* clear the user-config cache */
2197 OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0,
2198 sizeof(struct ecore_dcbx_set));
2199 break;
2200 case MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED:
2201 ecore_lldp_mib_update_event(p_hwfn, p_ptt);
2202 break;
2203 case MFW_DRV_MSG_OEM_CFG_UPDATE:
2204 ecore_mcp_handle_ufp_event(p_hwfn, p_ptt);
2205 break;
2206 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
2207 ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
2208 break;
2209 case MFW_DRV_MSG_ERROR_RECOVERY:
2210 ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
2211 break;
2212 case MFW_DRV_MSG_GET_LAN_STATS:
2213 case MFW_DRV_MSG_GET_FCOE_STATS:
2214 case MFW_DRV_MSG_GET_ISCSI_STATS:
2215 case MFW_DRV_MSG_GET_RDMA_STATS:
2216 ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
2217 break;
2218 case MFW_DRV_MSG_BW_UPDATE:
2219 ecore_mcp_update_bw(p_hwfn, p_ptt);
2220 break;
2221 case MFW_DRV_MSG_S_TAG_UPDATE:
2222 ecore_mcp_update_stag(p_hwfn, p_ptt);
2223 break;
2224 case MFW_DRV_MSG_FAILURE_DETECTED:
2225 ecore_mcp_handle_fan_failure(p_hwfn);
2226 break;
2227 case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
2228 ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
2229 break;
2230 case MFW_DRV_MSG_GET_TLV_REQ:
2231 OSAL_MFW_TLV_REQ(p_hwfn);
2232 break;
2233 default:
2234 DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
2235 rc = ECORE_INVAL;
2236 }
2237 }
2238
2239 /* ACK everything */
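/* The mailbox layout, as implied by the address arithmetic below, is a
 * length word, followed by the current-message dwords, followed by the
 * ack dwords; hence the 'sizeof(u32) + MAX_DWORDS * sizeof(u32)' offset.
 */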
2240 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
2241 OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
2242
2243 /* The MFW expects the answer in BE, so force the write in that format */
2244 ecore_wr(p_hwfn, p_ptt,
2245 info->mfw_mb_addr + sizeof(u32) +
2246 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
2247 sizeof(u32) + i * sizeof(u32), val);
2248 }
2249
2250 if (!found) {
2251 DP_INFO(p_hwfn,
2252 "Received an MFW message indication but no new message!\n");
2253 rc = ECORE_INVAL;
2254 }
2255
2256 /* Copy the new mfw messages into the shadow */
2257 OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
2258
2259 return rc;
2260 }
2261
2262 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
2263 struct ecore_ptt *p_ptt,
2264 u32 *p_mfw_ver,
2265 u32 *p_running_bundle_id)
2266 {
2267 u32 global_offsize;
2268
2269 #ifndef ASIC_ONLY
2270 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2271 DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
2272 return ECORE_SUCCESS;
2273 }
2274 #endif
2275
2276 if (IS_VF(p_hwfn->p_dev)) {
2277 if (p_hwfn->vf_iov_info) {
2278 struct pfvf_acquire_resp_tlv *p_resp;
2279
2280 p_resp = &p_hwfn->vf_iov_info->acquire_resp;
2281 *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
2282 return ECORE_SUCCESS;
2283 } else {
2284 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2285 "VF requested MFW version prior to ACQUIRE\n");
2286 return ECORE_INVAL;
2287 }
2288 }
2289
2290 global_offsize = ecore_rd(p_hwfn, p_ptt,
2291 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
2292 PUBLIC_GLOBAL));
2293 *p_mfw_ver = ecore_rd(p_hwfn, p_ptt,
2294 SECTION_ADDR(global_offsize, 0) +
2295 OFFSETOF(struct public_global, mfw_ver));
2296
2297 if (p_running_bundle_id != OSAL_NULL) {
2298 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
2299 SECTION_ADDR(global_offsize, 0) +
2300 OFFSETOF(struct public_global,
2301 running_bundle_id));
2302 }
2303
2304 return ECORE_SUCCESS;
2305 }
2306
2307 enum _ecore_status_t ecore_mcp_get_mbi_ver(struct ecore_hwfn *p_hwfn,
2308 struct ecore_ptt *p_ptt,
2309 u32 *p_mbi_ver)
2310 {
2311 u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;
2312
2313 #ifndef ASIC_ONLY
2314 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2315 DP_NOTICE(p_hwfn, false, "Emulation - can't get MBI version\n");
2316 return ECORE_SUCCESS;
2317 }
2318 #endif
2319
2320 if (IS_VF(p_hwfn->p_dev))
2321 return ECORE_INVAL;
2322
2323 /* Read the address of the nvm_cfg */
2324 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
2325 if (!nvm_cfg_addr) {
2326 DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
2327 return ECORE_INVAL;
2328 }
2329
2330 /* Read the offset of nvm_cfg1 */
2331 nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
2332
2333 mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2334 OFFSETOF(struct nvm_cfg1, glob) +
2335 OFFSETOF(struct nvm_cfg1_glob, mbi_version);
2336 *p_mbi_ver = ecore_rd(p_hwfn, p_ptt, mbi_ver_addr) &
2337 (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
2338 NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
2339 NVM_CFG1_GLOB_MBI_VERSION_2_MASK);
2340
2341 return ECORE_SUCCESS;
2342 }
2343
2344 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
2345 struct ecore_ptt *p_ptt,
2346 u32 *p_media_type)
2347 {
2348
2349 /* TODO - Add support for VFs */
2350 if (IS_VF(p_hwfn->p_dev))
2351 return ECORE_INVAL;
2352
2353 if (!ecore_mcp_is_init(p_hwfn)) {
2354 DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
2355 return ECORE_BUSY;
2356 }
2357 if (!p_ptt) {
2358 *p_media_type = MEDIA_UNSPECIFIED;
2359 return ECORE_INVAL;
2360 } else {
2361 *p_media_type = ecore_rd(p_hwfn, p_ptt,
2362 p_hwfn->mcp_info->port_addr +
2363 OFFSETOF(struct public_port,
2364 media_type));
2365 }
2366
2367 return ECORE_SUCCESS;
2368 }
2369
2370 enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
2371 struct ecore_ptt *p_ptt,
2372 u32 *p_tranceiver_type)
2373 {
2374 /* TODO - Add support for VFs */
2375 if (IS_VF(p_hwfn->p_dev))
2376 return ECORE_INVAL;
2377
2378 if (!ecore_mcp_is_init(p_hwfn)) {
2379 DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
2380 return ECORE_BUSY;
2381 }
2382 if (!p_ptt) {
2383 *p_tranceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
2384 return ECORE_INVAL;
2385 } else {
2386 *p_tranceiver_type = ecore_rd(p_hwfn, p_ptt,
2387 p_hwfn->mcp_info->port_addr +
2388 offsetof(struct public_port,
2389 transceiver_data));
2390 }
2391
2392 return ECORE_SUCCESS;
2393 }
2394
2395 static int is_transceiver_ready(u32 transceiver_state, u32 transceiver_type)
2396 {
2397
2398 if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
2399 ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
2400 (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE)) {
2401 return 1;
2402 }
2403
2404 return 0;
2405 }
2406
2407 enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn,
2408 struct ecore_ptt *p_ptt,
2409 u32 *p_speed_mask)
2410 {
2411 u32 transceiver_data, transceiver_type, transceiver_state;
2412
2413 ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_data);
2414
2415 transceiver_state = GET_MFW_FIELD(transceiver_data,
2416 ETH_TRANSCEIVER_STATE);
2417
2418 transceiver_type = GET_MFW_FIELD(transceiver_data,
2419 ETH_TRANSCEIVER_TYPE);
2420
2421 if (is_transceiver_ready(transceiver_state, transceiver_type) == 0) {
2422 return ECORE_INVAL;
2423 }
2424
2425 switch (transceiver_type) {
2426 case ETH_TRANSCEIVER_TYPE_1G_LX:
2427 case ETH_TRANSCEIVER_TYPE_1G_SX:
2428 case ETH_TRANSCEIVER_TYPE_1G_PCC:
2429 case ETH_TRANSCEIVER_TYPE_1G_ACC:
2430 case ETH_TRANSCEIVER_TYPE_1000BASET:
2431 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2432 break;
2433
2434 case ETH_TRANSCEIVER_TYPE_10G_SR:
2435 case ETH_TRANSCEIVER_TYPE_10G_LR:
2436 case ETH_TRANSCEIVER_TYPE_10G_LRM:
2437 case ETH_TRANSCEIVER_TYPE_10G_ER:
2438 case ETH_TRANSCEIVER_TYPE_10G_PCC:
2439 case ETH_TRANSCEIVER_TYPE_10G_ACC:
2440 case ETH_TRANSCEIVER_TYPE_4x10G:
2441 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2442 break;
2443
2444 case ETH_TRANSCEIVER_TYPE_40G_LR4:
2445 case ETH_TRANSCEIVER_TYPE_40G_SR4:
2446 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
2447 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
2448 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2449 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2450 break;
2451
2452 case ETH_TRANSCEIVER_TYPE_100G_AOC:
2453 case ETH_TRANSCEIVER_TYPE_100G_SR4:
2454 case ETH_TRANSCEIVER_TYPE_100G_LR4:
2455 case ETH_TRANSCEIVER_TYPE_100G_ER4:
2456 case ETH_TRANSCEIVER_TYPE_100G_ACC:
2457 *p_speed_mask =
2458 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2459 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2460 break;
2461
2462 case ETH_TRANSCEIVER_TYPE_25G_SR:
2463 case ETH_TRANSCEIVER_TYPE_25G_LR:
2464 case ETH_TRANSCEIVER_TYPE_25G_AOC:
2465 case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
2466 case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
2467 case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
2468 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2469 break;
2470
2471 case ETH_TRANSCEIVER_TYPE_25G_CA_N:
2472 case ETH_TRANSCEIVER_TYPE_25G_CA_S:
2473 case ETH_TRANSCEIVER_TYPE_25G_CA_L:
2474 case ETH_TRANSCEIVER_TYPE_4x25G_CR:
2475 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2476 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2477 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2478 break;
2479
2480 case ETH_TRANSCEIVER_TYPE_40G_CR4:
2481 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
2482 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2483 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2484 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2485 break;
2486
2487 case ETH_TRANSCEIVER_TYPE_100G_CR4:
2488 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
2489 *p_speed_mask =
2490 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2491 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
2492 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2493 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2494 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
2495 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2496 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2497 break;
2498
2499 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2500 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2501 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
2502 *p_speed_mask =
2503 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2504 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2505 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2506 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2507 break;
2508
2509 case ETH_TRANSCEIVER_TYPE_XLPPI:
2510 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
2511 break;
2512
2513 case ETH_TRANSCEIVER_TYPE_10G_BASET:
2514 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2515 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2516 break;
2517
2518 default:
2519 DP_INFO(p_hwfn, "Unknown transcevier type 0x%x\n",
2520 transceiver_type);
2521 *p_speed_mask = 0xff;
2522 break;
2523 }
2524
2525 return ECORE_SUCCESS;
2526 }
2527
2528 enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn,
2529 struct ecore_ptt *p_ptt,
2530 u32 *p_board_config)
2531 {
2532 u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
2533
2534 /* TODO - Add support for VFs */
2535 if (IS_VF(p_hwfn->p_dev))
2536 return ECORE_INVAL;
2537
2538 if (!ecore_mcp_is_init(p_hwfn)) {
2539 DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
2540 return ECORE_BUSY;
2541 }
2542 if (!p_ptt) {
2543 *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
2544 return ECORE_INVAL;
2545 } else {
2546 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt,
2547 MISC_REG_GEN_PURP_CR0);
2548 nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt,
2549 nvm_cfg_addr + 4);
2550 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2551 offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
2552 *p_board_config = ecore_rd(p_hwfn, p_ptt,
2553 port_cfg_addr +
2554 offsetof(struct nvm_cfg1_port,
2555 board_cfg));
2556 }
2557
2558 return ECORE_SUCCESS;
2559 }
2560
2561 /* Old MFW has a global configuration for all PFs regarding RDMA support */
2562 static void
2563 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
2564 enum ecore_pci_personality *p_proto)
2565 {
2566 /* There wasn't ever a legacy MFW that published iwarp.
2567 * So at this point, this is either plain l2 or RoCE.
2568 */
2569 if (OSAL_TEST_BIT(ECORE_DEV_CAP_ROCE,
2570 &p_hwfn->hw_info.device_capabilities))
2571 *p_proto = ECORE_PCI_ETH_ROCE;
2572 else
2573 *p_proto = ECORE_PCI_ETH;
2574
2575 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2576 "According to Legacy capabilities, L2 personality is %08x\n",
2577 (u32) *p_proto);
2578 }
2579
2580 static enum _ecore_status_t
2581 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
2582 struct ecore_ptt *p_ptt,
2583 enum ecore_pci_personality *p_proto)
2584 {
2585 u32 resp = 0, param = 0;
2586 enum _ecore_status_t rc;
2587
2588 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2589 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
2590 if (rc != ECORE_SUCCESS)
2591 return rc;
2592 if (resp != FW_MSG_CODE_OK) {
2593 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2594 "MFW lacks support for command; Returns %08x\n",
2595 resp);
2596 return ECORE_INVAL;
2597 }
2598
2599 switch (param) {
2600 case FW_MB_PARAM_GET_PF_RDMA_NONE:
2601 *p_proto = ECORE_PCI_ETH;
2602 break;
2603 case FW_MB_PARAM_GET_PF_RDMA_ROCE:
2604 *p_proto = ECORE_PCI_ETH_ROCE;
2605 break;
2606 case FW_MB_PARAM_GET_PF_RDMA_IWARP:
2607 *p_proto = ECORE_PCI_ETH_IWARP;
2608 break;
2609 case FW_MB_PARAM_GET_PF_RDMA_BOTH:
2610 *p_proto = ECORE_PCI_ETH_RDMA;
2611 break;
2612 default:
2613 DP_NOTICE(p_hwfn, true,
2614 "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
2615 param);
2616 return ECORE_INVAL;
2617 }
2618
2619 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2620 "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2621 (u32) *p_proto, resp, param);
2622 return ECORE_SUCCESS;
2623 }
2624
2625 static enum _ecore_status_t
2626 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
2627 struct public_func *p_info,
2628 struct ecore_ptt *p_ptt,
2629 enum ecore_pci_personality *p_proto)
2630 {
2631 enum _ecore_status_t rc = ECORE_SUCCESS;
2632
2633 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2634 case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2635 if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
2636 ECORE_SUCCESS)
2637 ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2638 break;
2639 case FUNC_MF_CFG_PROTOCOL_ISCSI:
2640 *p_proto = ECORE_PCI_ISCSI;
2641 break;
2642 case FUNC_MF_CFG_PROTOCOL_FCOE:
2643 *p_proto = ECORE_PCI_FCOE;
2644 break;
2645 case FUNC_MF_CFG_PROTOCOL_ROCE:
2646 DP_NOTICE(p_hwfn, true, "RoCE personality is not a valid value!\n");
2647 /* Fallthrough */
2648 default:
2649 rc = ECORE_INVAL;
2650 }
2651
2652 return rc;
2653 }
2654
2655 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
2656 struct ecore_ptt *p_ptt)
2657 {
2658 struct ecore_mcp_function_info *info;
2659 struct public_func shmem_info;
2660
2661 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2662 MCP_PF_ID(p_hwfn));
2663 info = &p_hwfn->mcp_info->func_info;
2664
2665 info->pause_on_host = (shmem_info.config &
2666 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2667
2668 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2669 &info->protocol)) {
2670 DP_ERR(p_hwfn, "Unknown personality %08x\n",
2671 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2672 return ECORE_INVAL;
2673 }
2674
2675 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
2676
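/* shmem keeps the MAC split across two words: mac_upper holds bytes 0-1
 * and mac_lower holds bytes 2-5. E.g. for 00:11:22:33:44:55,
 * mac_upper == 0x0011 and mac_lower == 0x22334455.
 */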
2677 if (shmem_info.mac_upper || shmem_info.mac_lower) {
2678 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2679 info->mac[1] = (u8)(shmem_info.mac_upper);
2680 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2681 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2682 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2683 info->mac[5] = (u8)(shmem_info.mac_lower);
2684
2685 /* Store primary MAC for later possible WoL */
2686 OSAL_MEMCPY(&p_hwfn->p_dev->wol_mac, info->mac, ETH_ALEN);
2687
2688 } else {
2689 /* TODO - are there protocols for which there's no MAC? */
2690 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
2691 }
2692
2693 /* TODO - are these calculations true for BE machine? */
2694 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
2695 (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
2696 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
2697 (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);
2698
2699 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2700
2701 info->mtu = (u16)shmem_info.mtu_size;
2702
2703 p_hwfn->hw_info.b_wol_support = ECORE_WOL_SUPPORT_NONE;
2704 p_hwfn->p_dev->wol_config = (u8)ECORE_OV_WOL_DEFAULT;
2705 if (ecore_mcp_is_init(p_hwfn)) {
2706 u32 resp = 0, param = 0;
2707 enum _ecore_status_t rc;
2708
2709 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2710 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
2711 if (rc != ECORE_SUCCESS)
2712 return rc;
2713 if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
2714 p_hwfn->hw_info.b_wol_support = ECORE_WOL_SUPPORT_PME;
2715 }
2716
2717 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
2718 "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
2719 info->pause_on_host, info->protocol,
2720 info->bandwidth_min, info->bandwidth_max,
2721 info->mac[0], info->mac[1], info->mac[2],
2722 info->mac[3], info->mac[4], info->mac[5],
2723 (unsigned long long)info->wwn_port, (unsigned long long)info->wwn_node, info->ovlan,
2724 (u8)p_hwfn->hw_info.b_wol_support);
2725
2726 return ECORE_SUCCESS;
2727 }
2728
2729 struct ecore_mcp_link_params
2730 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
2731 {
2732 if (!p_hwfn || !p_hwfn->mcp_info)
2733 return OSAL_NULL;
2734 return &p_hwfn->mcp_info->link_input;
2735 }
2736
2737 struct ecore_mcp_link_state
2738 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
2739 {
2740 if (!p_hwfn || !p_hwfn->mcp_info)
2741 return OSAL_NULL;
2742
2743 #ifndef ASIC_ONLY
2744 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
2745 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
2746 p_hwfn->mcp_info->link_output.link_up = true;
2747 }
2748 #endif
2749
2750 return &p_hwfn->mcp_info->link_output;
2751 }
2752
2753 struct ecore_mcp_link_capabilities
2754 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
2755 {
2756 if (!p_hwfn || !p_hwfn->mcp_info)
2757 return OSAL_NULL;
2758 return &p_hwfn->mcp_info->link_capabilities;
2759 }
2760
2761 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
2762 struct ecore_ptt *p_ptt)
2763 {
2764 u32 resp = 0, param = 0;
2765 enum _ecore_status_t rc;
2766
2767 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2768 DRV_MSG_CODE_NIG_DRAIN, 1000,
2769 &resp, &param);
2770
2771 /* Wait for the drain to complete before returning */
2772 OSAL_MSLEEP(1020);
2773
2774 return rc;
2775 }
2776
2777 #ifndef LINUX_REMOVE
2778 const struct ecore_mcp_function_info
2779 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
2780 {
2781 if (!p_hwfn || !p_hwfn->mcp_info)
2782 return OSAL_NULL;
2783 return &p_hwfn->mcp_info->func_info;
2784 }
2785
2786 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
2787 struct ecore_ptt *p_ptt,
2788 u32 personalities)
2789 {
2790 enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
2791 struct public_func shmem_info;
2792 int i, count = 0, num_pfs;
2793
2794 num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
2795
2796 for (i = 0; i < num_pfs; i++) {
2797 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2798 MCP_PF_ID_BY_REL(p_hwfn, i));
2799 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
2800 continue;
2801
2802 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2803 &protocol) !=
2804 ECORE_SUCCESS)
2805 continue;
2806
2807 if ((1 << ((u32)protocol)) & personalities)
2808 count++;
2809 }
2810
2811 return count;
2812 }
2813 #endif
2814
2815 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
2816 struct ecore_ptt *p_ptt,
2817 u32 *p_flash_size)
2818 {
2819 u32 flash_size;
2820
2821 #ifndef ASIC_ONLY
2822 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2823 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
2824 return ECORE_INVAL;
2825 }
2826 #endif
2827
2828 if (IS_VF(p_hwfn->p_dev))
2829 return ECORE_INVAL;
2830
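/* As implied by the macro name, NVM_CFG4 encodes the flash size as a
 * power-of-two count of Mbits; adding MCP_BYTES_PER_MBIT_OFFSET (17)
 * converts it to bytes, since 1 Mbit = 2^17 bytes. E.g. a field value of
 * 3 yields 1 << (3 + 17) = 1 MiB (= 8 Mbit).
 */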
2831 flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2832 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2833 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
2834 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET));
2835
2836 *p_flash_size = flash_size;
2837
2838 return ECORE_SUCCESS;
2839 }
2840
2841 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
2842 struct ecore_ptt *p_ptt)
2843 {
2844 struct ecore_dev *p_dev = p_hwfn->p_dev;
2845
2846 if (p_dev->recov_in_prog) {
2847 DP_NOTICE(p_hwfn, false,
2848 "Avoid triggering a recovery since such a process is already in progress\n");
2849 return ECORE_AGAIN;
2850 }
2851
2852 DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
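/* Setting general attention bit 35 is assumed to raise the attention that
 * the MFW reports back as MFW_DRV_MSG_ERROR_RECOVERY, kicking off the
 * process-kill flow handled above.
 */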
2853 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2854
2855 return ECORE_SUCCESS;
2856 }
2857
2858 #define ECORE_RECOVERY_PROLOG_SLEEP_MS 100
2859
2860 enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev)
2861 {
2862 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2863 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
2864 enum _ecore_status_t rc;
2865
2866 /* Allow ongoing PCIe transactions to complete */
2867 OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS);
2868
2869 /* Clear the PF's internal FID_enable in the PXP */
2870 rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
2871 if (rc != ECORE_SUCCESS)
2872 DP_NOTICE(p_hwfn, false,
2873 "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
2874 rc);
2875
2876 return rc;
2877 }
2878
2879 static enum _ecore_status_t
2880 ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
2881 struct ecore_ptt *p_ptt,
2882 u8 vf_id, u8 num)
2883 {
2884 u32 resp = 0, param = 0, rc_param = 0;
2885 enum _ecore_status_t rc;
2886
2887 /* Only the leader can configure MSI-X; CMT must be taken into account */
2888 if (!IS_LEAD_HWFN(p_hwfn))
2889 return ECORE_SUCCESS;
2890 num *= p_hwfn->p_dev->num_hwfns;
2891
2892 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) &
2893 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2894 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) &
2895 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2896
2897 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2898 &resp, &rc_param);
2899
2900 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2901 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
2902 vf_id);
2903 rc = ECORE_INVAL;
2904 } else {
2905 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2906 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
2907 num, vf_id);
2908 }
2909
2910 return rc;
2911 }
2912
2913 static enum _ecore_status_t
2914 ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
2915 struct ecore_ptt *p_ptt,
2916 u8 num)
2917 {
2918 u32 resp = 0, param = num, rc_param = 0;
2919 enum _ecore_status_t rc;
2920
2921 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2922 param, &resp, &rc_param);
2923
2924 if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2925 DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
2926 rc = ECORE_INVAL;
2927 } else {
2928 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2929 "Requested 0x%02x MSI-x interrupts for VFs\n",
2930 num);
2931 }
2932
2933 return rc;
2934 }
2935
2936 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
2937 struct ecore_ptt *p_ptt,
2938 u8 vf_id, u8 num)
2939 {
2940 if (ECORE_IS_BB(p_hwfn->p_dev))
2941 return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2942 else
2943 return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2944 }
2945
2946 enum _ecore_status_t
2947 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2948 struct ecore_mcp_drv_version *p_ver)
2949 {
2950 struct ecore_mcp_mb_params mb_params;
2951 struct drv_version_stc drv_version;
2952 u32 num_words, i;
2953 void *p_name;
2954 OSAL_BE32 val;
2955 enum _ecore_status_t rc;
2956
2957 #ifndef ASIC_ONLY
2958 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
2959 return ECORE_SUCCESS;
2960 #endif
2961
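/* The version dword goes out as-is; the name string is copied in 32-bit
 * chunks, each converted to big-endian as the MFW expects.
 */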
2962 OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
2963 drv_version.version = p_ver->version;
2964 num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
2965 for (i = 0; i < num_words; i++) {
2966 /* The driver name is expected to be in a big-endian format */
2967 p_name = &p_ver->name[i * sizeof(u32)];
2968 val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
2969 *(u32 *)&drv_version.name[i * sizeof(u32)] = val;
2970 }
2971
2972 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2973 mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2974 mb_params.p_data_src = &drv_version;
2975 mb_params.data_src_size = sizeof(drv_version);
2976 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2977 if (rc != ECORE_SUCCESS)
2978 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2979
2980 return rc;
2981 }
2982
2983 /* Allow the MCP at most 100 msec (10 retries x 10 msec) to halt */
2984 #define ECORE_MCP_HALT_SLEEP_MS 10
2985 #define ECORE_MCP_HALT_MAX_RETRIES 10
2986
2987 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
2988 struct ecore_ptt *p_ptt)
2989 {
2990 u32 resp = 0, param = 0, cpu_state, cnt = 0;
2991 enum _ecore_status_t rc;
2992
2993 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2994 &param);
2995 if (rc != ECORE_SUCCESS) {
2996 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2997 return rc;
2998 }
2999
3000 do {
3001 OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
3002 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
3003 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
3004 break;
3005 } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);
3006
3007 if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
3008 DP_NOTICE(p_hwfn, false,
3009 "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
3010 ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
3011 return ECORE_BUSY;
3012 }
3013
3014 ecore_mcp_cmd_set_blocking(p_hwfn, true);
3015
3016 return ECORE_SUCCESS;
3017 }
3018
3019 #define ECORE_MCP_RESUME_SLEEP_MS 10
3020
3021 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
3022 struct ecore_ptt *p_ptt)
3023 {
3024 u32 cpu_mode, cpu_state;
3025
3026 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
3027
3028 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
3029 cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
3030 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
3031
3032 OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS);
3033 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
3034
3035 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
3036 DP_NOTICE(p_hwfn, false,
3037 "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
3038 cpu_mode, cpu_state);
3039 return ECORE_BUSY;
3040 }
3041
3042 ecore_mcp_cmd_set_blocking(p_hwfn, false);
3043
3044 return ECORE_SUCCESS;
3045 }
3046
3047 enum _ecore_status_t
3048 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
3049 struct ecore_ptt *p_ptt,
3050 enum ecore_ov_client client)
3051 {
3052 u32 resp = 0, param = 0;
3053 u32 drv_mb_param;
3054 enum _ecore_status_t rc;
3055
3056 switch (client) {
3057 case ECORE_OV_CLIENT_DRV:
3058 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
3059 break;
3060 case ECORE_OV_CLIENT_USER:
3061 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
3062 break;
3063 case ECORE_OV_CLIENT_VENDOR_SPEC:
3064 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
3065 break;
3066 default:
3067 DP_NOTICE(p_hwfn, true,
3068 "Invalid client type %d\n", client);
3069 return ECORE_INVAL;
3070 }
3071
3072 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
3073 drv_mb_param, &resp, &param);
3074 if (rc != ECORE_SUCCESS)
3075 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
3076
3077 return rc;
3078 }
3079
3080 enum _ecore_status_t
3081 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
3082 struct ecore_ptt *p_ptt,
3083 enum ecore_ov_driver_state drv_state)
3084 {
3085 u32 resp = 0, param = 0;
3086 u32 drv_mb_param;
3087 enum _ecore_status_t rc;
3088
3089 switch (drv_state) {
3090 case ECORE_OV_DRIVER_STATE_NOT_LOADED:
3091 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
3092 break;
3093 case ECORE_OV_DRIVER_STATE_DISABLED:
3094 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
3095 break;
3096 case ECORE_OV_DRIVER_STATE_ACTIVE:
3097 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
3098 break;
3099 default:
3100 DP_NOTICE(p_hwfn, true,
3101 "Invalid driver state %d\n", drv_state);
3102 return ECORE_INVAL;
3103 }
3104
3105 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
3106 drv_mb_param, &resp, &param);
3107 if (rc != ECORE_SUCCESS)
3108 DP_ERR(p_hwfn, "Failed to send driver state\n");
3109
3110 return rc;
3111 }
3112
3113 enum _ecore_status_t
3114 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3115 struct ecore_fc_npiv_tbl *p_table)
3116 {
3117 struct dci_fc_npiv_tbl *p_npiv_table;
3118 u8 *p_buf = OSAL_NULL;
3119 u32 addr, size, i;
3120 enum _ecore_status_t rc = ECORE_SUCCESS;
3121
3122 p_table->num_wwpn = 0;
3123 p_table->num_wwnn = 0;
3124 addr = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
3125 OFFSETOF(struct public_port, fc_npiv_nvram_tbl_addr));
3126 if (addr == NPIV_TBL_INVALID_ADDR) {
3127 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "NPIV table doesn't exist\n");
3128 return rc;
3129 }
3130
3131 size = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
3132 OFFSETOF(struct public_port, fc_npiv_nvram_tbl_size));
3133 if (!size) {
3134 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "NPIV table is empty\n");
3135 return rc;
3136 }
3137
3138 p_buf = OSAL_VZALLOC(p_hwfn->p_dev, size);
3139 if (!p_buf) {
3140 DP_ERR(p_hwfn, "Buffer allocation failed\n");
3141 return ECORE_NOMEM;
3142 }
3143
3144 rc = ecore_mcp_nvm_read(p_hwfn->p_dev, addr, p_buf, size);
3145 if (rc != ECORE_SUCCESS) {
3146 OSAL_VFREE(p_hwfn->p_dev, p_buf);
3147 return rc;
3148 }
3149
3150 p_npiv_table = (struct dci_fc_npiv_tbl *)p_buf;
3151 p_table->num_wwpn = (u16)p_npiv_table->fc_npiv_cfg.num_of_npiv;
3152 p_table->num_wwnn = (u16)p_npiv_table->fc_npiv_cfg.num_of_npiv;
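/* Note: as written, each iteration copies into the same wwpn/wwnn slots,
 * so in practice only the last NPIV entry is retained in p_table.
 */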
3153 for (i = 0; i < p_table->num_wwpn; i++) {
3154 OSAL_MEMCPY(p_table->wwpn, p_npiv_table->settings[i].npiv_wwpn,
3155 ECORE_WWN_SIZE);
3156 OSAL_MEMCPY(p_table->wwnn, p_npiv_table->settings[i].npiv_wwnn,
3157 ECORE_WWN_SIZE);
3158 }
3159
3160 OSAL_VFREE(p_hwfn->p_dev, p_buf);
3161
3162 return ECORE_SUCCESS;
3163 }
3164
3165 enum _ecore_status_t
3166 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3167 u16 mtu)
3168 {
3169 u32 resp = 0, param = 0;
3170 u32 drv_mb_param;
3171 enum _ecore_status_t rc;
3172
3173 drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_OFFSET;
3174 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
3175 drv_mb_param, &resp, &param);
3176 if (rc != ECORE_SUCCESS)
3177 DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
3178
3179 return rc;
3180 }
3181
3182 enum _ecore_status_t
3183 ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3184 u8 *mac)
3185 {
3186 struct ecore_mcp_mb_params mb_params;
3187 u32 mfw_mac[2];
3188 enum _ecore_status_t rc;
3189
3190 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3191 mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
3192 mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
3193 DRV_MSG_CODE_VMAC_TYPE_OFFSET;
3194 mb_params.param |= MCP_PF_ID(p_hwfn);
3195
3196 /* MCP is BE, and on LE platforms PCI would swap access to SHMEM
3197 * in 32-bit granularity.
3198 * So the MAC has to be set in native order [and not byte order],
3199 * otherwise it would be read incorrectly by MFW after swap.
3200 */
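/* E.g. MAC 00:11:22:33:44:55 packs into mfw_mac[0] == 0x00112233 and
 * mfw_mac[1] == 0x44550000.
 */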
3201 mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
3202 mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
3203
3204 mb_params.p_data_src = (u8 *)mfw_mac;
3205 mb_params.data_src_size = 8;
3206 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3207 if (rc != ECORE_SUCCESS)
3208 DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
3209
3210 /* Store primary MAC for later possible WoL */
3211 OSAL_MEMCPY(p_hwfn->p_dev->wol_mac, mac, ETH_ALEN);
3212
3213 return rc;
3214 }
3215
3216 enum _ecore_status_t
3217 ecore_mcp_ov_update_wol(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3218 enum ecore_ov_wol wol)
3219 {
3220 u32 resp = 0, param = 0;
3221 u32 drv_mb_param;
3222 enum _ecore_status_t rc;
3223
3224 if (p_hwfn->hw_info.b_wol_support == ECORE_WOL_SUPPORT_NONE) {
3225 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3226 "Can't change WoL configuration when WoL isn't supported\n");
3227 return ECORE_INVAL;
3228 }
3229
3230 switch (wol) {
3231 case ECORE_OV_WOL_DEFAULT:
3232 drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
3233 break;
3234 case ECORE_OV_WOL_DISABLED:
3235 drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
3236 break;
3237 case ECORE_OV_WOL_ENABLED:
3238 drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
3239 break;
3240 default:
3241 DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
3242 return ECORE_INVAL;
3243 }
3244
3245 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
3246 drv_mb_param, &resp, ¶m);
3247 if (rc != ECORE_SUCCESS)
3248 DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);
3249
3250 /* Store the WoL update for a future unload */
3251 p_hwfn->p_dev->wol_config = (u8)wol;
3252
3253 return rc;
3254 }
3255
3256 enum _ecore_status_t
ecore_mcp_ov_update_eswitch(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,enum ecore_ov_eswitch eswitch)3257 ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3258 enum ecore_ov_eswitch eswitch)
3259 {
3260 u32 resp = 0, param = 0;
3261 u32 drv_mb_param;
3262 enum _ecore_status_t rc;
3263
3264 switch (eswitch) {
3265 case ECORE_OV_ESWITCH_NONE:
3266 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
3267 break;
3268 case ECORE_OV_ESWITCH_VEB:
3269 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
3270 break;
3271 case ECORE_OV_ESWITCH_VEPA:
3272 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
3273 break;
3274 default:
3275 DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
3276 return ECORE_INVAL;
3277 }
3278
3279 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
3280 drv_mb_param, &resp, ¶m);
3281 if (rc != ECORE_SUCCESS)
3282 DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
3283
3284 return rc;
3285 }
3286
ecore_mcp_set_led(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,enum ecore_led_mode mode)3287 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
3288 struct ecore_ptt *p_ptt,
3289 enum ecore_led_mode mode)
3290 {
3291 u32 resp = 0, param = 0, drv_mb_param;
3292 enum _ecore_status_t rc;
3293
3294 switch (mode) {
3295 case ECORE_LED_MODE_ON:
3296 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
3297 break;
3298 case ECORE_LED_MODE_OFF:
3299 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
3300 break;
3301 case ECORE_LED_MODE_RESTORE:
3302 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
3303 break;
3304 default:
3305 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
3306 return ECORE_INVAL;
3307 }
3308
3309 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
3310 drv_mb_param, &resp, ¶m);
3311 if (rc != ECORE_SUCCESS)
3312 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
3313
3314 return rc;
3315 }
3316
ecore_mcp_mask_parities(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,u32 mask_parities)3317 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
3318 struct ecore_ptt *p_ptt,
3319 u32 mask_parities)
3320 {
3321 u32 resp = 0, param = 0;
3322 enum _ecore_status_t rc;
3323
3324 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
3325 mask_parities, &resp, ¶m);
3326
3327 if (rc != ECORE_SUCCESS) {
3328 DP_ERR(p_hwfn, "MCP response failure for mask parities, aborting\n");
3329 } else if (resp != FW_MSG_CODE_OK) {
3330 DP_ERR(p_hwfn, "MCP did not acknowledge mask parity request. Old MFW?\n");
3331 rc = ECORE_INVAL;
3332 }
3333
3334 return rc;
3335 }
3336
ecore_mcp_nvm_read(struct ecore_dev * p_dev,u32 addr,u8 * p_buf,u32 len)3337 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
3338 u8 *p_buf, u32 len)
3339 {
3340 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3341 u32 bytes_left, offset, bytes_to_copy, buf_size;
3342 u32 nvm_offset, resp = 0, param;
3343 struct ecore_ptt *p_ptt;
3344 enum _ecore_status_t rc = ECORE_SUCCESS;
3345
3346 p_ptt = ecore_ptt_acquire(p_hwfn);
3347 if (!p_ptt)
3348 return ECORE_BUSY;
3349
3350 bytes_left = len;
3351 offset = 0;
3352 while (bytes_left > 0) {
3353 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
3354 MCP_DRV_NVM_BUF_LEN);
3355 nvm_offset = (addr + offset) | (bytes_to_copy <<
3356 DRV_MB_PARAM_NVM_LEN_OFFSET);
3357 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3358 DRV_MSG_CODE_NVM_READ_NVRAM,
3359 nvm_offset, &resp, ¶m, &buf_size,
3360 (u32 *)(p_buf + offset));
3361 if (rc != ECORE_SUCCESS) {
3362 DP_NOTICE(p_dev, false,
3363 "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n",
3364 rc);
3365 resp = FW_MSG_CODE_ERROR;
3366 break;
3367 }
3368
3369 if (resp != FW_MSG_CODE_NVM_OK) {
3370 DP_NOTICE(p_dev, false,
3371 "nvm read failed, resp = 0x%08x\n", resp);
3372 rc = ECORE_UNKNOWN_ERROR;
3373 break;
3374 }
3375
3376 /* This can be a lengthy process, and it's possible scheduler
3377 * isn't preemptable. Sleep a bit to prevent CPU hogging.
3378 */
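		/* The test below fires roughly once per 4KB transferred:
		 * the modulo comparison detects when subtracting buf_size
		 * makes bytes_left cross a 0x1000 boundary.
		 */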
		if (bytes_left % 0x1000 <
		    (bytes_left - buf_size) % 0x1000)
			OSAL_MSLEEP(1);

		offset += buf_size;
		bytes_left -= buf_size;
	}

	p_dev->mcp_nvm_resp = resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
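
/* Illustrative usage (hypothetical caller): reading 256 bytes from flash
 * offset 0 into a caller-provided buffer. The routine chunks the transfer
 * into MCP_DRV_NVM_BUF_LEN pieces internally.
 *
 *	u8 buf[256];
 *
 *	if (ecore_mcp_nvm_read(p_dev, 0, buf, sizeof(buf)) != ECORE_SUCCESS)
 *		DP_NOTICE(p_dev, false, "nvm read example failed\n");
 */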

enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
					u32 addr, u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt;
	u32 resp, param;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
				  (cmd == ECORE_PHY_CORE_READ) ?
				  DRV_MSG_CODE_PHY_CORE_READ :
				  DRV_MSG_CODE_PHY_RAW_READ,
				  addr, &resp, &param, &len, (u32 *)p_buf);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);

	p_dev->mcp_nvm_resp = resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
	ecore_ptt_release(p_hwfn, p_ptt);

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev,
					    u32 addr)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt;
	u32 resp, param;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;
	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr,
			   &resp, &param);
	p_dev->mcp_nvm_resp = resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
						  u32 addr)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt;
	u32 resp, param;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;
	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
			   &resp, &param);
	p_dev->mcp_nvm_resp = resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

/* rc receives ECORE_INVAL as a default value, since the while loop below
 * might not be entered at all if len is 0.
 */
enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
					 u32 addr, u8 *p_buf, u32 len)
{
	u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp = 0, param;
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_ptt *p_ptt;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	switch (cmd) {
	case ECORE_PUT_FILE_DATA:
		nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
		break;
	case ECORE_NVM_WRITE_NVRAM:
		nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
		break;
	case ECORE_EXT_PHY_FW_UPGRADE:
		nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
		break;
	case ECORE_ENCRYPT_PASSWORD:
		nvm_cmd = DRV_MSG_CODE_ENCRYPT_PASSWORD;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
			  cmd);
		rc = ECORE_INVAL;
		goto out;
	}

	buf_idx = 0;
	while (buf_idx < len) {
		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
				      MCP_DRV_NVM_BUF_LEN);
		nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
			      addr) +
			     buf_idx;
		rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
					  &resp, &param, buf_size,
					  (u32 *)&p_buf[buf_idx]);
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_dev, false,
				  "ecore_mcp_nvm_write() failed, rc = %d\n",
				  rc);
			resp = FW_MSG_CODE_ERROR;
			break;
		}

		if (resp != FW_MSG_CODE_OK &&
		    resp != FW_MSG_CODE_NVM_OK &&
		    resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
			DP_NOTICE(p_dev, false,
				  "nvm write failed, resp = 0x%08x\n", resp);
			rc = ECORE_UNKNOWN_ERROR;
			break;
		}

		/* This can be a lengthy process, and it's possible the
		 * scheduler isn't preemptible. Sleep a bit to prevent
		 * CPU hogging.
		 */
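		/* As in ecore_mcp_nvm_read(): sleep roughly once per 4KB
		 * written; the modulo comparison detects buf_idx crossing
		 * a 0x1000 boundary.
		 */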
		if (buf_idx % 0x1000 >
		    (buf_idx + buf_size) % 0x1000)
			OSAL_MSLEEP(1);

		buf_idx += buf_size;
	}

	p_dev->mcp_nvm_resp = resp;
out:
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
					 u32 addr, u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt;
	u32 resp, param, nvm_cmd;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE :
		  DRV_MSG_CODE_PHY_RAW_WRITE;
	rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr,
				  &resp, &param, len, (u32 *)p_buf);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
	p_dev->mcp_nvm_resp = resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
						   u32 addr)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt;
	u32 resp, param;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr,
			   &resp, &param);
	p_dev->mcp_nvm_resp = resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    u32 port, u32 addr, u32 offset,
					    u32 len, u8 *p_buf)
{
	u32 bytes_left, bytes_to_copy, buf_size, nvm_offset;
	u32 resp, param;
	enum _ecore_status_t rc;

	nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
		     (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
	addr = offset;
	offset = 0;
	bytes_left = len;
	while (bytes_left > 0) {
		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
					   MAX_I2C_TRANSACTION_SIZE);
		nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
			       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
		nvm_offset |= ((addr + offset) <<
			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
		nvm_offset |= (bytes_to_copy <<
			       DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
		rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					  DRV_MSG_CODE_TRANSCEIVER_READ,
					  nvm_offset, &resp, &param, &buf_size,
					  (u32 *)(p_buf + offset));
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, false,
				  "Failed to send a transceiver read command to the MFW. rc = %d.\n",
				  rc);
			return rc;
		}

		if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
			return ECORE_NODEV;
		else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
			return ECORE_UNKNOWN_ERROR;

		offset += buf_size;
		bytes_left -= buf_size;
	}

	return ECORE_SUCCESS;
}
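
/* Illustrative usage (hypothetical caller): dumping the first 128 bytes of
 * the transceiver EEPROM on port 0 at the conventional I2C address 0xA0.
 * The I2C address and length here are examples only.
 *
 *	u8 eeprom[128];
 *
 *	rc = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, 0, 0xA0, 0,
 *				    sizeof(eeprom), eeprom);
 *	if (rc == ECORE_NODEV)
 *		DP_NOTICE(p_hwfn, false, "no transceiver plugged in\n");
 */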

enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 port, u32 addr, u32 offset,
					     u32 len, u8 *p_buf)
{
	u32 buf_idx, buf_size, nvm_offset, resp, param;
	enum _ecore_status_t rc;

	nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
		     (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
	buf_idx = 0;
	while (buf_idx < len) {
		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
				      MAX_I2C_TRANSACTION_SIZE);
		nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
			       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
		nvm_offset |= ((offset + buf_idx) <<
			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
		nvm_offset |= (buf_size <<
			       DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
		rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
					  DRV_MSG_CODE_TRANSCEIVER_WRITE,
					  nvm_offset, &resp, &param, buf_size,
					  (u32 *)&p_buf[buf_idx]);
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, false,
				  "Failed to send a transceiver write command to the MFW. rc = %d.\n",
				  rc);
			return rc;
		}

		if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
			return ECORE_NODEV;
		else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
			return ECORE_UNKNOWN_ERROR;

		buf_idx += buf_size;
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 gpio, u32 *gpio_val)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 drv_mb_param = 0, rsp;

	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
			   drv_mb_param, &rsp, gpio_val);

	if (rc != ECORE_SUCCESS)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u16 gpio, u16 gpio_val)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 drv_mb_param = 0, param, rsp;

	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) |
		       (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
			   drv_mb_param, &rsp, &param);

	if (rc != ECORE_SUCCESS)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 gpio, u32 *gpio_direction,
					 u32 *gpio_ctrl)
{
	u32 drv_mb_param = 0, rsp, val = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
			   drv_mb_param, &rsp, &val);
	if (rc != ECORE_SUCCESS)
		return rc;

	*gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
			  DRV_MB_PARAM_GPIO_DIRECTION_OFFSET;
	*gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
		     DRV_MB_PARAM_GPIO_CTRL_OFFSET;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}
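
/* Illustrative usage (hypothetical caller): toggling a GPIO after first
 * querying its configuration. GPIO number 5 is an arbitrary example.
 *
 *	u32 dir, ctrl, val;
 *
 *	if (ecore_mcp_gpio_info(p_hwfn, p_ptt, 5, &dir, &ctrl) ==
 *	    ECORE_SUCCESS &&
 *	    ecore_mcp_gpio_read(p_hwfn, p_ptt, 5, &val) == ECORE_SUCCESS)
 *		(void)ecore_mcp_gpio_write(p_hwfn, p_ptt, 5, val ? 0 : 1);
 */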

enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt)
{
	u32 drv_mb_param = 0, rsp, param;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, &param);

	if (rc != ECORE_SUCCESS)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt)
{
	u32 drv_mb_param, rsp, param;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, &param);

	if (rc != ECORE_SUCCESS)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
{
	u32 drv_mb_param = 0, rsp;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
			DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, num_images);

	if (rc != ECORE_SUCCESS)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
	struct bist_nvm_image_att *p_image_att, u32 image_index)
{
	u32 buf_size, nvm_offset, resp, param;
	enum _ecore_status_t rc;

	nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
		      DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
	nvm_offset |= (image_index <<
		       DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET);
	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
				  nvm_offset, &resp, &param, &buf_size,
				  (u32 *)p_image_att);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (p_image_att->return_code != 1))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

enum _ecore_status_t
ecore_mcp_get_nvm_image_att(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			    enum ecore_nvm_images image_id,
			    struct ecore_nvm_image_att *p_image_att)
{
	struct bist_nvm_image_att mfw_image_att;
	enum nvm_image_type type;
	u32 num_images, i;
	enum _ecore_status_t rc;

	/* Translate image_id into MFW definitions */
	switch (image_id) {
	case ECORE_NVM_IMAGE_ISCSI_CFG:
		type = NVM_TYPE_ISCSI_CFG;
		break;
	case ECORE_NVM_IMAGE_FCOE_CFG:
		type = NVM_TYPE_FCOE_CFG;
		break;
	case ECORE_NVM_IMAGE_MDUMP:
		type = NVM_TYPE_MDUMP;
		break;
	default:
		DP_NOTICE(p_hwfn, false, "Unknown request of image_id %08x\n",
			  image_id);
		return ECORE_INVAL;
	}

	/* Learn number of images, then traverse and see if one fits */
	rc = ecore_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images);
	if (rc != ECORE_SUCCESS || !num_images)
		return ECORE_INVAL;

	for (i = 0; i < num_images; i++) {
		rc = ecore_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
							   &mfw_image_att, i);
		if (rc != ECORE_SUCCESS)
			return rc;

		if (type == mfw_image_att.image_type)
			break;
	}
	if (i == num_images) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
			   "Failed to find nvram image of type %08x\n",
			   image_id);
		return ECORE_INVAL;
	}

	p_image_att->start_addr = mfw_image_att.nvm_start_addr;
	p_image_att->length = mfw_image_att.len;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_get_nvm_image(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     enum ecore_nvm_images image_id,
					     u8 *p_buffer, u32 buffer_len)
{
	struct ecore_nvm_image_att image_att;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(p_buffer, buffer_len);

	rc = ecore_mcp_get_nvm_image_att(p_hwfn, p_ptt, image_id, &image_att);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Validate sizes - both the image's and the supplied buffer's */
	if (image_att.length <= 4) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
			   "Image [%d] is too small - only %d bytes\n",
			   image_id, image_att.length);
		return ECORE_INVAL;
	}

	/* Each NVM image is suffixed by CRC; Upper-layer has no need for it */
	image_att.length -= 4;

	if (image_att.length > buffer_len) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
			   "Image [%d] is too big - %08x bytes where only %08x are available\n",
			   image_id, image_att.length, buffer_len);
		return ECORE_NOMEM;
	}

	return ecore_mcp_nvm_read(p_hwfn->p_dev, image_att.start_addr,
				  p_buffer, image_att.length);
}
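
/* Illustrative usage (hypothetical caller): fetching the mdump image. The
 * buffer size is an example; a too-small buffer makes the routine return
 * ECORE_NOMEM after it has learned the image length.
 *
 *	u8 img[0x1000];
 *
 *	rc = ecore_mcp_get_nvm_image(p_hwfn, p_ptt, ECORE_NVM_IMAGE_MDUMP,
 *				     img, sizeof(img));
 */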

enum _ecore_status_t
ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct ecore_temperature_info *p_temp_info)
{
	struct ecore_temperature_sensor *p_temp_sensor;
	struct temperature_status_stc mfw_temp_info;
	struct ecore_mcp_mb_params mb_params;
	u32 val;
	enum _ecore_status_t rc;
	u8 i;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
	mb_params.p_data_dst = &mfw_temp_info;
	mb_params.data_dst_size = sizeof(mfw_temp_info);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
	p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
					      ECORE_MAX_NUM_OF_SENSORS);
	for (i = 0; i < p_temp_info->num_sensors; i++) {
		val = mfw_temp_info.sensor[i];
		p_temp_sensor = &p_temp_info->sensors[i];
		p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
						 SENSOR_LOCATION_OFFSET;
		p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
						THRESHOLD_HIGH_OFFSET;
		p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
					  CRITICAL_TEMPERATURE_OFFSET;
		p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
					      CURRENT_TEMP_OFFSET;
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_get_mba_versions(
	struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt,
	struct ecore_mba_vers *p_mba_vers)
{
	u32 buf_size, resp, param;
	enum _ecore_status_t rc;

	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION,
				  0, &resp, &param, &buf_size,
				  &(p_mba_vers->mba_vers[0]));

	if (rc != ECORE_SUCCESS)
		return rc;

	if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
		rc = ECORE_UNKNOWN_ERROR;

	if (buf_size != MCP_DRV_NVM_BUF_LEN)
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u64 *num_events)
{
	struct ecore_mcp_mb_params mb_params;

	OSAL_MEMSET(&mb_params, 0, sizeof(struct ecore_mcp_mb_params));
	mb_params.cmd = DRV_MSG_CODE_MEM_ECC_EVENTS;
	mb_params.p_data_dst = (union drv_union_data *)num_events;

	return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

static enum resource_id_enum
ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
{
	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;

	switch (res_id) {
	case ECORE_SB:
		mfw_res_id = RESOURCE_NUM_SB_E;
		break;
	case ECORE_L2_QUEUE:
		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
		break;
	case ECORE_VPORT:
		mfw_res_id = RESOURCE_NUM_VPORT_E;
		break;
	case ECORE_RSS_ENG:
		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
		break;
	case ECORE_PQ:
		mfw_res_id = RESOURCE_NUM_PQ_E;
		break;
	case ECORE_RL:
		mfw_res_id = RESOURCE_NUM_RL_E;
		break;
	case ECORE_MAC:
	case ECORE_VLAN:
		/* Each VFC resource can accommodate both a MAC and a VLAN */
		mfw_res_id = RESOURCE_VFC_FILTER_E;
		break;
	case ECORE_ILT:
		mfw_res_id = RESOURCE_ILT_E;
		break;
	case ECORE_LL2_QUEUE:
		mfw_res_id = RESOURCE_LL2_QUEUE_E;
		break;
	case ECORE_RDMA_CNQ_RAM:
	case ECORE_CMDQS_CQS:
		/* CNQ/CMDQS are the same resource */
		mfw_res_id = RESOURCE_CQS_E;
		break;
	case ECORE_RDMA_STATS_QUEUE:
		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
		break;
	case ECORE_BDQ:
		mfw_res_id = RESOURCE_BDQ_E;
		break;
	default:
		break;
	}

	return mfw_res_id;
}

#define ECORE_RESC_ALLOC_VERSION_MAJOR	2
#define ECORE_RESC_ALLOC_VERSION_MINOR	0
#define ECORE_RESC_ALLOC_VERSION				\
	((ECORE_RESC_ALLOC_VERSION_MAJOR <<			\
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) |	\
	 (ECORE_RESC_ALLOC_VERSION_MINOR <<			\
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET))
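
/* For example, with major 2 and minor 0 the macro above packs to
 * (2 << DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) with the minor
 * bits clear; both fields are later unpacked with GET_MFW_FIELD() in the
 * verbose prints of ecore_mcp_resc_allocation_msg() below.
 */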

struct ecore_resc_alloc_in_params {
	u32 cmd;
	enum ecore_resources res_id;
	u32 resc_max_val;
};

struct ecore_resc_alloc_out_params {
	u32 mcp_resp;
	u32 mcp_param;
	u32 resc_num;
	u32 resc_start;
	u32 vf_resc_num;
	u32 vf_resc_start;
	u32 flags;
};

static enum _ecore_status_t
ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      struct ecore_resc_alloc_in_params *p_in_params,
			      struct ecore_resc_alloc_out_params *p_out_params)
{
	struct ecore_mcp_mb_params mb_params;
	struct resource_info mfw_resc_info;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));

	mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
	if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
		DP_ERR(p_hwfn,
		       "Failed to match resource %d [%s] with the MFW resources\n",
		       p_in_params->res_id,
		       ecore_hw_get_resc_name(p_in_params->res_id));
		return ECORE_INVAL;
	}

	switch (p_in_params->cmd) {
	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
		mfw_resc_info.size = p_in_params->resc_max_val;
		/* Fallthrough */
	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
		       p_in_params->cmd);
		return ECORE_INVAL;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = p_in_params->cmd;
	mb_params.param = ECORE_RESC_ALLOC_VERSION;
	mb_params.p_data_src = &mfw_resc_info;
	mb_params.data_src_size = sizeof(mfw_resc_info);
	mb_params.p_data_dst = mb_params.p_data_src;
	mb_params.data_dst_size = mb_params.data_src_size;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
		   p_in_params->cmd, p_in_params->res_id,
		   ecore_hw_get_resc_name(p_in_params->res_id),
		   GET_MFW_FIELD(mb_params.param,
				 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   GET_MFW_FIELD(mb_params.param,
				 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_in_params->resc_max_val);

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_out_params->mcp_resp = mb_params.mcp_resp;
	p_out_params->mcp_param = mb_params.mcp_param;
	p_out_params->resc_num = mfw_resc_info.size;
	p_out_params->resc_start = mfw_resc_info.offset;
	p_out_params->vf_resc_num = mfw_resc_info.vf_size;
	p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
	p_out_params->flags = mfw_resc_info.flags;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
		   GET_MFW_FIELD(p_out_params->mcp_param,
				 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   GET_MFW_FIELD(p_out_params->mcp_param,
				 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_out_params->resc_num, p_out_params->resc_start,
		   p_out_params->vf_resc_num, p_out_params->vf_resc_start,
		   p_out_params->flags);

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   enum ecore_resources res_id, u32 resc_max_val,
			   u32 *p_mcp_resp)
{
	struct ecore_resc_alloc_out_params out_params;
	struct ecore_resc_alloc_in_params in_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
	in_params.res_id = res_id;
	in_params.resc_max_val = resc_max_val;
	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
					   &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*p_mcp_resp = out_params.mcp_resp;

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			enum ecore_resources res_id, u32 *p_mcp_resp,
			u32 *p_resc_num, u32 *p_resc_start)
{
	struct ecore_resc_alloc_out_params out_params;
	struct ecore_resc_alloc_in_params in_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
	in_params.res_id = res_id;
	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
					   &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*p_mcp_resp = out_params.mcp_resp;

	if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
		*p_resc_num = out_params.resc_num;
		*p_resc_start = out_params.resc_start;
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
			     &mcp_resp, &mcp_param);
}

enum _ecore_status_t ecore_mcp_get_lldp_mac(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    u8 lldp_mac_addr[ETH_ALEN])
{
	struct ecore_mcp_mb_params mb_params;
	struct mcp_mac lldp_mac;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_LLDP_MAC;
	mb_params.p_data_dst = &lldp_mac;
	mb_params.data_dst_size = sizeof(lldp_mac);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
		DP_NOTICE(p_hwfn, false,
			  "MFW lacks support for the GET_LLDP_MAC command [resp 0x%08x]\n",
			  mb_params.mcp_resp);
		return ECORE_INVAL;
	}

	*(u16 *)lldp_mac_addr = OSAL_BE16_TO_CPU(*(u16 *)&lldp_mac.mac_upper);
	*(u32 *)(lldp_mac_addr + 2) = OSAL_BE32_TO_CPU(lldp_mac.mac_lower);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "LLDP MAC address is %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n",
		   lldp_mac_addr[0], lldp_mac_addr[1], lldp_mac_addr[2],
		   lldp_mac_addr[3], lldp_mac_addr[4], lldp_mac_addr[5]);

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_set_lldp_mac(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    u8 lldp_mac_addr[ETH_ALEN])
{
	struct ecore_mcp_mb_params mb_params;
	struct mcp_mac lldp_mac;
	enum _ecore_status_t rc;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Configuring LLDP MAC address to %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n",
		   lldp_mac_addr[0], lldp_mac_addr[1], lldp_mac_addr[2],
		   lldp_mac_addr[3], lldp_mac_addr[4], lldp_mac_addr[5]);

	OSAL_MEM_ZERO(&lldp_mac, sizeof(lldp_mac));
	lldp_mac.mac_upper = OSAL_CPU_TO_BE16(*(u16 *)lldp_mac_addr);
	lldp_mac.mac_lower = OSAL_CPU_TO_BE32(*(u32 *)(lldp_mac_addr + 2));

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_LLDP_MAC;
	mb_params.p_data_src = &lldp_mac;
	mb_params.data_src_size = sizeof(lldp_mac);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
		DP_NOTICE(p_hwfn, false,
			  "MFW lacks support for the SET_LLDP_MAC command [resp 0x%08x]\n",
			  mb_params.mcp_resp);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt,
						   u32 param, u32 *p_mcp_resp,
						   u32 *p_mcp_param)
{
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
			   p_mcp_resp, p_mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The resource command is unsupported by the MFW\n");
		return ECORE_NOTIMPL;
	}

	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
		u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE);

		DP_NOTICE(p_hwfn, false,
			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
			  param, opcode);
		return ECORE_INVAL;
	}

	return rc;
}

static enum _ecore_status_t
__ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		      struct ecore_resc_lock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode, timeout;
	enum _ecore_status_t rc;

	switch (p_params->timeout) {
	case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
		opcode = RESOURCE_OPCODE_REQ;
		timeout = 0;
		break;
	case ECORE_MCP_RESC_LOCK_TO_NONE:
		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
		timeout = 0;
		break;
	default:
		opcode = RESOURCE_OPCODE_REQ_W_AGING;
		timeout = p_params->timeout;
		break;
	}

	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, timeout);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
		   param, timeout, opcode, p_params->resource);

	/* Attempt to acquire the resource */
	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
				    &mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Analyze the response */
	p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
	opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
		   mcp_param, opcode, p_params->owner);

	switch (opcode) {
	case RESOURCE_OPCODE_GNT:
		p_params->b_granted = true;
		break;
	case RESOURCE_OPCODE_BUSY:
		p_params->b_granted = false;
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		    struct ecore_resc_lock_params *p_params)
{
	u32 retry_cnt = 0;
	enum _ecore_status_t rc;

	do {
		/* No need for an interval before the first iteration */
		if (retry_cnt) {
			if (p_params->sleep_b4_retry) {
				u32 retry_interval_in_ms =
					DIV_ROUND_UP(p_params->retry_interval,
						     1000);

				OSAL_MSLEEP(retry_interval_in_ms);
			} else {
				OSAL_UDELAY(p_params->retry_interval);
			}
		}

		rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
		if (rc != ECORE_SUCCESS)
			return rc;

		if (p_params->b_granted)
			break;
	} while (retry_cnt++ < p_params->retry_num);

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		      struct ecore_resc_unlock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	enum _ecore_status_t rc;

	opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
				   : RESOURCE_OPCODE_RELEASE;
	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
		   param, opcode, p_params->resource);

	/* Attempt to release the resource */
	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
				    &mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Analyze the response */
	opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
		   mcp_param, opcode);

	switch (opcode) {
	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
		DP_INFO(p_hwfn,
			"Resource unlock request for an already released resource [%d]\n",
			p_params->resource);
		/* Fallthrough */
	case RESOURCE_OPCODE_RELEASED:
		p_params->b_released = true;
		break;
	case RESOURCE_OPCODE_WRONG_OWNER:
		p_params->b_released = false;
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
				      struct ecore_resc_unlock_params *p_unlock,
				      enum ecore_resc_lock resource,
				      bool b_is_permanent)
{
	if (p_lock != OSAL_NULL) {
		OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));

		/* Permanent resources don't require aging, and there's no
		 * point in trying to acquire them more than once since it's
		 * unexpected another entity would release them.
		 */
		if (b_is_permanent) {
			p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE;
		} else {
			p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
			p_lock->retry_interval =
				ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
			p_lock->sleep_b4_retry = true;
		}

		p_lock->resource = resource;
	}

	if (p_unlock != OSAL_NULL) {
		OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock));
		p_unlock->resource = resource;
	}
}
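
/* Illustrative usage (hypothetical caller): taking and releasing a
 * non-permanent resource lock with the defaults set up above. The resource
 * ECORE_RESC_LOCK_IND_TABLE is just an example; see
 * ecore_mcp_ind_table_lock() further below for the real in-tree user.
 *
 *	struct ecore_resc_lock_params lock;
 *	struct ecore_resc_unlock_params unlock;
 *
 *	ecore_mcp_resc_lock_default_init(&lock, &unlock,
 *					 ECORE_RESC_LOCK_IND_TABLE, false);
 *	if (ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock) == ECORE_SUCCESS &&
 *	    lock.b_granted) {
 *		... critical section ...
 *		(void)ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock);
 *	}
 */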

enum _ecore_status_t
ecore_mcp_update_fcoe_cvid(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   u16 vlan)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OEM_UPDATE_FCOE_CVID,
			   (u32)vlan << DRV_MB_PARAM_FCOE_CVID_OFFSET,
			   &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to update fcoe vlan, rc = %d\n", rc);

	return rc;
}

enum _ecore_status_t
ecore_mcp_update_fcoe_fabric_name(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt, u8 *wwn)
{
	struct ecore_mcp_mb_params mb_params;
	struct mcp_wwn fabric_name;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&fabric_name, sizeof(fabric_name));
	fabric_name.wwn_upper = *(u32 *)wwn;
	fabric_name.wwn_lower = *(u32 *)(wwn + 4);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_OEM_UPDATE_FCOE_FABRIC_NAME;
	mb_params.p_data_src = &fabric_name;
	mb_params.data_src_size = sizeof(fabric_name);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to update fcoe wwn, rc = %d\n", rc);

	return rc;
}

void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		      u32 offset, u32 val)
{
	struct ecore_mcp_mb_params mb_params = {0};
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 dword = val;

	mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
	mb_params.param = offset;
	mb_params.p_data_src = &dword;
	mb_params.data_src_size = sizeof(dword);

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send wol write request, rc = %d\n", rc);
	}

	if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
			  val, offset, mb_params.mcp_resp);
		rc = ECORE_UNKNOWN_ERROR;
	}
}

bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn)
{
	return !!(p_hwfn->mcp_info->capabilities &
		  FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
}

bool ecore_mcp_rlx_odr_supported(struct ecore_hwfn *p_hwfn)
{
	return !!(p_hwfn->mcp_info->capabilities &
		  FW_MB_PARAM_FEATURE_SUPPORT_RELAXED_ORD);
}

enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 mcp_resp;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
			   0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
	if (rc == ECORE_SUCCESS)
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
			   "MFW supported features: %08x\n",
			   p_hwfn->mcp_info->capabilities);

	return rc;
}

enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param, features;

	features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
		   DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
		   DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
			     features, &mcp_resp, &mcp_param);
}

enum _ecore_status_t
ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			struct ecore_mcp_drv_attr *p_drv_attr)
{
	struct attribute_cmd_write_stc attr_cmd_write;
	enum _attribute_commands_e mfw_attr_cmd;
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	switch (p_drv_attr->attr_cmd) {
	case ECORE_MCP_DRV_ATTR_CMD_READ:
		mfw_attr_cmd = ATTRIBUTE_CMD_READ;
		break;
	case ECORE_MCP_DRV_ATTR_CMD_WRITE:
		mfw_attr_cmd = ATTRIBUTE_CMD_WRITE;
		break;
	case ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR:
		mfw_attr_cmd = ATTRIBUTE_CMD_READ_CLEAR;
		break;
	case ECORE_MCP_DRV_ATTR_CMD_CLEAR:
		mfw_attr_cmd = ATTRIBUTE_CMD_CLEAR;
		break;
	default:
		DP_NOTICE(p_hwfn, false, "Unknown attribute command %d\n",
			  p_drv_attr->attr_cmd);
		return ECORE_INVAL;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_ATTRIBUTE;
	SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_KEY,
		      p_drv_attr->attr_num);
	SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_CMD,
		      mfw_attr_cmd);
	if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_WRITE) {
		OSAL_MEM_ZERO(&attr_cmd_write, sizeof(attr_cmd_write));
		attr_cmd_write.val = p_drv_attr->val;
		attr_cmd_write.mask = p_drv_attr->mask;
		attr_cmd_write.offset = p_drv_attr->offset;

		mb_params.p_data_src = &attr_cmd_write;
		mb_params.data_src_size = sizeof(attr_cmd_write);
	}

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The attribute command is not supported by the MFW\n");
		return ECORE_NOTIMPL;
	} else if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
		DP_INFO(p_hwfn,
			"Failed to send an attribute command [mcp_resp 0x%x, attr_cmd %d, attr_num %d]\n",
			mb_params.mcp_resp, p_drv_attr->attr_cmd,
			p_drv_attr->attr_num);
		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Attribute Command: cmd %d [mfw_cmd %d], num %d, in={val 0x%08x, mask 0x%08x, offset 0x%08x}, out={val 0x%08x}\n",
		   p_drv_attr->attr_cmd, mfw_attr_cmd, p_drv_attr->attr_num,
		   p_drv_attr->val, p_drv_attr->mask, p_drv_attr->offset,
		   mb_params.mcp_param);

	if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ ||
	    p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR)
		p_drv_attr->val = mb_params.mcp_param;

	return ECORE_SUCCESS;
}
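
/* Illustrative usage (hypothetical caller): reading attribute number 0
 * without clearing it. Only the fields relevant for a read are filled in.
 *
 *	struct ecore_mcp_drv_attr drv_attr = {0};
 *
 *	drv_attr.attr_cmd = ECORE_MCP_DRV_ATTR_CMD_READ;
 *	drv_attr.attr_num = 0;
 *	if (ecore_mcp_drv_attribute(p_hwfn, p_ptt, &drv_attr) ==
 *	    ECORE_SUCCESS)
 *		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "attr val 0x%08x\n",
 *			   drv_attr.val);
 */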

enum _ecore_status_t ecore_mcp_get_engine_config(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	struct ecore_mcp_mb_params mb_params;
	u8 fir_valid, l2_valid;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The get_engine_config command is unsupported by the MFW\n");
		return ECORE_NOTIMPL;
	}

	fir_valid = GET_MFW_FIELD(mb_params.mcp_param,
				  FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
	if (fir_valid)
		p_dev->fir_affin =
			GET_MFW_FIELD(mb_params.mcp_param,
				      FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);

	l2_valid = GET_MFW_FIELD(mb_params.mcp_param,
				 FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
	if (l2_valid)
		p_dev->l2_affin_hint =
			GET_MFW_FIELD(mb_params.mcp_param,
				      FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);

	DP_INFO(p_hwfn,
		"Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
		fir_valid, p_dev->fir_affin, l2_valid, p_dev->l2_affin_hint);

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The get_ppfid_bitmap command is unsupported by the MFW\n");
		return ECORE_NOTIMPL;
	}

	p_dev->ppfid_bitmap = GET_MFW_FIELD(mb_params.mcp_param,
					    FW_MB_PARAM_PPFID_BITMAP);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "PPFID bitmap 0x%hhx\n",
		   p_dev->ppfid_bitmap);

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_mcp_ind_table_lock(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 u8 retry_num,
			 u32 retry_interval)
{
	struct ecore_resc_lock_params resc_lock_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&resc_lock_params,
		      sizeof(struct ecore_resc_lock_params));
	resc_lock_params.resource = ECORE_RESC_LOCK_IND_TABLE;
	if (!retry_num)
		retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
	resc_lock_params.retry_num = retry_num;

	if (!retry_interval)
		retry_interval = ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
	resc_lock_params.retry_interval = retry_interval;

	rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
	if (rc == ECORE_SUCCESS && !resc_lock_params.b_granted) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to acquire the resource lock for IDT access\n");
		return ECORE_BUSY;
	}
	return rc;
}

enum _ecore_status_t
ecore_mcp_ind_table_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_resc_unlock_params resc_unlock_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&resc_unlock_params,
		      sizeof(struct ecore_resc_unlock_params));
	resc_unlock_params.resource = ECORE_RESC_LOCK_IND_TABLE;
	rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt,
				   &resc_unlock_params);
	return rc;
}
#ifdef _NTDDK_
#pragma warning(pop)
#endif