1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) 2 /* QLogic qed NIC Driver 3 * Copyright (c) 2015-2017 QLogic Corporation 4 * Copyright (c) 2019-2020 Marvell International Ltd. 5 */ 6 7 #include <linux/types.h> 8 #include <asm/byteorder.h> 9 #include <linux/delay.h> 10 #include <linux/errno.h> 11 #include <linux/kernel.h> 12 #include <linux/slab.h> 13 #include <linux/spinlock.h> 14 #include <linux/string.h> 15 #include <linux/etherdevice.h> 16 #include "qed.h" 17 #include "qed_cxt.h" 18 #include "qed_dcbx.h" 19 #include "qed_hsi.h" 20 #include "qed_mfw_hsi.h" 21 #include "qed_hw.h" 22 #include "qed_mcp.h" 23 #include "qed_reg_addr.h" 24 #include "qed_sriov.h" 25 26 #define GRCBASE_MCP 0xe00000 27 28 #define QED_MCP_RESP_ITER_US 10 29 30 #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ 31 #define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ 32 33 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \ 34 qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset)), \ 35 _val) 36 37 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \ 38 qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset))) 39 40 #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \ 41 DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \ 42 offsetof(struct public_drv_mb, _field), _val) 43 44 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \ 45 DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \ 46 offsetof(struct public_drv_mb, _field)) 47 48 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \ 49 DRV_ID_PDA_COMP_VER_SHIFT) 50 51 #define MCP_BYTES_PER_MBIT_SHIFT 17 52 53 bool qed_mcp_is_init(struct qed_hwfn *p_hwfn) 54 { 55 if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base) 56 return false; 57 return true; 58 } 59 60 void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 61 { 62 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, 63 PUBLIC_PORT); 64 u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr); 65 66 p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize, 67 MFW_PORT(p_hwfn)); 68 DP_VERBOSE(p_hwfn, QED_MSG_SP, 69 "port_addr = 0x%x, port_id 0x%02x\n", 70 p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn)); 71 } 72 73 void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 74 { 75 u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length); 76 u32 tmp, i; 77 78 if (!p_hwfn->mcp_info->public_base) 79 return; 80 81 for (i = 0; i < length; i++) { 82 tmp = qed_rd(p_hwfn, p_ptt, 83 p_hwfn->mcp_info->mfw_mb_addr + 84 (i << 2) + sizeof(u32)); 85 86 /* The MB data is actually BE; Need to force it to cpu */ 87 ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] = 88 be32_to_cpu((__force __be32)tmp); 89 } 90 } 91 92 struct qed_mcp_cmd_elem { 93 struct list_head list; 94 struct qed_mcp_mb_params *p_mb_params; 95 u16 expected_seq_num; 96 bool b_is_completed; 97 }; 98 99 /* Must be called while cmd_lock is acquired */ 100 static struct qed_mcp_cmd_elem * 101 qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn, 102 struct qed_mcp_mb_params *p_mb_params, 103 u16 expected_seq_num) 104 { 105 struct qed_mcp_cmd_elem *p_cmd_elem = NULL; 106 107 p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC); 108 if (!p_cmd_elem) 109 goto out; 110 111 p_cmd_elem->p_mb_params = p_mb_params; 112 p_cmd_elem->expected_seq_num = expected_seq_num; 113 list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list); 114 out: 115 return p_cmd_elem; 116 } 117 118 /* Must be called while cmd_lock is acquired */ 119 static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn, 
120 struct qed_mcp_cmd_elem *p_cmd_elem) 121 { 122 list_del(&p_cmd_elem->list); 123 kfree(p_cmd_elem); 124 } 125 126 /* Must be called while cmd_lock is acquired */ 127 static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn, 128 u16 seq_num) 129 { 130 struct qed_mcp_cmd_elem *p_cmd_elem = NULL; 131 132 list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) { 133 if (p_cmd_elem->expected_seq_num == seq_num) 134 return p_cmd_elem; 135 } 136 137 return NULL; 138 } 139 140 int qed_mcp_free(struct qed_hwfn *p_hwfn) 141 { 142 if (p_hwfn->mcp_info) { 143 struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp; 144 145 kfree(p_hwfn->mcp_info->mfw_mb_cur); 146 kfree(p_hwfn->mcp_info->mfw_mb_shadow); 147 148 spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); 149 list_for_each_entry_safe(p_cmd_elem, 150 p_tmp, 151 &p_hwfn->mcp_info->cmd_list, list) { 152 qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); 153 } 154 spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); 155 } 156 157 kfree(p_hwfn->mcp_info); 158 p_hwfn->mcp_info = NULL; 159 160 return 0; 161 } 162 163 /* Maximum of 1 sec to wait for the SHMEM ready indication */ 164 #define QED_MCP_SHMEM_RDY_MAX_RETRIES 20 165 #define QED_MCP_SHMEM_RDY_ITER_MS 50 166 167 static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 168 { 169 struct qed_mcp_info *p_info = p_hwfn->mcp_info; 170 u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES; 171 u8 msec = QED_MCP_SHMEM_RDY_ITER_MS; 172 u32 drv_mb_offsize, mfw_mb_offsize; 173 u32 mcp_pf_id = MCP_PF_ID(p_hwfn); 174 175 p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR); 176 if (!p_info->public_base) { 177 DP_NOTICE(p_hwfn, 178 "The address of the MCP scratch-pad is not configured\n"); 179 return -EINVAL; 180 } 181 182 p_info->public_base |= GRCBASE_MCP; 183 184 /* Get the MFW MB address and number of supported messages */ 185 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, 186 SECTION_OFFSIZE_ADDR(p_info->public_base, 187 PUBLIC_MFW_MB)); 188 p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id); 189 p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, 190 p_info->mfw_mb_addr + 191 offsetof(struct public_mfw_mb, 192 sup_msgs)); 193 194 /* The driver can notify that there was an MCP reset, and might read the 195 * SHMEM values before the MFW has completed initializing them. 196 * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a 197 * data ready indication. 
198 */
199 while (!p_info->mfw_mb_length && --cnt) {
200 msleep(msec);
201 p_info->mfw_mb_length =
202 (u16)qed_rd(p_hwfn, p_ptt,
203 p_info->mfw_mb_addr +
204 offsetof(struct public_mfw_mb, sup_msgs));
205 }
206
207 if (!cnt) {
208 DP_NOTICE(p_hwfn,
209 "Failed to get the SHMEM ready notification after %d msec\n",
210 QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
211 return -EBUSY;
212 }
213
214 /* Calculate the driver and MFW mailbox address */
215 drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
216 SECTION_OFFSIZE_ADDR(p_info->public_base,
217 PUBLIC_DRV_MB));
218 p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
219 DP_VERBOSE(p_hwfn, QED_MSG_SP,
220 "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
221 drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
222
223 /* Get the current driver mailbox sequence before sending
224 * the first command
225 */
226 p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
227 DRV_MSG_SEQ_NUMBER_MASK;
228
229 /* Get current FW pulse sequence */
230 p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
231 DRV_PULSE_SEQ_MASK;
232
233 p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
234
235 return 0;
236 }
237
238 int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
239 {
240 struct qed_mcp_info *p_info;
241 u32 size;
242
243 /* Allocate mcp_info structure */
244 p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
245 if (!p_hwfn->mcp_info)
246 goto err;
247 p_info = p_hwfn->mcp_info;
248
249 /* Initialize the MFW spinlock */
250 spin_lock_init(&p_info->cmd_lock);
251 spin_lock_init(&p_info->link_lock);
252
253 INIT_LIST_HEAD(&p_info->cmd_list);
254
255 if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
256 DP_NOTICE(p_hwfn, "MCP is not initialized\n");
257 /* Do not free mcp_info here, since public_base indicates that
258 * the MCP is not initialized
259 */
260 return 0;
261 }
262
263 size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
264 p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
265 p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
266 if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
267 goto err;
268
269 return 0;
270
271 err:
272 qed_mcp_free(p_hwfn);
273 return -ENOMEM;
274 }
275
276 static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
277 struct qed_ptt *p_ptt)
278 {
279 u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
280
281 /* Use MCP history register to check if MCP reset occurred between init
282 * time and now.
283 */
284 if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
285 DP_VERBOSE(p_hwfn,
286 QED_MSG_SP,
287 "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
288 p_hwfn->mcp_info->mcp_hist, generic_por_0);
289
290 qed_load_mcp_offsets(p_hwfn, p_ptt);
291 qed_mcp_cmd_port_init(p_hwfn, p_ptt);
292 }
293 }
294
295 int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
296 {
297 u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
298 int rc = 0;
299
300 if (p_hwfn->mcp_info->b_block_cmd) {
301 DP_NOTICE(p_hwfn,
302 "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
303 return -EBUSY;
304 }
305
306 /* Ensure that only a single thread is accessing the mailbox */
307 spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
308
309 org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
310
311 /* Set drv command along with the updated sequence */
312 qed_mcp_reread_offsets(p_hwfn, p_ptt);
313 seq = ++p_hwfn->mcp_info->drv_mb_seq;
314 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
315
316 do {
317 /* Wait for MFW response */
318 udelay(delay);
319 /* Give the FW up to 500 msec (50*1000*10usec) */
320 } while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
321 MISCS_REG_GENERIC_POR_0)) &&
322 (cnt++ < QED_MCP_RESET_RETRIES));
323
324 if (org_mcp_reset_seq !=
325 qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
326 DP_VERBOSE(p_hwfn, QED_MSG_SP,
327 "MCP was reset after %d usec\n", cnt * delay);
328 } else {
329 DP_ERR(p_hwfn, "Failed to reset MCP\n");
330 rc = -EAGAIN;
331 }
332
333 spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
334
335 return rc;
336 }
337
338 /* Must be called while cmd_lock is acquired */
339 static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
340 {
341 struct qed_mcp_cmd_elem *p_cmd_elem;
342
343 /* There is at most one pending command at a certain time, and if it
344 * exists - it is placed at the HEAD of the list.
345 */
346 if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
347 p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
348 struct qed_mcp_cmd_elem, list);
349 return !p_cmd_elem->b_is_completed;
350 }
351
352 return false;
353 }
354
355 /* Must be called while cmd_lock is acquired */
356 static int
357 qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
358 {
359 struct qed_mcp_mb_params *p_mb_params;
360 struct qed_mcp_cmd_elem *p_cmd_elem;
361 u32 mcp_resp;
362 u16 seq_num;
363
364 mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
365 seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
366
367 /* Return if no new non-handled response has been received */
368 if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
369 return -EAGAIN;
370
371 p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
372 if (!p_cmd_elem) {
373 DP_ERR(p_hwfn,
374 "Failed to find a pending mailbox cmd that expects sequence number %d\n",
375 seq_num);
376 return -EINVAL;
377 }
378
379 p_mb_params = p_cmd_elem->p_mb_params;
380
381 /* Get the MFW response along with the sequence number */
382 p_mb_params->mcp_resp = mcp_resp;
383
384 /* Get the MFW param */
385 p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
386
387 /* Get the union data */
388 if (p_mb_params->p_data_dst && p_mb_params->data_dst_size) {
389 u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
390 offsetof(struct public_drv_mb,
391 union_data);
392 qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
393 union_data_addr, p_mb_params->data_dst_size);
394 }
395
396 p_cmd_elem->b_is_completed = true;
397
398 return 0;
399 }
400
401 /* Must be called while cmd_lock is acquired */
402 static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
403 struct qed_ptt *p_ptt,
404 struct qed_mcp_mb_params *p_mb_params,
405 u16 seq_num)
406 {
407 union drv_union_data union_data;
408 u32 union_data_addr;
409
410 /* Set the union data */
411 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
412 offsetof(struct public_drv_mb, union_data);
413 memset(&union_data, 0, sizeof(union_data));
414 if (p_mb_params->p_data_src && p_mb_params->data_src_size)
415 memcpy(&union_data, 
p_mb_params->p_data_src, 416 p_mb_params->data_src_size); 417 qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data, 418 sizeof(union_data)); 419 420 /* Set the drv param */ 421 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param); 422 423 /* Set the drv command along with the sequence number */ 424 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num)); 425 426 DP_VERBOSE(p_hwfn, QED_MSG_SP, 427 "MFW mailbox: command 0x%08x param 0x%08x\n", 428 (p_mb_params->cmd | seq_num), p_mb_params->param); 429 } 430 431 static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd) 432 { 433 p_hwfn->mcp_info->b_block_cmd = block_cmd; 434 435 DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n", 436 block_cmd ? "Block" : "Unblock"); 437 } 438 439 static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn, 440 struct qed_ptt *p_ptt) 441 { 442 u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2; 443 u32 delay = QED_MCP_RESP_ITER_US; 444 445 cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); 446 cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); 447 cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); 448 udelay(delay); 449 cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); 450 udelay(delay); 451 cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); 452 453 DP_NOTICE(p_hwfn, 454 "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n", 455 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2); 456 } 457 458 static int 459 _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, 460 struct qed_ptt *p_ptt, 461 struct qed_mcp_mb_params *p_mb_params, 462 u32 max_retries, u32 usecs) 463 { 464 u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000); 465 struct qed_mcp_cmd_elem *p_cmd_elem; 466 u16 seq_num; 467 int rc = 0; 468 469 /* Wait until the mailbox is non-occupied */ 470 do { 471 /* Exit the loop if there is no pending command, or if the 472 * pending command is completed during this iteration. 473 * The spinlock stays locked until the command is sent. 474 */ 475 476 spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); 477 478 if (!qed_mcp_has_pending_cmd(p_hwfn)) 479 break; 480 481 rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt); 482 if (!rc) 483 break; 484 else if (rc != -EAGAIN) 485 goto err; 486 487 spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); 488 489 if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) 490 msleep(msecs); 491 else 492 udelay(usecs); 493 } while (++cnt < max_retries); 494 495 if (cnt >= max_retries) { 496 DP_NOTICE(p_hwfn, 497 "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n", 498 p_mb_params->cmd, p_mb_params->param); 499 return -EAGAIN; 500 } 501 502 /* Send the mailbox command */ 503 qed_mcp_reread_offsets(p_hwfn, p_ptt); 504 seq_num = ++p_hwfn->mcp_info->drv_mb_seq; 505 p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num); 506 if (!p_cmd_elem) { 507 rc = -ENOMEM; 508 goto err; 509 } 510 511 __qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num); 512 spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); 513 514 /* Wait for the MFW response */ 515 do { 516 /* Exit the loop if the command is already completed, or if the 517 * command is completed during this iteration. 518 * The spinlock stays locked until the list element is removed. 
519 */ 520 521 if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) 522 msleep(msecs); 523 else 524 udelay(usecs); 525 526 spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); 527 528 if (p_cmd_elem->b_is_completed) 529 break; 530 531 rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt); 532 if (!rc) 533 break; 534 else if (rc != -EAGAIN) 535 goto err; 536 537 spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); 538 } while (++cnt < max_retries); 539 540 if (cnt >= max_retries) { 541 DP_NOTICE(p_hwfn, 542 "The MFW failed to respond to command 0x%08x [param 0x%08x].\n", 543 p_mb_params->cmd, p_mb_params->param); 544 qed_mcp_print_cpu_info(p_hwfn, p_ptt); 545 546 spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); 547 qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); 548 spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); 549 550 if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK)) 551 qed_mcp_cmd_set_blocking(p_hwfn, true); 552 553 qed_hw_err_notify(p_hwfn, p_ptt, 554 QED_HW_ERR_MFW_RESP_FAIL, NULL); 555 return -EAGAIN; 556 } 557 558 qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); 559 spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); 560 561 DP_VERBOSE(p_hwfn, 562 QED_MSG_SP, 563 "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n", 564 p_mb_params->mcp_resp, 565 p_mb_params->mcp_param, 566 (cnt * usecs) / 1000, (cnt * usecs) % 1000); 567 568 /* Clear the sequence number from the MFW response */ 569 p_mb_params->mcp_resp &= FW_MSG_CODE_MASK; 570 571 return 0; 572 573 err: 574 spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); 575 return rc; 576 } 577 578 static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, 579 struct qed_ptt *p_ptt, 580 struct qed_mcp_mb_params *p_mb_params) 581 { 582 size_t union_data_size = sizeof(union drv_union_data); 583 u32 max_retries = QED_DRV_MB_MAX_RETRIES; 584 u32 usecs = QED_MCP_RESP_ITER_US; 585 586 /* MCP not initialized */ 587 if (!qed_mcp_is_init(p_hwfn)) { 588 DP_NOTICE(p_hwfn, "MFW is not initialized!\n"); 589 return -EBUSY; 590 } 591 592 if (p_hwfn->mcp_info->b_block_cmd) { 593 DP_NOTICE(p_hwfn, 594 "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n", 595 p_mb_params->cmd, p_mb_params->param); 596 return -EBUSY; 597 } 598 599 if (p_mb_params->data_src_size > union_data_size || 600 p_mb_params->data_dst_size > union_data_size) { 601 DP_ERR(p_hwfn, 602 "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n", 603 p_mb_params->data_src_size, 604 p_mb_params->data_dst_size, union_data_size); 605 return -EINVAL; 606 } 607 608 if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) { 609 max_retries = DIV_ROUND_UP(max_retries, 1000); 610 usecs *= 1000; 611 } 612 613 return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries, 614 usecs); 615 } 616 617 static int _qed_mcp_cmd(struct qed_hwfn *p_hwfn, 618 struct qed_ptt *p_ptt, 619 u32 cmd, 620 u32 param, 621 u32 *o_mcp_resp, 622 u32 *o_mcp_param, 623 bool can_sleep) 624 { 625 struct qed_mcp_mb_params mb_params; 626 int rc; 627 628 memset(&mb_params, 0, sizeof(mb_params)); 629 mb_params.cmd = cmd; 630 mb_params.param = param; 631 mb_params.flags = can_sleep ? 
QED_MB_FLAG_CAN_SLEEP : 0; 632 633 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 634 if (rc) 635 return rc; 636 637 *o_mcp_resp = mb_params.mcp_resp; 638 *o_mcp_param = mb_params.mcp_param; 639 640 return 0; 641 } 642 643 int qed_mcp_cmd(struct qed_hwfn *p_hwfn, 644 struct qed_ptt *p_ptt, 645 u32 cmd, 646 u32 param, 647 u32 *o_mcp_resp, 648 u32 *o_mcp_param) 649 { 650 return (_qed_mcp_cmd(p_hwfn, p_ptt, cmd, param, 651 o_mcp_resp, o_mcp_param, true)); 652 } 653 654 int qed_mcp_cmd_nosleep(struct qed_hwfn *p_hwfn, 655 struct qed_ptt *p_ptt, 656 u32 cmd, 657 u32 param, 658 u32 *o_mcp_resp, 659 u32 *o_mcp_param) 660 { 661 return (_qed_mcp_cmd(p_hwfn, p_ptt, cmd, param, 662 o_mcp_resp, o_mcp_param, false)); 663 } 664 665 static int 666 qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn, 667 struct qed_ptt *p_ptt, 668 u32 cmd, 669 u32 param, 670 u32 *o_mcp_resp, 671 u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf) 672 { 673 struct qed_mcp_mb_params mb_params; 674 int rc; 675 676 memset(&mb_params, 0, sizeof(mb_params)); 677 mb_params.cmd = cmd; 678 mb_params.param = param; 679 mb_params.p_data_src = i_buf; 680 mb_params.data_src_size = (u8)i_txn_size; 681 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 682 if (rc) 683 return rc; 684 685 *o_mcp_resp = mb_params.mcp_resp; 686 *o_mcp_param = mb_params.mcp_param; 687 688 /* nvm_info needs to be updated */ 689 p_hwfn->nvm_info.valid = false; 690 691 return 0; 692 } 693 694 int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, 695 struct qed_ptt *p_ptt, 696 u32 cmd, 697 u32 param, 698 u32 *o_mcp_resp, 699 u32 *o_mcp_param, 700 u32 *o_txn_size, u32 *o_buf, bool b_can_sleep) 701 { 702 struct qed_mcp_mb_params mb_params; 703 u8 raw_data[MCP_DRV_NVM_BUF_LEN]; 704 int rc; 705 706 memset(&mb_params, 0, sizeof(mb_params)); 707 mb_params.cmd = cmd; 708 mb_params.param = param; 709 mb_params.p_data_dst = raw_data; 710 711 /* Use the maximal value since the actual one is part of the response */ 712 mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN; 713 if (b_can_sleep) 714 mb_params.flags = QED_MB_FLAG_CAN_SLEEP; 715 716 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 717 if (rc) 718 return rc; 719 720 *o_mcp_resp = mb_params.mcp_resp; 721 *o_mcp_param = mb_params.mcp_param; 722 723 *o_txn_size = *o_mcp_param; 724 memcpy(o_buf, raw_data, *o_txn_size); 725 726 return 0; 727 } 728 729 static bool 730 qed_mcp_can_force_load(u8 drv_role, 731 u8 exist_drv_role, 732 enum qed_override_force_load override_force_load) 733 { 734 bool can_force_load = false; 735 736 switch (override_force_load) { 737 case QED_OVERRIDE_FORCE_LOAD_ALWAYS: 738 can_force_load = true; 739 break; 740 case QED_OVERRIDE_FORCE_LOAD_NEVER: 741 can_force_load = false; 742 break; 743 default: 744 can_force_load = (drv_role == DRV_ROLE_OS && 745 exist_drv_role == DRV_ROLE_PREBOOT) || 746 (drv_role == DRV_ROLE_KDUMP && 747 exist_drv_role == DRV_ROLE_OS); 748 break; 749 } 750 751 return can_force_load; 752 } 753 754 static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn, 755 struct qed_ptt *p_ptt) 756 { 757 u32 resp = 0, param = 0; 758 int rc; 759 760 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0, 761 &resp, ¶m); 762 if (rc) 763 DP_NOTICE(p_hwfn, 764 "Failed to send cancel load request, rc = %d\n", rc); 765 766 return rc; 767 } 768 769 #define CONFIG_QEDE_BITMAP_IDX BIT(0) 770 #define CONFIG_QED_SRIOV_BITMAP_IDX BIT(1) 771 #define CONFIG_QEDR_BITMAP_IDX BIT(2) 772 #define CONFIG_QEDF_BITMAP_IDX BIT(4) 773 #define CONFIG_QEDI_BITMAP_IDX BIT(5) 774 #define 
CONFIG_QED_LL2_BITMAP_IDX BIT(6) 775 776 static u32 qed_get_config_bitmap(void) 777 { 778 u32 config_bitmap = 0x0; 779 780 if (IS_ENABLED(CONFIG_QEDE)) 781 config_bitmap |= CONFIG_QEDE_BITMAP_IDX; 782 783 if (IS_ENABLED(CONFIG_QED_SRIOV)) 784 config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX; 785 786 if (IS_ENABLED(CONFIG_QED_RDMA)) 787 config_bitmap |= CONFIG_QEDR_BITMAP_IDX; 788 789 if (IS_ENABLED(CONFIG_QED_FCOE)) 790 config_bitmap |= CONFIG_QEDF_BITMAP_IDX; 791 792 if (IS_ENABLED(CONFIG_QED_ISCSI)) 793 config_bitmap |= CONFIG_QEDI_BITMAP_IDX; 794 795 if (IS_ENABLED(CONFIG_QED_LL2)) 796 config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX; 797 798 return config_bitmap; 799 } 800 801 struct qed_load_req_in_params { 802 u8 hsi_ver; 803 #define QED_LOAD_REQ_HSI_VER_DEFAULT 0 804 #define QED_LOAD_REQ_HSI_VER_1 1 805 u32 drv_ver_0; 806 u32 drv_ver_1; 807 u32 fw_ver; 808 u8 drv_role; 809 u8 timeout_val; 810 u8 force_cmd; 811 bool avoid_eng_reset; 812 }; 813 814 struct qed_load_req_out_params { 815 u32 load_code; 816 u32 exist_drv_ver_0; 817 u32 exist_drv_ver_1; 818 u32 exist_fw_ver; 819 u8 exist_drv_role; 820 u8 mfw_hsi_ver; 821 bool drv_exists; 822 }; 823 824 static int 825 __qed_mcp_load_req(struct qed_hwfn *p_hwfn, 826 struct qed_ptt *p_ptt, 827 struct qed_load_req_in_params *p_in_params, 828 struct qed_load_req_out_params *p_out_params) 829 { 830 struct qed_mcp_mb_params mb_params; 831 struct load_req_stc load_req; 832 struct load_rsp_stc load_rsp; 833 u32 hsi_ver; 834 int rc; 835 836 memset(&load_req, 0, sizeof(load_req)); 837 load_req.drv_ver_0 = p_in_params->drv_ver_0; 838 load_req.drv_ver_1 = p_in_params->drv_ver_1; 839 load_req.fw_ver = p_in_params->fw_ver; 840 QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role); 841 QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO, 842 p_in_params->timeout_val); 843 QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE, 844 p_in_params->force_cmd); 845 QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0, 846 p_in_params->avoid_eng_reset); 847 848 hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ? 
849 DRV_ID_MCP_HSI_VER_CURRENT : 850 (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT); 851 852 memset(&mb_params, 0, sizeof(mb_params)); 853 mb_params.cmd = DRV_MSG_CODE_LOAD_REQ; 854 mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type; 855 mb_params.p_data_src = &load_req; 856 mb_params.data_src_size = sizeof(load_req); 857 mb_params.p_data_dst = &load_rsp; 858 mb_params.data_dst_size = sizeof(load_rsp); 859 mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK; 860 861 DP_VERBOSE(p_hwfn, QED_MSG_SP, 862 "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n", 863 mb_params.param, 864 QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW), 865 QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE), 866 QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER), 867 QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER)); 868 869 if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) { 870 DP_VERBOSE(p_hwfn, QED_MSG_SP, 871 "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n", 872 load_req.drv_ver_0, 873 load_req.drv_ver_1, 874 load_req.fw_ver, 875 load_req.misc0, 876 QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE), 877 QED_MFW_GET_FIELD(load_req.misc0, 878 LOAD_REQ_LOCK_TO), 879 QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE), 880 QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0)); 881 } 882 883 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 884 if (rc) { 885 DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc); 886 return rc; 887 } 888 889 DP_VERBOSE(p_hwfn, QED_MSG_SP, 890 "Load Response: resp 0x%08x\n", mb_params.mcp_resp); 891 p_out_params->load_code = mb_params.mcp_resp; 892 893 if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 && 894 p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) { 895 DP_VERBOSE(p_hwfn, 896 QED_MSG_SP, 897 "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n", 898 load_rsp.drv_ver_0, 899 load_rsp.drv_ver_1, 900 load_rsp.fw_ver, 901 load_rsp.misc0, 902 QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE), 903 QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI), 904 QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0)); 905 906 p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0; 907 p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1; 908 p_out_params->exist_fw_ver = load_rsp.fw_ver; 909 p_out_params->exist_drv_role = 910 QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE); 911 p_out_params->mfw_hsi_ver = 912 QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI); 913 p_out_params->drv_exists = 914 QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) & 915 LOAD_RSP_FLAGS0_DRV_EXISTS; 916 } 917 918 return 0; 919 } 920 921 static int eocre_get_mfw_drv_role(struct qed_hwfn *p_hwfn, 922 enum qed_drv_role drv_role, 923 u8 *p_mfw_drv_role) 924 { 925 switch (drv_role) { 926 case QED_DRV_ROLE_OS: 927 *p_mfw_drv_role = DRV_ROLE_OS; 928 break; 929 case QED_DRV_ROLE_KDUMP: 930 *p_mfw_drv_role = DRV_ROLE_KDUMP; 931 break; 932 default: 933 DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role); 934 return -EINVAL; 935 } 936 937 return 0; 938 } 939 940 enum qed_load_req_force { 941 QED_LOAD_REQ_FORCE_NONE, 942 QED_LOAD_REQ_FORCE_PF, 943 QED_LOAD_REQ_FORCE_ALL, 944 }; 945 946 static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn, 947 enum qed_load_req_force force_cmd, 948 u8 *p_mfw_force_cmd) 949 { 950 switch (force_cmd) { 951 case QED_LOAD_REQ_FORCE_NONE: 952 *p_mfw_force_cmd = 
LOAD_REQ_FORCE_NONE;
953 break;
954 case QED_LOAD_REQ_FORCE_PF:
955 *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
956 break;
957 case QED_LOAD_REQ_FORCE_ALL:
958 *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
959 break;
960 }
961 }
962
963 int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
964 struct qed_ptt *p_ptt,
965 struct qed_load_req_params *p_params)
966 {
967 struct qed_load_req_out_params out_params;
968 struct qed_load_req_in_params in_params;
969 u8 mfw_drv_role, mfw_force_cmd;
970 int rc;
971
972 memset(&in_params, 0, sizeof(in_params));
973 in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
974 in_params.drv_ver_1 = qed_get_config_bitmap();
975 in_params.fw_ver = STORM_FW_VERSION;
976 rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
977 if (rc)
978 return rc;
979
980 in_params.drv_role = mfw_drv_role;
981 in_params.timeout_val = p_params->timeout_val;
982 qed_get_mfw_force_cmd(p_hwfn,
983 QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
984
985 in_params.force_cmd = mfw_force_cmd;
986 in_params.avoid_eng_reset = p_params->avoid_eng_reset;
987
988 memset(&out_params, 0, sizeof(out_params));
989 rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
990 if (rc)
991 return rc;
992
993 /* First handle cases where another load request should/might be sent:
994 * - MFW expects the old interface [HSI version = 1]
995 * - MFW responds that a force load request is required
996 */
997 if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
998 DP_INFO(p_hwfn,
999 "MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");
1000
1001 in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
1002 memset(&out_params, 0, sizeof(out_params));
1003 rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
1004 if (rc)
1005 return rc;
1006 } else if (out_params.load_code ==
1007 FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
1008 if (qed_mcp_can_force_load(in_params.drv_role,
1009 out_params.exist_drv_role,
1010 p_params->override_force_load)) {
1011 DP_INFO(p_hwfn,
1012 "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
1013 in_params.drv_role, in_params.fw_ver,
1014 in_params.drv_ver_0, in_params.drv_ver_1,
1015 out_params.exist_drv_role,
1016 out_params.exist_fw_ver,
1017 out_params.exist_drv_ver_0,
1018 out_params.exist_drv_ver_1);
1019
1020 qed_get_mfw_force_cmd(p_hwfn,
1021 QED_LOAD_REQ_FORCE_ALL,
1022 &mfw_force_cmd);
1023
1024 in_params.force_cmd = mfw_force_cmd;
1025 memset(&out_params, 0, sizeof(out_params));
1026 rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
1027 &out_params);
1028 if (rc)
1029 return rc;
1030 } else {
1031 DP_NOTICE(p_hwfn,
1032 "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
1033 in_params.drv_role, in_params.fw_ver,
1034 in_params.drv_ver_0, in_params.drv_ver_1,
1035 out_params.exist_drv_role,
1036 out_params.exist_fw_ver,
1037 out_params.exist_drv_ver_0,
1038 out_params.exist_drv_ver_1);
1039 DP_NOTICE(p_hwfn,
1040 "Avoid sending a force load request to prevent disruption of active PFs\n");
1041
1042 qed_mcp_cancel_load_req(p_hwfn, p_ptt);
1043 return -EBUSY;
1044 }
1045 }
1046
1047 /* Now handle the other types of responses.
1048 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
1049 * expected here after the additional revised load requests were sent.
1050 */ 1051 switch (out_params.load_code) { 1052 case FW_MSG_CODE_DRV_LOAD_ENGINE: 1053 case FW_MSG_CODE_DRV_LOAD_PORT: 1054 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 1055 if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 && 1056 out_params.drv_exists) { 1057 /* The role and fw/driver version match, but the PF is 1058 * already loaded and has not been unloaded gracefully. 1059 */ 1060 DP_NOTICE(p_hwfn, 1061 "PF is already loaded\n"); 1062 return -EINVAL; 1063 } 1064 break; 1065 default: 1066 DP_NOTICE(p_hwfn, 1067 "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n", 1068 out_params.load_code); 1069 return -EBUSY; 1070 } 1071 1072 p_params->load_code = out_params.load_code; 1073 1074 return 0; 1075 } 1076 1077 int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1078 { 1079 u32 resp = 0, param = 0; 1080 int rc; 1081 1082 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp, 1083 ¶m); 1084 if (rc) { 1085 DP_NOTICE(p_hwfn, 1086 "Failed to send a LOAD_DONE command, rc = %d\n", rc); 1087 return rc; 1088 } 1089 1090 /* Check if there is a DID mismatch between nvm-cfg/efuse */ 1091 if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR) 1092 DP_NOTICE(p_hwfn, 1093 "warning: device configuration is not supported on this board type. The device may not function as expected.\n"); 1094 1095 return 0; 1096 } 1097 1098 int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1099 { 1100 struct qed_mcp_mb_params mb_params; 1101 u32 wol_param; 1102 1103 switch (p_hwfn->cdev->wol_config) { 1104 case QED_OV_WOL_DISABLED: 1105 wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED; 1106 break; 1107 case QED_OV_WOL_ENABLED: 1108 wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED; 1109 break; 1110 default: 1111 DP_NOTICE(p_hwfn, 1112 "Unknown WoL configuration %02x\n", 1113 p_hwfn->cdev->wol_config); 1114 fallthrough; 1115 case QED_OV_WOL_DEFAULT: 1116 wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP; 1117 } 1118 1119 memset(&mb_params, 0, sizeof(mb_params)); 1120 mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ; 1121 mb_params.param = wol_param; 1122 mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK; 1123 1124 return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 1125 } 1126 1127 int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1128 { 1129 struct qed_mcp_mb_params mb_params; 1130 struct mcp_mac wol_mac; 1131 1132 memset(&mb_params, 0, sizeof(mb_params)); 1133 mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE; 1134 1135 /* Set the primary MAC if WoL is enabled */ 1136 if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) { 1137 u8 *p_mac = p_hwfn->cdev->wol_mac; 1138 1139 memset(&wol_mac, 0, sizeof(wol_mac)); 1140 wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1]; 1141 wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 | 1142 p_mac[4] << 8 | p_mac[5]; 1143 1144 DP_VERBOSE(p_hwfn, 1145 (QED_MSG_SP | NETIF_MSG_IFDOWN), 1146 "Setting WoL MAC: %pM --> [%08x,%08x]\n", 1147 p_mac, wol_mac.mac_upper, wol_mac.mac_lower); 1148 1149 mb_params.p_data_src = &wol_mac; 1150 mb_params.data_src_size = sizeof(wol_mac); 1151 } 1152 1153 return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 1154 } 1155 1156 static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn, 1157 struct qed_ptt *p_ptt) 1158 { 1159 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, 1160 PUBLIC_PATH); 1161 u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr); 1162 u32 path_addr = SECTION_ADDR(mfw_path_offsize, 1163 QED_PATH_ID(p_hwfn)); 1164 u32 disabled_vfs[VF_MAX_STATIC / 32]; 1165 int i; 1166 1167 
DP_VERBOSE(p_hwfn, 1168 QED_MSG_SP, 1169 "Reading Disabled VF information from [offset %08x], path_addr %08x\n", 1170 mfw_path_offsize, path_addr); 1171 1172 for (i = 0; i < (VF_MAX_STATIC / 32); i++) { 1173 disabled_vfs[i] = qed_rd(p_hwfn, p_ptt, 1174 path_addr + 1175 offsetof(struct public_path, 1176 mcp_vf_disabled) + 1177 sizeof(u32) * i); 1178 DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV), 1179 "FLR-ed VFs [%08x,...,%08x] - %08x\n", 1180 i * 32, (i + 1) * 32 - 1, disabled_vfs[i]); 1181 } 1182 1183 if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs)) 1184 qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG); 1185 } 1186 1187 int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn, 1188 struct qed_ptt *p_ptt, u32 *vfs_to_ack) 1189 { 1190 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, 1191 PUBLIC_FUNC); 1192 u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr); 1193 u32 func_addr = SECTION_ADDR(mfw_func_offsize, 1194 MCP_PF_ID(p_hwfn)); 1195 struct qed_mcp_mb_params mb_params; 1196 int rc; 1197 int i; 1198 1199 for (i = 0; i < (VF_MAX_STATIC / 32); i++) 1200 DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV), 1201 "Acking VFs [%08x,...,%08x] - %08x\n", 1202 i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]); 1203 1204 memset(&mb_params, 0, sizeof(mb_params)); 1205 mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE; 1206 mb_params.p_data_src = vfs_to_ack; 1207 mb_params.data_src_size = VF_MAX_STATIC / 8; 1208 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 1209 if (rc) { 1210 DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n"); 1211 return -EBUSY; 1212 } 1213 1214 /* Clear the ACK bits */ 1215 for (i = 0; i < (VF_MAX_STATIC / 32); i++) 1216 qed_wr(p_hwfn, p_ptt, 1217 func_addr + 1218 offsetof(struct public_func, drv_ack_vf_disabled) + 1219 i * sizeof(u32), 0); 1220 1221 return rc; 1222 } 1223 1224 static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn, 1225 struct qed_ptt *p_ptt) 1226 { 1227 u32 transceiver_state; 1228 1229 transceiver_state = qed_rd(p_hwfn, p_ptt, 1230 p_hwfn->mcp_info->port_addr + 1231 offsetof(struct public_port, 1232 transceiver_data)); 1233 1234 DP_VERBOSE(p_hwfn, 1235 (NETIF_MSG_HW | QED_MSG_SP), 1236 "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n", 1237 transceiver_state, 1238 (u32)(p_hwfn->mcp_info->port_addr + 1239 offsetof(struct public_port, transceiver_data))); 1240 1241 transceiver_state = GET_FIELD(transceiver_state, 1242 ETH_TRANSCEIVER_STATE); 1243 1244 if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) 1245 DP_NOTICE(p_hwfn, "Transceiver is present.\n"); 1246 else 1247 DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n"); 1248 } 1249 1250 static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn, 1251 struct qed_ptt *p_ptt, 1252 struct qed_mcp_link_state *p_link) 1253 { 1254 u32 eee_status, val; 1255 1256 p_link->eee_adv_caps = 0; 1257 p_link->eee_lp_adv_caps = 0; 1258 eee_status = qed_rd(p_hwfn, 1259 p_ptt, 1260 p_hwfn->mcp_info->port_addr + 1261 offsetof(struct public_port, eee_status)); 1262 p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT); 1263 val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET; 1264 if (val & EEE_1G_ADV) 1265 p_link->eee_adv_caps |= QED_EEE_1G_ADV; 1266 if (val & EEE_10G_ADV) 1267 p_link->eee_adv_caps |= QED_EEE_10G_ADV; 1268 val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET; 1269 if (val & EEE_1G_ADV) 1270 p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV; 1271 if (val & EEE_10G_ADV) 1272 p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV; 1273 } 1274 1275 static u32 
qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, 1276 struct qed_ptt *p_ptt, 1277 struct public_func *p_data, int pfid) 1278 { 1279 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, 1280 PUBLIC_FUNC); 1281 u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr); 1282 u32 func_addr; 1283 u32 i, size; 1284 1285 func_addr = SECTION_ADDR(mfw_path_offsize, pfid); 1286 memset(p_data, 0, sizeof(*p_data)); 1287 1288 size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize)); 1289 for (i = 0; i < size / sizeof(u32); i++) 1290 ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt, 1291 func_addr + (i << 2)); 1292 return size; 1293 } 1294 1295 static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn, 1296 struct public_func *p_shmem_info) 1297 { 1298 struct qed_mcp_function_info *p_info; 1299 1300 p_info = &p_hwfn->mcp_info->func_info; 1301 1302 p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config, 1303 FUNC_MF_CFG_MIN_BW); 1304 if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) { 1305 DP_INFO(p_hwfn, 1306 "bandwidth minimum out of bounds [%02x]. Set to 1\n", 1307 p_info->bandwidth_min); 1308 p_info->bandwidth_min = 1; 1309 } 1310 1311 p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config, 1312 FUNC_MF_CFG_MAX_BW); 1313 if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) { 1314 DP_INFO(p_hwfn, 1315 "bandwidth maximum out of bounds [%02x]. Set to 100\n", 1316 p_info->bandwidth_max); 1317 p_info->bandwidth_max = 100; 1318 } 1319 } 1320 1321 static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, 1322 struct qed_ptt *p_ptt, bool b_reset) 1323 { 1324 struct qed_mcp_link_state *p_link; 1325 u8 max_bw, min_bw; 1326 u32 status = 0; 1327 1328 /* Prevent SW/attentions from doing this at the same time */ 1329 spin_lock_bh(&p_hwfn->mcp_info->link_lock); 1330 1331 p_link = &p_hwfn->mcp_info->link_output; 1332 memset(p_link, 0, sizeof(*p_link)); 1333 if (!b_reset) { 1334 status = qed_rd(p_hwfn, p_ptt, 1335 p_hwfn->mcp_info->port_addr + 1336 offsetof(struct public_port, link_status)); 1337 DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP), 1338 "Received link update [0x%08x] from mfw [Addr 0x%x]\n", 1339 status, 1340 (u32)(p_hwfn->mcp_info->port_addr + 1341 offsetof(struct public_port, link_status))); 1342 } else { 1343 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 1344 "Resetting link indications\n"); 1345 goto out; 1346 } 1347 1348 if (p_hwfn->b_drv_link_init) { 1349 /* Link indication with modern MFW arrives as per-PF 1350 * indication. 
1351 */ 1352 if (p_hwfn->mcp_info->capabilities & 1353 FW_MB_PARAM_FEATURE_SUPPORT_VLINK) { 1354 struct public_func shmem_info; 1355 1356 qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, 1357 MCP_PF_ID(p_hwfn)); 1358 p_link->link_up = !!(shmem_info.status & 1359 FUNC_STATUS_VIRTUAL_LINK_UP); 1360 qed_read_pf_bandwidth(p_hwfn, &shmem_info); 1361 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 1362 "Virtual link_up = %d\n", p_link->link_up); 1363 } else { 1364 p_link->link_up = !!(status & LINK_STATUS_LINK_UP); 1365 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 1366 "Physical link_up = %d\n", p_link->link_up); 1367 } 1368 } else { 1369 p_link->link_up = false; 1370 } 1371 1372 p_link->full_duplex = true; 1373 switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) { 1374 case LINK_STATUS_SPEED_AND_DUPLEX_100G: 1375 p_link->speed = 100000; 1376 break; 1377 case LINK_STATUS_SPEED_AND_DUPLEX_50G: 1378 p_link->speed = 50000; 1379 break; 1380 case LINK_STATUS_SPEED_AND_DUPLEX_40G: 1381 p_link->speed = 40000; 1382 break; 1383 case LINK_STATUS_SPEED_AND_DUPLEX_25G: 1384 p_link->speed = 25000; 1385 break; 1386 case LINK_STATUS_SPEED_AND_DUPLEX_20G: 1387 p_link->speed = 20000; 1388 break; 1389 case LINK_STATUS_SPEED_AND_DUPLEX_10G: 1390 p_link->speed = 10000; 1391 break; 1392 case LINK_STATUS_SPEED_AND_DUPLEX_1000THD: 1393 p_link->full_duplex = false; 1394 fallthrough; 1395 case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD: 1396 p_link->speed = 1000; 1397 break; 1398 default: 1399 p_link->speed = 0; 1400 p_link->link_up = 0; 1401 } 1402 1403 if (p_link->link_up && p_link->speed) 1404 p_link->line_speed = p_link->speed; 1405 else 1406 p_link->line_speed = 0; 1407 1408 max_bw = p_hwfn->mcp_info->func_info.bandwidth_max; 1409 min_bw = p_hwfn->mcp_info->func_info.bandwidth_min; 1410 1411 /* Max bandwidth configuration */ 1412 __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw); 1413 1414 /* Min bandwidth configuration */ 1415 __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw); 1416 qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt, 1417 p_link->min_pf_rate); 1418 1419 p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED); 1420 p_link->an_complete = !!(status & 1421 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE); 1422 p_link->parallel_detection = !!(status & 1423 LINK_STATUS_PARALLEL_DETECTION_USED); 1424 p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED); 1425 1426 p_link->partner_adv_speed |= 1427 (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ? 1428 QED_LINK_PARTNER_SPEED_1G_FD : 0; 1429 p_link->partner_adv_speed |= 1430 (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ? 1431 QED_LINK_PARTNER_SPEED_1G_HD : 0; 1432 p_link->partner_adv_speed |= 1433 (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ? 1434 QED_LINK_PARTNER_SPEED_10G : 0; 1435 p_link->partner_adv_speed |= 1436 (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ? 1437 QED_LINK_PARTNER_SPEED_20G : 0; 1438 p_link->partner_adv_speed |= 1439 (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ? 1440 QED_LINK_PARTNER_SPEED_25G : 0; 1441 p_link->partner_adv_speed |= 1442 (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ? 1443 QED_LINK_PARTNER_SPEED_40G : 0; 1444 p_link->partner_adv_speed |= 1445 (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ? 1446 QED_LINK_PARTNER_SPEED_50G : 0; 1447 p_link->partner_adv_speed |= 1448 (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ? 
1449 QED_LINK_PARTNER_SPEED_100G : 0; 1450 1451 p_link->partner_tx_flow_ctrl_en = 1452 !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED); 1453 p_link->partner_rx_flow_ctrl_en = 1454 !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED); 1455 1456 switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) { 1457 case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE: 1458 p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE; 1459 break; 1460 case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE: 1461 p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE; 1462 break; 1463 case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE: 1464 p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE; 1465 break; 1466 default: 1467 p_link->partner_adv_pause = 0; 1468 } 1469 1470 p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT); 1471 1472 if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) 1473 qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link); 1474 1475 if (p_hwfn->mcp_info->capabilities & 1476 FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) { 1477 switch (status & LINK_STATUS_FEC_MODE_MASK) { 1478 case LINK_STATUS_FEC_MODE_NONE: 1479 p_link->fec_active = QED_FEC_MODE_NONE; 1480 break; 1481 case LINK_STATUS_FEC_MODE_FIRECODE_CL74: 1482 p_link->fec_active = QED_FEC_MODE_FIRECODE; 1483 break; 1484 case LINK_STATUS_FEC_MODE_RS_CL91: 1485 p_link->fec_active = QED_FEC_MODE_RS; 1486 break; 1487 default: 1488 p_link->fec_active = QED_FEC_MODE_AUTO; 1489 } 1490 } else { 1491 p_link->fec_active = QED_FEC_MODE_UNSUPPORTED; 1492 } 1493 1494 qed_link_update(p_hwfn, p_ptt); 1495 out: 1496 spin_unlock_bh(&p_hwfn->mcp_info->link_lock); 1497 } 1498 1499 int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) 1500 { 1501 struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input; 1502 struct qed_mcp_mb_params mb_params; 1503 struct eth_phy_cfg phy_cfg; 1504 u32 cmd, fec_bit = 0; 1505 u32 val, ext_speed; 1506 int rc = 0; 1507 1508 /* Set the shmem configuration according to params */ 1509 memset(&phy_cfg, 0, sizeof(phy_cfg)); 1510 cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET; 1511 if (!params->speed.autoneg) 1512 phy_cfg.speed = params->speed.forced_speed; 1513 phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0; 1514 phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0; 1515 phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0; 1516 phy_cfg.adv_speed = params->speed.advertised_speeds; 1517 phy_cfg.loopback_mode = params->loopback_mode; 1518 1519 /* There are MFWs that share this capability regardless of whether 1520 * this is feasible or not. And given that at the very least adv_caps 1521 * would be set internally by qed, we want to make sure LFA would 1522 * still work. 
1523 */ 1524 if ((p_hwfn->mcp_info->capabilities & 1525 FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) { 1526 phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED; 1527 if (params->eee.tx_lpi_enable) 1528 phy_cfg.eee_cfg |= EEE_CFG_TX_LPI; 1529 if (params->eee.adv_caps & QED_EEE_1G_ADV) 1530 phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G; 1531 if (params->eee.adv_caps & QED_EEE_10G_ADV) 1532 phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G; 1533 phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer << 1534 EEE_TX_TIMER_USEC_OFFSET) & 1535 EEE_TX_TIMER_USEC_MASK; 1536 } 1537 1538 if (p_hwfn->mcp_info->capabilities & 1539 FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) { 1540 if (params->fec & QED_FEC_MODE_NONE) 1541 fec_bit |= FEC_FORCE_MODE_NONE; 1542 else if (params->fec & QED_FEC_MODE_FIRECODE) 1543 fec_bit |= FEC_FORCE_MODE_FIRECODE; 1544 else if (params->fec & QED_FEC_MODE_RS) 1545 fec_bit |= FEC_FORCE_MODE_RS; 1546 else if (params->fec & QED_FEC_MODE_AUTO) 1547 fec_bit |= FEC_FORCE_MODE_AUTO; 1548 1549 SET_MFW_FIELD(phy_cfg.fec_mode, FEC_FORCE_MODE, fec_bit); 1550 } 1551 1552 if (p_hwfn->mcp_info->capabilities & 1553 FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) { 1554 ext_speed = 0; 1555 if (params->ext_speed.autoneg) 1556 ext_speed |= ETH_EXT_SPEED_NONE; 1557 1558 val = params->ext_speed.forced_speed; 1559 if (val & QED_EXT_SPEED_1G) 1560 ext_speed |= ETH_EXT_SPEED_1G; 1561 if (val & QED_EXT_SPEED_10G) 1562 ext_speed |= ETH_EXT_SPEED_10G; 1563 if (val & QED_EXT_SPEED_25G) 1564 ext_speed |= ETH_EXT_SPEED_25G; 1565 if (val & QED_EXT_SPEED_40G) 1566 ext_speed |= ETH_EXT_SPEED_40G; 1567 if (val & QED_EXT_SPEED_50G_R) 1568 ext_speed |= ETH_EXT_SPEED_50G_BASE_R; 1569 if (val & QED_EXT_SPEED_50G_R2) 1570 ext_speed |= ETH_EXT_SPEED_50G_BASE_R2; 1571 if (val & QED_EXT_SPEED_100G_R2) 1572 ext_speed |= ETH_EXT_SPEED_100G_BASE_R2; 1573 if (val & QED_EXT_SPEED_100G_R4) 1574 ext_speed |= ETH_EXT_SPEED_100G_BASE_R4; 1575 if (val & QED_EXT_SPEED_100G_P4) 1576 ext_speed |= ETH_EXT_SPEED_100G_BASE_P4; 1577 1578 SET_MFW_FIELD(phy_cfg.extended_speed, ETH_EXT_SPEED, 1579 ext_speed); 1580 1581 ext_speed = 0; 1582 1583 val = params->ext_speed.advertised_speeds; 1584 if (val & QED_EXT_SPEED_MASK_1G) 1585 ext_speed |= ETH_EXT_ADV_SPEED_1G; 1586 if (val & QED_EXT_SPEED_MASK_10G) 1587 ext_speed |= ETH_EXT_ADV_SPEED_10G; 1588 if (val & QED_EXT_SPEED_MASK_25G) 1589 ext_speed |= ETH_EXT_ADV_SPEED_25G; 1590 if (val & QED_EXT_SPEED_MASK_40G) 1591 ext_speed |= ETH_EXT_ADV_SPEED_40G; 1592 if (val & QED_EXT_SPEED_MASK_50G_R) 1593 ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R; 1594 if (val & QED_EXT_SPEED_MASK_50G_R2) 1595 ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R2; 1596 if (val & QED_EXT_SPEED_MASK_100G_R2) 1597 ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R2; 1598 if (val & QED_EXT_SPEED_MASK_100G_R4) 1599 ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R4; 1600 if (val & QED_EXT_SPEED_MASK_100G_P4) 1601 ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_P4; 1602 1603 phy_cfg.extended_speed |= ext_speed; 1604 1605 SET_MFW_FIELD(phy_cfg.fec_mode, FEC_EXTENDED_MODE, 1606 params->ext_fec_mode); 1607 } 1608 1609 p_hwfn->b_drv_link_init = b_up; 1610 1611 if (b_up) { 1612 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 1613 "Configuring Link: Speed 0x%08x, Pause 0x%08x, Adv. Speed 0x%08x, Loopback 0x%08x, FEC 0x%08x, Ext. 
Speed 0x%08x\n", 1614 phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed, 1615 phy_cfg.loopback_mode, phy_cfg.fec_mode, 1616 phy_cfg.extended_speed); 1617 } else { 1618 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link\n"); 1619 } 1620 1621 memset(&mb_params, 0, sizeof(mb_params)); 1622 mb_params.cmd = cmd; 1623 mb_params.p_data_src = &phy_cfg; 1624 mb_params.data_src_size = sizeof(phy_cfg); 1625 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 1626 1627 /* if mcp fails to respond we must abort */ 1628 if (rc) { 1629 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); 1630 return rc; 1631 } 1632 1633 /* Mimic link-change attention, done for several reasons: 1634 * - On reset, there's no guarantee MFW would trigger 1635 * an attention. 1636 * - On initialization, older MFWs might not indicate link change 1637 * during LFA, so we'll never get an UP indication. 1638 */ 1639 qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up); 1640 1641 return 0; 1642 } 1643 1644 u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn, 1645 struct qed_ptt *p_ptt) 1646 { 1647 u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt; 1648 1649 if (IS_VF(p_hwfn->cdev)) 1650 return -EINVAL; 1651 1652 path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, 1653 PUBLIC_PATH); 1654 path_offsize = qed_rd(p_hwfn, p_ptt, path_offsize_addr); 1655 path_addr = SECTION_ADDR(path_offsize, QED_PATH_ID(p_hwfn)); 1656 1657 proc_kill_cnt = qed_rd(p_hwfn, p_ptt, 1658 path_addr + 1659 offsetof(struct public_path, process_kill)) & 1660 PROCESS_KILL_COUNTER_MASK; 1661 1662 return proc_kill_cnt; 1663 } 1664 1665 static void qed_mcp_handle_process_kill(struct qed_hwfn *p_hwfn, 1666 struct qed_ptt *p_ptt) 1667 { 1668 struct qed_dev *cdev = p_hwfn->cdev; 1669 u32 proc_kill_cnt; 1670 1671 /* Prevent possible attentions/interrupts during the recovery handling 1672 * and till its load phase, during which they will be re-enabled. 1673 */ 1674 qed_int_igu_disable_int(p_hwfn, p_ptt); 1675 1676 DP_NOTICE(p_hwfn, "Received a process kill indication\n"); 1677 1678 /* The following operations should be done once, and thus in CMT mode 1679 * are carried out by only the first HW function. 
1680 */ 1681 if (p_hwfn != QED_LEADING_HWFN(cdev)) 1682 return; 1683 1684 if (cdev->recov_in_prog) { 1685 DP_NOTICE(p_hwfn, 1686 "Ignoring the indication since a recovery process is already in progress\n"); 1687 return; 1688 } 1689 1690 cdev->recov_in_prog = true; 1691 1692 proc_kill_cnt = qed_get_process_kill_counter(p_hwfn, p_ptt); 1693 DP_NOTICE(p_hwfn, "Process kill counter: %d\n", proc_kill_cnt); 1694 1695 qed_schedule_recovery_handler(p_hwfn); 1696 } 1697 1698 static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn, 1699 struct qed_ptt *p_ptt, 1700 enum MFW_DRV_MSG_TYPE type) 1701 { 1702 enum qed_mcp_protocol_type stats_type; 1703 union qed_mcp_protocol_stats stats; 1704 struct qed_mcp_mb_params mb_params; 1705 u32 hsi_param; 1706 1707 switch (type) { 1708 case MFW_DRV_MSG_GET_LAN_STATS: 1709 stats_type = QED_MCP_LAN_STATS; 1710 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN; 1711 break; 1712 case MFW_DRV_MSG_GET_FCOE_STATS: 1713 stats_type = QED_MCP_FCOE_STATS; 1714 hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE; 1715 break; 1716 case MFW_DRV_MSG_GET_ISCSI_STATS: 1717 stats_type = QED_MCP_ISCSI_STATS; 1718 hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI; 1719 break; 1720 case MFW_DRV_MSG_GET_RDMA_STATS: 1721 stats_type = QED_MCP_RDMA_STATS; 1722 hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA; 1723 break; 1724 default: 1725 DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type); 1726 return; 1727 } 1728 1729 qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats); 1730 1731 memset(&mb_params, 0, sizeof(mb_params)); 1732 mb_params.cmd = DRV_MSG_CODE_GET_STATS; 1733 mb_params.param = hsi_param; 1734 mb_params.p_data_src = &stats; 1735 mb_params.data_src_size = sizeof(stats); 1736 qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 1737 } 1738 1739 static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1740 { 1741 struct qed_mcp_function_info *p_info; 1742 struct public_func shmem_info; 1743 u32 resp = 0, param = 0; 1744 1745 qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); 1746 1747 qed_read_pf_bandwidth(p_hwfn, &shmem_info); 1748 1749 p_info = &p_hwfn->mcp_info->func_info; 1750 1751 qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min); 1752 qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max); 1753 1754 /* Acknowledge the MFW */ 1755 qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp, 1756 ¶m); 1757 } 1758 1759 static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1760 { 1761 struct public_func shmem_info; 1762 u32 resp = 0, param = 0; 1763 1764 qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); 1765 1766 p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag & 1767 FUNC_MF_CFG_OV_STAG_MASK; 1768 p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan; 1769 if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) { 1770 if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) { 1771 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 1772 p_hwfn->hw_info.ovlan); 1773 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1); 1774 1775 /* Configure DB to add external vlan to EDPM packets */ 1776 qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1); 1777 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 1778 p_hwfn->hw_info.ovlan); 1779 } else { 1780 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0); 1781 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0); 1782 qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0); 1783 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0); 1784 } 
1785 1786 qed_sp_pf_update_stag(p_hwfn); 1787 } 1788 1789 DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n", 1790 p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode); 1791 1792 /* Acknowledge the MFW */ 1793 qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0, 1794 &resp, ¶m); 1795 } 1796 1797 static void qed_mcp_handle_fan_failure(struct qed_hwfn *p_hwfn, 1798 struct qed_ptt *p_ptt) 1799 { 1800 /* A single notification should be sent to upper driver in CMT mode */ 1801 if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev)) 1802 return; 1803 1804 qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_FAN_FAIL, 1805 "Fan failure was detected on the network interface card and it's going to be shut down.\n"); 1806 } 1807 1808 struct qed_mdump_cmd_params { 1809 u32 cmd; 1810 void *p_data_src; 1811 u8 data_src_size; 1812 void *p_data_dst; 1813 u8 data_dst_size; 1814 u32 mcp_resp; 1815 }; 1816 1817 static int 1818 qed_mcp_mdump_cmd(struct qed_hwfn *p_hwfn, 1819 struct qed_ptt *p_ptt, 1820 struct qed_mdump_cmd_params *p_mdump_cmd_params) 1821 { 1822 struct qed_mcp_mb_params mb_params; 1823 int rc; 1824 1825 memset(&mb_params, 0, sizeof(mb_params)); 1826 mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD; 1827 mb_params.param = p_mdump_cmd_params->cmd; 1828 mb_params.p_data_src = p_mdump_cmd_params->p_data_src; 1829 mb_params.data_src_size = p_mdump_cmd_params->data_src_size; 1830 mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst; 1831 mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size; 1832 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 1833 if (rc) 1834 return rc; 1835 1836 p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp; 1837 1838 if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) { 1839 DP_INFO(p_hwfn, 1840 "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n", 1841 p_mdump_cmd_params->cmd); 1842 rc = -EOPNOTSUPP; 1843 } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) { 1844 DP_INFO(p_hwfn, 1845 "The mdump command is not supported by the MFW\n"); 1846 rc = -EOPNOTSUPP; 1847 } 1848 1849 return rc; 1850 } 1851 1852 static int qed_mcp_mdump_ack(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1853 { 1854 struct qed_mdump_cmd_params mdump_cmd_params; 1855 1856 memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params)); 1857 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK; 1858 1859 return qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); 1860 } 1861 1862 int 1863 qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn, 1864 struct qed_ptt *p_ptt, 1865 struct mdump_retain_data_stc *p_mdump_retain) 1866 { 1867 struct qed_mdump_cmd_params mdump_cmd_params; 1868 int rc; 1869 1870 memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params)); 1871 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN; 1872 mdump_cmd_params.p_data_dst = p_mdump_retain; 1873 mdump_cmd_params.data_dst_size = sizeof(*p_mdump_retain); 1874 1875 rc = qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); 1876 if (rc) 1877 return rc; 1878 1879 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) { 1880 DP_INFO(p_hwfn, 1881 "Failed to get the mdump retained data [mcp_resp 0x%x]\n", 1882 mdump_cmd_params.mcp_resp); 1883 return -EINVAL; 1884 } 1885 1886 return 0; 1887 } 1888 1889 static void qed_mcp_handle_critical_error(struct qed_hwfn *p_hwfn, 1890 struct qed_ptt *p_ptt) 1891 { 1892 struct mdump_retain_data_stc mdump_retain; 1893 int rc; 1894 1895 /* In CMT mode - no need for more than a single acknowledgment to the 1896 * MFW, and no more than a single notification to the 
upper driver. 1897 */ 1898 if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev)) 1899 return; 1900 1901 rc = qed_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain); 1902 if (rc == 0 && mdump_retain.valid) 1903 DP_NOTICE(p_hwfn, 1904 "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n", 1905 mdump_retain.epoch, 1906 mdump_retain.pf, mdump_retain.status); 1907 else 1908 DP_NOTICE(p_hwfn, 1909 "The MFW notified that a critical error occurred in the device\n"); 1910 1911 DP_NOTICE(p_hwfn, 1912 "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n"); 1913 qed_mcp_mdump_ack(p_hwfn, p_ptt); 1914 1915 qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_HW_ATTN, NULL); 1916 } 1917 1918 void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1919 { 1920 struct public_func shmem_info; 1921 u32 port_cfg, val; 1922 1923 if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) 1924 return; 1925 1926 memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info)); 1927 port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + 1928 offsetof(struct public_port, oem_cfg_port)); 1929 val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >> 1930 OEM_CFG_CHANNEL_TYPE_OFFSET; 1931 if (val != OEM_CFG_CHANNEL_TYPE_STAGGED) 1932 DP_NOTICE(p_hwfn, 1933 "Incorrect UFP Channel type %d port_id 0x%02x\n", 1934 val, MFW_PORT(p_hwfn)); 1935 1936 val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET; 1937 if (val == OEM_CFG_SCHED_TYPE_ETS) { 1938 p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS; 1939 } else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) { 1940 p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW; 1941 } else { 1942 p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN; 1943 DP_NOTICE(p_hwfn, 1944 "Unknown UFP scheduling mode %d port_id 0x%02x\n", 1945 val, MFW_PORT(p_hwfn)); 1946 } 1947 1948 qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); 1949 val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >> 1950 OEM_CFG_FUNC_TC_OFFSET; 1951 p_hwfn->ufp_info.tc = (u8)val; 1952 val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >> 1953 OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET; 1954 if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) { 1955 p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC; 1956 } else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) { 1957 p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS; 1958 } else { 1959 p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN; 1960 DP_NOTICE(p_hwfn, 1961 "Unknown Host priority control %d port_id 0x%02x\n", 1962 val, MFW_PORT(p_hwfn)); 1963 } 1964 1965 DP_NOTICE(p_hwfn, 1966 "UFP shmem config: mode = %d tc = %d pri_type = %d port_id 0x%02x\n", 1967 p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc, 1968 p_hwfn->ufp_info.pri_type, MFW_PORT(p_hwfn)); 1969 } 1970 1971 static int 1972 qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1973 { 1974 qed_mcp_read_ufp_config(p_hwfn, p_ptt); 1975 1976 if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) { 1977 p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc; 1978 qed_hw_info_set_offload_tc(&p_hwfn->hw_info, 1979 p_hwfn->ufp_info.tc); 1980 1981 qed_qm_reconf(p_hwfn, p_ptt); 1982 } else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) { 1983 /* Merge UFP TC with the dcbx TC data */ 1984 qed_dcbx_mib_update_event(p_hwfn, p_ptt, 1985 QED_DCBX_OPERATIONAL_MIB); 1986 } else { 1987 DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n"); 1988 return -EINVAL; 1989 } 1990 1991 /* update storm FW with negotiation results */ 1992 
qed_sp_pf_update_ufp(p_hwfn); 1993 1994 /* update stag pcp value */ 1995 qed_sp_pf_update_stag(p_hwfn); 1996 1997 return 0; 1998 } 1999 2000 int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, 2001 struct qed_ptt *p_ptt) 2002 { 2003 struct qed_mcp_info *info = p_hwfn->mcp_info; 2004 int rc = 0; 2005 bool found = false; 2006 u16 i; 2007 2008 DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n"); 2009 2010 /* Read Messages from MFW */ 2011 qed_mcp_read_mb(p_hwfn, p_ptt); 2012 2013 /* Compare current messages to old ones */ 2014 for (i = 0; i < info->mfw_mb_length; i++) { 2015 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i]) 2016 continue; 2017 2018 found = true; 2019 2020 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 2021 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n", 2022 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]); 2023 2024 switch (i) { 2025 case MFW_DRV_MSG_LINK_CHANGE: 2026 qed_mcp_handle_link_change(p_hwfn, p_ptt, false); 2027 break; 2028 case MFW_DRV_MSG_VF_DISABLED: 2029 qed_mcp_handle_vf_flr(p_hwfn, p_ptt); 2030 break; 2031 case MFW_DRV_MSG_LLDP_DATA_UPDATED: 2032 qed_dcbx_mib_update_event(p_hwfn, p_ptt, 2033 QED_DCBX_REMOTE_LLDP_MIB); 2034 break; 2035 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED: 2036 qed_dcbx_mib_update_event(p_hwfn, p_ptt, 2037 QED_DCBX_REMOTE_MIB); 2038 break; 2039 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED: 2040 qed_dcbx_mib_update_event(p_hwfn, p_ptt, 2041 QED_DCBX_OPERATIONAL_MIB); 2042 break; 2043 case MFW_DRV_MSG_OEM_CFG_UPDATE: 2044 qed_mcp_handle_ufp_event(p_hwfn, p_ptt); 2045 break; 2046 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: 2047 qed_mcp_handle_transceiver_change(p_hwfn, p_ptt); 2048 break; 2049 case MFW_DRV_MSG_ERROR_RECOVERY: 2050 qed_mcp_handle_process_kill(p_hwfn, p_ptt); 2051 break; 2052 case MFW_DRV_MSG_GET_LAN_STATS: 2053 case MFW_DRV_MSG_GET_FCOE_STATS: 2054 case MFW_DRV_MSG_GET_ISCSI_STATS: 2055 case MFW_DRV_MSG_GET_RDMA_STATS: 2056 qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i); 2057 break; 2058 case MFW_DRV_MSG_BW_UPDATE: 2059 qed_mcp_update_bw(p_hwfn, p_ptt); 2060 break; 2061 case MFW_DRV_MSG_S_TAG_UPDATE: 2062 qed_mcp_update_stag(p_hwfn, p_ptt); 2063 break; 2064 case MFW_DRV_MSG_FAILURE_DETECTED: 2065 qed_mcp_handle_fan_failure(p_hwfn, p_ptt); 2066 break; 2067 case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED: 2068 qed_mcp_handle_critical_error(p_hwfn, p_ptt); 2069 break; 2070 case MFW_DRV_MSG_GET_TLV_REQ: 2071 qed_mfw_tlv_req(p_hwfn); 2072 break; 2073 default: 2074 DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i); 2075 rc = -EINVAL; 2076 } 2077 } 2078 2079 /* ACK everything */ 2080 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) { 2081 __be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]); 2082 2083 /* MFW expect answer in BE, so we force write in that format */ 2084 qed_wr(p_hwfn, p_ptt, 2085 info->mfw_mb_addr + sizeof(u32) + 2086 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) * 2087 sizeof(u32) + i * sizeof(u32), 2088 (__force u32)val); 2089 } 2090 2091 if (!found) { 2092 DP_NOTICE(p_hwfn, 2093 "Received an MFW message indication but no new message!\n"); 2094 rc = -EINVAL; 2095 } 2096 2097 /* Copy the new mfw messages into the shadow */ 2098 memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length); 2099 2100 return rc; 2101 } 2102 2103 int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, 2104 struct qed_ptt *p_ptt, 2105 u32 *p_mfw_ver, u32 *p_running_bundle_id) 2106 { 2107 u32 global_offsize, public_base; 2108 2109 if (IS_VF(p_hwfn->cdev)) { 2110 if (p_hwfn->vf_iov_info) { 2111 struct pfvf_acquire_resp_tlv *p_resp; 
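/* VFs read the MFW version from the ACQUIRE response rather than from SHMEM */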
2112 2113 p_resp = &p_hwfn->vf_iov_info->acquire_resp; 2114 *p_mfw_ver = p_resp->pfdev_info.mfw_ver; 2115 return 0; 2116 } else { 2117 DP_VERBOSE(p_hwfn, 2118 QED_MSG_IOV, 2119 "VF requested MFW version prior to ACQUIRE\n"); 2120 return -EINVAL; 2121 } 2122 } 2123 2124 public_base = p_hwfn->mcp_info->public_base; 2125 global_offsize = qed_rd(p_hwfn, p_ptt, 2126 SECTION_OFFSIZE_ADDR(public_base, 2127 PUBLIC_GLOBAL)); 2128 *p_mfw_ver = 2129 qed_rd(p_hwfn, p_ptt, 2130 SECTION_ADDR(global_offsize, 2131 0) + offsetof(struct public_global, mfw_ver)); 2132 2133 if (p_running_bundle_id) { 2134 *p_running_bundle_id = qed_rd(p_hwfn, p_ptt, 2135 SECTION_ADDR(global_offsize, 0) + 2136 offsetof(struct public_global, 2137 running_bundle_id)); 2138 } 2139 2140 return 0; 2141 } 2142 2143 int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn, 2144 struct qed_ptt *p_ptt, u32 *p_mbi_ver) 2145 { 2146 u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr; 2147 2148 if (IS_VF(p_hwfn->cdev)) 2149 return -EINVAL; 2150 2151 /* Read the address of the nvm_cfg */ 2152 nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); 2153 if (!nvm_cfg_addr) { 2154 DP_NOTICE(p_hwfn, "Shared memory not initialized\n"); 2155 return -EINVAL; 2156 } 2157 2158 /* Read the offset of nvm_cfg1 */ 2159 nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); 2160 2161 mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 2162 offsetof(struct nvm_cfg1, glob) + 2163 offsetof(struct nvm_cfg1_glob, mbi_version); 2164 *p_mbi_ver = qed_rd(p_hwfn, p_ptt, 2165 mbi_ver_addr) & 2166 (NVM_CFG1_GLOB_MBI_VERSION_0_MASK | 2167 NVM_CFG1_GLOB_MBI_VERSION_1_MASK | 2168 NVM_CFG1_GLOB_MBI_VERSION_2_MASK); 2169 2170 return 0; 2171 } 2172 2173 int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn, 2174 struct qed_ptt *p_ptt, u32 *p_media_type) 2175 { 2176 *p_media_type = MEDIA_UNSPECIFIED; 2177 2178 if (IS_VF(p_hwfn->cdev)) 2179 return -EINVAL; 2180 2181 if (!qed_mcp_is_init(p_hwfn)) { 2182 DP_NOTICE(p_hwfn, "MFW is not initialized!\n"); 2183 return -EBUSY; 2184 } 2185 2186 if (!p_ptt) { 2187 *p_media_type = MEDIA_UNSPECIFIED; 2188 return -EINVAL; 2189 } 2190 2191 *p_media_type = qed_rd(p_hwfn, p_ptt, 2192 p_hwfn->mcp_info->port_addr + 2193 offsetof(struct public_port, 2194 media_type)); 2195 2196 return 0; 2197 } 2198 2199 int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn, 2200 struct qed_ptt *p_ptt, 2201 u32 *p_transceiver_state, 2202 u32 *p_transceiver_type) 2203 { 2204 u32 transceiver_info; 2205 2206 *p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE; 2207 *p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING; 2208 2209 if (IS_VF(p_hwfn->cdev)) 2210 return -EINVAL; 2211 2212 if (!qed_mcp_is_init(p_hwfn)) { 2213 DP_NOTICE(p_hwfn, "MFW is not initialized!\n"); 2214 return -EBUSY; 2215 } 2216 2217 transceiver_info = qed_rd(p_hwfn, p_ptt, 2218 p_hwfn->mcp_info->port_addr + 2219 offsetof(struct public_port, 2220 transceiver_data)); 2221 2222 *p_transceiver_state = (transceiver_info & 2223 ETH_TRANSCEIVER_STATE_MASK) >> 2224 ETH_TRANSCEIVER_STATE_OFFSET; 2225 2226 if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) 2227 *p_transceiver_type = (transceiver_info & 2228 ETH_TRANSCEIVER_TYPE_MASK) >> 2229 ETH_TRANSCEIVER_TYPE_OFFSET; 2230 else 2231 *p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN; 2232 2233 return 0; 2234 } 2235 2236 static bool qed_is_transceiver_ready(u32 transceiver_state, 2237 u32 transceiver_type) 2238 { 2239 if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) && 2240 ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) && 2241 
(transceiver_type != ETH_TRANSCEIVER_TYPE_NONE)) 2242 return true; 2243 2244 return false; 2245 } 2246 2247 int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn, 2248 struct qed_ptt *p_ptt, u32 *p_speed_mask) 2249 { 2250 u32 transceiver_type, transceiver_state; 2251 int ret; 2252 2253 ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state, 2254 &transceiver_type); 2255 if (ret) 2256 return ret; 2257 2258 if (qed_is_transceiver_ready(transceiver_state, transceiver_type) == 2259 false) 2260 return -EINVAL; 2261 2262 switch (transceiver_type) { 2263 case ETH_TRANSCEIVER_TYPE_1G_LX: 2264 case ETH_TRANSCEIVER_TYPE_1G_SX: 2265 case ETH_TRANSCEIVER_TYPE_1G_PCC: 2266 case ETH_TRANSCEIVER_TYPE_1G_ACC: 2267 case ETH_TRANSCEIVER_TYPE_1000BASET: 2268 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; 2269 break; 2270 case ETH_TRANSCEIVER_TYPE_10G_SR: 2271 case ETH_TRANSCEIVER_TYPE_10G_LR: 2272 case ETH_TRANSCEIVER_TYPE_10G_LRM: 2273 case ETH_TRANSCEIVER_TYPE_10G_ER: 2274 case ETH_TRANSCEIVER_TYPE_10G_PCC: 2275 case ETH_TRANSCEIVER_TYPE_10G_ACC: 2276 case ETH_TRANSCEIVER_TYPE_4x10G: 2277 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; 2278 break; 2279 case ETH_TRANSCEIVER_TYPE_40G_LR4: 2280 case ETH_TRANSCEIVER_TYPE_40G_SR4: 2281 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: 2282 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: 2283 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | 2284 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; 2285 break; 2286 case ETH_TRANSCEIVER_TYPE_100G_AOC: 2287 case ETH_TRANSCEIVER_TYPE_100G_SR4: 2288 case ETH_TRANSCEIVER_TYPE_100G_LR4: 2289 case ETH_TRANSCEIVER_TYPE_100G_ER4: 2290 case ETH_TRANSCEIVER_TYPE_100G_ACC: 2291 *p_speed_mask = 2292 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | 2293 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; 2294 break; 2295 case ETH_TRANSCEIVER_TYPE_25G_SR: 2296 case ETH_TRANSCEIVER_TYPE_25G_LR: 2297 case ETH_TRANSCEIVER_TYPE_25G_AOC: 2298 case ETH_TRANSCEIVER_TYPE_25G_ACC_S: 2299 case ETH_TRANSCEIVER_TYPE_25G_ACC_M: 2300 case ETH_TRANSCEIVER_TYPE_25G_ACC_L: 2301 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; 2302 break; 2303 case ETH_TRANSCEIVER_TYPE_25G_CA_N: 2304 case ETH_TRANSCEIVER_TYPE_25G_CA_S: 2305 case ETH_TRANSCEIVER_TYPE_25G_CA_L: 2306 case ETH_TRANSCEIVER_TYPE_4x25G_CR: 2307 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | 2308 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | 2309 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; 2310 break; 2311 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR: 2312 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR: 2313 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | 2314 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; 2315 break; 2316 case ETH_TRANSCEIVER_TYPE_40G_CR4: 2317 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR: 2318 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | 2319 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | 2320 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; 2321 break; 2322 case ETH_TRANSCEIVER_TYPE_100G_CR4: 2323 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: 2324 *p_speed_mask = 2325 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | 2326 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G | 2327 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | 2328 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | 2329 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G | 2330 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | 2331 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; 2332 break; 2333 case 
ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: 2334 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: 2335 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC: 2336 *p_speed_mask = 2337 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | 2338 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | 2339 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | 2340 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; 2341 break; 2342 case ETH_TRANSCEIVER_TYPE_XLPPI: 2343 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; 2344 break; 2345 case ETH_TRANSCEIVER_TYPE_10G_BASET: 2346 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR: 2347 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR: 2348 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | 2349 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; 2350 break; 2351 default: 2352 DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n", 2353 transceiver_type); 2354 *p_speed_mask = 0xff; 2355 break; 2356 } 2357 2358 return 0; 2359 } 2360 2361 int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn, 2362 struct qed_ptt *p_ptt, u32 *p_board_config) 2363 { 2364 u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr; 2365 2366 if (IS_VF(p_hwfn->cdev)) 2367 return -EINVAL; 2368 2369 if (!qed_mcp_is_init(p_hwfn)) { 2370 DP_NOTICE(p_hwfn, "MFW is not initialized!\n"); 2371 return -EBUSY; 2372 } 2373 if (!p_ptt) { 2374 *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED; 2375 return -EINVAL; 2376 } 2377 2378 nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); 2379 nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); 2380 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 2381 offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); 2382 *p_board_config = qed_rd(p_hwfn, p_ptt, 2383 port_cfg_addr + 2384 offsetof(struct nvm_cfg1_port, 2385 board_cfg)); 2386 2387 return 0; 2388 } 2389 2390 /* Old MFW has a global configuration for all PFs regarding RDMA support */ 2391 static void 2392 qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn, 2393 enum qed_pci_personality *p_proto) 2394 { 2395 /* There wasn't ever a legacy MFW that published iwarp. 2396 * So at this point, this is either plain l2 or RoCE. 
2397 */ 2398 if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities)) 2399 *p_proto = QED_PCI_ETH_ROCE; 2400 else 2401 *p_proto = QED_PCI_ETH; 2402 2403 DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, 2404 "According to Legacy capabilities, L2 personality is %08x\n", 2405 (u32)*p_proto); 2406 } 2407 2408 static int 2409 qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn, 2410 struct qed_ptt *p_ptt, 2411 enum qed_pci_personality *p_proto) 2412 { 2413 u32 resp = 0, param = 0; 2414 int rc; 2415 2416 rc = qed_mcp_cmd(p_hwfn, p_ptt, 2417 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, ¶m); 2418 if (rc) 2419 return rc; 2420 if (resp != FW_MSG_CODE_OK) { 2421 DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, 2422 "MFW lacks support for command; Returns %08x\n", 2423 resp); 2424 return -EINVAL; 2425 } 2426 2427 switch (param) { 2428 case FW_MB_PARAM_GET_PF_RDMA_NONE: 2429 *p_proto = QED_PCI_ETH; 2430 break; 2431 case FW_MB_PARAM_GET_PF_RDMA_ROCE: 2432 *p_proto = QED_PCI_ETH_ROCE; 2433 break; 2434 case FW_MB_PARAM_GET_PF_RDMA_IWARP: 2435 *p_proto = QED_PCI_ETH_IWARP; 2436 break; 2437 case FW_MB_PARAM_GET_PF_RDMA_BOTH: 2438 *p_proto = QED_PCI_ETH_RDMA; 2439 break; 2440 default: 2441 DP_NOTICE(p_hwfn, 2442 "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n", 2443 param); 2444 return -EINVAL; 2445 } 2446 2447 DP_VERBOSE(p_hwfn, 2448 NETIF_MSG_IFUP, 2449 "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n", 2450 (u32)*p_proto, resp, param); 2451 return 0; 2452 } 2453 2454 static int 2455 qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, 2456 struct public_func *p_info, 2457 struct qed_ptt *p_ptt, 2458 enum qed_pci_personality *p_proto) 2459 { 2460 int rc = 0; 2461 2462 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) { 2463 case FUNC_MF_CFG_PROTOCOL_ETHERNET: 2464 if (!IS_ENABLED(CONFIG_QED_RDMA)) 2465 *p_proto = QED_PCI_ETH; 2466 else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto)) 2467 qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto); 2468 break; 2469 case FUNC_MF_CFG_PROTOCOL_ISCSI: 2470 *p_proto = QED_PCI_ISCSI; 2471 break; 2472 case FUNC_MF_CFG_PROTOCOL_FCOE: 2473 *p_proto = QED_PCI_FCOE; 2474 break; 2475 case FUNC_MF_CFG_PROTOCOL_ROCE: 2476 DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n"); 2477 fallthrough; 2478 default: 2479 rc = -EINVAL; 2480 } 2481 2482 return rc; 2483 } 2484 2485 int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, 2486 struct qed_ptt *p_ptt) 2487 { 2488 struct qed_mcp_function_info *info; 2489 struct public_func shmem_info; 2490 2491 qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); 2492 info = &p_hwfn->mcp_info->func_info; 2493 2494 info->pause_on_host = (shmem_info.config & 2495 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 
1 : 0; 2496 2497 if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt, 2498 &info->protocol)) { 2499 DP_ERR(p_hwfn, "Unknown personality %08x\n", 2500 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK)); 2501 return -EINVAL; 2502 } 2503 2504 qed_read_pf_bandwidth(p_hwfn, &shmem_info); 2505 2506 if (shmem_info.mac_upper || shmem_info.mac_lower) { 2507 info->mac[0] = (u8)(shmem_info.mac_upper >> 8); 2508 info->mac[1] = (u8)(shmem_info.mac_upper); 2509 info->mac[2] = (u8)(shmem_info.mac_lower >> 24); 2510 info->mac[3] = (u8)(shmem_info.mac_lower >> 16); 2511 info->mac[4] = (u8)(shmem_info.mac_lower >> 8); 2512 info->mac[5] = (u8)(shmem_info.mac_lower); 2513 2514 /* Store primary MAC for later possible WoL */ 2515 memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN); 2516 } else { 2517 DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n"); 2518 } 2519 2520 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower | 2521 (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32); 2522 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower | 2523 (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32); 2524 2525 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK); 2526 2527 info->mtu = (u16)shmem_info.mtu_size; 2528 2529 p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE; 2530 p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT; 2531 if (qed_mcp_is_init(p_hwfn)) { 2532 u32 resp = 0, param = 0; 2533 int rc; 2534 2535 rc = qed_mcp_cmd(p_hwfn, p_ptt, 2536 DRV_MSG_CODE_OS_WOL, 0, &resp, ¶m); 2537 if (rc) 2538 return rc; 2539 if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED) 2540 p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME; 2541 } 2542 2543 DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP), 2544 "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %pM wwn port %llx node %llx ovlan %04x wol %02x\n", 2545 info->pause_on_host, info->protocol, 2546 info->bandwidth_min, info->bandwidth_max, 2547 info->mac, 2548 info->wwn_port, info->wwn_node, 2549 info->ovlan, (u8)p_hwfn->hw_info.b_wol_support); 2550 2551 return 0; 2552 } 2553 2554 struct qed_mcp_link_params 2555 *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn) 2556 { 2557 if (!p_hwfn || !p_hwfn->mcp_info) 2558 return NULL; 2559 return &p_hwfn->mcp_info->link_input; 2560 } 2561 2562 struct qed_mcp_link_state 2563 *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn) 2564 { 2565 if (!p_hwfn || !p_hwfn->mcp_info) 2566 return NULL; 2567 return &p_hwfn->mcp_info->link_output; 2568 } 2569 2570 struct qed_mcp_link_capabilities 2571 *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn) 2572 { 2573 if (!p_hwfn || !p_hwfn->mcp_info) 2574 return NULL; 2575 return &p_hwfn->mcp_info->link_capabilities; 2576 } 2577 2578 int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2579 { 2580 u32 resp = 0, param = 0; 2581 int rc; 2582 2583 rc = qed_mcp_cmd(p_hwfn, p_ptt, 2584 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, ¶m); 2585 2586 /* Wait for the drain to complete before returning */ 2587 msleep(1020); 2588 2589 return rc; 2590 } 2591 2592 int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, 2593 struct qed_ptt *p_ptt, u32 *p_flash_size) 2594 { 2595 u32 flash_size; 2596 2597 if (IS_VF(p_hwfn->cdev)) 2598 return -EINVAL; 2599 2600 flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4); 2601 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >> 2602 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT; 2603 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT)); 2604 2605 *p_flash_size = flash_size; 2606 2607 return 0; 2608 } 2609 2610 int 
qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2611 { 2612 struct qed_dev *cdev = p_hwfn->cdev; 2613 2614 if (cdev->recov_in_prog) { 2615 DP_NOTICE(p_hwfn, 2616 "Avoid triggering a recovery since such a process is already in progress\n"); 2617 return -EAGAIN; 2618 } 2619 2620 DP_NOTICE(p_hwfn, "Triggering a recovery process\n"); 2621 qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1); 2622 2623 return 0; 2624 } 2625 2626 #define QED_RECOVERY_PROLOG_SLEEP_MS 100 2627 2628 int qed_recovery_prolog(struct qed_dev *cdev) 2629 { 2630 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 2631 struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; 2632 int rc; 2633 2634 /* Allow ongoing PCIe transactions to complete */ 2635 msleep(QED_RECOVERY_PROLOG_SLEEP_MS); 2636 2637 /* Clear the PF's internal FID_enable in the PXP */ 2638 rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false); 2639 if (rc) 2640 DP_NOTICE(p_hwfn, 2641 "qed_pglueb_set_pfid_enable() failed. rc = %d.\n", 2642 rc); 2643 2644 return rc; 2645 } 2646 2647 static int 2648 qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn, 2649 struct qed_ptt *p_ptt, u8 vf_id, u8 num) 2650 { 2651 u32 resp = 0, param = 0, rc_param = 0; 2652 int rc; 2653 2654 /* Only Leader can configure MSIX, and need to take CMT into account */ 2655 if (!IS_LEAD_HWFN(p_hwfn)) 2656 return 0; 2657 num *= p_hwfn->cdev->num_hwfns; 2658 2659 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) & 2660 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK; 2661 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) & 2662 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK; 2663 2664 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param, 2665 &resp, &rc_param); 2666 2667 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) { 2668 DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id); 2669 rc = -EINVAL; 2670 } else { 2671 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2672 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n", 2673 num, vf_id); 2674 } 2675 2676 return rc; 2677 } 2678 2679 static int 2680 qed_mcp_config_vf_msix_ah(struct qed_hwfn *p_hwfn, 2681 struct qed_ptt *p_ptt, u8 num) 2682 { 2683 u32 resp = 0, param = num, rc_param = 0; 2684 int rc; 2685 2686 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX, 2687 param, &resp, &rc_param); 2688 2689 if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) { 2690 DP_NOTICE(p_hwfn, "MFW failed to set MSI-X for VFs\n"); 2691 rc = -EINVAL; 2692 } else { 2693 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2694 "Requested 0x%02x MSI-x interrupts for VFs\n", num); 2695 } 2696 2697 return rc; 2698 } 2699 2700 int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn, 2701 struct qed_ptt *p_ptt, u8 vf_id, u8 num) 2702 { 2703 if (QED_IS_BB(p_hwfn->cdev)) 2704 return qed_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num); 2705 else 2706 return qed_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num); 2707 } 2708 2709 int 2710 qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, 2711 struct qed_ptt *p_ptt, 2712 struct qed_mcp_drv_version *p_ver) 2713 { 2714 struct qed_mcp_mb_params mb_params; 2715 struct drv_version_stc drv_version; 2716 __be32 val; 2717 u32 i; 2718 int rc; 2719 2720 memset(&drv_version, 0, sizeof(drv_version)); 2721 drv_version.version = p_ver->version; 2722 for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) { 2723 val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)])); 2724 *(__be32 *)&drv_version.name[i * sizeof(u32)] = val; 2725 } 2726 2727 memset(&mb_params, 0, sizeof(mb_params)); 2728 mb_params.cmd = DRV_MSG_CODE_SET_VERSION; 2729 
mb_params.p_data_src = &drv_version; 2730 mb_params.data_src_size = sizeof(drv_version); 2731 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 2732 if (rc) 2733 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); 2734 2735 return rc; 2736 } 2737 2738 /* A maximal 100 msec waiting time for the MCP to halt */ 2739 #define QED_MCP_HALT_SLEEP_MS 10 2740 #define QED_MCP_HALT_MAX_RETRIES 10 2741 2742 int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2743 { 2744 u32 resp = 0, param = 0, cpu_state, cnt = 0; 2745 int rc; 2746 2747 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp, 2748 ¶m); 2749 if (rc) { 2750 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); 2751 return rc; 2752 } 2753 2754 do { 2755 msleep(QED_MCP_HALT_SLEEP_MS); 2756 cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); 2757 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) 2758 break; 2759 } while (++cnt < QED_MCP_HALT_MAX_RETRIES); 2760 2761 if (cnt == QED_MCP_HALT_MAX_RETRIES) { 2762 DP_NOTICE(p_hwfn, 2763 "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", 2764 qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state); 2765 return -EBUSY; 2766 } 2767 2768 qed_mcp_cmd_set_blocking(p_hwfn, true); 2769 2770 return 0; 2771 } 2772 2773 #define QED_MCP_RESUME_SLEEP_MS 10 2774 2775 int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2776 { 2777 u32 cpu_mode, cpu_state; 2778 2779 qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff); 2780 2781 cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); 2782 cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT; 2783 qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode); 2784 msleep(QED_MCP_RESUME_SLEEP_MS); 2785 cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); 2786 2787 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) { 2788 DP_NOTICE(p_hwfn, 2789 "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", 2790 cpu_mode, cpu_state); 2791 return -EBUSY; 2792 } 2793 2794 qed_mcp_cmd_set_blocking(p_hwfn, false); 2795 2796 return 0; 2797 } 2798 2799 int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn, 2800 struct qed_ptt *p_ptt, 2801 enum qed_ov_client client) 2802 { 2803 u32 resp = 0, param = 0; 2804 u32 drv_mb_param; 2805 int rc; 2806 2807 switch (client) { 2808 case QED_OV_CLIENT_DRV: 2809 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS; 2810 break; 2811 case QED_OV_CLIENT_USER: 2812 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER; 2813 break; 2814 case QED_OV_CLIENT_VENDOR_SPEC: 2815 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC; 2816 break; 2817 default: 2818 DP_NOTICE(p_hwfn, "Invalid client type %d\n", client); 2819 return -EINVAL; 2820 } 2821 2822 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG, 2823 drv_mb_param, &resp, ¶m); 2824 if (rc) 2825 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); 2826 2827 return rc; 2828 } 2829 2830 int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn, 2831 struct qed_ptt *p_ptt, 2832 enum qed_ov_driver_state drv_state) 2833 { 2834 u32 resp = 0, param = 0; 2835 u32 drv_mb_param; 2836 int rc; 2837 2838 switch (drv_state) { 2839 case QED_OV_DRIVER_STATE_NOT_LOADED: 2840 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED; 2841 break; 2842 case QED_OV_DRIVER_STATE_DISABLED: 2843 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED; 2844 break; 2845 case QED_OV_DRIVER_STATE_ACTIVE: 2846 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE; 2847 break; 2848 default: 2849 DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state); 2850 return -EINVAL; 2851 } 
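/* Report the mapped driver state to the MFW */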
2852 2853 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE, 2854 drv_mb_param, &resp, ¶m); 2855 if (rc) 2856 DP_ERR(p_hwfn, "Failed to send driver state\n"); 2857 2858 return rc; 2859 } 2860 2861 int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn, 2862 struct qed_ptt *p_ptt, u16 mtu) 2863 { 2864 u32 resp = 0, param = 0; 2865 u32 drv_mb_param; 2866 int rc; 2867 2868 drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT; 2869 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU, 2870 drv_mb_param, &resp, ¶m); 2871 if (rc) 2872 DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc); 2873 2874 return rc; 2875 } 2876 2877 int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn, 2878 struct qed_ptt *p_ptt, const u8 *mac) 2879 { 2880 struct qed_mcp_mb_params mb_params; 2881 u32 mfw_mac[2]; 2882 int rc; 2883 2884 memset(&mb_params, 0, sizeof(mb_params)); 2885 mb_params.cmd = DRV_MSG_CODE_SET_VMAC; 2886 mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC << 2887 DRV_MSG_CODE_VMAC_TYPE_SHIFT; 2888 mb_params.param |= MCP_PF_ID(p_hwfn); 2889 2890 /* MCP is BE, and on LE platforms PCI would swap access to SHMEM 2891 * in 32-bit granularity. 2892 * So the MAC has to be set in native order [and not byte order], 2893 * otherwise it would be read incorrectly by MFW after swap. 2894 */ 2895 mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3]; 2896 mfw_mac[1] = mac[4] << 24 | mac[5] << 16; 2897 2898 mb_params.p_data_src = (u8 *)mfw_mac; 2899 mb_params.data_src_size = 8; 2900 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 2901 if (rc) 2902 DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc); 2903 2904 /* Store primary MAC for later possible WoL */ 2905 memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN); 2906 2907 return rc; 2908 } 2909 2910 int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn, 2911 struct qed_ptt *p_ptt, enum qed_ov_wol wol) 2912 { 2913 u32 resp = 0, param = 0; 2914 u32 drv_mb_param; 2915 int rc; 2916 2917 if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) { 2918 DP_VERBOSE(p_hwfn, QED_MSG_SP, 2919 "Can't change WoL configuration when WoL isn't supported\n"); 2920 return -EINVAL; 2921 } 2922 2923 switch (wol) { 2924 case QED_OV_WOL_DEFAULT: 2925 drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT; 2926 break; 2927 case QED_OV_WOL_DISABLED: 2928 drv_mb_param = DRV_MB_PARAM_WOL_DISABLED; 2929 break; 2930 case QED_OV_WOL_ENABLED: 2931 drv_mb_param = DRV_MB_PARAM_WOL_ENABLED; 2932 break; 2933 default: 2934 DP_ERR(p_hwfn, "Invalid wol state %d\n", wol); 2935 return -EINVAL; 2936 } 2937 2938 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL, 2939 drv_mb_param, &resp, ¶m); 2940 if (rc) 2941 DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc); 2942 2943 /* Store the WoL update for a future unload */ 2944 p_hwfn->cdev->wol_config = (u8)wol; 2945 2946 return rc; 2947 } 2948 2949 int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn, 2950 struct qed_ptt *p_ptt, 2951 enum qed_ov_eswitch eswitch) 2952 { 2953 u32 resp = 0, param = 0; 2954 u32 drv_mb_param; 2955 int rc; 2956 2957 switch (eswitch) { 2958 case QED_OV_ESWITCH_NONE: 2959 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE; 2960 break; 2961 case QED_OV_ESWITCH_VEB: 2962 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB; 2963 break; 2964 case QED_OV_ESWITCH_VEPA: 2965 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA; 2966 break; 2967 default: 2968 DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch); 2969 return -EINVAL; 2970 } 2971 2972 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE, 2973 
drv_mb_param, &resp, &param);
2974 if (rc)
2975 DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
2976
2977 return rc;
2978 }
2979
2980 int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
2981 struct qed_ptt *p_ptt, enum qed_led_mode mode)
2982 {
2983 u32 resp = 0, param = 0, drv_mb_param;
2984 int rc;
2985
2986 switch (mode) {
2987 case QED_LED_MODE_ON:
2988 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2989 break;
2990 case QED_LED_MODE_OFF:
2991 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2992 break;
2993 case QED_LED_MODE_RESTORE:
2994 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2995 break;
2996 default:
2997 DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
2998 return -EINVAL;
2999 }
3000
3001 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
3002 drv_mb_param, &resp, &param);
3003
3004 return rc;
3005 }
3006
3007 int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
3008 struct qed_ptt *p_ptt, u32 mask_parities)
3009 {
3010 u32 resp = 0, param = 0;
3011 int rc;
3012
3013 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
3014 mask_parities, &resp, &param);
3015
3016 if (rc) {
3017 DP_ERR(p_hwfn,
3018 "MCP response failure for mask parities, aborting\n");
3019 } else if (resp != FW_MSG_CODE_OK) {
3020 DP_ERR(p_hwfn,
3021 "MCP did not acknowledge mask parity request. Old MFW?\n");
3022 rc = -EINVAL;
3023 }
3024
3025 return rc;
3026 }
3027
3028 int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
3029 {
3030 u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
3031 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
3032 u32 resp = 0, resp_param = 0;
3033 struct qed_ptt *p_ptt;
3034 int rc = 0;
3035
3036 p_ptt = qed_ptt_acquire(p_hwfn);
3037 if (!p_ptt)
3038 return -EBUSY;
3039
3040 while (bytes_left > 0) {
3041 bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);
3042
3043 rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3044 DRV_MSG_CODE_NVM_READ_NVRAM,
3045 addr + offset +
3046 (bytes_to_copy <<
3047 DRV_MB_PARAM_NVM_LEN_OFFSET),
3048 &resp, &resp_param,
3049 &read_len,
3050 (u32 *)(p_buf + offset), false);
3051
3052 if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
3053 DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
3054 break;
3055 }
3056
3057 /* This can be a lengthy process, and it's possible scheduler
3058 * isn't preemptible. Sleep a bit to prevent CPU hogging.
3059 */ 3060 if (bytes_left % 0x1000 < 3061 (bytes_left - read_len) % 0x1000) 3062 usleep_range(1000, 2000); 3063 3064 offset += read_len; 3065 bytes_left -= read_len; 3066 } 3067 3068 cdev->mcp_nvm_resp = resp; 3069 qed_ptt_release(p_hwfn, p_ptt); 3070 3071 return rc; 3072 } 3073 3074 int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf) 3075 { 3076 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 3077 struct qed_ptt *p_ptt; 3078 3079 p_ptt = qed_ptt_acquire(p_hwfn); 3080 if (!p_ptt) 3081 return -EBUSY; 3082 3083 memcpy(p_buf, &cdev->mcp_nvm_resp, sizeof(cdev->mcp_nvm_resp)); 3084 qed_ptt_release(p_hwfn, p_ptt); 3085 3086 return 0; 3087 } 3088 3089 int qed_mcp_nvm_write(struct qed_dev *cdev, 3090 u32 cmd, u32 addr, u8 *p_buf, u32 len) 3091 { 3092 u32 buf_idx = 0, buf_size, nvm_cmd, nvm_offset, resp = 0, param; 3093 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 3094 struct qed_ptt *p_ptt; 3095 int rc = -EINVAL; 3096 3097 p_ptt = qed_ptt_acquire(p_hwfn); 3098 if (!p_ptt) 3099 return -EBUSY; 3100 3101 switch (cmd) { 3102 case QED_PUT_FILE_BEGIN: 3103 nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN; 3104 break; 3105 case QED_PUT_FILE_DATA: 3106 nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA; 3107 break; 3108 case QED_NVM_WRITE_NVRAM: 3109 nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM; 3110 break; 3111 default: 3112 DP_NOTICE(p_hwfn, "Invalid nvm write command 0x%x\n", cmd); 3113 rc = -EINVAL; 3114 goto out; 3115 } 3116 3117 buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN); 3118 while (buf_idx < len) { 3119 if (cmd == QED_PUT_FILE_BEGIN) 3120 nvm_offset = addr; 3121 else 3122 nvm_offset = ((buf_size << 3123 DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) + 3124 buf_idx; 3125 rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset, 3126 &resp, ¶m, buf_size, 3127 (u32 *)&p_buf[buf_idx]); 3128 if (rc) { 3129 DP_NOTICE(cdev, "nvm write failed, rc = %d\n", rc); 3130 resp = FW_MSG_CODE_ERROR; 3131 break; 3132 } 3133 3134 if (resp != FW_MSG_CODE_OK && 3135 resp != FW_MSG_CODE_NVM_OK && 3136 resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) { 3137 DP_NOTICE(cdev, 3138 "nvm write failed, resp = 0x%08x\n", resp); 3139 rc = -EINVAL; 3140 break; 3141 } 3142 3143 /* This can be a lengthy process, and it's possible scheduler 3144 * isn't pre-emptable. Sleep a bit to prevent CPU hogging. 3145 */ 3146 if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000) 3147 usleep_range(1000, 2000); 3148 3149 /* For MBI upgrade, MFW response includes the next buffer offset 3150 * to be delivered to MFW. 
3151 */ 3152 if (param && cmd == QED_PUT_FILE_DATA) { 3153 buf_idx = 3154 QED_MFW_GET_FIELD(param, 3155 FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET); 3156 buf_size = 3157 QED_MFW_GET_FIELD(param, 3158 FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE); 3159 } else { 3160 buf_idx += buf_size; 3161 buf_size = min_t(u32, (len - buf_idx), 3162 MCP_DRV_NVM_BUF_LEN); 3163 } 3164 } 3165 3166 cdev->mcp_nvm_resp = resp; 3167 out: 3168 qed_ptt_release(p_hwfn, p_ptt); 3169 3170 return rc; 3171 } 3172 3173 int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 3174 u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf) 3175 { 3176 u32 bytes_left, bytes_to_copy, buf_size, nvm_offset = 0; 3177 u32 resp, param; 3178 int rc; 3179 3180 nvm_offset |= (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) & 3181 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK; 3182 nvm_offset |= (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) & 3183 DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK; 3184 3185 addr = offset; 3186 offset = 0; 3187 bytes_left = len; 3188 while (bytes_left > 0) { 3189 bytes_to_copy = min_t(u32, bytes_left, 3190 MAX_I2C_TRANSACTION_SIZE); 3191 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK | 3192 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK); 3193 nvm_offset |= ((addr + offset) << 3194 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) & 3195 DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK; 3196 nvm_offset |= (bytes_to_copy << 3197 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) & 3198 DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK; 3199 rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, 3200 DRV_MSG_CODE_TRANSCEIVER_READ, 3201 nvm_offset, &resp, ¶m, &buf_size, 3202 (u32 *)(p_buf + offset), true); 3203 if (rc) { 3204 DP_NOTICE(p_hwfn, 3205 "Failed to send a transceiver read command to the MFW. rc = %d.\n", 3206 rc); 3207 return rc; 3208 } 3209 3210 if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) 3211 return -ENODEV; 3212 else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK) 3213 return -EINVAL; 3214 3215 offset += buf_size; 3216 bytes_left -= buf_size; 3217 } 3218 3219 return 0; 3220 } 3221 3222 int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 3223 { 3224 u32 drv_mb_param = 0, rsp, param; 3225 int rc = 0; 3226 3227 drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST << 3228 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT); 3229 3230 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, 3231 drv_mb_param, &rsp, ¶m); 3232 3233 if (rc) 3234 return rc; 3235 3236 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || 3237 (param != DRV_MB_PARAM_BIST_RC_PASSED)) 3238 rc = -EAGAIN; 3239 3240 return rc; 3241 } 3242 3243 int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 3244 { 3245 u32 drv_mb_param, rsp, param; 3246 int rc = 0; 3247 3248 drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST << 3249 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT); 3250 3251 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, 3252 drv_mb_param, &rsp, ¶m); 3253 3254 if (rc) 3255 return rc; 3256 3257 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || 3258 (param != DRV_MB_PARAM_BIST_RC_PASSED)) 3259 rc = -EAGAIN; 3260 3261 return rc; 3262 } 3263 3264 int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn, 3265 struct qed_ptt *p_ptt, 3266 u32 *num_images) 3267 { 3268 u32 drv_mb_param = 0, rsp; 3269 int rc = 0; 3270 3271 drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES << 3272 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT); 3273 3274 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, 3275 drv_mb_param, &rsp, num_images); 3276 if (rc) 3277 return rc; 3278 3279 if (((rsp & FW_MSG_CODE_MASK) != 
FW_MSG_CODE_OK)) 3280 rc = -EINVAL; 3281 3282 return rc; 3283 } 3284 3285 int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, 3286 struct qed_ptt *p_ptt, 3287 struct bist_nvm_image_att *p_image_att, 3288 u32 image_index) 3289 { 3290 u32 buf_size = 0, param, resp = 0, resp_param = 0; 3291 int rc; 3292 3293 param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX << 3294 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT; 3295 param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT; 3296 3297 rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, 3298 DRV_MSG_CODE_BIST_TEST, param, 3299 &resp, &resp_param, 3300 &buf_size, 3301 (u32 *)p_image_att, false); 3302 if (rc) 3303 return rc; 3304 3305 if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || 3306 (p_image_att->return_code != 1)) 3307 rc = -EINVAL; 3308 3309 return rc; 3310 } 3311 3312 int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn) 3313 { 3314 struct qed_nvm_image_info nvm_info; 3315 struct qed_ptt *p_ptt; 3316 int rc; 3317 u32 i; 3318 3319 if (p_hwfn->nvm_info.valid) 3320 return 0; 3321 3322 p_ptt = qed_ptt_acquire(p_hwfn); 3323 if (!p_ptt) { 3324 DP_ERR(p_hwfn, "failed to acquire ptt\n"); 3325 return -EBUSY; 3326 } 3327 3328 /* Acquire from MFW the amount of available images */ 3329 nvm_info.num_images = 0; 3330 rc = qed_mcp_bist_nvm_get_num_images(p_hwfn, 3331 p_ptt, &nvm_info.num_images); 3332 if (rc == -EOPNOTSUPP) { 3333 DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n"); 3334 goto out; 3335 } else if (rc || !nvm_info.num_images) { 3336 DP_ERR(p_hwfn, "Failed getting number of images\n"); 3337 goto err0; 3338 } 3339 3340 nvm_info.image_att = kmalloc_array(nvm_info.num_images, 3341 sizeof(struct bist_nvm_image_att), 3342 GFP_KERNEL); 3343 if (!nvm_info.image_att) { 3344 rc = -ENOMEM; 3345 goto err0; 3346 } 3347 3348 /* Iterate over images and get their attributes */ 3349 for (i = 0; i < nvm_info.num_images; i++) { 3350 rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt, 3351 &nvm_info.image_att[i], i); 3352 if (rc) { 3353 DP_ERR(p_hwfn, 3354 "Failed getting image index %d attributes\n", i); 3355 goto err1; 3356 } 3357 3358 DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i, 3359 nvm_info.image_att[i].len); 3360 } 3361 out: 3362 /* Update hwfn's nvm_info */ 3363 if (nvm_info.num_images) { 3364 p_hwfn->nvm_info.num_images = nvm_info.num_images; 3365 kfree(p_hwfn->nvm_info.image_att); 3366 p_hwfn->nvm_info.image_att = nvm_info.image_att; 3367 p_hwfn->nvm_info.valid = true; 3368 } 3369 3370 qed_ptt_release(p_hwfn, p_ptt); 3371 return 0; 3372 3373 err1: 3374 kfree(nvm_info.image_att); 3375 err0: 3376 qed_ptt_release(p_hwfn, p_ptt); 3377 return rc; 3378 } 3379 3380 void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn) 3381 { 3382 kfree(p_hwfn->nvm_info.image_att); 3383 p_hwfn->nvm_info.image_att = NULL; 3384 p_hwfn->nvm_info.valid = false; 3385 } 3386 3387 int 3388 qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, 3389 enum qed_nvm_images image_id, 3390 struct qed_nvm_image_att *p_image_att) 3391 { 3392 enum nvm_image_type type; 3393 int rc; 3394 u32 i; 3395 3396 /* Translate image_id into MFW definitions */ 3397 switch (image_id) { 3398 case QED_NVM_IMAGE_ISCSI_CFG: 3399 type = NVM_TYPE_ISCSI_CFG; 3400 break; 3401 case QED_NVM_IMAGE_FCOE_CFG: 3402 type = NVM_TYPE_FCOE_CFG; 3403 break; 3404 case QED_NVM_IMAGE_MDUMP: 3405 type = NVM_TYPE_MDUMP; 3406 break; 3407 case QED_NVM_IMAGE_NVM_CFG1: 3408 type = NVM_TYPE_NVM_CFG1; 3409 break; 3410 case QED_NVM_IMAGE_DEFAULT_CFG: 3411 type = NVM_TYPE_DEFAULT_CFG; 3412 break; 3413 case 
QED_NVM_IMAGE_NVM_META: 3414 type = NVM_TYPE_NVM_META; 3415 break; 3416 default: 3417 DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n", 3418 image_id); 3419 return -EINVAL; 3420 } 3421 3422 rc = qed_mcp_nvm_info_populate(p_hwfn); 3423 if (rc) 3424 return rc; 3425 3426 for (i = 0; i < p_hwfn->nvm_info.num_images; i++) 3427 if (type == p_hwfn->nvm_info.image_att[i].image_type) 3428 break; 3429 if (i == p_hwfn->nvm_info.num_images) { 3430 DP_VERBOSE(p_hwfn, QED_MSG_STORAGE, 3431 "Failed to find nvram image of type %08x\n", 3432 image_id); 3433 return -ENOENT; 3434 } 3435 3436 p_image_att->start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr; 3437 p_image_att->length = p_hwfn->nvm_info.image_att[i].len; 3438 3439 return 0; 3440 } 3441 3442 int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, 3443 enum qed_nvm_images image_id, 3444 u8 *p_buffer, u32 buffer_len) 3445 { 3446 struct qed_nvm_image_att image_att; 3447 int rc; 3448 3449 memset(p_buffer, 0, buffer_len); 3450 3451 rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att); 3452 if (rc) 3453 return rc; 3454 3455 /* Validate sizes - both the image's and the supplied buffer's */ 3456 if (image_att.length <= 4) { 3457 DP_VERBOSE(p_hwfn, QED_MSG_STORAGE, 3458 "Image [%d] is too small - only %d bytes\n", 3459 image_id, image_att.length); 3460 return -EINVAL; 3461 } 3462 3463 if (image_att.length > buffer_len) { 3464 DP_VERBOSE(p_hwfn, 3465 QED_MSG_STORAGE, 3466 "Image [%d] is too big - %08x bytes where only %08x are available\n", 3467 image_id, image_att.length, buffer_len); 3468 return -ENOMEM; 3469 } 3470 3471 return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr, 3472 p_buffer, image_att.length); 3473 } 3474 3475 static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id) 3476 { 3477 enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID; 3478 3479 switch (res_id) { 3480 case QED_SB: 3481 mfw_res_id = RESOURCE_NUM_SB_E; 3482 break; 3483 case QED_L2_QUEUE: 3484 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E; 3485 break; 3486 case QED_VPORT: 3487 mfw_res_id = RESOURCE_NUM_VPORT_E; 3488 break; 3489 case QED_RSS_ENG: 3490 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E; 3491 break; 3492 case QED_PQ: 3493 mfw_res_id = RESOURCE_NUM_PQ_E; 3494 break; 3495 case QED_RL: 3496 mfw_res_id = RESOURCE_NUM_RL_E; 3497 break; 3498 case QED_MAC: 3499 case QED_VLAN: 3500 /* Each VFC resource can accommodate both a MAC and a VLAN */ 3501 mfw_res_id = RESOURCE_VFC_FILTER_E; 3502 break; 3503 case QED_ILT: 3504 mfw_res_id = RESOURCE_ILT_E; 3505 break; 3506 case QED_LL2_RAM_QUEUE: 3507 mfw_res_id = RESOURCE_LL2_QUEUE_E; 3508 break; 3509 case QED_LL2_CTX_QUEUE: 3510 mfw_res_id = RESOURCE_LL2_CQS_E; 3511 break; 3512 case QED_RDMA_CNQ_RAM: 3513 case QED_CMDQS_CQS: 3514 /* CNQ/CMDQS are the same resource */ 3515 mfw_res_id = RESOURCE_CQS_E; 3516 break; 3517 case QED_RDMA_STATS_QUEUE: 3518 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E; 3519 break; 3520 case QED_BDQ: 3521 mfw_res_id = RESOURCE_BDQ_E; 3522 break; 3523 default: 3524 break; 3525 } 3526 3527 return mfw_res_id; 3528 } 3529 3530 #define QED_RESC_ALLOC_VERSION_MAJOR 2 3531 #define QED_RESC_ALLOC_VERSION_MINOR 0 3532 #define QED_RESC_ALLOC_VERSION \ 3533 ((QED_RESC_ALLOC_VERSION_MAJOR << \ 3534 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \ 3535 (QED_RESC_ALLOC_VERSION_MINOR << \ 3536 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT)) 3537 3538 struct qed_resc_alloc_in_params { 3539 u32 cmd; 3540 enum qed_resources res_id; 3541 u32 resc_max_val; 3542 }; 3543 3544 struct 
qed_resc_alloc_out_params { 3545 u32 mcp_resp; 3546 u32 mcp_param; 3547 u32 resc_num; 3548 u32 resc_start; 3549 u32 vf_resc_num; 3550 u32 vf_resc_start; 3551 u32 flags; 3552 }; 3553 3554 static int 3555 qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn, 3556 struct qed_ptt *p_ptt, 3557 struct qed_resc_alloc_in_params *p_in_params, 3558 struct qed_resc_alloc_out_params *p_out_params) 3559 { 3560 struct qed_mcp_mb_params mb_params; 3561 struct resource_info mfw_resc_info; 3562 int rc; 3563 3564 memset(&mfw_resc_info, 0, sizeof(mfw_resc_info)); 3565 3566 mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id); 3567 if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) { 3568 DP_ERR(p_hwfn, 3569 "Failed to match resource %d [%s] with the MFW resources\n", 3570 p_in_params->res_id, 3571 qed_hw_get_resc_name(p_in_params->res_id)); 3572 return -EINVAL; 3573 } 3574 3575 switch (p_in_params->cmd) { 3576 case DRV_MSG_SET_RESOURCE_VALUE_MSG: 3577 mfw_resc_info.size = p_in_params->resc_max_val; 3578 fallthrough; 3579 case DRV_MSG_GET_RESOURCE_ALLOC_MSG: 3580 break; 3581 default: 3582 DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n", 3583 p_in_params->cmd); 3584 return -EINVAL; 3585 } 3586 3587 memset(&mb_params, 0, sizeof(mb_params)); 3588 mb_params.cmd = p_in_params->cmd; 3589 mb_params.param = QED_RESC_ALLOC_VERSION; 3590 mb_params.p_data_src = &mfw_resc_info; 3591 mb_params.data_src_size = sizeof(mfw_resc_info); 3592 mb_params.p_data_dst = mb_params.p_data_src; 3593 mb_params.data_dst_size = mb_params.data_src_size; 3594 3595 DP_VERBOSE(p_hwfn, 3596 QED_MSG_SP, 3597 "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n", 3598 p_in_params->cmd, 3599 p_in_params->res_id, 3600 qed_hw_get_resc_name(p_in_params->res_id), 3601 QED_MFW_GET_FIELD(mb_params.param, 3602 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR), 3603 QED_MFW_GET_FIELD(mb_params.param, 3604 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR), 3605 p_in_params->resc_max_val); 3606 3607 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 3608 if (rc) 3609 return rc; 3610 3611 p_out_params->mcp_resp = mb_params.mcp_resp; 3612 p_out_params->mcp_param = mb_params.mcp_param; 3613 p_out_params->resc_num = mfw_resc_info.size; 3614 p_out_params->resc_start = mfw_resc_info.offset; 3615 p_out_params->vf_resc_num = mfw_resc_info.vf_size; 3616 p_out_params->vf_resc_start = mfw_resc_info.vf_offset; 3617 p_out_params->flags = mfw_resc_info.flags; 3618 3619 DP_VERBOSE(p_hwfn, 3620 QED_MSG_SP, 3621 "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n", 3622 QED_MFW_GET_FIELD(p_out_params->mcp_param, 3623 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR), 3624 QED_MFW_GET_FIELD(p_out_params->mcp_param, 3625 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR), 3626 p_out_params->resc_num, 3627 p_out_params->resc_start, 3628 p_out_params->vf_resc_num, 3629 p_out_params->vf_resc_start, p_out_params->flags); 3630 3631 return 0; 3632 } 3633 3634 int 3635 qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn, 3636 struct qed_ptt *p_ptt, 3637 enum qed_resources res_id, 3638 u32 resc_max_val, u32 *p_mcp_resp) 3639 { 3640 struct qed_resc_alloc_out_params out_params; 3641 struct qed_resc_alloc_in_params in_params; 3642 int rc; 3643 3644 memset(&in_params, 0, sizeof(in_params)); 3645 in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG; 3646 in_params.res_id = res_id; 3647 in_params.resc_max_val = resc_max_val; 3648 memset(&out_params, 0, sizeof(out_params)); 3649 rc = 
qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params, 3650 &out_params); 3651 if (rc) 3652 return rc; 3653 3654 *p_mcp_resp = out_params.mcp_resp; 3655 3656 return 0; 3657 } 3658 3659 int 3660 qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn, 3661 struct qed_ptt *p_ptt, 3662 enum qed_resources res_id, 3663 u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start) 3664 { 3665 struct qed_resc_alloc_out_params out_params; 3666 struct qed_resc_alloc_in_params in_params; 3667 int rc; 3668 3669 memset(&in_params, 0, sizeof(in_params)); 3670 in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG; 3671 in_params.res_id = res_id; 3672 memset(&out_params, 0, sizeof(out_params)); 3673 rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params, 3674 &out_params); 3675 if (rc) 3676 return rc; 3677 3678 *p_mcp_resp = out_params.mcp_resp; 3679 3680 if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) { 3681 *p_resc_num = out_params.resc_num; 3682 *p_resc_start = out_params.resc_start; 3683 } 3684 3685 return 0; 3686 } 3687 3688 int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 3689 { 3690 u32 mcp_resp, mcp_param; 3691 3692 return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0, 3693 &mcp_resp, &mcp_param); 3694 } 3695 3696 static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn, 3697 struct qed_ptt *p_ptt, 3698 u32 param, u32 *p_mcp_resp, u32 *p_mcp_param) 3699 { 3700 int rc; 3701 3702 rc = qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, 3703 param, p_mcp_resp, p_mcp_param); 3704 if (rc) 3705 return rc; 3706 3707 if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) { 3708 DP_INFO(p_hwfn, 3709 "The resource command is unsupported by the MFW\n"); 3710 return -EINVAL; 3711 } 3712 3713 if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) { 3714 u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE); 3715 3716 DP_NOTICE(p_hwfn, 3717 "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n", 3718 param, opcode); 3719 return -EINVAL; 3720 } 3721 3722 return rc; 3723 } 3724 3725 static int 3726 __qed_mcp_resc_lock(struct qed_hwfn *p_hwfn, 3727 struct qed_ptt *p_ptt, 3728 struct qed_resc_lock_params *p_params) 3729 { 3730 u32 param = 0, mcp_resp, mcp_param; 3731 u8 opcode; 3732 int rc; 3733 3734 switch (p_params->timeout) { 3735 case QED_MCP_RESC_LOCK_TO_DEFAULT: 3736 opcode = RESOURCE_OPCODE_REQ; 3737 p_params->timeout = 0; 3738 break; 3739 case QED_MCP_RESC_LOCK_TO_NONE: 3740 opcode = RESOURCE_OPCODE_REQ_WO_AGING; 3741 p_params->timeout = 0; 3742 break; 3743 default: 3744 opcode = RESOURCE_OPCODE_REQ_W_AGING; 3745 break; 3746 } 3747 3748 QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource); 3749 QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode); 3750 QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout); 3751 3752 DP_VERBOSE(p_hwfn, 3753 QED_MSG_SP, 3754 "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n", 3755 param, p_params->timeout, opcode, p_params->resource); 3756 3757 /* Attempt to acquire the resource */ 3758 rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param); 3759 if (rc) 3760 return rc; 3761 3762 /* Analyze the response */ 3763 p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER); 3764 opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE); 3765 3766 DP_VERBOSE(p_hwfn, 3767 QED_MSG_SP, 3768 "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n", 3769 mcp_param, opcode, p_params->owner); 3770 3771 switch (opcode) { 3772 case RESOURCE_OPCODE_GNT: 
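/* Lock granted by the MFW */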
3773 p_params->b_granted = true; 3774 break; 3775 case RESOURCE_OPCODE_BUSY: 3776 p_params->b_granted = false; 3777 break; 3778 default: 3779 DP_NOTICE(p_hwfn, 3780 "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n", 3781 mcp_param, opcode); 3782 return -EINVAL; 3783 } 3784 3785 return 0; 3786 } 3787 3788 int 3789 qed_mcp_resc_lock(struct qed_hwfn *p_hwfn, 3790 struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params) 3791 { 3792 u32 retry_cnt = 0; 3793 int rc; 3794 3795 do { 3796 /* No need for an interval before the first iteration */ 3797 if (retry_cnt) { 3798 if (p_params->sleep_b4_retry) { 3799 u16 retry_interval_in_ms = 3800 DIV_ROUND_UP(p_params->retry_interval, 3801 1000); 3802 3803 msleep(retry_interval_in_ms); 3804 } else { 3805 udelay(p_params->retry_interval); 3806 } 3807 } 3808 3809 rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params); 3810 if (rc) 3811 return rc; 3812 3813 if (p_params->b_granted) 3814 break; 3815 } while (retry_cnt++ < p_params->retry_num); 3816 3817 return 0; 3818 } 3819 3820 int 3821 qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn, 3822 struct qed_ptt *p_ptt, 3823 struct qed_resc_unlock_params *p_params) 3824 { 3825 u32 param = 0, mcp_resp, mcp_param; 3826 u8 opcode; 3827 int rc; 3828 3829 opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE 3830 : RESOURCE_OPCODE_RELEASE; 3831 QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource); 3832 QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode); 3833 3834 DP_VERBOSE(p_hwfn, QED_MSG_SP, 3835 "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n", 3836 param, opcode, p_params->resource); 3837 3838 /* Attempt to release the resource */ 3839 rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param); 3840 if (rc) 3841 return rc; 3842 3843 /* Analyze the response */ 3844 opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE); 3845 3846 DP_VERBOSE(p_hwfn, QED_MSG_SP, 3847 "Resource unlock response: mcp_param 0x%08x [opcode %d]\n", 3848 mcp_param, opcode); 3849 3850 switch (opcode) { 3851 case RESOURCE_OPCODE_RELEASED_PREVIOUS: 3852 DP_INFO(p_hwfn, 3853 "Resource unlock request for an already released resource [%d]\n", 3854 p_params->resource); 3855 fallthrough; 3856 case RESOURCE_OPCODE_RELEASED: 3857 p_params->b_released = true; 3858 break; 3859 case RESOURCE_OPCODE_WRONG_OWNER: 3860 p_params->b_released = false; 3861 break; 3862 default: 3863 DP_NOTICE(p_hwfn, 3864 "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n", 3865 mcp_param, opcode); 3866 return -EINVAL; 3867 } 3868 3869 return 0; 3870 } 3871 3872 void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, 3873 struct qed_resc_unlock_params *p_unlock, 3874 enum qed_resc_lock 3875 resource, bool b_is_permanent) 3876 { 3877 if (p_lock) { 3878 memset(p_lock, 0, sizeof(*p_lock)); 3879 3880 /* Permanent resources don't require aging, and there's no 3881 * point in trying to acquire them more than once since it's 3882 * unexpected another entity would release them. 
3883 */ 3884 if (b_is_permanent) { 3885 p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE; 3886 } else { 3887 p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT; 3888 p_lock->retry_interval = 3889 QED_MCP_RESC_LOCK_RETRY_VAL_DFLT; 3890 p_lock->sleep_b4_retry = true; 3891 } 3892 3893 p_lock->resource = resource; 3894 } 3895 3896 if (p_unlock) { 3897 memset(p_unlock, 0, sizeof(*p_unlock)); 3898 p_unlock->resource = resource; 3899 } 3900 } 3901 3902 bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn) 3903 { 3904 return !!(p_hwfn->mcp_info->capabilities & 3905 FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ); 3906 } 3907 3908 int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 3909 { 3910 u32 mcp_resp; 3911 int rc; 3912 3913 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT, 3914 0, &mcp_resp, &p_hwfn->mcp_info->capabilities); 3915 if (!rc) 3916 DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE), 3917 "MFW supported features: %08x\n", 3918 p_hwfn->mcp_info->capabilities); 3919 3920 return rc; 3921 } 3922 3923 int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 3924 { 3925 u32 mcp_resp, mcp_param, features; 3926 3927 features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE | 3928 DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK | 3929 DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL; 3930 3931 return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT, 3932 features, &mcp_resp, &mcp_param); 3933 } 3934 3935 int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 3936 { 3937 struct qed_mcp_mb_params mb_params = {0}; 3938 struct qed_dev *cdev = p_hwfn->cdev; 3939 u8 fir_valid, l2_valid; 3940 int rc; 3941 3942 mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG; 3943 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 3944 if (rc) 3945 return rc; 3946 3947 if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) { 3948 DP_INFO(p_hwfn, 3949 "The get_engine_config command is unsupported by the MFW\n"); 3950 return -EOPNOTSUPP; 3951 } 3952 3953 fir_valid = QED_MFW_GET_FIELD(mb_params.mcp_param, 3954 FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID); 3955 if (fir_valid) 3956 cdev->fir_affin = 3957 QED_MFW_GET_FIELD(mb_params.mcp_param, 3958 FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE); 3959 3960 l2_valid = QED_MFW_GET_FIELD(mb_params.mcp_param, 3961 FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID); 3962 if (l2_valid) 3963 cdev->l2_affin_hint = 3964 QED_MFW_GET_FIELD(mb_params.mcp_param, 3965 FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE); 3966 3967 DP_INFO(p_hwfn, 3968 "Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n", 3969 fir_valid, cdev->fir_affin, l2_valid, cdev->l2_affin_hint); 3970 3971 return 0; 3972 } 3973 3974 int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 3975 { 3976 struct qed_mcp_mb_params mb_params = {0}; 3977 struct qed_dev *cdev = p_hwfn->cdev; 3978 int rc; 3979 3980 mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP; 3981 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); 3982 if (rc) 3983 return rc; 3984 3985 if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) { 3986 DP_INFO(p_hwfn, 3987 "The get_ppfid_bitmap command is unsupported by the MFW\n"); 3988 return -EOPNOTSUPP; 3989 } 3990 3991 cdev->ppfid_bitmap = QED_MFW_GET_FIELD(mb_params.mcp_param, 3992 FW_MB_PARAM_PPFID_BITMAP); 3993 3994 DP_VERBOSE(p_hwfn, QED_MSG_SP, "PPFID bitmap 0x%hhx\n", 3995 cdev->ppfid_bitmap); 3996 3997 return 0; 3998 } 3999 4000 int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 4001 u16 option_id, u8 

int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
			u32 *p_len)
{
	u32 mb_param = 0, resp, param;
	int rc;

	QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
	if (flags & QED_NVM_CFG_OPTION_INIT)
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
	if (flags & QED_NVM_CFG_OPTION_FREE)
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
	if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
				  entity_id);
	}

	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
				DRV_MSG_CODE_GET_NVM_CFG_OPTION,
				mb_param, &resp, &param, p_len,
				(u32 *)p_buf, false);

	return rc;
}

int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
			u32 len)
{
	u32 mb_param = 0, resp, param;

	QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
	if (flags & QED_NVM_CFG_OPTION_ALL)
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_ALL, 1);
	if (flags & QED_NVM_CFG_OPTION_INIT)
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
	if (flags & QED_NVM_CFG_OPTION_COMMIT)
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT, 1);
	if (flags & QED_NVM_CFG_OPTION_FREE)
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
	if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
				  entity_id);
	}

	return qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
				  DRV_MSG_CODE_SET_NVM_CFG_OPTION,
				  mb_param, &resp, &param, len, (u32 *)p_buf);
}
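
/* Usage sketch (illustrative only): read a single NVM config option into a
 * local buffer. 'option_id' and the buffer size are placeholders here; real
 * callers pass the option id defined for their NVM layout, and may OR in
 * QED_NVM_CFG_OPTION_* flags (e.g. ENTITY_SEL plus an entity id) to select a
 * specific instance of the option.
 *
 *	u8 buf[MCP_DRV_NVM_BUF_LEN];
 *	u32 len = sizeof(buf);
 *	int rc;
 *
 *	rc = qed_mcp_nvm_get_cfg(p_hwfn, p_ptt, option_id, 0, 0, buf, &len);
 *	if (!rc)
 *		DP_VERBOSE(p_hwfn, QED_MSG_SP,
 *			   "NVM cfg option read, len %u\n", len);
 */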

#define QED_MCP_DBG_DATA_MAX_SIZE		MCP_DRV_NVM_BUF_LEN
#define QED_MCP_DBG_DATA_MAX_HEADER_SIZE	sizeof(u32)
#define QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE \
	(QED_MCP_DBG_DATA_MAX_SIZE - QED_MCP_DBG_DATA_MAX_HEADER_SIZE)

static int
__qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 *p_buf, u8 size)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	if (size > QED_MCP_DBG_DATA_MAX_SIZE) {
		DP_ERR(p_hwfn,
		       "Debug data size is %d while it should not exceed %d\n",
		       size, QED_MCP_DBG_DATA_MAX_SIZE);
		return -EINVAL;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_DEBUG_DATA_SEND;
	SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE, size);
	mb_params.p_data_src = p_buf;
	mb_params.data_src_size = size;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The DEBUG_DATA_SEND command is unsupported by the MFW\n");
		return -EOPNOTSUPP;
	} else if (mb_params.mcp_resp == (u32)FW_MSG_CODE_DEBUG_NOT_ENABLED) {
		DP_INFO(p_hwfn, "The DEBUG_DATA_SEND command is not enabled\n");
		return -EBUSY;
	} else if (mb_params.mcp_resp != (u32)FW_MSG_CODE_DEBUG_DATA_SEND_OK) {
		DP_NOTICE(p_hwfn,
			  "Failed to send debug data to the MFW [resp 0x%08x]\n",
			  mb_params.mcp_resp);
		return -EINVAL;
	}

	return 0;
}

enum qed_mcp_dbg_data_type {
	QED_MCP_DBG_DATA_TYPE_RAW,
};

/* Header format: [31:28] PFID, [27:20] flags, [19:12] type, [11:0] S/N */
#define QED_MCP_DBG_DATA_HDR_SN_OFFSET		0
#define QED_MCP_DBG_DATA_HDR_SN_MASK		0x00000fff
#define QED_MCP_DBG_DATA_HDR_TYPE_OFFSET	12
#define QED_MCP_DBG_DATA_HDR_TYPE_MASK		0x000ff000
#define QED_MCP_DBG_DATA_HDR_FLAGS_OFFSET	20
#define QED_MCP_DBG_DATA_HDR_FLAGS_MASK		0x0ff00000
#define QED_MCP_DBG_DATA_HDR_PF_OFFSET		28
#define QED_MCP_DBG_DATA_HDR_PF_MASK		0xf0000000

#define QED_MCP_DBG_DATA_HDR_FLAGS_FIRST	0x1
#define QED_MCP_DBG_DATA_HDR_FLAGS_LAST		0x2

static int
qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			enum qed_mcp_dbg_data_type type, u8 *p_buf, u32 size)
{
	u8 raw_data[QED_MCP_DBG_DATA_MAX_SIZE], *p_tmp_buf = p_buf;
	u32 tmp_size = size, *p_header, *p_payload;
	u8 flags = 0;
	u16 seq;
	int rc;

	p_header = (u32 *)raw_data;
	p_payload = (u32 *)(raw_data + QED_MCP_DBG_DATA_MAX_HEADER_SIZE);

	seq = (u16)atomic_inc_return(&p_hwfn->mcp_info->dbg_data_seq);

	/* First chunk is marked as 'first' */
	flags |= QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;

	*p_header = 0;
	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_SN, seq);
	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_TYPE, type);
	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_PF, p_hwfn->abs_pf_id);

	while (tmp_size > QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE) {
		memcpy(p_payload, p_tmp_buf, QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE);
		rc = __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
					       QED_MCP_DBG_DATA_MAX_SIZE);
		if (rc)
			return rc;

		/* Clear the 'first' marking after sending the first chunk */
		if (p_tmp_buf == p_buf) {
			flags &= ~QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
			SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS,
				      flags);
		}

		p_tmp_buf += QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
		tmp_size -= QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
	}

	/* Last chunk is marked as 'last' */
	flags |= QED_MCP_DBG_DATA_HDR_FLAGS_LAST;
	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
	memcpy(p_payload, p_tmp_buf, tmp_size);

	/* Casting the left size to u8 is ok since at this point it is <= 32 */
	return __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
					 (u8)(QED_MCP_DBG_DATA_MAX_HEADER_SIZE +
					      tmp_size));
}

int
qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, u8 *p_buf, u32 size)
{
	return qed_mcp_send_debug_data(p_hwfn, p_ptt,
				       QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size);
}

bool qed_mcp_is_esl_supported(struct qed_hwfn *p_hwfn)
{
	return !!(p_hwfn->mcp_info->capabilities &
		  FW_MB_PARAM_FEATURE_SUPPORT_ENHANCED_SYS_LCK);
}

int qed_mcp_get_esl_status(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool *active)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MANAGEMENT_STATUS, 0,
			 &resp, &param);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to send ESL command, rc = %d\n", rc);
		return rc;
	}

	*active = !!(param & FW_MB_PARAM_MANAGEMENT_STATUS_LOCKDOWN_ENABLED);

	return 0;
}
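
/* Usage sketch (illustrative only): check whether the MFW reports that
 * enhanced system lockdown (ESL) is currently active. Callers would normally
 * gate this on qed_mcp_is_esl_supported(), since an MFW without the
 * capability does not implement the management-status command.
 *
 *	bool esl_active = false;
 *
 *	if (qed_mcp_is_esl_supported(p_hwfn) &&
 *	    !qed_mcp_get_esl_status(p_hwfn, p_ptt, &esl_active))
 *		DP_VERBOSE(p_hwfn, QED_MSG_SP,
 *			   "ESL active: %d\n", esl_active);
 */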