1 /* 2 * Copyright (c) 2018-2019 Cavium, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include "bcm_osal.h" 33 #include "ecore.h" 34 #include "reg_addr.h" 35 #include "ecore_sriov.h" 36 #include "ecore_status.h" 37 #include "ecore_hw.h" 38 #include "ecore_hw_defs.h" 39 #include "ecore_int.h" 40 #include "ecore_hsi_eth.h" 41 #include "ecore_l2.h" 42 #include "ecore_vfpf_if.h" 43 #include "ecore_rt_defs.h" 44 #include "ecore_init_ops.h" 45 #include "pcics_reg_driver.h" 46 #include "ecore_gtt_reg_addr.h" 47 #include "ecore_iro.h" 48 #include "ecore_mcp.h" 49 #include "ecore_cxt.h" 50 #include "ecore_vf.h" 51 #include "ecore_init_fw_funcs.h" 52 #include "ecore_sp_commands.h" 53 54 static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn, 55 u8 opcode, 56 __le16 echo, 57 union event_ring_data *data, 58 u8 fw_return_code); 59 60 const char *ecore_channel_tlvs_string[] = { 61 "CHANNEL_TLV_NONE", /* ends tlv sequence */ 62 "CHANNEL_TLV_ACQUIRE", 63 "CHANNEL_TLV_VPORT_START", 64 "CHANNEL_TLV_VPORT_UPDATE", 65 "CHANNEL_TLV_VPORT_TEARDOWN", 66 "CHANNEL_TLV_START_RXQ", 67 "CHANNEL_TLV_START_TXQ", 68 "CHANNEL_TLV_STOP_RXQ", 69 "CHANNEL_TLV_STOP_TXQ", 70 "CHANNEL_TLV_UPDATE_RXQ", 71 "CHANNEL_TLV_INT_CLEANUP", 72 "CHANNEL_TLV_CLOSE", 73 "CHANNEL_TLV_RELEASE", 74 "CHANNEL_TLV_LIST_END", 75 "CHANNEL_TLV_UCAST_FILTER", 76 "CHANNEL_TLV_VPORT_UPDATE_ACTIVATE", 77 "CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH", 78 "CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP", 79 "CHANNEL_TLV_VPORT_UPDATE_MCAST", 80 "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM", 81 "CHANNEL_TLV_VPORT_UPDATE_RSS", 82 "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN", 83 "CHANNEL_TLV_VPORT_UPDATE_SGE_TPA", 84 "CHANNEL_TLV_UPDATE_TUNN_PARAM", 85 "CHANNEL_TLV_COALESCE_UPDATE", 86 "CHANNEL_TLV_QID", 87 "CHANNEL_TLV_COALESCE_READ", 88 "CHANNEL_TLV_MAX" 89 }; 90 91 static u8 ecore_vf_calculate_legacy(struct ecore_vf_info *p_vf) 92 { 93 u8 legacy = 0; 94 95 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == 96 ETH_HSI_VER_NO_PKT_LEN_TUNN) 97 legacy |= ECORE_QCID_LEGACY_VF_RX_PROD; 98 99 if (!(p_vf->acquire.vfdev_info.capabilities & 100 VFPF_ACQUIRE_CAP_QUEUE_QIDS)) 101 legacy |= 
ECORE_QCID_LEGACY_VF_CID; 102 103 return legacy; 104 } 105 106 /* IOV ramrods */ 107 static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn, 108 struct ecore_vf_info *p_vf) 109 { 110 struct vf_start_ramrod_data *p_ramrod = OSAL_NULL; 111 struct ecore_spq_entry *p_ent = OSAL_NULL; 112 struct ecore_sp_init_data init_data; 113 enum _ecore_status_t rc = ECORE_NOTIMPL; 114 u8 fp_minor; 115 116 /* Get SPQ entry */ 117 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 118 init_data.cid = ecore_spq_get_cid(p_hwfn); 119 init_data.opaque_fid = p_vf->opaque_fid; 120 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 121 122 rc = ecore_sp_init_request(p_hwfn, &p_ent, 123 COMMON_RAMROD_VF_START, 124 PROTOCOLID_COMMON, &init_data); 125 if (rc != ECORE_SUCCESS) 126 return rc; 127 128 p_ramrod = &p_ent->ramrod.vf_start; 129 130 p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID); 131 p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid); 132 133 switch (p_hwfn->hw_info.personality) { 134 case ECORE_PCI_ETH: 135 p_ramrod->personality = PERSONALITY_ETH; 136 break; 137 case ECORE_PCI_ETH_ROCE: 138 case ECORE_PCI_ETH_IWARP: 139 p_ramrod->personality = PERSONALITY_RDMA_AND_ETH; 140 break; 141 default: 142 DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n", 143 p_hwfn->hw_info.personality); 144 return ECORE_INVAL; 145 } 146 147 fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor; 148 if (fp_minor > ETH_HSI_VER_MINOR && 149 fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) { 150 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 151 "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n", 152 p_vf->abs_vf_id, 153 ETH_HSI_VER_MAJOR, fp_minor, 154 ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); 155 fp_minor = ETH_HSI_VER_MINOR; 156 } 157 158 p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR; 159 p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor; 160 161 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 162 "VF[%d] - Starting using HSI %02x.%02x\n", 163 p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor); 164 165 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 166 } 167 168 static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn, 169 u32 concrete_vfid, 170 u16 opaque_vfid) 171 { 172 struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL; 173 struct ecore_spq_entry *p_ent = OSAL_NULL; 174 struct ecore_sp_init_data init_data; 175 enum _ecore_status_t rc = ECORE_NOTIMPL; 176 177 /* Get SPQ entry */ 178 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 179 init_data.cid = ecore_spq_get_cid(p_hwfn); 180 init_data.opaque_fid = opaque_vfid; 181 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 182 183 rc = ecore_sp_init_request(p_hwfn, &p_ent, 184 COMMON_RAMROD_VF_STOP, 185 PROTOCOLID_COMMON, &init_data); 186 if (rc != ECORE_SUCCESS) 187 return rc; 188 189 p_ramrod = &p_ent->ramrod.vf_stop; 190 191 p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID); 192 193 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 194 } 195 196 bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id, 197 bool b_enabled_only, bool b_non_malicious) 198 { 199 if (!p_hwfn->pf_iov_info) { 200 DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n"); 201 return false; 202 } 203 204 if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) || 205 (rel_vf_id < 0)) 206 return false; 207 208 if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) && 209 b_enabled_only) 210 return false; 211 212 if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) && 213 b_non_malicious) 
214 return false; 215 216 return true; 217 } 218 219 struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn, 220 u16 relative_vf_id, 221 bool b_enabled_only) 222 { 223 struct ecore_vf_info *vf = OSAL_NULL; 224 225 if (!p_hwfn->pf_iov_info) { 226 DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n"); 227 return OSAL_NULL; 228 } 229 230 if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id, 231 b_enabled_only, false)) 232 vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id]; 233 else 234 DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n", 235 relative_vf_id); 236 237 return vf; 238 } 239 240 static struct ecore_queue_cid * 241 ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue) 242 { 243 int i; 244 245 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { 246 if (p_queue->cids[i].p_cid && 247 !p_queue->cids[i].b_is_tx) 248 return p_queue->cids[i].p_cid; 249 } 250 251 return OSAL_NULL; 252 } 253 254 enum ecore_iov_validate_q_mode { 255 ECORE_IOV_VALIDATE_Q_NA, 256 ECORE_IOV_VALIDATE_Q_ENABLE, 257 ECORE_IOV_VALIDATE_Q_DISABLE, 258 }; 259 260 static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf, 261 u16 qid, 262 enum ecore_iov_validate_q_mode mode, 263 bool b_is_tx) 264 { 265 int i; 266 267 if (mode == ECORE_IOV_VALIDATE_Q_NA) 268 return true; 269 270 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { 271 struct ecore_vf_queue_cid *p_qcid; 272 273 p_qcid = &p_vf->vf_queues[qid].cids[i]; 274 275 if (p_qcid->p_cid == OSAL_NULL) 276 continue; 277 278 if (p_qcid->b_is_tx != b_is_tx) 279 continue; 280 281 /* Found. It's enabled. */ 282 return (mode == ECORE_IOV_VALIDATE_Q_ENABLE); 283 } 284 285 /* In case we haven't found any valid cid, then its disabled */ 286 return (mode == ECORE_IOV_VALIDATE_Q_DISABLE); 287 } 288 289 static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn, 290 struct ecore_vf_info *p_vf, 291 u16 rx_qid, 292 enum ecore_iov_validate_q_mode mode) 293 { 294 if (rx_qid >= p_vf->num_rxqs) { 295 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 296 "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n", 297 p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs); 298 return false; 299 } 300 301 return ecore_iov_validate_queue_mode(p_vf, rx_qid, mode, false); 302 } 303 304 static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn, 305 struct ecore_vf_info *p_vf, 306 u16 tx_qid, 307 enum ecore_iov_validate_q_mode mode) 308 { 309 if (tx_qid >= p_vf->num_txqs) { 310 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 311 "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n", 312 p_vf->abs_vf_id, tx_qid, p_vf->num_txqs); 313 return false; 314 } 315 316 return ecore_iov_validate_queue_mode(p_vf, tx_qid, mode, true); 317 } 318 319 static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn, 320 struct ecore_vf_info *p_vf, 321 u16 sb_idx) 322 { 323 int i; 324 325 for (i = 0; i < p_vf->num_sbs; i++) 326 if (p_vf->igu_sbs[i] == sb_idx) 327 return true; 328 329 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 330 "VF[0%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n", 331 p_vf->abs_vf_id, sb_idx, p_vf->num_sbs); 332 333 return false; 334 } 335 336 /* Is there at least 1 queue open? 
 */
static bool ecore_iov_validate_active_rxq(struct ecore_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_rxqs; i++)
		if (ecore_iov_validate_queue_mode(p_vf, i,
						  ECORE_IOV_VALIDATE_Q_ENABLE,
						  false))
			return true;

	return false;
}

static bool ecore_iov_validate_active_txq(struct ecore_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_txqs; i++)
		if (ecore_iov_validate_queue_mode(p_vf, i,
						  ECORE_IOV_VALIDATE_Q_ENABLE,
						  true))
			return true;

	return false;
}

enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
						int vfid,
						struct ecore_ptt *p_ptt)
{
	struct ecore_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct ecore_dmae_params params;
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf)
		return ECORE_INVAL;

	/* TODO - check VF is in a state where it can accept message */
	if (!p_vf->vf_bulletin)
		return ECORE_INVAL;

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = OSAL_CRC32(0, (u8 *)p_bulletin + crc_size,
				     p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id,
		   p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	OSAL_MEMSET(&params, 0, sizeof(params));
	params.flags = ECORE_DMAE_FLAG_VF_DST;
	params.dst_vfid = p_vf->abs_vf_id;
	return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				    p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				    &params);
}

static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
{
	struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_CTRL,
				  &iov->ctrl);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_TOTAL_VF,
				  &iov->total_vfs);
	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_INITIAL_VF,
				  &iov->initial_vfs);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_NUM_VF,
				  &iov->num_vfs);
	if (iov->num_vfs) {
		/* @@@TODO - in future we might want to add an OSAL here to
		 * allow each OS to decide on its own how to act.
		 */
		DP_VERBOSE(p_dev, ECORE_MSG_IOV,
			   "Number of VFs is already set to a non-zero value.
Ignoring PCI configuration value\n"); 427 iov->num_vfs = 0; 428 } 429 430 OSAL_PCI_READ_CONFIG_WORD(p_dev, 431 pos + PCI_SRIOV_VF_OFFSET, 432 &iov->offset); 433 434 OSAL_PCI_READ_CONFIG_WORD(p_dev, 435 pos + PCI_SRIOV_VF_STRIDE, 436 &iov->stride); 437 438 OSAL_PCI_READ_CONFIG_WORD(p_dev, 439 pos + PCI_SRIOV_VF_DID, 440 &iov->vf_device_id); 441 442 OSAL_PCI_READ_CONFIG_DWORD(p_dev, 443 pos + PCI_SRIOV_SUP_PGSIZE, 444 &iov->pgsz); 445 446 OSAL_PCI_READ_CONFIG_DWORD(p_dev, 447 pos + PCI_SRIOV_CAP, 448 &iov->cap); 449 450 OSAL_PCI_READ_CONFIG_BYTE(p_dev, 451 pos + PCI_SRIOV_FUNC_LINK, 452 &iov->link); 453 454 DP_VERBOSE(p_dev, ECORE_MSG_IOV, 455 "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n", 456 iov->nres, iov->cap, iov->ctrl, 457 iov->total_vfs, iov->initial_vfs, iov->nr_virtfn, 458 iov->offset, iov->stride, iov->pgsz); 459 460 /* Some sanity checks */ 461 if (iov->num_vfs > NUM_OF_VFS(p_dev) || 462 iov->total_vfs > NUM_OF_VFS(p_dev)) { 463 /* This can happen only due to a bug. In this case we set 464 * num_vfs to zero to avoid memory corruption in the code that 465 * assumes max number of vfs 466 */ 467 DP_NOTICE(p_dev, false, "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n", 468 iov->num_vfs); 469 470 iov->num_vfs = 0; 471 iov->total_vfs = 0; 472 } 473 474 return ECORE_SUCCESS; 475 } 476 477 static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn) 478 { 479 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info; 480 struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info; 481 struct ecore_bulletin_content *p_bulletin_virt; 482 dma_addr_t req_p, rply_p, bulletin_p; 483 union pfvf_tlvs *p_reply_virt_addr; 484 union vfpf_tlvs *p_req_virt_addr; 485 u8 idx = 0; 486 487 OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array)); 488 489 p_req_virt_addr = p_iov_info->mbx_msg_virt_addr; 490 req_p = p_iov_info->mbx_msg_phys_addr; 491 p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr; 492 rply_p = p_iov_info->mbx_reply_phys_addr; 493 p_bulletin_virt = p_iov_info->p_bulletins; 494 bulletin_p = p_iov_info->bulletins_phys; 495 if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) { 496 DP_ERR(p_hwfn, "ecore_iov_setup_vfdb called without allocating mem first\n"); 497 return; 498 } 499 500 for (idx = 0; idx < p_iov->total_vfs; idx++) { 501 struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx]; 502 u32 concrete; 503 504 vf->vf_mbx.req_virt = p_req_virt_addr + idx; 505 vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs); 506 vf->vf_mbx.reply_virt = p_reply_virt_addr + idx; 507 vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs); 508 509 #ifdef CONFIG_ECORE_SW_CHANNEL 510 vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs); 511 vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST; 512 #endif 513 vf->state = VF_STOPPED; 514 vf->b_init = false; 515 516 vf->bulletin.phys = idx * 517 sizeof(struct ecore_bulletin_content) + 518 bulletin_p; 519 vf->bulletin.p_virt = p_bulletin_virt + idx; 520 vf->bulletin.size = sizeof(struct ecore_bulletin_content); 521 522 vf->relative_vf_id = idx; 523 vf->abs_vf_id = idx + p_iov->first_vf_in_pf; 524 concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id); 525 vf->concrete_fid = concrete; 526 /* TODO - need to devise a better way of getting opaque */ 527 vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) | 528 (vf->abs_vf_id << 8); 529 530 vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS; 531 vf->num_vlan_filters = 
ECORE_ETH_VF_NUM_VLAN_FILTERS; 532 } 533 } 534 535 static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn) 536 { 537 struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info; 538 void **p_v_addr; 539 u16 num_vfs = 0; 540 541 num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs; 542 543 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 544 "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs); 545 546 /* Allocate PF Mailbox buffer (per-VF) */ 547 p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs; 548 p_v_addr = &p_iov_info->mbx_msg_virt_addr; 549 *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, 550 &p_iov_info->mbx_msg_phys_addr, 551 p_iov_info->mbx_msg_size); 552 if (!*p_v_addr) 553 return ECORE_NOMEM; 554 555 /* Allocate PF Mailbox Reply buffer (per-VF) */ 556 p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs; 557 p_v_addr = &p_iov_info->mbx_reply_virt_addr; 558 *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, 559 &p_iov_info->mbx_reply_phys_addr, 560 p_iov_info->mbx_reply_size); 561 if (!*p_v_addr) 562 return ECORE_NOMEM; 563 564 p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) * 565 num_vfs; 566 p_v_addr = &p_iov_info->p_bulletins; 567 *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, 568 &p_iov_info->bulletins_phys, 569 p_iov_info->bulletins_size); 570 if (!*p_v_addr) 571 return ECORE_NOMEM; 572 573 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 574 "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n", 575 p_iov_info->mbx_msg_virt_addr, 576 (unsigned long long)p_iov_info->mbx_msg_phys_addr, 577 p_iov_info->mbx_reply_virt_addr, 578 (unsigned long long)p_iov_info->mbx_reply_phys_addr, 579 p_iov_info->p_bulletins, 580 (unsigned long long)p_iov_info->bulletins_phys); 581 582 return ECORE_SUCCESS; 583 } 584 585 static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn) 586 { 587 struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info; 588 589 if (p_hwfn->pf_iov_info->mbx_msg_virt_addr) 590 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, 591 p_iov_info->mbx_msg_virt_addr, 592 p_iov_info->mbx_msg_phys_addr, 593 p_iov_info->mbx_msg_size); 594 595 if (p_hwfn->pf_iov_info->mbx_reply_virt_addr) 596 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, 597 p_iov_info->mbx_reply_virt_addr, 598 p_iov_info->mbx_reply_phys_addr, 599 p_iov_info->mbx_reply_size); 600 601 if (p_iov_info->p_bulletins) 602 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, 603 p_iov_info->p_bulletins, 604 p_iov_info->bulletins_phys, 605 p_iov_info->bulletins_size); 606 } 607 608 enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn) 609 { 610 struct ecore_pf_iov *p_sriov; 611 612 if (!IS_PF_SRIOV(p_hwfn)) { 613 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 614 "No SR-IOV - no need for IOV db\n"); 615 return ECORE_SUCCESS; 616 } 617 618 p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov)); 619 if (!p_sriov) { 620 DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sriov'\n"); 621 return ECORE_NOMEM; 622 } 623 624 p_hwfn->pf_iov_info = p_sriov; 625 626 ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON, 627 ecore_sriov_eqe_event); 628 629 return ecore_iov_allocate_vfdb(p_hwfn); 630 } 631 632 void ecore_iov_setup(struct ecore_hwfn *p_hwfn) 633 { 634 if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn)) 635 return; 636 637 ecore_iov_setup_vfdb(p_hwfn); 638 } 639 640 void ecore_iov_free(struct ecore_hwfn *p_hwfn) 641 { 642 ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON); 643 644 if (IS_PF_SRIOV_ALLOC(p_hwfn)) { 645 ecore_iov_free_vfdb(p_hwfn); 
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
		p_hwfn->pf_iov_info = OSAL_NULL;
	}
}

void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
{
	OSAL_FREE(p_dev, p_dev->p_iov_info);
	p_dev->p_iov_info = OSAL_NULL;
}

enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	int pos;
	enum _ecore_status_t rc;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	/* Learn the PCI configuration */
	pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
					   PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
		return ECORE_SUCCESS;
	}

	/* Allocate a new struct for IOV information */
	/* TODO - can change to VALLOC when it's available */
	p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
					sizeof(*p_dev->p_iov_info));
	if (!p_dev->p_iov_info) {
		DP_NOTICE(p_hwfn, false,
			  "Can't support IOV due to lack of memory\n");
		return ECORE_NOMEM;
	}
	p_dev->p_iov_info->pos = pos;

	rc = ecore_iov_pci_cfg_info(p_dev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!p_dev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		OSAL_FREE(p_dev, p_dev->p_iov_info);
		p_dev->p_iov_info = OSAL_NULL;
		return ECORE_SUCCESS;
	}

	/* First VF index based on offset is tricky:
	 *  - If ARI is supported [likely], offset - (16 - pf_id) would
	 *    provide the number for eng0. 2nd engine VFs would begin
	 *    after the first engine's VFs.
	 *  - If !ARI, VFs would start on the next device,
	 *    so offset - (256 - pf_id) would provide the number.
	 * Utilize the fact that (256 - pf_id) is achieved only by the latter
	 * to differentiate between the two.
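	 * Illustration only (hypothetical values, not taken from hardware):
	 * with ARI, a PF whose abs_pf_id is 2 and whose SR-IOV offset reads 30
	 * lands in the first branch below and gets
	 * first_vf_in_pf = 30 + 2 - 16 = 16 on engine 0; a PF on the second
	 * path additionally subtracts MAX_NUM_VFS_BB.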
709 */ 710 711 if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) { 712 u32 first = p_hwfn->p_dev->p_iov_info->offset + 713 p_hwfn->abs_pf_id - 16; 714 715 p_dev->p_iov_info->first_vf_in_pf = first; 716 717 if (ECORE_PATH_ID(p_hwfn)) 718 p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB; 719 } else { 720 u32 first = p_hwfn->p_dev->p_iov_info->offset + 721 p_hwfn->abs_pf_id - 256; 722 723 p_dev->p_iov_info->first_vf_in_pf = first; 724 } 725 726 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 727 "First VF in hwfn 0x%08x\n", 728 p_dev->p_iov_info->first_vf_in_pf); 729 730 return ECORE_SUCCESS; 731 } 732 733 static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid, 734 bool b_fail_malicious) 735 { 736 /* Check PF supports sriov */ 737 if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) || 738 !IS_PF_SRIOV_ALLOC(p_hwfn)) 739 return false; 740 741 /* Check VF validity */ 742 if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious)) 743 return false; 744 745 return true; 746 } 747 748 bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid) 749 { 750 return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true); 751 } 752 753 void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev, 754 u16 rel_vf_id, 755 u8 to_disable) 756 { 757 struct ecore_vf_info *vf; 758 int i; 759 760 for_each_hwfn(p_dev, i) { 761 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 762 763 vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false); 764 if (!vf) 765 continue; 766 767 vf->to_disable = to_disable; 768 } 769 } 770 771 void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev, 772 u8 to_disable) 773 { 774 u16 i; 775 776 if (!IS_ECORE_SRIOV(p_dev)) 777 return; 778 779 for (i = 0; i < p_dev->p_iov_info->total_vfs; i++) 780 ecore_iov_set_vf_to_disable(p_dev, i, to_disable); 781 } 782 783 #ifndef LINUX_REMOVE 784 /* @@@TBD Consider taking outside of ecore... 
*/ 785 enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn, 786 u16 vf_id, 787 void *ctx) 788 { 789 enum _ecore_status_t rc = ECORE_SUCCESS; 790 struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true); 791 792 if (vf != OSAL_NULL) { 793 vf->ctx = ctx; 794 #ifdef CONFIG_ECORE_SW_CHANNEL 795 vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST; 796 #endif 797 } else { 798 rc = ECORE_UNKNOWN_ERROR; 799 } 800 return rc; 801 } 802 #endif 803 804 static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn, 805 struct ecore_ptt *p_ptt, 806 u8 abs_vfid) 807 { 808 ecore_wr(p_hwfn, p_ptt, 809 PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4, 810 1 << (abs_vfid & 0x1f)); 811 } 812 813 static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn, 814 struct ecore_ptt *p_ptt, 815 struct ecore_vf_info *vf) 816 { 817 int i; 818 819 /* Set VF masks and configuration - pretend */ 820 ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); 821 822 ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0); 823 824 /* unpretend */ 825 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); 826 827 /* iterate over all queues, clear sb consumer */ 828 for (i = 0; i < vf->num_sbs; i++) 829 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, 830 vf->igu_sbs[i], 831 vf->opaque_fid, true); 832 } 833 834 static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn, 835 struct ecore_ptt *p_ptt, 836 struct ecore_vf_info *vf, 837 bool enable) 838 { 839 u32 igu_vf_conf; 840 841 ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); 842 843 igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION); 844 845 if (enable) { 846 igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN; 847 } else { 848 igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN; 849 } 850 851 ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf); 852 853 /* unpretend */ 854 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); 855 } 856 857 static enum _ecore_status_t 858 ecore_iov_enable_vf_access_msix(struct ecore_hwfn *p_hwfn, 859 struct ecore_ptt *p_ptt, 860 u8 abs_vf_id, 861 u8 num_sbs) 862 { 863 u8 current_max = 0; 864 int i; 865 866 /* If client overrides this, don't do anything */ 867 if (p_hwfn->p_dev->b_dont_override_vf_msix) 868 return ECORE_SUCCESS; 869 870 /* For AH onward, configuration is per-PF. Find maximum of all 871 * the currently enabled child VFs, and set the number to be that. 872 */ 873 if (!ECORE_IS_BB(p_hwfn->p_dev)) { 874 ecore_for_each_vf(p_hwfn, i) { 875 struct ecore_vf_info *p_vf; 876 877 p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)i, true); 878 if (!p_vf) 879 continue; 880 881 current_max = OSAL_MAX_T(u8, current_max, 882 p_vf->num_sbs); 883 } 884 } 885 886 if (num_sbs > current_max) 887 return ecore_mcp_config_vf_msix(p_hwfn, p_ptt, 888 abs_vf_id, num_sbs); 889 890 return ECORE_SUCCESS; 891 } 892 893 static enum _ecore_status_t ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn, 894 struct ecore_ptt *p_ptt, 895 struct ecore_vf_info *vf) 896 { 897 u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN; 898 enum _ecore_status_t rc = ECORE_SUCCESS; 899 900 /* It's possible VF was previously considered malicious - 901 * clear the indication even if we're only going to disable VF. 
902 */ 903 vf->b_malicious = false; 904 905 if (vf->to_disable) 906 return ECORE_SUCCESS; 907 908 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Enable internal access for vf %x [abs %x]\n", 909 vf->abs_vf_id, ECORE_VF_ABS_ID(p_hwfn, vf)); 910 911 ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt, 912 ECORE_VF_ABS_ID(p_hwfn, vf)); 913 914 ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf); 915 916 rc = ecore_iov_enable_vf_access_msix(p_hwfn, p_ptt, 917 vf->abs_vf_id, vf->num_sbs); 918 if (rc != ECORE_SUCCESS) 919 return rc; 920 921 ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); 922 923 SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id); 924 STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf); 925 926 ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id, 927 p_hwfn->hw_info.hw_mode); 928 929 /* unpretend */ 930 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); 931 932 vf->state = VF_FREE; 933 934 return rc; 935 } 936 937 /** 938 * @brief ecore_iov_config_perm_table - configure the permission 939 * zone table. 940 * In E4, queue zone permission table size is 320x9. There 941 * are 320 VF queues for single engine device (256 for dual 942 * engine device), and each entry has the following format: 943 * {Valid, VF[7:0]} 944 * @param p_hwfn 945 * @param p_ptt 946 * @param vf 947 * @param enable 948 */ 949 static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn, 950 struct ecore_ptt *p_ptt, 951 struct ecore_vf_info *vf, 952 u8 enable) 953 { 954 u32 reg_addr, val; 955 u16 qzone_id = 0; 956 int qid; 957 958 for (qid = 0; qid < vf->num_rxqs; qid++) { 959 ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid, 960 &qzone_id); 961 962 reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4; 963 val = enable ? (vf->abs_vf_id | (1 << 8)) : 0; 964 ecore_wr(p_hwfn, p_ptt, reg_addr, val); 965 } 966 } 967 968 static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn, 969 struct ecore_ptt *p_ptt, 970 struct ecore_vf_info *vf) 971 { 972 /* Reset vf in IGU - interrupts are still disabled */ 973 ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf); 974 975 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1); 976 977 /* Permission Table */ 978 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true); 979 } 980 981 static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn, 982 struct ecore_ptt *p_ptt, 983 struct ecore_vf_info *vf, 984 u16 num_rx_queues) 985 { 986 struct ecore_igu_block *p_block; 987 struct cau_sb_entry sb_entry; 988 int qid = 0; 989 u32 val = 0; 990 991 if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov) 992 num_rx_queues = 993 (u16)p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov; 994 p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues; 995 996 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id); 997 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1); 998 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0); 999 1000 for (qid = 0; qid < num_rx_queues; qid++) { 1001 p_block = ecore_get_igu_free_sb(p_hwfn, false); 1002 vf->igu_sbs[qid] = p_block->igu_sb_id; 1003 p_block->status &= ~ECORE_IGU_STATUS_FREE; 1004 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid); 1005 1006 ecore_wr(p_hwfn, p_ptt, 1007 IGU_REG_MAPPING_MEMORY + 1008 sizeof(u32) * p_block->igu_sb_id, val); 1009 1010 /* Configure igu sb in CAU which were marked valid */ 1011 ecore_init_cau_sb_entry(p_hwfn, &sb_entry, 1012 p_hwfn->rel_pf_id, 1013 vf->abs_vf_id, 1); 1014 1015 ecore_dmae_host2grc(p_hwfn, p_ptt, 1016 (u64)(osal_uintptr_t)&sb_entry, 1017 CAU_REG_SB_VAR_MEMORY + 1018 
p_block->igu_sb_id * sizeof(u64), 2, 1019 OSAL_NULL /* default parameters */); 1020 } 1021 1022 vf->num_sbs = (u8)num_rx_queues; 1023 1024 return vf->num_sbs; 1025 } 1026 1027 /** 1028 * 1029 * @brief The function invalidates all the VF entries, 1030 * technically this isn't required, but added for 1031 * cleaness and ease of debugging incase a VF attempts to 1032 * produce an interrupt after it has been taken down. 1033 * 1034 * @param p_hwfn 1035 * @param p_ptt 1036 * @param vf 1037 */ 1038 static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn, 1039 struct ecore_ptt *p_ptt, 1040 struct ecore_vf_info *vf) 1041 1042 { 1043 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 1044 int idx, igu_id; 1045 u32 addr, val; 1046 1047 /* Invalidate igu CAM lines and mark them as free */ 1048 for (idx = 0; idx < vf->num_sbs; idx++) { 1049 igu_id = vf->igu_sbs[idx]; 1050 addr = IGU_REG_MAPPING_MEMORY + 1051 sizeof(u32) * igu_id; 1052 1053 val = ecore_rd(p_hwfn, p_ptt, addr); 1054 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0); 1055 ecore_wr(p_hwfn, p_ptt, addr, val); 1056 1057 p_info->entry[igu_id].status |= ECORE_IGU_STATUS_FREE; 1058 p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++; 1059 } 1060 1061 vf->num_sbs = 0; 1062 } 1063 1064 void ecore_iov_set_link(struct ecore_hwfn *p_hwfn, 1065 u16 vfid, 1066 struct ecore_mcp_link_params *params, 1067 struct ecore_mcp_link_state *link, 1068 struct ecore_mcp_link_capabilities *p_caps) 1069 { 1070 struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false); 1071 struct ecore_bulletin_content *p_bulletin; 1072 1073 if (!p_vf) 1074 return; 1075 1076 p_bulletin = p_vf->bulletin.p_virt; 1077 p_bulletin->req_autoneg = params->speed.autoneg; 1078 p_bulletin->req_adv_speed = params->speed.advertised_speeds; 1079 p_bulletin->req_forced_speed = params->speed.forced_speed; 1080 p_bulletin->req_autoneg_pause = params->pause.autoneg; 1081 p_bulletin->req_forced_rx = params->pause.forced_rx; 1082 p_bulletin->req_forced_tx = params->pause.forced_tx; 1083 p_bulletin->req_loopback = params->loopback_mode; 1084 1085 p_bulletin->link_up = link->link_up; 1086 p_bulletin->speed = link->speed; 1087 p_bulletin->full_duplex = link->full_duplex; 1088 p_bulletin->autoneg = link->an; 1089 p_bulletin->autoneg_complete = link->an_complete; 1090 p_bulletin->parallel_detection = link->parallel_detection; 1091 p_bulletin->pfc_enabled = link->pfc_enabled; 1092 p_bulletin->partner_adv_speed = link->partner_adv_speed; 1093 p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en; 1094 p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en; 1095 p_bulletin->partner_adv_pause = link->partner_adv_pause; 1096 p_bulletin->sfp_tx_fault = link->sfp_tx_fault; 1097 1098 p_bulletin->capability_speed = p_caps->speed_capabilities; 1099 } 1100 1101 enum _ecore_status_t 1102 ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn, 1103 struct ecore_ptt *p_ptt, 1104 struct ecore_iov_vf_init_params *p_params) 1105 { 1106 struct ecore_mcp_link_capabilities link_caps; 1107 struct ecore_mcp_link_params link_params; 1108 struct ecore_mcp_link_state link_state; 1109 u8 num_of_vf_avaiable_chains = 0; 1110 struct ecore_vf_info *vf = OSAL_NULL; 1111 u16 qid, num_irqs; 1112 enum _ecore_status_t rc = ECORE_SUCCESS; 1113 u32 cids; 1114 u8 i; 1115 1116 vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false); 1117 if (!vf) { 1118 DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n"); 1119 return ECORE_UNKNOWN_ERROR; 1120 } 1121 1122 if (vf->b_init) { 1123 
DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n", 1124 p_params->rel_vf_id); 1125 return ECORE_INVAL; 1126 } 1127 1128 /* Perform sanity checking on the requested vport/rss */ 1129 if (p_params->vport_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) { 1130 DP_NOTICE(p_hwfn, true, "VF[%d] - can't use VPORT %02x\n", 1131 p_params->rel_vf_id, p_params->vport_id); 1132 return ECORE_INVAL; 1133 } 1134 1135 if ((p_params->num_queues > 1) && 1136 (p_params->rss_eng_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG))) { 1137 DP_NOTICE(p_hwfn, true, "VF[%d] - can't use RSS_ENG %02x\n", 1138 p_params->rel_vf_id, p_params->rss_eng_id); 1139 return ECORE_INVAL; 1140 } 1141 1142 /* TODO - remove this once we get confidence of change */ 1143 if (!p_params->vport_id) { 1144 DP_NOTICE(p_hwfn, false, 1145 "VF[%d] - Unlikely that VF uses vport0. Forgotten?\n", 1146 p_params->rel_vf_id); 1147 } 1148 if ((!p_params->rss_eng_id) && (p_params->num_queues > 1)) { 1149 DP_NOTICE(p_hwfn, false, 1150 "VF[%d] - Unlikely that VF uses RSS_eng0. Forgotten?\n", 1151 p_params->rel_vf_id); 1152 } 1153 vf->vport_id = p_params->vport_id; 1154 vf->rss_eng_id = p_params->rss_eng_id; 1155 1156 /* Since it's possible to relocate SBs, it's a bit difficult to check 1157 * things here. Simply check whether the index falls in the range 1158 * belonging to the PF. 1159 */ 1160 for (i = 0; i < p_params->num_queues; i++) { 1161 qid = p_params->req_rx_queue[i]; 1162 if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) { 1163 DP_NOTICE(p_hwfn, true, 1164 "Can't enable Rx qid [%04x] for VF[%d]: qids [0,,...,0x%04x] available\n", 1165 qid, p_params->rel_vf_id, 1166 (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)); 1167 return ECORE_INVAL; 1168 } 1169 1170 qid = p_params->req_tx_queue[i]; 1171 if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) { 1172 DP_NOTICE(p_hwfn, true, 1173 "Can't enable Tx qid [%04x] for VF[%d]: qids [0,,...,0x%04x] available\n", 1174 qid, p_params->rel_vf_id, 1175 (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)); 1176 return ECORE_INVAL; 1177 } 1178 } 1179 1180 /* Limit number of queues according to number of CIDs */ 1181 ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids); 1182 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1183 "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n", 1184 vf->relative_vf_id, p_params->num_queues, (u16)cids); 1185 num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids)); 1186 1187 num_of_vf_avaiable_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn, 1188 p_ptt, 1189 vf, 1190 num_irqs); 1191 if (num_of_vf_avaiable_chains == 0) { 1192 DP_ERR(p_hwfn, "no available igu sbs\n"); 1193 return ECORE_NOMEM; 1194 } 1195 1196 /* Choose queue number and index ranges */ 1197 vf->num_rxqs = num_of_vf_avaiable_chains; 1198 vf->num_txqs = num_of_vf_avaiable_chains; 1199 1200 for (i = 0; i < vf->num_rxqs; i++) { 1201 struct ecore_vf_queue *p_queue = &vf->vf_queues[i]; 1202 1203 p_queue->fw_rx_qid = p_params->req_rx_queue[i]; 1204 p_queue->fw_tx_qid = p_params->req_tx_queue[i]; 1205 1206 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1207 "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n", 1208 vf->relative_vf_id, i, vf->igu_sbs[i], 1209 p_queue->fw_rx_qid, p_queue->fw_tx_qid); 1210 } 1211 1212 /* Update the link configuration in bulletin. 
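	 * The VF reads link parameters/state/capabilities from its bulletin
	 * copy, so the bulletin must hold the PF's current values before the
	 * VF is started (ecore_iov_release_hw_for_vf() does the matching
	 * refresh on teardown).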
	 */
	OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
		    sizeof(link_params));
	OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
		    sizeof(link_state));
	OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
		    sizeof(link_caps));
	ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
			   &link_params, &link_state, &link_caps);

	rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);

	if (rc == ECORE_SUCCESS) {
		vf->b_init = true;
#ifndef REMOVE_DBG
		p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
			(1ULL << (vf->relative_vf_id % 64));
#endif

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->p_dev->p_iov_info->num_vfs++;
	}

	return rc;
}

enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u16 rel_vf_id)
{
	struct ecore_mcp_link_capabilities caps;
	struct ecore_mcp_link_params params;
	struct ecore_mcp_link_state link;
	struct ecore_vf_info *vf = OSAL_NULL;

	vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n");
		return ECORE_UNKNOWN_ERROR;
	}

	if (vf->bulletin.p_virt)
		OSAL_MEMSET(vf->bulletin.p_virt, 0,
			    sizeof(*vf->bulletin.p_virt));

	OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	OSAL_MEMCPY(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
	OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
	OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
		    sizeof(caps));
	ecore_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	/* Forget the VF's acquisition message */
	OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));

	/* Disabling interrupts and resetting the permission table was done
	 * during vf-close; however, we could get here without going through
	 * vf_close.
	 */
	/* Disable Interrupts for VF */
	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	vf->num_rxqs = 0;
	vf->num_txqs = 0;
	ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (vf->b_init) {
		vf->b_init = false;
#ifndef REMOVE_DBG
		p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
			~(1ULL << (vf->relative_vf_id % 64));
#endif

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->p_dev->p_iov_info->num_vfs--;
	}

	return ECORE_SUCCESS;
}

static bool ecore_iov_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					 struct ecore_vf_info *vf,
					 u16 tlv)
{
	/* lock the channel */
	/* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock...
*/ 1311 1312 /* record the locking op */ 1313 /* vf->op_current = tlv; @@@TBD MichalK */ 1314 1315 /* log the lock */ 1316 if (ecore_iov_tlv_supported(tlv)) 1317 DP_VERBOSE(p_hwfn, 1318 ECORE_MSG_IOV, 1319 "VF[%d]: vf pf channel locked by %s\n", 1320 vf->abs_vf_id, 1321 ecore_channel_tlvs_string[tlv]); 1322 else 1323 DP_VERBOSE(p_hwfn, 1324 ECORE_MSG_IOV, 1325 "VF[%d]: vf pf channel locked by %04x\n", 1326 vf->abs_vf_id, tlv); 1327 } 1328 1329 static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn, 1330 struct ecore_vf_info *vf, 1331 u16 expected_tlv) 1332 { 1333 /*WARN(expected_tlv != vf->op_current, 1334 "lock mismatch: expected %s found %s", 1335 channel_tlvs_string[expected_tlv], 1336 channel_tlvs_string[vf->op_current]); 1337 @@@TBD MichalK 1338 */ 1339 1340 /* lock the channel */ 1341 /* mutex_unlock(&vf->op_mutex); @@@TBD MichalK add the lock */ 1342 1343 /* log the unlock */ 1344 if (ecore_iov_tlv_supported(expected_tlv)) 1345 DP_VERBOSE(p_hwfn, 1346 ECORE_MSG_IOV, 1347 "VF[%d]: vf pf channel unlocked by %s\n", 1348 vf->abs_vf_id, 1349 ecore_channel_tlvs_string[expected_tlv]); 1350 else 1351 DP_VERBOSE(p_hwfn, 1352 ECORE_MSG_IOV, 1353 "VF[%d]: vf pf channel unlocked by %04x\n", 1354 vf->abs_vf_id, expected_tlv); 1355 1356 /* record the locking op */ 1357 /* vf->op_current = CHANNEL_TLV_NONE;*/ 1358 } 1359 1360 /* place a given tlv on the tlv buffer, continuing current tlv list */ 1361 void *ecore_add_tlv(u8 **offset, u16 type, u16 length) 1362 { 1363 struct channel_tlv *tl = (struct channel_tlv *)*offset; 1364 1365 tl->type = type; 1366 tl->length = length; 1367 1368 /* Offset should keep pointing to next TLV (the end of the last) */ 1369 *offset += length; 1370 1371 /* Return a pointer to the start of the added tlv */ 1372 return *offset - length; 1373 } 1374 1375 /* list the types and lengths of the tlvs on the buffer */ 1376 void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list) 1377 { 1378 u16 i = 1, total_length = 0; 1379 struct channel_tlv *tlv; 1380 1381 do { 1382 /* cast current tlv list entry to channel tlv header*/ 1383 tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length); 1384 1385 /* output tlv */ 1386 if (ecore_iov_tlv_supported(tlv->type)) 1387 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1388 "TLV number %d: type %s, length %d\n", 1389 i, ecore_channel_tlvs_string[tlv->type], 1390 tlv->length); 1391 else 1392 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1393 "TLV number %d: type %d, length %d\n", 1394 i, tlv->type, tlv->length); 1395 1396 if (tlv->type == CHANNEL_TLV_LIST_END) 1397 return; 1398 1399 /* Validate entry - protect against malicious VFs */ 1400 if (!tlv->length) { 1401 DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n"); 1402 return; 1403 } 1404 1405 total_length += tlv->length; 1406 1407 if (total_length >= sizeof(struct tlv_buffer_size)) { 1408 DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n"); 1409 return; 1410 } 1411 1412 i++; 1413 } while (1); 1414 } 1415 1416 static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn, 1417 struct ecore_ptt *p_ptt, 1418 struct ecore_vf_info *p_vf, 1419 #ifdef CONFIG_ECORE_SW_CHANNEL 1420 u16 length, 1421 #else 1422 u16 OSAL_UNUSED length, 1423 #endif 1424 u8 status) 1425 { 1426 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx; 1427 struct ecore_dmae_params params; 1428 u8 eng_vf_id; 1429 1430 mbx->reply_virt->default_resp.hdr.status = status; 1431 1432 ecore_dp_tlv_list(p_hwfn, mbx->reply_virt); 1433 1434 #ifdef CONFIG_ECORE_SW_CHANNEL 1435 mbx->sw_mbx.response_size = 1436 length + sizeof(struct 
channel_list_end_tlv);

	if (!p_vf->b_hw_channel)
		return;
#endif

	eng_vf_id = p_vf->abs_vf_id;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = ECORE_DMAE_FLAG_VF_DST;
	params.dst_vfid = eng_vf_id;

	ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			     mbx->req_virt->first_tlv.reply_address +
			     sizeof(u64),
			     (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			     &params);

	/* Once PF copies the rc to the VF, the latter can continue and
	 * send an additional message. So we have to make sure the
	 * channel would be re-set to ready prior to that.
	 */
	REG_WR(p_hwfn,
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id),
	       1);

	ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			     mbx->req_virt->first_tlv.reply_address,
			     sizeof(u64) / 4, &params);

	OSAL_IOV_PF_RESP_TYPE(p_hwfn, p_vf->relative_vf_id, status);
}

static u16 ecore_iov_vport_to_tlv(enum ecore_iov_vport_update_flag flag)
{
	switch (flag) {
	case ECORE_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case ECORE_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case ECORE_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case ECORE_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case ECORE_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
	default:
		return 0;
	}
}

static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
					      struct ecore_vf_info *p_vf,
					      struct ecore_iov_vf_mbx *p_mbx,
					      u8 status, u16 tlvs_mask,
					      u16 tlvs_accepted)
{
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);
	total_len = size;

	ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & (1 << i)))
			continue;

		resp = ecore_add_tlv(&p_mbx->offset, ecore_iov_vport_to_tlv(i),
				     size);

		if (tlvs_accepted & (1 << i))
			resp->hdr.status = status;
		else
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - vport_update response: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   ecore_iov_vport_to_tlv(i),
			   resp->hdr.status);

		total_len += size;
	}

	ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	return total_len;
}

static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf_info,
				   u16 type, u16 length, u8 status)
{
	struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	ecore_add_tlv(&mbx->offset, type, length);
	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
1549 sizeof(struct channel_list_end_tlv)); 1550 1551 ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status); 1552 } 1553 1554 struct ecore_public_vf_info * ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn, 1555 u16 relative_vf_id, 1556 bool b_enabled_only) 1557 { 1558 struct ecore_vf_info *vf = OSAL_NULL; 1559 1560 vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only); 1561 if (!vf) 1562 return OSAL_NULL; 1563 1564 return &vf->p_vf_info; 1565 } 1566 1567 static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, 1568 struct ecore_vf_info *p_vf) 1569 { 1570 u32 i, j; 1571 1572 p_vf->vf_bulletin = 0; 1573 p_vf->vport_instance = 0; 1574 p_vf->configured_features = 0; 1575 1576 /* If VF previously requested less resources, go back to default */ 1577 p_vf->num_rxqs = p_vf->num_sbs; 1578 p_vf->num_txqs = p_vf->num_sbs; 1579 1580 p_vf->num_active_rxqs = 0; 1581 1582 for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) { 1583 struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i]; 1584 1585 for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) { 1586 if (!p_queue->cids[j].p_cid) 1587 continue; 1588 1589 ecore_eth_queue_cid_release(p_hwfn, 1590 p_queue->cids[j].p_cid); 1591 p_queue->cids[j].p_cid = OSAL_NULL; 1592 } 1593 } 1594 1595 OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config)); 1596 OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire)); 1597 OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id); 1598 } 1599 1600 /* Returns either 0, or log(size) */ 1601 static u32 ecore_iov_vf_db_bar_size(struct ecore_hwfn *p_hwfn, 1602 struct ecore_ptt *p_ptt) 1603 { 1604 u32 val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE); 1605 1606 if (val) 1607 return val + 11; 1608 return 0; 1609 } 1610 1611 static void 1612 ecore_iov_vf_mbx_acquire_resc_cids(struct ecore_hwfn *p_hwfn, 1613 struct ecore_ptt *p_ptt, 1614 struct ecore_vf_info *p_vf, 1615 struct vf_pf_resc_request *p_req, 1616 struct pf_vf_resc *p_resp) 1617 { 1618 u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons; 1619 u8 db_size = DB_ADDR_VF(1, DQ_DEMS_LEGACY) - 1620 DB_ADDR_VF(0, DQ_DEMS_LEGACY); 1621 u32 bar_size; 1622 1623 p_resp->num_cids = OSAL_MIN_T(u8, p_req->num_cids, num_vf_cons); 1624 1625 /* If VF didn't bother asking for QIDs than don't bother limiting 1626 * number of CIDs. The VF doesn't care about the number, and this 1627 * has the likely result of causing an additional acquisition. 1628 */ 1629 if (!(p_vf->acquire.vfdev_info.capabilities & 1630 VFPF_ACQUIRE_CAP_QUEUE_QIDS)) 1631 return; 1632 1633 /* If doorbell bar was mapped by VF, limit the VF CIDs to an amount 1634 * that would make sure doorbells for all CIDs fall within the bar. 1635 * If it doesn't, make sure regview window is sufficient. 
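	 * Illustrative numbers only: if bar_size / db_size comes out at,
	 * say, 128, num_cids is capped at 128 by the check below; at 256 or
	 * more doorbells the response is left untouched.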
	 */
	if (p_vf->acquire.vfdev_info.capabilities &
	    VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
		bar_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt);
		if (bar_size)
			bar_size = 1 << bar_size;

		if (ECORE_IS_CMT(p_hwfn->p_dev))
			bar_size /= 2;
	} else {
		bar_size = PXP_VF_BAR0_DQ_LENGTH;
	}

	if (bar_size / db_size < 256)
		p_resp->num_cids = OSAL_MIN_T(u8, p_resp->num_cids,
					      (u8)(bar_size / db_size));
}

static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *p_vf,
					struct vf_pf_resc_request *p_req,
					struct pf_vf_resc *p_resp)
{
	u8 i;

	/* Queue related information */
	p_resp->num_rxqs = p_vf->num_rxqs;
	p_resp->num_txqs = p_vf->num_txqs;
	p_resp->num_sbs = p_vf->num_sbs;

	for (i = 0; i < p_resp->num_sbs; i++) {
		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
		/* TODO - what's this sb_qid field? Is it deprecated?
		 * or is there an ecore_client that looks at this?
		 */
		p_resp->hw_sbs[i].sb_qid = 0;
	}

	/* These fields are filled for backward compatibility.
	 * Unused by modern VFs.
	 */
	for (i = 0; i < p_resp->num_rxqs; i++) {
		ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				  (u16 *)&p_resp->hw_qid[i]);
		p_resp->cid[i] = i;
	}

	/* Filter related information */
	p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
					     p_req->num_mac_filters);
	p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
					      p_req->num_vlan_filters);

	ecore_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);

	/* This isn't really needed/enforced, but some legacy VFs might depend
	 * on the correct filling of this field.
	 */
	p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;

	/* Validate sufficient resources for VF */
	if (p_resp->num_rxqs < p_req->num_rxqs ||
	    p_resp->num_txqs < p_req->num_txqs ||
	    p_resp->num_sbs < p_req->num_sbs ||
	    p_resp->num_mac_filters < p_req->num_mac_filters ||
	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
	    p_resp->num_mc_filters < p_req->num_mc_filters ||
	    p_resp->num_cids < p_req->num_cids) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
			   p_vf->abs_vf_id,
			   p_req->num_rxqs, p_resp->num_rxqs,
			   p_req->num_txqs, p_resp->num_txqs,
			   p_req->num_sbs, p_resp->num_sbs,
			   p_req->num_mac_filters, p_resp->num_mac_filters,
			   p_req->num_vlan_filters, p_resp->num_vlan_filters,
			   p_req->num_mc_filters, p_resp->num_mc_filters,
			   p_req->num_cids, p_resp->num_cids);

		/* Some legacy OSes are incapable of correctly handling this
		 * failure.
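		 * For those (old Windows VFs on the pre-PKT_LEN_TUNN HSI, per
		 * the check below) the PF answers PFVF_STATUS_SUCCESS together
		 * with the reduced resource counts instead of failing the
		 * acquire.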
1718 */ 1719 if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor == 1720 ETH_HSI_VER_NO_PKT_LEN_TUNN) && 1721 (p_vf->acquire.vfdev_info.os_type == 1722 VFPF_ACQUIRE_OS_WINDOWS)) 1723 return PFVF_STATUS_SUCCESS; 1724 1725 return PFVF_STATUS_NO_RESOURCE; 1726 } 1727 1728 return PFVF_STATUS_SUCCESS; 1729 } 1730 1731 static void ecore_iov_vf_mbx_acquire_stats(struct pfvf_stats_info *p_stats) 1732 { 1733 p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B + 1734 OFFSETOF(struct mstorm_vf_zone, 1735 non_trigger.eth_queue_stat); 1736 p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat); 1737 p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B + 1738 OFFSETOF(struct ustorm_vf_zone, 1739 non_trigger.eth_queue_stat); 1740 p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat); 1741 p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B + 1742 OFFSETOF(struct pstorm_vf_zone, 1743 non_trigger.eth_queue_stat); 1744 p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat); 1745 p_stats->tstats.address = 0; 1746 p_stats->tstats.len = 0; 1747 } 1748 1749 static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn, 1750 struct ecore_ptt *p_ptt, 1751 struct ecore_vf_info *vf) 1752 { 1753 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; 1754 struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp; 1755 struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; 1756 struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire; 1757 u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED; 1758 struct pf_vf_resc *resc = &resp->resc; 1759 enum _ecore_status_t rc; 1760 1761 OSAL_MEMSET(resp, 0, sizeof(*resp)); 1762 1763 /* Write the PF version so that VF would know which version 1764 * is supported - might be later overriden. This guarantees that 1765 * VF could recognize legacy PF based on lack of versions in reply. 1766 */ 1767 pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR; 1768 pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR; 1769 1770 /* TODO - not doing anything is bad since we'll assert, but this isn't 1771 * necessarily the right behavior - perhaps we should have allowed some 1772 * versatility here. 1773 */ 1774 if (vf->state != VF_FREE && 1775 vf->state != VF_STOPPED) { 1776 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1777 "VF[%d] sent ACQUIRE but is already in state %d - fail request\n", 1778 vf->abs_vf_id, vf->state); 1779 goto out; 1780 } 1781 1782 /* Validate FW compatibility */ 1783 if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) { 1784 if (req->vfdev_info.capabilities & 1785 VFPF_ACQUIRE_CAP_PRE_FP_HSI) { 1786 struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info; 1787 1788 /* This legacy support would need to be removed once 1789 * the major has changed. 
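			 * The OSAL_BUILD_BUG_ON() just below enforces that: it
			 * breaks the build as soon as ETH_HSI_VER_MAJOR moves
			 * past 3, forcing this path to be revisited.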
1790 */ 1791 OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3); 1792 1793 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1794 "VF[%d] is pre-fastpath HSI\n", 1795 vf->abs_vf_id); 1796 p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR; 1797 p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN; 1798 } else { 1799 DP_INFO(p_hwfn, 1800 "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n", 1801 vf->abs_vf_id, 1802 req->vfdev_info.eth_fp_hsi_major, 1803 req->vfdev_info.eth_fp_hsi_minor, 1804 ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); 1805 1806 goto out; 1807 } 1808 } 1809 1810 /* On 100g PFs, prevent old VFs from loading */ 1811 if (ECORE_IS_CMT(p_hwfn->p_dev) && 1812 !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) { 1813 DP_INFO(p_hwfn, "VF[%d] is running an old driver that doesn't support 100g\n", 1814 vf->abs_vf_id); 1815 goto out; 1816 } 1817 1818 #ifndef __EXTRACT__LINUX__ 1819 if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) { 1820 vfpf_status = PFVF_STATUS_NOT_SUPPORTED; 1821 goto out; 1822 } 1823 #endif 1824 1825 /* Store the acquire message */ 1826 OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire)); 1827 1828 vf->opaque_fid = req->vfdev_info.opaque_fid; 1829 1830 vf->vf_bulletin = req->bulletin_addr; 1831 vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ? 1832 vf->bulletin.size : req->bulletin_size; 1833 1834 /* fill in pfdev info */ 1835 pfdev_info->chip_num = p_hwfn->p_dev->chip_num; 1836 pfdev_info->db_size = 0; /* @@@ TBD MichalK Vf Doorbells */ 1837 pfdev_info->indices_per_sb = PIS_PER_SB_E4; 1838 1839 pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED | 1840 PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE; 1841 if (ECORE_IS_CMT(p_hwfn->p_dev)) 1842 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G; 1843 1844 /* Share our ability to use multiple queue-ids only with VFs 1845 * that request it. 1846 */ 1847 if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS) 1848 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS; 1849 1850 /* Share the sizes of the bars with VF */ 1851 resp->pfdev_info.bar_size = (u8)ecore_iov_vf_db_bar_size(p_hwfn, 1852 p_ptt); 1853 1854 ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info); 1855 1856 OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, 1857 ETH_ALEN); 1858 1859 pfdev_info->fw_major = FW_MAJOR_VERSION; 1860 pfdev_info->fw_minor = FW_MINOR_VERSION; 1861 pfdev_info->fw_rev = FW_REVISION_VERSION; 1862 pfdev_info->fw_eng = FW_ENGINEERING_VERSION; 1863 1864 /* Incorrect when legacy, but doesn't matter as legacy isn't reading 1865 * this field. 1866 */ 1867 pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR, 1868 req->vfdev_info.eth_fp_hsi_minor); 1869 pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE(); 1870 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, 1871 OSAL_NULL); 1872 1873 pfdev_info->dev_type = p_hwfn->p_dev->type; 1874 pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev; 1875 1876 /* Fill resources available to VF; Make sure there are enough to 1877 * satisfy the VF's request. 
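	 * ecore_iov_vf_mbx_acquire_resc() trims each resource to what the PF
	 * reserved for this VF and returns PFVF_STATUS_NO_RESOURCE if the
	 * request cannot be met (legacy-Windows exception aside).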
1878 */ 1879 vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf, 1880 &req->resc_request, resc); 1881 if (vfpf_status != PFVF_STATUS_SUCCESS) 1882 goto out; 1883 1884 /* Start the VF in FW */ 1885 rc = ecore_sp_vf_start(p_hwfn, vf); 1886 if (rc != ECORE_SUCCESS) { 1887 DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n", 1888 vf->abs_vf_id); 1889 vfpf_status = PFVF_STATUS_FAILURE; 1890 goto out; 1891 } 1892 1893 /* Fill agreed size of bulletin board in response, and post 1894 * an initial image to the bulletin board. 1895 */ 1896 resp->bulletin_size = vf->bulletin.size; 1897 ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt); 1898 1899 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1900 "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n" 1901 "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n", 1902 vf->abs_vf_id, resp->pfdev_info.chip_num, 1903 resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb, 1904 (unsigned long long)resp->pfdev_info.capabilities, resc->num_rxqs, 1905 resc->num_txqs, resc->num_sbs, resc->num_mac_filters, 1906 resc->num_vlan_filters); 1907 1908 vf->state = VF_ACQUIRED; 1909 1910 out: 1911 /* Prepare Response */ 1912 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE, 1913 sizeof(struct pfvf_acquire_resp_tlv), 1914 vfpf_status); 1915 } 1916 1917 static enum _ecore_status_t __ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn, 1918 struct ecore_vf_info *p_vf, bool val) 1919 { 1920 struct ecore_sp_vport_update_params params; 1921 enum _ecore_status_t rc; 1922 1923 if (val == p_vf->spoof_chk) { 1924 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1925 "Spoofchk value[%d] is already configured\n", 1926 val); 1927 return ECORE_SUCCESS; 1928 } 1929 1930 OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params)); 1931 params.opaque_fid = p_vf->opaque_fid; 1932 params.vport_id = p_vf->vport_id; 1933 params.update_anti_spoofing_en_flg = 1; 1934 params.anti_spoofing_en = val; 1935 1936 rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK, 1937 OSAL_NULL); 1938 if (rc == ECORE_SUCCESS) { 1939 p_vf->spoof_chk = val; 1940 p_vf->req_spoofchk_val = p_vf->spoof_chk; 1941 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1942 "Spoofchk val[%d] configured\n", val); 1943 } else { 1944 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1945 "Spoofchk configuration[val:%d] failed for VF[%d]\n", 1946 val, p_vf->relative_vf_id); 1947 } 1948 1949 return rc; 1950 } 1951 1952 static enum _ecore_status_t ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn, 1953 struct ecore_vf_info *p_vf) 1954 { 1955 struct ecore_filter_ucast filter; 1956 enum _ecore_status_t rc = ECORE_SUCCESS; 1957 int i; 1958 1959 OSAL_MEMSET(&filter, 0, sizeof(filter)); 1960 filter.is_rx_filter = 1; 1961 filter.is_tx_filter = 1; 1962 filter.vport_to_add_to = p_vf->vport_id; 1963 filter.opcode = ECORE_FILTER_ADD; 1964 1965 /* Reconfigure vlans */ 1966 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) { 1967 if (!p_vf->shadow_config.vlans[i].used) 1968 continue; 1969 1970 filter.type = ECORE_FILTER_VLAN; 1971 filter.vlan = p_vf->shadow_config.vlans[i].vid; 1972 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1973 "Reconfiguring VLAN [0x%04x] for VF [%04x]\n", 1974 filter.vlan, p_vf->relative_vf_id); 1975 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, 1976 &filter, ECORE_SPQ_MODE_CB, OSAL_NULL); 1977 if (rc) { 1978 DP_NOTICE(p_hwfn, true, "Failed to configure VLAN [%04x] to VF [%04x]\n", 1979 filter.vlan, 1980 p_vf->relative_vf_id); 1981 break; 1982 } 1983 } 1984
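	/* The replay above works off a small shadow table the PF keeps per
	 * VF.  As a rough standalone illustration of that pattern (the
	 * struct and post_vlan() callback below are hypothetical, not the
	 * driver's actual types), consider:
	 *
	 *	struct shadow_vlan { bool used; u16 vid; };
	 *
	 *	static int replay_vlans(struct shadow_vlan *tbl, int n,
	 *				int (*post_vlan)(u16 vid))
	 *	{
	 *		int i, rc = 0;
	 *
	 *		for (i = 0; i < n; i++) {
	 *			if (!tbl[i].used)
	 *				continue;
	 *			rc = post_vlan(tbl[i].vid);
	 *			if (rc)
	 *				break;
	 *		}
	 *		return rc;
	 *	}
	 *
	 * Entries are only marked 'used'; the vid itself is left intact, so a
	 * later removal of a forced VLAN can restore the VF's own filters.
	 */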
1985 return rc; 1986 } 1987 1988 static enum _ecore_status_t 1989 ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn, 1990 struct ecore_vf_info *p_vf, 1991 u64 events) 1992 { 1993 enum _ecore_status_t rc = ECORE_SUCCESS; 1994 1995 /*TODO - what about MACs? */ 1996 1997 if ((events & (1 << VLAN_ADDR_FORCED)) && 1998 !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) 1999 rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf); 2000 2001 return rc; 2002 } 2003 2004 static enum _ecore_status_t 2005 ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn, 2006 struct ecore_vf_info *p_vf, 2007 u64 events) 2008 { 2009 enum _ecore_status_t rc = ECORE_SUCCESS; 2010 struct ecore_filter_ucast filter; 2011 2012 if (!p_vf->vport_instance) 2013 return ECORE_INVAL; 2014 2015 if (events & (1 << MAC_ADDR_FORCED)) { 2016 /* Since there's no way [currently] of removing the MAC, 2017 * we can always assume this means we need to force it. 2018 */ 2019 OSAL_MEMSET(&filter, 0, sizeof(filter)); 2020 filter.type = ECORE_FILTER_MAC; 2021 filter.opcode = ECORE_FILTER_REPLACE; 2022 filter.is_rx_filter = 1; 2023 filter.is_tx_filter = 1; 2024 filter.vport_to_add_to = p_vf->vport_id; 2025 OSAL_MEMCPY(filter.mac, 2026 p_vf->bulletin.p_virt->mac, 2027 ETH_ALEN); 2028 2029 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, 2030 &filter, 2031 ECORE_SPQ_MODE_CB, OSAL_NULL); 2032 if (rc) { 2033 DP_NOTICE(p_hwfn, true, 2034 "PF failed to configure MAC for VF\n"); 2035 return rc; 2036 } 2037 2038 p_vf->configured_features |= 1 << MAC_ADDR_FORCED; 2039 } 2040 2041 if (events & (1 << VLAN_ADDR_FORCED)) { 2042 struct ecore_sp_vport_update_params vport_update; 2043 u8 removal; 2044 int i; 2045 2046 OSAL_MEMSET(&filter, 0, sizeof(filter)); 2047 filter.type = ECORE_FILTER_VLAN; 2048 filter.is_rx_filter = 1; 2049 filter.is_tx_filter = 1; 2050 filter.vport_to_add_to = p_vf->vport_id; 2051 filter.vlan = p_vf->bulletin.p_virt->pvid; 2052 filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE : 2053 ECORE_FILTER_FLUSH; 2054 2055 /* Send the ramrod */ 2056 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, 2057 &filter, 2058 ECORE_SPQ_MODE_CB, OSAL_NULL); 2059 if (rc) { 2060 DP_NOTICE(p_hwfn, true, 2061 "PF failed to configure VLAN for VF\n"); 2062 return rc; 2063 } 2064 2065 /* Update the default-vlan & silent vlan stripping */ 2066 OSAL_MEMSET(&vport_update, 0, sizeof(vport_update)); 2067 vport_update.opaque_fid = p_vf->opaque_fid; 2068 vport_update.vport_id = p_vf->vport_id; 2069 vport_update.update_default_vlan_enable_flg = 1; 2070 vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0; 2071 vport_update.update_default_vlan_flg = 1; 2072 vport_update.default_vlan = filter.vlan; 2073 2074 vport_update.update_inner_vlan_removal_flg = 1; 2075 removal = filter.vlan ? 2076 1 : p_vf->shadow_config.inner_vlan_removal; 2077 vport_update.inner_vlan_removal_flg = removal; 2078 vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0; 2079 rc = ecore_sp_vport_update(p_hwfn, &vport_update, 2080 ECORE_SPQ_MODE_EBLOCK, 2081 OSAL_NULL); 2082 if (rc) { 2083 DP_NOTICE(p_hwfn, true, 2084 "PF failed to configure VF vport for vlan\n"); 2085 return rc; 2086 } 2087 2088 /* Update all the Rx queues */ 2089 for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) { 2090 struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i]; 2091 struct ecore_queue_cid *p_cid = OSAL_NULL; 2092 2093 /* There can be at most 1 Rx queue on qzone. 
Find it */ 2094 p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue); 2095 if (p_cid == OSAL_NULL) 2096 continue; 2097 2098 rc = ecore_sp_eth_rx_queues_update(p_hwfn, 2099 (void **)&p_cid, 2100 1, 0, 1, 2101 ECORE_SPQ_MODE_EBLOCK, 2102 OSAL_NULL); 2103 if (rc) { 2104 DP_NOTICE(p_hwfn, true, 2105 "Failed to send Rx update for queue[0x%04x]\n", 2106 p_cid->rel.queue_id); 2107 return rc; 2108 } 2109 } 2110 2111 if (filter.vlan) 2112 p_vf->configured_features |= 1 << VLAN_ADDR_FORCED; 2113 else 2114 p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED); 2115 } 2116 2117 /* If forced features are terminated, we need to configure the shadow 2118 * configuration back again. 2119 */ 2120 if (events) 2121 ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events); 2122 2123 return rc; 2124 } 2125 2126 static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn, 2127 struct ecore_ptt *p_ptt, 2128 struct ecore_vf_info *vf) 2129 { 2130 struct ecore_sp_vport_start_params params = {0}; 2131 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; 2132 struct vfpf_vport_start_tlv *start; 2133 u8 status = PFVF_STATUS_SUCCESS; 2134 struct ecore_vf_info *vf_info; 2135 u64 *p_bitmap; 2136 int sb_id; 2137 enum _ecore_status_t rc; 2138 2139 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true); 2140 if (!vf_info) { 2141 DP_NOTICE(p_hwfn->p_dev, true, 2142 "Failed to get VF info, invalid vfid [%d]\n", 2143 vf->relative_vf_id); 2144 return; 2145 } 2146 2147 vf->state = VF_ENABLED; 2148 start = &mbx->req_virt->start_vport; 2149 2150 ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf); 2151 2152 /* Initialize Status block in CAU */ 2153 for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) { 2154 if (!start->sb_addr[sb_id]) { 2155 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 2156 "VF[%d] did not fill the address of SB %d\n", 2157 vf->relative_vf_id, sb_id); 2158 break; 2159 } 2160 2161 ecore_int_cau_conf_sb(p_hwfn, p_ptt, 2162 start->sb_addr[sb_id], 2163 vf->igu_sbs[sb_id], 2164 vf->abs_vf_id, 1); 2165 } 2166 2167 vf->mtu = start->mtu; 2168 vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal; 2169 2170 /* Take into consideration configuration forced by hypervisor; 2171 * If none is configured, use the supplied VF values [for old 2172 * vfs that would still be fine, since they passed '0' as padding].
2173 */ 2174 p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap; 2175 if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) { 2176 u8 vf_req = start->only_untagged; 2177 2178 vf_info->bulletin.p_virt->default_only_untagged = vf_req; 2179 *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT; 2180 } 2181 2182 params.tpa_mode = start->tpa_mode; 2183 params.remove_inner_vlan = start->inner_vlan_removal; 2184 params.tx_switching = true; 2185 params.zero_placement_offset = start->zero_placement_offset; 2186 2187 #ifndef ASIC_ONLY 2188 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 2189 DP_NOTICE(p_hwfn, false, "FPGA: Don't configure VF for Tx-switching [no pVFC]\n"); 2190 params.tx_switching = false; 2191 } 2192 #endif 2193 2194 params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged; 2195 params.drop_ttl0 = false; 2196 params.concrete_fid = vf->concrete_fid; 2197 params.opaque_fid = vf->opaque_fid; 2198 params.vport_id = vf->vport_id; 2199 params.max_buffers_per_cqe = start->max_buffers_per_cqe; 2200 params.mtu = vf->mtu; 2201 params.check_mac = true; 2202 2203 #ifndef ECORE_UPSTREAM 2204 rc = OSAL_IOV_PRE_START_VPORT(p_hwfn, vf->relative_vf_id, &params); 2205 if (rc != ECORE_SUCCESS) { 2206 DP_ERR(p_hwfn, "OSAL_IOV_PRE_START_VPORT returned error %d\n", rc); 2207 status = PFVF_STATUS_FAILURE; 2208 goto exit; 2209 } 2210 #endif 2211 2212 rc = ecore_sp_eth_vport_start(p_hwfn, &params); 2213 if (rc != ECORE_SUCCESS) { 2214 DP_ERR(p_hwfn, "ecore_iov_vf_mbx_start_vport returned error %d\n", rc); 2215 status = PFVF_STATUS_FAILURE; 2216 } else { 2217 vf->vport_instance++; 2218 2219 /* Force configuration if needed on the newly opened vport */ 2220 ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap); 2221 OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id, 2222 vf->vport_id, vf->opaque_fid); 2223 __ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val); 2224 } 2225 #ifndef ECORE_UPSTREAM 2226 exit: 2227 #endif 2228 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START, 2229 sizeof(struct pfvf_def_resp_tlv), status); 2230 } 2231 2232 static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn, 2233 struct ecore_ptt *p_ptt, 2234 struct ecore_vf_info *vf) 2235 { 2236 u8 status = PFVF_STATUS_SUCCESS; 2237 enum _ecore_status_t rc; 2238 2239 OSAL_IOV_VF_VPORT_STOP(p_hwfn, vf); 2240 vf->vport_instance--; 2241 vf->spoof_chk = false; 2242 2243 if ((ecore_iov_validate_active_rxq(vf)) || 2244 (ecore_iov_validate_active_txq(vf))) { 2245 vf->b_malicious = true; 2246 DP_NOTICE(p_hwfn, 2247 false, "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n", 2248 vf->abs_vf_id); 2249 status = PFVF_STATUS_MALICIOUS; 2250 goto out; 2251 } 2252 2253 rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id); 2254 if (rc != ECORE_SUCCESS) { 2255 DP_ERR(p_hwfn, "ecore_iov_vf_mbx_stop_vport returned error %d\n", 2256 rc); 2257 status = PFVF_STATUS_FAILURE; 2258 } 2259 2260 /* Forget the configuration on the vport */ 2261 vf->configured_features = 0; 2262 OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config)); 2263 2264 out: 2265 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN, 2266 sizeof(struct pfvf_def_resp_tlv), status); 2267 } 2268 2269 static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn, 2270 struct ecore_ptt *p_ptt, 2271 struct ecore_vf_info *vf, 2272 u8 status, bool b_legacy) 2273 { 2274 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; 2275 struct pfvf_start_queue_resp_tlv *p_tlv; 2276 struct vfpf_start_rxq_tlv *req; 2277 u16 length;
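	/* PF->VF replies are built as a chain of TLVs written back-to-back
	 * into the reply buffer and terminated by a list-end TLV.  A rough
	 * standalone sketch of that composition pattern (the tlv_hdr layout
	 * and put_tlv() helper are illustrative only, not the actual channel
	 * structures):
	 *
	 *	struct tlv_hdr { u16 type; u16 length; };
	 *
	 *	static void *put_tlv(u8 **offset, u16 type, u16 length)
	 *	{
	 *		struct tlv_hdr *hdr = (struct tlv_hdr *)*offset;
	 *
	 *		hdr->type = type;
	 *		hdr->length = length;
	 *		*offset += length;
	 *		return hdr;
	 *	}
	 *
	 * ecore_add_tlv() below plays the put_tlv() role - first the response
	 * TLV itself, then CHANNEL_TLV_LIST_END to close the sequence - and
	 * ecore_iov_send_response() ships the assembled buffer to the VF.
	 */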
2278 2279 mbx->offset = (u8 *)mbx->reply_virt; 2280 2281 /* Taking a bigger struct instead of adding a TLV to list was a 2282 * mistake, but one which we're now stuck with, as some older 2283 * clients assume the size of the previous response. 2284 */ 2285 if (!b_legacy) 2286 length = sizeof(*p_tlv); 2287 else 2288 length = sizeof(struct pfvf_def_resp_tlv); 2289 2290 p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_RXQ, length); 2291 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END, 2292 sizeof(struct channel_list_end_tlv)); 2293 2294 /* Update the TLV with the response */ 2295 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) { 2296 req = &mbx->req_virt->start_rxq; 2297 p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B + 2298 OFFSETOF(struct mstorm_vf_zone, 2299 non_trigger.eth_rx_queue_producers) + 2300 sizeof(struct eth_rx_prod_data) * req->rx_qid; 2301 } 2302 2303 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status); 2304 } 2305 2306 static u8 ecore_iov_vf_mbx_qid(struct ecore_hwfn *p_hwfn, 2307 struct ecore_vf_info *p_vf, bool b_is_tx) 2308 { 2309 struct ecore_iov_vf_mbx *p_mbx = &p_vf->vf_mbx; 2310 struct vfpf_qid_tlv *p_qid_tlv; 2311 2312 /* Search for the qid TLV the VF published, if it's going to provide one */ 2313 if (!(p_vf->acquire.vfdev_info.capabilities & 2314 VFPF_ACQUIRE_CAP_QUEUE_QIDS)) { 2315 if (b_is_tx) 2316 return ECORE_IOV_LEGACY_QID_TX; 2317 else 2318 return ECORE_IOV_LEGACY_QID_RX; 2319 } 2320 2321 p_qid_tlv = (struct vfpf_qid_tlv *) 2322 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, 2323 CHANNEL_TLV_QID); 2324 if (p_qid_tlv == OSAL_NULL) { 2325 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 2326 "VF[%2x]: Failed to provide qid\n", 2327 p_vf->relative_vf_id); 2328 2329 return ECORE_IOV_QID_INVALID; 2330 } 2331 2332 if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) { 2333 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 2334 "VF[%02x]: Provided qid out-of-bounds %02x\n", 2335 p_vf->relative_vf_id, p_qid_tlv->qid); 2336 return ECORE_IOV_QID_INVALID; 2337 } 2338 2339 return p_qid_tlv->qid; 2340 } 2341 2342 static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn, 2343 struct ecore_ptt *p_ptt, 2344 struct ecore_vf_info *vf) 2345 { 2346 struct ecore_queue_start_common_params params; 2347 struct ecore_queue_cid_vf_params vf_params; 2348 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; 2349 u8 status = PFVF_STATUS_NO_RESOURCE; 2350 u8 qid_usage_idx, vf_legacy = 0; 2351 struct ecore_vf_queue *p_queue; 2352 struct vfpf_start_rxq_tlv *req; 2353 struct ecore_queue_cid *p_cid; 2354 struct ecore_sb_info sb_dummy; 2355 enum _ecore_status_t rc; 2356 2357 req = &mbx->req_virt->start_rxq; 2358 2359 if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid, 2360 ECORE_IOV_VALIDATE_Q_DISABLE) || 2361 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb)) 2362 goto out; 2363 2364 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false); 2365 if (qid_usage_idx == ECORE_IOV_QID_INVALID) 2366 goto out; 2367 2368 p_queue = &vf->vf_queues[req->rx_qid]; 2369 if (p_queue->cids[qid_usage_idx].p_cid) 2370 goto out; 2371 2372 vf_legacy = ecore_vf_calculate_legacy(vf); 2373 2374 /* Acquire a new queue-cid */ 2375 OSAL_MEMSET(&params, 0, sizeof(params)); 2376 params.queue_id = (u8)p_queue->fw_rx_qid; 2377 params.vport_id = vf->vport_id; 2378 params.stats_id = vf->abs_vf_id + 0x10; 2379 2380 /* Since IGU index is passed via sb_info, construct a dummy one */ 2381 OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy)); 2382 sb_dummy.igu_sb_id = req->hw_sb; 2383 params.p_sb = &sb_dummy; 2384 params.sb_idx = req->sb_index; 2385 2386
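	/* The qid_usage_idx obtained above distinguishes multiple queue-cids
	 * sharing a single queue-zone.  The selection logic in
	 * ecore_iov_vf_mbx_qid() boils down to the following sketch, where
	 * the helper and shortened constant names are illustrative stand-ins
	 * for the real ECORE_IOV_* values:
	 *
	 *	static u8 pick_qid(bool vf_supports_qids, bool has_qid_tlv,
	 *			   u8 tlv_qid, bool is_tx)
	 *	{
	 *		if (!vf_supports_qids)
	 *			return is_tx ? LEGACY_QID_TX : LEGACY_QID_RX;
	 *		if (!has_qid_tlv || tlv_qid >= MAX_QUEUES_PER_QZONE)
	 *			return QID_INVALID;
	 *		return tlv_qid;
	 *	}
	 *
	 * Legacy VFs therefore always land on the fixed Rx/Tx slots, while
	 * QIDS-aware VFs choose their own slot subject to a bounds check.
	 */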
OSAL_MEM_ZERO(&vf_params, sizeof(vf_params)); 2387 vf_params.vfid = vf->relative_vf_id; 2388 vf_params.vf_qid = (u8)req->rx_qid; 2389 vf_params.vf_legacy = vf_legacy; 2390 vf_params.qid_usage_idx = qid_usage_idx; 2391 2392 p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid, 2393 &params, true, &vf_params); 2394 if (p_cid == OSAL_NULL) 2395 goto out; 2396 2397 /* Legacy VFs have their Producers in a different location, which they 2398 * calculate on their own and clean the producer prior to this. 2399 */ 2400 if (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD)) 2401 REG_WR(p_hwfn, 2402 GTT_BAR0_MAP_REG_MSDM_RAM + 2403 MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid), 2404 0); 2405 2406 rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid, 2407 req->bd_max_bytes, 2408 req->rxq_addr, 2409 req->cqe_pbl_addr, 2410 req->cqe_pbl_size); 2411 if (rc != ECORE_SUCCESS) { 2412 status = PFVF_STATUS_FAILURE; 2413 ecore_eth_queue_cid_release(p_hwfn, p_cid); 2414 } else { 2415 p_queue->cids[qid_usage_idx].p_cid = p_cid; 2416 p_queue->cids[qid_usage_idx].b_is_tx = false; 2417 status = PFVF_STATUS_SUCCESS; 2418 vf->num_active_rxqs++; 2419 } 2420 2421 out: 2422 ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, 2423 !!(vf_legacy & 2424 ECORE_QCID_LEGACY_VF_RX_PROD)); 2425 } 2426 2427 static void 2428 ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp, 2429 struct ecore_tunnel_info *p_tun, 2430 u16 tunn_feature_mask) 2431 { 2432 p_resp->tunn_feature_mask = tunn_feature_mask; 2433 p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled; 2434 p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled; 2435 p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled; 2436 p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled; 2437 p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled; 2438 p_resp->vxlan_clss = p_tun->vxlan.tun_cls; 2439 p_resp->l2gre_clss = p_tun->l2_gre.tun_cls; 2440 p_resp->ipgre_clss = p_tun->ip_gre.tun_cls; 2441 p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls; 2442 p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls; 2443 p_resp->geneve_udp_port = p_tun->geneve_port.port; 2444 p_resp->vxlan_udp_port = p_tun->vxlan_port.port; 2445 } 2446 2447 static void 2448 __ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req, 2449 struct ecore_tunn_update_type *p_tun, 2450 enum ecore_tunn_mode mask, u8 tun_cls) 2451 { 2452 if (p_req->tun_mode_update_mask & (1 << mask)) { 2453 p_tun->b_update_mode = true; 2454 2455 if (p_req->tunn_mode & (1 << mask)) 2456 p_tun->b_mode_enabled = true; 2457 } 2458 2459 p_tun->tun_cls = tun_cls; 2460 } 2461 2462 static void 2463 ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req, 2464 struct ecore_tunn_update_type *p_tun, 2465 struct ecore_tunn_update_udp_port *p_port, 2466 enum ecore_tunn_mode mask, 2467 u8 tun_cls, u8 update_port, u16 port) 2468 { 2469 if (update_port) { 2470 p_port->b_update_port = true; 2471 p_port->port = port; 2472 } 2473 2474 __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls); 2475 } 2476 2477 static bool 2478 ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req) 2479 { 2480 bool b_update_requested = false; 2481 2482 if (p_req->tun_mode_update_mask || p_req->update_tun_cls || 2483 p_req->update_geneve_port || p_req->update_vxlan_port) 2484 b_update_requested = true; 2485 2486 return b_update_requested; 2487 } 2488 2489 static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn, 2490 struct ecore_ptt *p_ptt, 2491 struct ecore_vf_info *p_vf) 2492 { 2493 struct
ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel; 2494 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx; 2495 struct pfvf_update_tunn_param_tlv *p_resp; 2496 struct vfpf_update_tunn_param_tlv *p_req; 2497 enum _ecore_status_t rc = ECORE_SUCCESS; 2498 u8 status = PFVF_STATUS_SUCCESS; 2499 bool b_update_required = false; 2500 struct ecore_tunnel_info tunn; 2501 u16 tunn_feature_mask = 0; 2502 int i; 2503 2504 mbx->offset = (u8 *)mbx->reply_virt; 2505 2506 OSAL_MEM_ZERO(&tunn, sizeof(tunn)); 2507 p_req = &mbx->req_virt->tunn_param_update; 2508 2509 if (!ecore_iov_pf_validate_tunn_param(p_req)) { 2510 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 2511 "No tunnel update requested by VF\n"); 2512 status = PFVF_STATUS_FAILURE; 2513 goto send_resp; 2514 } 2515 2516 tunn.b_update_rx_cls = p_req->update_tun_cls; 2517 tunn.b_update_tx_cls = p_req->update_tun_cls; 2518 2519 ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port, 2520 ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss, 2521 p_req->update_vxlan_port, 2522 p_req->vxlan_port); 2523 ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port, 2524 ECORE_MODE_L2GENEVE_TUNN, 2525 p_req->l2geneve_clss, 2526 p_req->update_geneve_port, 2527 p_req->geneve_port); 2528 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve, 2529 ECORE_MODE_IPGENEVE_TUNN, 2530 p_req->ipgeneve_clss); 2531 __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre, 2532 ECORE_MODE_L2GRE_TUNN, 2533 p_req->l2gre_clss); 2534 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre, 2535 ECORE_MODE_IPGRE_TUNN, 2536 p_req->ipgre_clss); 2537 2538 /* If PF modifies the VF's req then it should 2539 * still return an error in case of a partial configuration 2540 * or a modified configuration, as opposed to the requested one. 2541 */ 2542 rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask, 2543 &b_update_required, &tunn); 2544 2545 if (rc != ECORE_SUCCESS) 2546 status = PFVF_STATUS_FAILURE; 2547 2548 /* Does the ECORE client want to update anything? */ 2549 if (b_update_required) { 2550 u16 geneve_port; 2551 2552 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn, 2553 ECORE_SPQ_MODE_EBLOCK, 2554 OSAL_NULL); 2555 if (rc != ECORE_SUCCESS) 2556 status = PFVF_STATUS_FAILURE; 2557 2558 geneve_port = p_tun->geneve_port.port; 2559 ecore_for_each_vf(p_hwfn, i) { 2560 ecore_iov_bulletin_set_udp_ports(p_hwfn, i, 2561 p_tun->vxlan_port.port, 2562 geneve_port); 2563 } 2564 } 2565 2566 send_resp: 2567 p_resp = ecore_add_tlv(&mbx->offset, 2568 CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp)); 2569 2570 ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask); 2571 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END, 2572 sizeof(struct channel_list_end_tlv)); 2573 2574 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status); 2575 } 2576 2577 static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn, 2578 struct ecore_ptt *p_ptt, 2579 struct ecore_vf_info *p_vf, 2580 u32 cid, 2581 u8 status) 2582 { 2583 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx; 2584 struct pfvf_start_queue_resp_tlv *p_tlv; 2585 bool b_legacy = false; 2586 u16 length; 2587 2588 mbx->offset = (u8 *)mbx->reply_virt; 2589 2590 /* Taking a bigger struct instead of adding a TLV to list was a 2591 * mistake, but one which we're now stuck with, as some older 2592 * clients assume the size of the previous response.
2593 */ 2594 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == 2595 ETH_HSI_VER_NO_PKT_LEN_TUNN) 2596 b_legacy = true; 2597 2598 if (!b_legacy) 2599 length = sizeof(*p_tlv); 2600 else 2601 length = sizeof(struct pfvf_def_resp_tlv); 2602 2603 p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_TXQ, length); 2604 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END, 2605 sizeof(struct channel_list_end_tlv)); 2606 2607 /* Update the TLV with the response */ 2608 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) 2609 p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY); 2610 2611 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status); 2612 } 2613 2614 static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn, 2615 struct ecore_ptt *p_ptt, 2616 struct ecore_vf_info *vf) 2617 { 2618 struct ecore_queue_start_common_params params; 2619 struct ecore_queue_cid_vf_params vf_params; 2620 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; 2621 u8 status = PFVF_STATUS_NO_RESOURCE; 2622 struct ecore_vf_queue *p_queue; 2623 struct vfpf_start_txq_tlv *req; 2624 struct ecore_queue_cid *p_cid; 2625 struct ecore_sb_info sb_dummy; 2626 u8 qid_usage_idx, vf_legacy; 2627 u32 cid = 0; 2628 enum _ecore_status_t rc; 2629 u16 pq; 2630 2631 OSAL_MEMSET(&params, 0, sizeof(params)); 2632 req = &mbx->req_virt->start_txq; 2633 2634 if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid, 2635 ECORE_IOV_VALIDATE_Q_NA) || 2636 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb)) 2637 goto out; 2638 2639 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true); 2640 if (qid_usage_idx == ECORE_IOV_QID_INVALID) 2641 goto out; 2642 2643 p_queue = &vf->vf_queues[req->tx_qid]; 2644 if (p_queue->cids[qid_usage_idx].p_cid) 2645 goto out; 2646 2647 vf_legacy = ecore_vf_calculate_legacy(vf); 2648 2649 /* Acquire a new queue-cid */ 2650 params.queue_id = p_queue->fw_tx_qid; 2651 params.vport_id = vf->vport_id; 2652 params.stats_id = vf->abs_vf_id + 0x10; 2653 2654 /* Since IGU index is passed via sb_info, construct a dummy one */ 2655 OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy)); 2656 sb_dummy.igu_sb_id = req->hw_sb; 2657 params.p_sb = &sb_dummy; 2658 params.sb_idx = req->sb_index; 2659 2660 OSAL_MEM_ZERO(&vf_params, sizeof(vf_params)); 2661 vf_params.vfid = vf->relative_vf_id; 2662 vf_params.vf_qid = (u8)req->tx_qid; 2663 vf_params.vf_legacy = vf_legacy; 2664 vf_params.qid_usage_idx = qid_usage_idx; 2665 2666 p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid, 2667 &params, false, &vf_params); 2668 if (p_cid == OSAL_NULL) 2669 goto out; 2670 2671 pq = ecore_get_cm_pq_idx_vf(p_hwfn, 2672 vf->relative_vf_id); 2673 rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid, 2674 req->pbl_addr, req->pbl_size, pq); 2675 if (rc != ECORE_SUCCESS) { 2676 status = PFVF_STATUS_FAILURE; 2677 ecore_eth_queue_cid_release(p_hwfn, p_cid); 2678 } else { 2679 status = PFVF_STATUS_SUCCESS; 2680 p_queue->cids[qid_usage_idx].p_cid = p_cid; 2681 p_queue->cids[qid_usage_idx].b_is_tx = true; 2682 cid = p_cid->cid; 2683 } 2684 2685 out: 2686 ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, 2687 cid, status); 2688 } 2689 2690 static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn, 2691 struct ecore_vf_info *vf, 2692 u16 rxq_id, 2693 u8 qid_usage_idx, 2694 bool cqe_completion) 2695 { 2696 struct ecore_vf_queue *p_queue; 2697 enum _ecore_status_t rc = ECORE_SUCCESS; 2698 2699 if (!ecore_iov_validate_rxq(p_hwfn, vf, rxq_id, 2700 ECORE_IOV_VALIDATE_Q_NA)) { 2701 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 2702 "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n", 2703
vf->relative_vf_id, rxq_id, qid_usage_idx); 2704 return ECORE_INVAL; 2705 } 2706 2707 p_queue = &vf->vf_queues[rxq_id]; 2708 2709 /* We've validated the index and the existence of the active RXQ - 2710 * now we need to make sure that it's using the correct qid. 2711 */ 2712 if (!p_queue->cids[qid_usage_idx].p_cid || 2713 p_queue->cids[qid_usage_idx].b_is_tx) { 2714 struct ecore_queue_cid *p_cid; 2715 2716 p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue); 2717 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 2718 "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n", 2719 vf->relative_vf_id, rxq_id, qid_usage_idx, 2720 rxq_id, p_cid->qid_usage_idx); 2721 return ECORE_INVAL; 2722 } 2723 2724 /* Now that we know we have a valid Rx-queue - close it */ 2725 rc = ecore_eth_rx_queue_stop(p_hwfn, 2726 p_queue->cids[qid_usage_idx].p_cid, 2727 false, cqe_completion); 2728 if (rc != ECORE_SUCCESS) 2729 return rc; 2730 2731 p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL; 2732 vf->num_active_rxqs--; 2733 2734 return ECORE_SUCCESS; 2735 } 2736 2737 static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn, 2738 struct ecore_vf_info *vf, 2739 u16 txq_id, 2740 u8 qid_usage_idx) 2741 { 2742 struct ecore_vf_queue *p_queue; 2743 enum _ecore_status_t rc = ECORE_SUCCESS; 2744 2745 if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id, 2746 ECORE_IOV_VALIDATE_Q_NA)) 2747 return ECORE_INVAL; 2748 2749 p_queue = &vf->vf_queues[txq_id]; 2750 if (!p_queue->cids[qid_usage_idx].p_cid || 2751 !p_queue->cids[qid_usage_idx].b_is_tx) 2752 return ECORE_INVAL; 2753 2754 rc = ecore_eth_tx_queue_stop(p_hwfn, 2755 p_queue->cids[qid_usage_idx].p_cid); 2756 if (rc != ECORE_SUCCESS) 2757 return rc; 2758 2759 p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL; 2760 return ECORE_SUCCESS; 2761 } 2762 2763 static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn, 2764 struct ecore_ptt *p_ptt, 2765 struct ecore_vf_info *vf) 2766 { 2767 u16 length = sizeof(struct pfvf_def_resp_tlv); 2768 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; 2769 u8 status = PFVF_STATUS_FAILURE; 2770 struct vfpf_stop_rxqs_tlv *req; 2771 u8 qid_usage_idx; 2772 enum _ecore_status_t rc; 2773 2774 /* Starting with CHANNEL_TLV_QID, it's assumed the 'num_rxqs' 2775 * would be one. Since no older ecore passed multiple queues 2776 * using this API, sanitize on the value. 2777 */ 2778 req = &mbx->req_virt->stop_rxqs; 2779 if (req->num_rxqs != 1) { 2780 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 2781 "Odd; VF[%d] tried stopping multiple Rx queues\n", 2782 vf->relative_vf_id); 2783 status = PFVF_STATUS_NOT_SUPPORTED; 2784 goto out; 2785 } 2786 2787 /* Find which qid-index is associated with the queue */ 2788 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false); 2789 if (qid_usage_idx == ECORE_IOV_QID_INVALID) 2790 goto out; 2791 2792 rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid, 2793 qid_usage_idx, req->cqe_completion); 2794 if (rc == ECORE_SUCCESS) 2795 status = PFVF_STATUS_SUCCESS; 2796 out: 2797 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS, 2798 length, status); 2799 } 2800 2801 static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn, 2802 struct ecore_ptt *p_ptt, 2803 struct ecore_vf_info *vf) 2804 { 2805 u16 length = sizeof(struct pfvf_def_resp_tlv); 2806 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; 2807 u8 status = PFVF_STATUS_FAILURE; 2808 struct vfpf_stop_txqs_tlv *req; 2809 u8 qid_usage_idx; 2810 enum _ecore_status_t rc; 2811 2812 /* Starting with CHANNEL_TLV_QID, it's assumed the 'num_txqs' 2813 * would be one.
Since no older ecore passed multiple queues 2814 * using this API, sanitize on the value. 2815 */ 2816 req = &mbx->req_virt->stop_txqs; 2817 if (req->num_txqs != 1) { 2818 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 2819 "Odd; VF[%d] tried stopping multiple Tx queues\n", 2820 vf->relative_vf_id); 2821 status = PFVF_STATUS_NOT_SUPPORTED; 2822 goto out; 2823 } 2824 2825 /* Find which qid-index is associated with the queue */ 2826 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true); 2827 if (qid_usage_idx == ECORE_IOV_QID_INVALID) 2828 goto out; 2829 2830 rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, 2831 qid_usage_idx); 2832 if (rc == ECORE_SUCCESS) 2833 status = PFVF_STATUS_SUCCESS; 2834 2835 out: 2836 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS, 2837 length, status); 2838 } 2839 2840 static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn, 2841 struct ecore_ptt *p_ptt, 2842 struct ecore_vf_info *vf) 2843 { 2844 struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF]; 2845 u16 length = sizeof(struct pfvf_def_resp_tlv); 2846 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; 2847 struct vfpf_update_rxq_tlv *req; 2848 u8 status = PFVF_STATUS_FAILURE; 2849 u8 complete_event_flg; 2850 u8 complete_cqe_flg; 2851 u8 qid_usage_idx; 2852 enum _ecore_status_t rc; 2853 u16 i; 2854 2855 req = &mbx->req_virt->update_rxq; 2856 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG); 2857 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG); 2858 2859 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false); 2860 if (qid_usage_idx == ECORE_IOV_QID_INVALID) 2861 goto out; 2862 2863 /* Starting with the addition of CHANNEL_TLV_QID, this API started 2864 * expecting a single queue at a time. Validate this. 2865 */ 2866 if ((vf->acquire.vfdev_info.capabilities & 2867 VFPF_ACQUIRE_CAP_QUEUE_QIDS) && 2868 req->num_rxqs != 1) { 2869 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 2870 "VF[%d] supports QIDs but sends multiple queues\n", 2871 vf->relative_vf_id); 2872 goto out; 2873 } 2874 2875 /* Validate inputs - for the legacy case this is still true since 2876 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX. 
2877 */ 2878 for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) { 2879 if (!ecore_iov_validate_rxq(p_hwfn, vf, i, 2880 ECORE_IOV_VALIDATE_Q_NA) || 2881 !vf->vf_queues[i].cids[qid_usage_idx].p_cid || 2882 vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) { 2883 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 2884 "VF[%d]: Incorrect Rxqs [%04x, %02x]\n", 2885 vf->relative_vf_id, req->rx_qid, 2886 req->num_rxqs); 2887 goto out; 2888 } 2889 } 2890 2891 for (i = 0; i < req->num_rxqs; i++) { 2892 u16 qid = req->rx_qid + i; 2893 2894 handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid; 2895 } 2896 2897 rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers, 2898 req->num_rxqs, 2899 complete_cqe_flg, 2900 complete_event_flg, 2901 ECORE_SPQ_MODE_EBLOCK, 2902 OSAL_NULL); 2903 if (rc != ECORE_SUCCESS) 2904 goto out; 2905 2906 status = PFVF_STATUS_SUCCESS; 2907 out: 2908 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ, 2909 length, status); 2910 } 2911 2912 void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn, 2913 void *p_tlvs_list, u16 req_type) 2914 { 2915 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list; 2916 int len = 0; 2917 2918 do { 2919 if (!p_tlv->length) { 2920 DP_NOTICE(p_hwfn, true, 2921 "Zero length TLV found\n"); 2922 return OSAL_NULL; 2923 } 2924 2925 if (p_tlv->type == req_type) { 2926 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 2927 "Extended tlv type %s, length %d found\n", 2928 ecore_channel_tlvs_string[p_tlv->type], 2929 p_tlv->length); 2930 return p_tlv; 2931 } 2932 2933 len += p_tlv->length; 2934 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length); 2935 2936 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) { 2937 DP_NOTICE(p_hwfn, true, 2938 "TLVs have overrun the buffer size\n"); 2939 return OSAL_NULL; 2940 } 2941 } while (p_tlv->type != CHANNEL_TLV_LIST_END); 2942 2943 return OSAL_NULL; 2944 } 2945 2946 static void 2947 ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn, 2948 struct ecore_sp_vport_update_params *p_data, 2949 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2950 { 2951 struct vfpf_vport_update_activate_tlv *p_act_tlv; 2952 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; 2953 2954 p_act_tlv = (struct vfpf_vport_update_activate_tlv *) 2955 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, 2956 tlv); 2957 if (!p_act_tlv) 2958 return; 2959 2960 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx; 2961 p_data->vport_active_rx_flg = p_act_tlv->active_rx; 2962 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx; 2963 p_data->vport_active_tx_flg = p_act_tlv->active_tx; 2964 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE; 2965 } 2966 2967 static void 2968 ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn, 2969 struct ecore_sp_vport_update_params *p_data, 2970 struct ecore_vf_info *p_vf, 2971 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2972 { 2973 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv; 2974 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; 2975 2976 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *) 2977 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, 2978 tlv); 2979 if (!p_vlan_tlv) 2980 return; 2981 2982 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan; 2983 2984 /* Ignore the VF request if we're forcing a vlan */ 2985 if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) { 2986 p_data->update_inner_vlan_removal_flg = 1; 2987 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan; 2988 } 2989 2990 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP; 2991 } 2992 2993
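/* Each ecore_iov_vp_update_*() handler above and below follows the same
 * pattern: locate its extended TLV in the request via
 * ecore_iov_search_list_tlvs(), copy the fields into the vport-update
 * params and set the matching bit in tlvs_mask.  The search itself is a
 * plain walk over a length-prefixed TLV chain; a rough standalone sketch
 * of that walk, where the tlv_hdr layout and the LIST_END constant are
 * illustrative stand-ins for the real channel definitions:
 *
 *	struct tlv_hdr { u16 type; u16 length; };
 *
 *	static struct tlv_hdr *find_tlv(u8 *buf, u16 want, u16 buf_size)
 *	{
 *		struct tlv_hdr *tlv;
 *		u16 off = 0;
 *
 *		while (off + sizeof(*tlv) <= buf_size) {
 *			tlv = (struct tlv_hdr *)(buf + off);
 *			if (!tlv->length || tlv->type == LIST_END)
 *				return NULL;
 *			if (tlv->type == want)
 *				return tlv;
 *			off += tlv->length;
 *		}
 *		return NULL;
 *	}
 *
 * The driver version additionally logs the TLV name and guards against
 * the chain overrunning TLV_BUFFER_SIZE.
 */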
static void 2994 ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn, 2995 struct ecore_sp_vport_update_params *p_data, 2996 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2997 { 2998 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; 2999 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; 3000 3001 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *) 3002 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, 3003 tlv); 3004 if (!p_tx_switch_tlv) 3005 return; 3006 3007 #ifndef ASIC_ONLY 3008 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 3009 DP_NOTICE(p_hwfn, false, "FPGA: Ignore tx-switching configuration originating from VFs\n"); 3010 return; 3011 } 3012 #endif 3013 3014 p_data->update_tx_switching_flg = 1; 3015 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching; 3016 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH; 3017 } 3018 3019 static void 3020 ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn, 3021 struct ecore_sp_vport_update_params *p_data, 3022 struct ecore_iov_vf_mbx *p_mbx, 3023 u16 *tlvs_mask) 3024 { 3025 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; 3026 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST; 3027 3028 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *) 3029 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, 3030 tlv); 3031 if (!p_mcast_tlv) 3032 return; 3033 3034 p_data->update_approx_mcast_flg = 1; 3035 OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins, 3036 sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); 3037 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST; 3038 } 3039 3040 static void 3041 ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn, 3042 struct ecore_sp_vport_update_params *p_data, 3043 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 3044 { 3045 struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags; 3046 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; 3047 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; 3048 3049 p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *) 3050 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, 3051 tlv); 3052 if (!p_accept_tlv) 3053 return; 3054 3055 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode; 3056 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter; 3057 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode; 3058 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter; 3059 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM; 3060 } 3061 3062 static void 3063 ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn, 3064 struct ecore_sp_vport_update_params *p_data, 3065 struct ecore_iov_vf_mbx *p_mbx, 3066 u16 *tlvs_mask) 3067 { 3068 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan; 3069 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; 3070 3071 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *) 3072 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, 3073 tlv); 3074 if (!p_accept_any_vlan) 3075 return; 3076 3077 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan; 3078 p_data->update_accept_any_vlan_flg = 3079 p_accept_any_vlan->update_accept_any_vlan_flg; 3080 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN; 3081 } 3082 3083 static void 3084 ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn, 3085 struct ecore_vf_info *vf, 3086 struct ecore_sp_vport_update_params *p_data, 3087 struct ecore_rss_params *p_rss, 3088 struct ecore_iov_vf_mbx *p_mbx, 3089 u16 *tlvs_mask, u16 *tlvs_accepted) 3090 { 3091 struct vfpf_vport_update_rss_tlv *p_rss_tlv; 3092 u16 tlv = 
CHANNEL_TLV_VPORT_UPDATE_RSS; 3093 bool b_reject = false; 3094 u16 table_size; 3095 u16 i, q_idx; 3096 3097 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *) 3098 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, 3099 tlv); 3100 if (!p_rss_tlv) { 3101 p_data->rss_params = OSAL_NULL; 3102 return; 3103 } 3104 3105 OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params)); 3106 3107 p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags & 3108 VFPF_UPDATE_RSS_CONFIG_FLAG); 3109 p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags & 3110 VFPF_UPDATE_RSS_CAPS_FLAG); 3111 p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags & 3112 VFPF_UPDATE_RSS_IND_TABLE_FLAG); 3113 p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags & 3114 VFPF_UPDATE_RSS_KEY_FLAG); 3115 3116 p_rss->rss_enable = p_rss_tlv->rss_enable; 3117 p_rss->rss_eng_id = vf->rss_eng_id; 3118 p_rss->rss_caps = p_rss_tlv->rss_caps; 3119 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log; 3120 OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key, 3121 sizeof(p_rss->rss_key)); 3122 3123 table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table), 3124 (1 << p_rss_tlv->rss_table_size_log)); 3125 3126 for (i = 0; i < table_size; i++) { 3127 struct ecore_queue_cid *p_cid; 3128 3129 q_idx = p_rss_tlv->rss_ind_table[i]; 3130 if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx, 3131 ECORE_IOV_VALIDATE_Q_ENABLE)) { 3132 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 3133 "VF[%d]: Omitting RSS due to wrong queue %04x\n", 3134 vf->relative_vf_id, q_idx); 3135 b_reject = true; 3136 goto out; 3137 } 3138 3139 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]); 3140 p_rss->rss_ind_table[i] = p_cid; 3141 } 3142 3143 p_data->rss_params = p_rss; 3144 out: 3145 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS; 3146 if (!b_reject) 3147 *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS; 3148 } 3149 3150 static void 3151 ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn, 3152 struct ecore_sp_vport_update_params *p_data, 3153 struct ecore_sge_tpa_params *p_sge_tpa, 3154 struct ecore_iov_vf_mbx *p_mbx, 3155 u16 *tlvs_mask) 3156 { 3157 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv; 3158 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; 3159 3160 p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *) 3161 ecore_iov_search_list_tlvs(p_hwfn, 3162 p_mbx->req_virt, tlv); 3163 3164 if (!p_sge_tpa_tlv) { 3165 p_data->sge_tpa_params = OSAL_NULL; 3166 return; 3167 } 3168 3169 OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params)); 3170 3171 p_sge_tpa->update_tpa_en_flg = 3172 !!(p_sge_tpa_tlv->update_sge_tpa_flags & 3173 VFPF_UPDATE_TPA_EN_FLAG); 3174 p_sge_tpa->update_tpa_param_flg = 3175 !!(p_sge_tpa_tlv->update_sge_tpa_flags & 3176 VFPF_UPDATE_TPA_PARAM_FLAG); 3177 3178 p_sge_tpa->tpa_ipv4_en_flg = 3179 !!(p_sge_tpa_tlv->sge_tpa_flags & 3180 VFPF_TPA_IPV4_EN_FLAG); 3181 p_sge_tpa->tpa_ipv6_en_flg = 3182 !!(p_sge_tpa_tlv->sge_tpa_flags & 3183 VFPF_TPA_IPV6_EN_FLAG); 3184 p_sge_tpa->tpa_pkt_split_flg = 3185 !!(p_sge_tpa_tlv->sge_tpa_flags & 3186 VFPF_TPA_PKT_SPLIT_FLAG); 3187 p_sge_tpa->tpa_hdr_data_split_flg = 3188 !!(p_sge_tpa_tlv->sge_tpa_flags & 3189 VFPF_TPA_HDR_DATA_SPLIT_FLAG); 3190 p_sge_tpa->tpa_gro_consistent_flg = 3191 !!(p_sge_tpa_tlv->sge_tpa_flags & 3192 VFPF_TPA_GRO_CONSIST_FLAG); 3193 3194 p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num; 3195 p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size; 3196 p_sge_tpa->tpa_min_size_to_start = 3197 p_sge_tpa_tlv->tpa_min_size_to_start; 3198 
p_sge_tpa->tpa_min_size_to_cont = 3199 p_sge_tpa_tlv->tpa_min_size_to_cont; 3200 p_sge_tpa->max_buffers_per_cqe = 3201 p_sge_tpa_tlv->max_buffers_per_cqe; 3202 3203 p_data->sge_tpa_params = p_sge_tpa; 3204 3205 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA; 3206 } 3207 3208 static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn, 3209 struct ecore_ptt *p_ptt, 3210 struct ecore_vf_info *vf) 3211 { 3212 struct ecore_rss_params *p_rss_params = OSAL_NULL; 3213 struct ecore_sp_vport_update_params params; 3214 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; 3215 struct ecore_sge_tpa_params sge_tpa_params; 3216 u16 tlvs_mask = 0, tlvs_accepted = 0; 3217 u8 status = PFVF_STATUS_SUCCESS; 3218 u16 length; 3219 enum _ecore_status_t rc; 3220 3221 /* Validate PF can send such a request */ 3222 if (!vf->vport_instance) { 3223 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 3224 "No VPORT instance available for VF[%d], failing vport update\n", 3225 vf->abs_vf_id); 3226 status = PFVF_STATUS_FAILURE; 3227 goto out; 3228 } 3229 3230 p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params)); 3231 if (p_rss_params == OSAL_NULL) { 3232 status = PFVF_STATUS_FAILURE; 3233 goto out; 3234 } 3235 3236 OSAL_MEMSET(&params, 0, sizeof(params)); 3237 params.opaque_fid = vf->opaque_fid; 3238 params.vport_id = vf->vport_id; 3239 params.rss_params = OSAL_NULL; 3240 3241 /* Search for extended tlvs list and update values 3242 * from VF in struct ecore_sp_vport_update_params. 3243 */ 3244 ecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask); 3245 ecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask); 3246 ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask); 3247 ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask); 3248 ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask); 3249 ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask); 3250 ecore_iov_vp_update_sge_tpa_param(p_hwfn, &params, 3251 &sge_tpa_params, mbx, &tlvs_mask); 3252 3253 tlvs_accepted = tlvs_mask; 3254 3255 /* Some of the extended TLVs need to be validated first; In that case, 3256 * they can update the mask without updating the accepted [so that 3257 * PF could communicate to VF it has rejected request]. 3258 */ 3259 ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params, 3260 mbx, &tlvs_mask, &tlvs_accepted); 3261 3262 /* Just log a message if there is not a single extended tlv in the buffer. 3263 * Once all features of the vport update ramrod are requested by the VF 3264 * as extended TLVs in the buffer, an error can be returned in the response 3265 * if there is no extended TLV present in the buffer.
3266 */ 3267 if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id, 3268 &params, &tlvs_accepted) != 3269 ECORE_SUCCESS) { 3270 tlvs_accepted = 0; 3271 status = PFVF_STATUS_NOT_SUPPORTED; 3272 goto out; 3273 } 3274 3275 if (!tlvs_accepted) { 3276 if (tlvs_mask) 3277 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 3278 "Upper-layer prevents said VF configuration\n"); 3279 else 3280 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 3281 "No feature tlvs found for vport update\n"); 3282 status = PFVF_STATUS_NOT_SUPPORTED; 3283 goto out; 3284 } 3285 3286 rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK, 3287 OSAL_NULL); 3288 3289 if (rc) 3290 status = PFVF_STATUS_FAILURE; 3291 3292 out: 3293 OSAL_VFREE(p_hwfn->p_dev, p_rss_params); 3294 length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status, 3295 tlvs_mask, tlvs_accepted); 3296 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status); 3297 } 3298 3299 static enum _ecore_status_t ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn, 3300 struct ecore_vf_info *p_vf, 3301 struct ecore_filter_ucast *p_params) 3302 { 3303 int i; 3304 3305 /* First remove entries and then add new ones */ 3306 if (p_params->opcode == ECORE_FILTER_REMOVE) { 3307 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) 3308 if (p_vf->shadow_config.vlans[i].used && 3309 p_vf->shadow_config.vlans[i].vid == 3310 p_params->vlan) { 3311 p_vf->shadow_config.vlans[i].used = false; 3312 break; 3313 } 3314 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) { 3315 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 3316 "VF [%d] - Tries to remove a non-existing vlan\n", 3317 p_vf->relative_vf_id); 3318 return ECORE_INVAL; 3319 } 3320 } else if (p_params->opcode == ECORE_FILTER_REPLACE || 3321 p_params->opcode == ECORE_FILTER_FLUSH) { 3322 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) 3323 p_vf->shadow_config.vlans[i].used = false; 3324 } 3325 3326 /* In forced mode, we're willing to remove entries - but we don't add 3327 * new ones. 3328 */ 3329 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)) 3330 return ECORE_SUCCESS; 3331 3332 if (p_params->opcode == ECORE_FILTER_ADD || 3333 p_params->opcode == ECORE_FILTER_REPLACE) { 3334 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) { 3335 if (p_vf->shadow_config.vlans[i].used) 3336 continue; 3337 3338 p_vf->shadow_config.vlans[i].used = true; 3339 p_vf->shadow_config.vlans[i].vid = p_params->vlan; 3340 break; 3341 } 3342 3343 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) { 3344 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 3345 "VF [%d] - Tries to configure more than %d vlan filters\n", 3346 p_vf->relative_vf_id, 3347 ECORE_ETH_VF_NUM_VLAN_FILTERS + 1); 3348 return ECORE_INVAL; 3349 } 3350 } 3351 3352 return ECORE_SUCCESS; 3353 } 3354 3355 static enum _ecore_status_t ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn, 3356 struct ecore_vf_info *p_vf, 3357 struct ecore_filter_ucast *p_params) 3358 { 3359 char empty_mac[ETH_ALEN]; 3360 int i; 3361 3362 OSAL_MEM_ZERO(empty_mac, ETH_ALEN); 3363 3364 /* If we're in forced-mode, we don't allow any change */ 3365 /* TODO - this would change if we were ever to implement logic for 3366 * removing a forced MAC altogether [in which case, like for vlans, 3367 * we should be able to re-trace previous configuration.
3368 */ 3369 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) 3370 return ECORE_SUCCESS; 3371 3372 /* First remove entries and then add new ones */ 3373 if (p_params->opcode == ECORE_FILTER_REMOVE) { 3374 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) { 3375 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i], 3376 p_params->mac, ETH_ALEN)) { 3377 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], 3378 ETH_ALEN); 3379 break; 3380 } 3381 } 3382 3383 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) { 3384 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 3385 "MAC isn't configured\n"); 3386 return ECORE_INVAL; 3387 } 3388 } else if (p_params->opcode == ECORE_FILTER_REPLACE || 3389 p_params->opcode == ECORE_FILTER_FLUSH) { 3390 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) 3391 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN); 3392 } 3393 3394 /* List the new MAC address */ 3395 if (p_params->opcode != ECORE_FILTER_ADD && 3396 p_params->opcode != ECORE_FILTER_REPLACE) 3397 return ECORE_SUCCESS; 3398 3399 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) { 3400 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i], 3401 empty_mac, ETH_ALEN)) { 3402 OSAL_MEMCPY(p_vf->shadow_config.macs[i], 3403 p_params->mac, ETH_ALEN); 3404 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 3405 "Added MAC at %d entry in shadow\n", i); 3406 break; 3407 } 3408 } 3409 3410 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) { 3411 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 3412 "No available place for MAC\n"); 3413 return ECORE_INVAL; 3414 } 3415 3416 return ECORE_SUCCESS; 3417 } 3418 3419 static enum _ecore_status_t 3420 ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn, 3421 struct ecore_vf_info *p_vf, 3422 struct ecore_filter_ucast *p_params) 3423 { 3424 enum _ecore_status_t rc = ECORE_SUCCESS; 3425 3426 if (p_params->type == ECORE_FILTER_MAC) { 3427 rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params); 3428 if (rc != ECORE_SUCCESS) 3429 return rc; 3430 } 3431 3432 if (p_params->type == ECORE_FILTER_VLAN) 3433 rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params); 3434 3435 return rc; 3436 } 3437 3438 static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn, 3439 struct ecore_ptt *p_ptt, 3440 struct ecore_vf_info *vf) 3441 { 3442 struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt; 3443 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; 3444 struct vfpf_ucast_filter_tlv *req; 3445 u8 status = PFVF_STATUS_SUCCESS; 3446 struct ecore_filter_ucast params; 3447 enum _ecore_status_t rc; 3448 3449 /* Prepare the unicast filter params */ 3450 OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast)); 3451 req = &mbx->req_virt->ucast_filter; 3452 params.opcode = (enum ecore_filter_opcode)req->opcode; 3453 params.type = (enum ecore_filter_ucast_type)req->type; 3454 3455 /* @@@TBD - We might need logic on HV side in determining this */ 3456 params.is_rx_filter = 1; 3457 params.is_tx_filter = 1; 3458 params.vport_to_remove_from = vf->vport_id; 3459 params.vport_to_add_to = vf->vport_id; 3460 OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN); 3461 params.vlan = req->vlan; 3462 3463 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 3464 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n", 3465 vf->abs_vf_id, params.opcode, params.type, 3466 params.is_rx_filter ? "RX" : "", 3467 params.is_tx_filter ?
"TX" : "", 3468 params.vport_to_add_to, 3469 params.mac[0], params.mac[1], params.mac[2], 3470 params.mac[3], params.mac[4], params.mac[5], params.vlan); 3471 3472 if (!vf->vport_instance) { 3473 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 3474 "No VPORT instance available for VF[%d], failing ucast MAC configuration\n", 3475 vf->abs_vf_id); 3476 status = PFVF_STATUS_FAILURE; 3477 goto out; 3478 } 3479 3480 /* Update shadow copy of the VF configuration. In case the shadow indicates 3481 * the action should be blocked, return success to the VF to imitate the 3482 * firmware behaviour in such a case. 3483 */ 3484 if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) != 3485 ECORE_SUCCESS) 3486 goto out; 3487 3488 /* Determine if the unicast filtering is acceptable by PF */ 3489 if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) && 3490 (params.type == ECORE_FILTER_VLAN || 3491 params.type == ECORE_FILTER_MAC_VLAN)) { 3492 /* Once VLAN is forced or PVID is set, do not allow 3493 * to add/replace any further VLANs. 3494 */ 3495 if (params.opcode == ECORE_FILTER_ADD || 3496 params.opcode == ECORE_FILTER_REPLACE) 3497 status = PFVF_STATUS_FORCED; 3498 goto out; 3499 } 3500 3501 if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) && 3502 (params.type == ECORE_FILTER_MAC || 3503 params.type == ECORE_FILTER_MAC_VLAN)) { 3504 if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) || 3505 (params.opcode != ECORE_FILTER_ADD && 3506 params.opcode != ECORE_FILTER_REPLACE)) 3507 status = PFVF_STATUS_FORCED; 3508 goto out; 3509 } 3510 3511 rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params); 3512 if (rc == ECORE_EXISTS) { 3513 goto out; 3514 } else if (rc == ECORE_INVAL) { 3515 status = PFVF_STATUS_FAILURE; 3516 goto out; 3517 } 3518 3519 rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params, 3520 ECORE_SPQ_MODE_CB, OSAL_NULL); 3521 if (rc) 3522 status = PFVF_STATUS_FAILURE; 3523 3524 out: 3525 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER, 3526 sizeof(struct pfvf_def_resp_tlv), status); 3527 } 3528 3529 static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn, 3530 struct ecore_ptt *p_ptt, 3531 struct ecore_vf_info *vf) 3532 { 3533 int i; 3534 3535 /* Reset the SBs */ 3536 for (i = 0; i < vf->num_sbs; i++) 3537 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, 3538 vf->igu_sbs[i], 3539 vf->opaque_fid, false); 3540 3541 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP, 3542 sizeof(struct pfvf_def_resp_tlv), 3543 PFVF_STATUS_SUCCESS); 3544 } 3545 3546 static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn, 3547 struct ecore_ptt *p_ptt, 3548 struct ecore_vf_info *vf) 3549 { 3550 u16 length = sizeof(struct pfvf_def_resp_tlv); 3551 u8 status = PFVF_STATUS_SUCCESS; 3552 3553 /* Disable Interrupts for VF */ 3554 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0); 3555 3556 /* Reset Permission table */ 3557 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0); 3558 3559 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE, 3560 length, status); 3561 } 3562 3563 static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn, 3564 struct ecore_ptt *p_ptt, 3565 struct ecore_vf_info *p_vf) 3566 { 3567 u16 length = sizeof(struct pfvf_def_resp_tlv); 3568 u8 status = PFVF_STATUS_SUCCESS; 3569 enum _ecore_status_t rc = ECORE_SUCCESS; 3570 3571 ecore_iov_vf_cleanup(p_hwfn, p_vf); 3572 3573 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) { 3574 /* Stopping the VF */ 3575 rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid, 3576 p_vf->opaque_fid); 3577 3578 if (rc
!= ECORE_SUCCESS) { 3579 DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n", 3580 rc); 3581 status = PFVF_STATUS_FAILURE; 3582 } 3583 3584 p_vf->state = VF_STOPPED; 3585 } 3586 3587 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE, 3588 length, status); 3589 } 3590 3591 static void ecore_iov_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn, 3592 struct ecore_ptt *p_ptt, 3593 struct ecore_vf_info *p_vf) 3594 { 3595 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx; 3596 struct pfvf_read_coal_resp_tlv *p_resp; 3597 struct vfpf_read_coal_req_tlv *req; 3598 u8 status = PFVF_STATUS_FAILURE; 3599 struct ecore_vf_queue *p_queue; 3600 struct ecore_queue_cid *p_cid; 3601 enum _ecore_status_t rc = ECORE_SUCCESS; 3602 u16 coal = 0, qid, i; 3603 bool b_is_rx; 3604 3605 mbx->offset = (u8 *)mbx->reply_virt; 3606 req = &mbx->req_virt->read_coal_req; 3607 3608 qid = req->qid; 3609 b_is_rx = req->is_rx ? true : false; 3610 3611 if (b_is_rx) { 3612 if (!ecore_iov_validate_rxq(p_hwfn, p_vf, qid, 3613 ECORE_IOV_VALIDATE_Q_ENABLE)) { 3614 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 3615 "VF[%d]: Invalid Rx queue_id = %d\n", 3616 p_vf->abs_vf_id, qid); 3617 goto send_resp; 3618 } 3619 3620 p_cid = ecore_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]); 3621 rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal); 3622 if (rc != ECORE_SUCCESS) 3623 goto send_resp; 3624 } else { 3625 if (!ecore_iov_validate_txq(p_hwfn, p_vf, qid, 3626 ECORE_IOV_VALIDATE_Q_ENABLE)) { 3627 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 3628 "VF[%d]: Invalid Tx queue_id = %d\n", 3629 p_vf->abs_vf_id, qid); 3630 goto send_resp; 3631 } 3632 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { 3633 p_queue = &p_vf->vf_queues[qid]; 3634 if ((p_queue->cids[i].p_cid == OSAL_NULL) || 3635 (!p_queue->cids[i].b_is_tx)) 3636 continue; 3637 3638 p_cid = p_queue->cids[i].p_cid; 3639 3640 rc = ecore_get_txq_coalesce(p_hwfn, p_ptt, 3641 p_cid, &coal); 3642 if (rc != ECORE_SUCCESS) 3643 goto send_resp; 3644 break; 3645 } 3646 } 3647 3648 status = PFVF_STATUS_SUCCESS; 3649 3650 send_resp: 3651 p_resp = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_COALESCE_READ, 3652 sizeof(*p_resp)); 3653 p_resp->coal = coal; 3654 3655 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END, 3656 sizeof(struct channel_list_end_tlv)); 3657 3658 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status); 3659 } 3660 3661 static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn, 3662 struct ecore_ptt *p_ptt, 3663 struct ecore_vf_info *vf) 3664 { 3665 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; 3666 enum _ecore_status_t rc = ECORE_SUCCESS; 3667 struct vfpf_update_coalesce *req; 3668 u8 status = PFVF_STATUS_FAILURE; 3669 struct ecore_queue_cid *p_cid; 3670 u16 rx_coal, tx_coal; 3671 u16 qid; 3672 int i; 3673 3674 req = &mbx->req_virt->update_coalesce; 3675 3676 rx_coal = req->rx_coal; 3677 tx_coal = req->tx_coal; 3678 qid = req->qid; 3679 3680 if (!ecore_iov_validate_rxq(p_hwfn, vf, qid, 3681 ECORE_IOV_VALIDATE_Q_ENABLE) && 3682 rx_coal) { 3683 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n", 3684 vf->abs_vf_id, qid); 3685 goto out; 3686 } 3687 3688 if (!ecore_iov_validate_txq(p_hwfn, vf, qid, 3689 ECORE_IOV_VALIDATE_Q_ENABLE) && 3690 tx_coal) { 3691 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n", 3692 vf->abs_vf_id, qid); 3693 goto out; 3694 } 3695 3696 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 3697 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n", 3698 vf->abs_vf_id, rx_coal, tx_coal, qid); 3699 3700 if (rx_coal) { 3701 p_cid = 
ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]); 3702 3703 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); 3704 if (rc != ECORE_SUCCESS) { 3705 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 3706 "VF[%d]: Unable to set rx queue = %d coalesce\n", 3707 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid); 3708 goto out; 3709 } 3710 vf->rx_coal = rx_coal; 3711 } 3712 3713 /* TODO - in future, it might be possible to pass this in a per-cid 3714 * granularity. For now, do this for all Tx queues. 3715 */ 3716 if (tx_coal) { 3717 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid]; 3718 3719 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { 3720 if (p_queue->cids[i].p_cid == OSAL_NULL) 3721 continue; 3722 3723 if (!p_queue->cids[i].b_is_tx) 3724 continue; 3725 3726 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, 3727 p_queue->cids[i].p_cid); 3728 if (rc != ECORE_SUCCESS) { 3729 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 3730 "VF[%d]: Unable to set tx queue coalesce\n", 3731 vf->abs_vf_id); 3732 goto out; 3733 } 3734 } 3735 vf->tx_coal = tx_coal; 3736 } 3737 3738 status = PFVF_STATUS_SUCCESS; 3739 out: 3740 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE, 3741 sizeof(struct pfvf_def_resp_tlv), status); 3742 } 3743 3744 enum _ecore_status_t 3745 ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn, 3746 u16 rx_coal, u16 tx_coal, 3747 u16 vf_id, u16 qid) 3748 { 3749 struct ecore_queue_cid *p_cid; 3750 struct ecore_vf_info *vf; 3751 struct ecore_ptt *p_ptt; 3752 int i, rc = 0; 3753 3754 if (!ecore_iov_is_valid_vfid(p_hwfn, vf_id, true, true)) { 3755 DP_NOTICE(p_hwfn, true, 3756 "VF[%d] - Can not set coalescing: VF is not active\n", 3757 vf_id); 3758 return ECORE_INVAL; 3759 } 3760 3761 vf = &p_hwfn->pf_iov_info->vfs_array[vf_id]; 3762 p_ptt = ecore_ptt_acquire(p_hwfn); 3763 if (!p_ptt) 3764 return ECORE_AGAIN; 3765 3766 if (!ecore_iov_validate_rxq(p_hwfn, vf, qid, 3767 ECORE_IOV_VALIDATE_Q_ENABLE) && 3768 rx_coal) { 3769 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n", 3770 vf->abs_vf_id, qid); 3771 goto out; 3772 } 3773 3774 if (!ecore_iov_validate_txq(p_hwfn, vf, qid, 3775 ECORE_IOV_VALIDATE_Q_ENABLE) && 3776 tx_coal) { 3777 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n", 3778 vf->abs_vf_id, qid); 3779 goto out; 3780 } 3781 3782 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 3783 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n", 3784 vf->abs_vf_id, rx_coal, tx_coal, qid); 3785 3786 if (rx_coal) { 3787 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]); 3788 3789 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); 3790 if (rc != ECORE_SUCCESS) { 3791 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 3792 "VF[%d]: Unable to set rx queue = %d coalesce\n", 3793 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid); 3794 goto out; 3795 } 3796 vf->rx_coal = rx_coal; 3797 } 3798 3799 /* TODO - in future, it might be possible to pass this in a per-cid 3800 * granularity. For now, do this for all Tx queues. 
3801 */ 3802 if (tx_coal) { 3803 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid]; 3804 3805 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { 3806 if (p_queue->cids[i].p_cid == OSAL_NULL) 3807 continue; 3808 3809 if (!p_queue->cids[i].b_is_tx) 3810 continue; 3811 3812 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, 3813 p_queue->cids[i].p_cid); 3814 if (rc != ECORE_SUCCESS) { 3815 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 3816 "VF[%d]: Unable to set tx queue coalesce\n", 3817 vf->abs_vf_id); 3818 goto out; 3819 } 3820 } 3821 vf->tx_coal = tx_coal; 3822 } 3823 3824 out: 3825 ecore_ptt_release(p_hwfn, p_ptt); 3826 3827 return rc; 3828 } 3829 3830 static enum _ecore_status_t 3831 ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn, 3832 struct ecore_vf_info *p_vf, 3833 struct ecore_ptt *p_ptt) 3834 { 3835 int cnt; 3836 u32 val; 3837 3838 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid); 3839 3840 for (cnt = 0; cnt < 50; cnt++) { 3841 val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT); 3842 if (!val) 3843 break; 3844 OSAL_MSLEEP(20); 3845 } 3846 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); 3847 3848 if (cnt == 50) { 3849 DP_ERR(p_hwfn, "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n", 3850 p_vf->abs_vf_id, val); 3851 return ECORE_TIMEOUT; 3852 } 3853 3854 return ECORE_SUCCESS; 3855 } 3856 3857 static enum _ecore_status_t 3858 ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn, 3859 struct ecore_vf_info *p_vf, 3860 struct ecore_ptt *p_ptt) 3861 { 3862 u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4]; 3863 int i, cnt; 3864 3865 /* Read initial consumers & producers */ 3866 for (i = 0; i < MAX_NUM_VOQS_E4; i++) { 3867 u32 prod; 3868 3869 cons[i] = ecore_rd(p_hwfn, p_ptt, 3870 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + 3871 i * 0x40); 3872 prod = ecore_rd(p_hwfn, p_ptt, 3873 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 + 3874 i * 0x40); 3875 distance[i] = prod - cons[i]; 3876 } 3877 3878 /* Wait for consumers to pass the producers */ 3879 i = 0; 3880 for (cnt = 0; cnt < 50; cnt++) { 3881 for (; i < MAX_NUM_VOQS_E4; i++) { 3882 u32 tmp; 3883 3884 tmp = ecore_rd(p_hwfn, p_ptt, 3885 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + 3886 i * 0x40); 3887 if (distance[i] > tmp - cons[i]) 3888 break; 3889 } 3890 3891 if (i == MAX_NUM_VOQS_E4) 3892 break; 3893 3894 OSAL_MSLEEP(20); 3895 } 3896 3897 if (cnt == 50) { 3898 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n", 3899 p_vf->abs_vf_id, i); 3900 return ECORE_TIMEOUT; 3901 } 3902 3903 return ECORE_SUCCESS; 3904 } 3905 3906 static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn, 3907 struct ecore_vf_info *p_vf, 3908 struct ecore_ptt *p_ptt) 3909 { 3910 enum _ecore_status_t rc; 3911 3912 /* TODO - add SRC and TM polling once we add storage IOV */ 3913 3914 rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt); 3915 if (rc) 3916 return rc; 3917 3918 rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt); 3919 if (rc) 3920 return rc; 3921 3922 return ECORE_SUCCESS; 3923 } 3924 3925 static enum _ecore_status_t 3926 ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn, 3927 struct ecore_ptt *p_ptt, 3928 u16 rel_vf_id, 3929 u32 *ack_vfs) 3930 { 3931 struct ecore_vf_info *p_vf; 3932 enum _ecore_status_t rc = ECORE_SUCCESS; 3933 3934 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false); 3935 if (!p_vf) 3936 return ECORE_SUCCESS; 3937 3938 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] & 3939 (1ULL << (rel_vf_id % 64))) { 3940 u16 vfid = p_vf->abs_vf_id; 3941 3942 /* TODO - should we lock channel? 
*/ 3943 3944 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 3945 "VF[%d] - Handling FLR\n", vfid); 3946 3947 ecore_iov_vf_cleanup(p_hwfn, p_vf); 3948 3949 /* If VF isn't active, no need for anything but SW */ 3950 if (!p_vf->b_init) 3951 goto cleanup; 3952 3953 /* TODO - what to do in case of failure? */ 3954 rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt); 3955 if (rc != ECORE_SUCCESS) 3956 goto cleanup; 3957 3958 rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true); 3959 if (rc) { 3960 /* TODO - what's now? What a mess.... */ 3961 DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", 3962 vfid); 3963 return rc; 3964 } 3965 3966 /* Workaround to make VF-PF channel ready, as FW 3967 * doesn't do that as a part of FLR. 3968 */ 3969 REG_WR(p_hwfn, 3970 GTT_BAR0_MAP_REG_USDM_RAM + 3971 USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1); 3972 3973 /* VF_STOPPED has to be set only after final cleanup 3974 * but prior to re-enabling the VF. 3975 */ 3976 p_vf->state = VF_STOPPED; 3977 3978 rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf); 3979 if (rc) { 3980 /* TODO - again, a mess... */ 3981 DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n", 3982 vfid); 3983 return rc; 3984 } 3985 cleanup: 3986 /* Mark VF for ack and clean pending state */ 3987 if (p_vf->state == VF_RESET) 3988 p_vf->state = VF_STOPPED; 3989 ack_vfs[vfid / 32] |= (1 << (vfid % 32)); 3990 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &= 3991 ~(1ULL << (rel_vf_id % 64)); 3992 p_vf->vf_mbx.b_pending_msg = false; 3993 } 3994 3995 return rc; 3996 } 3997 3998 enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn, 3999 struct ecore_ptt *p_ptt) 4000 4001 { 4002 u32 ack_vfs[VF_MAX_STATIC / 32]; 4003 enum _ecore_status_t rc = ECORE_SUCCESS; 4004 u16 i; 4005 4006 OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32)); 4007 4008 /* Since BRB <-> PRS interface can't be tested as part of the flr 4009 * polling due to HW limitations, simply sleep a bit. And since 4010 * there's no need to wait per-vf, do it before looping.
4011 */ 4012 OSAL_MSLEEP(100); 4013 4014 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) 4015 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs); 4016 4017 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs); 4018 return rc; 4019 } 4020 4021 #ifndef LINUX_REMOVE 4022 enum _ecore_status_t 4023 ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn, 4024 struct ecore_ptt *p_ptt, 4025 u16 rel_vf_id) 4026 4027 { 4028 u32 ack_vfs[VF_MAX_STATIC / 32]; 4029 enum _ecore_status_t rc = ECORE_SUCCESS; 4030 4031 OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32)); 4032 4033 /* Wait instead of polling the BRB <-> PRS interface */ 4034 OSAL_MSLEEP(100); 4035 4036 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs); 4037 4038 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs); 4039 return rc; 4040 } 4041 #endif 4042 4043 bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, 4044 u32 *p_disabled_vfs) 4045 { 4046 bool found = false; 4047 u16 i; 4048 4049 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n"); 4050 for (i = 0; i < (VF_MAX_STATIC / 32); i++) 4051 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 4052 "[%08x,...,%08x]: %08x\n", 4053 i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]); 4054 4055 if (!p_hwfn->p_dev->p_iov_info) { 4056 DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n"); 4057 return false; 4058 } 4059 4060 /* Mark VFs */ 4061 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) { 4062 struct ecore_vf_info *p_vf; 4063 u8 vfid; 4064 4065 p_vf = ecore_iov_get_vf_info(p_hwfn, i, false); 4066 if (!p_vf) 4067 continue; 4068 4069 vfid = p_vf->abs_vf_id; 4070 if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) { 4071 u64 *p_flr = p_hwfn->pf_iov_info->pending_flr; 4072 u16 rel_vf_id = p_vf->relative_vf_id; 4073 4074 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 4075 "VF[%d] [rel %d] got FLR-ed\n", 4076 vfid, rel_vf_id); 4077 4078 p_vf->state = VF_RESET; 4079 4080 /* No need to lock here, since pending_flr should 4081 * only change here and before ACKing MFw. Since 4082 * MFW will not trigger an additional attention for 4083 * VF flr until ACKs, we're safe. 
4084 */ 4085 p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64); 4086 found = true; 4087 } 4088 } 4089 4090 return found; 4091 } 4092 4093 void ecore_iov_get_link(struct ecore_hwfn *p_hwfn, 4094 u16 vfid, 4095 struct ecore_mcp_link_params *p_params, 4096 struct ecore_mcp_link_state *p_link, 4097 struct ecore_mcp_link_capabilities *p_caps) 4098 { 4099 struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false); 4100 struct ecore_bulletin_content *p_bulletin; 4101 4102 if (!p_vf) 4103 return; 4104 4105 p_bulletin = p_vf->bulletin.p_virt; 4106 4107 if (p_params) 4108 __ecore_vf_get_link_params(p_params, p_bulletin); 4109 if (p_link) 4110 __ecore_vf_get_link_state(p_link, p_bulletin); 4111 if (p_caps) 4112 __ecore_vf_get_link_caps(p_caps, p_bulletin); 4113 } 4114 4115 void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn, 4116 struct ecore_ptt *p_ptt, 4117 int vfid) 4118 { 4119 struct ecore_iov_vf_mbx *mbx; 4120 struct ecore_vf_info *p_vf; 4121 4122 p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4123 if (!p_vf) 4124 return; 4125 4126 mbx = &p_vf->vf_mbx; 4127 4128 /* ecore_iov_process_mbx_request */ 4129 #ifndef CONFIG_ECORE_SW_CHANNEL 4130 if (!mbx->b_pending_msg) { 4131 DP_NOTICE(p_hwfn, true, 4132 "VF[%02x]: Trying to process mailbox message when none is pending\n", 4133 p_vf->abs_vf_id); 4134 return; 4135 } 4136 mbx->b_pending_msg = false; 4137 #endif 4138 4139 mbx->first_tlv = mbx->req_virt->first_tlv; 4140 4141 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 4142 "VF[%02x]: Processing mailbox message [type %04x]\n", 4143 p_vf->abs_vf_id, mbx->first_tlv.tl.type); 4144 4145 OSAL_IOV_VF_MSG_TYPE(p_hwfn, 4146 p_vf->relative_vf_id, 4147 mbx->first_tlv.tl.type); 4148 4149 /* Lock the per vf op mutex and note the locker's identity. 4150 * The unlock will take place in mbx response. 
4151 */ 4152 ecore_iov_lock_vf_pf_channel(p_hwfn, p_vf, 4153 mbx->first_tlv.tl.type); 4154 4155 /* check if tlv type is known */ 4156 if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) && 4157 !p_vf->b_malicious) { 4158 /* switch on the opcode */ 4159 switch (mbx->first_tlv.tl.type) { 4160 case CHANNEL_TLV_ACQUIRE: 4161 ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf); 4162 break; 4163 case CHANNEL_TLV_VPORT_START: 4164 ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf); 4165 break; 4166 case CHANNEL_TLV_VPORT_TEARDOWN: 4167 ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf); 4168 break; 4169 case CHANNEL_TLV_START_RXQ: 4170 ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf); 4171 break; 4172 case CHANNEL_TLV_START_TXQ: 4173 ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf); 4174 break; 4175 case CHANNEL_TLV_STOP_RXQS: 4176 ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf); 4177 break; 4178 case CHANNEL_TLV_STOP_TXQS: 4179 ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf); 4180 break; 4181 case CHANNEL_TLV_UPDATE_RXQ: 4182 ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf); 4183 break; 4184 case CHANNEL_TLV_VPORT_UPDATE: 4185 ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf); 4186 break; 4187 case CHANNEL_TLV_UCAST_FILTER: 4188 ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf); 4189 break; 4190 case CHANNEL_TLV_CLOSE: 4191 ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf); 4192 break; 4193 case CHANNEL_TLV_INT_CLEANUP: 4194 ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf); 4195 break; 4196 case CHANNEL_TLV_RELEASE: 4197 ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf); 4198 break; 4199 case CHANNEL_TLV_UPDATE_TUNN_PARAM: 4200 ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf); 4201 break; 4202 case CHANNEL_TLV_COALESCE_UPDATE: 4203 ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf); 4204 break; 4205 case CHANNEL_TLV_COALESCE_READ: 4206 ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf); 4207 break; 4208 } 4209 } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) { 4210 /* If we've received a message from a VF we consider malicious 4211 * we ignore the messasge unless it's one for RELEASE, in which 4212 * case we'll let it have the benefit of doubt, allowing the 4213 * next loaded driver to start again. 4214 */ 4215 if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) { 4216 /* TODO - initiate FLR, remove malicious indication */ 4217 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 4218 "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n", 4219 p_vf->abs_vf_id); 4220 } else { 4221 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 4222 "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n", 4223 p_vf->abs_vf_id, mbx->first_tlv.tl.type); 4224 } 4225 4226 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, 4227 mbx->first_tlv.tl.type, 4228 sizeof(struct pfvf_def_resp_tlv), 4229 PFVF_STATUS_MALICIOUS); 4230 } else { 4231 /* unknown TLV - this may belong to a VF driver from the future 4232 * - a version written after this PF driver was written, which 4233 * supports features unknown as of yet. Too bad since we don't 4234 * support them. Or this may be because someone wrote a crappy 4235 * VF driver and is sending garbage over the channel. 4236 */ 4237 DP_NOTICE(p_hwfn, false, 4238 "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n", 4239 p_vf->abs_vf_id, 4240 mbx->first_tlv.tl.type, 4241 mbx->first_tlv.tl.length, 4242 mbx->first_tlv.padding, 4243 (unsigned long long)mbx->first_tlv.reply_address); 4244 4245 /* Try replying in case reply address matches the acquisition's 4246 * posted address. 
4247 */ 4248 if (p_vf->acquire.first_tlv.reply_address && 4249 (mbx->first_tlv.reply_address == 4250 p_vf->acquire.first_tlv.reply_address)) 4251 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, 4252 mbx->first_tlv.tl.type, 4253 sizeof(struct pfvf_def_resp_tlv), 4254 PFVF_STATUS_NOT_SUPPORTED); 4255 else 4256 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 4257 "VF[%02x]: Can't respond to TLV - no valid reply address\n", 4258 p_vf->abs_vf_id); 4259 } 4260 4261 ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf, 4262 mbx->first_tlv.tl.type); 4263 4264 #ifdef CONFIG_ECORE_SW_CHANNEL 4265 mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY; 4266 mbx->sw_mbx.response_offset = 0; 4267 #endif 4268 } 4269 4270 void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn, 4271 u64 *events) 4272 { 4273 int i; 4274 4275 OSAL_MEM_ZERO(events, sizeof(u64) * ECORE_VF_ARRAY_LENGTH); 4276 4277 ecore_for_each_vf(p_hwfn, i) { 4278 struct ecore_vf_info *p_vf; 4279 4280 p_vf = &p_hwfn->pf_iov_info->vfs_array[i]; 4281 if (p_vf->vf_mbx.b_pending_msg) 4282 events[i / 64] |= 1ULL << (i % 64); 4283 } 4284 } 4285 4286 static struct ecore_vf_info * 4287 ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid) 4288 { 4289 u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf; 4290 4291 if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) { 4292 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 4293 "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n", 4294 abs_vfid); 4295 return OSAL_NULL; 4296 } 4297 4298 return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min]; 4299 } 4300 4301 static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn, 4302 u16 abs_vfid, 4303 struct regpair *vf_msg) 4304 { 4305 struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, 4306 abs_vfid); 4307 4308 if (!p_vf) 4309 return ECORE_SUCCESS; 4310 4311 /* List the physical address of the request so that handler 4312 * could later on copy the message from it. 
4313 */ 4314 p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | 4315 vf_msg->lo; 4316 4317 p_vf->vf_mbx.b_pending_msg = true; 4318 4319 return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id); 4320 } 4321 4322 static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn, 4323 struct malicious_vf_eqe_data *p_data) 4324 { 4325 struct ecore_vf_info *p_vf; 4326 4327 p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id); 4328 4329 if (!p_vf) 4330 return; 4331 4332 if (!p_vf->b_malicious) { 4333 DP_NOTICE(p_hwfn, false, 4334 "VF [%d] - Malicious behavior [%02x]\n", 4335 p_vf->abs_vf_id, p_data->err_id); 4336 4337 p_vf->b_malicious = true; 4338 } else { 4339 DP_INFO(p_hwfn, 4340 "VF [%d] - Malicious behavior [%02x]\n", 4341 p_vf->abs_vf_id, p_data->err_id); 4342 } 4343 4344 OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id); 4345 } 4346 4347 static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn, 4348 u8 opcode, 4349 __le16 echo, 4350 union event_ring_data *data, 4351 u8 OSAL_UNUSED fw_return_code) 4352 { 4353 switch (opcode) { 4354 case COMMON_EVENT_VF_PF_CHANNEL: 4355 return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo), 4356 &data->vf_pf_channel.msg_addr); 4357 case COMMON_EVENT_VF_FLR: 4358 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 4359 "VF-FLR is still not supported\n"); 4360 return ECORE_SUCCESS; 4361 case COMMON_EVENT_MALICIOUS_VF: 4362 ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf); 4363 return ECORE_SUCCESS; 4364 default: 4365 DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n", 4366 opcode); 4367 return ECORE_INVAL; 4368 } 4369 } 4370 4371 #ifndef LINUX_REMOVE 4372 bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, 4373 u16 rel_vf_id) 4374 { 4375 return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] & 4376 (1ULL << (rel_vf_id % 64))); 4377 } 4378 #endif 4379 4380 u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) 4381 { 4382 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info; 4383 u16 i; 4384 4385 if (!p_iov) 4386 goto out; 4387 4388 for (i = rel_vf_id; i < p_iov->total_vfs; i++) 4389 if (ecore_iov_is_valid_vfid(p_hwfn, rel_vf_id, true, false)) 4390 return i; 4391 4392 out: 4393 return MAX_NUM_VFS_E4; 4394 } 4395 4396 enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn, 4397 struct ecore_ptt *ptt, 4398 int vfid) 4399 { 4400 struct ecore_dmae_params params; 4401 struct ecore_vf_info *vf_info; 4402 4403 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4404 if (!vf_info) 4405 return ECORE_INVAL; 4406 4407 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_dmae_params)); 4408 params.flags = ECORE_DMAE_FLAG_VF_SRC | 4409 ECORE_DMAE_FLAG_COMPLETION_DST; 4410 params.src_vfid = vf_info->abs_vf_id; 4411 4412 if (ecore_dmae_host2host(p_hwfn, ptt, 4413 vf_info->vf_mbx.pending_req, 4414 vf_info->vf_mbx.req_phys, 4415 sizeof(union vfpf_tlvs) / 4, 4416 ¶ms)) { 4417 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 4418 "Failed to copy message from VF 0x%02x\n", 4419 vfid); 4420 4421 return ECORE_IO; 4422 } 4423 4424 return ECORE_SUCCESS; 4425 } 4426 4427 void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn, 4428 u8 *mac, int vfid) 4429 { 4430 struct ecore_vf_info *vf_info; 4431 u64 feature; 4432 4433 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4434 if (!vf_info) { 4435 DP_NOTICE(p_hwfn->p_dev, true, "Can not set forced MAC, invalid vfid [%d]\n", 4436 vfid); 4437 return; 4438 } 4439 if (vf_info->b_malicious) { 4440 DP_NOTICE(p_hwfn->p_dev, false, "Can't set forced MAC to malicious VF 
[%d]\n", 4441 vfid); 4442 return; 4443 } 4444 4445 feature = 1 << MAC_ADDR_FORCED; 4446 OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, 4447 mac, ETH_ALEN); 4448 4449 vf_info->bulletin.p_virt->valid_bitmap |= feature; 4450 /* Forced MAC will disable MAC_ADDR */ 4451 vf_info->bulletin.p_virt->valid_bitmap &= 4452 ~(1 << VFPF_BULLETIN_MAC_ADDR); 4453 4454 ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature); 4455 } 4456 4457 #ifndef LINUX_REMOVE 4458 enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn, 4459 u8 *mac, int vfid) 4460 { 4461 struct ecore_vf_info *vf_info; 4462 u64 feature; 4463 4464 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4465 if (!vf_info) { 4466 DP_NOTICE(p_hwfn->p_dev, true, "Can not set MAC, invalid vfid [%d]\n", 4467 vfid); 4468 return ECORE_INVAL; 4469 } 4470 if (vf_info->b_malicious) { 4471 DP_NOTICE(p_hwfn->p_dev, false, "Can't set MAC to malicious VF [%d]\n", 4472 vfid); 4473 return ECORE_INVAL; 4474 } 4475 4476 if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) { 4477 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Can not set MAC, Forced MAC is configured\n"); 4478 return ECORE_INVAL; 4479 } 4480 4481 feature = 1 << VFPF_BULLETIN_MAC_ADDR; 4482 OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, 4483 mac, ETH_ALEN); 4484 4485 vf_info->bulletin.p_virt->valid_bitmap |= feature; 4486 4487 return ECORE_SUCCESS; 4488 } 4489 4490 enum _ecore_status_t 4491 ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn, 4492 bool b_untagged_only, 4493 int vfid) 4494 { 4495 struct ecore_vf_info *vf_info; 4496 u64 feature; 4497 4498 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4499 if (!vf_info) { 4500 DP_NOTICE(p_hwfn->p_dev, true, 4501 "Can not set untagged default, invalid vfid [%d]\n", 4502 vfid); 4503 return ECORE_INVAL; 4504 } 4505 if (vf_info->b_malicious) { 4506 DP_NOTICE(p_hwfn->p_dev, false, 4507 "Can't set untagged default to malicious VF [%d]\n", 4508 vfid); 4509 return ECORE_INVAL; 4510 } 4511 4512 /* Since this is configurable only during vport-start, don't take it 4513 * if we're past that point. 4514 */ 4515 if (vf_info->state == VF_ENABLED) { 4516 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 4517 "Can't support untagged change for vfid[%d] - VF is already active\n", 4518 vfid); 4519 return ECORE_INVAL; 4520 } 4521 4522 /* Set configuration; This will later be taken into account during the 4523 * VF initialization. 4524 */ 4525 feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) | 4526 (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED); 4527 vf_info->bulletin.p_virt->valid_bitmap |= feature; 4528 4529 vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 
1 4530 : 0; 4531 4532 return ECORE_SUCCESS; 4533 } 4534 4535 void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid, 4536 u16 *opaque_fid) 4537 { 4538 struct ecore_vf_info *vf_info; 4539 4540 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4541 if (!vf_info) 4542 return; 4543 4544 *opaque_fid = vf_info->opaque_fid; 4545 } 4546 #endif 4547 4548 void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn, 4549 u16 pvid, int vfid) 4550 { 4551 struct ecore_vf_info *vf_info; 4552 u64 feature; 4553 4554 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4555 if (!vf_info) { 4556 DP_NOTICE(p_hwfn->p_dev, true, "Can not set forced vlan, invalid vfid [%d]\n", 4557 vfid); 4558 return; 4559 } 4560 if (vf_info->b_malicious) { 4561 DP_NOTICE(p_hwfn->p_dev, false, 4562 "Can't set forced vlan to malicious VF [%d]\n", 4563 vfid); 4564 return; 4565 } 4566 4567 feature = 1 << VLAN_ADDR_FORCED; 4568 vf_info->bulletin.p_virt->pvid = pvid; 4569 if (pvid) 4570 vf_info->bulletin.p_virt->valid_bitmap |= feature; 4571 else 4572 vf_info->bulletin.p_virt->valid_bitmap &= ~feature; 4573 4574 ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature); 4575 } 4576 4577 void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn, 4578 int vfid, u16 vxlan_port, u16 geneve_port) 4579 { 4580 struct ecore_vf_info *vf_info; 4581 4582 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4583 if (!vf_info) { 4584 DP_NOTICE(p_hwfn->p_dev, true, 4585 "Can not set udp ports, invalid vfid [%d]\n", vfid); 4586 return; 4587 } 4588 4589 if (vf_info->b_malicious) { 4590 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 4591 "Can not set udp ports to malicious VF [%d]\n", 4592 vfid); 4593 return; 4594 } 4595 4596 vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port; 4597 vf_info->bulletin.p_virt->geneve_udp_port = geneve_port; 4598 } 4599 4600 bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid) 4601 { 4602 struct ecore_vf_info *p_vf_info; 4603 4604 p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4605 if (!p_vf_info) 4606 return false; 4607 4608 return !!p_vf_info->vport_instance; 4609 } 4610 4611 bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid) 4612 { 4613 struct ecore_vf_info *p_vf_info; 4614 4615 p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4616 if (!p_vf_info) 4617 return true; 4618 4619 return p_vf_info->state == VF_STOPPED; 4620 } 4621 4622 bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid) 4623 { 4624 struct ecore_vf_info *vf_info; 4625 4626 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4627 if (!vf_info) 4628 return false; 4629 4630 return vf_info->spoof_chk; 4631 } 4632 4633 enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn, 4634 int vfid, bool val) 4635 { 4636 struct ecore_vf_info *vf; 4637 enum _ecore_status_t rc = ECORE_INVAL; 4638 4639 if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) { 4640 DP_NOTICE(p_hwfn, true, 4641 "SR-IOV sanity check failed, can't set spoofchk\n"); 4642 goto out; 4643 } 4644 4645 vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4646 if (!vf) 4647 goto out; 4648 4649 if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) { 4650 /* After VF VPORT start PF will configure spoof check */ 4651 vf->req_spoofchk_val = val; 4652 rc = ECORE_SUCCESS; 4653 goto out; 4654 } 4655 4656 rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val); 4657 4658 out: 4659 return rc; 4660 } 4661 4662 #ifndef LINUX_REMOVE 4663 u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn) 4664 { 4665 u8
max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf; 4666 4667 max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf 4668 : ECORE_MAX_VF_CHAINS_PER_PF; 4669 4670 return max_chains_per_vf; 4671 } 4672 4673 void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn, 4674 u16 rel_vf_id, 4675 void **pp_req_virt_addr, 4676 u16 *p_req_virt_size) 4677 { 4678 struct ecore_vf_info *vf_info = 4679 ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); 4680 4681 if (!vf_info) 4682 return; 4683 4684 if (pp_req_virt_addr) 4685 *pp_req_virt_addr = vf_info->vf_mbx.req_virt; 4686 4687 if (p_req_virt_size) 4688 *p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt); 4689 } 4690 4691 void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn, 4692 u16 rel_vf_id, 4693 void **pp_reply_virt_addr, 4694 u16 *p_reply_virt_size) 4695 { 4696 struct ecore_vf_info *vf_info = 4697 ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); 4698 4699 if (!vf_info) 4700 return; 4701 4702 if (pp_reply_virt_addr) 4703 *pp_reply_virt_addr = vf_info->vf_mbx.reply_virt; 4704 4705 if (p_reply_virt_size) 4706 *p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt); 4707 } 4708 4709 #ifdef CONFIG_ECORE_SW_CHANNEL 4710 struct ecore_iov_sw_mbx* 4711 ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn, 4712 u16 rel_vf_id) 4713 { 4714 struct ecore_vf_info *vf_info = 4715 ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); 4716 4717 if (!vf_info) 4718 return OSAL_NULL; 4719 4720 return &vf_info->vf_mbx.sw_mbx; 4721 } 4722 #endif 4723 4724 bool ecore_iov_is_valid_vfpf_msg_length(u32 length) 4725 { 4726 return (length >= sizeof(struct vfpf_first_tlv) && 4727 (length <= sizeof(union vfpf_tlvs))); 4728 } 4729 4730 u32 ecore_iov_pfvf_msg_length(void) 4731 { 4732 return sizeof(union pfvf_tlvs); 4733 } 4734 #endif 4735 4736 u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, 4737 u16 rel_vf_id) 4738 { 4739 struct ecore_vf_info *p_vf; 4740 4741 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); 4742 if (!p_vf || !p_vf->bulletin.p_virt) 4743 return OSAL_NULL; 4744 4745 if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))) 4746 return OSAL_NULL; 4747 4748 return p_vf->bulletin.p_virt->mac; 4749 } 4750 4751 u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn, 4752 u16 rel_vf_id) 4753 { 4754 struct ecore_vf_info *p_vf; 4755 4756 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); 4757 if (!p_vf || !p_vf->bulletin.p_virt) 4758 return 0; 4759 4760 if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))) 4761 return 0; 4762 4763 return p_vf->bulletin.p_virt->pvid; 4764 } 4765 4766 enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn, 4767 struct ecore_ptt *p_ptt, 4768 int vfid, int val) 4769 { 4770 struct ecore_mcp_link_state *p_link; 4771 struct ecore_vf_info *vf; 4772 u8 abs_vp_id = 0; 4773 enum _ecore_status_t rc; 4774 4775 vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4776 4777 if (!vf) 4778 return ECORE_INVAL; 4779 4780 rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id); 4781 if (rc != ECORE_SUCCESS) 4782 return rc; 4783 4784 p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output; 4785 4786 return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val, 4787 p_link->speed); 4788 } 4789 4790 enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev, 4791 int vfid, u32 rate) 4792 { 4793 struct ecore_vf_info *vf; 4794 u8 vport_id; 4795 int i; 4796 4797 for_each_hwfn(p_dev, i) { 4798 struct ecore_hwfn *p_hwfn = 
&p_dev->hwfns[i]; 4799 4800 if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) { 4801 DP_NOTICE(p_hwfn, true, 4802 "SR-IOV sanity check failed, can't set min rate\n"); 4803 return ECORE_INVAL; 4804 } 4805 } 4806 4807 vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true); 4808 vport_id = vf->vport_id; 4809 4810 return ecore_configure_vport_wfq(p_dev, vport_id, rate); 4811 } 4812 4813 #ifndef LINUX_REMOVE 4814 enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn, 4815 struct ecore_ptt *p_ptt, 4816 int vfid, 4817 struct ecore_eth_stats *p_stats) 4818 { 4819 struct ecore_vf_info *vf; 4820 4821 vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4822 if (!vf) 4823 return ECORE_INVAL; 4824 4825 if (vf->state != VF_ENABLED) 4826 return ECORE_INVAL; 4827 4828 __ecore_get_vport_stats(p_hwfn, p_ptt, p_stats, 4829 vf->abs_vf_id + 0x10, false); 4830 4831 return ECORE_SUCCESS; 4832 } 4833 4834 u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, 4835 u16 rel_vf_id) 4836 { 4837 struct ecore_vf_info *p_vf; 4838 4839 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); 4840 if (!p_vf) 4841 return 0; 4842 4843 return p_vf->num_rxqs; 4844 } 4845 4846 u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, 4847 u16 rel_vf_id) 4848 { 4849 struct ecore_vf_info *p_vf; 4850 4851 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); 4852 if (!p_vf) 4853 return 0; 4854 4855 return p_vf->num_active_rxqs; 4856 } 4857 4858 void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, 4859 u16 rel_vf_id) 4860 { 4861 struct ecore_vf_info *p_vf; 4862 4863 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); 4864 if (!p_vf) 4865 return OSAL_NULL; 4866 4867 return p_vf->ctx; 4868 } 4869 4870 u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, 4871 u16 rel_vf_id) 4872 { 4873 struct ecore_vf_info *p_vf; 4874 4875 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); 4876 if (!p_vf) 4877 return 0; 4878 4879 return p_vf->num_sbs; 4880 } 4881 4882 bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, 4883 u16 rel_vf_id) 4884 { 4885 struct ecore_vf_info *p_vf; 4886 4887 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); 4888 if (!p_vf) 4889 return false; 4890 4891 return (p_vf->state == VF_FREE); 4892 } 4893 4894 bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn, 4895 u16 rel_vf_id) 4896 { 4897 struct ecore_vf_info *p_vf; 4898 4899 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); 4900 if (!p_vf) 4901 return false; 4902 4903 return (p_vf->state == VF_ACQUIRED); 4904 } 4905 4906 bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, 4907 u16 rel_vf_id) 4908 { 4909 struct ecore_vf_info *p_vf; 4910 4911 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); 4912 if (!p_vf) 4913 return false; 4914 4915 return (p_vf->state == VF_ENABLED); 4916 } 4917 4918 bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn, 4919 u16 rel_vf_id) 4920 { 4921 struct ecore_vf_info *p_vf; 4922 4923 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); 4924 if (!p_vf) 4925 return false; 4926 4927 return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED); 4928 } 4929 #endif 4930 4931 enum _ecore_status_t 4932 ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid) 4933 { 4934 struct ecore_wfq_data *vf_vp_wfq; 4935 struct ecore_vf_info *vf_info; 4936 4937 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4938 if (!vf_info) 4939 return 0; 4940 4941 vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id]; 4942 4943 if (vf_vp_wfq->configured) 4944 
return vf_vp_wfq->min_speed; 4945 else 4946 return 0; 4947 } 4948 4949 #ifdef CONFIG_ECORE_SW_CHANNEL 4950 void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid, 4951 bool b_is_hw) 4952 { 4953 struct ecore_vf_info *vf_info; 4954 4955 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4956 if (!vf_info) 4957 return; 4958 4959 vf_info->b_hw_channel = b_is_hw; 4960 } 4961 #endif 4962