/*
 * Copyright (c) 2018-2019 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_hsi_eth.h"
#include "ecore_sriov.h"
#include "ecore_l2_api.h"
#include "ecore_vf.h"
#include "ecore_vfpf_if.h"
#include "ecore_status.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_l2.h"
#include "ecore_mcp_api.h"
#include "ecore_vf_api.h"

#ifdef _NTDDK_
#pragma warning(push)
#pragma warning(disable : 28167)
#pragma warning(disable : 28123)
#pragma warning(disable : 28121)
#endif

static void *ecore_vf_pf_prep(struct ecore_hwfn *p_hwfn,
			      u16 type, u16 length)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	void *p_tlv;

	/* This lock is released when we receive PF's response
	 * in ecore_send_msg2pf().
	 * So, ecore_vf_pf_prep() and ecore_send_msg2pf()
	 * must come in sequence.
	 */
	OSAL_MUTEX_ACQUIRE(&(p_iov->mutex));

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "preparing to send %s tlv over vf pf channel\n",
		   ecore_channel_tlvs_string[type]);

	/* Reset Request offset */
	p_iov->offset = (u8 *)p_iov->vf2pf_request;

	/* Clear mailbox - both request and reply */
	OSAL_MEMSET(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
	OSAL_MEMSET(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

	/* Init type and length */
	p_tlv = ecore_add_tlv(&p_iov->offset, type, length);

	/* Init first tlv header */
	((struct vfpf_first_tlv *)p_tlv)->reply_address =
		(u64)p_iov->pf2vf_reply_phys;

	return p_tlv;
}

static void ecore_vf_pf_req_end(struct ecore_hwfn *p_hwfn,
				enum _ecore_status_t req_status)
{
	union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF request status = 0x%x, PF reply status = 0x%x\n",
		   req_status, resp->default_resp.hdr.status);

	OSAL_MUTEX_RELEASE(&(p_hwfn->vf_iov_info->mutex));
}

#ifdef CONFIG_ECORE_SW_CHANNEL
/* The SW channel implementation of Windows needs to know the 'exact'
 * response size of any given message.
 * That means that for future messages we would be unable to send new
 * TLVs to the PF if it cannot answer them, i.e., whenever
 * |response| != |default response|.
 * We'd need to handshake in acquire capabilities for any such.
 */
#endif
static enum _ecore_status_t
ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
		  u8 *done, u32 resp_size)
{
	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
	struct ustorm_trigger_vf_zone trigger;
	struct ustorm_vf_zone *zone_data;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int time = 100;

	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;

	/* output tlvs list */
	ecore_dp_tlv_list(p_hwfn, p_req);

	/* need to add the END TLV to the message size */
	resp_size += sizeof(struct channel_list_end_tlv);

#ifdef CONFIG_ECORE_SW_CHANNEL
	if (!p_hwfn->vf_iov_info->b_hw_channel) {
		rc = OSAL_VF_SEND_MSG2PF(p_hwfn->p_dev,
					 done,
					 p_req,
					 p_hwfn->vf_iov_info->pf2vf_reply,
					 sizeof(union vfpf_tlvs),
					 resp_size);
		/* TODO - no prints about message ? */
		return rc;
	}
#endif

	/* Send TLVs over HW channel */
	OSAL_MEMSET(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
	trigger.vf_pf_msg_valid = 1;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
		   GET_FIELD(p_hwfn->hw_info.concrete_fid,
			     PXP_CONCRETE_FID_PFID),
		   U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   &zone_data->non_trigger.vf_pf_msg_addr,
		   *((u32 *)&trigger),
		   &zone_data->trigger);

	REG_WR(p_hwfn,
	       (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
	       U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys));

	REG_WR(p_hwfn,
	       (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
	       U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys));

	/* The message data must be written first, to prevent trigger before
	 * data is written.
	 */
	OSAL_WMB(p_hwfn->p_dev);

	REG_WR(p_hwfn, (osal_uintptr_t)&zone_data->trigger, *((u32 *)&trigger));

	/* When the PF is done with the response, it writes back to the
	 * `done' address. Poll until then.
	 */
	while ((!*done) && time) {
		OSAL_MSLEEP(25);
		time--;
	}

	if (!*done) {
		DP_NOTICE(p_hwfn, true,
			  "VF <-- PF Timeout [Type %d]\n",
			  p_req->first_tlv.tl.type);
		rc = ECORE_TIMEOUT;
	} else {
		if ((*done != PFVF_STATUS_SUCCESS) &&
		    (*done != PFVF_STATUS_NO_RESOURCE))
			DP_NOTICE(p_hwfn, false,
				  "PF response: %d [Type %d]\n",
				  *done, p_req->first_tlv.tl.type);
		else
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "PF response: %d [Type %d]\n",
				   *done, p_req->first_tlv.tl.type);
	}

	return rc;
}

static void ecore_vf_pf_add_qid(struct ecore_hwfn *p_hwfn,
				struct ecore_queue_cid *p_cid)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_qid_tlv *p_qid_tlv;

	/* Only add QIDs for the queue if it was negotiated with PF */
	if (!(p_iov->acquire_resp.pfdev_info.capabilities &
	      PFVF_ACQUIRE_CAP_QUEUE_QIDS))
		return;

	p_qid_tlv = ecore_add_tlv(&p_iov->offset,
				  CHANNEL_TLV_QID, sizeof(*p_qid_tlv));
	p_qid_tlv->qid = p_cid->qid_usage_idx;
}

static enum _ecore_status_t _ecore_vf_pf_release(struct ecore_hwfn *p_hwfn,
						 bool b_final)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	u32 size;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

	if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = ECORE_AGAIN;

	ecore_vf_pf_req_end(p_hwfn, rc);
	if (!b_final)
		return rc;

	p_hwfn->b_int_enabled = 0;

	if (p_iov->vf2pf_request)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov->vf2pf_request,
				       p_iov->vf2pf_request_phys,
				       sizeof(union vfpf_tlvs));
	if (p_iov->pf2vf_reply)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov->pf2vf_reply,
				       p_iov->pf2vf_reply_phys,
				       sizeof(union pfvf_tlvs));

	if (p_iov->bulletin.p_virt) {
		size = sizeof(struct ecore_bulletin_content);
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov->bulletin.p_virt,
				       p_iov->bulletin.phys,
				       size);
	}

#ifdef CONFIG_ECORE_LOCK_ALLOC
	OSAL_MUTEX_DEALLOC(&p_iov->mutex);
#endif

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);
	p_hwfn->vf_iov_info = OSAL_NULL;

	return rc;
}

enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
{
	return _ecore_vf_pf_release(p_hwfn, true);
}

#define VF_ACQUIRE_THRESH 3
static void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn,
					    struct vf_pf_resc_request *p_req,
					    struct pf_vf_resc *p_resp)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n",
		   p_req->num_rxqs, p_resp->num_rxqs,
		   p_req->num_txqs, p_resp->num_txqs,
		   p_req->num_sbs, p_resp->num_sbs,
		   p_req->num_mac_filters, p_resp->num_mac_filters,
		   p_req->num_vlan_filters, p_resp->num_vlan_filters,
		   p_req->num_mc_filters, p_resp->num_mc_filters,
		   p_req->num_cids, p_resp->num_cids);

	/* humble our request */
	p_req->num_txqs = p_resp->num_txqs;
	p_req->num_rxqs = p_resp->num_rxqs;
	p_req->num_sbs = p_resp->num_sbs;
	p_req->num_mac_filters = p_resp->num_mac_filters;
	p_req->num_vlan_filters = p_resp->num_vlan_filters;
	p_req->num_mc_filters = p_resp->num_mc_filters;
	p_req->num_cids = p_resp->num_cids;
}

static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct ecore_vf_acquire_sw_info vf_sw_info;
	struct vf_pf_resc_request *p_resc;
	bool resources_acquired = false;
	struct vfpf_acquire_tlv *req;
	int attempts = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int eth_hsi_minor_ver;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
	p_resc = &req->resc_request;

	/* @@@ TBD: PF may not be ready bnx2x_get_vf_id... */
	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;

	p_resc->num_rxqs = ECORE_MAX_VF_CHAINS_PER_PF;
	p_resc->num_txqs = ECORE_MAX_VF_CHAINS_PER_PF;
	p_resc->num_sbs = ECORE_MAX_VF_CHAINS_PER_PF;
	p_resc->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
	p_resc->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
	p_resc->num_cids = ECORE_ETH_VF_DEFAULT_NUM_CIDS;

	OSAL_MEMSET(&vf_sw_info, 0, sizeof(vf_sw_info));
	OSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, &req->resc_request, &vf_sw_info);

	req->vfdev_info.os_type = vf_sw_info.os_type;
	req->vfdev_info.driver_version = vf_sw_info.driver_version;
	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
	req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
	req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;

	/* Fill capability field with any non-deprecated config we support */
	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;

	/* If we've mapped the doorbell bar, try using queue qids */
	if (p_iov->b_doorbell_bar) {
		req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR |
						VFPF_ACQUIRE_CAP_QUEUE_QIDS;
		p_resc->num_cids = ECORE_ETH_VF_MAX_NUM_CIDS;
	}

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = p_iov->bulletin.phys;
	req->bulletin_size = p_iov->bulletin.size;

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	while (!resources_acquired) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "attempting to acquire resources\n");

		/* Clear response buffer, as this might be a re-send */
		OSAL_MEMSET(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

		/* send acquire request */
		rc = ecore_send_msg2pf(p_hwfn,
				       &resp->hdr.status,
				       sizeof(*resp));
		if (rc != ECORE_SUCCESS)
			goto exit;

		/* copy acquire response from buffer to p_hwfn */
		OSAL_MEMCPY(&p_iov->acquire_resp,
			    resp,
			    sizeof(p_iov->acquire_resp));

		attempts++;

		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
			/* PF agrees to allocate our resources */
			if (!(resp->pfdev_info.capabilities &
			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
				/* It's possible legacy PF mistakenly accepted;
				 * but we don't care - simply mark it as
				 * legacy and continue.
				 */
				req->vfdev_info.capabilities |=
					VFPF_ACQUIRE_CAP_PRE_FP_HSI;
			}
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "resources acquired\n");
			resources_acquired = true;
		} /* PF refuses to allocate our resources */
		else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
			 attempts < VF_ACQUIRE_THRESH) {
			ecore_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
							&resp->resc);

		} else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
			if (pfdev_info->major_fp_hsi &&
			    (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
				DP_NOTICE(p_hwfn, false,
					  "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
					  pfdev_info->major_fp_hsi,
					  pfdev_info->minor_fp_hsi,
					  ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR,
					  pfdev_info->major_fp_hsi);
				rc = ECORE_INVAL;
				goto exit;
			}

			if (!pfdev_info->major_fp_hsi) {
				if (req->vfdev_info.capabilities &
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
					DP_NOTICE(p_hwfn, false,
						  "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n");
					rc = ECORE_INVAL;
					goto exit;
				} else {
					DP_INFO(p_hwfn,
						"PF is old - try re-acquire to see if it supports FW-version override\n");
					req->vfdev_info.capabilities |=
						VFPF_ACQUIRE_CAP_PRE_FP_HSI;
					continue;
				}
			}

			/* If PF/VF are using same Major, PF must have had
			 * its reasons. Simply fail.
			 */
			DP_NOTICE(p_hwfn, false,
				  "PF rejected acquisition by VF\n");
			rc = ECORE_INVAL;
			goto exit;
		} else {
			DP_ERR(p_hwfn,
			       "PF returned error %d to VF acquisition request\n",
			       resp->hdr.status);
			rc = ECORE_AGAIN;
			goto exit;
		}
	}

	/* Mark the PF as legacy, if needed */
	if (req->vfdev_info.capabilities &
	    VFPF_ACQUIRE_CAP_PRE_FP_HSI)
		p_iov->b_pre_fp_hsi = true;

	/* In case PF doesn't support multi-queue Tx, update the number of
	 * CIDs to reflect the number of queues [older PFs didn't fill that
	 * field].
	 */
	if (!(resp->pfdev_info.capabilities &
	      PFVF_ACQUIRE_CAP_QUEUE_QIDS))
		resp->resc.num_cids = resp->resc.num_rxqs +
				      resp->resc.num_txqs;

#ifndef LINUX_REMOVE
	rc = OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(p_hwfn, &resp->resc);
	if (rc) {
		DP_NOTICE(p_hwfn, true,
			  "VF_UPDATE_ACQUIRE_RESC_RESP Failed: status = 0x%x.\n",
			  rc);
		rc = ECORE_AGAIN;
		goto exit;
	}
#endif

	/* Update bulletin board size with response from PF */
	p_iov->bulletin.size = resp->bulletin_size;

	/* get HW info */
	p_hwfn->p_dev->type = resp->pfdev_info.dev_type;
	p_hwfn->p_dev->chip_rev = (u8)resp->pfdev_info.chip_rev;

	DP_INFO(p_hwfn, "Chip details - %s%d\n",
		ECORE_IS_BB(p_hwfn->p_dev) ? "BB" : "AH",
		CHIP_REV_IS_A0(p_hwfn->p_dev) ? 0 : 1);
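
	/* Only the lower 16 bits of the chip number reported by the PF are
	 * kept by the VF.
	 */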
	p_hwfn->p_dev->chip_num = pfdev_info->chip_num & 0xffff;

	/* Learn of the possibility of CMT */
	if (IS_LEAD_HWFN(p_hwfn)) {
		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
			DP_NOTICE(p_hwfn, false, "100g VF\n");
			p_hwfn->p_dev->num_hwfns = 2;
		}
	}

	eth_hsi_minor_ver = ETH_HSI_VER_MINOR;

	if (!p_iov->b_pre_fp_hsi &&
	    (eth_hsi_minor_ver) &&
	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
		DP_INFO(p_hwfn,
			"PF is using older fastpath HSI; %02x.%02x is configured\n",
			ETH_HSI_VER_MAJOR,
			resp->pfdev_info.minor_fp_hsi);
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

u32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn,
			 enum BAR_ID bar_id)
{
	u32 bar_size;

	/* Regview size is fixed */
	if (bar_id == BAR_ID_0)
		return 1 << 17;

	/* Doorbell is received from PF */
	bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size;
	if (bar_size)
		return 1 << bar_size;
	return 0;
}

enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
{
	struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_hwfn->p_dev);
	struct ecore_vf_iov *p_iov;
	u32 reg;
	enum _ecore_status_t rc;

	/* Set number of hwfns - might be overridden once leading hwfn learns
	 * actual configuration from PF.
	 */
	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->p_dev->num_hwfns = 1;

	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);

	reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);

	/* Allocate vf sriov info */
	p_iov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_iov));
	if (!p_iov) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_vf_iov'\n");
		return ECORE_NOMEM;
	}

	/* Doorbells are tricky; Upper-layer has already set the hwfn doorbell
	 * value, but there are several incompatibility scenarios where that
	 * would be incorrect and we'd need to override it.
	 */
	if (p_hwfn->doorbells == OSAL_NULL) {
		p_hwfn->doorbells = (u8 OSAL_IOMEM*)p_hwfn->regview +
				    PXP_VF_BAR0_START_DQ;
#ifndef LINUX_REMOVE
		p_hwfn->db_offset = (u8 *)p_hwfn->doorbells -
				    (u8 *)p_hwfn->p_dev->doorbells;
#endif

	} else if (p_hwfn == p_lead) {
		/* For leading hw-function, value is always correct, but need
		 * to handle scenario where legacy PF would not support 100g
		 * mapped bars later.
		 */
		p_iov->b_doorbell_bar = true;
	} else {
		/* here, value would be correct ONLY if the leading hwfn
		 * received indication that mapped-bars are supported.
		 */
		if (p_lead->vf_iov_info->b_doorbell_bar)
			p_iov->b_doorbell_bar = true;
#ifdef LINUX_REMOVE
		else
			p_hwfn->doorbells = (u8 OSAL_IOMEM*)
				p_hwfn->regview +
				PXP_VF_BAR0_START_DQ;
#else
		else {
			p_hwfn->doorbells = (u8 OSAL_IOMEM*)p_hwfn->regview +
					    PXP_VF_BAR0_START_DQ;
			p_hwfn->db_offset = (u8 *)p_hwfn->doorbells -
					    (u8 *)p_hwfn->p_dev->doorbells;
		}
#endif
	}

	/* Allocate vf2pf msg */
	p_iov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
						       &p_iov->vf2pf_request_phys,
						       sizeof(union vfpf_tlvs));
	if (!p_iov->vf2pf_request) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `vf2pf_request' DMA memory\n");
		goto free_p_iov;
	}

	p_iov->pf2vf_reply = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
						     &p_iov->pf2vf_reply_phys,
						     sizeof(union pfvf_tlvs));
	if (!p_iov->pf2vf_reply) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `pf2vf_reply' DMA memory\n");
		goto free_vf2pf_request;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
		   p_iov->vf2pf_request,
		   (unsigned long long)p_iov->vf2pf_request_phys,
		   p_iov->pf2vf_reply,
		   (unsigned long long)p_iov->pf2vf_reply_phys);

	/* Allocate Bulletin board */
	p_iov->bulletin.size = sizeof(struct ecore_bulletin_content);
	p_iov->bulletin.p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
							 &p_iov->bulletin.phys,
							 p_iov->bulletin.size);
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
		   p_iov->bulletin.p_virt,
		   (unsigned long long)p_iov->bulletin.phys,
		   p_iov->bulletin.size);

#ifdef CONFIG_ECORE_LOCK_ALLOC
	OSAL_MUTEX_ALLOC(p_hwfn, &p_iov->mutex);
#endif
	OSAL_MUTEX_INIT(&p_iov->mutex);

	p_hwfn->vf_iov_info = p_iov;

	p_hwfn->hw_info.personality = ECORE_PCI_ETH;

	rc = ecore_vf_pf_acquire(p_hwfn);

	/* If VF is 100g using a mapped bar and PF is too old to support that,
	 * acquisition would succeed - but the VF would have no way of knowing
	 * the size of the doorbell bar configured in HW and thus will not
	 * know how to split it for 2nd hw-function.
	 * In this case we re-try without the indication of the mapped
	 * doorbell.
	 */
	if (rc == ECORE_SUCCESS &&
	    p_iov->b_doorbell_bar &&
	    !ecore_vf_hw_bar_size(p_hwfn, BAR_ID_1) &&
	    ECORE_IS_CMT(p_hwfn->p_dev)) {
		rc = _ecore_vf_pf_release(p_hwfn, false);
		if (rc != ECORE_SUCCESS)
			return rc;

		p_iov->b_doorbell_bar = false;
		p_hwfn->doorbells = (u8 OSAL_IOMEM*)p_hwfn->regview +
				    PXP_VF_BAR0_START_DQ;
#ifndef LINUX_REMOVE
		p_hwfn->db_offset = (u8 *)p_hwfn->doorbells -
				    (u8 *)p_hwfn->p_dev->doorbells;
#endif
		rc = ecore_vf_pf_acquire(p_hwfn);
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Regview [%p], Doorbell [%p], Device-doorbell [%p]\n",
		   p_hwfn->regview, p_hwfn->doorbells,
		   p_hwfn->p_dev->doorbells);

	return rc;

free_vf2pf_request:
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->vf2pf_request,
			       p_iov->vf2pf_request_phys,
			       sizeof(union vfpf_tlvs));
free_p_iov:
	OSAL_FREE(p_hwfn->p_dev, p_iov);

	return ECORE_NOMEM;
}

#define TSTORM_QZONE_START	PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev)	(TSTORM_QZONE_START + \
				 (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))

static void
__ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
			     struct ecore_tunn_update_type *p_src,
			     enum ecore_tunn_clss mask, u8 *p_cls)
{
	if (p_src->b_update_mode) {
		p_req->tun_mode_update_mask |= (1 << mask);

		if (p_src->b_mode_enabled)
			p_req->tunn_mode |= (1 << mask);
	}

	*p_cls = p_src->tun_cls;
}

static void
ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
			   struct ecore_tunn_update_type *p_src,
			   enum ecore_tunn_clss mask, u8 *p_cls,
			   struct ecore_tunn_update_udp_port *p_port,
			   u8 *p_update_port, u16 *p_udp_port)
{
	if (p_port->b_update_port) {
		*p_update_port = 1;
		*p_udp_port = p_port->port;
	}

	__ecore_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
}

void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun)
{
	if (p_tun->vxlan.b_mode_enabled)
		p_tun->vxlan.b_update_mode = true;
	if (p_tun->l2_geneve.b_mode_enabled)
		p_tun->l2_geneve.b_update_mode = true;
	if (p_tun->ip_geneve.b_mode_enabled)
		p_tun->ip_geneve.b_update_mode = true;
	if (p_tun->l2_gre.b_mode_enabled)
		p_tun->l2_gre.b_update_mode = true;
	if (p_tun->ip_gre.b_mode_enabled)
		p_tun->ip_gre.b_update_mode = true;

	p_tun->b_update_rx_cls = true;
	p_tun->b_update_tx_cls = true;
}

static void
__ecore_vf_update_tunn_param(struct ecore_tunn_update_type *p_tun,
			     u16 feature_mask, u8 tunn_mode, u8 tunn_cls,
			     enum ecore_tunn_mode val)
{
	if (feature_mask & (1 << val)) {
		p_tun->b_mode_enabled = tunn_mode;
		p_tun->tun_cls = tunn_cls;
	} else {
		p_tun->b_mode_enabled = false;
	}
}

static void
ecore_vf_update_tunn_param(struct ecore_hwfn *p_hwfn,
			   struct ecore_tunnel_info *p_tun,
			   struct pfvf_update_tunn_param_tlv *p_resp)
{
	/* Update mode and classes provided by PF */
	u16 feat_mask = p_resp->tunn_feature_mask;

	__ecore_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
				     p_resp->vxlan_mode, p_resp->vxlan_clss,
				     ECORE_MODE_VXLAN_TUNN);
	__ecore_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
				     p_resp->l2geneve_mode,
				     p_resp->l2geneve_clss,
				     ECORE_MODE_L2GENEVE_TUNN);
	__ecore_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
				     p_resp->ipgeneve_mode,
				     p_resp->ipgeneve_clss,
				     ECORE_MODE_IPGENEVE_TUNN);
	__ecore_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
				     p_resp->l2gre_mode, p_resp->l2gre_clss,
				     ECORE_MODE_L2GRE_TUNN);
	__ecore_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
				     p_resp->ipgre_mode, p_resp->ipgre_clss,
				     ECORE_MODE_IPGRE_TUNN);
	p_tun->geneve_port.port = p_resp->geneve_udp_port;
	p_tun->vxlan_port.port = p_resp->vxlan_udp_port;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x",
		   p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
		   p_tun->ip_geneve.b_mode_enabled,
		   p_tun->l2_gre.b_mode_enabled,
		   p_tun->ip_gre.b_mode_enabled);
}

enum _ecore_status_t
ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
				struct ecore_tunnel_info *p_src)
{
	struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_update_tunn_param_tlv *p_resp;
	struct vfpf_update_tunn_param_tlv *p_req;
	enum _ecore_status_t rc;

	p_req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
				 sizeof(*p_req));

	if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
		p_req->update_tun_cls = 1;

	ecore_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan,
				   (enum ecore_tunn_clss)ECORE_MODE_VXLAN_TUNN,
				   &p_req->vxlan_clss, &p_src->vxlan_port,
				   &p_req->update_vxlan_port,
				   &p_req->vxlan_port);
	ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
				   (enum ecore_tunn_clss)ECORE_MODE_L2GENEVE_TUNN,
				   &p_req->l2geneve_clss, &p_src->geneve_port,
				   &p_req->update_geneve_port,
				   &p_req->geneve_port);
	__ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
				     (enum ecore_tunn_clss)ECORE_MODE_IPGENEVE_TUNN,
				     &p_req->ipgeneve_clss);
	__ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
				     (enum ecore_tunn_clss)ECORE_MODE_L2GRE_TUNN,
				     &p_req->l2gre_clss);
	__ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
				     (enum ecore_tunn_clss)ECORE_MODE_IPGRE_TUNN,
				     &p_req->ipgre_clss);

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
	rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));

	if (rc)
		goto exit;

	if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Failed to update tunnel parameters\n");
		rc = ECORE_INVAL;
	}

	ecore_vf_update_tunn_param(p_hwfn, p_tun, p_resp);
exit:
	ecore_vf_pf_req_end(p_hwfn, rc);
	return rc;
}

enum _ecore_status_t
ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
		      struct ecore_queue_cid *p_cid,
		      u16 bd_max_bytes,
		      dma_addr_t bd_chain_phys_addr,
		      dma_addr_t cqe_pbl_addr,
		      u16 cqe_pbl_size,
		      void OSAL_IOMEM **pp_prod)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_rxq_tlv *req;
	u16 rx_qid = p_cid->rel.queue_id;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));

	req->rx_qid = rx_qid;
	req->cqe_pbl_addr = cqe_pbl_addr;
	req->cqe_pbl_size = cqe_pbl_size;
	req->rxq_addr = bd_chain_phys_addr;
	req->hw_sb = p_cid->sb_igu_id;
	req->sb_index = p_cid->sb_idx;
	req->bd_max_bytes = bd_max_bytes;
	req->stat_id = -1; /* Keep initialized, for future compatibility */

	/* If PF is legacy, we'll need to calculate producers ourselves
	 * as well as clean them.
	 */
	if (p_iov->b_pre_fp_hsi) {
		u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
		u32 init_prod_val = 0;

		*pp_prod = (u8 OSAL_IOMEM*)
			   p_hwfn->regview +
			   MSTORM_QZONE_START(p_hwfn->p_dev) +
			   hw_qid * MSTORM_QZONE_SIZE;

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)(&init_prod_val));
	}

	ecore_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

	/* Learn the address of the producer from the response */
	if (!p_iov->b_pre_fp_hsi) {
		u32 init_prod_val = 0;

		*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview + resp->offset;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
			   rx_qid, *pp_prod, resp->offset);

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0.
		 * It was actually the PF's responsibility, but since some
		 * old PFs might fail to do so, we do this as well.
		 */
		OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)&init_prod_val);
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
					  struct ecore_queue_cid *p_cid,
					  bool cqe_completion)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_rxqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));

	req->rx_qid = p_cid->rel.queue_id;
	req->num_rxqs = 1;
	req->cqe_completion = cqe_completion;

	ecore_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t
ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
		      struct ecore_queue_cid *p_cid,
		      dma_addr_t pbl_addr, u16 pbl_size,
		      void OSAL_IOMEM **pp_doorbell)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_txq_tlv *req;
	u16 qid = p_cid->rel.queue_id;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));

	req->tx_qid = qid;

	/* Tx */
	req->pbl_addr = pbl_addr;
	req->pbl_size = pbl_size;
	req->hw_sb = p_cid->sb_igu_id;
	req->sb_index = p_cid->sb_idx;

	ecore_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

	/* Modern PFs provide the actual offsets, while legacy
	 * provided only the queue id.
	 */
	if (!p_iov->b_pre_fp_hsi) {
		*pp_doorbell = (u8 OSAL_IOMEM*)p_hwfn->doorbells +
			       resp->offset;
	} else {
		u8 cid = p_iov->acquire_resp.resc.cid[qid];

		*pp_doorbell = (u8 OSAL_IOMEM*)p_hwfn->doorbells +
			       DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Txq[0x%02x.%02x]: doorbell at %p [offset 0x%08x]\n",
		   qid, p_cid->qid_usage_idx, *pp_doorbell, resp->offset);
exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
					  struct ecore_queue_cid *p_cid)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_txqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));

	req->tx_qid = p_cid->rel.queue_id;
	req->num_txqs = 1;

	ecore_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

#ifndef LINUX_REMOVE
enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
					     struct ecore_queue_cid **pp_cid,
					     u8 num_rxqs,
					     u8 comp_cqe_flg,
					     u8 comp_event_flg)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	struct vfpf_update_rxq_tlv *req;
	enum _ecore_status_t rc;

	/* Starting with CHANNEL_TLV_QID and the need for additional queue
	 * information, this API stopped supporting multiple rxqs.
	 * TODO - remove this and change the API to accept a single queue-cid
	 * in a follow-up patch.
	 */
	if (num_rxqs != 1) {
		DP_NOTICE(p_hwfn, true,
			  "VFs can no longer update more than a single queue\n");
		return ECORE_INVAL;
	}

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_RXQ, sizeof(*req));

	req->rx_qid = (*pp_cid)->rel.queue_id;
	req->num_rxqs = 1;

	if (comp_cqe_flg)
		req->flags |= VFPF_RXQ_UPD_COMPLETE_CQE_FLAG;
	if (comp_event_flg)
		req->flags |= VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG;

	ecore_vf_pf_add_qid(p_hwfn, *pp_cid);

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);
	return rc;
}
#endif

enum _ecore_status_t ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn,
					     u8 vport_id,
					     u16 mtu,
					     u8 inner_vlan_removal,
					     enum ecore_tpa_mode tpa_mode,
					     u8 max_buffers_per_cqe,
					     u8 only_untagged,
					     u8 zero_placement_offset)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_start_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	enum _ecore_status_t rc;
	int i;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));

	req->mtu = mtu;
	req->vport_id = vport_id;
	req->inner_vlan_removal = inner_vlan_removal;
	req->tpa_mode = tpa_mode;
	req->max_buffers_per_cqe = max_buffers_per_cqe;
	req->only_untagged = only_untagged;
	req->zero_placement_offset = zero_placement_offset;

	/* status blocks */
	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) {
		struct ecore_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i];

		if (p_sb)
			req->sb_addr[i] = p_sb->sb_phys;
	}

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
			 sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

static bool
ecore_vf_handle_vp_update_is_needed(struct ecore_hwfn *p_hwfn,
				    struct ecore_sp_vport_update_params *p_data,
				    u16 tlv)
{
	switch (tlv) {
	case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
		return !!(p_data->update_vport_active_rx_flg ||
			  p_data->update_vport_active_tx_flg);
	case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
#ifndef ASIC_ONLY
		/* FPGA doesn't have PVFC and so can't support tx-switching */
		return !!(p_data->update_tx_switching_flg &&
			  !CHIP_REV_IS_FPGA(p_hwfn->p_dev));
#else
		return !!p_data->update_tx_switching_flg;
#endif
	case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
		return !!p_data->update_inner_vlan_removal_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
		return !!p_data->update_accept_any_vlan_flg;
	case CHANNEL_TLV_VPORT_UPDATE_MCAST:
		return !!p_data->update_approx_mcast_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
		return !!(p_data->accept_flags.update_rx_mode_config ||
			  p_data->accept_flags.update_tx_mode_config);
	case CHANNEL_TLV_VPORT_UPDATE_RSS:
		return !!p_data->rss_params;
	case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
		return !!p_data->sge_tpa_params;
	default:
		DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d] %s\n",
			tlv, ecore_channel_tlvs_string[tlv]);
		return false;
	}
}

static void
ecore_vf_handle_vp_update_tlvs_resp(struct ecore_hwfn *p_hwfn,
				    struct ecore_sp_vport_update_params *p_data)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *p_resp;
	u16 tlv;

	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX;
	     tlv++) {
		if (!ecore_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
			continue;

		p_resp = (struct pfvf_def_resp_tlv *)
			 ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
						    tlv);
		if (p_resp && p_resp->hdr.status)
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV[%d] type %s Configuration %s\n",
				   tlv, ecore_channel_tlvs_string[tlv],
				   (p_resp && p_resp->hdr.status) ?
				   "succeeded" : "failed");
"succeeded" 1250 : "failed"); 1251 } 1252 } 1253 1254 enum _ecore_status_t ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn, 1255 struct ecore_sp_vport_update_params *p_params) 1256 { 1257 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1258 struct vfpf_vport_update_tlv *req; 1259 struct pfvf_def_resp_tlv *resp; 1260 u8 update_rx, update_tx; 1261 u32 resp_size = 0; 1262 u16 size, tlv; 1263 enum _ecore_status_t rc; 1264 1265 resp = &p_iov->pf2vf_reply->default_resp; 1266 resp_size = sizeof(*resp); 1267 1268 update_rx = p_params->update_vport_active_rx_flg; 1269 update_tx = p_params->update_vport_active_tx_flg; 1270 1271 /* clear mailbox and prep header tlv */ 1272 ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req)); 1273 1274 /* Prepare extended tlvs */ 1275 if (update_rx || update_tx) { 1276 struct vfpf_vport_update_activate_tlv *p_act_tlv; 1277 1278 size = sizeof(struct vfpf_vport_update_activate_tlv); 1279 p_act_tlv = ecore_add_tlv(&p_iov->offset, 1280 CHANNEL_TLV_VPORT_UPDATE_ACTIVATE, 1281 size); 1282 resp_size += sizeof(struct pfvf_def_resp_tlv); 1283 1284 if (update_rx) { 1285 p_act_tlv->update_rx = update_rx; 1286 p_act_tlv->active_rx = p_params->vport_active_rx_flg; 1287 } 1288 1289 if (update_tx) { 1290 p_act_tlv->update_tx = update_tx; 1291 p_act_tlv->active_tx = p_params->vport_active_tx_flg; 1292 } 1293 } 1294 1295 #ifndef ECORE_UPSTREAM 1296 if (p_params->update_inner_vlan_removal_flg) { 1297 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv; 1298 1299 size = sizeof(struct vfpf_vport_update_vlan_strip_tlv); 1300 p_vlan_tlv = ecore_add_tlv(&p_iov->offset, 1301 CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP, 1302 size); 1303 resp_size += sizeof(struct pfvf_def_resp_tlv); 1304 1305 p_vlan_tlv->remove_vlan = p_params->inner_vlan_removal_flg; 1306 } 1307 #endif 1308 1309 if (p_params->update_tx_switching_flg) { 1310 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; 1311 1312 size = sizeof(struct vfpf_vport_update_tx_switch_tlv); 1313 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; 1314 p_tx_switch_tlv = ecore_add_tlv(&p_iov->offset, 1315 tlv, size); 1316 resp_size += sizeof(struct pfvf_def_resp_tlv); 1317 1318 p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg; 1319 } 1320 1321 if (p_params->update_approx_mcast_flg) { 1322 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; 1323 1324 size = sizeof(struct vfpf_vport_update_mcast_bin_tlv); 1325 p_mcast_tlv = ecore_add_tlv(&p_iov->offset, 1326 CHANNEL_TLV_VPORT_UPDATE_MCAST, 1327 size); 1328 resp_size += sizeof(struct pfvf_def_resp_tlv); 1329 1330 OSAL_MEMCPY(p_mcast_tlv->bins, p_params->bins, 1331 sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); 1332 } 1333 1334 update_rx = p_params->accept_flags.update_rx_mode_config; 1335 update_tx = p_params->accept_flags.update_tx_mode_config; 1336 1337 if (update_rx || update_tx) { 1338 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; 1339 1340 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; 1341 size = sizeof(struct vfpf_vport_update_accept_param_tlv); 1342 p_accept_tlv = ecore_add_tlv(&p_iov->offset, tlv, size); 1343 resp_size += sizeof(struct pfvf_def_resp_tlv); 1344 1345 if (update_rx) { 1346 p_accept_tlv->update_rx_mode = update_rx; 1347 p_accept_tlv->rx_accept_filter = 1348 p_params->accept_flags.rx_accept_filter; 1349 } 1350 1351 if (update_tx) { 1352 p_accept_tlv->update_tx_mode = update_tx; 1353 p_accept_tlv->tx_accept_filter = 1354 p_params->accept_flags.tx_accept_filter; 1355 } 1356 } 1357 1358 if (p_params->rss_params) { 1359 struct ecore_rss_params *rss_params = 
		struct vfpf_vport_update_rss_tlv *p_rss_tlv;
		int i, table_size;

		size = sizeof(struct vfpf_vport_update_rss_tlv);
		p_rss_tlv = ecore_add_tlv(&p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_RSS, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (rss_params->update_rss_config)
			p_rss_tlv->update_rss_flags |=
				VFPF_UPDATE_RSS_CONFIG_FLAG;
		if (rss_params->update_rss_capabilities)
			p_rss_tlv->update_rss_flags |=
				VFPF_UPDATE_RSS_CAPS_FLAG;
		if (rss_params->update_rss_ind_table)
			p_rss_tlv->update_rss_flags |=
				VFPF_UPDATE_RSS_IND_TABLE_FLAG;
		if (rss_params->update_rss_key)
			p_rss_tlv->update_rss_flags |=
				VFPF_UPDATE_RSS_KEY_FLAG;

		p_rss_tlv->rss_enable = rss_params->rss_enable;
		p_rss_tlv->rss_caps = rss_params->rss_caps;
		p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;

		table_size = OSAL_MIN_T(int, T_ETH_INDIRECTION_TABLE_SIZE,
					1 << p_rss_tlv->rss_table_size_log);
		for (i = 0; i < table_size; i++) {
			struct ecore_queue_cid *p_queue;

			p_queue = rss_params->rss_ind_table[i];
			p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
		}

		OSAL_MEMCPY(p_rss_tlv->rss_key, rss_params->rss_key,
			    sizeof(rss_params->rss_key));
	}

	if (p_params->update_accept_any_vlan_flg) {
		struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
		p_any_vlan_tlv = ecore_add_tlv(&p_iov->offset, tlv, size);

		resp_size += sizeof(struct pfvf_def_resp_tlv);
		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
		p_any_vlan_tlv->update_accept_any_vlan_flg =
			p_params->update_accept_any_vlan_flg;
	}

#ifndef LINUX_REMOVE
	if (p_params->sge_tpa_params) {
		struct ecore_sge_tpa_params *sge_tpa_params;
		struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;

		sge_tpa_params = p_params->sge_tpa_params;
		size = sizeof(struct vfpf_vport_update_sge_tpa_tlv);
		p_sge_tpa_tlv = ecore_add_tlv(&p_iov->offset,
					      CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
					      size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (sge_tpa_params->update_tpa_en_flg)
			p_sge_tpa_tlv->update_sge_tpa_flags |=
				VFPF_UPDATE_TPA_EN_FLAG;
		if (sge_tpa_params->update_tpa_param_flg)
			p_sge_tpa_tlv->update_sge_tpa_flags |=
				VFPF_UPDATE_TPA_PARAM_FLAG;

		if (sge_tpa_params->tpa_ipv4_en_flg)
			p_sge_tpa_tlv->sge_tpa_flags |=
				VFPF_TPA_IPV4_EN_FLAG;
		if (sge_tpa_params->tpa_ipv6_en_flg)
			p_sge_tpa_tlv->sge_tpa_flags |=
				VFPF_TPA_IPV6_EN_FLAG;
		if (sge_tpa_params->tpa_pkt_split_flg)
			p_sge_tpa_tlv->sge_tpa_flags |=
				VFPF_TPA_PKT_SPLIT_FLAG;
		if (sge_tpa_params->tpa_hdr_data_split_flg)
			p_sge_tpa_tlv->sge_tpa_flags |=
				VFPF_TPA_HDR_DATA_SPLIT_FLAG;
		if (sge_tpa_params->tpa_gro_consistent_flg)
			p_sge_tpa_tlv->sge_tpa_flags |=
				VFPF_TPA_GRO_CONSIST_FLAG;

		p_sge_tpa_tlv->tpa_max_aggs_num =
			sge_tpa_params->tpa_max_aggs_num;
		p_sge_tpa_tlv->tpa_max_size = sge_tpa_params->tpa_max_size;
		p_sge_tpa_tlv->tpa_min_size_to_start =
			sge_tpa_params->tpa_min_size_to_start;
		p_sge_tpa_tlv->tpa_min_size_to_cont =
			sge_tpa_params->tpa_min_size_to_cont;

		p_sge_tpa_tlv->max_buffers_per_cqe =
			sge_tpa_params->max_buffers_per_cqe;
	}
#endif

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

	ecore_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_AGAIN;
		goto exit;
	}

	p_hwfn->b_int_enabled = 0;

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
			      struct ecore_filter_mcast *p_filter_cmd)
{
	struct ecore_sp_vport_update_params sp_params;
	int i;

	OSAL_MEMSET(&sp_params, 0, sizeof(sp_params));
	sp_params.update_approx_mcast_flg = 1;

	if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			sp_params.bins[bit / 32] |= 1 << (bit % 32);
		}
	}

	ecore_vf_pf_vport_update(p_hwfn, &sp_params);
}

enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,
					      struct ecore_filter_ucast *p_ucast)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_ucast_filter_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	enum _ecore_status_t rc;

#ifndef LINUX_REMOVE
	/* Sanitize */
	if (p_ucast->opcode == ECORE_FILTER_MOVE) {
		DP_NOTICE(p_hwfn, true,
			  "VFs don't support Moving of filters\n");
		return ECORE_INVAL;
	}
#endif

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
	req->opcode = (u8)p_ucast->opcode;
	req->type = (u8)p_ucast->type;
	OSAL_MEMCPY(req->mac, p_ucast->mac, ETH_ALEN);
	req->vlan = p_ucast->vlan;

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_AGAIN;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
			 sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
					      u16 *p_coal,
					      struct ecore_queue_cid *p_cid)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_read_coal_resp_tlv *resp;
	struct vfpf_read_coal_req_tlv *req;
	enum _ecore_status_t rc;

	/* clear mailbox and prep header tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ,
			       sizeof(*req));
	req->qid = p_cid->rel.queue_id;
	req->is_rx = p_cid->b_is_rx ? 1 : 0;

	ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));
	resp = &p_iov->pf2vf_reply->read_coal_resp;

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		goto exit;

	*p_coal = resp->coal;
exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t
ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal, u16 tx_coal,
			 struct ecore_queue_cid *p_cid)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_update_coalesce *req;
	struct pfvf_def_resp_tlv *resp;
	enum _ecore_status_t rc;

	/* clear mailbox and prep header tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE,
			       sizeof(*req));

	req->rx_coal = rx_coal;
	req->tx_coal = tx_coal;
	req->qid = p_cid->rel.queue_id;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n",
		   rx_coal, tx_coal, req->qid);

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		goto exit;

	p_hwfn->p_dev->rx_coalesce_usecs = rx_coal;
	p_hwfn->p_dev->tx_coalesce_usecs = tx_coal;

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);
	return rc;
}

u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
			   u16 sb_id)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, true, "vf_sriov_info isn't initialized\n");
		return 0;
	}

	return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}

void ecore_vf_set_sb_info(struct ecore_hwfn *p_hwfn,
			  u16 sb_id, struct ecore_sb_info *p_sb)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, true, "vf_sriov_info isn't initialized\n");
		return;
	}

	if (sb_id >= PFVF_MAX_SBS_PER_VF) {
		DP_NOTICE(p_hwfn, true, "Can't configure SB %04x\n", sb_id);
		return;
	}

	p_iov->sbs_info[sb_id] = p_sb;
}

enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
					    u8 *p_change)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct ecore_bulletin_content shadow;
	u32 crc, crc_size;

	crc_size = sizeof(p_iov->bulletin.p_virt->crc);
	*p_change = 0;

	/* Need to guarantee PF is not in the middle of writing it */
	OSAL_MEMCPY(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);

	/* If version did not update, no need to do anything */
	if (shadow.version == p_iov->bulletin_shadow.version)
		return ECORE_SUCCESS;

	/* Verify the bulletin we see is valid */
	crc = OSAL_CRC32(0, (u8 *)&shadow + crc_size,
			 p_iov->bulletin.size - crc_size);
	if (crc != shadow.crc)
		return ECORE_AGAIN;

	/* Set the shadow bulletin and process it */
	OSAL_MEMCPY(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Read a bulletin update %08x\n", shadow.version);

	*p_change = 1;

	return ECORE_SUCCESS;
}

void __ecore_vf_get_link_params(struct ecore_mcp_link_params *p_params,
				struct ecore_bulletin_content *p_bulletin)
{
	OSAL_MEMSET(p_params, 0, sizeof(*p_params));

	p_params->speed.autoneg = p_bulletin->req_autoneg;
	p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
	p_params->speed.forced_speed = p_bulletin->req_forced_speed;
	p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
	p_params->pause.forced_rx = p_bulletin->req_forced_rx;
	p_params->pause.forced_tx = p_bulletin->req_forced_tx;
	p_params->loopback_mode = p_bulletin->req_loopback;
}

void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
			      struct ecore_mcp_link_params *params)
{
	__ecore_vf_get_link_params(params,
				   &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __ecore_vf_get_link_state(struct ecore_mcp_link_state *p_link,
			       struct ecore_bulletin_content *p_bulletin)
{
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));

	p_link->link_up = p_bulletin->link_up;
	p_link->speed = p_bulletin->speed;
	p_link->full_duplex = p_bulletin->full_duplex;
	p_link->an = p_bulletin->autoneg;
	p_link->an_complete = p_bulletin->autoneg_complete;
	p_link->parallel_detection = p_bulletin->parallel_detection;
	p_link->pfc_enabled = p_bulletin->pfc_enabled;
	p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
	p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
	p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
	p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
	p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
}

void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
			     struct ecore_mcp_link_state *link)
{
	__ecore_vf_get_link_state(link,
				  &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __ecore_vf_get_link_caps(struct ecore_mcp_link_capabilities *p_link_caps,
			      struct ecore_bulletin_content *p_bulletin)
{
	OSAL_MEMSET(p_link_caps, 0, sizeof(*p_link_caps));
	p_link_caps->speed_capabilities = p_bulletin->capability_speed;
}

void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
			    struct ecore_mcp_link_capabilities *p_link_caps)
{
	__ecore_vf_get_link_caps(p_link_caps,
				 &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn,
			   u8 *num_rxqs)
{
	*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}

void ecore_vf_get_num_txqs(struct ecore_hwfn *p_hwfn,
			   u8 *num_txqs)
{
	*num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs;
}

void ecore_vf_get_num_cids(struct ecore_hwfn *p_hwfn,
			   u8 *num_cids)
{
	*num_cids = p_hwfn->vf_iov_info->acquire_resp.resc.num_cids;
}

void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn,
			   u8 *port_mac)
{
	OSAL_MEMCPY(port_mac,
		    p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac,
		    ETH_ALEN);
}

void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
				   u8 *num_vlan_filters)
{
	struct ecore_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}

void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
				  u8 *num_mac_filters)
{
	struct ecore_vf_iov *p_vf = p_hwfn->vf_iov_info;

	*num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters;
}

bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac)
{
	struct ecore_bulletin_content *bulletin;

	bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
	if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return true;

	/* Forbid VF from changing a MAC enforced by PF */
	if (OSAL_MEMCMP(bulletin->mac, mac, ETH_ALEN))
		return false;

	return false;
}

bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,
				      u8 *p_is_forced)
{
	struct ecore_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		if (p_is_forced)
			*p_is_forced = 1;
	} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
		if (p_is_forced)
			*p_is_forced = 0;
	} else {
		return false;
	}

	OSAL_MEMCPY(dst_mac, bulletin->mac, ETH_ALEN);

	return true;
}

void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn *p_hwfn,
				     u16 *p_vxlan_port,
				     u16 *p_geneve_port)
{
	struct ecore_bulletin_content *p_bulletin;

	p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;

	*p_vxlan_port = p_bulletin->vxlan_udp_port;
	*p_geneve_port = p_bulletin->geneve_udp_port;
}

#ifndef LINUX_REMOVE
bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid)
{
	struct ecore_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (!(bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
		return false;

	if (dst_pvid)
		*dst_pvid = bulletin->pvid;

	return true;
}

bool ecore_vf_get_pre_fp_hsi(struct ecore_hwfn *p_hwfn)
{
	return p_hwfn->vf_iov_info->b_pre_fp_hsi;
}
#endif

void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
			     u16 *fw_major, u16 *fw_minor, u16 *fw_rev,
			     u16 *fw_eng)
{
	struct pf_vf_pfdev_info *info;

	info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;

	*fw_major = info->fw_major;
	*fw_minor = info->fw_minor;
	*fw_rev = info->fw_rev;
	*fw_eng = info->fw_eng;
}

#ifdef CONFIG_ECORE_SW_CHANNEL
void ecore_vf_set_hw_channel(struct ecore_hwfn *p_hwfn, bool b_is_hw)
{
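	/* Selects between the HW mailbox and the OSAL software channel used
	 * by ecore_send_msg2pf().
	 */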
	p_hwfn->vf_iov_info->b_hw_channel = b_is_hw;
}
#endif

#ifdef _NTDDK_
#pragma warning(pop)
#endif