/*
 * Copyright (c) 2018-2019 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_hsi_eth.h"
#include "ecore_sriov.h"
#include "ecore_l2_api.h"
#include "ecore_vf.h"
#include "ecore_vfpf_if.h"
#include "ecore_status.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_l2.h"
#include "ecore_mcp_api.h"
#include "ecore_vf_api.h"

#ifdef _NTDDK_
#pragma warning(push)
#pragma warning(disable : 28167)
#pragma warning(disable : 28123)
#pragma warning(disable : 28121)
#endif

static void *ecore_vf_pf_prep(struct ecore_hwfn *p_hwfn,
			      u16 type, u16 length)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	void *p_tlv;

	/* This lock is released when we receive PF's response
	 * in ecore_send_msg2pf().
	 * So, ecore_vf_pf_prep() and ecore_send_msg2pf()
	 * must come in sequence.
	 */
	OSAL_MUTEX_ACQUIRE(&(p_iov->mutex));

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "preparing to send %s tlv over vf pf channel\n",
		   ecore_channel_tlvs_string[type]);

	/* Reset Request offset */
	p_iov->offset = (u8 *)p_iov->vf2pf_request;

	/* Clear mailbox - both request and reply */
	OSAL_MEMSET(p_iov->vf2pf_request, 0,
		    sizeof(union vfpf_tlvs));
	OSAL_MEMSET(p_iov->pf2vf_reply, 0,
		    sizeof(union pfvf_tlvs));

	/* Init type and length */
	p_tlv = ecore_add_tlv(&p_iov->offset, type, length);

	/* Init first tlv header */
	((struct vfpf_first_tlv *)p_tlv)->reply_address =
		(u64)p_iov->pf2vf_reply_phys;

	return p_tlv;
}

static void ecore_vf_pf_req_end(struct ecore_hwfn *p_hwfn,
				enum _ecore_status_t req_status)
{
	union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF request status = 0x%x, PF reply status = 0x%x\n",
		   req_status, resp->default_resp.hdr.status);

	OSAL_MUTEX_RELEASE(&(p_hwfn->vf_iov_info->mutex));
}

#ifdef CONFIG_ECORE_SW_CHANNEL
/* The Windows SW channel implementation needs to know the 'exact'
 * response size of any given message. That means we cannot send future
 * TLVs to the PF if it cannot answer them, i.e. whenever
 * |response| != |default response|; any such TLV would first have to be
 * negotiated through the acquire capabilities handshake.
 */
#endif
static enum _ecore_status_t
ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
		  u8 *done, u32 resp_size)
{
	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
	struct ustorm_trigger_vf_zone trigger;
	struct ustorm_vf_zone *zone_data;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int time = 100;

	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;

	/* output tlvs list */
	ecore_dp_tlv_list(p_hwfn, p_req);

	/* need to add the END TLV to the message size */
	resp_size += sizeof(struct channel_list_end_tlv);

#ifdef CONFIG_ECORE_SW_CHANNEL
	if (!p_hwfn->vf_iov_info->b_hw_channel) {
		rc = OSAL_VF_SEND_MSG2PF(p_hwfn->p_dev,
					 done,
					 p_req,
					 p_hwfn->vf_iov_info->pf2vf_reply,
					 sizeof(union vfpf_tlvs),
					 resp_size);
		/* TODO - no prints about message ? */
		return rc;
	}
#endif

	/* Send TLVs over HW channel */
	OSAL_MEMSET(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
	trigger.vf_pf_msg_valid = 1;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
		   GET_FIELD(p_hwfn->hw_info.concrete_fid,
			     PXP_CONCRETE_FID_PFID),
		   U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   &zone_data->non_trigger.vf_pf_msg_addr,
		   *((u32 *)&trigger),
		   &zone_data->trigger);

	REG_WR(p_hwfn,
	       (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
	       U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys));

	REG_WR(p_hwfn,
	       (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
	       U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys));

	/* The message data must be written first, to prevent trigger before
	 * data is written.
	 */
	OSAL_WMB(p_hwfn->p_dev);

	REG_WR(p_hwfn, (osal_uintptr_t)&zone_data->trigger, *((u32 *)&trigger));

	/* When PF would be done with the response, it would write back to the
	 * `done' address. Poll until then.
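	 * The loop below makes up to 100 passes with a 25 ms sleep between
	 * them, i.e. roughly 2.5 seconds before giving up with ECORE_TIMEOUT.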
	 */
	while ((!*done) && time) {
		OSAL_MSLEEP(25);
		time--;
	}

	if (!*done) {
		DP_NOTICE(p_hwfn, true,
			  "VF <-- PF Timeout [Type %d]\n",
			  p_req->first_tlv.tl.type);
		rc = ECORE_TIMEOUT;
	} else {
		if ((*done != PFVF_STATUS_SUCCESS) &&
		    (*done != PFVF_STATUS_NO_RESOURCE))
			DP_NOTICE(p_hwfn, false,
				  "PF response: %d [Type %d]\n",
				  *done, p_req->first_tlv.tl.type);
		else
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "PF response: %d [Type %d]\n",
				   *done, p_req->first_tlv.tl.type);
	}

	return rc;
}

static void ecore_vf_pf_add_qid(struct ecore_hwfn *p_hwfn,
				struct ecore_queue_cid *p_cid)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_qid_tlv *p_qid_tlv;

	/* Only add QIDs for the queue if it was negotiated with PF */
	if (!(p_iov->acquire_resp.pfdev_info.capabilities &
	      PFVF_ACQUIRE_CAP_QUEUE_QIDS))
		return;

	p_qid_tlv = ecore_add_tlv(&p_iov->offset,
				  CHANNEL_TLV_QID, sizeof(*p_qid_tlv));
	p_qid_tlv->qid = p_cid->qid_usage_idx;
}

static enum _ecore_status_t _ecore_vf_pf_release(struct ecore_hwfn *p_hwfn,
						 bool b_final)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	u32 size;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

	if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = ECORE_AGAIN;

	ecore_vf_pf_req_end(p_hwfn, rc);
	if (!b_final)
		return rc;

	p_hwfn->b_int_enabled = 0;

	if (p_iov->vf2pf_request)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov->vf2pf_request,
				       p_iov->vf2pf_request_phys,
				       sizeof(union vfpf_tlvs));
	if (p_iov->pf2vf_reply)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov->pf2vf_reply,
				       p_iov->pf2vf_reply_phys,
				       sizeof(union pfvf_tlvs));

	if (p_iov->bulletin.p_virt) {
		size = sizeof(struct ecore_bulletin_content);
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov->bulletin.p_virt,
				       p_iov->bulletin.phys,
				       size);
	}

#ifdef CONFIG_ECORE_LOCK_ALLOC
	OSAL_MUTEX_DEALLOC(&p_iov->mutex);
#endif

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);
	p_hwfn->vf_iov_info = OSAL_NULL;

	return rc;
}

enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
{
	return _ecore_vf_pf_release(p_hwfn, true);
}

#define VF_ACQUIRE_THRESH 3
static void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn,
					    struct vf_pf_resc_request *p_req,
					    struct pf_vf_resc *p_resp)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n",
		   p_req->num_rxqs, p_resp->num_rxqs,
		   p_req->num_txqs, p_resp->num_txqs,
		   p_req->num_sbs, p_resp->num_sbs,
		   p_req->num_mac_filters, p_resp->num_mac_filters,
		   p_req->num_vlan_filters, p_resp->num_vlan_filters,
		   p_req->num_mc_filters, p_resp->num_mc_filters,
		   p_req->num_cids, p_resp->num_cids);

	/* humble our request */
	p_req->num_txqs = p_resp->num_txqs;
	p_req->num_rxqs = p_resp->num_rxqs;
	p_req->num_sbs = p_resp->num_sbs;
	p_req->num_mac_filters = p_resp->num_mac_filters;
	p_req->num_vlan_filters = p_resp->num_vlan_filters;
	p_req->num_mc_filters = p_resp->num_mc_filters;
	p_req->num_cids = p_resp->num_cids;
}

static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct ecore_vf_acquire_sw_info vf_sw_info;
	struct vf_pf_resc_request *p_resc;
	bool resources_acquired = false;
	struct vfpf_acquire_tlv *req;
	int attempts = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int eth_hsi_minor_ver;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
	p_resc = &req->resc_request;

	/* @@@ TBD: PF may not be ready bnx2x_get_vf_id... */
	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;

	p_resc->num_rxqs = ECORE_MAX_VF_CHAINS_PER_PF;
	p_resc->num_txqs = ECORE_MAX_VF_CHAINS_PER_PF;
	p_resc->num_sbs = ECORE_MAX_VF_CHAINS_PER_PF;
	p_resc->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
	p_resc->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
	p_resc->num_cids = ECORE_ETH_VF_DEFAULT_NUM_CIDS;

	OSAL_MEMSET(&vf_sw_info, 0, sizeof(vf_sw_info));
	OSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, &req->resc_request, &vf_sw_info);

	req->vfdev_info.os_type = vf_sw_info.os_type;
	req->vfdev_info.driver_version = vf_sw_info.driver_version;
	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
	req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
	req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;

	/* Fill capability field with any non-deprecated config we support */
	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;

	/* If we've mapped the doorbell bar, try using queue qids */
	if (p_iov->b_doorbell_bar) {
		req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR |
						VFPF_ACQUIRE_CAP_QUEUE_QIDS;
		p_resc->num_cids = ECORE_ETH_VF_MAX_NUM_CIDS;
	}

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = p_iov->bulletin.phys;
	req->bulletin_size = p_iov->bulletin.size;

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	while (!resources_acquired) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "attempting to acquire resources\n");

		/* Clear response buffer, as this might be a re-send */
		OSAL_MEMSET(p_iov->pf2vf_reply, 0,
			    sizeof(union pfvf_tlvs));

		/* send acquire request */
		rc = ecore_send_msg2pf(p_hwfn,
				       &resp->hdr.status,
				       sizeof(*resp));
		if (rc != ECORE_SUCCESS)
			goto exit;

		/* copy acquire response from buffer to p_hwfn */
		OSAL_MEMCPY(&p_iov->acquire_resp,
			    resp,
			    sizeof(p_iov->acquire_resp));

		attempts++;

		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
			/* PF agrees to allocate our resources */
			if (!(resp->pfdev_info.capabilities &
			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
				/* It's possible legacy PF mistakenly accepted;
				 * but we don't care - simply mark it as
				 * legacy and continue.
				 */
				req->vfdev_info.capabilities |=
					VFPF_ACQUIRE_CAP_PRE_FP_HSI;
			}
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "resources acquired\n");
			resources_acquired = true;
		} /* PF refuses to allocate our resources */
		else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
			 attempts < VF_ACQUIRE_THRESH) {
			ecore_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
							&resp->resc);

		} else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
			if (pfdev_info->major_fp_hsi &&
			    (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
				DP_NOTICE(p_hwfn, false,
					  "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
					  pfdev_info->major_fp_hsi,
					  pfdev_info->minor_fp_hsi,
					  ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR,
					  pfdev_info->major_fp_hsi);
				rc = ECORE_INVAL;
				goto exit;
			}

			if (!pfdev_info->major_fp_hsi) {
				if (req->vfdev_info.capabilities &
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
					DP_NOTICE(p_hwfn, false,
						  "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n");
					rc = ECORE_INVAL;
					goto exit;
				} else {
					DP_INFO(p_hwfn,
						"PF is old - try re-acquire to see if it supports FW-version override\n");
					req->vfdev_info.capabilities |=
						VFPF_ACQUIRE_CAP_PRE_FP_HSI;
					continue;
				}
			}

			/* If PF/VF are using the same Major, PF must have had
			 * its reasons. Simply fail.
			 */
			DP_NOTICE(p_hwfn, false,
				  "PF rejected acquisition by VF\n");
			rc = ECORE_INVAL;
			goto exit;
		} else {
			DP_ERR(p_hwfn, "PF returned error %d to VF acquisition request\n",
			       resp->hdr.status);
			rc = ECORE_AGAIN;
			goto exit;
		}
	}

	/* Mark the PF as legacy, if needed */
	if (req->vfdev_info.capabilities &
	    VFPF_ACQUIRE_CAP_PRE_FP_HSI)
		p_iov->b_pre_fp_hsi = true;

	/* In case PF doesn't support multi-queue Tx, update the number of
	 * CIDs to reflect the number of queues [older PFs didn't fill that
	 * field].
	 */
	if (!(resp->pfdev_info.capabilities &
	      PFVF_ACQUIRE_CAP_QUEUE_QIDS))
		resp->resc.num_cids = resp->resc.num_rxqs +
				      resp->resc.num_txqs;

#ifndef LINUX_REMOVE
	rc = OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(p_hwfn, &resp->resc);
	if (rc) {
		DP_NOTICE(p_hwfn, true,
			  "VF_UPDATE_ACQUIRE_RESC_RESP Failed: status = 0x%x.\n",
			  rc);
		rc = ECORE_AGAIN;
		goto exit;
	}
#endif

	/* Update bulletin board size with response from PF */
	p_iov->bulletin.size = resp->bulletin_size;

	/* get HW info */
	p_hwfn->p_dev->type = resp->pfdev_info.dev_type;
	p_hwfn->p_dev->chip_rev = (u8) resp->pfdev_info.chip_rev;

	DP_INFO(p_hwfn, "Chip details - %s%d\n",
		ECORE_IS_BB(p_hwfn->p_dev) ? "BB" : "AH",
		CHIP_REV_IS_A0(p_hwfn->p_dev) ? 0 : 1);

	p_hwfn->p_dev->chip_num = pfdev_info->chip_num & 0xffff;

	/* Learn of the possibility of CMT */
	if (IS_LEAD_HWFN(p_hwfn)) {
		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
			DP_NOTICE(p_hwfn, false, "100g VF\n");
			p_hwfn->p_dev->num_hwfns = 2;
		}
	}

	eth_hsi_minor_ver = ETH_HSI_VER_MINOR;

	if (!p_iov->b_pre_fp_hsi &&
	    (eth_hsi_minor_ver) &&
	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
		DP_INFO(p_hwfn,
			"PF is using older fastpath HSI; %02x.%02x is configured\n",
			ETH_HSI_VER_MAJOR,
			resp->pfdev_info.minor_fp_hsi);
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

u32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn,
			 enum BAR_ID bar_id)
{
	u32 bar_size;

	/* Regview size is fixed */
	if (bar_id == BAR_ID_0)
		return 1 << 17;

	/* Doorbell is received from PF */
	bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size;
	if (bar_size)
		return 1 << bar_size;
	return 0;
}

enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
{
	struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_hwfn->p_dev);
	struct ecore_vf_iov *p_iov;
	u32 reg;
	enum _ecore_status_t rc;

	/* Set number of hwfns - might be overridden once leading hwfn learns
	 * actual configuration from PF.
	 */
	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->p_dev->num_hwfns = 1;

	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);

	reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);

	/* Allocate vf sriov info */
	p_iov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_iov));
	if (!p_iov) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate `struct ecore_sriov'\n");
		return ECORE_NOMEM;
	}

	/* Doorbells are tricky; Upper-layer has already set the hwfn doorbell
	 * value, but there are several incompatibility scenarios where that
	 * would be incorrect and we'd need to override it.
	 */
	if (p_hwfn->doorbells == OSAL_NULL) {
		p_hwfn->doorbells = (u8 OSAL_IOMEM*)p_hwfn->regview +
				    PXP_VF_BAR0_START_DQ;
#ifndef LINUX_REMOVE
		p_hwfn->db_offset = (u8 *)p_hwfn->doorbells -
				    (u8 *)p_hwfn->p_dev->doorbells;
#endif

	} else if (p_hwfn == p_lead) {
		/* For leading hw-function, value is always correct, but need
		 * to handle scenario where legacy PF would not support 100g
		 * mapped bars later.
		 */
		p_iov->b_doorbell_bar = true;
	} else {
		/* here, value would be correct ONLY if the leading hwfn
		 * received indication that mapped-bars are supported.
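		 * The leading hwfn may have cleared b_doorbell_bar again after
		 * its ACQUIRE if the PF did not report a doorbell bar size
		 * (see the re-acquire fallback further below), so non-leading
		 * hwfns simply inherit that decision here.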
567 */ 568 if (p_lead->vf_iov_info->b_doorbell_bar) 569 p_iov->b_doorbell_bar = true; 570 #ifdef LINUX_REMOVE 571 else 572 p_hwfn->doorbells = (u8 OSAL_IOMEM*) 573 p_hwfn->regview + 574 PXP_VF_BAR0_START_DQ; 575 #else 576 else { 577 p_hwfn->doorbells = (u8 OSAL_IOMEM*)p_hwfn->regview + 578 PXP_VF_BAR0_START_DQ; 579 p_hwfn->db_offset = (u8 *)p_hwfn->doorbells - 580 (u8 *)p_hwfn->p_dev->doorbells; 581 } 582 #endif 583 } 584 585 /* Allocate vf2pf msg */ 586 p_iov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, 587 &p_iov->vf2pf_request_phys, 588 sizeof(union vfpf_tlvs)); 589 if (!p_iov->vf2pf_request) { 590 DP_NOTICE(p_hwfn, true, "Failed to allocate `vf2pf_request' DMA memory\n"); 591 goto free_p_iov; 592 } 593 594 p_iov->pf2vf_reply = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, 595 &p_iov->pf2vf_reply_phys, 596 sizeof(union pfvf_tlvs)); 597 if (!p_iov->pf2vf_reply) { 598 DP_NOTICE(p_hwfn, true, "Failed to allocate `pf2vf_reply' DMA memory\n"); 599 goto free_vf2pf_request; 600 } 601 602 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 603 "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n", 604 p_iov->vf2pf_request, 605 (unsigned long long)p_iov->vf2pf_request_phys, 606 p_iov->pf2vf_reply, 607 (unsigned long long)p_iov->pf2vf_reply_phys); 608 609 /* Allocate Bulletin board */ 610 p_iov->bulletin.size = sizeof(struct ecore_bulletin_content); 611 p_iov->bulletin.p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, 612 &p_iov->bulletin.phys, 613 p_iov->bulletin.size); 614 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 615 "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n", 616 p_iov->bulletin.p_virt, 617 (unsigned long long)p_iov->bulletin.phys, 618 p_iov->bulletin.size); 619 620 #ifdef CONFIG_ECORE_LOCK_ALLOC 621 OSAL_MUTEX_ALLOC(p_hwfn, &p_iov->mutex); 622 #endif 623 OSAL_MUTEX_INIT(&p_iov->mutex); 624 625 p_hwfn->vf_iov_info = p_iov; 626 627 p_hwfn->hw_info.personality = ECORE_PCI_ETH; 628 629 rc = ecore_vf_pf_acquire(p_hwfn); 630 631 /* If VF is 100g using a mapped bar and PF is too old to support that, 632 * acquisition would succeed - but the VF would have no way knowing 633 * the size of the doorbell bar configured in HW and thus will not 634 * know how to split it for 2nd hw-function. 635 * In this case we re-try without the indication of the mapped 636 * doorbell. 
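	 * Concretely: b_doorbell_bar is dropped, the doorbell pointer falls
	 * back to the legacy BAR0 DQ window, and ACQUIRE is issued again.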
637 */ 638 if (rc == ECORE_SUCCESS && 639 p_iov->b_doorbell_bar && 640 !ecore_vf_hw_bar_size(p_hwfn, BAR_ID_1) && 641 ECORE_IS_CMT(p_hwfn->p_dev)) { 642 rc = _ecore_vf_pf_release(p_hwfn, false); 643 if (rc != ECORE_SUCCESS) 644 return rc; 645 646 p_iov->b_doorbell_bar = false; 647 p_hwfn->doorbells = (u8 OSAL_IOMEM*)p_hwfn->regview + 648 PXP_VF_BAR0_START_DQ; 649 #ifndef LINUX_REMOVE 650 p_hwfn->db_offset = (u8 *)p_hwfn->doorbells - 651 (u8 *)p_hwfn->p_dev->doorbells; 652 #endif 653 rc = ecore_vf_pf_acquire(p_hwfn); 654 } 655 656 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 657 "Regview [%p], Doorbell [%p], Device-doorbell [%p]\n", 658 p_hwfn->regview, p_hwfn->doorbells, 659 p_hwfn->p_dev->doorbells); 660 661 return rc; 662 663 free_vf2pf_request: 664 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->vf2pf_request, 665 p_iov->vf2pf_request_phys, 666 sizeof(union vfpf_tlvs)); 667 free_p_iov: 668 OSAL_FREE(p_hwfn->p_dev, p_iov); 669 670 return ECORE_NOMEM; 671 } 672 673 #define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A 674 #define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \ 675 (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev))) 676 677 static void 678 __ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req, 679 struct ecore_tunn_update_type *p_src, 680 enum ecore_tunn_clss mask, u8 *p_cls) 681 { 682 if (p_src->b_update_mode) { 683 p_req->tun_mode_update_mask |= (1 << mask); 684 685 if (p_src->b_mode_enabled) 686 p_req->tunn_mode |= (1 << mask); 687 } 688 689 *p_cls = p_src->tun_cls; 690 } 691 692 static void 693 ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req, 694 struct ecore_tunn_update_type *p_src, 695 enum ecore_tunn_clss mask, u8 *p_cls, 696 struct ecore_tunn_update_udp_port *p_port, 697 u8 *p_update_port, u16 *p_udp_port) 698 { 699 if (p_port->b_update_port) { 700 *p_update_port = 1; 701 *p_udp_port = p_port->port; 702 } 703 704 __ecore_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls); 705 } 706 707 void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun) 708 { 709 if (p_tun->vxlan.b_mode_enabled) 710 p_tun->vxlan.b_update_mode = true; 711 if (p_tun->l2_geneve.b_mode_enabled) 712 p_tun->l2_geneve.b_update_mode = true; 713 if (p_tun->ip_geneve.b_mode_enabled) 714 p_tun->ip_geneve.b_update_mode = true; 715 if (p_tun->l2_gre.b_mode_enabled) 716 p_tun->l2_gre.b_update_mode = true; 717 if (p_tun->ip_gre.b_mode_enabled) 718 p_tun->ip_gre.b_update_mode = true; 719 720 p_tun->b_update_rx_cls = true; 721 p_tun->b_update_tx_cls = true; 722 } 723 724 static void 725 __ecore_vf_update_tunn_param(struct ecore_tunn_update_type *p_tun, 726 u16 feature_mask, u8 tunn_mode, u8 tunn_cls, 727 enum ecore_tunn_mode val) 728 { 729 if (feature_mask & (1 << val)) { 730 p_tun->b_mode_enabled = tunn_mode; 731 p_tun->tun_cls = tunn_cls; 732 } else { 733 p_tun->b_mode_enabled = false; 734 } 735 } 736 737 static void 738 ecore_vf_update_tunn_param(struct ecore_hwfn *p_hwfn, 739 struct ecore_tunnel_info *p_tun, 740 struct pfvf_update_tunn_param_tlv *p_resp) 741 { 742 /* Update mode and classes provided by PF */ 743 u16 feat_mask = p_resp->tunn_feature_mask; 744 745 __ecore_vf_update_tunn_param(&p_tun->vxlan, feat_mask, 746 p_resp->vxlan_mode, p_resp->vxlan_clss, 747 ECORE_MODE_VXLAN_TUNN); 748 __ecore_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask, 749 p_resp->l2geneve_mode, 750 p_resp->l2geneve_clss, 751 ECORE_MODE_L2GENEVE_TUNN); 752 __ecore_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask, 753 p_resp->ipgeneve_mode, 754 p_resp->ipgeneve_clss, 755 
ECORE_MODE_IPGENEVE_TUNN); 756 __ecore_vf_update_tunn_param(&p_tun->l2_gre, feat_mask, 757 p_resp->l2gre_mode, p_resp->l2gre_clss, 758 ECORE_MODE_L2GRE_TUNN); 759 __ecore_vf_update_tunn_param(&p_tun->ip_gre, feat_mask, 760 p_resp->ipgre_mode, p_resp->ipgre_clss, 761 ECORE_MODE_IPGRE_TUNN); 762 p_tun->geneve_port.port = p_resp->geneve_udp_port; 763 p_tun->vxlan_port.port = p_resp->vxlan_udp_port; 764 765 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 766 "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x", 767 p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled, 768 p_tun->ip_geneve.b_mode_enabled, 769 p_tun->l2_gre.b_mode_enabled, 770 p_tun->ip_gre.b_mode_enabled); 771 } 772 773 enum _ecore_status_t 774 ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn, 775 struct ecore_tunnel_info *p_src) 776 { 777 struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel; 778 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 779 struct pfvf_update_tunn_param_tlv *p_resp; 780 struct vfpf_update_tunn_param_tlv *p_req; 781 enum _ecore_status_t rc; 782 783 p_req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM, 784 sizeof(*p_req)); 785 786 if (p_src->b_update_rx_cls && p_src->b_update_tx_cls) 787 p_req->update_tun_cls = 1; 788 789 ecore_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, (enum ecore_tunn_clss)ECORE_MODE_VXLAN_TUNN, 790 &p_req->vxlan_clss, &p_src->vxlan_port, 791 &p_req->update_vxlan_port, 792 &p_req->vxlan_port); 793 ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve, 794 (enum ecore_tunn_clss)ECORE_MODE_L2GENEVE_TUNN, 795 &p_req->l2geneve_clss, &p_src->geneve_port, 796 &p_req->update_geneve_port, 797 &p_req->geneve_port); 798 __ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve, 799 (enum ecore_tunn_clss)ECORE_MODE_IPGENEVE_TUNN, 800 &p_req->ipgeneve_clss); 801 __ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre, 802 (enum ecore_tunn_clss)ECORE_MODE_L2GRE_TUNN, &p_req->l2gre_clss); 803 __ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre, 804 (enum ecore_tunn_clss)ECORE_MODE_IPGRE_TUNN, &p_req->ipgre_clss); 805 806 /* add list termination tlv */ 807 ecore_add_tlv(&p_iov->offset, 808 CHANNEL_TLV_LIST_END, 809 sizeof(struct channel_list_end_tlv)); 810 811 p_resp = &p_iov->pf2vf_reply->tunn_param_resp; 812 rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp)); 813 814 if (rc) 815 goto exit; 816 817 if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) { 818 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 819 "Failed to update tunnel parameters\n"); 820 rc = ECORE_INVAL; 821 } 822 823 ecore_vf_update_tunn_param(p_hwfn, p_tun, p_resp); 824 exit: 825 ecore_vf_pf_req_end(p_hwfn, rc); 826 return rc; 827 } 828 829 enum _ecore_status_t 830 ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn, 831 struct ecore_queue_cid *p_cid, 832 u16 bd_max_bytes, 833 dma_addr_t bd_chain_phys_addr, 834 dma_addr_t cqe_pbl_addr, 835 u16 cqe_pbl_size, 836 void OSAL_IOMEM **pp_prod) 837 { 838 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 839 struct pfvf_start_queue_resp_tlv *resp; 840 struct vfpf_start_rxq_tlv *req; 841 u16 rx_qid = p_cid->rel.queue_id; 842 enum _ecore_status_t rc; 843 844 /* clear mailbox and prep first tlv */ 845 req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req)); 846 847 req->rx_qid = rx_qid; 848 req->cqe_pbl_addr = cqe_pbl_addr; 849 req->cqe_pbl_size = cqe_pbl_size; 850 req->rxq_addr = bd_chain_phys_addr; 851 req->hw_sb = p_cid->sb_igu_id; 852 req->sb_index = p_cid->sb_idx; 853 req->bd_max_bytes = bd_max_bytes; 854 req->stat_id = -1; /* Keep initialized, for 
future compatibility */ 855 856 /* If PF is legacy, we'll need to calculate producers ourselves 857 * as well as clean them. 858 */ 859 if (p_iov->b_pre_fp_hsi) { 860 u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid]; 861 u32 init_prod_val = 0; 862 863 *pp_prod = (u8 OSAL_IOMEM*) 864 p_hwfn->regview + 865 MSTORM_QZONE_START(p_hwfn->p_dev) + 866 hw_qid * MSTORM_QZONE_SIZE; 867 868 /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ 869 __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), 870 (u32 *)(&init_prod_val)); 871 } 872 873 ecore_vf_pf_add_qid(p_hwfn, p_cid); 874 875 /* add list termination tlv */ 876 ecore_add_tlv(&p_iov->offset, 877 CHANNEL_TLV_LIST_END, 878 sizeof(struct channel_list_end_tlv)); 879 880 resp = &p_iov->pf2vf_reply->queue_start; 881 rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 882 if (rc) 883 goto exit; 884 885 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 886 rc = ECORE_INVAL; 887 goto exit; 888 } 889 890 /* Learn the address of the producer from the response */ 891 if (!p_iov->b_pre_fp_hsi) { 892 u32 init_prod_val = 0; 893 894 *pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview + resp->offset; 895 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 896 "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n", 897 rx_qid, *pp_prod, resp->offset); 898 899 /* Init the rcq, rx bd and rx sge (if valid) producers to 0. 900 * It was actually the PF's responsibility, but since some 901 * old PFs might fail to do so, we do this as well. 902 */ 903 OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3); 904 __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), 905 (u32 *)&init_prod_val); 906 } 907 908 exit: 909 ecore_vf_pf_req_end(p_hwfn, rc); 910 911 return rc; 912 } 913 914 enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn, 915 struct ecore_queue_cid *p_cid, 916 bool cqe_completion) 917 { 918 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 919 struct vfpf_stop_rxqs_tlv *req; 920 struct pfvf_def_resp_tlv *resp; 921 enum _ecore_status_t rc; 922 923 /* clear mailbox and prep first tlv */ 924 req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req)); 925 926 req->rx_qid = p_cid->rel.queue_id; 927 req->num_rxqs = 1; 928 req->cqe_completion = cqe_completion; 929 930 ecore_vf_pf_add_qid(p_hwfn, p_cid); 931 932 /* add list termination tlv */ 933 ecore_add_tlv(&p_iov->offset, 934 CHANNEL_TLV_LIST_END, 935 sizeof(struct channel_list_end_tlv)); 936 937 resp = &p_iov->pf2vf_reply->default_resp; 938 rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 939 if (rc) 940 goto exit; 941 942 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 943 rc = ECORE_INVAL; 944 goto exit; 945 } 946 947 exit: 948 ecore_vf_pf_req_end(p_hwfn, rc); 949 950 return rc; 951 } 952 953 enum _ecore_status_t 954 ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn, 955 struct ecore_queue_cid *p_cid, 956 dma_addr_t pbl_addr, u16 pbl_size, 957 void OSAL_IOMEM **pp_doorbell) 958 { 959 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 960 struct pfvf_start_queue_resp_tlv *resp; 961 struct vfpf_start_txq_tlv *req; 962 u16 qid = p_cid->rel.queue_id; 963 enum _ecore_status_t rc; 964 965 /* clear mailbox and prep first tlv */ 966 req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req)); 967 968 req->tx_qid = qid; 969 970 /* Tx */ 971 req->pbl_addr = pbl_addr; 972 req->pbl_size = pbl_size; 973 req->hw_sb = p_cid->sb_igu_id; 974 req->sb_index = p_cid->sb_idx; 975 976 ecore_vf_pf_add_qid(p_hwfn, p_cid); 977 978 /* add list termination tlv */ 979 ecore_add_tlv(&p_iov->offset, 980 CHANNEL_TLV_LIST_END, 
981 sizeof(struct channel_list_end_tlv)); 982 983 resp = &p_iov->pf2vf_reply->queue_start; 984 rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 985 if (rc) 986 goto exit; 987 988 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 989 rc = ECORE_INVAL; 990 goto exit; 991 } 992 993 /* Modern PFs provide the actual offsets, while legacy 994 * provided only the queue id. 995 */ 996 if (!p_iov->b_pre_fp_hsi) { 997 *pp_doorbell = (u8 OSAL_IOMEM*)p_hwfn->doorbells + 998 resp->offset; 999 } else { 1000 u8 cid = p_iov->acquire_resp.resc.cid[qid]; 1001 1002 *pp_doorbell = (u8 OSAL_IOMEM*)p_hwfn->doorbells + 1003 DB_ADDR_VF(cid, DQ_DEMS_LEGACY); 1004 } 1005 1006 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1007 "Txq[0x%02x.%02x]: doorbell at %p [offset 0x%08x]\n", 1008 qid, p_cid->qid_usage_idx, *pp_doorbell, resp->offset); 1009 exit: 1010 ecore_vf_pf_req_end(p_hwfn, rc); 1011 1012 return rc; 1013 } 1014 1015 enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, 1016 struct ecore_queue_cid *p_cid) 1017 { 1018 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1019 struct vfpf_stop_txqs_tlv *req; 1020 struct pfvf_def_resp_tlv *resp; 1021 enum _ecore_status_t rc; 1022 1023 /* clear mailbox and prep first tlv */ 1024 req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req)); 1025 1026 req->tx_qid = p_cid->rel.queue_id; 1027 req->num_txqs = 1; 1028 1029 ecore_vf_pf_add_qid(p_hwfn, p_cid); 1030 1031 /* add list termination tlv */ 1032 ecore_add_tlv(&p_iov->offset, 1033 CHANNEL_TLV_LIST_END, 1034 sizeof(struct channel_list_end_tlv)); 1035 1036 resp = &p_iov->pf2vf_reply->default_resp; 1037 rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 1038 if (rc) 1039 goto exit; 1040 1041 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 1042 rc = ECORE_INVAL; 1043 goto exit; 1044 } 1045 1046 exit: 1047 ecore_vf_pf_req_end(p_hwfn, rc); 1048 1049 return rc; 1050 } 1051 1052 #ifndef LINUX_REMOVE 1053 enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn, 1054 struct ecore_queue_cid **pp_cid, 1055 u8 num_rxqs, 1056 u8 comp_cqe_flg, 1057 u8 comp_event_flg) 1058 { 1059 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1060 struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; 1061 struct vfpf_update_rxq_tlv *req; 1062 enum _ecore_status_t rc; 1063 1064 /* Starting with CHANNEL_TLV_QID and the need for additional queue 1065 * information, this API stopped supporting multiple rxqs. 1066 * TODO - remove this and change the API to accept a single queue-cid 1067 * in a follow-up patch. 
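	 * Until then, callers that still hold multiple queue-cids must
	 * invoke this function once per queue.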
1068 */ 1069 if (num_rxqs != 1) { 1070 DP_NOTICE(p_hwfn, true, 1071 "VFs can no longer update more than a single queue\n"); 1072 return ECORE_INVAL; 1073 } 1074 1075 /* clear mailbox and prep first tlv */ 1076 req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_RXQ, sizeof(*req)); 1077 1078 req->rx_qid = (*pp_cid)->rel.queue_id; 1079 req->num_rxqs = 1; 1080 1081 if (comp_cqe_flg) 1082 req->flags |= VFPF_RXQ_UPD_COMPLETE_CQE_FLAG; 1083 if (comp_event_flg) 1084 req->flags |= VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG; 1085 1086 ecore_vf_pf_add_qid(p_hwfn, *pp_cid); 1087 1088 /* add list termination tlv */ 1089 ecore_add_tlv(&p_iov->offset, 1090 CHANNEL_TLV_LIST_END, 1091 sizeof(struct channel_list_end_tlv)); 1092 1093 rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 1094 if (rc) 1095 goto exit; 1096 1097 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 1098 rc = ECORE_INVAL; 1099 goto exit; 1100 } 1101 1102 exit: 1103 ecore_vf_pf_req_end(p_hwfn, rc); 1104 return rc; 1105 } 1106 #endif 1107 1108 enum _ecore_status_t ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn, 1109 u8 vport_id, 1110 u16 mtu, 1111 u8 inner_vlan_removal, 1112 enum ecore_tpa_mode tpa_mode, 1113 u8 max_buffers_per_cqe, 1114 u8 only_untagged, 1115 u8 zero_placement_offset) 1116 { 1117 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1118 struct vfpf_vport_start_tlv *req; 1119 struct pfvf_def_resp_tlv *resp; 1120 enum _ecore_status_t rc; 1121 int i; 1122 1123 /* clear mailbox and prep first tlv */ 1124 req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req)); 1125 1126 req->mtu = mtu; 1127 req->vport_id = vport_id; 1128 req->inner_vlan_removal = inner_vlan_removal; 1129 req->tpa_mode = tpa_mode; 1130 req->max_buffers_per_cqe = max_buffers_per_cqe; 1131 req->only_untagged = only_untagged; 1132 req->zero_placement_offset = zero_placement_offset; 1133 1134 /* status blocks */ 1135 for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) { 1136 struct ecore_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i]; 1137 1138 if (p_sb) 1139 req->sb_addr[i] = p_sb->sb_phys; 1140 } 1141 1142 /* add list termination tlv */ 1143 ecore_add_tlv(&p_iov->offset, 1144 CHANNEL_TLV_LIST_END, 1145 sizeof(struct channel_list_end_tlv)); 1146 1147 resp = &p_iov->pf2vf_reply->default_resp; 1148 rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 1149 if (rc) 1150 goto exit; 1151 1152 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 1153 rc = ECORE_INVAL; 1154 goto exit; 1155 } 1156 1157 exit: 1158 ecore_vf_pf_req_end(p_hwfn, rc); 1159 1160 return rc; 1161 } 1162 1163 enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn) 1164 { 1165 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1166 struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; 1167 enum _ecore_status_t rc; 1168 1169 /* clear mailbox and prep first tlv */ 1170 ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN, 1171 sizeof(struct vfpf_first_tlv)); 1172 1173 /* add list termination tlv */ 1174 ecore_add_tlv(&p_iov->offset, 1175 CHANNEL_TLV_LIST_END, 1176 sizeof(struct channel_list_end_tlv)); 1177 1178 rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 1179 if (rc) 1180 goto exit; 1181 1182 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 1183 rc = ECORE_INVAL; 1184 goto exit; 1185 } 1186 1187 exit: 1188 ecore_vf_pf_req_end(p_hwfn, rc); 1189 1190 return rc; 1191 } 1192 1193 static bool 1194 ecore_vf_handle_vp_update_is_needed(struct ecore_hwfn *p_hwfn, 1195 struct ecore_sp_vport_update_params *p_data, 1196 u16 
tlv) 1197 { 1198 switch (tlv) { 1199 case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE: 1200 return !!(p_data->update_vport_active_rx_flg || 1201 p_data->update_vport_active_tx_flg); 1202 case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH: 1203 #ifndef ASIC_ONLY 1204 /* FPGA doesn't have PVFC and so can't support tx-switching */ 1205 return !!(p_data->update_tx_switching_flg && 1206 !CHIP_REV_IS_FPGA(p_hwfn->p_dev)); 1207 #else 1208 return !!p_data->update_tx_switching_flg; 1209 #endif 1210 case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP: 1211 return !!p_data->update_inner_vlan_removal_flg; 1212 case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN: 1213 return !!p_data->update_accept_any_vlan_flg; 1214 case CHANNEL_TLV_VPORT_UPDATE_MCAST: 1215 return !!p_data->update_approx_mcast_flg; 1216 case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM: 1217 return !!(p_data->accept_flags.update_rx_mode_config || 1218 p_data->accept_flags.update_tx_mode_config); 1219 case CHANNEL_TLV_VPORT_UPDATE_RSS: 1220 return !!p_data->rss_params; 1221 case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA: 1222 return !!p_data->sge_tpa_params; 1223 default: 1224 DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d] %s\n", 1225 tlv, ecore_channel_tlvs_string[tlv]); 1226 return false; 1227 } 1228 } 1229 1230 static void 1231 ecore_vf_handle_vp_update_tlvs_resp(struct ecore_hwfn *p_hwfn, 1232 struct ecore_sp_vport_update_params *p_data) 1233 { 1234 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1235 struct pfvf_def_resp_tlv *p_resp; 1236 u16 tlv; 1237 1238 for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; 1239 tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; 1240 tlv++) { 1241 if (!ecore_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv)) 1242 continue; 1243 1244 p_resp = (struct pfvf_def_resp_tlv *) 1245 ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, 1246 tlv); 1247 if (p_resp && p_resp->hdr.status) 1248 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1249 "TLV[%d] type %s Configuration %s\n", 1250 tlv, ecore_channel_tlvs_string[tlv], 1251 (p_resp && p_resp->hdr.status) ? 
"succeeded" 1252 : "failed"); 1253 } 1254 } 1255 1256 enum _ecore_status_t ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn, 1257 struct ecore_sp_vport_update_params *p_params) 1258 { 1259 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1260 struct vfpf_vport_update_tlv *req; 1261 struct pfvf_def_resp_tlv *resp; 1262 u8 update_rx, update_tx; 1263 u32 resp_size = 0; 1264 u16 size, tlv; 1265 enum _ecore_status_t rc; 1266 1267 resp = &p_iov->pf2vf_reply->default_resp; 1268 resp_size = sizeof(*resp); 1269 1270 update_rx = p_params->update_vport_active_rx_flg; 1271 update_tx = p_params->update_vport_active_tx_flg; 1272 1273 /* clear mailbox and prep header tlv */ 1274 ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req)); 1275 1276 /* Prepare extended tlvs */ 1277 if (update_rx || update_tx) { 1278 struct vfpf_vport_update_activate_tlv *p_act_tlv; 1279 1280 size = sizeof(struct vfpf_vport_update_activate_tlv); 1281 p_act_tlv = ecore_add_tlv(&p_iov->offset, 1282 CHANNEL_TLV_VPORT_UPDATE_ACTIVATE, 1283 size); 1284 resp_size += sizeof(struct pfvf_def_resp_tlv); 1285 1286 if (update_rx) { 1287 p_act_tlv->update_rx = update_rx; 1288 p_act_tlv->active_rx = p_params->vport_active_rx_flg; 1289 } 1290 1291 if (update_tx) { 1292 p_act_tlv->update_tx = update_tx; 1293 p_act_tlv->active_tx = p_params->vport_active_tx_flg; 1294 } 1295 } 1296 1297 #ifndef ECORE_UPSTREAM 1298 if (p_params->update_inner_vlan_removal_flg) { 1299 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv; 1300 1301 size = sizeof(struct vfpf_vport_update_vlan_strip_tlv); 1302 p_vlan_tlv = ecore_add_tlv(&p_iov->offset, 1303 CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP, 1304 size); 1305 resp_size += sizeof(struct pfvf_def_resp_tlv); 1306 1307 p_vlan_tlv->remove_vlan = p_params->inner_vlan_removal_flg; 1308 } 1309 #endif 1310 1311 if (p_params->update_tx_switching_flg) { 1312 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; 1313 1314 size = sizeof(struct vfpf_vport_update_tx_switch_tlv); 1315 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; 1316 p_tx_switch_tlv = ecore_add_tlv(&p_iov->offset, 1317 tlv, size); 1318 resp_size += sizeof(struct pfvf_def_resp_tlv); 1319 1320 p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg; 1321 } 1322 1323 if (p_params->update_approx_mcast_flg) { 1324 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; 1325 1326 size = sizeof(struct vfpf_vport_update_mcast_bin_tlv); 1327 p_mcast_tlv = ecore_add_tlv(&p_iov->offset, 1328 CHANNEL_TLV_VPORT_UPDATE_MCAST, 1329 size); 1330 resp_size += sizeof(struct pfvf_def_resp_tlv); 1331 1332 OSAL_MEMCPY(p_mcast_tlv->bins, p_params->bins, 1333 sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); 1334 } 1335 1336 update_rx = p_params->accept_flags.update_rx_mode_config; 1337 update_tx = p_params->accept_flags.update_tx_mode_config; 1338 1339 if (update_rx || update_tx) { 1340 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; 1341 1342 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; 1343 size = sizeof(struct vfpf_vport_update_accept_param_tlv); 1344 p_accept_tlv = ecore_add_tlv(&p_iov->offset, tlv, size); 1345 resp_size += sizeof(struct pfvf_def_resp_tlv); 1346 1347 if (update_rx) { 1348 p_accept_tlv->update_rx_mode = update_rx; 1349 p_accept_tlv->rx_accept_filter = 1350 p_params->accept_flags.rx_accept_filter; 1351 } 1352 1353 if (update_tx) { 1354 p_accept_tlv->update_tx_mode = update_tx; 1355 p_accept_tlv->tx_accept_filter = 1356 p_params->accept_flags.tx_accept_filter; 1357 } 1358 } 1359 1360 if (p_params->rss_params) { 1361 struct ecore_rss_params *rss_params = 
p_params->rss_params; 1362 struct vfpf_vport_update_rss_tlv *p_rss_tlv; 1363 int i, table_size; 1364 1365 size = sizeof(struct vfpf_vport_update_rss_tlv); 1366 p_rss_tlv = ecore_add_tlv(&p_iov->offset, 1367 CHANNEL_TLV_VPORT_UPDATE_RSS, size); 1368 resp_size += sizeof(struct pfvf_def_resp_tlv); 1369 1370 if (rss_params->update_rss_config) 1371 p_rss_tlv->update_rss_flags |= 1372 VFPF_UPDATE_RSS_CONFIG_FLAG; 1373 if (rss_params->update_rss_capabilities) 1374 p_rss_tlv->update_rss_flags |= 1375 VFPF_UPDATE_RSS_CAPS_FLAG; 1376 if (rss_params->update_rss_ind_table) 1377 p_rss_tlv->update_rss_flags |= 1378 VFPF_UPDATE_RSS_IND_TABLE_FLAG; 1379 if (rss_params->update_rss_key) 1380 p_rss_tlv->update_rss_flags |= 1381 VFPF_UPDATE_RSS_KEY_FLAG; 1382 1383 p_rss_tlv->rss_enable = rss_params->rss_enable; 1384 p_rss_tlv->rss_caps = rss_params->rss_caps; 1385 p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log; 1386 1387 table_size = OSAL_MIN_T(int, T_ETH_INDIRECTION_TABLE_SIZE, 1388 1 << p_rss_tlv->rss_table_size_log); 1389 for (i = 0; i < table_size; i++) { 1390 struct ecore_queue_cid *p_queue; 1391 1392 p_queue = rss_params->rss_ind_table[i]; 1393 p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id; 1394 } 1395 1396 OSAL_MEMCPY(p_rss_tlv->rss_key, rss_params->rss_key, 1397 sizeof(rss_params->rss_key)); 1398 } 1399 1400 if (p_params->update_accept_any_vlan_flg) { 1401 struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv; 1402 1403 size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv); 1404 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; 1405 p_any_vlan_tlv = ecore_add_tlv(&p_iov->offset, tlv, size); 1406 1407 resp_size += sizeof(struct pfvf_def_resp_tlv); 1408 p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan; 1409 p_any_vlan_tlv->update_accept_any_vlan_flg = 1410 p_params->update_accept_any_vlan_flg; 1411 } 1412 1413 #ifndef LINUX_REMOVE 1414 if (p_params->sge_tpa_params) { 1415 struct ecore_sge_tpa_params *sge_tpa_params; 1416 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv; 1417 1418 sge_tpa_params = p_params->sge_tpa_params; 1419 size = sizeof(struct vfpf_vport_update_sge_tpa_tlv); 1420 p_sge_tpa_tlv = ecore_add_tlv(&p_iov->offset, 1421 CHANNEL_TLV_VPORT_UPDATE_SGE_TPA, 1422 size); 1423 resp_size += sizeof(struct pfvf_def_resp_tlv); 1424 1425 if (sge_tpa_params->update_tpa_en_flg) 1426 p_sge_tpa_tlv->update_sge_tpa_flags |= 1427 VFPF_UPDATE_TPA_EN_FLAG; 1428 if (sge_tpa_params->update_tpa_param_flg) 1429 p_sge_tpa_tlv->update_sge_tpa_flags |= 1430 VFPF_UPDATE_TPA_PARAM_FLAG; 1431 1432 if (sge_tpa_params->tpa_ipv4_en_flg) 1433 p_sge_tpa_tlv->sge_tpa_flags |= 1434 VFPF_TPA_IPV4_EN_FLAG; 1435 if (sge_tpa_params->tpa_ipv6_en_flg) 1436 p_sge_tpa_tlv->sge_tpa_flags |= 1437 VFPF_TPA_IPV6_EN_FLAG; 1438 if (sge_tpa_params->tpa_pkt_split_flg) 1439 p_sge_tpa_tlv->sge_tpa_flags |= 1440 VFPF_TPA_PKT_SPLIT_FLAG; 1441 if (sge_tpa_params->tpa_hdr_data_split_flg) 1442 p_sge_tpa_tlv->sge_tpa_flags |= 1443 VFPF_TPA_HDR_DATA_SPLIT_FLAG; 1444 if (sge_tpa_params->tpa_gro_consistent_flg) 1445 p_sge_tpa_tlv->sge_tpa_flags |= 1446 VFPF_TPA_GRO_CONSIST_FLAG; 1447 1448 p_sge_tpa_tlv->tpa_max_aggs_num = 1449 sge_tpa_params->tpa_max_aggs_num; 1450 p_sge_tpa_tlv->tpa_max_size = sge_tpa_params->tpa_max_size; 1451 p_sge_tpa_tlv->tpa_min_size_to_start = 1452 sge_tpa_params->tpa_min_size_to_start; 1453 p_sge_tpa_tlv->tpa_min_size_to_cont = 1454 sge_tpa_params->tpa_min_size_to_cont; 1455 1456 p_sge_tpa_tlv->max_buffers_per_cqe = 1457 sge_tpa_params->max_buffers_per_cqe; 1458 } 1459 #endif 1460 
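	/* Note: every extended TLV added above also grew resp_size by one
	 * struct pfvf_def_resp_tlv, since the PF answers each vport-update
	 * TLV with its own default response; ecore_send_msg2pf() then adds
	 * the list-end TLV on top of that.
	 */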
1461 /* add list termination tlv */ 1462 ecore_add_tlv(&p_iov->offset, 1463 CHANNEL_TLV_LIST_END, 1464 sizeof(struct channel_list_end_tlv)); 1465 1466 rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size); 1467 if (rc) 1468 goto exit; 1469 1470 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 1471 rc = ECORE_INVAL; 1472 goto exit; 1473 } 1474 1475 ecore_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params); 1476 1477 exit: 1478 ecore_vf_pf_req_end(p_hwfn, rc); 1479 1480 return rc; 1481 } 1482 1483 enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn) 1484 { 1485 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1486 struct pfvf_def_resp_tlv *resp; 1487 struct vfpf_first_tlv *req; 1488 enum _ecore_status_t rc; 1489 1490 /* clear mailbox and prep first tlv */ 1491 req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req)); 1492 1493 /* add list termination tlv */ 1494 ecore_add_tlv(&p_iov->offset, 1495 CHANNEL_TLV_LIST_END, 1496 sizeof(struct channel_list_end_tlv)); 1497 1498 resp = &p_iov->pf2vf_reply->default_resp; 1499 rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 1500 if (rc) 1501 goto exit; 1502 1503 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 1504 rc = ECORE_AGAIN; 1505 goto exit; 1506 } 1507 1508 p_hwfn->b_int_enabled = 0; 1509 1510 exit: 1511 ecore_vf_pf_req_end(p_hwfn, rc); 1512 1513 return rc; 1514 } 1515 1516 void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn, 1517 struct ecore_filter_mcast *p_filter_cmd) 1518 { 1519 struct ecore_sp_vport_update_params sp_params; 1520 int i; 1521 1522 OSAL_MEMSET(&sp_params, 0, sizeof(sp_params)); 1523 sp_params.update_approx_mcast_flg = 1; 1524 1525 if (p_filter_cmd->opcode == ECORE_FILTER_ADD) { 1526 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { 1527 u32 bit; 1528 1529 bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]); 1530 sp_params.bins[bit / 32] |= 1 << (bit % 32); 1531 } 1532 } 1533 1534 ecore_vf_pf_vport_update(p_hwfn, &sp_params); 1535 } 1536 1537 enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn, 1538 struct ecore_filter_ucast *p_ucast) 1539 { 1540 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1541 struct vfpf_ucast_filter_tlv *req; 1542 struct pfvf_def_resp_tlv *resp; 1543 enum _ecore_status_t rc; 1544 1545 #ifndef LINUX_REMOVE 1546 /* Sanitize */ 1547 if (p_ucast->opcode == ECORE_FILTER_MOVE) { 1548 DP_NOTICE(p_hwfn, true, "VFs don't support Moving of filters\n"); 1549 return ECORE_INVAL; 1550 } 1551 #endif 1552 1553 /* clear mailbox and prep first tlv */ 1554 req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req)); 1555 req->opcode = (u8)p_ucast->opcode; 1556 req->type = (u8)p_ucast->type; 1557 OSAL_MEMCPY(req->mac, p_ucast->mac, ETH_ALEN); 1558 req->vlan = p_ucast->vlan; 1559 1560 /* add list termination tlv */ 1561 ecore_add_tlv(&p_iov->offset, 1562 CHANNEL_TLV_LIST_END, 1563 sizeof(struct channel_list_end_tlv)); 1564 1565 resp = &p_iov->pf2vf_reply->default_resp; 1566 rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 1567 if (rc) 1568 goto exit; 1569 1570 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 1571 rc = ECORE_AGAIN; 1572 goto exit; 1573 } 1574 1575 exit: 1576 ecore_vf_pf_req_end(p_hwfn, rc); 1577 1578 return rc; 1579 } 1580 1581 enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn) 1582 { 1583 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1584 struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; 1585 enum _ecore_status_t rc; 1586 1587 /* clear mailbox and prep first tlv */ 1588 
ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP, 1589 sizeof(struct vfpf_first_tlv)); 1590 1591 /* add list termination tlv */ 1592 ecore_add_tlv(&p_iov->offset, 1593 CHANNEL_TLV_LIST_END, 1594 sizeof(struct channel_list_end_tlv)); 1595 1596 rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 1597 if (rc) 1598 goto exit; 1599 1600 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 1601 rc = ECORE_INVAL; 1602 goto exit; 1603 } 1604 1605 exit: 1606 ecore_vf_pf_req_end(p_hwfn, rc); 1607 1608 return rc; 1609 } 1610 1611 enum _ecore_status_t ecore_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn, 1612 u16 *p_coal, 1613 struct ecore_queue_cid *p_cid) 1614 { 1615 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1616 struct pfvf_read_coal_resp_tlv *resp; 1617 struct vfpf_read_coal_req_tlv *req; 1618 enum _ecore_status_t rc; 1619 1620 /* clear mailbox and prep header tlv */ 1621 req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ, 1622 sizeof(*req)); 1623 req->qid = p_cid->rel.queue_id; 1624 req->is_rx = p_cid->b_is_rx ? 1 : 0; 1625 1626 ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_LIST_END, 1627 sizeof(struct channel_list_end_tlv)); 1628 resp = &p_iov->pf2vf_reply->read_coal_resp; 1629 1630 rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 1631 if (rc != ECORE_SUCCESS) 1632 goto exit; 1633 1634 if (resp->hdr.status != PFVF_STATUS_SUCCESS) 1635 goto exit; 1636 1637 *p_coal = resp->coal; 1638 exit: 1639 ecore_vf_pf_req_end(p_hwfn, rc); 1640 1641 return rc; 1642 } 1643 1644 enum _ecore_status_t 1645 ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal, u16 tx_coal, 1646 struct ecore_queue_cid *p_cid) 1647 { 1648 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1649 struct vfpf_update_coalesce *req; 1650 struct pfvf_def_resp_tlv *resp; 1651 enum _ecore_status_t rc; 1652 1653 /* clear mailbox and prep header tlv */ 1654 req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE, 1655 sizeof(*req)); 1656 1657 req->rx_coal = rx_coal; 1658 req->tx_coal = tx_coal; 1659 req->qid = p_cid->rel.queue_id; 1660 1661 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1662 "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n", 1663 rx_coal, tx_coal, req->qid); 1664 1665 /* add list termination tlv */ 1666 ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_LIST_END, 1667 sizeof(struct channel_list_end_tlv)); 1668 1669 resp = &p_iov->pf2vf_reply->default_resp; 1670 rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 1671 1672 if (rc != ECORE_SUCCESS) 1673 goto exit; 1674 1675 if (resp->hdr.status != PFVF_STATUS_SUCCESS) 1676 goto exit; 1677 1678 p_hwfn->p_dev->rx_coalesce_usecs = rx_coal; 1679 p_hwfn->p_dev->tx_coalesce_usecs = tx_coal; 1680 1681 exit: 1682 ecore_vf_pf_req_end(p_hwfn, rc); 1683 return rc; 1684 } 1685 1686 u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn, 1687 u16 sb_id) 1688 { 1689 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1690 1691 if (!p_iov) { 1692 DP_NOTICE(p_hwfn, true, "vf_sriov_info isn't initialized\n"); 1693 return 0; 1694 } 1695 1696 return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id; 1697 } 1698 1699 void ecore_vf_set_sb_info(struct ecore_hwfn *p_hwfn, 1700 u16 sb_id, struct ecore_sb_info *p_sb) 1701 { 1702 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1703 1704 if (!p_iov) { 1705 DP_NOTICE(p_hwfn, true, "vf_sriov_info isn't initialized\n"); 1706 return; 1707 } 1708 1709 if (sb_id >= PFVF_MAX_SBS_PER_VF) { 1710 DP_NOTICE(p_hwfn, true, "Can't configure SB %04x\n", sb_id); 1711 return; 1712 } 1713 1714 p_iov->sbs_info[sb_id] = p_sb; 1715 
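	/* The stored pointer is consumed later by ecore_vf_pf_vport_start(),
	 * which reports each SB's physical address to the PF via sb_addr[].
	 */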
} 1716 1717 enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn, 1718 u8 *p_change) 1719 { 1720 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1721 struct ecore_bulletin_content shadow; 1722 u32 crc, crc_size; 1723 1724 crc_size = sizeof(p_iov->bulletin.p_virt->crc); 1725 *p_change = 0; 1726 1727 /* Need to guarantee PF is not in the middle of writing it */ 1728 OSAL_MEMCPY(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size); 1729 1730 /* If version did not update, no need to do anything */ 1731 if (shadow.version == p_iov->bulletin_shadow.version) 1732 return ECORE_SUCCESS; 1733 1734 /* Verify the bulletin we see is valid */ 1735 crc = OSAL_CRC32(0, (u8 *)&shadow + crc_size, 1736 p_iov->bulletin.size - crc_size); 1737 if (crc != shadow.crc) 1738 return ECORE_AGAIN; 1739 1740 /* Set the shadow bulletin and process it */ 1741 OSAL_MEMCPY(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size); 1742 1743 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1744 "Read a bulletin update %08x\n", shadow.version); 1745 1746 *p_change = 1; 1747 1748 return ECORE_SUCCESS; 1749 } 1750 1751 void __ecore_vf_get_link_params(struct ecore_mcp_link_params *p_params, 1752 struct ecore_bulletin_content *p_bulletin) 1753 { 1754 OSAL_MEMSET(p_params, 0, sizeof(*p_params)); 1755 1756 p_params->speed.autoneg = p_bulletin->req_autoneg; 1757 p_params->speed.advertised_speeds = p_bulletin->req_adv_speed; 1758 p_params->speed.forced_speed = p_bulletin->req_forced_speed; 1759 p_params->pause.autoneg = p_bulletin->req_autoneg_pause; 1760 p_params->pause.forced_rx = p_bulletin->req_forced_rx; 1761 p_params->pause.forced_tx = p_bulletin->req_forced_tx; 1762 p_params->loopback_mode = p_bulletin->req_loopback; 1763 } 1764 1765 void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn, 1766 struct ecore_mcp_link_params *params) 1767 { 1768 __ecore_vf_get_link_params(params, 1769 &(p_hwfn->vf_iov_info->bulletin_shadow)); 1770 } 1771 1772 void __ecore_vf_get_link_state(struct ecore_mcp_link_state *p_link, 1773 struct ecore_bulletin_content *p_bulletin) 1774 { 1775 OSAL_MEMSET(p_link, 0, sizeof(*p_link)); 1776 1777 p_link->link_up = p_bulletin->link_up; 1778 p_link->speed = p_bulletin->speed; 1779 p_link->full_duplex = p_bulletin->full_duplex; 1780 p_link->an = p_bulletin->autoneg; 1781 p_link->an_complete = p_bulletin->autoneg_complete; 1782 p_link->parallel_detection = p_bulletin->parallel_detection; 1783 p_link->pfc_enabled = p_bulletin->pfc_enabled; 1784 p_link->partner_adv_speed = p_bulletin->partner_adv_speed; 1785 p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en; 1786 p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en; 1787 p_link->partner_adv_pause = p_bulletin->partner_adv_pause; 1788 p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault; 1789 } 1790 1791 void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn, 1792 struct ecore_mcp_link_state *link) 1793 { 1794 __ecore_vf_get_link_state(link, 1795 &(p_hwfn->vf_iov_info->bulletin_shadow)); 1796 } 1797 1798 void __ecore_vf_get_link_caps(struct ecore_mcp_link_capabilities *p_link_caps, 1799 struct ecore_bulletin_content *p_bulletin) 1800 { 1801 OSAL_MEMSET(p_link_caps, 0, sizeof(*p_link_caps)); 1802 p_link_caps->speed_capabilities = p_bulletin->capability_speed; 1803 } 1804 1805 void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn, 1806 struct ecore_mcp_link_capabilities *p_link_caps) 1807 { 1808 __ecore_vf_get_link_caps(p_link_caps, 1809 &(p_hwfn->vf_iov_info->bulletin_shadow)); 1810 } 1811 1812 void ecore_vf_get_num_rxqs(struct 
ecore_hwfn *p_hwfn, 1813 u8 *num_rxqs) 1814 { 1815 *num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs; 1816 } 1817 1818 void ecore_vf_get_num_txqs(struct ecore_hwfn *p_hwfn, 1819 u8 *num_txqs) 1820 { 1821 *num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs; 1822 } 1823 1824 void ecore_vf_get_num_cids(struct ecore_hwfn *p_hwfn, 1825 u8 *num_cids) 1826 { 1827 *num_cids = p_hwfn->vf_iov_info->acquire_resp.resc.num_cids; 1828 } 1829 1830 void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, 1831 u8 *port_mac) 1832 { 1833 OSAL_MEMCPY(port_mac, 1834 p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, 1835 ETH_ALEN); 1836 } 1837 1838 void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn, 1839 u8 *num_vlan_filters) 1840 { 1841 struct ecore_vf_iov *p_vf; 1842 1843 p_vf = p_hwfn->vf_iov_info; 1844 *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters; 1845 } 1846 1847 void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn, 1848 u8 *num_mac_filters) 1849 { 1850 struct ecore_vf_iov *p_vf = p_hwfn->vf_iov_info; 1851 1852 *num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters; 1853 } 1854 1855 bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac) 1856 { 1857 struct ecore_bulletin_content *bulletin; 1858 1859 bulletin = &p_hwfn->vf_iov_info->bulletin_shadow; 1860 if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED))) 1861 return true; 1862 1863 /* Forbid VF from changing a MAC enforced by PF */ 1864 if (OSAL_MEMCMP(bulletin->mac, mac, ETH_ALEN)) 1865 return false; 1866 1867 return false; 1868 } 1869 1870 bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac, 1871 u8 *p_is_forced) 1872 { 1873 struct ecore_bulletin_content *bulletin; 1874 1875 bulletin = &hwfn->vf_iov_info->bulletin_shadow; 1876 1877 if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) { 1878 if (p_is_forced) 1879 *p_is_forced = 1; 1880 } else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) { 1881 if (p_is_forced) 1882 *p_is_forced = 0; 1883 } else { 1884 return false; 1885 } 1886 1887 OSAL_MEMCPY(dst_mac, bulletin->mac, ETH_ALEN); 1888 1889 return true; 1890 } 1891 1892 void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn *p_hwfn, 1893 u16 *p_vxlan_port, 1894 u16 *p_geneve_port) 1895 { 1896 struct ecore_bulletin_content *p_bulletin; 1897 1898 p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow; 1899 1900 *p_vxlan_port = p_bulletin->vxlan_udp_port; 1901 *p_geneve_port = p_bulletin->geneve_udp_port; 1902 } 1903 1904 #ifndef LINUX_REMOVE 1905 bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid) 1906 { 1907 struct ecore_bulletin_content *bulletin; 1908 1909 bulletin = &hwfn->vf_iov_info->bulletin_shadow; 1910 1911 if (!(bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED))) 1912 return false; 1913 1914 if (dst_pvid) 1915 *dst_pvid = bulletin->pvid; 1916 1917 return true; 1918 } 1919 1920 bool ecore_vf_get_pre_fp_hsi(struct ecore_hwfn *p_hwfn) 1921 { 1922 return p_hwfn->vf_iov_info->b_pre_fp_hsi; 1923 } 1924 #endif 1925 1926 void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn, 1927 u16 *fw_major, u16 *fw_minor, u16 *fw_rev, 1928 u16 *fw_eng) 1929 { 1930 struct pf_vf_pfdev_info *info; 1931 1932 info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info; 1933 1934 *fw_major = info->fw_major; 1935 *fw_minor = info->fw_minor; 1936 *fw_rev = info->fw_rev; 1937 *fw_eng = info->fw_eng; 1938 } 1939 1940 #ifdef CONFIG_ECORE_SW_CHANNEL 1941 void ecore_vf_set_hw_channel(struct ecore_hwfn *p_hwfn, bool b_is_hw) 1942 { 1943 
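	/* Selects the transport used by ecore_send_msg2pf(): when false the
	 * request goes through OSAL_VF_SEND_MSG2PF() (software channel),
	 * when true it is written to the USDM VF zone and triggered in HW.
	 */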
p_hwfn->vf_iov_info->b_hw_channel = b_is_hw; 1944 } 1945 #endif 1946 1947 #ifdef _NTDDK_ 1948 #pragma warning(pop) 1949 #endif 1950
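/*
 * Illustrative call order, inferred from the functions above (upper-layer
 * drivers may differ):
 *
 *   ecore_vf_hw_prepare()        - map ME regs, allocate mailboxes and
 *                                  bulletin board, ACQUIRE resources from PF
 *   ecore_vf_pf_vport_start()    - open the vport
 *   ecore_vf_pf_rxq_start() /
 *   ecore_vf_pf_txq_start()      - bring up the queues
 *   ecore_vf_pf_vport_update()   - activate vport, RSS, accept flags, etc.
 *   ecore_vf_read_bulletin()     - poll link / forced-MAC updates
 *   ...
 *   ecore_vf_pf_vport_stop() / ecore_vf_pf_int_cleanup() /
 *   ecore_vf_pf_release()        - teardown
 */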