/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_sriov.h"
#include "qed_vf.h"

static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	void *p_tlv;

	/* This lock is released when we receive PF's response
	 * in qed_send_msg2pf().
	 * So, qed_vf_pf_prep() and qed_send_msg2pf()
	 * must come in sequence.
	 */
	mutex_lock(&(p_iov->mutex));

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "preparing to send 0x%04x tlv over vf pf channel\n",
		   type);

	/* Reset Request offset */
	p_iov->offset = (u8 *)p_iov->vf2pf_request;

	/* Clear mailbox - both request and reply */
	memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
	memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

	/* Init type and length */
	p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);

	/* Init first tlv header */
	((struct vfpf_first_tlv *)p_tlv)->reply_address =
	    (u64)p_iov->pf2vf_reply_phys;

	return p_tlv;
}

static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
	struct ustorm_trigger_vf_zone trigger;
	struct ustorm_vf_zone *zone_data;
	int rc = 0, time = 100;

	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;

	/* output tlvs list */
	qed_dp_tlv_list(p_hwfn, p_req);

	/* need to add the END TLV to the message size */
	resp_size += sizeof(struct channel_list_end_tlv);

	/* Send TLVs over HW channel */
	memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
	trigger.vf_pf_msg_valid = 1;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
		   GET_FIELD(p_hwfn->hw_info.concrete_fid,
			     PXP_CONCRETE_FID_PFID),
		   upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   &zone_data->non_trigger.vf_pf_msg_addr,
		   *((u32 *)&trigger), &zone_data->trigger);

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
	       lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
	       upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	/* The message data must be written first, to prevent trigger before
	 * data is written.
	 */
	wmb();

	REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));

	/* When PF would be done with the response, it would write back to the
	 * `done' address. Poll until then.
	 */
	while ((!*done) && time) {
		msleep(25);
		time--;
	}

	if (!*done) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF <-- PF Timeout [Type %d]\n",
			   p_req->first_tlv.tl.type);
		rc = -EBUSY;
		goto exit;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "PF response: %d [Type %d]\n",
			   *done, p_req->first_tlv.tl.type);
	}

exit:
	mutex_unlock(&(p_hwfn->vf_iov_info->mutex));

	return rc;
}

#define VF_ACQUIRE_THRESH 3
#define VF_ACQUIRE_MAC_FILTERS 1

static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	u8 rx_count = 1, tx_count = 1, num_sbs = 1;
	u8 num_mac = VF_ACQUIRE_MAC_FILTERS;
	bool resources_acquired = false;
	struct vfpf_acquire_tlv *req;
	int rc = 0, attempts = 0;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));

	/* start filling the request */
	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;

	req->resc_request.num_rxqs = rx_count;
	req->resc_request.num_txqs = tx_count;
	req->resc_request.num_sbs = num_sbs;
	req->resc_request.num_mac_filters = num_mac;
	req->resc_request.num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;

	req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;

	/* Fill capability field with any non-deprecated config we support */
	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = p_iov->bulletin.phys;
	req->bulletin_size = p_iov->bulletin.size;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	while (!resources_acquired) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV, "attempting to acquire resources\n");

		/* send acquire request */
		rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
		if (rc)
			return rc;

		/* copy acquire response from buffer to p_hwfn */
		memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));

		attempts++;

		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
			/* PF agrees to allocate our resources */
			if (!(resp->pfdev_info.capabilities &
			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
				DP_INFO(p_hwfn,
					"PF is using old incompatible driver; Either downgrade driver or request provider to update hypervisor version\n");
				return -EINVAL;
			}
			DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
			resources_acquired = true;
		} else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "PF unwilling to fulfill resource request. Try PF recommended amount\n");

			/* humble our request */
			req->resc_request.num_txqs = resp->resc.num_txqs;
			req->resc_request.num_rxqs = resp->resc.num_rxqs;
			req->resc_request.num_sbs = resp->resc.num_sbs;
			req->resc_request.num_mac_filters =
			    resp->resc.num_mac_filters;
			req->resc_request.num_vlan_filters =
			    resp->resc.num_vlan_filters;

			/* Clear response buffer */
			memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
		} else {
			DP_ERR(p_hwfn,
			       "PF returned error %d to VF acquisition request\n",
			       resp->hdr.status);
			return -EAGAIN;
		}
	}

	/* Update bulletin board size with response from PF */
	p_iov->bulletin.size = resp->bulletin_size;

	/* get HW info */
	p_hwfn->cdev->type = resp->pfdev_info.dev_type;
	p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;

	p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;

	/* Learn of the possibility of CMT */
	if (IS_LEAD_HWFN(p_hwfn)) {
		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
			DP_NOTICE(p_hwfn, "100g VF\n");
			p_hwfn->cdev->num_hwfns = 2;
		}
	}

	return 0;
}

int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov;
	u32 reg;

	/* Set number of hwfns - might be overridden once leading hwfn learns
	 * actual configuration from PF.
	 */
	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->cdev->num_hwfns = 1;

	/* Set the doorbell bar. Assumption: regview is set */
	p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
			    PXP_VF_BAR0_START_DQ;

	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);

	reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);

	/* Allocate vf sriov info */
	p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
	if (!p_iov) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
		return -ENOMEM;
	}

	/* Allocate vf2pf msg */
	p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						  sizeof(union vfpf_tlvs),
						  &p_iov->vf2pf_request_phys,
						  GFP_KERNEL);
	if (!p_iov->vf2pf_request) {
		DP_NOTICE(p_hwfn,
			  "Failed to allocate `vf2pf_request' DMA memory\n");
		goto free_p_iov;
	}

	p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						sizeof(union pfvf_tlvs),
						&p_iov->pf2vf_reply_phys,
						GFP_KERNEL);
	if (!p_iov->pf2vf_reply) {
		DP_NOTICE(p_hwfn,
			  "Failed to allocate `pf2vf_reply' DMA memory\n");
		goto free_vf2pf_request;
	}

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
		   p_iov->vf2pf_request,
		   (u64)p_iov->vf2pf_request_phys,
		   p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);

	/* Allocate Bulletin board */
	p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
	p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						    p_iov->bulletin.size,
						    &p_iov->bulletin.phys,
						    GFP_KERNEL);
	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
		   p_iov->bulletin.p_virt,
		   (u64)p_iov->bulletin.phys, p_iov->bulletin.size);

	mutex_init(&p_iov->mutex);

	p_hwfn->vf_iov_info = p_iov;

	p_hwfn->hw_info.personality = QED_PCI_ETH;

	return qed_vf_pf_acquire(p_hwfn);

free_vf2pf_request:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(union vfpf_tlvs),
			  p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
free_p_iov:
	kfree(p_iov);

	return -ENOMEM;
}

int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
			u8 rx_qid,
			u16 sb,
			u8 sb_index,
			u16 bd_max_bytes,
			dma_addr_t bd_chain_phys_addr,
			dma_addr_t cqe_pbl_addr,
			u16 cqe_pbl_size, void __iomem **pp_prod)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_rxq_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));

	req->rx_qid = rx_qid;
	req->cqe_pbl_addr = cqe_pbl_addr;
	req->cqe_pbl_size = cqe_pbl_size;
	req->rxq_addr = bd_chain_phys_addr;
	req->hw_sb = sb;
	req->sb_index = sb_index;
	req->bd_max_bytes = bd_max_bytes;
	req->stat_id = -1;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EINVAL;

	/* Learn the address of the producer from the response */
	if (pp_prod) {
		u64 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
			   rx_qid, *pp_prod, resp->offset);

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
				  (u32 *)&init_prod_val);
	}

	return rc;
}

int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_rxqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));

	req->rx_qid = rx_qid;
	req->num_rxqs = 1;
	req->cqe_completion = cqe_completion;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EINVAL;

	return rc;
}

int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
			u16 tx_queue_id,
			u16 sb,
			u8 sb_index,
			dma_addr_t pbl_addr,
			u16 pbl_size, void __iomem **pp_doorbell)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_start_txq_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));

	req->tx_qid = tx_queue_id;

	/* Tx */
	req->pbl_addr = pbl_addr;
	req->pbl_size = pbl_size;
	req->hw_sb = sb;
	req->sb_index = sb_index;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EINVAL;

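	/* The Tx doorbell lives in the VF doorbell BAR at an offset derived
	 * from the CID the PF assigned to this queue in the ACQUIRE response.
	 */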
	if (pp_doorbell) {
		u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];

		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
			       qed_db_addr(cid, DQ_DEMS_LEGACY);
	}

	return rc;
}

int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_txqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));

	req->tx_qid = tx_qid;
	req->num_txqs = 1;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EINVAL;

	return rc;
}

int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe, u8 only_untagged)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_start_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc, i;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));

	req->mtu = mtu;
	req->vport_id = vport_id;
	req->inner_vlan_removal = inner_vlan_removal;
	req->tpa_mode = tpa_mode;
	req->max_buffers_per_cqe = max_buffers_per_cqe;
	req->only_untagged = only_untagged;

	/* status blocks */
	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
		if (p_hwfn->sbs_info[i])
			req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EINVAL;

	return rc;
}

int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EINVAL;

	return rc;
}

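/* A vport-update request is built from a variable set of extended TLVs.
 * The helpers below decide which TLVs a given set of update parameters
 * requires, and match the PF's per-TLV status replies back to them.
 */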
static bool
qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  u16 tlv)
{
	switch (tlv) {
	case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
		return !!(p_data->update_vport_active_rx_flg ||
			  p_data->update_vport_active_tx_flg);
	case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
		return !!p_data->update_tx_switching_flg;
	case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
		return !!p_data->update_inner_vlan_removal_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
		return !!p_data->update_accept_any_vlan_flg;
	case CHANNEL_TLV_VPORT_UPDATE_MCAST:
		return !!p_data->update_approx_mcast_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
		return !!(p_data->accept_flags.update_rx_mode_config ||
			  p_data->accept_flags.update_tx_mode_config);
	case CHANNEL_TLV_VPORT_UPDATE_RSS:
		return !!p_data->rss_params;
	case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
		return !!p_data->sge_tpa_params;
	default:
		DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n", tlv);
		return false;
	}
}

static void
qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *p_resp;
	u16 tlv;

	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
		if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
			continue;

		p_resp = (struct pfvf_def_resp_tlv *)
			 qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
						  tlv);
		if (p_resp && p_resp->hdr.status)
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "TLV[%d] Configuration %s\n",
				   tlv,
				   (p_resp && p_resp->hdr.status) ? "succeeded"
								  : "failed");
	}
}

int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_update_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	u8 update_rx, update_tx;
	u32 resp_size = 0;
	u16 size, tlv;
	int rc;

	resp = &p_iov->pf2vf_reply->default_resp;
	resp_size = sizeof(*resp);

	update_rx = p_params->update_vport_active_rx_flg;
	update_tx = p_params->update_vport_active_tx_flg;

	/* clear mailbox and prep header tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));

	/* Prepare extended tlvs */
	if (update_rx || update_tx) {
		struct vfpf_vport_update_activate_tlv *p_act_tlv;

		size = sizeof(struct vfpf_vport_update_activate_tlv);
		p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
					size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_act_tlv->update_rx = update_rx;
			p_act_tlv->active_rx = p_params->vport_active_rx_flg;
		}

		if (update_tx) {
			p_act_tlv->update_tx = update_tx;
			p_act_tlv->active_tx = p_params->vport_active_tx_flg;
		}
	}

	if (p_params->update_tx_switching_flg) {
		struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;

		size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
		p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					      tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
	}

	if (p_params->update_approx_mcast_flg) {
		struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;

		size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
		p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		memcpy(p_mcast_tlv->bins, p_params->bins,
		       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	}

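	/* Rx and Tx accept (classification) filters are updated
	 * independently; only the directions flagged for update are
	 * filled into the TLV.
	 */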
	update_rx = p_params->accept_flags.update_rx_mode_config;
	update_tx = p_params->accept_flags.update_tx_mode_config;

	if (update_rx || update_tx) {
		struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;

		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
		size = sizeof(struct vfpf_vport_update_accept_param_tlv);
		p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_accept_tlv->update_rx_mode = update_rx;
			p_accept_tlv->rx_accept_filter =
			    p_params->accept_flags.rx_accept_filter;
		}

		if (update_tx) {
			p_accept_tlv->update_tx_mode = update_tx;
			p_accept_tlv->tx_accept_filter =
			    p_params->accept_flags.tx_accept_filter;
		}
	}

	if (p_params->rss_params) {
		struct qed_rss_params *rss_params = p_params->rss_params;
		struct vfpf_vport_update_rss_tlv *p_rss_tlv;

		size = sizeof(struct vfpf_vport_update_rss_tlv);
		p_rss_tlv = qed_add_tlv(p_hwfn,
					&p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_RSS, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (rss_params->update_rss_config)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CONFIG_FLAG;
		if (rss_params->update_rss_capabilities)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CAPS_FLAG;
		if (rss_params->update_rss_ind_table)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_IND_TABLE_FLAG;
		if (rss_params->update_rss_key)
			p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;

		p_rss_tlv->rss_enable = rss_params->rss_enable;
		p_rss_tlv->rss_caps = rss_params->rss_caps;
		p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
		memcpy(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
		       sizeof(rss_params->rss_ind_table));
		memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
		       sizeof(rss_params->rss_key));
	}

	if (p_params->update_accept_any_vlan_flg) {
		struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
		p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);

		resp_size += sizeof(struct pfvf_def_resp_tlv);
		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
		p_any_vlan_tlv->update_accept_any_vlan_flg =
		    p_params->update_accept_any_vlan_flg;
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EINVAL;

	qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);

	return rc;
}

int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EAGAIN;

	p_hwfn->b_int_enabled = 0;

	return 0;
}

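/* CHANNEL_TLV_RELEASE tells the PF this VF is going away; the channel DMA
 * buffers and the bulletin board are freed afterwards regardless of the
 * PF's answer.
 */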
int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	u32 size;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

	if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = -EAGAIN;

	p_hwfn->b_int_enabled = 0;

	if (p_iov->vf2pf_request)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union vfpf_tlvs),
				  p_iov->vf2pf_request,
				  p_iov->vf2pf_request_phys);
	if (p_iov->pf2vf_reply)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union pfvf_tlvs),
				  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);

	if (p_iov->bulletin.p_virt) {
		size = sizeof(struct qed_bulletin_content);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  size,
				  p_iov->bulletin.p_virt, p_iov->bulletin.phys);
	}

	kfree(p_hwfn->vf_iov_info);
	p_hwfn->vf_iov_info = NULL;

	return rc;
}

void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd)
{
	struct qed_sp_vport_update_params sp_params;
	int i;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.update_approx_mcast_flg = 1;

	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			__set_bit(bit, sp_params.bins);
		}
	}

	qed_vf_pf_vport_update(p_hwfn, &sp_params);
}

int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_ucast)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_ucast_filter_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
	req->opcode = (u8)p_ucast->opcode;
	req->type = (u8)p_ucast->type;
	memcpy(req->mac, p_ucast->mac, ETH_ALEN);
	req->vlan = p_ucast->vlan;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EAGAIN;

	return 0;
}

int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EINVAL;

	return 0;
}

u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
		return 0;
	}

	return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}

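/* The PF publishes link state, forced MAC and similar data through a DMA
 * bulletin board. The VF polls it (see qed_iov_vf_task()) and accepts a
 * snapshot only when the version changed and the CRC over its content
 * matches.
 */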
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct qed_bulletin_content shadow;
	u32 crc, crc_size;

	crc_size = sizeof(p_iov->bulletin.p_virt->crc);
	*p_change = 0;

	/* Need to guarantee PF is not in the middle of writing it */
	memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);

	/* If version did not update, no need to do anything */
	if (shadow.version == p_iov->bulletin_shadow.version)
		return 0;

	/* Verify the bulletin we see is valid */
	crc = crc32(0, (u8 *)&shadow + crc_size,
		    p_iov->bulletin.size - crc_size);
	if (crc != shadow.crc)
		return -EAGAIN;

	/* Set the shadow bulletin and process it */
	memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Read a bulletin update %08x\n", shadow.version);

	*p_change = 1;

	return 0;
}

void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin)
{
	memset(p_params, 0, sizeof(*p_params));

	p_params->speed.autoneg = p_bulletin->req_autoneg;
	p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
	p_params->speed.forced_speed = p_bulletin->req_forced_speed;
	p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
	p_params->pause.forced_rx = p_bulletin->req_forced_rx;
	p_params->pause.forced_tx = p_bulletin->req_forced_tx;
	p_params->loopback_mode = p_bulletin->req_loopback;
}

void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params)
{
	__qed_vf_get_link_params(p_hwfn, params,
				 &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin)
{
	memset(p_link, 0, sizeof(*p_link));

	p_link->link_up = p_bulletin->link_up;
	p_link->speed = p_bulletin->speed;
	p_link->full_duplex = p_bulletin->full_duplex;
	p_link->an = p_bulletin->autoneg;
	p_link->an_complete = p_bulletin->autoneg_complete;
	p_link->parallel_detection = p_bulletin->parallel_detection;
	p_link->pfc_enabled = p_bulletin->pfc_enabled;
	p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
	p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
	p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
	p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
	p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
}

void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link)
{
	__qed_vf_get_link_state(p_hwfn, link,
				&(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin)
{
	memset(p_link_caps, 0, sizeof(*p_link_caps));
	p_link_caps->speed_capabilities = p_bulletin->capability_speed;
}

void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps)
{
	__qed_vf_get_link_caps(p_hwfn, p_link_caps,
			       &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
	*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}

void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
	memcpy(port_mac,
	       p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
}

void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
{
	struct qed_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}

bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
	if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return true;

	/* Forbid VF from changing a MAC enforced by PF */
	if (ether_addr_equal(bulletin->mac, mac))
		return false;

	return false;
}

bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
				    u8 *dst_mac, u8 *p_is_forced)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		if (p_is_forced)
			*p_is_forced = 1;
	} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
		if (p_is_forced)
			*p_is_forced = 0;
	} else {
		return false;
	}

	ether_addr_copy(dst_mac, bulletin->mac);

	return true;
}

void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng)
{
	struct pf_vf_pfdev_info *info;

	info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;

	*fw_major = info->fw_major;
	*fw_minor = info->fw_minor;
	*fw_rev = info->fw_rev;
	*fw_eng = info->fw_eng;
}

static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
{
	struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
	u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
	void *cookie = hwfn->cdev->ops_cookie;

	is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
						      &is_mac_forced);
	if (is_mac_exist && is_mac_forced && cookie)
		ops->force_mac(cookie, mac);

	/* Always update link configuration according to bulletin */
	qed_link_update(hwfn);
}

void qed_iov_vf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	u8 change = 0;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	/* Handle bulletin board changes */
	qed_vf_read_bulletin(hwfn, &change);
	if (change)
		qed_handle_bulletin_change(hwfn);

	/* As VF is polling bulletin board, need to constantly re-schedule */
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
}