1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2013 - 2018 Intel Corporation. */ 3 4 #include "i40e.h" 5 #include "i40e_lan_hmc.h" 6 #include "i40e_virtchnl_pf.h" 7 8 /*********************notification routines***********************/ 9 10 /** 11 * i40e_vc_vf_broadcast 12 * @pf: pointer to the PF structure 13 * @v_opcode: operation code 14 * @v_retval: return value 15 * @msg: pointer to the msg buffer 16 * @msglen: msg length 17 * 18 * send a message to all VFs on a given PF 19 **/ 20 static void i40e_vc_vf_broadcast(struct i40e_pf *pf, 21 enum virtchnl_ops v_opcode, 22 int v_retval, u8 *msg, 23 u16 msglen) 24 { 25 struct i40e_hw *hw = &pf->hw; 26 struct i40e_vf *vf = pf->vf; 27 int i; 28 29 for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { 30 int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; 31 /* Not all vfs are enabled so skip the ones that are not */ 32 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) && 33 !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 34 continue; 35 36 /* Ignore return value on purpose - a given VF may fail, but 37 * we need to keep going and send to all of them 38 */ 39 i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, 40 msg, msglen, NULL); 41 } 42 } 43 44 /** 45 * i40e_vc_link_speed2mbps 46 * converts i40e_aq_link_speed to integer value of Mbps 47 * @link_speed: the speed to convert 48 * 49 * return the speed as direct value of Mbps. 50 **/ 51 static u32 52 i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed) 53 { 54 switch (link_speed) { 55 case I40E_LINK_SPEED_100MB: 56 return SPEED_100; 57 case I40E_LINK_SPEED_1GB: 58 return SPEED_1000; 59 case I40E_LINK_SPEED_2_5GB: 60 return SPEED_2500; 61 case I40E_LINK_SPEED_5GB: 62 return SPEED_5000; 63 case I40E_LINK_SPEED_10GB: 64 return SPEED_10000; 65 case I40E_LINK_SPEED_20GB: 66 return SPEED_20000; 67 case I40E_LINK_SPEED_25GB: 68 return SPEED_25000; 69 case I40E_LINK_SPEED_40GB: 70 return SPEED_40000; 71 case I40E_LINK_SPEED_UNKNOWN: 72 return SPEED_UNKNOWN; 73 } 74 return SPEED_UNKNOWN; 75 } 76 77 /** 78 * i40e_set_vf_link_state 79 * @vf: pointer to the VF structure 80 * @pfe: pointer to PF event structure 81 * @ls: pointer to link status structure 82 * 83 * set a link state on a single vf 84 **/ 85 static void i40e_set_vf_link_state(struct i40e_vf *vf, 86 struct virtchnl_pf_event *pfe, struct i40e_link_status *ls) 87 { 88 u8 link_status = ls->link_info & I40E_AQ_LINK_UP; 89 90 if (vf->link_forced) 91 link_status = vf->link_up; 92 93 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) { 94 pfe->event_data.link_event_adv.link_speed = link_status ? 95 i40e_vc_link_speed2mbps(ls->link_speed) : 0; 96 pfe->event_data.link_event_adv.link_status = link_status; 97 } else { 98 pfe->event_data.link_event.link_speed = link_status ? 99 i40e_virtchnl_link_speed(ls->link_speed) : 0; 100 pfe->event_data.link_event.link_status = link_status; 101 } 102 } 103 104 /** 105 * i40e_vc_notify_vf_link_state 106 * @vf: pointer to the VF structure 107 * 108 * send a link status message to a single VF 109 **/ 110 static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) 111 { 112 struct virtchnl_pf_event pfe; 113 struct i40e_pf *pf = vf->pf; 114 struct i40e_hw *hw = &pf->hw; 115 struct i40e_link_status *ls = &pf->hw.phy.link_info; 116 int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; 117 118 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; 119 pfe.severity = PF_EVENT_SEVERITY_INFO; 120 121 i40e_set_vf_link_state(vf, &pfe, ls); 122 123 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 124 0, (u8 *)&pfe, sizeof(pfe), NULL); 125 } 126 127 /** 128 * i40e_vc_notify_link_state 129 * @pf: pointer to the PF structure 130 * 131 * send a link status message to all VFs on a given PF 132 **/ 133 void i40e_vc_notify_link_state(struct i40e_pf *pf) 134 { 135 int i; 136 137 for (i = 0; i < pf->num_alloc_vfs; i++) 138 i40e_vc_notify_vf_link_state(&pf->vf[i]); 139 } 140 141 /** 142 * i40e_vc_notify_reset 143 * @pf: pointer to the PF structure 144 * 145 * indicate a pending reset to all VFs on a given PF 146 **/ 147 void i40e_vc_notify_reset(struct i40e_pf *pf) 148 { 149 struct virtchnl_pf_event pfe; 150 151 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; 152 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; 153 i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0, 154 (u8 *)&pfe, sizeof(struct virtchnl_pf_event)); 155 } 156 157 #ifdef CONFIG_PCI_IOV 158 void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev) 159 { 160 u16 vf_id; 161 u16 pos; 162 163 /* Continue only if this is a PF */ 164 if (!pdev->is_physfn) 165 return; 166 167 if (!pci_num_vf(pdev)) 168 return; 169 170 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 171 if (pos) { 172 struct pci_dev *vf_dev = NULL; 173 174 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); 175 while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) { 176 if (vf_dev->is_virtfn && vf_dev->physfn == pdev) 177 pci_restore_msi_state(vf_dev); 178 } 179 } 180 } 181 #endif /* CONFIG_PCI_IOV */ 182 183 /** 184 * i40e_vc_notify_vf_reset 185 * @vf: pointer to the VF structure 186 * 187 * indicate a pending reset to the given VF 188 **/ 189 void i40e_vc_notify_vf_reset(struct i40e_vf *vf) 190 { 191 struct virtchnl_pf_event pfe; 192 int abs_vf_id; 193 194 /* validate the request */ 195 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) 196 return; 197 198 /* verify if the VF is in either init or active before proceeding */ 199 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) && 200 !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 201 return; 202 203 abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id; 204 205 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; 206 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; 207 i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT, 208 0, (u8 *)&pfe, 209 sizeof(struct virtchnl_pf_event), NULL); 210 } 211 /***********************misc routines*****************************/ 212 213 /** 214 * i40e_vc_reset_vf 215 * @vf: pointer to the VF info 216 * @notify_vf: notify vf about reset or not 217 * Reset VF handler. 218 **/ 219 void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf) 220 { 221 struct i40e_pf *pf = vf->pf; 222 int i; 223 224 if (notify_vf) 225 i40e_vc_notify_vf_reset(vf); 226 227 /* We want to ensure that an actual reset occurs initiated after this 228 * function was called. However, we do not want to wait forever, so 229 * we'll give a reasonable time and print a message if we failed to 230 * ensure a reset. 231 */ 232 for (i = 0; i < 20; i++) { 233 /* If PF is in VFs releasing state reset VF is impossible, 234 * so leave it. 235 */ 236 if (test_bit(__I40E_VFS_RELEASING, pf->state)) 237 return; 238 if (i40e_reset_vf(vf, false)) 239 return; 240 usleep_range(10000, 20000); 241 } 242 243 if (notify_vf) 244 dev_warn(&vf->pf->pdev->dev, 245 "Failed to initiate reset for VF %d after 200 milliseconds\n", 246 vf->vf_id); 247 else 248 dev_dbg(&vf->pf->pdev->dev, 249 "Failed to initiate reset for VF %d after 200 milliseconds\n", 250 vf->vf_id); 251 } 252 253 /** 254 * i40e_vc_isvalid_vsi_id 255 * @vf: pointer to the VF info 256 * @vsi_id: VF relative VSI id 257 * 258 * check for the valid VSI id 259 **/ 260 static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id) 261 { 262 struct i40e_pf *pf = vf->pf; 263 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); 264 265 return (vsi && (vsi->vf_id == vf->vf_id)); 266 } 267 268 /** 269 * i40e_vc_isvalid_queue_id 270 * @vf: pointer to the VF info 271 * @vsi_id: vsi id 272 * @qid: vsi relative queue id 273 * 274 * check for the valid queue id 275 **/ 276 static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id, 277 u16 qid) 278 { 279 struct i40e_pf *pf = vf->pf; 280 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); 281 282 return (vsi && (qid < vsi->alloc_queue_pairs)); 283 } 284 285 /** 286 * i40e_vc_isvalid_vector_id 287 * @vf: pointer to the VF info 288 * @vector_id: VF relative vector id 289 * 290 * check for the valid vector id 291 **/ 292 static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id) 293 { 294 struct i40e_pf *pf = vf->pf; 295 296 return vector_id < pf->hw.func_caps.num_msix_vectors_vf; 297 } 298 299 /***********************vf resource mgmt routines*****************/ 300 301 /** 302 * i40e_vc_get_pf_queue_id 303 * @vf: pointer to the VF info 304 * @vsi_id: id of VSI as provided by the FW 305 * @vsi_queue_id: vsi relative queue id 306 * 307 * return PF relative queue id 308 **/ 309 static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id, 310 u8 vsi_queue_id) 311 { 312 struct i40e_pf *pf = vf->pf; 313 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); 314 u16 pf_queue_id = I40E_QUEUE_END_OF_LIST; 315 316 if (!vsi) 317 return pf_queue_id; 318 319 if (le16_to_cpu(vsi->info.mapping_flags) & 320 I40E_AQ_VSI_QUE_MAP_NONCONTIG) 321 pf_queue_id = 322 le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]); 323 else 324 pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) + 325 vsi_queue_id; 326 327 return pf_queue_id; 328 } 329 330 /** 331 * i40e_get_real_pf_qid 332 * @vf: pointer to the VF info 333 * @vsi_id: vsi id 334 * @queue_id: queue number 335 * 336 * wrapper function to get pf_queue_id handling ADq code as well 337 **/ 338 static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id) 339 { 340 int i; 341 342 if (vf->adq_enabled) { 343 /* Although VF considers all the queues(can be 1 to 16) as its 344 * own but they may actually belong to different VSIs(up to 4). 345 * We need to find which queues belongs to which VSI. 346 */ 347 for (i = 0; i < vf->num_tc; i++) { 348 if (queue_id < vf->ch[i].num_qps) { 349 vsi_id = vf->ch[i].vsi_id; 350 break; 351 } 352 /* find right queue id which is relative to a 353 * given VSI. 354 */ 355 queue_id -= vf->ch[i].num_qps; 356 } 357 } 358 359 return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id); 360 } 361 362 /** 363 * i40e_config_irq_link_list 364 * @vf: pointer to the VF info 365 * @vsi_id: id of VSI as given by the FW 366 * @vecmap: irq map info 367 * 368 * configure irq link list from the map 369 **/ 370 static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, 371 struct virtchnl_vector_map *vecmap) 372 { 373 unsigned long linklistmap = 0, tempmap; 374 struct i40e_pf *pf = vf->pf; 375 struct i40e_hw *hw = &pf->hw; 376 u16 vsi_queue_id, pf_queue_id; 377 enum i40e_queue_type qtype; 378 u16 next_q, vector_id, size; 379 u32 reg, reg_idx; 380 u16 itr_idx = 0; 381 382 vector_id = vecmap->vector_id; 383 /* setup the head */ 384 if (0 == vector_id) 385 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); 386 else 387 reg_idx = I40E_VPINT_LNKLSTN( 388 ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) + 389 (vector_id - 1)); 390 391 if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) { 392 /* Special case - No queues mapped on this vector */ 393 wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK); 394 goto irq_list_done; 395 } 396 tempmap = vecmap->rxq_map; 397 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { 398 linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES * 399 vsi_queue_id)); 400 } 401 402 tempmap = vecmap->txq_map; 403 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { 404 linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES * 405 vsi_queue_id + 1)); 406 } 407 408 size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES; 409 next_q = find_first_bit(&linklistmap, size); 410 if (unlikely(next_q == size)) 411 goto irq_list_done; 412 413 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; 414 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; 415 pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id); 416 reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id); 417 418 wr32(hw, reg_idx, reg); 419 420 while (next_q < size) { 421 switch (qtype) { 422 case I40E_QUEUE_TYPE_RX: 423 reg_idx = I40E_QINT_RQCTL(pf_queue_id); 424 itr_idx = vecmap->rxitr_idx; 425 break; 426 case I40E_QUEUE_TYPE_TX: 427 reg_idx = I40E_QINT_TQCTL(pf_queue_id); 428 itr_idx = vecmap->txitr_idx; 429 break; 430 default: 431 break; 432 } 433 434 next_q = find_next_bit(&linklistmap, size, next_q + 1); 435 if (next_q < size) { 436 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; 437 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; 438 pf_queue_id = i40e_get_real_pf_qid(vf, 439 vsi_id, 440 vsi_queue_id); 441 } else { 442 pf_queue_id = I40E_QUEUE_END_OF_LIST; 443 qtype = 0; 444 } 445 446 /* format for the RQCTL & TQCTL regs is same */ 447 reg = (vector_id) | 448 (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | 449 (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | 450 BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) | 451 (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT); 452 wr32(hw, reg_idx, reg); 453 } 454 455 /* if the vf is running in polling mode and using interrupt zero, 456 * need to disable auto-mask on enabling zero interrupt for VFs. 457 */ 458 if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) && 459 (vector_id == 0)) { 460 reg = rd32(hw, I40E_GLINT_CTL); 461 if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) { 462 reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK; 463 wr32(hw, I40E_GLINT_CTL, reg); 464 } 465 } 466 467 irq_list_done: 468 i40e_flush(hw); 469 } 470 471 /** 472 * i40e_release_rdma_qvlist 473 * @vf: pointer to the VF. 474 * 475 **/ 476 static void i40e_release_rdma_qvlist(struct i40e_vf *vf) 477 { 478 struct i40e_pf *pf = vf->pf; 479 struct virtchnl_rdma_qvlist_info *qvlist_info = vf->qvlist_info; 480 u32 msix_vf; 481 u32 i; 482 483 if (!vf->qvlist_info) 484 return; 485 486 msix_vf = pf->hw.func_caps.num_msix_vectors_vf; 487 for (i = 0; i < qvlist_info->num_vectors; i++) { 488 struct virtchnl_rdma_qv_info *qv_info; 489 u32 next_q_index, next_q_type; 490 struct i40e_hw *hw = &pf->hw; 491 u32 v_idx, reg_idx, reg; 492 493 qv_info = &qvlist_info->qv_info[i]; 494 v_idx = qv_info->v_idx; 495 if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) { 496 /* Figure out the queue after CEQ and make that the 497 * first queue. 498 */ 499 reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx; 500 reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx)); 501 next_q_index = FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK, 502 reg); 503 next_q_type = FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK, 504 reg); 505 506 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); 507 reg = (next_q_index & 508 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) | 509 (next_q_type << 510 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); 511 512 wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg); 513 } 514 } 515 kfree(vf->qvlist_info); 516 vf->qvlist_info = NULL; 517 } 518 519 /** 520 * i40e_config_rdma_qvlist 521 * @vf: pointer to the VF info 522 * @qvlist_info: queue and vector list 523 * 524 * Return 0 on success or < 0 on error 525 **/ 526 static int 527 i40e_config_rdma_qvlist(struct i40e_vf *vf, 528 struct virtchnl_rdma_qvlist_info *qvlist_info) 529 { 530 struct i40e_pf *pf = vf->pf; 531 struct i40e_hw *hw = &pf->hw; 532 struct virtchnl_rdma_qv_info *qv_info; 533 u32 v_idx, i, reg_idx, reg; 534 u32 next_q_idx, next_q_type; 535 size_t size; 536 u32 msix_vf; 537 int ret = 0; 538 539 msix_vf = pf->hw.func_caps.num_msix_vectors_vf; 540 541 if (qvlist_info->num_vectors > msix_vf) { 542 dev_warn(&pf->pdev->dev, 543 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n", 544 qvlist_info->num_vectors, 545 msix_vf); 546 ret = -EINVAL; 547 goto err_out; 548 } 549 550 kfree(vf->qvlist_info); 551 size = virtchnl_struct_size(vf->qvlist_info, qv_info, 552 qvlist_info->num_vectors); 553 vf->qvlist_info = kzalloc(size, GFP_KERNEL); 554 if (!vf->qvlist_info) { 555 ret = -ENOMEM; 556 goto err_out; 557 } 558 vf->qvlist_info->num_vectors = qvlist_info->num_vectors; 559 560 msix_vf = pf->hw.func_caps.num_msix_vectors_vf; 561 for (i = 0; i < qvlist_info->num_vectors; i++) { 562 qv_info = &qvlist_info->qv_info[i]; 563 564 /* Validate vector id belongs to this vf */ 565 if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) { 566 ret = -EINVAL; 567 goto err_free; 568 } 569 570 v_idx = qv_info->v_idx; 571 572 vf->qvlist_info->qv_info[i] = *qv_info; 573 574 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); 575 /* We might be sharing the interrupt, so get the first queue 576 * index and type, push it down the list by adding the new 577 * queue on top. Also link it with the new queue in CEQCTL. 578 */ 579 reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx)); 580 next_q_idx = FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK, 581 reg); 582 next_q_type = FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK, 583 reg); 584 585 if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) { 586 reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx; 587 reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK | 588 (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) | 589 (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) | 590 (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) | 591 (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)); 592 wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg); 593 594 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); 595 reg = (qv_info->ceq_idx & 596 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) | 597 (I40E_QUEUE_TYPE_PE_CEQ << 598 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); 599 wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg); 600 } 601 602 if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) { 603 reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK | 604 (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) | 605 (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)); 606 607 wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg); 608 } 609 } 610 611 return 0; 612 err_free: 613 kfree(vf->qvlist_info); 614 vf->qvlist_info = NULL; 615 err_out: 616 return ret; 617 } 618 619 /** 620 * i40e_config_vsi_tx_queue 621 * @vf: pointer to the VF info 622 * @vsi_id: id of VSI as provided by the FW 623 * @vsi_queue_id: vsi relative queue index 624 * @info: config. info 625 * 626 * configure tx queue 627 **/ 628 static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id, 629 u16 vsi_queue_id, 630 struct virtchnl_txq_info *info) 631 { 632 struct i40e_pf *pf = vf->pf; 633 struct i40e_hw *hw = &pf->hw; 634 struct i40e_hmc_obj_txq tx_ctx; 635 struct i40e_vsi *vsi; 636 u16 pf_queue_id; 637 u32 qtx_ctl; 638 int ret = 0; 639 640 if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) { 641 ret = -ENOENT; 642 goto error_context; 643 } 644 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); 645 vsi = i40e_find_vsi_from_id(pf, vsi_id); 646 if (!vsi) { 647 ret = -ENOENT; 648 goto error_context; 649 } 650 651 /* clear the context structure first */ 652 memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq)); 653 654 /* only set the required fields */ 655 tx_ctx.base = info->dma_ring_addr / 128; 656 tx_ctx.qlen = info->ring_len; 657 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]); 658 tx_ctx.rdylist_act = 0; 659 tx_ctx.head_wb_ena = info->headwb_enabled; 660 tx_ctx.head_wb_addr = info->dma_headwb_addr; 661 662 /* clear the context in the HMC */ 663 ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id); 664 if (ret) { 665 dev_err(&pf->pdev->dev, 666 "Failed to clear VF LAN Tx queue context %d, error: %d\n", 667 pf_queue_id, ret); 668 ret = -ENOENT; 669 goto error_context; 670 } 671 672 /* set the context in the HMC */ 673 ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx); 674 if (ret) { 675 dev_err(&pf->pdev->dev, 676 "Failed to set VF LAN Tx queue context %d error: %d\n", 677 pf_queue_id, ret); 678 ret = -ENOENT; 679 goto error_context; 680 } 681 682 /* associate this queue with the PCI VF function */ 683 qtx_ctl = I40E_QTX_CTL_VF_QUEUE; 684 qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_PF_INDX_MASK, hw->pf_id); 685 qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK, 686 vf->vf_id + hw->func_caps.vf_base_id); 687 wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl); 688 i40e_flush(hw); 689 690 error_context: 691 return ret; 692 } 693 694 /** 695 * i40e_config_vsi_rx_queue 696 * @vf: pointer to the VF info 697 * @vsi_id: id of VSI as provided by the FW 698 * @vsi_queue_id: vsi relative queue index 699 * @info: config. info 700 * 701 * configure rx queue 702 **/ 703 static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, 704 u16 vsi_queue_id, 705 struct virtchnl_rxq_info *info) 706 { 707 u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); 708 struct i40e_pf *pf = vf->pf; 709 struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx]; 710 struct i40e_hw *hw = &pf->hw; 711 struct i40e_hmc_obj_rxq rx_ctx; 712 int ret = 0; 713 714 /* clear the context structure first */ 715 memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq)); 716 717 /* only set the required fields */ 718 rx_ctx.base = info->dma_ring_addr / 128; 719 rx_ctx.qlen = info->ring_len; 720 721 if (info->splithdr_enabled) { 722 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | 723 I40E_RX_SPLIT_IP | 724 I40E_RX_SPLIT_TCP_UDP | 725 I40E_RX_SPLIT_SCTP; 726 /* header length validation */ 727 if (info->hdr_size > ((2 * 1024) - 64)) { 728 ret = -EINVAL; 729 goto error_param; 730 } 731 rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT; 732 733 /* set split mode 10b */ 734 rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT; 735 } 736 737 /* databuffer length validation */ 738 if (info->databuffer_size > ((16 * 1024) - 128)) { 739 ret = -EINVAL; 740 goto error_param; 741 } 742 rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT; 743 744 /* max pkt. length validation */ 745 if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) { 746 ret = -EINVAL; 747 goto error_param; 748 } 749 rx_ctx.rxmax = info->max_pkt_size; 750 751 /* if port VLAN is configured increase the max packet size */ 752 if (vsi->info.pvid) 753 rx_ctx.rxmax += VLAN_HLEN; 754 755 /* enable 32bytes desc always */ 756 rx_ctx.dsize = 1; 757 758 /* default values */ 759 rx_ctx.lrxqthresh = 1; 760 rx_ctx.crcstrip = 1; 761 rx_ctx.prefena = 1; 762 rx_ctx.l2tsel = 1; 763 764 /* clear the context in the HMC */ 765 ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id); 766 if (ret) { 767 dev_err(&pf->pdev->dev, 768 "Failed to clear VF LAN Rx queue context %d, error: %d\n", 769 pf_queue_id, ret); 770 ret = -ENOENT; 771 goto error_param; 772 } 773 774 /* set the context in the HMC */ 775 ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx); 776 if (ret) { 777 dev_err(&pf->pdev->dev, 778 "Failed to set VF LAN Rx queue context %d error: %d\n", 779 pf_queue_id, ret); 780 ret = -ENOENT; 781 goto error_param; 782 } 783 784 error_param: 785 return ret; 786 } 787 788 /** 789 * i40e_alloc_vsi_res 790 * @vf: pointer to the VF info 791 * @idx: VSI index, applies only for ADq mode, zero otherwise 792 * 793 * alloc VF vsi context & resources 794 **/ 795 static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx) 796 { 797 struct i40e_mac_filter *f = NULL; 798 struct i40e_vsi *main_vsi, *vsi; 799 struct i40e_pf *pf = vf->pf; 800 u64 max_tx_rate = 0; 801 int ret = 0; 802 803 main_vsi = i40e_pf_get_main_vsi(pf); 804 vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, main_vsi->seid, vf->vf_id); 805 806 if (!vsi) { 807 dev_err(&pf->pdev->dev, 808 "add vsi failed for VF %d, aq_err %d\n", 809 vf->vf_id, pf->hw.aq.asq_last_status); 810 ret = -ENOENT; 811 goto error_alloc_vsi_res; 812 } 813 814 if (!idx) { 815 u64 hashcfg = i40e_pf_get_default_rss_hashcfg(pf); 816 u8 broadcast[ETH_ALEN]; 817 818 vf->lan_vsi_idx = vsi->idx; 819 vf->lan_vsi_id = vsi->id; 820 /* If the port VLAN has been configured and then the 821 * VF driver was removed then the VSI port VLAN 822 * configuration was destroyed. Check if there is 823 * a port VLAN and restore the VSI configuration if 824 * needed. 825 */ 826 if (vf->port_vlan_id) 827 i40e_vsi_add_pvid(vsi, vf->port_vlan_id); 828 829 spin_lock_bh(&vsi->mac_filter_hash_lock); 830 if (is_valid_ether_addr(vf->default_lan_addr.addr)) { 831 f = i40e_add_mac_filter(vsi, 832 vf->default_lan_addr.addr); 833 if (!f) 834 dev_info(&pf->pdev->dev, 835 "Could not add MAC filter %pM for VF %d\n", 836 vf->default_lan_addr.addr, vf->vf_id); 837 } 838 eth_broadcast_addr(broadcast); 839 f = i40e_add_mac_filter(vsi, broadcast); 840 if (!f) 841 dev_info(&pf->pdev->dev, 842 "Could not allocate VF broadcast filter\n"); 843 spin_unlock_bh(&vsi->mac_filter_hash_lock); 844 wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hashcfg); 845 wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), 846 (u32)(hashcfg >> 32)); 847 /* program mac filter only for VF VSI */ 848 ret = i40e_sync_vsi_filters(vsi); 849 if (ret) 850 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); 851 } 852 853 /* storing VSI index and id for ADq and don't apply the mac filter */ 854 if (vf->adq_enabled) { 855 vf->ch[idx].vsi_idx = vsi->idx; 856 vf->ch[idx].vsi_id = vsi->id; 857 } 858 859 /* Set VF bandwidth if specified */ 860 if (vf->tx_rate) { 861 max_tx_rate = vf->tx_rate; 862 } else if (vf->ch[idx].max_tx_rate) { 863 max_tx_rate = vf->ch[idx].max_tx_rate; 864 } 865 866 if (max_tx_rate) { 867 max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR); 868 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, 869 max_tx_rate, 0, NULL); 870 if (ret) 871 dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n", 872 vf->vf_id, ret); 873 } 874 875 error_alloc_vsi_res: 876 return ret; 877 } 878 879 /** 880 * i40e_map_pf_queues_to_vsi 881 * @vf: pointer to the VF info 882 * 883 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This 884 * function takes care of first part VSILAN_QTABLE, mapping pf queues to VSI. 885 **/ 886 static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf) 887 { 888 struct i40e_pf *pf = vf->pf; 889 struct i40e_hw *hw = &pf->hw; 890 u32 reg, num_tc = 1; /* VF has at least one traffic class */ 891 u16 vsi_id, qps; 892 int i, j; 893 894 if (vf->adq_enabled) 895 num_tc = vf->num_tc; 896 897 for (i = 0; i < num_tc; i++) { 898 if (vf->adq_enabled) { 899 qps = vf->ch[i].num_qps; 900 vsi_id = vf->ch[i].vsi_id; 901 } else { 902 qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; 903 vsi_id = vf->lan_vsi_id; 904 } 905 906 for (j = 0; j < 7; j++) { 907 if (j * 2 >= qps) { 908 /* end of list */ 909 reg = 0x07FF07FF; 910 } else { 911 u16 qid = i40e_vc_get_pf_queue_id(vf, 912 vsi_id, 913 j * 2); 914 reg = qid; 915 qid = i40e_vc_get_pf_queue_id(vf, vsi_id, 916 (j * 2) + 1); 917 reg |= qid << 16; 918 } 919 i40e_write_rx_ctl(hw, 920 I40E_VSILAN_QTABLE(j, vsi_id), 921 reg); 922 } 923 } 924 } 925 926 /** 927 * i40e_map_pf_to_vf_queues 928 * @vf: pointer to the VF info 929 * 930 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This 931 * function takes care of the second part VPLAN_QTABLE & completes VF mappings. 932 **/ 933 static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf) 934 { 935 struct i40e_pf *pf = vf->pf; 936 struct i40e_hw *hw = &pf->hw; 937 u32 reg, total_qps = 0; 938 u32 qps, num_tc = 1; /* VF has at least one traffic class */ 939 u16 vsi_id, qid; 940 int i, j; 941 942 if (vf->adq_enabled) 943 num_tc = vf->num_tc; 944 945 for (i = 0; i < num_tc; i++) { 946 if (vf->adq_enabled) { 947 qps = vf->ch[i].num_qps; 948 vsi_id = vf->ch[i].vsi_id; 949 } else { 950 qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; 951 vsi_id = vf->lan_vsi_id; 952 } 953 954 for (j = 0; j < qps; j++) { 955 qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j); 956 957 reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK); 958 wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id), 959 reg); 960 total_qps++; 961 } 962 } 963 } 964 965 /** 966 * i40e_enable_vf_mappings 967 * @vf: pointer to the VF info 968 * 969 * enable VF mappings 970 **/ 971 static void i40e_enable_vf_mappings(struct i40e_vf *vf) 972 { 973 struct i40e_pf *pf = vf->pf; 974 struct i40e_hw *hw = &pf->hw; 975 u32 reg; 976 977 /* Tell the hardware we're using noncontiguous mapping. HW requires 978 * that VF queues be mapped using this method, even when they are 979 * contiguous in real life 980 */ 981 i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id), 982 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); 983 984 /* enable VF vplan_qtable mappings */ 985 reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK; 986 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg); 987 988 i40e_map_pf_to_vf_queues(vf); 989 i40e_map_pf_queues_to_vsi(vf); 990 991 i40e_flush(hw); 992 } 993 994 /** 995 * i40e_disable_vf_mappings 996 * @vf: pointer to the VF info 997 * 998 * disable VF mappings 999 **/ 1000 static void i40e_disable_vf_mappings(struct i40e_vf *vf) 1001 { 1002 struct i40e_pf *pf = vf->pf; 1003 struct i40e_hw *hw = &pf->hw; 1004 int i; 1005 1006 /* disable qp mappings */ 1007 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0); 1008 for (i = 0; i < I40E_MAX_VSI_QP; i++) 1009 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id), 1010 I40E_QUEUE_END_OF_LIST); 1011 i40e_flush(hw); 1012 } 1013 1014 /** 1015 * i40e_free_vf_res 1016 * @vf: pointer to the VF info 1017 * 1018 * free VF resources 1019 **/ 1020 static void i40e_free_vf_res(struct i40e_vf *vf) 1021 { 1022 struct i40e_pf *pf = vf->pf; 1023 struct i40e_hw *hw = &pf->hw; 1024 u32 reg_idx, reg; 1025 int i, j, msix_vf; 1026 1027 /* Start by disabling VF's configuration API to prevent the OS from 1028 * accessing the VF's VSI after it's freed / invalidated. 1029 */ 1030 clear_bit(I40E_VF_STATE_INIT, &vf->vf_states); 1031 1032 /* It's possible the VF had requeuested more queues than the default so 1033 * do the accounting here when we're about to free them. 1034 */ 1035 if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) { 1036 pf->queues_left += vf->num_queue_pairs - 1037 I40E_DEFAULT_QUEUES_PER_VF; 1038 } 1039 1040 /* free vsi & disconnect it from the parent uplink */ 1041 if (vf->lan_vsi_idx) { 1042 i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]); 1043 vf->lan_vsi_idx = 0; 1044 vf->lan_vsi_id = 0; 1045 } 1046 1047 /* do the accounting and remove additional ADq VSI's */ 1048 if (vf->adq_enabled && vf->ch[0].vsi_idx) { 1049 for (j = 0; j < vf->num_tc; j++) { 1050 /* At this point VSI0 is already released so don't 1051 * release it again and only clear their values in 1052 * structure variables 1053 */ 1054 if (j) 1055 i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]); 1056 vf->ch[j].vsi_idx = 0; 1057 vf->ch[j].vsi_id = 0; 1058 } 1059 } 1060 msix_vf = pf->hw.func_caps.num_msix_vectors_vf; 1061 1062 /* disable interrupts so the VF starts in a known state */ 1063 for (i = 0; i < msix_vf; i++) { 1064 /* format is same for both registers */ 1065 if (0 == i) 1066 reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id); 1067 else 1068 reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) * 1069 (vf->vf_id)) 1070 + (i - 1)); 1071 wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); 1072 i40e_flush(hw); 1073 } 1074 1075 /* clear the irq settings */ 1076 for (i = 0; i < msix_vf; i++) { 1077 /* format is same for both registers */ 1078 if (0 == i) 1079 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); 1080 else 1081 reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) * 1082 (vf->vf_id)) 1083 + (i - 1)); 1084 reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK | 1085 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK); 1086 wr32(hw, reg_idx, reg); 1087 i40e_flush(hw); 1088 } 1089 /* reset some of the state variables keeping track of the resources */ 1090 vf->num_queue_pairs = 0; 1091 clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); 1092 clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); 1093 } 1094 1095 /** 1096 * i40e_alloc_vf_res 1097 * @vf: pointer to the VF info 1098 * 1099 * allocate VF resources 1100 **/ 1101 static int i40e_alloc_vf_res(struct i40e_vf *vf) 1102 { 1103 struct i40e_pf *pf = vf->pf; 1104 int total_queue_pairs = 0; 1105 int ret, idx; 1106 1107 if (vf->num_req_queues && 1108 vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF) 1109 pf->num_vf_qps = vf->num_req_queues; 1110 else 1111 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; 1112 1113 /* allocate hw vsi context & associated resources */ 1114 ret = i40e_alloc_vsi_res(vf, 0); 1115 if (ret) 1116 goto error_alloc; 1117 total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; 1118 1119 /* allocate additional VSIs based on tc information for ADq */ 1120 if (vf->adq_enabled) { 1121 if (pf->queues_left >= 1122 (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) { 1123 /* TC 0 always belongs to VF VSI */ 1124 for (idx = 1; idx < vf->num_tc; idx++) { 1125 ret = i40e_alloc_vsi_res(vf, idx); 1126 if (ret) 1127 goto error_alloc; 1128 } 1129 /* send correct number of queues */ 1130 total_queue_pairs = I40E_MAX_VF_QUEUES; 1131 } else { 1132 dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n", 1133 vf->vf_id); 1134 vf->adq_enabled = false; 1135 } 1136 } 1137 1138 /* We account for each VF to get a default number of queue pairs. If 1139 * the VF has now requested more, we need to account for that to make 1140 * certain we never request more queues than we actually have left in 1141 * HW. 1142 */ 1143 if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) 1144 pf->queues_left -= 1145 total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF; 1146 1147 if (vf->trusted) 1148 set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 1149 else 1150 clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 1151 1152 /* store the total qps number for the runtime 1153 * VF req validation 1154 */ 1155 vf->num_queue_pairs = total_queue_pairs; 1156 1157 /* VF is now completely initialized */ 1158 set_bit(I40E_VF_STATE_INIT, &vf->vf_states); 1159 1160 error_alloc: 1161 if (ret) 1162 i40e_free_vf_res(vf); 1163 1164 return ret; 1165 } 1166 1167 #define VF_DEVICE_STATUS 0xAA 1168 #define VF_TRANS_PENDING_MASK 0x20 1169 /** 1170 * i40e_quiesce_vf_pci 1171 * @vf: pointer to the VF structure 1172 * 1173 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO 1174 * if the transactions never clear. 1175 **/ 1176 static int i40e_quiesce_vf_pci(struct i40e_vf *vf) 1177 { 1178 struct i40e_pf *pf = vf->pf; 1179 struct i40e_hw *hw = &pf->hw; 1180 int vf_abs_id, i; 1181 u32 reg; 1182 1183 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id; 1184 1185 wr32(hw, I40E_PF_PCI_CIAA, 1186 VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)); 1187 for (i = 0; i < 100; i++) { 1188 reg = rd32(hw, I40E_PF_PCI_CIAD); 1189 if ((reg & VF_TRANS_PENDING_MASK) == 0) 1190 return 0; 1191 udelay(1); 1192 } 1193 return -EIO; 1194 } 1195 1196 /** 1197 * __i40e_getnum_vf_vsi_vlan_filters 1198 * @vsi: pointer to the vsi 1199 * 1200 * called to get the number of VLANs offloaded on this VF 1201 **/ 1202 static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) 1203 { 1204 struct i40e_mac_filter *f; 1205 u16 num_vlans = 0, bkt; 1206 1207 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { 1208 if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) 1209 num_vlans++; 1210 } 1211 1212 return num_vlans; 1213 } 1214 1215 /** 1216 * i40e_getnum_vf_vsi_vlan_filters 1217 * @vsi: pointer to the vsi 1218 * 1219 * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held 1220 **/ 1221 static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) 1222 { 1223 int num_vlans; 1224 1225 spin_lock_bh(&vsi->mac_filter_hash_lock); 1226 num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi); 1227 spin_unlock_bh(&vsi->mac_filter_hash_lock); 1228 1229 return num_vlans; 1230 } 1231 1232 /** 1233 * i40e_get_vlan_list_sync 1234 * @vsi: pointer to the VSI 1235 * @num_vlans: number of VLANs in mac_filter_hash, returned to caller 1236 * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller. 1237 * This array is allocated here, but has to be freed in caller. 1238 * 1239 * Called to get number of VLANs and VLAN list present in mac_filter_hash. 1240 **/ 1241 static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans, 1242 s16 **vlan_list) 1243 { 1244 struct i40e_mac_filter *f; 1245 int i = 0; 1246 int bkt; 1247 1248 spin_lock_bh(&vsi->mac_filter_hash_lock); 1249 *num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi); 1250 *vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC); 1251 if (!(*vlan_list)) 1252 goto err; 1253 1254 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { 1255 if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID) 1256 continue; 1257 (*vlan_list)[i++] = f->vlan; 1258 } 1259 err: 1260 spin_unlock_bh(&vsi->mac_filter_hash_lock); 1261 } 1262 1263 /** 1264 * i40e_set_vsi_promisc 1265 * @vf: pointer to the VF struct 1266 * @seid: VSI number 1267 * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable 1268 * for a given VLAN 1269 * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable 1270 * for a given VLAN 1271 * @vl: List of VLANs - apply filter for given VLANs 1272 * @num_vlans: Number of elements in @vl 1273 **/ 1274 static int 1275 i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable, 1276 bool unicast_enable, s16 *vl, u16 num_vlans) 1277 { 1278 struct i40e_pf *pf = vf->pf; 1279 struct i40e_hw *hw = &pf->hw; 1280 int aq_ret, aq_tmp = 0; 1281 int i; 1282 1283 /* No VLAN to set promisc on, set on VSI */ 1284 if (!num_vlans || !vl) { 1285 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid, 1286 multi_enable, 1287 NULL); 1288 if (aq_ret) { 1289 int aq_err = pf->hw.aq.asq_last_status; 1290 1291 dev_err(&pf->pdev->dev, 1292 "VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n", 1293 vf->vf_id, ERR_PTR(aq_ret), 1294 libie_aq_str(aq_err)); 1295 1296 return aq_ret; 1297 } 1298 1299 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid, 1300 unicast_enable, 1301 NULL, true); 1302 1303 if (aq_ret) { 1304 int aq_err = pf->hw.aq.asq_last_status; 1305 1306 dev_err(&pf->pdev->dev, 1307 "VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n", 1308 vf->vf_id, ERR_PTR(aq_ret), 1309 libie_aq_str(aq_err)); 1310 } 1311 1312 return aq_ret; 1313 } 1314 1315 for (i = 0; i < num_vlans; i++) { 1316 aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid, 1317 multi_enable, 1318 vl[i], NULL); 1319 if (aq_ret) { 1320 int aq_err = pf->hw.aq.asq_last_status; 1321 1322 dev_err(&pf->pdev->dev, 1323 "VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n", 1324 vf->vf_id, ERR_PTR(aq_ret), 1325 libie_aq_str(aq_err)); 1326 1327 if (!aq_tmp) 1328 aq_tmp = aq_ret; 1329 } 1330 1331 aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid, 1332 unicast_enable, 1333 vl[i], NULL); 1334 if (aq_ret) { 1335 int aq_err = pf->hw.aq.asq_last_status; 1336 1337 dev_err(&pf->pdev->dev, 1338 "VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n", 1339 vf->vf_id, ERR_PTR(aq_ret), 1340 libie_aq_str(aq_err)); 1341 1342 if (!aq_tmp) 1343 aq_tmp = aq_ret; 1344 } 1345 } 1346 1347 if (aq_tmp) 1348 aq_ret = aq_tmp; 1349 1350 return aq_ret; 1351 } 1352 1353 /** 1354 * i40e_config_vf_promiscuous_mode 1355 * @vf: pointer to the VF info 1356 * @vsi_id: VSI id 1357 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable 1358 * @alluni: set MAC L2 layer unicast promiscuous enable/disable 1359 * 1360 * Called from the VF to configure the promiscuous mode of 1361 * VF vsis and from the VF reset path to reset promiscuous mode. 1362 **/ 1363 static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf, 1364 u16 vsi_id, 1365 bool allmulti, 1366 bool alluni) 1367 { 1368 struct i40e_pf *pf = vf->pf; 1369 struct i40e_vsi *vsi; 1370 int aq_ret = 0; 1371 u16 num_vlans; 1372 s16 *vl; 1373 1374 vsi = i40e_find_vsi_from_id(pf, vsi_id); 1375 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi) 1376 return -EINVAL; 1377 1378 if (vf->port_vlan_id) { 1379 aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, 1380 alluni, &vf->port_vlan_id, 1); 1381 return aq_ret; 1382 } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { 1383 i40e_get_vlan_list_sync(vsi, &num_vlans, &vl); 1384 1385 if (!vl) 1386 return -ENOMEM; 1387 1388 aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni, 1389 vl, num_vlans); 1390 kfree(vl); 1391 return aq_ret; 1392 } 1393 1394 /* no VLANs to set on, set on VSI */ 1395 aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni, 1396 NULL, 0); 1397 return aq_ret; 1398 } 1399 1400 /** 1401 * i40e_sync_vfr_reset 1402 * @hw: pointer to hw struct 1403 * @vf_id: VF identifier 1404 * 1405 * Before trigger hardware reset, we need to know if no other process has 1406 * reserved the hardware for any reset operations. This check is done by 1407 * examining the status of the RSTAT1 register used to signal the reset. 1408 **/ 1409 static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id) 1410 { 1411 u32 reg; 1412 int i; 1413 1414 for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) { 1415 reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) & 1416 I40E_VFINT_ICR0_ADMINQ_MASK; 1417 if (reg) 1418 return 0; 1419 1420 usleep_range(100, 200); 1421 } 1422 1423 return -EAGAIN; 1424 } 1425 1426 /** 1427 * i40e_trigger_vf_reset 1428 * @vf: pointer to the VF structure 1429 * @flr: VFLR was issued or not 1430 * 1431 * Trigger hardware to start a reset for a particular VF. Expects the caller 1432 * to wait the proper amount of time to allow hardware to reset the VF before 1433 * it cleans up and restores VF functionality. 1434 **/ 1435 static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr) 1436 { 1437 struct i40e_pf *pf = vf->pf; 1438 struct i40e_hw *hw = &pf->hw; 1439 u32 reg, reg_idx, bit_idx; 1440 bool vf_active; 1441 u32 radq; 1442 1443 /* warn the VF */ 1444 vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); 1445 1446 /* Disable VF's configuration API during reset. The flag is re-enabled 1447 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI. 1448 * It's normally disabled in i40e_free_vf_res(), but it's safer 1449 * to do it earlier to give some time to finish to any VF config 1450 * functions that may still be running at this point. 1451 */ 1452 clear_bit(I40E_VF_STATE_INIT, &vf->vf_states); 1453 1454 /* In the case of a VFLR, the HW has already reset the VF and we 1455 * just need to clean up, so don't hit the VFRTRIG register. 1456 */ 1457 if (!flr) { 1458 /* Sync VFR reset before trigger next one */ 1459 radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) & 1460 I40E_VFINT_ICR0_ADMINQ_MASK; 1461 if (vf_active && !radq) 1462 /* waiting for finish reset by virtual driver */ 1463 if (i40e_sync_vfr_reset(hw, vf->vf_id)) 1464 dev_info(&pf->pdev->dev, 1465 "Reset VF %d never finished\n", 1466 vf->vf_id); 1467 1468 /* Reset VF using VPGEN_VFRTRIG reg. It is also setting 1469 * in progress state in rstat1 register. 1470 */ 1471 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); 1472 reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; 1473 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); 1474 i40e_flush(hw); 1475 } 1476 /* clear the VFLR bit in GLGEN_VFLRSTAT */ 1477 reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; 1478 bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32; 1479 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); 1480 i40e_flush(hw); 1481 1482 if (i40e_quiesce_vf_pci(vf)) 1483 dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n", 1484 vf->vf_id); 1485 } 1486 1487 /** 1488 * i40e_cleanup_reset_vf 1489 * @vf: pointer to the VF structure 1490 * 1491 * Cleanup a VF after the hardware reset is finished. Expects the caller to 1492 * have verified whether the reset is finished properly, and ensure the 1493 * minimum amount of wait time has passed. 1494 **/ 1495 static void i40e_cleanup_reset_vf(struct i40e_vf *vf) 1496 { 1497 struct i40e_pf *pf = vf->pf; 1498 struct i40e_hw *hw = &pf->hw; 1499 u32 reg; 1500 1501 /* disable promisc modes in case they were enabled */ 1502 i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false); 1503 1504 /* free VF resources to begin resetting the VSI state */ 1505 i40e_free_vf_res(vf); 1506 1507 /* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg. 1508 * By doing this we allow HW to access VF memory at any point. If we 1509 * did it any sooner, HW could access memory while it was being freed 1510 * in i40e_free_vf_res(), causing an IOMMU fault. 1511 * 1512 * On the other hand, this needs to be done ASAP, because the VF driver 1513 * is waiting for this to happen and may report a timeout. It's 1514 * harmless, but it gets logged into Guest OS kernel log, so best avoid 1515 * it. 1516 */ 1517 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); 1518 reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; 1519 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); 1520 1521 /* reallocate VF resources to finish resetting the VSI state */ 1522 if (!i40e_alloc_vf_res(vf)) { 1523 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 1524 i40e_enable_vf_mappings(vf); 1525 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); 1526 clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); 1527 /* Do not notify the client during VF init */ 1528 if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE, 1529 &vf->vf_states)) 1530 i40e_notify_client_of_vf_reset(pf, abs_vf_id); 1531 vf->num_vlan = 0; 1532 } 1533 1534 /* Tell the VF driver the reset is done. This needs to be done only 1535 * after VF has been fully initialized, because the VF driver may 1536 * request resources immediately after setting this flag. 1537 */ 1538 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); 1539 } 1540 1541 /** 1542 * i40e_reset_vf 1543 * @vf: pointer to the VF structure 1544 * @flr: VFLR was issued or not 1545 * 1546 * Return: True if reset was performed successfully or if resets are disabled. 1547 * False if reset is already in progress. 1548 **/ 1549 bool i40e_reset_vf(struct i40e_vf *vf, bool flr) 1550 { 1551 struct i40e_pf *pf = vf->pf; 1552 struct i40e_hw *hw = &pf->hw; 1553 bool rsd = false; 1554 u32 reg; 1555 int i; 1556 1557 if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) 1558 return true; 1559 1560 /* Bail out if VFs are disabled. */ 1561 if (test_bit(__I40E_VF_DISABLE, pf->state)) 1562 return true; 1563 1564 /* If VF is being reset already we don't need to continue. */ 1565 if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) 1566 return false; 1567 1568 i40e_trigger_vf_reset(vf, flr); 1569 1570 /* poll VPGEN_VFRSTAT reg to make sure 1571 * that reset is complete 1572 */ 1573 for (i = 0; i < 10; i++) { 1574 /* VF reset requires driver to first reset the VF and then 1575 * poll the status register to make sure that the reset 1576 * completed successfully. Due to internal HW FIFO flushes, 1577 * we must wait 10ms before the register will be valid. 1578 */ 1579 usleep_range(10000, 20000); 1580 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); 1581 if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) { 1582 rsd = true; 1583 break; 1584 } 1585 } 1586 1587 if (flr) 1588 usleep_range(10000, 20000); 1589 1590 if (!rsd) 1591 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", 1592 vf->vf_id); 1593 usleep_range(10000, 20000); 1594 1595 /* On initial reset, we don't have any queues to disable */ 1596 if (vf->lan_vsi_idx != 0) 1597 i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]); 1598 1599 i40e_cleanup_reset_vf(vf); 1600 1601 i40e_flush(hw); 1602 usleep_range(20000, 40000); 1603 clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states); 1604 1605 return true; 1606 } 1607 1608 /** 1609 * i40e_reset_all_vfs 1610 * @pf: pointer to the PF structure 1611 * @flr: VFLR was issued or not 1612 * 1613 * Reset all allocated VFs in one go. First, tell the hardware to reset each 1614 * VF, then do all the waiting in one chunk, and finally finish restoring each 1615 * VF after the wait. This is useful during PF routines which need to reset 1616 * all VFs, as otherwise it must perform these resets in a serialized fashion. 1617 * 1618 * Returns true if any VFs were reset, and false otherwise. 1619 **/ 1620 bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) 1621 { 1622 struct i40e_hw *hw = &pf->hw; 1623 struct i40e_vf *vf; 1624 u32 reg; 1625 int i; 1626 1627 /* If we don't have any VFs, then there is nothing to reset */ 1628 if (!pf->num_alloc_vfs) 1629 return false; 1630 1631 /* If VFs have been disabled, there is no need to reset */ 1632 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) 1633 return false; 1634 1635 /* Begin reset on all VFs at once */ 1636 for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) { 1637 /* If VF is being reset no need to trigger reset again */ 1638 if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) 1639 i40e_trigger_vf_reset(vf, flr); 1640 } 1641 1642 /* HW requires some time to make sure it can flush the FIFO for a VF 1643 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in 1644 * sequence to make sure that it has completed. We'll keep track of 1645 * the VFs using a simple iterator that increments once that VF has 1646 * finished resetting. 1647 */ 1648 for (i = 0, vf = &pf->vf[0]; i < 10 && vf < &pf->vf[pf->num_alloc_vfs]; ++i) { 1649 usleep_range(10000, 20000); 1650 1651 /* Check each VF in sequence, beginning with the VF to fail 1652 * the previous check. 1653 */ 1654 while (vf < &pf->vf[pf->num_alloc_vfs]) { 1655 if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) { 1656 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); 1657 if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK)) 1658 break; 1659 } 1660 1661 /* If the current VF has finished resetting, move on 1662 * to the next VF in sequence. 1663 */ 1664 ++vf; 1665 } 1666 } 1667 1668 if (flr) 1669 usleep_range(10000, 20000); 1670 1671 /* Display a warning if at least one VF didn't manage to reset in 1672 * time, but continue on with the operation. 1673 */ 1674 if (vf < &pf->vf[pf->num_alloc_vfs]) 1675 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", 1676 vf->vf_id); 1677 usleep_range(10000, 20000); 1678 1679 /* Begin disabling all the rings associated with VFs, but do not wait 1680 * between each VF. 1681 */ 1682 for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) { 1683 /* On initial reset, we don't have any queues to disable */ 1684 if (vf->lan_vsi_idx == 0) 1685 continue; 1686 1687 /* If VF is reset in another thread just continue */ 1688 if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) 1689 continue; 1690 1691 i40e_vsi_stop_rings_no_wait(pf->vsi[vf->lan_vsi_idx]); 1692 } 1693 1694 /* Now that we've notified HW to disable all of the VF rings, wait 1695 * until they finish. 1696 */ 1697 for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) { 1698 /* On initial reset, we don't have any queues to disable */ 1699 if (vf->lan_vsi_idx == 0) 1700 continue; 1701 1702 /* If VF is reset in another thread just continue */ 1703 if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) 1704 continue; 1705 1706 i40e_vsi_wait_queues_disabled(pf->vsi[vf->lan_vsi_idx]); 1707 } 1708 1709 /* Hw may need up to 50ms to finish disabling the RX queues. We 1710 * minimize the wait by delaying only once for all VFs. 1711 */ 1712 mdelay(50); 1713 1714 /* Finish the reset on each VF */ 1715 for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) { 1716 /* If VF is reset in another thread just continue */ 1717 if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) 1718 continue; 1719 1720 i40e_cleanup_reset_vf(vf); 1721 } 1722 1723 i40e_flush(hw); 1724 usleep_range(20000, 40000); 1725 clear_bit(__I40E_VF_DISABLE, pf->state); 1726 1727 return true; 1728 } 1729 1730 /** 1731 * i40e_free_vfs 1732 * @pf: pointer to the PF structure 1733 * 1734 * free VF resources 1735 **/ 1736 void i40e_free_vfs(struct i40e_pf *pf) 1737 { 1738 struct i40e_hw *hw = &pf->hw; 1739 u32 reg_idx, bit_idx; 1740 int i, tmp, vf_id; 1741 1742 if (!pf->vf) 1743 return; 1744 1745 set_bit(__I40E_VFS_RELEASING, pf->state); 1746 while (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) 1747 usleep_range(1000, 2000); 1748 1749 i40e_notify_client_of_vf_enable(pf, 0); 1750 1751 /* Disable IOV before freeing resources. This lets any VF drivers 1752 * running in the host get themselves cleaned up before we yank 1753 * the carpet out from underneath their feet. 1754 */ 1755 if (!pci_vfs_assigned(pf->pdev)) 1756 pci_disable_sriov(pf->pdev); 1757 else 1758 dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); 1759 1760 /* Amortize wait time by stopping all VFs at the same time */ 1761 for (i = 0; i < pf->num_alloc_vfs; i++) { 1762 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) 1763 continue; 1764 1765 i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]); 1766 } 1767 1768 for (i = 0; i < pf->num_alloc_vfs; i++) { 1769 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) 1770 continue; 1771 1772 i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]); 1773 } 1774 1775 /* free up VF resources */ 1776 tmp = pf->num_alloc_vfs; 1777 pf->num_alloc_vfs = 0; 1778 for (i = 0; i < tmp; i++) { 1779 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) 1780 i40e_free_vf_res(&pf->vf[i]); 1781 /* disable qp mappings */ 1782 i40e_disable_vf_mappings(&pf->vf[i]); 1783 } 1784 1785 kfree(pf->vf); 1786 pf->vf = NULL; 1787 1788 /* This check is for when the driver is unloaded while VFs are 1789 * assigned. Setting the number of VFs to 0 through sysfs is caught 1790 * before this function ever gets called. 1791 */ 1792 if (!pci_vfs_assigned(pf->pdev)) { 1793 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to 1794 * work correctly when SR-IOV gets re-enabled. 1795 */ 1796 for (vf_id = 0; vf_id < tmp; vf_id++) { 1797 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; 1798 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; 1799 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); 1800 } 1801 } 1802 clear_bit(__I40E_VF_DISABLE, pf->state); 1803 clear_bit(__I40E_VFS_RELEASING, pf->state); 1804 } 1805 1806 #ifdef CONFIG_PCI_IOV 1807 /** 1808 * i40e_alloc_vfs 1809 * @pf: pointer to the PF structure 1810 * @num_alloc_vfs: number of VFs to allocate 1811 * 1812 * allocate VF resources 1813 **/ 1814 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) 1815 { 1816 struct i40e_vf *vfs; 1817 int i, ret = 0; 1818 1819 /* Disable interrupt 0 so we don't try to handle the VFLR. */ 1820 i40e_irq_dynamic_disable_icr0(pf); 1821 1822 /* Check to see if we're just allocating resources for extant VFs */ 1823 if (pci_num_vf(pf->pdev) != num_alloc_vfs) { 1824 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); 1825 if (ret) { 1826 clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); 1827 pf->num_alloc_vfs = 0; 1828 goto err_iov; 1829 } 1830 } 1831 /* allocate memory */ 1832 vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL); 1833 if (!vfs) { 1834 ret = -ENOMEM; 1835 goto err_alloc; 1836 } 1837 pf->vf = vfs; 1838 1839 /* apply default profile */ 1840 for (i = 0; i < num_alloc_vfs; i++) { 1841 vfs[i].pf = pf; 1842 vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB; 1843 vfs[i].vf_id = i; 1844 1845 /* assign default capabilities */ 1846 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); 1847 vfs[i].spoofchk = true; 1848 1849 set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states); 1850 1851 } 1852 pf->num_alloc_vfs = num_alloc_vfs; 1853 1854 /* VF resources get allocated during reset */ 1855 i40e_reset_all_vfs(pf, false); 1856 1857 i40e_notify_client_of_vf_enable(pf, num_alloc_vfs); 1858 1859 err_alloc: 1860 if (ret) 1861 i40e_free_vfs(pf); 1862 err_iov: 1863 /* Re-enable interrupt 0. */ 1864 i40e_irq_dynamic_enable_icr0(pf); 1865 return ret; 1866 } 1867 1868 #endif 1869 /** 1870 * i40e_pci_sriov_enable 1871 * @pdev: pointer to a pci_dev structure 1872 * @num_vfs: number of VFs to allocate 1873 * 1874 * Enable or change the number of VFs 1875 **/ 1876 static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs) 1877 { 1878 #ifdef CONFIG_PCI_IOV 1879 struct i40e_pf *pf = pci_get_drvdata(pdev); 1880 int pre_existing_vfs = pci_num_vf(pdev); 1881 int err = 0; 1882 1883 if (test_bit(__I40E_TESTING, pf->state)) { 1884 dev_warn(&pdev->dev, 1885 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n"); 1886 err = -EPERM; 1887 goto err_out; 1888 } 1889 1890 if (pre_existing_vfs && pre_existing_vfs != num_vfs) 1891 i40e_free_vfs(pf); 1892 else if (pre_existing_vfs && pre_existing_vfs == num_vfs) 1893 goto out; 1894 1895 if (num_vfs > pf->num_req_vfs) { 1896 dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n", 1897 num_vfs, pf->num_req_vfs); 1898 err = -EPERM; 1899 goto err_out; 1900 } 1901 1902 dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs); 1903 err = i40e_alloc_vfs(pf, num_vfs); 1904 if (err) { 1905 dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err); 1906 goto err_out; 1907 } 1908 1909 out: 1910 return num_vfs; 1911 1912 err_out: 1913 return err; 1914 #endif 1915 return 0; 1916 } 1917 1918 /** 1919 * i40e_pci_sriov_configure 1920 * @pdev: pointer to a pci_dev structure 1921 * @num_vfs: number of VFs to allocate 1922 * 1923 * Enable or change the number of VFs. Called when the user updates the number 1924 * of VFs in sysfs. 1925 **/ 1926 int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) 1927 { 1928 struct i40e_pf *pf = pci_get_drvdata(pdev); 1929 int ret = 0; 1930 1931 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 1932 dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 1933 return -EAGAIN; 1934 } 1935 1936 if (num_vfs) { 1937 if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) { 1938 set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); 1939 i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG); 1940 } 1941 ret = i40e_pci_sriov_enable(pdev, num_vfs); 1942 goto sriov_configure_out; 1943 } 1944 1945 if (!pci_vfs_assigned(pf->pdev)) { 1946 i40e_free_vfs(pf); 1947 clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); 1948 i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG); 1949 } else { 1950 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); 1951 ret = -EINVAL; 1952 goto sriov_configure_out; 1953 } 1954 sriov_configure_out: 1955 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 1956 return ret; 1957 } 1958 1959 /***********************virtual channel routines******************/ 1960 1961 /** 1962 * i40e_vc_send_msg_to_vf 1963 * @vf: pointer to the VF info 1964 * @v_opcode: virtual channel opcode 1965 * @v_retval: virtual channel return value 1966 * @msg: pointer to the msg buffer 1967 * @msglen: msg length 1968 * 1969 * send msg to VF 1970 **/ 1971 static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, 1972 u32 v_retval, u8 *msg, u16 msglen) 1973 { 1974 struct i40e_pf *pf; 1975 struct i40e_hw *hw; 1976 int abs_vf_id; 1977 int aq_ret; 1978 1979 /* validate the request */ 1980 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) 1981 return -EINVAL; 1982 1983 pf = vf->pf; 1984 hw = &pf->hw; 1985 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 1986 1987 aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, 1988 msg, msglen, NULL); 1989 if (aq_ret) { 1990 dev_info(&pf->pdev->dev, 1991 "Unable to send the message to VF %d aq_err %d\n", 1992 vf->vf_id, pf->hw.aq.asq_last_status); 1993 return -EIO; 1994 } 1995 1996 return 0; 1997 } 1998 1999 /** 2000 * i40e_vc_send_resp_to_vf 2001 * @vf: pointer to the VF info 2002 * @opcode: operation code 2003 * @retval: return value 2004 * 2005 * send resp msg to VF 2006 **/ 2007 static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, 2008 enum virtchnl_ops opcode, 2009 int retval) 2010 { 2011 return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0); 2012 } 2013 2014 /** 2015 * i40e_sync_vf_state 2016 * @vf: pointer to the VF info 2017 * @state: VF state 2018 * 2019 * Called from a VF message to synchronize the service with a potential 2020 * VF reset state 2021 **/ 2022 static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state) 2023 { 2024 int i; 2025 2026 /* When handling some messages, it needs VF state to be set. 2027 * It is possible that this flag is cleared during VF reset, 2028 * so there is a need to wait until the end of the reset to 2029 * handle the request message correctly. 2030 */ 2031 for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) { 2032 if (test_bit(state, &vf->vf_states)) 2033 return true; 2034 usleep_range(10000, 20000); 2035 } 2036 2037 return test_bit(state, &vf->vf_states); 2038 } 2039 2040 /** 2041 * i40e_vc_get_version_msg 2042 * @vf: pointer to the VF info 2043 * @msg: pointer to the msg buffer 2044 * 2045 * called from the VF to request the API version used by the PF 2046 **/ 2047 static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg) 2048 { 2049 struct virtchnl_version_info info = { 2050 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR 2051 }; 2052 2053 vf->vf_ver = *(struct virtchnl_version_info *)msg; 2054 /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */ 2055 if (VF_IS_V10(&vf->vf_ver)) 2056 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; 2057 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, 2058 0, (u8 *)&info, 2059 sizeof(struct virtchnl_version_info)); 2060 } 2061 2062 /** 2063 * i40e_del_qch - delete all the additional VSIs created as a part of ADq 2064 * @vf: pointer to VF structure 2065 **/ 2066 static void i40e_del_qch(struct i40e_vf *vf) 2067 { 2068 struct i40e_pf *pf = vf->pf; 2069 int i; 2070 2071 /* first element in the array belongs to primary VF VSI and we shouldn't 2072 * delete it. We should however delete the rest of the VSIs created 2073 */ 2074 for (i = 1; i < vf->num_tc; i++) { 2075 if (vf->ch[i].vsi_idx) { 2076 i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]); 2077 vf->ch[i].vsi_idx = 0; 2078 vf->ch[i].vsi_id = 0; 2079 } 2080 } 2081 } 2082 2083 /** 2084 * i40e_vc_get_max_frame_size 2085 * @vf: pointer to the VF 2086 * 2087 * Max frame size is determined based on the current port's max frame size and 2088 * whether a port VLAN is configured on this VF. The VF is not aware whether 2089 * it's in a port VLAN so the PF needs to account for this in max frame size 2090 * checks and sending the max frame size to the VF. 2091 **/ 2092 static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf) 2093 { 2094 u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size; 2095 2096 if (vf->port_vlan_id) 2097 max_frame_size -= VLAN_HLEN; 2098 2099 return max_frame_size; 2100 } 2101 2102 /** 2103 * i40e_vc_get_vf_resources_msg 2104 * @vf: pointer to the VF info 2105 * @msg: pointer to the msg buffer 2106 * 2107 * called from the VF to request its resources 2108 **/ 2109 static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) 2110 { 2111 struct virtchnl_vf_resource *vfres = NULL; 2112 struct i40e_pf *pf = vf->pf; 2113 struct i40e_vsi *vsi; 2114 int num_vsis = 1; 2115 int aq_ret = 0; 2116 size_t len = 0; 2117 int ret; 2118 2119 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) { 2120 aq_ret = -EINVAL; 2121 goto err; 2122 } 2123 2124 len = virtchnl_struct_size(vfres, vsi_res, num_vsis); 2125 vfres = kzalloc(len, GFP_KERNEL); 2126 if (!vfres) { 2127 aq_ret = -ENOMEM; 2128 len = 0; 2129 goto err; 2130 } 2131 if (VF_IS_V11(&vf->vf_ver)) 2132 vf->driver_caps = *(u32 *)msg; 2133 else 2134 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 | 2135 VIRTCHNL_VF_OFFLOAD_RSS_REG | 2136 VIRTCHNL_VF_OFFLOAD_VLAN; 2137 2138 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2; 2139 vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED; 2140 vsi = pf->vsi[vf->lan_vsi_idx]; 2141 if (!vsi->info.pvid) 2142 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; 2143 2144 if (i40e_vf_client_capable(pf, vf->vf_id) && 2145 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RDMA)) { 2146 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RDMA; 2147 set_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states); 2148 } else { 2149 clear_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states); 2150 } 2151 2152 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { 2153 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; 2154 } else { 2155 if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps) && 2156 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)) 2157 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ; 2158 else 2159 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; 2160 } 2161 2162 if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, pf->hw.caps)) { 2163 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) 2164 vfres->vf_cap_flags |= 2165 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; 2166 } 2167 2168 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP) 2169 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP; 2170 2171 if (test_bit(I40E_HW_CAP_OUTER_UDP_CSUM, pf->hw.caps) && 2172 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) 2173 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; 2174 2175 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) { 2176 if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { 2177 dev_err(&pf->pdev->dev, 2178 "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n", 2179 vf->vf_id); 2180 aq_ret = -EINVAL; 2181 goto err; 2182 } 2183 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; 2184 } 2185 2186 if (test_bit(I40E_HW_CAP_WB_ON_ITR, pf->hw.caps)) { 2187 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) 2188 vfres->vf_cap_flags |= 2189 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; 2190 } 2191 2192 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES) 2193 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; 2194 2195 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ) 2196 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ; 2197 2198 vfres->num_vsis = num_vsis; 2199 vfres->num_queue_pairs = vf->num_queue_pairs; 2200 vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; 2201 vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE; 2202 vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE; 2203 vfres->max_mtu = i40e_vc_get_max_frame_size(vf); 2204 2205 if (vf->lan_vsi_idx) { 2206 vfres->vsi_res[0].vsi_id = vf->lan_vsi_id; 2207 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; 2208 vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs; 2209 /* VFs only use TC 0 */ 2210 vfres->vsi_res[0].qset_handle 2211 = le16_to_cpu(vsi->info.qs_handle[0]); 2212 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) { 2213 spin_lock_bh(&vsi->mac_filter_hash_lock); 2214 i40e_del_mac_filter(vsi, vf->default_lan_addr.addr); 2215 eth_zero_addr(vf->default_lan_addr.addr); 2216 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2217 } 2218 ether_addr_copy(vfres->vsi_res[0].default_mac_addr, 2219 vf->default_lan_addr.addr); 2220 } 2221 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); 2222 2223 err: 2224 /* send the response back to the VF */ 2225 ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, 2226 aq_ret, (u8 *)vfres, len); 2227 2228 kfree(vfres); 2229 return ret; 2230 } 2231 2232 /** 2233 * i40e_vc_config_promiscuous_mode_msg 2234 * @vf: pointer to the VF info 2235 * @msg: pointer to the msg buffer 2236 * 2237 * called from the VF to configure the promiscuous mode of 2238 * VF vsis 2239 **/ 2240 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) 2241 { 2242 struct virtchnl_promisc_info *info = 2243 (struct virtchnl_promisc_info *)msg; 2244 struct i40e_pf *pf = vf->pf; 2245 bool allmulti = false; 2246 bool alluni = false; 2247 int aq_ret = 0; 2248 2249 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 2250 aq_ret = -EINVAL; 2251 goto err_out; 2252 } 2253 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 2254 dev_err(&pf->pdev->dev, 2255 "Unprivileged VF %d is attempting to configure promiscuous mode\n", 2256 vf->vf_id); 2257 2258 /* Lie to the VF on purpose, because this is an error we can 2259 * ignore. Unprivileged VF is not a virtual channel error. 2260 */ 2261 aq_ret = 0; 2262 goto err_out; 2263 } 2264 2265 if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) { 2266 aq_ret = -EINVAL; 2267 goto err_out; 2268 } 2269 2270 if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) { 2271 aq_ret = -EINVAL; 2272 goto err_out; 2273 } 2274 2275 /* Multicast promiscuous handling*/ 2276 if (info->flags & FLAG_VF_MULTICAST_PROMISC) 2277 allmulti = true; 2278 2279 if (info->flags & FLAG_VF_UNICAST_PROMISC) 2280 alluni = true; 2281 aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti, 2282 alluni); 2283 if (aq_ret) 2284 goto err_out; 2285 2286 if (allmulti) { 2287 if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC, 2288 &vf->vf_states)) 2289 dev_info(&pf->pdev->dev, 2290 "VF %d successfully set multicast promiscuous mode\n", 2291 vf->vf_id); 2292 } else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC, 2293 &vf->vf_states)) 2294 dev_info(&pf->pdev->dev, 2295 "VF %d successfully unset multicast promiscuous mode\n", 2296 vf->vf_id); 2297 2298 if (alluni) { 2299 if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC, 2300 &vf->vf_states)) 2301 dev_info(&pf->pdev->dev, 2302 "VF %d successfully set unicast promiscuous mode\n", 2303 vf->vf_id); 2304 } else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC, 2305 &vf->vf_states)) 2306 dev_info(&pf->pdev->dev, 2307 "VF %d successfully unset unicast promiscuous mode\n", 2308 vf->vf_id); 2309 2310 err_out: 2311 /* send the response to the VF */ 2312 return i40e_vc_send_resp_to_vf(vf, 2313 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, 2314 aq_ret); 2315 } 2316 2317 /** 2318 * i40e_vc_config_queues_msg 2319 * @vf: pointer to the VF info 2320 * @msg: pointer to the msg buffer 2321 * 2322 * called from the VF to configure the rx/tx 2323 * queues 2324 **/ 2325 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) 2326 { 2327 struct virtchnl_vsi_queue_config_info *qci = 2328 (struct virtchnl_vsi_queue_config_info *)msg; 2329 struct virtchnl_queue_pair_info *qpi; 2330 u16 vsi_id, vsi_queue_id = 0; 2331 struct i40e_pf *pf = vf->pf; 2332 int i, j = 0, idx = 0; 2333 struct i40e_vsi *vsi; 2334 u16 num_qps_all = 0; 2335 int aq_ret = 0; 2336 2337 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 2338 aq_ret = -EINVAL; 2339 goto error_param; 2340 } 2341 2342 if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) { 2343 aq_ret = -EINVAL; 2344 goto error_param; 2345 } 2346 2347 if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) { 2348 aq_ret = -EINVAL; 2349 goto error_param; 2350 } 2351 2352 if (vf->adq_enabled) { 2353 for (i = 0; i < vf->num_tc; i++) 2354 num_qps_all += vf->ch[i].num_qps; 2355 if (num_qps_all != qci->num_queue_pairs) { 2356 aq_ret = -EINVAL; 2357 goto error_param; 2358 } 2359 } 2360 2361 vsi_id = qci->vsi_id; 2362 2363 for (i = 0; i < qci->num_queue_pairs; i++) { 2364 qpi = &qci->qpair[i]; 2365 2366 if (!vf->adq_enabled) { 2367 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, 2368 qpi->txq.queue_id)) { 2369 aq_ret = -EINVAL; 2370 goto error_param; 2371 } 2372 2373 vsi_queue_id = qpi->txq.queue_id; 2374 2375 if (qpi->txq.vsi_id != qci->vsi_id || 2376 qpi->rxq.vsi_id != qci->vsi_id || 2377 qpi->rxq.queue_id != vsi_queue_id) { 2378 aq_ret = -EINVAL; 2379 goto error_param; 2380 } 2381 } 2382 2383 if (vf->adq_enabled) { 2384 if (idx >= ARRAY_SIZE(vf->ch)) { 2385 aq_ret = -ENODEV; 2386 goto error_param; 2387 } 2388 vsi_id = vf->ch[idx].vsi_id; 2389 } 2390 2391 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id, 2392 &qpi->rxq) || 2393 i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id, 2394 &qpi->txq)) { 2395 aq_ret = -EINVAL; 2396 goto error_param; 2397 } 2398 2399 /* For ADq there can be up to 4 VSIs with max 4 queues each. 2400 * VF does not know about these additional VSIs and all 2401 * it cares is about its own queues. PF configures these queues 2402 * to its appropriate VSIs based on TC mapping 2403 */ 2404 if (vf->adq_enabled) { 2405 if (idx >= ARRAY_SIZE(vf->ch)) { 2406 aq_ret = -ENODEV; 2407 goto error_param; 2408 } 2409 if (j == (vf->ch[idx].num_qps - 1)) { 2410 idx++; 2411 j = 0; /* resetting the queue count */ 2412 vsi_queue_id = 0; 2413 } else { 2414 j++; 2415 vsi_queue_id++; 2416 } 2417 } 2418 } 2419 /* set vsi num_queue_pairs in use to num configured by VF */ 2420 if (!vf->adq_enabled) { 2421 pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = 2422 qci->num_queue_pairs; 2423 } else { 2424 for (i = 0; i < vf->num_tc; i++) { 2425 vsi = pf->vsi[vf->ch[i].vsi_idx]; 2426 vsi->num_queue_pairs = vf->ch[i].num_qps; 2427 2428 if (i40e_update_adq_vsi_queues(vsi, i)) { 2429 aq_ret = -EIO; 2430 goto error_param; 2431 } 2432 } 2433 } 2434 2435 error_param: 2436 /* send the response to the VF */ 2437 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, 2438 aq_ret); 2439 } 2440 2441 /** 2442 * i40e_validate_queue_map - check queue map is valid 2443 * @vf: the VF structure pointer 2444 * @vsi_id: vsi id 2445 * @queuemap: Tx or Rx queue map 2446 * 2447 * check if Tx or Rx queue map is valid 2448 **/ 2449 static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id, 2450 unsigned long queuemap) 2451 { 2452 u16 vsi_queue_id, queue_id; 2453 2454 for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) { 2455 if (vf->adq_enabled) { 2456 vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id; 2457 queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF); 2458 } else { 2459 queue_id = vsi_queue_id; 2460 } 2461 2462 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) 2463 return -EINVAL; 2464 } 2465 2466 return 0; 2467 } 2468 2469 /** 2470 * i40e_vc_config_irq_map_msg 2471 * @vf: pointer to the VF info 2472 * @msg: pointer to the msg buffer 2473 * 2474 * called from the VF to configure the irq to 2475 * queue map 2476 **/ 2477 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg) 2478 { 2479 struct virtchnl_irq_map_info *irqmap_info = 2480 (struct virtchnl_irq_map_info *)msg; 2481 struct virtchnl_vector_map *map; 2482 int aq_ret = 0; 2483 u16 vsi_id; 2484 int i; 2485 2486 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 2487 aq_ret = -EINVAL; 2488 goto error_param; 2489 } 2490 2491 if (irqmap_info->num_vectors > 2492 vf->pf->hw.func_caps.num_msix_vectors_vf) { 2493 aq_ret = -EINVAL; 2494 goto error_param; 2495 } 2496 2497 for (i = 0; i < irqmap_info->num_vectors; i++) { 2498 map = &irqmap_info->vecmap[i]; 2499 /* validate msg params */ 2500 if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) || 2501 !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) { 2502 aq_ret = -EINVAL; 2503 goto error_param; 2504 } 2505 vsi_id = map->vsi_id; 2506 2507 if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) { 2508 aq_ret = -EINVAL; 2509 goto error_param; 2510 } 2511 2512 if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) { 2513 aq_ret = -EINVAL; 2514 goto error_param; 2515 } 2516 2517 i40e_config_irq_link_list(vf, vsi_id, map); 2518 } 2519 error_param: 2520 /* send the response to the VF */ 2521 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, 2522 aq_ret); 2523 } 2524 2525 /** 2526 * i40e_ctrl_vf_tx_rings 2527 * @vsi: the SRIOV VSI being configured 2528 * @q_map: bit map of the queues to be enabled 2529 * @enable: start or stop the queue 2530 **/ 2531 static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map, 2532 bool enable) 2533 { 2534 struct i40e_pf *pf = vsi->back; 2535 int ret = 0; 2536 u16 q_id; 2537 2538 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { 2539 ret = i40e_control_wait_tx_q(vsi->seid, pf, 2540 vsi->base_queue + q_id, 2541 false /*is xdp*/, enable); 2542 if (ret) 2543 break; 2544 } 2545 return ret; 2546 } 2547 2548 /** 2549 * i40e_ctrl_vf_rx_rings 2550 * @vsi: the SRIOV VSI being configured 2551 * @q_map: bit map of the queues to be enabled 2552 * @enable: start or stop the queue 2553 **/ 2554 static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map, 2555 bool enable) 2556 { 2557 struct i40e_pf *pf = vsi->back; 2558 int ret = 0; 2559 u16 q_id; 2560 2561 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { 2562 ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id, 2563 enable); 2564 if (ret) 2565 break; 2566 } 2567 return ret; 2568 } 2569 2570 /** 2571 * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTHCHNL 2572 * @vqs: virtchnl_queue_select structure containing bitmaps to validate 2573 * 2574 * Returns true if validation was successful, else false. 2575 */ 2576 static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs) 2577 { 2578 if ((!vqs->rx_queues && !vqs->tx_queues) || 2579 vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) || 2580 vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES)) 2581 return false; 2582 2583 return true; 2584 } 2585 2586 /** 2587 * i40e_vc_enable_queues_msg 2588 * @vf: pointer to the VF info 2589 * @msg: pointer to the msg buffer 2590 * 2591 * called from the VF to enable all or specific queue(s) 2592 **/ 2593 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) 2594 { 2595 struct virtchnl_queue_select *vqs = 2596 (struct virtchnl_queue_select *)msg; 2597 struct i40e_pf *pf = vf->pf; 2598 int aq_ret = 0; 2599 int i; 2600 2601 if (vf->is_disabled_from_host) { 2602 aq_ret = -EPERM; 2603 dev_info(&pf->pdev->dev, 2604 "Admin has disabled VF %d, will not enable queues\n", 2605 vf->vf_id); 2606 goto error_param; 2607 } 2608 2609 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2610 aq_ret = -EINVAL; 2611 goto error_param; 2612 } 2613 2614 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2615 aq_ret = -EINVAL; 2616 goto error_param; 2617 } 2618 2619 if (!i40e_vc_validate_vqs_bitmaps(vqs)) { 2620 aq_ret = -EINVAL; 2621 goto error_param; 2622 } 2623 2624 /* Use the queue bit map sent by the VF */ 2625 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, 2626 true)) { 2627 aq_ret = -EIO; 2628 goto error_param; 2629 } 2630 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, 2631 true)) { 2632 aq_ret = -EIO; 2633 goto error_param; 2634 } 2635 2636 /* need to start the rings for additional ADq VSI's as well */ 2637 if (vf->adq_enabled) { 2638 /* zero belongs to LAN VSI */ 2639 for (i = 1; i < vf->num_tc; i++) { 2640 if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx])) 2641 aq_ret = -EIO; 2642 } 2643 } 2644 2645 error_param: 2646 /* send the response to the VF */ 2647 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, 2648 aq_ret); 2649 } 2650 2651 /** 2652 * i40e_vc_disable_queues_msg 2653 * @vf: pointer to the VF info 2654 * @msg: pointer to the msg buffer 2655 * 2656 * called from the VF to disable all or specific 2657 * queue(s) 2658 **/ 2659 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) 2660 { 2661 struct virtchnl_queue_select *vqs = 2662 (struct virtchnl_queue_select *)msg; 2663 struct i40e_pf *pf = vf->pf; 2664 int aq_ret = 0; 2665 2666 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 2667 aq_ret = -EINVAL; 2668 goto error_param; 2669 } 2670 2671 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2672 aq_ret = -EINVAL; 2673 goto error_param; 2674 } 2675 2676 if (!i40e_vc_validate_vqs_bitmaps(vqs)) { 2677 aq_ret = -EINVAL; 2678 goto error_param; 2679 } 2680 2681 /* Use the queue bit map sent by the VF */ 2682 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, 2683 false)) { 2684 aq_ret = -EIO; 2685 goto error_param; 2686 } 2687 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, 2688 false)) { 2689 aq_ret = -EIO; 2690 goto error_param; 2691 } 2692 error_param: 2693 /* send the response to the VF */ 2694 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, 2695 aq_ret); 2696 } 2697 2698 /** 2699 * i40e_check_enough_queue - find big enough queue number 2700 * @vf: pointer to the VF info 2701 * @needed: the number of items needed 2702 * 2703 * Returns the base item index of the queue, or negative for error 2704 **/ 2705 static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed) 2706 { 2707 unsigned int i, cur_queues, more, pool_size; 2708 struct i40e_lump_tracking *pile; 2709 struct i40e_pf *pf = vf->pf; 2710 struct i40e_vsi *vsi; 2711 2712 vsi = pf->vsi[vf->lan_vsi_idx]; 2713 cur_queues = vsi->alloc_queue_pairs; 2714 2715 /* if current allocated queues are enough for need */ 2716 if (cur_queues >= needed) 2717 return vsi->base_queue; 2718 2719 pile = pf->qp_pile; 2720 if (cur_queues > 0) { 2721 /* if the allocated queues are not zero 2722 * just check if there are enough queues for more 2723 * behind the allocated queues. 2724 */ 2725 more = needed - cur_queues; 2726 for (i = vsi->base_queue + cur_queues; 2727 i < pile->num_entries; i++) { 2728 if (pile->list[i] & I40E_PILE_VALID_BIT) 2729 break; 2730 2731 if (more-- == 1) 2732 /* there is enough */ 2733 return vsi->base_queue; 2734 } 2735 } 2736 2737 pool_size = 0; 2738 for (i = 0; i < pile->num_entries; i++) { 2739 if (pile->list[i] & I40E_PILE_VALID_BIT) { 2740 pool_size = 0; 2741 continue; 2742 } 2743 if (needed <= ++pool_size) 2744 /* there is enough */ 2745 return i; 2746 } 2747 2748 return -ENOMEM; 2749 } 2750 2751 /** 2752 * i40e_vc_request_queues_msg 2753 * @vf: pointer to the VF info 2754 * @msg: pointer to the msg buffer 2755 * 2756 * VFs get a default number of queues but can use this message to request a 2757 * different number. If the request is successful, PF will reset the VF and 2758 * return 0. If unsuccessful, PF will send message informing VF of number of 2759 * available queues and return result of sending VF a message. 2760 **/ 2761 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) 2762 { 2763 struct virtchnl_vf_res_request *vfres = 2764 (struct virtchnl_vf_res_request *)msg; 2765 u16 req_pairs = vfres->num_queue_pairs; 2766 u8 cur_pairs = vf->num_queue_pairs; 2767 struct i40e_pf *pf = vf->pf; 2768 2769 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) 2770 return -EINVAL; 2771 2772 if (req_pairs > I40E_MAX_VF_QUEUES) { 2773 dev_err(&pf->pdev->dev, 2774 "VF %d tried to request more than %d queues.\n", 2775 vf->vf_id, 2776 I40E_MAX_VF_QUEUES); 2777 vfres->num_queue_pairs = I40E_MAX_VF_QUEUES; 2778 } else if (req_pairs - cur_pairs > pf->queues_left) { 2779 dev_warn(&pf->pdev->dev, 2780 "VF %d requested %d more queues, but only %d left.\n", 2781 vf->vf_id, 2782 req_pairs - cur_pairs, 2783 pf->queues_left); 2784 vfres->num_queue_pairs = pf->queues_left + cur_pairs; 2785 } else if (i40e_check_enough_queue(vf, req_pairs) < 0) { 2786 dev_warn(&pf->pdev->dev, 2787 "VF %d requested %d more queues, but there is not enough for it.\n", 2788 vf->vf_id, 2789 req_pairs - cur_pairs); 2790 vfres->num_queue_pairs = cur_pairs; 2791 } else { 2792 /* successful request */ 2793 vf->num_req_queues = req_pairs; 2794 i40e_vc_reset_vf(vf, true); 2795 return 0; 2796 } 2797 2798 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0, 2799 (u8 *)vfres, sizeof(*vfres)); 2800 } 2801 2802 /** 2803 * i40e_vc_get_stats_msg 2804 * @vf: pointer to the VF info 2805 * @msg: pointer to the msg buffer 2806 * 2807 * called from the VF to get vsi stats 2808 **/ 2809 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) 2810 { 2811 struct virtchnl_queue_select *vqs = 2812 (struct virtchnl_queue_select *)msg; 2813 struct i40e_pf *pf = vf->pf; 2814 struct i40e_eth_stats stats; 2815 int aq_ret = 0; 2816 struct i40e_vsi *vsi; 2817 2818 memset(&stats, 0, sizeof(struct i40e_eth_stats)); 2819 2820 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 2821 aq_ret = -EINVAL; 2822 goto error_param; 2823 } 2824 2825 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2826 aq_ret = -EINVAL; 2827 goto error_param; 2828 } 2829 2830 vsi = pf->vsi[vf->lan_vsi_idx]; 2831 if (!vsi) { 2832 aq_ret = -EINVAL; 2833 goto error_param; 2834 } 2835 i40e_update_eth_stats(vsi); 2836 stats = vsi->eth_stats; 2837 2838 error_param: 2839 /* send the response back to the VF */ 2840 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret, 2841 (u8 *)&stats, sizeof(stats)); 2842 } 2843 2844 /** 2845 * i40e_can_vf_change_mac 2846 * @vf: pointer to the VF info 2847 * 2848 * Return true if the VF is allowed to change its MAC filters, false otherwise 2849 */ 2850 static bool i40e_can_vf_change_mac(struct i40e_vf *vf) 2851 { 2852 /* If the VF MAC address has been set administratively (via the 2853 * ndo_set_vf_mac command), then deny permission to the VF to 2854 * add/delete unicast MAC addresses, unless the VF is trusted 2855 */ 2856 if (vf->pf_set_mac && !vf->trusted) 2857 return false; 2858 2859 return true; 2860 } 2861 2862 #define I40E_MAX_MACVLAN_PER_HW 3072 2863 #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \ 2864 (num_ports)) 2865 /* If the VF is not trusted restrict the number of MAC/VLAN it can program 2866 * MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast 2867 */ 2868 #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1) 2869 #define I40E_VC_MAX_VLAN_PER_VF 16 2870 2871 #define I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(vf_num, num_ports) \ 2872 ({ typeof(vf_num) vf_num_ = (vf_num); \ 2873 typeof(num_ports) num_ports_ = (num_ports); \ 2874 ((I40E_MAX_MACVLAN_PER_PF(num_ports_) - vf_num_ * \ 2875 I40E_VC_MAX_MAC_ADDR_PER_VF) / vf_num_) + \ 2876 I40E_VC_MAX_MAC_ADDR_PER_VF; }) 2877 /** 2878 * i40e_check_vf_permission 2879 * @vf: pointer to the VF info 2880 * @al: MAC address list from virtchnl 2881 * 2882 * Check that the given list of MAC addresses is allowed. Will return -EPERM 2883 * if any address in the list is not valid. Checks the following conditions: 2884 * 2885 * 1) broadcast and zero addresses are never valid 2886 * 2) unicast addresses are not allowed if the VMM has administratively set 2887 * the VF MAC address, unless the VF is marked as privileged. 2888 * 3) There is enough space to add all the addresses. 2889 * 2890 * Note that to guarantee consistency, it is expected this function be called 2891 * while holding the mac_filter_hash_lock, as otherwise the current number of 2892 * addresses might not be accurate. 2893 **/ 2894 static inline int i40e_check_vf_permission(struct i40e_vf *vf, 2895 struct virtchnl_ether_addr_list *al) 2896 { 2897 struct i40e_pf *pf = vf->pf; 2898 struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx]; 2899 struct i40e_hw *hw = &pf->hw; 2900 int mac2add_cnt = 0; 2901 int i; 2902 2903 for (i = 0; i < al->num_elements; i++) { 2904 struct i40e_mac_filter *f; 2905 u8 *addr = al->list[i].addr; 2906 2907 if (is_broadcast_ether_addr(addr) || 2908 is_zero_ether_addr(addr)) { 2909 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", 2910 addr); 2911 return -EINVAL; 2912 } 2913 2914 /* If the host VMM administrator has set the VF MAC address 2915 * administratively via the ndo_set_vf_mac command then deny 2916 * permission to the VF to add or delete unicast MAC addresses. 2917 * Unless the VF is privileged and then it can do whatever. 2918 * The VF may request to set the MAC address filter already 2919 * assigned to it so do not return an error in that case. 2920 */ 2921 if (!i40e_can_vf_change_mac(vf) && 2922 !is_multicast_ether_addr(addr) && 2923 !ether_addr_equal(addr, vf->default_lan_addr.addr)) { 2924 dev_err(&pf->pdev->dev, 2925 "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); 2926 return -EPERM; 2927 } 2928 2929 /*count filters that really will be added*/ 2930 f = i40e_find_mac(vsi, addr); 2931 if (!f) 2932 ++mac2add_cnt; 2933 } 2934 2935 /* If this VF is not privileged, then we can't add more than a limited 2936 * number of addresses. Check to make sure that the additions do not 2937 * push us over the limit. 2938 */ 2939 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 2940 if ((i40e_count_filters(vsi) + mac2add_cnt) > 2941 I40E_VC_MAX_MAC_ADDR_PER_VF) { 2942 dev_err(&pf->pdev->dev, 2943 "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n"); 2944 return -EPERM; 2945 } 2946 /* If this VF is trusted, it can use more resources than untrusted. 2947 * However to ensure that every trusted VF has appropriate number of 2948 * resources, divide whole pool of resources per port and then across 2949 * all VFs. 2950 */ 2951 } else { 2952 if ((i40e_count_filters(vsi) + mac2add_cnt) > 2953 I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs, 2954 hw->num_ports)) { 2955 dev_err(&pf->pdev->dev, 2956 "Cannot add more MAC addresses, trusted VF exhausted it's resources\n"); 2957 return -EPERM; 2958 } 2959 } 2960 return 0; 2961 } 2962 2963 /** 2964 * i40e_vc_ether_addr_type - get type of virtchnl_ether_addr 2965 * @vc_ether_addr: used to extract the type 2966 **/ 2967 static u8 2968 i40e_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr) 2969 { 2970 return vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK; 2971 } 2972 2973 /** 2974 * i40e_is_vc_addr_legacy 2975 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type 2976 * 2977 * check if the MAC address is from an older VF 2978 **/ 2979 static bool 2980 i40e_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr) 2981 { 2982 return i40e_vc_ether_addr_type(vc_ether_addr) == 2983 VIRTCHNL_ETHER_ADDR_LEGACY; 2984 } 2985 2986 /** 2987 * i40e_is_vc_addr_primary 2988 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type 2989 * 2990 * check if the MAC address is the VF's primary MAC 2991 * This function should only be called when the MAC address in 2992 * virtchnl_ether_addr is a valid unicast MAC 2993 **/ 2994 static bool 2995 i40e_is_vc_addr_primary(struct virtchnl_ether_addr *vc_ether_addr) 2996 { 2997 return i40e_vc_ether_addr_type(vc_ether_addr) == 2998 VIRTCHNL_ETHER_ADDR_PRIMARY; 2999 } 3000 3001 /** 3002 * i40e_update_vf_mac_addr 3003 * @vf: VF to update 3004 * @vc_ether_addr: structure from VIRTCHNL with MAC to add 3005 * 3006 * update the VF's cached hardware MAC if allowed 3007 **/ 3008 static void 3009 i40e_update_vf_mac_addr(struct i40e_vf *vf, 3010 struct virtchnl_ether_addr *vc_ether_addr) 3011 { 3012 u8 *mac_addr = vc_ether_addr->addr; 3013 3014 if (!is_valid_ether_addr(mac_addr)) 3015 return; 3016 3017 /* If request to add MAC filter is a primary request update its default 3018 * MAC address with the requested one. If it is a legacy request then 3019 * check if current default is empty if so update the default MAC 3020 */ 3021 if (i40e_is_vc_addr_primary(vc_ether_addr)) { 3022 ether_addr_copy(vf->default_lan_addr.addr, mac_addr); 3023 } else if (i40e_is_vc_addr_legacy(vc_ether_addr)) { 3024 if (is_zero_ether_addr(vf->default_lan_addr.addr)) 3025 ether_addr_copy(vf->default_lan_addr.addr, mac_addr); 3026 } 3027 } 3028 3029 /** 3030 * i40e_vc_add_mac_addr_msg 3031 * @vf: pointer to the VF info 3032 * @msg: pointer to the msg buffer 3033 * 3034 * add guest mac address filter 3035 **/ 3036 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) 3037 { 3038 struct virtchnl_ether_addr_list *al = 3039 (struct virtchnl_ether_addr_list *)msg; 3040 struct i40e_pf *pf = vf->pf; 3041 struct i40e_vsi *vsi = NULL; 3042 int ret = 0; 3043 int i; 3044 3045 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || 3046 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { 3047 ret = -EINVAL; 3048 goto error_param; 3049 } 3050 3051 vsi = pf->vsi[vf->lan_vsi_idx]; 3052 3053 /* Lock once, because all function inside for loop accesses VSI's 3054 * MAC filter list which needs to be protected using same lock. 3055 */ 3056 spin_lock_bh(&vsi->mac_filter_hash_lock); 3057 3058 ret = i40e_check_vf_permission(vf, al); 3059 if (ret) { 3060 spin_unlock_bh(&vsi->mac_filter_hash_lock); 3061 goto error_param; 3062 } 3063 3064 /* add new addresses to the list */ 3065 for (i = 0; i < al->num_elements; i++) { 3066 struct i40e_mac_filter *f; 3067 3068 f = i40e_find_mac(vsi, al->list[i].addr); 3069 if (!f) { 3070 f = i40e_add_mac_filter(vsi, al->list[i].addr); 3071 3072 if (!f) { 3073 dev_err(&pf->pdev->dev, 3074 "Unable to add MAC filter %pM for VF %d\n", 3075 al->list[i].addr, vf->vf_id); 3076 ret = -EINVAL; 3077 spin_unlock_bh(&vsi->mac_filter_hash_lock); 3078 goto error_param; 3079 } 3080 } 3081 i40e_update_vf_mac_addr(vf, &al->list[i]); 3082 } 3083 spin_unlock_bh(&vsi->mac_filter_hash_lock); 3084 3085 /* program the updated filter list */ 3086 ret = i40e_sync_vsi_filters(vsi); 3087 if (ret) 3088 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", 3089 vf->vf_id, ret); 3090 3091 error_param: 3092 /* send the response to the VF */ 3093 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, 3094 ret, NULL, 0); 3095 } 3096 3097 /** 3098 * i40e_vc_del_mac_addr_msg 3099 * @vf: pointer to the VF info 3100 * @msg: pointer to the msg buffer 3101 * 3102 * remove guest mac address filter 3103 **/ 3104 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) 3105 { 3106 struct virtchnl_ether_addr_list *al = 3107 (struct virtchnl_ether_addr_list *)msg; 3108 bool was_unimac_deleted = false; 3109 struct i40e_pf *pf = vf->pf; 3110 struct i40e_vsi *vsi = NULL; 3111 int ret = 0; 3112 int i; 3113 3114 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || 3115 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { 3116 ret = -EINVAL; 3117 goto error_param; 3118 } 3119 3120 for (i = 0; i < al->num_elements; i++) { 3121 if (is_broadcast_ether_addr(al->list[i].addr) || 3122 is_zero_ether_addr(al->list[i].addr)) { 3123 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n", 3124 al->list[i].addr, vf->vf_id); 3125 ret = -EINVAL; 3126 goto error_param; 3127 } 3128 } 3129 vsi = pf->vsi[vf->lan_vsi_idx]; 3130 3131 spin_lock_bh(&vsi->mac_filter_hash_lock); 3132 /* delete addresses from the list */ 3133 for (i = 0; i < al->num_elements; i++) { 3134 const u8 *addr = al->list[i].addr; 3135 3136 /* Allow to delete VF primary MAC only if it was not set 3137 * administratively by PF. 3138 */ 3139 if (ether_addr_equal(addr, vf->default_lan_addr.addr)) { 3140 if (!vf->pf_set_mac) 3141 was_unimac_deleted = true; 3142 else 3143 continue; 3144 } 3145 3146 if (i40e_del_mac_filter(vsi, al->list[i].addr)) { 3147 ret = -EINVAL; 3148 spin_unlock_bh(&vsi->mac_filter_hash_lock); 3149 goto error_param; 3150 } 3151 } 3152 3153 spin_unlock_bh(&vsi->mac_filter_hash_lock); 3154 3155 if (was_unimac_deleted) 3156 eth_zero_addr(vf->default_lan_addr.addr); 3157 3158 /* program the updated filter list */ 3159 ret = i40e_sync_vsi_filters(vsi); 3160 if (ret) 3161 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", 3162 vf->vf_id, ret); 3163 3164 if (vf->trusted && was_unimac_deleted) { 3165 struct i40e_mac_filter *f; 3166 struct hlist_node *h; 3167 u8 *macaddr = NULL; 3168 int bkt; 3169 3170 /* set last unicast mac address as default */ 3171 spin_lock_bh(&vsi->mac_filter_hash_lock); 3172 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { 3173 if (is_valid_ether_addr(f->macaddr)) 3174 macaddr = f->macaddr; 3175 } 3176 if (macaddr) 3177 ether_addr_copy(vf->default_lan_addr.addr, macaddr); 3178 spin_unlock_bh(&vsi->mac_filter_hash_lock); 3179 } 3180 error_param: 3181 /* send the response to the VF */ 3182 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret); 3183 } 3184 3185 /** 3186 * i40e_vc_add_vlan_msg 3187 * @vf: pointer to the VF info 3188 * @msg: pointer to the msg buffer 3189 * 3190 * program guest vlan id 3191 **/ 3192 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg) 3193 { 3194 struct virtchnl_vlan_filter_list *vfl = 3195 (struct virtchnl_vlan_filter_list *)msg; 3196 struct i40e_pf *pf = vf->pf; 3197 struct i40e_vsi *vsi = NULL; 3198 int aq_ret = 0; 3199 int i; 3200 3201 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) && 3202 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 3203 dev_err(&pf->pdev->dev, 3204 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n"); 3205 goto error_param; 3206 } 3207 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 3208 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { 3209 aq_ret = -EINVAL; 3210 goto error_param; 3211 } 3212 3213 for (i = 0; i < vfl->num_elements; i++) { 3214 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 3215 aq_ret = -EINVAL; 3216 dev_err(&pf->pdev->dev, 3217 "invalid VF VLAN id %d\n", vfl->vlan_id[i]); 3218 goto error_param; 3219 } 3220 } 3221 vsi = pf->vsi[vf->lan_vsi_idx]; 3222 if (vsi->info.pvid) { 3223 aq_ret = -EINVAL; 3224 goto error_param; 3225 } 3226 3227 i40e_vlan_stripping_enable(vsi); 3228 for (i = 0; i < vfl->num_elements; i++) { 3229 /* add new VLAN filter */ 3230 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]); 3231 if (!ret) 3232 vf->num_vlan++; 3233 3234 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 3235 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 3236 true, 3237 vfl->vlan_id[i], 3238 NULL); 3239 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 3240 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 3241 true, 3242 vfl->vlan_id[i], 3243 NULL); 3244 3245 if (ret) 3246 dev_err(&pf->pdev->dev, 3247 "Unable to add VLAN filter %d for VF %d, error %d\n", 3248 vfl->vlan_id[i], vf->vf_id, ret); 3249 } 3250 3251 error_param: 3252 /* send the response to the VF */ 3253 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret); 3254 } 3255 3256 /** 3257 * i40e_vc_remove_vlan_msg 3258 * @vf: pointer to the VF info 3259 * @msg: pointer to the msg buffer 3260 * 3261 * remove programmed guest vlan id 3262 **/ 3263 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) 3264 { 3265 struct virtchnl_vlan_filter_list *vfl = 3266 (struct virtchnl_vlan_filter_list *)msg; 3267 struct i40e_pf *pf = vf->pf; 3268 struct i40e_vsi *vsi = NULL; 3269 int aq_ret = 0; 3270 int i; 3271 3272 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || 3273 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { 3274 aq_ret = -EINVAL; 3275 goto error_param; 3276 } 3277 3278 for (i = 0; i < vfl->num_elements; i++) { 3279 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 3280 aq_ret = -EINVAL; 3281 goto error_param; 3282 } 3283 } 3284 3285 vsi = pf->vsi[vf->lan_vsi_idx]; 3286 if (vsi->info.pvid) { 3287 if (vfl->num_elements > 1 || vfl->vlan_id[0]) 3288 aq_ret = -EINVAL; 3289 goto error_param; 3290 } 3291 3292 for (i = 0; i < vfl->num_elements; i++) { 3293 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); 3294 vf->num_vlan--; 3295 3296 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 3297 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 3298 false, 3299 vfl->vlan_id[i], 3300 NULL); 3301 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 3302 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 3303 false, 3304 vfl->vlan_id[i], 3305 NULL); 3306 } 3307 3308 error_param: 3309 /* send the response to the VF */ 3310 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret); 3311 } 3312 3313 /** 3314 * i40e_vc_rdma_msg 3315 * @vf: pointer to the VF info 3316 * @msg: pointer to the msg buffer 3317 * @msglen: msg length 3318 * 3319 * called from the VF for the iwarp msgs 3320 **/ 3321 static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 3322 { 3323 struct i40e_pf *pf = vf->pf; 3324 struct i40e_vsi *main_vsi; 3325 int aq_ret = 0; 3326 int abs_vf_id; 3327 3328 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 3329 !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) { 3330 aq_ret = -EINVAL; 3331 goto error_param; 3332 } 3333 3334 main_vsi = i40e_pf_get_main_vsi(pf); 3335 abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; 3336 i40e_notify_client_of_vf_msg(main_vsi, abs_vf_id, msg, msglen); 3337 3338 error_param: 3339 /* send the response to the VF */ 3340 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_RDMA, 3341 aq_ret); 3342 } 3343 3344 /** 3345 * i40e_vc_rdma_qvmap_msg 3346 * @vf: pointer to the VF info 3347 * @msg: pointer to the msg buffer 3348 * @config: config qvmap or release it 3349 * 3350 * called from the VF for the iwarp msgs 3351 **/ 3352 static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config) 3353 { 3354 struct virtchnl_rdma_qvlist_info *qvlist_info = 3355 (struct virtchnl_rdma_qvlist_info *)msg; 3356 int aq_ret = 0; 3357 3358 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 3359 !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) { 3360 aq_ret = -EINVAL; 3361 goto error_param; 3362 } 3363 3364 if (config) { 3365 if (i40e_config_rdma_qvlist(vf, qvlist_info)) 3366 aq_ret = -EINVAL; 3367 } else { 3368 i40e_release_rdma_qvlist(vf); 3369 } 3370 3371 error_param: 3372 /* send the response to the VF */ 3373 return i40e_vc_send_resp_to_vf(vf, 3374 config ? VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP : 3375 VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP, 3376 aq_ret); 3377 } 3378 3379 /** 3380 * i40e_vc_config_rss_key 3381 * @vf: pointer to the VF info 3382 * @msg: pointer to the msg buffer 3383 * 3384 * Configure the VF's RSS key 3385 **/ 3386 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg) 3387 { 3388 struct virtchnl_rss_key *vrk = 3389 (struct virtchnl_rss_key *)msg; 3390 struct i40e_pf *pf = vf->pf; 3391 struct i40e_vsi *vsi = NULL; 3392 int aq_ret = 0; 3393 3394 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || 3395 !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) || 3396 vrk->key_len != I40E_HKEY_ARRAY_SIZE) { 3397 aq_ret = -EINVAL; 3398 goto err; 3399 } 3400 3401 vsi = pf->vsi[vf->lan_vsi_idx]; 3402 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0); 3403 err: 3404 /* send the response to the VF */ 3405 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, 3406 aq_ret); 3407 } 3408 3409 /** 3410 * i40e_vc_config_rss_lut 3411 * @vf: pointer to the VF info 3412 * @msg: pointer to the msg buffer 3413 * 3414 * Configure the VF's RSS LUT 3415 **/ 3416 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg) 3417 { 3418 struct virtchnl_rss_lut *vrl = 3419 (struct virtchnl_rss_lut *)msg; 3420 struct i40e_pf *pf = vf->pf; 3421 struct i40e_vsi *vsi = NULL; 3422 int aq_ret = 0; 3423 u16 i; 3424 3425 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || 3426 !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) || 3427 vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) { 3428 aq_ret = -EINVAL; 3429 goto err; 3430 } 3431 3432 for (i = 0; i < vrl->lut_entries; i++) 3433 if (vrl->lut[i] >= vf->num_queue_pairs) { 3434 aq_ret = -EINVAL; 3435 goto err; 3436 } 3437 3438 vsi = pf->vsi[vf->lan_vsi_idx]; 3439 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE); 3440 /* send the response to the VF */ 3441 err: 3442 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, 3443 aq_ret); 3444 } 3445 3446 /** 3447 * i40e_vc_get_rss_hashcfg 3448 * @vf: pointer to the VF info 3449 * @msg: pointer to the msg buffer 3450 * 3451 * Return the RSS Hash configuration bits allowed by the hardware 3452 **/ 3453 static int i40e_vc_get_rss_hashcfg(struct i40e_vf *vf, u8 *msg) 3454 { 3455 struct virtchnl_rss_hashcfg *vrh = NULL; 3456 struct i40e_pf *pf = vf->pf; 3457 int aq_ret = 0; 3458 int len = 0; 3459 3460 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 3461 aq_ret = -EINVAL; 3462 goto err; 3463 } 3464 len = sizeof(struct virtchnl_rss_hashcfg); 3465 3466 vrh = kzalloc(len, GFP_KERNEL); 3467 if (!vrh) { 3468 aq_ret = -ENOMEM; 3469 len = 0; 3470 goto err; 3471 } 3472 vrh->hashcfg = i40e_pf_get_default_rss_hashcfg(pf); 3473 err: 3474 /* send the response back to the VF */ 3475 aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, 3476 aq_ret, (u8 *)vrh, len); 3477 kfree(vrh); 3478 return aq_ret; 3479 } 3480 3481 /** 3482 * i40e_vc_set_rss_hashcfg 3483 * @vf: pointer to the VF info 3484 * @msg: pointer to the msg buffer 3485 * 3486 * Set the RSS Hash configuration bits for the VF 3487 **/ 3488 static int i40e_vc_set_rss_hashcfg(struct i40e_vf *vf, u8 *msg) 3489 { 3490 struct virtchnl_rss_hashcfg *vrh = 3491 (struct virtchnl_rss_hashcfg *)msg; 3492 struct i40e_pf *pf = vf->pf; 3493 struct i40e_hw *hw = &pf->hw; 3494 int aq_ret = 0; 3495 3496 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 3497 aq_ret = -EINVAL; 3498 goto err; 3499 } 3500 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), 3501 (u32)vrh->hashcfg); 3502 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id), 3503 (u32)(vrh->hashcfg >> 32)); 3504 3505 /* send the response to the VF */ 3506 err: 3507 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, aq_ret); 3508 } 3509 3510 /** 3511 * i40e_vc_enable_vlan_stripping 3512 * @vf: pointer to the VF info 3513 * @msg: pointer to the msg buffer 3514 * 3515 * Enable vlan header stripping for the VF 3516 **/ 3517 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg) 3518 { 3519 struct i40e_vsi *vsi; 3520 int aq_ret = 0; 3521 3522 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 3523 aq_ret = -EINVAL; 3524 goto err; 3525 } 3526 3527 vsi = vf->pf->vsi[vf->lan_vsi_idx]; 3528 i40e_vlan_stripping_enable(vsi); 3529 3530 /* send the response to the VF */ 3531 err: 3532 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, 3533 aq_ret); 3534 } 3535 3536 /** 3537 * i40e_vc_disable_vlan_stripping 3538 * @vf: pointer to the VF info 3539 * @msg: pointer to the msg buffer 3540 * 3541 * Disable vlan header stripping for the VF 3542 **/ 3543 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg) 3544 { 3545 struct i40e_vsi *vsi; 3546 int aq_ret = 0; 3547 3548 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 3549 aq_ret = -EINVAL; 3550 goto err; 3551 } 3552 3553 vsi = vf->pf->vsi[vf->lan_vsi_idx]; 3554 i40e_vlan_stripping_disable(vsi); 3555 3556 /* send the response to the VF */ 3557 err: 3558 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 3559 aq_ret); 3560 } 3561 3562 /** 3563 * i40e_validate_cloud_filter 3564 * @vf: pointer to VF structure 3565 * @tc_filter: pointer to filter requested 3566 * 3567 * This function validates cloud filter programmed as TC filter for ADq 3568 **/ 3569 static int i40e_validate_cloud_filter(struct i40e_vf *vf, 3570 struct virtchnl_filter *tc_filter) 3571 { 3572 struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec; 3573 struct virtchnl_l4_spec data = tc_filter->data.tcp_spec; 3574 struct i40e_pf *pf = vf->pf; 3575 struct i40e_vsi *vsi = NULL; 3576 struct i40e_mac_filter *f; 3577 struct hlist_node *h; 3578 bool found = false; 3579 int bkt; 3580 3581 if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) { 3582 dev_info(&pf->pdev->dev, 3583 "VF %d: ADQ doesn't support this action (%d)\n", 3584 vf->vf_id, tc_filter->action); 3585 goto err; 3586 } 3587 3588 /* action_meta is TC number here to which the filter is applied */ 3589 if (!tc_filter->action_meta || 3590 tc_filter->action_meta > vf->num_tc) { 3591 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n", 3592 vf->vf_id, tc_filter->action_meta); 3593 goto err; 3594 } 3595 3596 /* Check filter if it's programmed for advanced mode or basic mode. 3597 * There are two ADq modes (for VF only), 3598 * 1. Basic mode: intended to allow as many filter options as possible 3599 * to be added to a VF in Non-trusted mode. Main goal is 3600 * to add filters to its own MAC and VLAN id. 3601 * 2. Advanced mode: is for allowing filters to be applied other than 3602 * its own MAC or VLAN. This mode requires the VF to be 3603 * Trusted. 3604 */ 3605 if (mask.dst_mac[0] && !mask.dst_ip[0]) { 3606 vsi = pf->vsi[vf->lan_vsi_idx]; 3607 f = i40e_find_mac(vsi, data.dst_mac); 3608 3609 if (!f) { 3610 dev_info(&pf->pdev->dev, 3611 "Destination MAC %pM doesn't belong to VF %d\n", 3612 data.dst_mac, vf->vf_id); 3613 goto err; 3614 } 3615 3616 if (mask.vlan_id) { 3617 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, 3618 hlist) { 3619 if (f->vlan == ntohs(data.vlan_id)) { 3620 found = true; 3621 break; 3622 } 3623 } 3624 if (!found) { 3625 dev_info(&pf->pdev->dev, 3626 "VF %d doesn't have any VLAN id %u\n", 3627 vf->vf_id, ntohs(data.vlan_id)); 3628 goto err; 3629 } 3630 } 3631 } else { 3632 /* Check if VF is trusted */ 3633 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 3634 dev_err(&pf->pdev->dev, 3635 "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n", 3636 vf->vf_id); 3637 return -EIO; 3638 } 3639 } 3640 3641 if (mask.dst_mac[0] & data.dst_mac[0]) { 3642 if (is_broadcast_ether_addr(data.dst_mac) || 3643 is_zero_ether_addr(data.dst_mac)) { 3644 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n", 3645 vf->vf_id, data.dst_mac); 3646 goto err; 3647 } 3648 } 3649 3650 if (mask.src_mac[0] & data.src_mac[0]) { 3651 if (is_broadcast_ether_addr(data.src_mac) || 3652 is_zero_ether_addr(data.src_mac)) { 3653 dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n", 3654 vf->vf_id, data.src_mac); 3655 goto err; 3656 } 3657 } 3658 3659 if (mask.dst_port & data.dst_port) { 3660 if (!data.dst_port) { 3661 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n", 3662 vf->vf_id); 3663 goto err; 3664 } 3665 } 3666 3667 if (mask.src_port & data.src_port) { 3668 if (!data.src_port) { 3669 dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n", 3670 vf->vf_id); 3671 goto err; 3672 } 3673 } 3674 3675 if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW && 3676 tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) { 3677 dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n", 3678 vf->vf_id); 3679 goto err; 3680 } 3681 3682 if (mask.vlan_id & data.vlan_id) { 3683 if (ntohs(data.vlan_id) > I40E_MAX_VLANID) { 3684 dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n", 3685 vf->vf_id); 3686 goto err; 3687 } 3688 } 3689 3690 return 0; 3691 err: 3692 return -EIO; 3693 } 3694 3695 /** 3696 * i40e_find_vsi_from_seid - searches for the vsi with the given seid 3697 * @vf: pointer to the VF info 3698 * @seid: seid of the vsi it is searching for 3699 **/ 3700 static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid) 3701 { 3702 struct i40e_pf *pf = vf->pf; 3703 struct i40e_vsi *vsi = NULL; 3704 int i; 3705 3706 for (i = 0; i < vf->num_tc ; i++) { 3707 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id); 3708 if (vsi && vsi->seid == seid) 3709 return vsi; 3710 } 3711 return NULL; 3712 } 3713 3714 /** 3715 * i40e_del_all_cloud_filters 3716 * @vf: pointer to the VF info 3717 * 3718 * This function deletes all cloud filters 3719 **/ 3720 static void i40e_del_all_cloud_filters(struct i40e_vf *vf) 3721 { 3722 struct i40e_cloud_filter *cfilter = NULL; 3723 struct i40e_pf *pf = vf->pf; 3724 struct i40e_vsi *vsi = NULL; 3725 struct hlist_node *node; 3726 int ret; 3727 3728 hlist_for_each_entry_safe(cfilter, node, 3729 &vf->cloud_filter_list, cloud_node) { 3730 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid); 3731 3732 if (!vsi) { 3733 dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n", 3734 vf->vf_id, cfilter->seid); 3735 continue; 3736 } 3737 3738 if (cfilter->dst_port) 3739 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, 3740 false); 3741 else 3742 ret = i40e_add_del_cloud_filter(vsi, cfilter, false); 3743 if (ret) 3744 dev_err(&pf->pdev->dev, 3745 "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n", 3746 vf->vf_id, ERR_PTR(ret), 3747 libie_aq_str(pf->hw.aq.asq_last_status)); 3748 3749 hlist_del(&cfilter->cloud_node); 3750 kfree(cfilter); 3751 vf->num_cloud_filters--; 3752 } 3753 } 3754 3755 /** 3756 * i40e_vc_del_cloud_filter 3757 * @vf: pointer to the VF info 3758 * @msg: pointer to the msg buffer 3759 * 3760 * This function deletes a cloud filter programmed as TC filter for ADq 3761 **/ 3762 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg) 3763 { 3764 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; 3765 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; 3766 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; 3767 struct i40e_cloud_filter cfilter, *cf = NULL; 3768 struct i40e_pf *pf = vf->pf; 3769 struct i40e_vsi *vsi = NULL; 3770 struct hlist_node *node; 3771 int aq_ret = 0; 3772 int i, ret; 3773 3774 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 3775 aq_ret = -EINVAL; 3776 goto err; 3777 } 3778 3779 if (!vf->adq_enabled) { 3780 dev_info(&pf->pdev->dev, 3781 "VF %d: ADq not enabled, can't apply cloud filter\n", 3782 vf->vf_id); 3783 aq_ret = -EINVAL; 3784 goto err; 3785 } 3786 3787 if (i40e_validate_cloud_filter(vf, vcf)) { 3788 dev_info(&pf->pdev->dev, 3789 "VF %d: Invalid input, can't apply cloud filter\n", 3790 vf->vf_id); 3791 aq_ret = -EINVAL; 3792 goto err; 3793 } 3794 3795 memset(&cfilter, 0, sizeof(cfilter)); 3796 /* parse destination mac address */ 3797 for (i = 0; i < ETH_ALEN; i++) 3798 cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; 3799 3800 /* parse source mac address */ 3801 for (i = 0; i < ETH_ALEN; i++) 3802 cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; 3803 3804 cfilter.vlan_id = mask.vlan_id & tcf.vlan_id; 3805 cfilter.dst_port = mask.dst_port & tcf.dst_port; 3806 cfilter.src_port = mask.src_port & tcf.src_port; 3807 3808 switch (vcf->flow_type) { 3809 case VIRTCHNL_TCP_V4_FLOW: 3810 cfilter.n_proto = ETH_P_IP; 3811 if (mask.dst_ip[0] & tcf.dst_ip[0]) 3812 memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip, 3813 ARRAY_SIZE(tcf.dst_ip)); 3814 else if (mask.src_ip[0] & tcf.dst_ip[0]) 3815 memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip, 3816 ARRAY_SIZE(tcf.dst_ip)); 3817 break; 3818 case VIRTCHNL_TCP_V6_FLOW: 3819 cfilter.n_proto = ETH_P_IPV6; 3820 if (mask.dst_ip[3] & tcf.dst_ip[3]) 3821 memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip, 3822 sizeof(cfilter.ip.v6.dst_ip6)); 3823 if (mask.src_ip[3] & tcf.src_ip[3]) 3824 memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip, 3825 sizeof(cfilter.ip.v6.src_ip6)); 3826 break; 3827 default: 3828 /* TC filter can be configured based on different combinations 3829 * and in this case IP is not a part of filter config 3830 */ 3831 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", 3832 vf->vf_id); 3833 } 3834 3835 /* get the vsi to which the tc belongs to */ 3836 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; 3837 cfilter.seid = vsi->seid; 3838 cfilter.flags = vcf->field_flags; 3839 3840 /* Deleting TC filter */ 3841 if (tcf.dst_port) 3842 ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false); 3843 else 3844 ret = i40e_add_del_cloud_filter(vsi, &cfilter, false); 3845 if (ret) { 3846 dev_err(&pf->pdev->dev, 3847 "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n", 3848 vf->vf_id, ERR_PTR(ret), 3849 libie_aq_str(pf->hw.aq.asq_last_status)); 3850 goto err; 3851 } 3852 3853 hlist_for_each_entry_safe(cf, node, 3854 &vf->cloud_filter_list, cloud_node) { 3855 if (cf->seid != cfilter.seid) 3856 continue; 3857 if (mask.dst_port) 3858 if (cfilter.dst_port != cf->dst_port) 3859 continue; 3860 if (mask.dst_mac[0]) 3861 if (!ether_addr_equal(cf->src_mac, cfilter.src_mac)) 3862 continue; 3863 /* for ipv4 data to be valid, only first byte of mask is set */ 3864 if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0]) 3865 if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip, 3866 ARRAY_SIZE(tcf.dst_ip))) 3867 continue; 3868 /* for ipv6, mask is set for all sixteen bytes (4 words) */ 3869 if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3]) 3870 if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6, 3871 sizeof(cfilter.ip.v6.src_ip6))) 3872 continue; 3873 if (mask.vlan_id) 3874 if (cfilter.vlan_id != cf->vlan_id) 3875 continue; 3876 3877 hlist_del(&cf->cloud_node); 3878 kfree(cf); 3879 vf->num_cloud_filters--; 3880 } 3881 3882 err: 3883 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER, 3884 aq_ret); 3885 } 3886 3887 /** 3888 * i40e_vc_add_cloud_filter 3889 * @vf: pointer to the VF info 3890 * @msg: pointer to the msg buffer 3891 * 3892 * This function adds a cloud filter programmed as TC filter for ADq 3893 **/ 3894 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) 3895 { 3896 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; 3897 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; 3898 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; 3899 struct i40e_cloud_filter *cfilter = NULL; 3900 struct i40e_pf *pf = vf->pf; 3901 struct i40e_vsi *vsi = NULL; 3902 int aq_ret = 0; 3903 int i; 3904 3905 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 3906 aq_ret = -EINVAL; 3907 goto err_out; 3908 } 3909 3910 if (!vf->adq_enabled) { 3911 dev_info(&pf->pdev->dev, 3912 "VF %d: ADq is not enabled, can't apply cloud filter\n", 3913 vf->vf_id); 3914 aq_ret = -EINVAL; 3915 goto err_out; 3916 } 3917 3918 if (i40e_validate_cloud_filter(vf, vcf)) { 3919 dev_info(&pf->pdev->dev, 3920 "VF %d: Invalid input/s, can't apply cloud filter\n", 3921 vf->vf_id); 3922 aq_ret = -EINVAL; 3923 goto err_out; 3924 } 3925 3926 cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL); 3927 if (!cfilter) { 3928 aq_ret = -ENOMEM; 3929 goto err_out; 3930 } 3931 3932 /* parse destination mac address */ 3933 for (i = 0; i < ETH_ALEN; i++) 3934 cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; 3935 3936 /* parse source mac address */ 3937 for (i = 0; i < ETH_ALEN; i++) 3938 cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; 3939 3940 cfilter->vlan_id = mask.vlan_id & tcf.vlan_id; 3941 cfilter->dst_port = mask.dst_port & tcf.dst_port; 3942 cfilter->src_port = mask.src_port & tcf.src_port; 3943 3944 switch (vcf->flow_type) { 3945 case VIRTCHNL_TCP_V4_FLOW: 3946 cfilter->n_proto = ETH_P_IP; 3947 if (mask.dst_ip[0] & tcf.dst_ip[0]) 3948 memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip, 3949 ARRAY_SIZE(tcf.dst_ip)); 3950 else if (mask.src_ip[0] & tcf.dst_ip[0]) 3951 memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip, 3952 ARRAY_SIZE(tcf.dst_ip)); 3953 break; 3954 case VIRTCHNL_TCP_V6_FLOW: 3955 cfilter->n_proto = ETH_P_IPV6; 3956 if (mask.dst_ip[3] & tcf.dst_ip[3]) 3957 memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip, 3958 sizeof(cfilter->ip.v6.dst_ip6)); 3959 if (mask.src_ip[3] & tcf.src_ip[3]) 3960 memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip, 3961 sizeof(cfilter->ip.v6.src_ip6)); 3962 break; 3963 default: 3964 /* TC filter can be configured based on different combinations 3965 * and in this case IP is not a part of filter config 3966 */ 3967 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", 3968 vf->vf_id); 3969 } 3970 3971 /* get the VSI to which the TC belongs to */ 3972 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; 3973 cfilter->seid = vsi->seid; 3974 cfilter->flags = vcf->field_flags; 3975 3976 /* Adding cloud filter programmed as TC filter */ 3977 if (tcf.dst_port) 3978 aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true); 3979 else 3980 aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true); 3981 if (aq_ret) { 3982 dev_err(&pf->pdev->dev, 3983 "VF %d: Failed to add cloud filter, err %pe aq_err %s\n", 3984 vf->vf_id, ERR_PTR(aq_ret), 3985 libie_aq_str(pf->hw.aq.asq_last_status)); 3986 goto err_free; 3987 } 3988 3989 INIT_HLIST_NODE(&cfilter->cloud_node); 3990 hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list); 3991 /* release the pointer passing it to the collection */ 3992 cfilter = NULL; 3993 vf->num_cloud_filters++; 3994 err_free: 3995 kfree(cfilter); 3996 err_out: 3997 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER, 3998 aq_ret); 3999 } 4000 4001 /** 4002 * i40e_vc_add_qch_msg: Add queue channel and enable ADq 4003 * @vf: pointer to the VF info 4004 * @msg: pointer to the msg buffer 4005 **/ 4006 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) 4007 { 4008 struct virtchnl_tc_info *tci = 4009 (struct virtchnl_tc_info *)msg; 4010 struct i40e_pf *pf = vf->pf; 4011 struct i40e_link_status *ls = &pf->hw.phy.link_info; 4012 int i, adq_request_qps = 0; 4013 int aq_ret = 0; 4014 u64 speed = 0; 4015 4016 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 4017 aq_ret = -EINVAL; 4018 goto err; 4019 } 4020 4021 /* ADq cannot be applied if spoof check is ON */ 4022 if (vf->spoofchk) { 4023 dev_err(&pf->pdev->dev, 4024 "Spoof check is ON, turn it OFF to enable ADq\n"); 4025 aq_ret = -EINVAL; 4026 goto err; 4027 } 4028 4029 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) { 4030 dev_err(&pf->pdev->dev, 4031 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n", 4032 vf->vf_id); 4033 aq_ret = -EINVAL; 4034 goto err; 4035 } 4036 4037 /* max number of traffic classes for VF currently capped at 4 */ 4038 if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) { 4039 dev_err(&pf->pdev->dev, 4040 "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n", 4041 vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI); 4042 aq_ret = -EINVAL; 4043 goto err; 4044 } 4045 4046 /* validate queues for each TC */ 4047 for (i = 0; i < tci->num_tc; i++) 4048 if (!tci->list[i].count || 4049 tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) { 4050 dev_err(&pf->pdev->dev, 4051 "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n", 4052 vf->vf_id, i, tci->list[i].count, 4053 I40E_DEFAULT_QUEUES_PER_VF); 4054 aq_ret = -EINVAL; 4055 goto err; 4056 } 4057 4058 /* need Max VF queues but already have default number of queues */ 4059 adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF; 4060 4061 if (pf->queues_left < adq_request_qps) { 4062 dev_err(&pf->pdev->dev, 4063 "No queues left to allocate to VF %d\n", 4064 vf->vf_id); 4065 aq_ret = -EINVAL; 4066 goto err; 4067 } else { 4068 /* we need to allocate max VF queues to enable ADq so as to 4069 * make sure ADq enabled VF always gets back queues when it 4070 * goes through a reset. 4071 */ 4072 vf->num_queue_pairs = I40E_MAX_VF_QUEUES; 4073 } 4074 4075 /* get link speed in MB to validate rate limit */ 4076 speed = i40e_vc_link_speed2mbps(ls->link_speed); 4077 if (speed == SPEED_UNKNOWN) { 4078 dev_err(&pf->pdev->dev, 4079 "Cannot detect link speed\n"); 4080 aq_ret = -EINVAL; 4081 goto err; 4082 } 4083 4084 /* parse data from the queue channel info */ 4085 vf->num_tc = tci->num_tc; 4086 for (i = 0; i < vf->num_tc; i++) { 4087 if (tci->list[i].max_tx_rate) { 4088 if (tci->list[i].max_tx_rate > speed) { 4089 dev_err(&pf->pdev->dev, 4090 "Invalid max tx rate %llu specified for VF %d.", 4091 tci->list[i].max_tx_rate, 4092 vf->vf_id); 4093 aq_ret = -EINVAL; 4094 goto err; 4095 } else { 4096 vf->ch[i].max_tx_rate = 4097 tci->list[i].max_tx_rate; 4098 } 4099 } 4100 vf->ch[i].num_qps = tci->list[i].count; 4101 } 4102 4103 /* set this flag only after making sure all inputs are sane */ 4104 vf->adq_enabled = true; 4105 4106 /* reset the VF in order to allocate resources */ 4107 i40e_vc_reset_vf(vf, true); 4108 4109 return 0; 4110 4111 /* send the response to the VF */ 4112 err: 4113 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS, 4114 aq_ret); 4115 } 4116 4117 /** 4118 * i40e_vc_del_qch_msg 4119 * @vf: pointer to the VF info 4120 * @msg: pointer to the msg buffer 4121 **/ 4122 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) 4123 { 4124 struct i40e_pf *pf = vf->pf; 4125 int aq_ret = 0; 4126 4127 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 4128 aq_ret = -EINVAL; 4129 goto err; 4130 } 4131 4132 if (vf->adq_enabled) { 4133 i40e_del_all_cloud_filters(vf); 4134 i40e_del_qch(vf); 4135 vf->adq_enabled = false; 4136 vf->num_tc = 0; 4137 dev_info(&pf->pdev->dev, 4138 "Deleting Queue Channels and cloud filters for ADq on VF %d\n", 4139 vf->vf_id); 4140 } else { 4141 dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n", 4142 vf->vf_id); 4143 aq_ret = -EINVAL; 4144 } 4145 4146 /* reset the VF in order to allocate resources */ 4147 i40e_vc_reset_vf(vf, true); 4148 4149 return 0; 4150 4151 err: 4152 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS, 4153 aq_ret); 4154 } 4155 4156 /** 4157 * i40e_vc_process_vf_msg 4158 * @pf: pointer to the PF structure 4159 * @vf_id: source VF id 4160 * @v_opcode: operation code 4161 * @v_retval: unused return value code 4162 * @msg: pointer to the msg buffer 4163 * @msglen: msg length 4164 * 4165 * called from the common aeq/arq handler to 4166 * process request from VF 4167 **/ 4168 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, 4169 u32 __always_unused v_retval, u8 *msg, u16 msglen) 4170 { 4171 struct i40e_hw *hw = &pf->hw; 4172 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id; 4173 struct i40e_vf *vf; 4174 int ret; 4175 4176 pf->vf_aq_requests++; 4177 if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs) 4178 return -EINVAL; 4179 vf = &(pf->vf[local_vf_id]); 4180 4181 /* Check if VF is disabled. */ 4182 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states)) 4183 return -EINVAL; 4184 4185 /* perform basic checks on the msg */ 4186 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); 4187 4188 if (ret) { 4189 i40e_vc_send_resp_to_vf(vf, v_opcode, -EINVAL); 4190 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", 4191 local_vf_id, v_opcode, msglen); 4192 return ret; 4193 } 4194 4195 switch (v_opcode) { 4196 case VIRTCHNL_OP_VERSION: 4197 ret = i40e_vc_get_version_msg(vf, msg); 4198 break; 4199 case VIRTCHNL_OP_GET_VF_RESOURCES: 4200 ret = i40e_vc_get_vf_resources_msg(vf, msg); 4201 i40e_vc_notify_vf_link_state(vf); 4202 break; 4203 case VIRTCHNL_OP_RESET_VF: 4204 i40e_vc_reset_vf(vf, false); 4205 ret = 0; 4206 break; 4207 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: 4208 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg); 4209 break; 4210 case VIRTCHNL_OP_CONFIG_VSI_QUEUES: 4211 ret = i40e_vc_config_queues_msg(vf, msg); 4212 break; 4213 case VIRTCHNL_OP_CONFIG_IRQ_MAP: 4214 ret = i40e_vc_config_irq_map_msg(vf, msg); 4215 break; 4216 case VIRTCHNL_OP_ENABLE_QUEUES: 4217 ret = i40e_vc_enable_queues_msg(vf, msg); 4218 i40e_vc_notify_vf_link_state(vf); 4219 break; 4220 case VIRTCHNL_OP_DISABLE_QUEUES: 4221 ret = i40e_vc_disable_queues_msg(vf, msg); 4222 break; 4223 case VIRTCHNL_OP_ADD_ETH_ADDR: 4224 ret = i40e_vc_add_mac_addr_msg(vf, msg); 4225 break; 4226 case VIRTCHNL_OP_DEL_ETH_ADDR: 4227 ret = i40e_vc_del_mac_addr_msg(vf, msg); 4228 break; 4229 case VIRTCHNL_OP_ADD_VLAN: 4230 ret = i40e_vc_add_vlan_msg(vf, msg); 4231 break; 4232 case VIRTCHNL_OP_DEL_VLAN: 4233 ret = i40e_vc_remove_vlan_msg(vf, msg); 4234 break; 4235 case VIRTCHNL_OP_GET_STATS: 4236 ret = i40e_vc_get_stats_msg(vf, msg); 4237 break; 4238 case VIRTCHNL_OP_RDMA: 4239 ret = i40e_vc_rdma_msg(vf, msg, msglen); 4240 break; 4241 case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP: 4242 ret = i40e_vc_rdma_qvmap_msg(vf, msg, true); 4243 break; 4244 case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP: 4245 ret = i40e_vc_rdma_qvmap_msg(vf, msg, false); 4246 break; 4247 case VIRTCHNL_OP_CONFIG_RSS_KEY: 4248 ret = i40e_vc_config_rss_key(vf, msg); 4249 break; 4250 case VIRTCHNL_OP_CONFIG_RSS_LUT: 4251 ret = i40e_vc_config_rss_lut(vf, msg); 4252 break; 4253 case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS: 4254 ret = i40e_vc_get_rss_hashcfg(vf, msg); 4255 break; 4256 case VIRTCHNL_OP_SET_RSS_HASHCFG: 4257 ret = i40e_vc_set_rss_hashcfg(vf, msg); 4258 break; 4259 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 4260 ret = i40e_vc_enable_vlan_stripping(vf, msg); 4261 break; 4262 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 4263 ret = i40e_vc_disable_vlan_stripping(vf, msg); 4264 break; 4265 case VIRTCHNL_OP_REQUEST_QUEUES: 4266 ret = i40e_vc_request_queues_msg(vf, msg); 4267 break; 4268 case VIRTCHNL_OP_ENABLE_CHANNELS: 4269 ret = i40e_vc_add_qch_msg(vf, msg); 4270 break; 4271 case VIRTCHNL_OP_DISABLE_CHANNELS: 4272 ret = i40e_vc_del_qch_msg(vf, msg); 4273 break; 4274 case VIRTCHNL_OP_ADD_CLOUD_FILTER: 4275 ret = i40e_vc_add_cloud_filter(vf, msg); 4276 break; 4277 case VIRTCHNL_OP_DEL_CLOUD_FILTER: 4278 ret = i40e_vc_del_cloud_filter(vf, msg); 4279 break; 4280 case VIRTCHNL_OP_UNKNOWN: 4281 default: 4282 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", 4283 v_opcode, local_vf_id); 4284 ret = i40e_vc_send_resp_to_vf(vf, v_opcode, 4285 -EOPNOTSUPP); 4286 break; 4287 } 4288 4289 return ret; 4290 } 4291 4292 /** 4293 * i40e_vc_process_vflr_event 4294 * @pf: pointer to the PF structure 4295 * 4296 * called from the vlfr irq handler to 4297 * free up VF resources and state variables 4298 **/ 4299 int i40e_vc_process_vflr_event(struct i40e_pf *pf) 4300 { 4301 struct i40e_hw *hw = &pf->hw; 4302 u32 reg, reg_idx, bit_idx; 4303 struct i40e_vf *vf; 4304 int vf_id; 4305 4306 if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state)) 4307 return 0; 4308 4309 /* Re-enable the VFLR interrupt cause here, before looking for which 4310 * VF got reset. Otherwise, if another VF gets a reset while the 4311 * first one is being processed, that interrupt will be lost, and 4312 * that VF will be stuck in reset forever. 4313 */ 4314 reg = rd32(hw, I40E_PFINT_ICR0_ENA); 4315 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK; 4316 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 4317 i40e_flush(hw); 4318 4319 clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state); 4320 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { 4321 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; 4322 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; 4323 /* read GLGEN_VFLRSTAT register to find out the flr VFs */ 4324 vf = &pf->vf[vf_id]; 4325 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); 4326 if (reg & BIT(bit_idx)) 4327 /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */ 4328 if (!i40e_reset_vf(vf, true)) { 4329 /* At least one VF did not finish resetting, retry next time */ 4330 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state); 4331 } 4332 } 4333 4334 return 0; 4335 } 4336 4337 /** 4338 * i40e_validate_vf 4339 * @pf: the physical function 4340 * @vf_id: VF identifier 4341 * 4342 * Check that the VF is enabled and the VSI exists. 4343 * 4344 * Returns 0 on success, negative on failure 4345 **/ 4346 static int i40e_validate_vf(struct i40e_pf *pf, int vf_id) 4347 { 4348 struct i40e_vsi *vsi; 4349 struct i40e_vf *vf; 4350 int ret = 0; 4351 4352 if (vf_id >= pf->num_alloc_vfs) { 4353 dev_err(&pf->pdev->dev, 4354 "Invalid VF Identifier %d\n", vf_id); 4355 ret = -EINVAL; 4356 goto err_out; 4357 } 4358 vf = &pf->vf[vf_id]; 4359 vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id); 4360 if (!vsi) 4361 ret = -EINVAL; 4362 err_out: 4363 return ret; 4364 } 4365 4366 /** 4367 * i40e_check_vf_init_timeout 4368 * @vf: the virtual function 4369 * 4370 * Check that the VF's initialization was successfully done and if not 4371 * wait up to 300ms for its finish. 4372 * 4373 * Returns true when VF is initialized, false on timeout 4374 **/ 4375 static bool i40e_check_vf_init_timeout(struct i40e_vf *vf) 4376 { 4377 int i; 4378 4379 /* When the VF is resetting wait until it is done. 4380 * It can take up to 200 milliseconds, but wait for 4381 * up to 300 milliseconds to be safe. 4382 */ 4383 for (i = 0; i < 15; i++) { 4384 if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) 4385 return true; 4386 msleep(20); 4387 } 4388 4389 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { 4390 dev_err(&vf->pf->pdev->dev, 4391 "VF %d still in reset. Try again.\n", vf->vf_id); 4392 return false; 4393 } 4394 4395 return true; 4396 } 4397 4398 /** 4399 * i40e_ndo_set_vf_mac 4400 * @netdev: network interface device structure 4401 * @vf_id: VF identifier 4402 * @mac: mac address 4403 * 4404 * program VF mac address 4405 **/ 4406 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) 4407 { 4408 struct i40e_netdev_priv *np = netdev_priv(netdev); 4409 struct i40e_vsi *vsi = np->vsi; 4410 struct i40e_pf *pf = vsi->back; 4411 struct i40e_mac_filter *f; 4412 struct i40e_vf *vf; 4413 int ret = 0; 4414 struct hlist_node *h; 4415 int bkt; 4416 4417 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4418 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4419 return -EAGAIN; 4420 } 4421 4422 /* validate the request */ 4423 ret = i40e_validate_vf(pf, vf_id); 4424 if (ret) 4425 goto error_param; 4426 4427 vf = &pf->vf[vf_id]; 4428 if (!i40e_check_vf_init_timeout(vf)) { 4429 ret = -EAGAIN; 4430 goto error_param; 4431 } 4432 vsi = pf->vsi[vf->lan_vsi_idx]; 4433 4434 if (is_multicast_ether_addr(mac)) { 4435 dev_err(&pf->pdev->dev, 4436 "Invalid Ethernet address %pM for VF %d\n", mac, vf_id); 4437 ret = -EINVAL; 4438 goto error_param; 4439 } 4440 4441 /* Lock once because below invoked function add/del_filter requires 4442 * mac_filter_hash_lock to be held 4443 */ 4444 spin_lock_bh(&vsi->mac_filter_hash_lock); 4445 4446 /* delete the temporary mac address */ 4447 if (!is_zero_ether_addr(vf->default_lan_addr.addr)) 4448 i40e_del_mac_filter(vsi, vf->default_lan_addr.addr); 4449 4450 /* Delete all the filters for this VSI - we're going to kill it 4451 * anyway. 4452 */ 4453 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) 4454 __i40e_del_filter(vsi, f); 4455 4456 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4457 4458 /* program mac filter */ 4459 if (i40e_sync_vsi_filters(vsi)) { 4460 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); 4461 ret = -EIO; 4462 goto error_param; 4463 } 4464 ether_addr_copy(vf->default_lan_addr.addr, mac); 4465 4466 if (is_zero_ether_addr(mac)) { 4467 vf->pf_set_mac = false; 4468 dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id); 4469 } else { 4470 vf->pf_set_mac = true; 4471 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", 4472 mac, vf_id); 4473 } 4474 4475 /* Force the VF interface down so it has to bring up with new MAC 4476 * address 4477 */ 4478 i40e_vc_reset_vf(vf, true); 4479 dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n"); 4480 4481 error_param: 4482 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4483 return ret; 4484 } 4485 4486 /** 4487 * i40e_ndo_set_vf_port_vlan 4488 * @netdev: network interface device structure 4489 * @vf_id: VF identifier 4490 * @vlan_id: mac address 4491 * @qos: priority setting 4492 * @vlan_proto: vlan protocol 4493 * 4494 * program VF vlan id and/or qos 4495 **/ 4496 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, 4497 u16 vlan_id, u8 qos, __be16 vlan_proto) 4498 { 4499 u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT); 4500 struct i40e_netdev_priv *np = netdev_priv(netdev); 4501 bool allmulti = false, alluni = false; 4502 struct i40e_pf *pf = np->vsi->back; 4503 struct i40e_vsi *vsi; 4504 struct i40e_vf *vf; 4505 int ret = 0; 4506 4507 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4508 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4509 return -EAGAIN; 4510 } 4511 4512 /* validate the request */ 4513 ret = i40e_validate_vf(pf, vf_id); 4514 if (ret) 4515 goto error_pvid; 4516 4517 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) { 4518 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n"); 4519 ret = -EINVAL; 4520 goto error_pvid; 4521 } 4522 4523 if (vlan_proto != htons(ETH_P_8021Q)) { 4524 dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n"); 4525 ret = -EPROTONOSUPPORT; 4526 goto error_pvid; 4527 } 4528 4529 vf = &pf->vf[vf_id]; 4530 if (!i40e_check_vf_init_timeout(vf)) { 4531 ret = -EAGAIN; 4532 goto error_pvid; 4533 } 4534 vsi = pf->vsi[vf->lan_vsi_idx]; 4535 4536 if (le16_to_cpu(vsi->info.pvid) == vlanprio) 4537 /* duplicate request, so just return success */ 4538 goto error_pvid; 4539 4540 i40e_vlan_stripping_enable(vsi); 4541 4542 /* Locked once because multiple functions below iterate list */ 4543 spin_lock_bh(&vsi->mac_filter_hash_lock); 4544 4545 /* Check for condition where there was already a port VLAN ID 4546 * filter set and now it is being deleted by setting it to zero. 4547 * Additionally check for the condition where there was a port 4548 * VLAN but now there is a new and different port VLAN being set. 4549 * Before deleting all the old VLAN filters we must add new ones 4550 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our 4551 * MAC addresses deleted. 4552 */ 4553 if ((!(vlan_id || qos) || 4554 vlanprio != le16_to_cpu(vsi->info.pvid)) && 4555 vsi->info.pvid) { 4556 ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY); 4557 if (ret) { 4558 dev_info(&vsi->back->pdev->dev, 4559 "add VF VLAN failed, ret=%d aq_err=%d\n", ret, 4560 vsi->back->hw.aq.asq_last_status); 4561 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4562 goto error_pvid; 4563 } 4564 } 4565 4566 if (vsi->info.pvid) { 4567 /* remove all filters on the old VLAN */ 4568 i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) & 4569 VLAN_VID_MASK)); 4570 } 4571 4572 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4573 4574 /* disable promisc modes in case they were enabled */ 4575 ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, 4576 allmulti, alluni); 4577 if (ret) { 4578 dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n"); 4579 goto error_pvid; 4580 } 4581 4582 if (vlan_id || qos) 4583 ret = i40e_vsi_add_pvid(vsi, vlanprio); 4584 else 4585 i40e_vsi_remove_pvid(vsi); 4586 spin_lock_bh(&vsi->mac_filter_hash_lock); 4587 4588 if (vlan_id) { 4589 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", 4590 vlan_id, qos, vf_id); 4591 4592 /* add new VLAN filter for each MAC */ 4593 ret = i40e_add_vlan_all_mac(vsi, vlan_id); 4594 if (ret) { 4595 dev_info(&vsi->back->pdev->dev, 4596 "add VF VLAN failed, ret=%d aq_err=%d\n", ret, 4597 vsi->back->hw.aq.asq_last_status); 4598 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4599 goto error_pvid; 4600 } 4601 4602 /* remove the previously added non-VLAN MAC filters */ 4603 i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY); 4604 } 4605 4606 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4607 4608 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 4609 alluni = true; 4610 4611 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 4612 allmulti = true; 4613 4614 /* Schedule the worker thread to take care of applying changes */ 4615 i40e_service_event_schedule(vsi->back); 4616 4617 if (ret) { 4618 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n"); 4619 goto error_pvid; 4620 } 4621 4622 /* The Port VLAN needs to be saved across resets the same as the 4623 * default LAN MAC address. 4624 */ 4625 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid); 4626 4627 i40e_vc_reset_vf(vf, true); 4628 /* During reset the VF got a new VSI, so refresh a pointer. */ 4629 vsi = pf->vsi[vf->lan_vsi_idx]; 4630 4631 ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni); 4632 if (ret) { 4633 dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n"); 4634 goto error_pvid; 4635 } 4636 4637 ret = 0; 4638 4639 error_pvid: 4640 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4641 return ret; 4642 } 4643 4644 /** 4645 * i40e_ndo_set_vf_bw 4646 * @netdev: network interface device structure 4647 * @vf_id: VF identifier 4648 * @min_tx_rate: Minimum Tx rate 4649 * @max_tx_rate: Maximum Tx rate 4650 * 4651 * configure VF Tx rate 4652 **/ 4653 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, 4654 int max_tx_rate) 4655 { 4656 struct i40e_netdev_priv *np = netdev_priv(netdev); 4657 struct i40e_pf *pf = np->vsi->back; 4658 struct i40e_vsi *vsi; 4659 struct i40e_vf *vf; 4660 int ret = 0; 4661 4662 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4663 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4664 return -EAGAIN; 4665 } 4666 4667 /* validate the request */ 4668 ret = i40e_validate_vf(pf, vf_id); 4669 if (ret) 4670 goto error; 4671 4672 if (min_tx_rate) { 4673 dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n", 4674 min_tx_rate, vf_id); 4675 ret = -EINVAL; 4676 goto error; 4677 } 4678 4679 vf = &pf->vf[vf_id]; 4680 if (!i40e_check_vf_init_timeout(vf)) { 4681 ret = -EAGAIN; 4682 goto error; 4683 } 4684 vsi = pf->vsi[vf->lan_vsi_idx]; 4685 4686 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); 4687 if (ret) 4688 goto error; 4689 4690 vf->tx_rate = max_tx_rate; 4691 error: 4692 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4693 return ret; 4694 } 4695 4696 /** 4697 * i40e_ndo_get_vf_config 4698 * @netdev: network interface device structure 4699 * @vf_id: VF identifier 4700 * @ivi: VF configuration structure 4701 * 4702 * return VF configuration 4703 **/ 4704 int i40e_ndo_get_vf_config(struct net_device *netdev, 4705 int vf_id, struct ifla_vf_info *ivi) 4706 { 4707 struct i40e_netdev_priv *np = netdev_priv(netdev); 4708 struct i40e_vsi *vsi = np->vsi; 4709 struct i40e_pf *pf = vsi->back; 4710 struct i40e_vf *vf; 4711 int ret = 0; 4712 4713 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4714 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4715 return -EAGAIN; 4716 } 4717 4718 /* validate the request */ 4719 ret = i40e_validate_vf(pf, vf_id); 4720 if (ret) 4721 goto error_param; 4722 4723 vf = &pf->vf[vf_id]; 4724 /* first vsi is always the LAN vsi */ 4725 vsi = pf->vsi[vf->lan_vsi_idx]; 4726 if (!vsi) { 4727 ret = -ENOENT; 4728 goto error_param; 4729 } 4730 4731 ivi->vf = vf_id; 4732 4733 ether_addr_copy(ivi->mac, vf->default_lan_addr.addr); 4734 4735 ivi->max_tx_rate = vf->tx_rate; 4736 ivi->min_tx_rate = 0; 4737 ivi->vlan = le16_get_bits(vsi->info.pvid, I40E_VLAN_MASK); 4738 ivi->qos = le16_get_bits(vsi->info.pvid, I40E_PRIORITY_MASK); 4739 if (vf->link_forced == false) 4740 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; 4741 else if (vf->link_up == true) 4742 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; 4743 else 4744 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; 4745 ivi->spoofchk = vf->spoofchk; 4746 ivi->trusted = vf->trusted; 4747 ret = 0; 4748 4749 error_param: 4750 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4751 return ret; 4752 } 4753 4754 /** 4755 * i40e_ndo_set_vf_link_state 4756 * @netdev: network interface device structure 4757 * @vf_id: VF identifier 4758 * @link: required link state 4759 * 4760 * Set the link state of a specified VF, regardless of physical link state 4761 **/ 4762 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) 4763 { 4764 struct i40e_netdev_priv *np = netdev_priv(netdev); 4765 struct i40e_pf *pf = np->vsi->back; 4766 struct i40e_link_status *ls = &pf->hw.phy.link_info; 4767 struct virtchnl_pf_event pfe; 4768 struct i40e_hw *hw = &pf->hw; 4769 struct i40e_vsi *vsi; 4770 unsigned long q_map; 4771 struct i40e_vf *vf; 4772 int abs_vf_id; 4773 int ret = 0; 4774 int tmp; 4775 4776 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4777 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4778 return -EAGAIN; 4779 } 4780 4781 /* validate the request */ 4782 if (vf_id >= pf->num_alloc_vfs) { 4783 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 4784 ret = -EINVAL; 4785 goto error_out; 4786 } 4787 4788 vf = &pf->vf[vf_id]; 4789 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 4790 4791 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; 4792 pfe.severity = PF_EVENT_SEVERITY_INFO; 4793 4794 switch (link) { 4795 case IFLA_VF_LINK_STATE_AUTO: 4796 vf->link_forced = false; 4797 vf->is_disabled_from_host = false; 4798 /* reset needed to reinit VF resources */ 4799 i40e_vc_reset_vf(vf, true); 4800 i40e_set_vf_link_state(vf, &pfe, ls); 4801 break; 4802 case IFLA_VF_LINK_STATE_ENABLE: 4803 vf->link_forced = true; 4804 vf->link_up = true; 4805 vf->is_disabled_from_host = false; 4806 /* reset needed to reinit VF resources */ 4807 i40e_vc_reset_vf(vf, true); 4808 i40e_set_vf_link_state(vf, &pfe, ls); 4809 break; 4810 case IFLA_VF_LINK_STATE_DISABLE: 4811 vf->link_forced = true; 4812 vf->link_up = false; 4813 i40e_set_vf_link_state(vf, &pfe, ls); 4814 4815 vsi = pf->vsi[vf->lan_vsi_idx]; 4816 q_map = BIT(vsi->num_queue_pairs) - 1; 4817 4818 vf->is_disabled_from_host = true; 4819 4820 /* Try to stop both Tx&Rx rings even if one of the calls fails 4821 * to ensure we stop the rings even in case of errors. 4822 * If any of them returns with an error then the first 4823 * error that occurred will be returned. 4824 */ 4825 tmp = i40e_ctrl_vf_tx_rings(vsi, q_map, false); 4826 ret = i40e_ctrl_vf_rx_rings(vsi, q_map, false); 4827 4828 ret = tmp ? tmp : ret; 4829 break; 4830 default: 4831 ret = -EINVAL; 4832 goto error_out; 4833 } 4834 /* Notify the VF of its new link state */ 4835 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 4836 0, (u8 *)&pfe, sizeof(pfe), NULL); 4837 4838 error_out: 4839 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4840 return ret; 4841 } 4842 4843 /** 4844 * i40e_ndo_set_vf_spoofchk 4845 * @netdev: network interface device structure 4846 * @vf_id: VF identifier 4847 * @enable: flag to enable or disable feature 4848 * 4849 * Enable or disable VF spoof checking 4850 **/ 4851 int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) 4852 { 4853 struct i40e_netdev_priv *np = netdev_priv(netdev); 4854 struct i40e_vsi *vsi = np->vsi; 4855 struct i40e_pf *pf = vsi->back; 4856 struct i40e_vsi_context ctxt; 4857 struct i40e_hw *hw = &pf->hw; 4858 struct i40e_vf *vf; 4859 int ret = 0; 4860 4861 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4862 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4863 return -EAGAIN; 4864 } 4865 4866 /* validate the request */ 4867 if (vf_id >= pf->num_alloc_vfs) { 4868 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 4869 ret = -EINVAL; 4870 goto out; 4871 } 4872 4873 vf = &(pf->vf[vf_id]); 4874 if (!i40e_check_vf_init_timeout(vf)) { 4875 ret = -EAGAIN; 4876 goto out; 4877 } 4878 4879 if (enable == vf->spoofchk) 4880 goto out; 4881 4882 vf->spoofchk = enable; 4883 memset(&ctxt, 0, sizeof(ctxt)); 4884 ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid; 4885 ctxt.pf_num = pf->hw.pf_id; 4886 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); 4887 if (enable) 4888 ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | 4889 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); 4890 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); 4891 if (ret) { 4892 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n", 4893 ret); 4894 ret = -EIO; 4895 } 4896 out: 4897 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4898 return ret; 4899 } 4900 4901 /** 4902 * i40e_ndo_set_vf_trust 4903 * @netdev: network interface device structure of the pf 4904 * @vf_id: VF identifier 4905 * @setting: trust setting 4906 * 4907 * Enable or disable VF trust setting 4908 **/ 4909 int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) 4910 { 4911 struct i40e_netdev_priv *np = netdev_priv(netdev); 4912 struct i40e_pf *pf = np->vsi->back; 4913 struct i40e_vf *vf; 4914 int ret = 0; 4915 4916 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4917 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4918 return -EAGAIN; 4919 } 4920 4921 /* validate the request */ 4922 if (vf_id >= pf->num_alloc_vfs) { 4923 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 4924 ret = -EINVAL; 4925 goto out; 4926 } 4927 4928 if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { 4929 dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n"); 4930 ret = -EINVAL; 4931 goto out; 4932 } 4933 4934 vf = &pf->vf[vf_id]; 4935 4936 if (setting == vf->trusted) 4937 goto out; 4938 4939 vf->trusted = setting; 4940 4941 /* request PF to sync mac/vlan filters for the VF */ 4942 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); 4943 pf->vsi[vf->lan_vsi_idx]->flags |= I40E_VSI_FLAG_FILTER_CHANGED; 4944 4945 i40e_vc_reset_vf(vf, true); 4946 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n", 4947 vf_id, setting ? "" : "un"); 4948 4949 if (vf->adq_enabled) { 4950 if (!vf->trusted) { 4951 dev_info(&pf->pdev->dev, 4952 "VF %u no longer Trusted, deleting all cloud filters\n", 4953 vf_id); 4954 i40e_del_all_cloud_filters(vf); 4955 } 4956 } 4957 4958 out: 4959 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4960 return ret; 4961 } 4962 4963 /** 4964 * i40e_get_vf_stats - populate some stats for the VF 4965 * @netdev: the netdev of the PF 4966 * @vf_id: the host OS identifier (0-127) 4967 * @vf_stats: pointer to the OS memory to be initialized 4968 */ 4969 int i40e_get_vf_stats(struct net_device *netdev, int vf_id, 4970 struct ifla_vf_stats *vf_stats) 4971 { 4972 struct i40e_netdev_priv *np = netdev_priv(netdev); 4973 struct i40e_pf *pf = np->vsi->back; 4974 struct i40e_eth_stats *stats; 4975 struct i40e_vsi *vsi; 4976 struct i40e_vf *vf; 4977 4978 /* validate the request */ 4979 if (i40e_validate_vf(pf, vf_id)) 4980 return -EINVAL; 4981 4982 vf = &pf->vf[vf_id]; 4983 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { 4984 dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id); 4985 return -EBUSY; 4986 } 4987 4988 vsi = pf->vsi[vf->lan_vsi_idx]; 4989 if (!vsi) 4990 return -EINVAL; 4991 4992 i40e_update_eth_stats(vsi); 4993 stats = &vsi->eth_stats; 4994 4995 memset(vf_stats, 0, sizeof(*vf_stats)); 4996 4997 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast + 4998 stats->rx_multicast; 4999 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast + 5000 stats->tx_multicast; 5001 vf_stats->rx_bytes = stats->rx_bytes; 5002 vf_stats->tx_bytes = stats->tx_bytes; 5003 vf_stats->broadcast = stats->rx_broadcast; 5004 vf_stats->multicast = stats->rx_multicast; 5005 vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other; 5006 vf_stats->tx_dropped = stats->tx_errors; 5007 5008 return 0; 5009 } 5010