// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"
#include "i40e_lan_hmc.h"
#include "i40e_virtchnl_pf.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 int v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
		/* Not all VFs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}

/**
 * i40e_vc_link_speed2mbps
 * converts i40e_aq_link_speed to integer value of Mbps
 * @link_speed: the speed to convert
 *
 * return the speed as direct value of Mbps.
 **/
static u32
i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed)
{
	switch (link_speed) {
	case I40E_LINK_SPEED_100MB:
		return SPEED_100;
	case I40E_LINK_SPEED_1GB:
		return SPEED_1000;
	case I40E_LINK_SPEED_2_5GB:
		return SPEED_2500;
	case I40E_LINK_SPEED_5GB:
		return SPEED_5000;
	case I40E_LINK_SPEED_10GB:
		return SPEED_10000;
	case I40E_LINK_SPEED_20GB:
		return SPEED_20000;
	case I40E_LINK_SPEED_25GB:
		return SPEED_25000;
	case I40E_LINK_SPEED_40GB:
		return SPEED_40000;
	case I40E_LINK_SPEED_UNKNOWN:
		return SPEED_UNKNOWN;
	}
	return SPEED_UNKNOWN;
}

/**
 * i40e_set_vf_link_state
 * @vf: pointer to the VF structure
 * @pfe: pointer to PF event structure
 * @ls: pointer to link status structure
 *
 * set a link state on a single vf
 **/
static void i40e_set_vf_link_state(struct i40e_vf *vf,
				   struct virtchnl_pf_event *pfe,
				   struct i40e_link_status *ls)
{
	u8 link_status = ls->link_info & I40E_AQ_LINK_UP;

	if (vf->link_forced)
		link_status = vf->link_up;

	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
		pfe->event_data.link_event_adv.link_speed = link_status ?
			i40e_vc_link_speed2mbps(ls->link_speed) : 0;
		pfe->event_data.link_event_adv.link_status = link_status;
	} else {
		pfe->event_data.link_event.link_speed = link_status ?
			i40e_virtchnl_link_speed(ls->link_speed) : 0;
		pfe->event_data.link_event.link_status = link_status;
	}
}

/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	i40e_set_vf_link_state(vf, &pfe, ls);

	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

#ifdef CONFIG_PCI_IOV
/**
 * i40e_restore_all_vfs_msi_state
 * @pdev: pointer to a pci_dev structure
 *
 * Restore the MSI state of all VFs that belong to the given PF device.
 **/
void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev)
{
	u16 vf_id;
	u16 pos;

	/* Continue only if this is a PF */
	if (!pdev->is_physfn)
		return;

	if (!pci_num_vf(pdev))
		return;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		struct pci_dev *vf_dev = NULL;

		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
		while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) {
			if (vf_dev->is_virtfn && vf_dev->physfn == pdev)
				pci_restore_msi_state(vf_dev);
		}
	}
}
#endif /* CONFIG_PCI_IOV */

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}

/***********************misc routines*****************************/

/**
 * i40e_vc_reset_vf
 * @vf: pointer to the VF info
 * @notify_vf: notify vf about reset or not
 * Reset VF handler.
 **/
void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	if (notify_vf)
		i40e_vc_notify_vf_reset(vf);

	/* We want to ensure that an actual reset is initiated after this
	 * function is called. However, we do not want to wait forever, so
	 * we'll give it a reasonable time and print a message if we fail to
	 * initiate a reset.
	 */
	for (i = 0; i < 20; i++) {
		/* If the PF is in the VFs-releasing state, resetting a VF is
		 * impossible, so leave it.
		 */
		if (test_bit(__I40E_VFS_RELEASING, pf->state))
			return;
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

	if (notify_vf)
		dev_warn(&vf->pf->pdev->dev,
			 "Failed to initiate reset for VF %d after 200 milliseconds\n",
			 vf->vf_id);
	else
		dev_dbg(&vf->pf->pdev->dev,
			"Failed to initiate reset for VF %d after 200 milliseconds\n",
			vf->vf_id);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u16 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}
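
/*
 * Worked example (illustrative, values assumed): with a contiguous mapping
 * and queue_mapping[0] == 64, VSI-relative queue 3 resolves to PF queue
 * 64 + 3 = 67. With a noncontiguous mapping, queue_mapping[3] is looked up
 * directly instead.
 */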

/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queue_id: queue number
 *
 * wrapper function to get pf_queue_id handling ADq code as well
 **/
static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
{
	int i;

	if (vf->adq_enabled) {
		/* Although the VF considers all its queues (there can be
		 * 1 to 16) as its own, they may actually belong to different
		 * VSIs (up to 4). We need to find out which VSI each queue
		 * belongs to.
		 */
		for (i = 0; i < vf->num_tc; i++) {
			if (queue_id < vf->ch[i].num_qps) {
				vsi_id = vf->ch[i].vsi_id;
				break;
			}
			/* find the right queue id which is relative to the
			 * given VSI.
			 */
			queue_id -= vf->ch[i].num_qps;
		}
	}

	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
}
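
/*
 * Illustrative example (values assumed): with ADq enabled and four TCs of
 * four queue pairs each (ch[i].num_qps == 4), VF-relative queue 9 walks the
 * channels as 9 - 4 - 4 = 1 and therefore resolves to ch[2].vsi_id,
 * VSI-relative queue 1.
 */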

/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (vector_id == 0)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
			((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
			(vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_get_real_pf_qid(vf,
							   vsi_id,
							   vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		      (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		      (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		      BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		      FIELD_PREP(I40E_QINT_RQCTL_ITR_INDX_MASK, itr_idx);
		wr32(hw, reg_idx, reg);
	}

	/* If the VF is running in polling mode and using interrupt zero,
	 * we need to disable auto-masking when enabling interrupt zero
	 * for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}
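
/*
 * Register layout sketch (derived from the code above, example values
 * assumed): vector 0 of a VF uses VPINT_LNKLST0(vf_id), while vector v > 0
 * uses VPINT_LNKLSTN((num_msix_vectors_vf - 1) * vf_id + v - 1); e.g. with
 * 5 vectors per VF, VF 2 / vector 3 lands on LNKLSTN index 10. Within
 * linklistmap, RX queue q occupies bit 2 * q and TX queue q occupies bit
 * 2 * q + 1 (I40E_VIRTCHNL_SUPPORTED_QTYPES == 2), so the resulting linked
 * list alternates RX/TX entries in queue order.
 */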

/**
 * i40e_release_rdma_qvlist
 * @vf: pointer to the VF.
 *
 **/
static void i40e_release_rdma_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_rdma_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_rdma_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK,
						 reg);
			next_q_type = FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK,
						reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			      (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}

/**
 * i40e_config_rdma_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int
i40e_config_rdma_qvlist(struct i40e_vf *vf,
			struct virtchnl_rdma_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_rdma_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	size_t size;
	u32 msix_vf;
	int ret = 0;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	if (qvlist_info->num_vectors > msix_vf) {
		dev_warn(&pf->pdev->dev,
			 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
			 qvlist_info->num_vectors,
			 msix_vf);
		ret = -EINVAL;
		goto err_out;
	}

	kfree(vf->qvlist_info);
	size = virtchnl_struct_size(vf->qvlist_info, qv_info,
				    qvlist_info->num_vectors);
	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
	if (!vf->qvlist_info) {
		ret = -ENOMEM;
		goto err_out;
	}
	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
			ret = -EINVAL;
			goto err_free;
		}

		v_idx = qv_info->v_idx;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK,
				       reg);
		next_q_type = FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK,
					reg);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			       (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			       (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			       (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			       (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			      (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			       (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			       (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err_free:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
err_out:
	return ret;
}
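
/*
 * List-splice sketch (illustrative): when a CEQ is added to a vector whose
 * list currently reads LNKLSTN -> X, the code above rewrites it to
 * LNKLSTN -> CEQ(ceq_idx) -> X: the old head index/type is read out of
 * LNKLSTN, written into CEQCTL as the CEQ's next-queue, and the CEQ becomes
 * the new first entry.
 */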

/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;

	/* ring_len has to be multiple of 8 */
	if (!IS_ALIGNED(info->ring_len, 8) ||
	    info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
		ret = -EINVAL;
		goto error_context;
	}
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_PF_INDX_MASK, hw->pf_id);
	qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK,
			      vf->vf_id + hw->func_caps.vf_base_id);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}
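
/*
 * Context units (as implied by the divisions above): the HMC Tx queue
 * context stores the ring base address in 128-byte units, hence
 * dma_ring_addr / 128; e.g. a ring at DMA address 0x10000 is programmed as
 * base 0x200. qlen is in descriptors and must be a multiple of 8.
 */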

/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	int ret = 0;

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;

	/* ring_len has to be multiple of 32 */
	if (!IS_ALIGNED(info->ring_len, 32) ||
	    info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
				  I40E_RX_SPLIT_IP |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* if port VLAN is configured increase the max packet size */
	if (vsi->info.pvid)
		rx_ctx.rxmax += VLAN_HLEN;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}
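
/*
 * Buffer sizing sketch (illustrative; assumes the usual 128-byte dbuff and
 * 64-byte hbuff granularity behind I40E_RXQ_CTX_DBUFF_SHIFT and
 * I40E_RXQ_CTX_HBUFF_SHIFT): the (16 * 1024) - 128 data-buffer cap then
 * corresponds to a maximum dbuff field value of 127, and the
 * (2 * 1024) - 64 header cap to a maximum hbuff value of 31.
 */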

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_vsi *main_vsi, *vsi;
	struct i40e_pf *pf = vf->pf;
	u64 max_tx_rate = 0;
	int ret = 0;

	main_vsi = i40e_pf_get_main_vsi(pf);
	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, main_vsi->seid, vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}

	if (!idx) {
		u64 hashcfg = i40e_pf_get_default_rss_hashcfg(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed. Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hashcfg);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id),
		     (u32)(hashcfg >> 32));
		/* program mac filter only for VF VSI */
		ret = i40e_sync_vsi_filters(vsi);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
	}

	/* store the VSI index and id for ADq; the MAC filter is not
	 * applied here
	 */
	if (vf->adq_enabled) {
		vf->ch[idx].vsi_idx = vsi->idx;
		vf->ch[idx].vsi_id = vsi->id;
	}

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		max_tx_rate = vf->tx_rate;
	} else if (vf->ch[idx].max_tx_rate) {
		max_tx_rate = vf->ch[idx].max_tx_rate;
	}

	if (max_tx_rate) {
		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  max_tx_rate, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}

/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the first part, VSILAN_QTABLE, mapping PF queues
 * to the VSI.
 **/
static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qps;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < 7; j++) {
			if (j * 2 >= qps) {
				/* end of list */
				reg = 0x07FF07FF;
			} else {
				u16 qid = i40e_vc_get_pf_queue_id(vf,
								  vsi_id,
								  j * 2);
				reg = qid;
				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      (j * 2) + 1);
				reg |= qid << 16;
			}
			i40e_write_rx_ctl(hw,
					  I40E_VSILAN_QTABLE(j, vsi_id),
					  reg);
		}
	}
}
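
/*
 * Packing sketch (from the code above): each 32-bit VSILAN_QTABLE register
 * holds two PF queue ids, the even queue in the low 16 bits and the odd
 * queue in the high 16 bits; 0x07FF07FF writes the 0x7FF "end of list"
 * marker into both halves for unused slots.
 */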

/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the second part, VPLAN_QTABLE, and completes the
 * VF mappings.
 **/
static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_qps = 0;
	u32 qps, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qid;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < qps; j++) {
			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);

			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
			     reg);
			total_qps++;
		}
	}
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	i40e_map_pf_to_vf_queues(vf);
	i40e_map_pf_queues_to_vsi(vf);

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, j, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default so
	 * do the accounting here when we're about to free them.
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
	}

	/* do the accounting and remove additional ADq VSI's */
	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
		for (j = 0; j < vf->num_tc; j++) {
			/* At this point VSI0 is already released so don't
			 * release it again and only clear their values in
			 * structure variables
			 */
			if (j)
				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
			vf->ch[j].vsi_idx = 0;
			vf->ch[j].vsi_id = 0;
		}
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (i == 0)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						       (vf->vf_id))
						      + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (i == 0)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret, idx;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, 0);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* allocate additional VSIs based on tc information for ADq */
	if (vf->adq_enabled) {
		if (pf->queues_left >=
		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
			/* TC 0 always belongs to VF VSI */
			for (idx = 1; idx < vf->num_tc; idx++) {
				ret = i40e_alloc_vsi_res(vf, idx);
				if (ret)
					goto error_alloc;
			}
			/* send correct number of queues */
			total_queue_pairs = I40E_MAX_VF_QUEUES;
		} else {
			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
				 vf->vf_id);
			vf->adq_enabled = false;
		}
	}

	/* We account for each VF to get a default number of queue pairs. If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * HW.
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}
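
/*
 * Indirection sketch (illustrative): PF_PCI_CIAA selects a config-space
 * address (VF_DEVICE_STATUS, 0xAA) for the chosen absolute VF, and
 * PF_PCI_CIAD returns the data at that address. Assuming the standard
 * capability layout on this device, that offset reads the PCIe Device
 * Status register, where VF_TRANS_PENDING_MASK (bit 5) is the Transactions
 * Pending flag polled above.
 */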
1254 **/ 1255 static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans, 1256 s16 **vlan_list) 1257 { 1258 struct i40e_mac_filter *f; 1259 int i = 0; 1260 int bkt; 1261 1262 spin_lock_bh(&vsi->mac_filter_hash_lock); 1263 *num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi); 1264 *vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC); 1265 if (!(*vlan_list)) 1266 goto err; 1267 1268 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { 1269 if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID) 1270 continue; 1271 (*vlan_list)[i++] = f->vlan; 1272 } 1273 err: 1274 spin_unlock_bh(&vsi->mac_filter_hash_lock); 1275 } 1276 1277 /** 1278 * i40e_set_vsi_promisc 1279 * @vf: pointer to the VF struct 1280 * @seid: VSI number 1281 * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable 1282 * for a given VLAN 1283 * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable 1284 * for a given VLAN 1285 * @vl: List of VLANs - apply filter for given VLANs 1286 * @num_vlans: Number of elements in @vl 1287 **/ 1288 static int 1289 i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable, 1290 bool unicast_enable, s16 *vl, u16 num_vlans) 1291 { 1292 struct i40e_pf *pf = vf->pf; 1293 struct i40e_hw *hw = &pf->hw; 1294 int aq_ret, aq_tmp = 0; 1295 int i; 1296 1297 /* No VLAN to set promisc on, set on VSI */ 1298 if (!num_vlans || !vl) { 1299 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid, 1300 multi_enable, 1301 NULL); 1302 if (aq_ret) { 1303 int aq_err = pf->hw.aq.asq_last_status; 1304 1305 dev_err(&pf->pdev->dev, 1306 "VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n", 1307 vf->vf_id, ERR_PTR(aq_ret), 1308 libie_aq_str(aq_err)); 1309 1310 return aq_ret; 1311 } 1312 1313 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid, 1314 unicast_enable, 1315 NULL, true); 1316 1317 if (aq_ret) { 1318 int aq_err = pf->hw.aq.asq_last_status; 1319 1320 dev_err(&pf->pdev->dev, 1321 "VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n", 1322 vf->vf_id, ERR_PTR(aq_ret), 1323 libie_aq_str(aq_err)); 1324 } 1325 1326 return aq_ret; 1327 } 1328 1329 for (i = 0; i < num_vlans; i++) { 1330 aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid, 1331 multi_enable, 1332 vl[i], NULL); 1333 if (aq_ret) { 1334 int aq_err = pf->hw.aq.asq_last_status; 1335 1336 dev_err(&pf->pdev->dev, 1337 "VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n", 1338 vf->vf_id, ERR_PTR(aq_ret), 1339 libie_aq_str(aq_err)); 1340 1341 if (!aq_tmp) 1342 aq_tmp = aq_ret; 1343 } 1344 1345 aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid, 1346 unicast_enable, 1347 vl[i], NULL); 1348 if (aq_ret) { 1349 int aq_err = pf->hw.aq.asq_last_status; 1350 1351 dev_err(&pf->pdev->dev, 1352 "VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n", 1353 vf->vf_id, ERR_PTR(aq_ret), 1354 libie_aq_str(aq_err)); 1355 1356 if (!aq_tmp) 1357 aq_tmp = aq_ret; 1358 } 1359 } 1360 1361 if (aq_tmp) 1362 aq_ret = aq_tmp; 1363 1364 return aq_ret; 1365 } 1366 1367 /** 1368 * i40e_config_vf_promiscuous_mode 1369 * @vf: pointer to the VF info 1370 * @vsi_id: VSI id 1371 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable 1372 * @alluni: set MAC L2 layer unicast promiscuous enable/disable 1373 * 1374 * Called from the VF to configure the promiscuous mode of 1375 * VF vsis and from the VF reset path to reset promiscuous mode. 
1376 **/ 1377 static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf, 1378 u16 vsi_id, 1379 bool allmulti, 1380 bool alluni) 1381 { 1382 struct i40e_pf *pf = vf->pf; 1383 struct i40e_vsi *vsi; 1384 int aq_ret = 0; 1385 u16 num_vlans; 1386 s16 *vl; 1387 1388 vsi = i40e_find_vsi_from_id(pf, vsi_id); 1389 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi) 1390 return -EINVAL; 1391 1392 if (vf->port_vlan_id) { 1393 aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, 1394 alluni, &vf->port_vlan_id, 1); 1395 return aq_ret; 1396 } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { 1397 i40e_get_vlan_list_sync(vsi, &num_vlans, &vl); 1398 1399 if (!vl) 1400 return -ENOMEM; 1401 1402 aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni, 1403 vl, num_vlans); 1404 kfree(vl); 1405 return aq_ret; 1406 } 1407 1408 /* no VLANs to set on, set on VSI */ 1409 aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni, 1410 NULL, 0); 1411 return aq_ret; 1412 } 1413 1414 /** 1415 * i40e_sync_vfr_reset 1416 * @hw: pointer to hw struct 1417 * @vf_id: VF identifier 1418 * 1419 * Before trigger hardware reset, we need to know if no other process has 1420 * reserved the hardware for any reset operations. This check is done by 1421 * examining the status of the RSTAT1 register used to signal the reset. 1422 **/ 1423 static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id) 1424 { 1425 u32 reg; 1426 int i; 1427 1428 for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) { 1429 reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) & 1430 I40E_VFINT_ICR0_ADMINQ_MASK; 1431 if (reg) 1432 return 0; 1433 1434 usleep_range(100, 200); 1435 } 1436 1437 return -EAGAIN; 1438 } 1439 1440 /** 1441 * i40e_trigger_vf_reset 1442 * @vf: pointer to the VF structure 1443 * @flr: VFLR was issued or not 1444 * 1445 * Trigger hardware to start a reset for a particular VF. Expects the caller 1446 * to wait the proper amount of time to allow hardware to reset the VF before 1447 * it cleans up and restores VF functionality. 1448 **/ 1449 static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr) 1450 { 1451 struct i40e_pf *pf = vf->pf; 1452 struct i40e_hw *hw = &pf->hw; 1453 u32 reg, reg_idx, bit_idx; 1454 bool vf_active; 1455 u32 radq; 1456 1457 /* warn the VF */ 1458 vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); 1459 1460 /* Disable VF's configuration API during reset. The flag is re-enabled 1461 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI. 1462 * It's normally disabled in i40e_free_vf_res(), but it's safer 1463 * to do it earlier to give some time to finish to any VF config 1464 * functions that may still be running at this point. 1465 */ 1466 clear_bit(I40E_VF_STATE_INIT, &vf->vf_states); 1467 clear_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states); 1468 1469 /* In the case of a VFLR, the HW has already reset the VF and we 1470 * just need to clean up, so don't hit the VFRTRIG register. 1471 */ 1472 if (!flr) { 1473 /* Sync VFR reset before trigger next one */ 1474 radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) & 1475 I40E_VFINT_ICR0_ADMINQ_MASK; 1476 if (vf_active && !radq) 1477 /* waiting for finish reset by virtual driver */ 1478 if (i40e_sync_vfr_reset(hw, vf->vf_id)) 1479 dev_info(&pf->pdev->dev, 1480 "Reset VF %d never finished\n", 1481 vf->vf_id); 1482 1483 /* Reset VF using VPGEN_VFRTRIG reg. It is also setting 1484 * in progress state in rstat1 register. 
1485 */ 1486 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); 1487 reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; 1488 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); 1489 i40e_flush(hw); 1490 } 1491 /* clear the VFLR bit in GLGEN_VFLRSTAT */ 1492 reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; 1493 bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32; 1494 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); 1495 i40e_flush(hw); 1496 1497 if (i40e_quiesce_vf_pci(vf)) 1498 dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n", 1499 vf->vf_id); 1500 } 1501 1502 /** 1503 * i40e_cleanup_reset_vf 1504 * @vf: pointer to the VF structure 1505 * 1506 * Cleanup a VF after the hardware reset is finished. Expects the caller to 1507 * have verified whether the reset is finished properly, and ensure the 1508 * minimum amount of wait time has passed. 1509 **/ 1510 static void i40e_cleanup_reset_vf(struct i40e_vf *vf) 1511 { 1512 struct i40e_pf *pf = vf->pf; 1513 struct i40e_hw *hw = &pf->hw; 1514 u32 reg; 1515 1516 /* disable promisc modes in case they were enabled */ 1517 i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false); 1518 1519 /* free VF resources to begin resetting the VSI state */ 1520 i40e_free_vf_res(vf); 1521 1522 /* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg. 1523 * By doing this we allow HW to access VF memory at any point. If we 1524 * did it any sooner, HW could access memory while it was being freed 1525 * in i40e_free_vf_res(), causing an IOMMU fault. 1526 * 1527 * On the other hand, this needs to be done ASAP, because the VF driver 1528 * is waiting for this to happen and may report a timeout. It's 1529 * harmless, but it gets logged into Guest OS kernel log, so best avoid 1530 * it. 1531 */ 1532 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); 1533 reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; 1534 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); 1535 1536 /* reallocate VF resources to finish resetting the VSI state */ 1537 if (!i40e_alloc_vf_res(vf)) { 1538 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 1539 i40e_enable_vf_mappings(vf); 1540 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); 1541 clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); 1542 /* Do not notify the client during VF init */ 1543 if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE, 1544 &vf->vf_states)) 1545 i40e_notify_client_of_vf_reset(pf, abs_vf_id); 1546 vf->num_vlan = 0; 1547 } 1548 1549 /* Tell the VF driver the reset is done. This needs to be done only 1550 * after VF has been fully initialized, because the VF driver may 1551 * request resources immediately after setting this flag. 1552 */ 1553 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); 1554 } 1555 1556 /** 1557 * i40e_reset_vf 1558 * @vf: pointer to the VF structure 1559 * @flr: VFLR was issued or not 1560 * 1561 * Return: True if reset was performed successfully or if resets are disabled. 1562 * False if reset is already in progress. 1563 **/ 1564 bool i40e_reset_vf(struct i40e_vf *vf, bool flr) 1565 { 1566 struct i40e_pf *pf = vf->pf; 1567 struct i40e_hw *hw = &pf->hw; 1568 bool rsd = false; 1569 u32 reg; 1570 int i; 1571 1572 if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) 1573 return true; 1574 1575 /* Bail out if VFs are disabled. */ 1576 if (test_bit(__I40E_VF_DISABLE, pf->state)) 1577 return true; 1578 1579 /* If VF is being reset already we don't need to continue. 
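
/*
 * Bitmap math example (values assumed): GLGEN_VFLRSTAT is an array of
 * 32-bit registers indexed by absolute VF id. With vf_base_id 64 and
 * vf_id 5 the absolute id is 69, so the VFLR bit cleared above lives in
 * GLGEN_VFLRSTAT(2), bit 5.
 */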
*/ 1580 if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) 1581 return false; 1582 1583 i40e_trigger_vf_reset(vf, flr); 1584 1585 /* poll VPGEN_VFRSTAT reg to make sure 1586 * that reset is complete 1587 */ 1588 for (i = 0; i < 10; i++) { 1589 /* VF reset requires driver to first reset the VF and then 1590 * poll the status register to make sure that the reset 1591 * completed successfully. Due to internal HW FIFO flushes, 1592 * we must wait 10ms before the register will be valid. 1593 */ 1594 usleep_range(10000, 20000); 1595 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); 1596 if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) { 1597 rsd = true; 1598 break; 1599 } 1600 } 1601 1602 if (flr) 1603 usleep_range(10000, 20000); 1604 1605 if (!rsd) 1606 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", 1607 vf->vf_id); 1608 usleep_range(10000, 20000); 1609 1610 /* On initial reset, we don't have any queues to disable */ 1611 if (vf->lan_vsi_idx != 0) 1612 i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]); 1613 1614 i40e_cleanup_reset_vf(vf); 1615 1616 i40e_flush(hw); 1617 usleep_range(20000, 40000); 1618 clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states); 1619 1620 return true; 1621 } 1622 1623 /** 1624 * i40e_reset_all_vfs 1625 * @pf: pointer to the PF structure 1626 * @flr: VFLR was issued or not 1627 * 1628 * Reset all allocated VFs in one go. First, tell the hardware to reset each 1629 * VF, then do all the waiting in one chunk, and finally finish restoring each 1630 * VF after the wait. This is useful during PF routines which need to reset 1631 * all VFs, as otherwise it must perform these resets in a serialized fashion. 1632 * 1633 * Returns true if any VFs were reset, and false otherwise. 1634 **/ 1635 bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) 1636 { 1637 struct i40e_hw *hw = &pf->hw; 1638 struct i40e_vf *vf; 1639 u32 reg; 1640 int i; 1641 1642 /* If we don't have any VFs, then there is nothing to reset */ 1643 if (!pf->num_alloc_vfs) 1644 return false; 1645 1646 /* If VFs have been disabled, there is no need to reset */ 1647 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) 1648 return false; 1649 1650 /* Begin reset on all VFs at once */ 1651 for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) { 1652 /* If VF is being reset no need to trigger reset again */ 1653 if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) 1654 i40e_trigger_vf_reset(vf, flr); 1655 } 1656 1657 /* HW requires some time to make sure it can flush the FIFO for a VF 1658 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in 1659 * sequence to make sure that it has completed. We'll keep track of 1660 * the VFs using a simple iterator that increments once that VF has 1661 * finished resetting. 1662 */ 1663 for (i = 0, vf = &pf->vf[0]; i < 10 && vf < &pf->vf[pf->num_alloc_vfs]; ++i) { 1664 usleep_range(10000, 20000); 1665 1666 /* Check each VF in sequence, beginning with the VF to fail 1667 * the previous check. 1668 */ 1669 while (vf < &pf->vf[pf->num_alloc_vfs]) { 1670 if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) { 1671 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); 1672 if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK)) 1673 break; 1674 } 1675 1676 /* If the current VF has finished resetting, move on 1677 * to the next VF in sequence. 1678 */ 1679 ++vf; 1680 } 1681 } 1682 1683 if (flr) 1684 usleep_range(10000, 20000); 1685 1686 /* Display a warning if at least one VF didn't manage to reset in 1687 * time, but continue on with the operation. 
1688 */ 1689 if (vf < &pf->vf[pf->num_alloc_vfs]) 1690 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", 1691 vf->vf_id); 1692 usleep_range(10000, 20000); 1693 1694 /* Begin disabling all the rings associated with VFs, but do not wait 1695 * between each VF. 1696 */ 1697 for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) { 1698 /* On initial reset, we don't have any queues to disable */ 1699 if (vf->lan_vsi_idx == 0) 1700 continue; 1701 1702 /* If VF is reset in another thread just continue */ 1703 if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) 1704 continue; 1705 1706 i40e_vsi_stop_rings_no_wait(pf->vsi[vf->lan_vsi_idx]); 1707 } 1708 1709 /* Now that we've notified HW to disable all of the VF rings, wait 1710 * until they finish. 1711 */ 1712 for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) { 1713 /* On initial reset, we don't have any queues to disable */ 1714 if (vf->lan_vsi_idx == 0) 1715 continue; 1716 1717 /* If VF is reset in another thread just continue */ 1718 if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) 1719 continue; 1720 1721 i40e_vsi_wait_queues_disabled(pf->vsi[vf->lan_vsi_idx]); 1722 } 1723 1724 /* Hw may need up to 50ms to finish disabling the RX queues. We 1725 * minimize the wait by delaying only once for all VFs. 1726 */ 1727 mdelay(50); 1728 1729 /* Finish the reset on each VF */ 1730 for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) { 1731 /* If VF is reset in another thread just continue */ 1732 if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) 1733 continue; 1734 1735 i40e_cleanup_reset_vf(vf); 1736 } 1737 1738 i40e_flush(hw); 1739 usleep_range(20000, 40000); 1740 clear_bit(__I40E_VF_DISABLE, pf->state); 1741 1742 return true; 1743 } 1744 1745 /** 1746 * i40e_free_vfs 1747 * @pf: pointer to the PF structure 1748 * 1749 * free VF resources 1750 **/ 1751 void i40e_free_vfs(struct i40e_pf *pf) 1752 { 1753 struct i40e_hw *hw = &pf->hw; 1754 u32 reg_idx, bit_idx; 1755 int i, tmp, vf_id; 1756 1757 if (!pf->vf) 1758 return; 1759 1760 set_bit(__I40E_VFS_RELEASING, pf->state); 1761 while (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) 1762 usleep_range(1000, 2000); 1763 1764 i40e_notify_client_of_vf_enable(pf, 0); 1765 1766 /* Disable IOV before freeing resources. This lets any VF drivers 1767 * running in the host get themselves cleaned up before we yank 1768 * the carpet out from underneath their feet. 1769 */ 1770 if (!pci_vfs_assigned(pf->pdev)) 1771 pci_disable_sriov(pf->pdev); 1772 else 1773 dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); 1774 1775 /* Amortize wait time by stopping all VFs at the same time */ 1776 for (i = 0; i < pf->num_alloc_vfs; i++) { 1777 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) 1778 continue; 1779 1780 i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]); 1781 } 1782 1783 for (i = 0; i < pf->num_alloc_vfs; i++) { 1784 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) 1785 continue; 1786 1787 i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]); 1788 } 1789 1790 /* free up VF resources */ 1791 tmp = pf->num_alloc_vfs; 1792 pf->num_alloc_vfs = 0; 1793 for (i = 0; i < tmp; i++) { 1794 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) 1795 i40e_free_vf_res(&pf->vf[i]); 1796 /* disable qp mappings */ 1797 i40e_disable_vf_mappings(&pf->vf[i]); 1798 } 1799 1800 kfree(pf->vf); 1801 pf->vf = NULL; 1802 1803 /* This check is for when the driver is unloaded while VFs are 1804 * assigned. 
Setting the number of VFs to 0 through sysfs is caught 1805 * before this function ever gets called. 1806 */ 1807 if (!pci_vfs_assigned(pf->pdev)) { 1808 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to 1809 * work correctly when SR-IOV gets re-enabled. 1810 */ 1811 for (vf_id = 0; vf_id < tmp; vf_id++) { 1812 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; 1813 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; 1814 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); 1815 } 1816 } 1817 clear_bit(__I40E_VF_DISABLE, pf->state); 1818 clear_bit(__I40E_VFS_RELEASING, pf->state); 1819 } 1820 1821 #ifdef CONFIG_PCI_IOV 1822 /** 1823 * i40e_alloc_vfs 1824 * @pf: pointer to the PF structure 1825 * @num_alloc_vfs: number of VFs to allocate 1826 * 1827 * allocate VF resources 1828 **/ 1829 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) 1830 { 1831 struct i40e_vf *vfs; 1832 int i, ret = 0; 1833 1834 /* Disable interrupt 0 so we don't try to handle the VFLR. */ 1835 i40e_irq_dynamic_disable_icr0(pf); 1836 1837 /* Check to see if we're just allocating resources for extant VFs */ 1838 if (pci_num_vf(pf->pdev) != num_alloc_vfs) { 1839 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); 1840 if (ret) { 1841 clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); 1842 pf->num_alloc_vfs = 0; 1843 goto err_iov; 1844 } 1845 } 1846 /* allocate memory */ 1847 vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL); 1848 if (!vfs) { 1849 ret = -ENOMEM; 1850 goto err_alloc; 1851 } 1852 pf->vf = vfs; 1853 1854 /* apply default profile */ 1855 for (i = 0; i < num_alloc_vfs; i++) { 1856 vfs[i].pf = pf; 1857 vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB; 1858 vfs[i].vf_id = i; 1859 1860 /* assign default capabilities */ 1861 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); 1862 vfs[i].spoofchk = true; 1863 1864 set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states); 1865 1866 } 1867 pf->num_alloc_vfs = num_alloc_vfs; 1868 1869 /* VF resources get allocated during reset */ 1870 i40e_reset_all_vfs(pf, false); 1871 1872 i40e_notify_client_of_vf_enable(pf, num_alloc_vfs); 1873 1874 err_alloc: 1875 if (ret) 1876 i40e_free_vfs(pf); 1877 err_iov: 1878 /* Re-enable interrupt 0. */ 1879 i40e_irq_dynamic_enable_icr0(pf); 1880 return ret; 1881 } 1882 1883 #endif 1884 /** 1885 * i40e_pci_sriov_enable 1886 * @pdev: pointer to a pci_dev structure 1887 * @num_vfs: number of VFs to allocate 1888 * 1889 * Enable or change the number of VFs 1890 **/ 1891 static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs) 1892 { 1893 #ifdef CONFIG_PCI_IOV 1894 struct i40e_pf *pf = pci_get_drvdata(pdev); 1895 int pre_existing_vfs = pci_num_vf(pdev); 1896 int err = 0; 1897 1898 if (test_bit(__I40E_TESTING, pf->state)) { 1899 dev_warn(&pdev->dev, 1900 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n"); 1901 err = -EPERM; 1902 goto err_out; 1903 } 1904 1905 if (pre_existing_vfs && pre_existing_vfs != num_vfs) 1906 i40e_free_vfs(pf); 1907 else if (pre_existing_vfs && pre_existing_vfs == num_vfs) 1908 goto out; 1909 1910 if (num_vfs > pf->num_req_vfs) { 1911 dev_warn(&pdev->dev, "Unable to enable %d VFs. 
Limited to %d VFs due to device resource constraints.\n", 1912 num_vfs, pf->num_req_vfs); 1913 err = -EPERM; 1914 goto err_out; 1915 } 1916 1917 dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs); 1918 err = i40e_alloc_vfs(pf, num_vfs); 1919 if (err) { 1920 dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err); 1921 goto err_out; 1922 } 1923 1924 out: 1925 return num_vfs; 1926 1927 err_out: 1928 return err; 1929 #endif 1930 return 0; 1931 } 1932 1933 /** 1934 * i40e_pci_sriov_configure 1935 * @pdev: pointer to a pci_dev structure 1936 * @num_vfs: number of VFs to allocate 1937 * 1938 * Enable or change the number of VFs. Called when the user updates the number 1939 * of VFs in sysfs. 1940 **/ 1941 int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) 1942 { 1943 struct i40e_pf *pf = pci_get_drvdata(pdev); 1944 int ret = 0; 1945 1946 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 1947 dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 1948 return -EAGAIN; 1949 } 1950 1951 if (num_vfs) { 1952 if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) { 1953 set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); 1954 i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG); 1955 } 1956 ret = i40e_pci_sriov_enable(pdev, num_vfs); 1957 goto sriov_configure_out; 1958 } 1959 1960 if (!pci_vfs_assigned(pf->pdev)) { 1961 i40e_free_vfs(pf); 1962 clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); 1963 i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG); 1964 } else { 1965 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); 1966 ret = -EINVAL; 1967 goto sriov_configure_out; 1968 } 1969 sriov_configure_out: 1970 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 1971 return ret; 1972 } 1973 1974 /***********************virtual channel routines******************/ 1975 1976 /** 1977 * i40e_vc_send_msg_to_vf 1978 * @vf: pointer to the VF info 1979 * @v_opcode: virtual channel opcode 1980 * @v_retval: virtual channel return value 1981 * @msg: pointer to the msg buffer 1982 * @msglen: msg length 1983 * 1984 * send msg to VF 1985 **/ 1986 static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, 1987 u32 v_retval, u8 *msg, u16 msglen) 1988 { 1989 struct i40e_pf *pf; 1990 struct i40e_hw *hw; 1991 int abs_vf_id; 1992 int aq_ret; 1993 1994 /* validate the request */ 1995 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) 1996 return -EINVAL; 1997 1998 pf = vf->pf; 1999 hw = &pf->hw; 2000 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 2001 2002 aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, 2003 msg, msglen, NULL); 2004 if (aq_ret) { 2005 dev_info(&pf->pdev->dev, 2006 "Unable to send the message to VF %d aq_err %d\n", 2007 vf->vf_id, pf->hw.aq.asq_last_status); 2008 return -EIO; 2009 } 2010 2011 return 0; 2012 } 2013 2014 /** 2015 * i40e_vc_send_resp_to_vf 2016 * @vf: pointer to the VF info 2017 * @opcode: operation code 2018 * @retval: return value 2019 * 2020 * send resp msg to VF 2021 **/ 2022 static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, 2023 enum virtchnl_ops opcode, 2024 int retval) 2025 { 2026 return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0); 2027 } 2028 2029 /** 2030 * i40e_sync_vf_state 2031 * @vf: pointer to the VF info 2032 * @state: VF state 2033 * 2034 * Called from a VF message to synchronize the service with a potential 2035 * VF reset state 2036 **/ 2037 static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state) 2038 { 2039 int i; 2040 2041 /* When handling some 
messages, the VF state must already be set.
2042  * That state bit may be cleared during a VF reset, so wait
2043  * until the end of the reset before handling the request
2044  * message.
2045  */
2046     for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) {
2047         if (test_bit(state, &vf->vf_states))
2048             return true;
2049         usleep_range(10000, 20000);
2050     }
2051 
2052     return test_bit(state, &vf->vf_states);
2053 }
2054 
2055 /**
2056  * i40e_vc_get_version_msg
2057  * @vf: pointer to the VF info
2058  * @msg: pointer to the msg buffer
2059  *
2060  * called from the VF to request the API version used by the PF
2061  **/
2062 static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
2063 {
2064     struct virtchnl_version_info info = {
2065         VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
2066     };
2067 
2068     vf->vf_ver = *(struct virtchnl_version_info *)msg;
2069     /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
2070     if (VF_IS_V10(&vf->vf_ver))
2071         info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
2072     return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
2073                                   0, (u8 *)&info,
2074                                   sizeof(struct virtchnl_version_info));
2075 }
2076 
2077 /**
2078  * i40e_del_qch - delete all the additional VSIs created as a part of ADq
2079  * @vf: pointer to VF structure
2080  **/
2081 static void i40e_del_qch(struct i40e_vf *vf)
2082 {
2083     struct i40e_pf *pf = vf->pf;
2084     int i;
2085 
2086     /* The first element in the array belongs to the primary VF VSI and we
2087      * must not delete it. The remaining VSIs created for ADq, however, should be released.
2088      */
2089     for (i = 1; i < vf->num_tc; i++) {
2090         if (vf->ch[i].vsi_idx) {
2091             i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
2092             vf->ch[i].vsi_idx = 0;
2093             vf->ch[i].vsi_id = 0;
2094         }
2095     }
2096 }
2097 
2098 /**
2099  * i40e_vc_get_max_frame_size
2100  * @vf: pointer to the VF
2101  *
2102  * Max frame size is determined based on the current port's max frame size and
2103  * whether a port VLAN is configured on this VF. The VF is not aware whether
2104  * it is in a port VLAN, so the PF must account for this both in max frame
2105  * size checks and in the max frame size sent to the VF.
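 *
 * For example (illustrative numbers): if the port reports a 9728-byte max
 * frame size and this VF is in a port VLAN, the PF advertises
 * 9728 - VLAN_HLEN = 9724 bytes, since the 4-byte VLAN tag is inserted on
 * the VF's behalf.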
2106 **/ 2107 static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf) 2108 { 2109 u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size; 2110 2111 if (vf->port_vlan_id) 2112 max_frame_size -= VLAN_HLEN; 2113 2114 return max_frame_size; 2115 } 2116 2117 /** 2118 * i40e_vc_get_vf_resources_msg 2119 * @vf: pointer to the VF info 2120 * @msg: pointer to the msg buffer 2121 * 2122 * called from the VF to request its resources 2123 **/ 2124 static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) 2125 { 2126 struct virtchnl_vf_resource *vfres = NULL; 2127 struct i40e_pf *pf = vf->pf; 2128 struct i40e_vsi *vsi; 2129 int num_vsis = 1; 2130 int aq_ret = 0; 2131 size_t len = 0; 2132 int ret; 2133 2134 i40e_sync_vf_state(vf, I40E_VF_STATE_INIT); 2135 2136 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) || 2137 test_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states)) { 2138 aq_ret = -EINVAL; 2139 goto err; 2140 } 2141 2142 len = virtchnl_struct_size(vfres, vsi_res, num_vsis); 2143 vfres = kzalloc(len, GFP_KERNEL); 2144 if (!vfres) { 2145 aq_ret = -ENOMEM; 2146 len = 0; 2147 goto err; 2148 } 2149 if (VF_IS_V11(&vf->vf_ver)) 2150 vf->driver_caps = *(u32 *)msg; 2151 else 2152 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 | 2153 VIRTCHNL_VF_OFFLOAD_RSS_REG | 2154 VIRTCHNL_VF_OFFLOAD_VLAN; 2155 2156 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2; 2157 vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED; 2158 vsi = pf->vsi[vf->lan_vsi_idx]; 2159 if (!vsi->info.pvid) 2160 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; 2161 2162 if (i40e_vf_client_capable(pf, vf->vf_id) && 2163 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RDMA)) { 2164 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RDMA; 2165 set_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states); 2166 } else { 2167 clear_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states); 2168 } 2169 2170 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { 2171 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; 2172 } else { 2173 if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps) && 2174 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)) 2175 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ; 2176 else 2177 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; 2178 } 2179 2180 if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, pf->hw.caps)) { 2181 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) 2182 vfres->vf_cap_flags |= 2183 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; 2184 } 2185 2186 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP) 2187 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP; 2188 2189 if (test_bit(I40E_HW_CAP_OUTER_UDP_CSUM, pf->hw.caps) && 2190 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) 2191 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; 2192 2193 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) { 2194 if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { 2195 dev_err(&pf->pdev->dev, 2196 "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n", 2197 vf->vf_id); 2198 aq_ret = -EINVAL; 2199 goto err; 2200 } 2201 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; 2202 } 2203 2204 if (test_bit(I40E_HW_CAP_WB_ON_ITR, pf->hw.caps)) { 2205 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) 2206 vfres->vf_cap_flags |= 2207 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; 2208 } 2209 2210 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES) 2211 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; 2212 2213 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ) 2214 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ; 
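    /* A sketch of the negotiation pattern used above: each capability is
     * advertised only when both the VF requested it (vf->driver_caps) and
     * this PF/HW can back it, i.e. roughly:
     *
     *    if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FOO) && hw_has_foo)
     *        vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FOO;
     *
     * (VIRTCHNL_VF_OFFLOAD_FOO and hw_has_foo are illustrative names, not
     * real flags.) The fields below size the VF's queues and vectors.
     */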
2215 2216 vfres->num_vsis = num_vsis; 2217 vfres->num_queue_pairs = vf->num_queue_pairs; 2218 vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; 2219 vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE; 2220 vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE; 2221 vfres->max_mtu = i40e_vc_get_max_frame_size(vf); 2222 2223 if (vf->lan_vsi_idx) { 2224 vfres->vsi_res[0].vsi_id = vf->lan_vsi_id; 2225 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; 2226 vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs; 2227 /* VFs only use TC 0 */ 2228 vfres->vsi_res[0].qset_handle 2229 = le16_to_cpu(vsi->info.qs_handle[0]); 2230 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) { 2231 spin_lock_bh(&vsi->mac_filter_hash_lock); 2232 i40e_del_mac_filter(vsi, vf->default_lan_addr.addr); 2233 eth_zero_addr(vf->default_lan_addr.addr); 2234 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2235 } 2236 ether_addr_copy(vfres->vsi_res[0].default_mac_addr, 2237 vf->default_lan_addr.addr); 2238 } 2239 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); 2240 set_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states); 2241 2242 err: 2243 /* send the response back to the VF */ 2244 ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, 2245 aq_ret, (u8 *)vfres, len); 2246 2247 kfree(vfres); 2248 return ret; 2249 } 2250 2251 /** 2252 * i40e_vc_config_promiscuous_mode_msg 2253 * @vf: pointer to the VF info 2254 * @msg: pointer to the msg buffer 2255 * 2256 * called from the VF to configure the promiscuous mode of 2257 * VF vsis 2258 **/ 2259 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) 2260 { 2261 struct virtchnl_promisc_info *info = 2262 (struct virtchnl_promisc_info *)msg; 2263 struct i40e_pf *pf = vf->pf; 2264 bool allmulti = false; 2265 bool alluni = false; 2266 int aq_ret = 0; 2267 2268 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 2269 aq_ret = -EINVAL; 2270 goto err_out; 2271 } 2272 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 2273 dev_err(&pf->pdev->dev, 2274 "Unprivileged VF %d is attempting to configure promiscuous mode\n", 2275 vf->vf_id); 2276 2277 /* Lie to the VF on purpose, because this is an error we can 2278 * ignore. Unprivileged VF is not a virtual channel error. 
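         * Replying with success also avoids giving an unprivileged VF a
         * distinguishable error path to probe; the host administrator still
         * sees the dev_err() above in the PF's log.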
2279 */ 2280 aq_ret = 0; 2281 goto err_out; 2282 } 2283 2284 if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) { 2285 aq_ret = -EINVAL; 2286 goto err_out; 2287 } 2288 2289 if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) { 2290 aq_ret = -EINVAL; 2291 goto err_out; 2292 } 2293 2294 /* Multicast promiscuous handling*/ 2295 if (info->flags & FLAG_VF_MULTICAST_PROMISC) 2296 allmulti = true; 2297 2298 if (info->flags & FLAG_VF_UNICAST_PROMISC) 2299 alluni = true; 2300 aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti, 2301 alluni); 2302 if (aq_ret) 2303 goto err_out; 2304 2305 if (allmulti) { 2306 if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC, 2307 &vf->vf_states)) 2308 dev_info(&pf->pdev->dev, 2309 "VF %d successfully set multicast promiscuous mode\n", 2310 vf->vf_id); 2311 } else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC, 2312 &vf->vf_states)) 2313 dev_info(&pf->pdev->dev, 2314 "VF %d successfully unset multicast promiscuous mode\n", 2315 vf->vf_id); 2316 2317 if (alluni) { 2318 if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC, 2319 &vf->vf_states)) 2320 dev_info(&pf->pdev->dev, 2321 "VF %d successfully set unicast promiscuous mode\n", 2322 vf->vf_id); 2323 } else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC, 2324 &vf->vf_states)) 2325 dev_info(&pf->pdev->dev, 2326 "VF %d successfully unset unicast promiscuous mode\n", 2327 vf->vf_id); 2328 2329 err_out: 2330 /* send the response to the VF */ 2331 return i40e_vc_send_resp_to_vf(vf, 2332 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, 2333 aq_ret); 2334 } 2335 2336 /** 2337 * i40e_vc_config_queues_msg 2338 * @vf: pointer to the VF info 2339 * @msg: pointer to the msg buffer 2340 * 2341 * called from the VF to configure the rx/tx 2342 * queues 2343 **/ 2344 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) 2345 { 2346 struct virtchnl_vsi_queue_config_info *qci = 2347 (struct virtchnl_vsi_queue_config_info *)msg; 2348 struct virtchnl_queue_pair_info *qpi; 2349 u16 vsi_id, vsi_queue_id = 0; 2350 struct i40e_pf *pf = vf->pf; 2351 int i, j = 0, idx = 0; 2352 struct i40e_vsi *vsi; 2353 u16 num_qps_all = 0; 2354 int aq_ret = 0; 2355 2356 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 2357 aq_ret = -EINVAL; 2358 goto error_param; 2359 } 2360 2361 if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) { 2362 aq_ret = -EINVAL; 2363 goto error_param; 2364 } 2365 2366 if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) { 2367 aq_ret = -EINVAL; 2368 goto error_param; 2369 } 2370 2371 if (vf->adq_enabled) { 2372 for (i = 0; i < vf->num_tc; i++) 2373 num_qps_all += vf->ch[i].num_qps; 2374 if (num_qps_all != qci->num_queue_pairs) { 2375 aq_ret = -EINVAL; 2376 goto error_param; 2377 } 2378 } 2379 2380 vsi_id = qci->vsi_id; 2381 2382 for (i = 0; i < qci->num_queue_pairs; i++) { 2383 qpi = &qci->qpair[i]; 2384 2385 if (!vf->adq_enabled) { 2386 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, 2387 qpi->txq.queue_id)) { 2388 aq_ret = -EINVAL; 2389 goto error_param; 2390 } 2391 2392 vsi_queue_id = qpi->txq.queue_id; 2393 2394 if (qpi->txq.vsi_id != qci->vsi_id || 2395 qpi->rxq.vsi_id != qci->vsi_id || 2396 qpi->rxq.queue_id != vsi_queue_id) { 2397 aq_ret = -EINVAL; 2398 goto error_param; 2399 } 2400 } 2401 2402 if (vf->adq_enabled) { 2403 if (idx >= vf->num_tc) { 2404 aq_ret = -ENODEV; 2405 goto error_param; 2406 } 2407 vsi_id = vf->ch[idx].vsi_id; 2408 } 2409 2410 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id, 2411 &qpi->rxq) || 2412 i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id, 2413 &qpi->txq)) { 2414 aq_ret = -EINVAL; 2415 goto error_param; 
2416 } 2417 2418 /* For ADq there can be up to 4 VSIs with max 4 queues each. 2419 * VF does not know about these additional VSIs and all 2420 * it cares is about its own queues. PF configures these queues 2421 * to its appropriate VSIs based on TC mapping 2422 */ 2423 if (vf->adq_enabled) { 2424 if (idx >= vf->num_tc) { 2425 aq_ret = -ENODEV; 2426 goto error_param; 2427 } 2428 if (j == (vf->ch[idx].num_qps - 1)) { 2429 idx++; 2430 j = 0; /* resetting the queue count */ 2431 vsi_queue_id = 0; 2432 } else { 2433 j++; 2434 vsi_queue_id++; 2435 } 2436 } 2437 } 2438 /* set vsi num_queue_pairs in use to num configured by VF */ 2439 if (!vf->adq_enabled) { 2440 pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = 2441 qci->num_queue_pairs; 2442 } else { 2443 for (i = 0; i < vf->num_tc; i++) { 2444 vsi = pf->vsi[vf->ch[i].vsi_idx]; 2445 vsi->num_queue_pairs = vf->ch[i].num_qps; 2446 2447 if (i40e_update_adq_vsi_queues(vsi, i)) { 2448 aq_ret = -EIO; 2449 goto error_param; 2450 } 2451 } 2452 } 2453 2454 error_param: 2455 /* send the response to the VF */ 2456 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, 2457 aq_ret); 2458 } 2459 2460 /** 2461 * i40e_validate_queue_map - check queue map is valid 2462 * @vf: the VF structure pointer 2463 * @vsi_id: vsi id 2464 * @queuemap: Tx or Rx queue map 2465 * 2466 * check if Tx or Rx queue map is valid 2467 **/ 2468 static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id, 2469 unsigned long queuemap) 2470 { 2471 u16 vsi_queue_id, queue_id; 2472 2473 for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) { 2474 u16 idx = vsi_queue_id / I40E_MAX_VF_VSI; 2475 2476 if (vf->adq_enabled && idx < vf->num_tc) { 2477 vsi_id = vf->ch[idx].vsi_id; 2478 queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF); 2479 } else { 2480 queue_id = vsi_queue_id; 2481 } 2482 2483 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) 2484 return -EINVAL; 2485 } 2486 2487 return 0; 2488 } 2489 2490 /** 2491 * i40e_vc_config_irq_map_msg 2492 * @vf: pointer to the VF info 2493 * @msg: pointer to the msg buffer 2494 * 2495 * called from the VF to configure the irq to 2496 * queue map 2497 **/ 2498 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg) 2499 { 2500 struct virtchnl_irq_map_info *irqmap_info = 2501 (struct virtchnl_irq_map_info *)msg; 2502 struct virtchnl_vector_map *map; 2503 int aq_ret = 0; 2504 u16 vsi_id; 2505 int i; 2506 2507 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 2508 aq_ret = -EINVAL; 2509 goto error_param; 2510 } 2511 2512 if (irqmap_info->num_vectors > 2513 vf->pf->hw.func_caps.num_msix_vectors_vf) { 2514 aq_ret = -EINVAL; 2515 goto error_param; 2516 } 2517 2518 for (i = 0; i < irqmap_info->num_vectors; i++) { 2519 map = &irqmap_info->vecmap[i]; 2520 /* validate msg params */ 2521 if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) || 2522 !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) { 2523 aq_ret = -EINVAL; 2524 goto error_param; 2525 } 2526 vsi_id = map->vsi_id; 2527 2528 if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) { 2529 aq_ret = -EINVAL; 2530 goto error_param; 2531 } 2532 2533 if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) { 2534 aq_ret = -EINVAL; 2535 goto error_param; 2536 } 2537 2538 i40e_config_irq_link_list(vf, vsi_id, map); 2539 } 2540 error_param: 2541 /* send the response to the VF */ 2542 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, 2543 aq_ret); 2544 } 2545 2546 /** 2547 * i40e_ctrl_vf_tx_rings 2548 * @vsi: the SRIOV VSI being configured 2549 * @q_map: bit map of the 
queues to be started or stopped
2550  * @enable: start or stop the queue
2551  **/
2552 static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2553                                  bool enable)
2554 {
2555     struct i40e_pf *pf = vsi->back;
2556     int ret = 0;
2557     u16 q_id;
2558 
2559     for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2560         ret = i40e_control_wait_tx_q(vsi->seid, pf,
2561                                      vsi->base_queue + q_id,
2562                                      false /*is xdp*/, enable);
2563         if (ret)
2564             break;
2565     }
2566     return ret;
2567 }
2568 
2569 /**
2570  * i40e_ctrl_vf_rx_rings
2571  * @vsi: the SRIOV VSI being configured
2572  * @q_map: bit map of the queues to be started or stopped
2573  * @enable: start or stop the queue
2574  **/
2575 static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2576                                  bool enable)
2577 {
2578     struct i40e_pf *pf = vsi->back;
2579     int ret = 0;
2580     u16 q_id;
2581 
2582     for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2583         ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
2584                                      enable);
2585         if (ret)
2586             break;
2587     }
2588     return ret;
2589 }
2590 
2591 /**
2592  * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
2593  * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2594  *
2595  * Returns true if validation was successful, else false.
2596  **/
2597 static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2598 {
2599     if ((!vqs->rx_queues && !vqs->tx_queues) ||
2600         vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
2601         vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
2602         return false;
2603 
2604     return true;
2605 }
2606 
2607 /**
2608  * i40e_vc_enable_queues_msg
2609  * @vf: pointer to the VF info
2610  * @msg: pointer to the msg buffer
2611  *
2612  * called from the VF to enable all or specific queue(s)
2613  **/
2614 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
2615 {
2616     struct virtchnl_queue_select *vqs =
2617         (struct virtchnl_queue_select *)msg;
2618     struct i40e_pf *pf = vf->pf;
2619     int aq_ret = 0;
2620     int i;
2621 
2622     if (vf->is_disabled_from_host) {
2623         aq_ret = -EPERM;
2624         dev_info(&pf->pdev->dev,
2625                  "Admin has disabled VF %d, will not enable queues\n",
2626                  vf->vf_id);
2627         goto error_param;
2628     }
2629 
2630     if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2631         aq_ret = -EINVAL;
2632         goto error_param;
2633     }
2634 
2635     if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2636         aq_ret = -EINVAL;
2637         goto error_param;
2638     }
2639 
2640     if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2641         aq_ret = -EINVAL;
2642         goto error_param;
2643     }
2644 
2645     /* Use the queue bit map sent by the VF */
2646     if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2647                               true)) {
2648         aq_ret = -EIO;
2649         goto error_param;
2650     }
2651     if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2652                               true)) {
2653         aq_ret = -EIO;
2654         goto error_param;
2655     }
2656 
2657     /* need to start the rings for additional ADq VSIs as well */
2658     if (vf->adq_enabled) {
2659         /* zero belongs to LAN VSI */
2660         for (i = 1; i < vf->num_tc; i++) {
2661             if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
2662                 aq_ret = -EIO;
2663         }
2664     }
2665 
2666 error_param:
2667     /* send the response to the VF */
2668     return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
2669                                    aq_ret);
2670 }
2671 
2672 /**
2673  * i40e_vc_disable_queues_msg
2674  * @vf: pointer to the VF info
2675  * @msg: pointer to the msg buffer
2676  *
2677  * called from the VF to disable all or specific
2678  * queue(s)
2679  **/
2680 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
2681 { 2682 struct virtchnl_queue_select *vqs = 2683 (struct virtchnl_queue_select *)msg; 2684 struct i40e_pf *pf = vf->pf; 2685 int aq_ret = 0; 2686 2687 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 2688 aq_ret = -EINVAL; 2689 goto error_param; 2690 } 2691 2692 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2693 aq_ret = -EINVAL; 2694 goto error_param; 2695 } 2696 2697 if (!i40e_vc_validate_vqs_bitmaps(vqs)) { 2698 aq_ret = -EINVAL; 2699 goto error_param; 2700 } 2701 2702 /* Use the queue bit map sent by the VF */ 2703 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, 2704 false)) { 2705 aq_ret = -EIO; 2706 goto error_param; 2707 } 2708 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, 2709 false)) { 2710 aq_ret = -EIO; 2711 goto error_param; 2712 } 2713 error_param: 2714 /* send the response to the VF */ 2715 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, 2716 aq_ret); 2717 } 2718 2719 /** 2720 * i40e_check_enough_queue - find big enough queue number 2721 * @vf: pointer to the VF info 2722 * @needed: the number of items needed 2723 * 2724 * Returns the base item index of the queue, or negative for error 2725 **/ 2726 static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed) 2727 { 2728 unsigned int i, cur_queues, more, pool_size; 2729 struct i40e_lump_tracking *pile; 2730 struct i40e_pf *pf = vf->pf; 2731 struct i40e_vsi *vsi; 2732 2733 vsi = pf->vsi[vf->lan_vsi_idx]; 2734 cur_queues = vsi->alloc_queue_pairs; 2735 2736 /* if current allocated queues are enough for need */ 2737 if (cur_queues >= needed) 2738 return vsi->base_queue; 2739 2740 pile = pf->qp_pile; 2741 if (cur_queues > 0) { 2742 /* if the allocated queues are not zero 2743 * just check if there are enough queues for more 2744 * behind the allocated queues. 2745 */ 2746 more = needed - cur_queues; 2747 for (i = vsi->base_queue + cur_queues; 2748 i < pile->num_entries; i++) { 2749 if (pile->list[i] & I40E_PILE_VALID_BIT) 2750 break; 2751 2752 if (more-- == 1) 2753 /* there is enough */ 2754 return vsi->base_queue; 2755 } 2756 } 2757 2758 pool_size = 0; 2759 for (i = 0; i < pile->num_entries; i++) { 2760 if (pile->list[i] & I40E_PILE_VALID_BIT) { 2761 pool_size = 0; 2762 continue; 2763 } 2764 if (needed <= ++pool_size) 2765 /* there is enough */ 2766 return i; 2767 } 2768 2769 return -ENOMEM; 2770 } 2771 2772 /** 2773 * i40e_vc_request_queues_msg 2774 * @vf: pointer to the VF info 2775 * @msg: pointer to the msg buffer 2776 * 2777 * VFs get a default number of queues but can use this message to request a 2778 * different number. If the request is successful, PF will reset the VF and 2779 * return 0. If unsuccessful, PF will send message informing VF of number of 2780 * available queues and return result of sending VF a message. 
2781 **/ 2782 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) 2783 { 2784 struct virtchnl_vf_res_request *vfres = 2785 (struct virtchnl_vf_res_request *)msg; 2786 u16 req_pairs = vfres->num_queue_pairs; 2787 u8 cur_pairs = vf->num_queue_pairs; 2788 struct i40e_pf *pf = vf->pf; 2789 2790 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) 2791 return -EINVAL; 2792 2793 if (req_pairs > I40E_MAX_VF_QUEUES) { 2794 dev_err(&pf->pdev->dev, 2795 "VF %d tried to request more than %d queues.\n", 2796 vf->vf_id, 2797 I40E_MAX_VF_QUEUES); 2798 vfres->num_queue_pairs = I40E_MAX_VF_QUEUES; 2799 } else if (req_pairs - cur_pairs > pf->queues_left) { 2800 dev_warn(&pf->pdev->dev, 2801 "VF %d requested %d more queues, but only %d left.\n", 2802 vf->vf_id, 2803 req_pairs - cur_pairs, 2804 pf->queues_left); 2805 vfres->num_queue_pairs = pf->queues_left + cur_pairs; 2806 } else if (i40e_check_enough_queue(vf, req_pairs) < 0) { 2807 dev_warn(&pf->pdev->dev, 2808 "VF %d requested %d more queues, but there is not enough for it.\n", 2809 vf->vf_id, 2810 req_pairs - cur_pairs); 2811 vfres->num_queue_pairs = cur_pairs; 2812 } else { 2813 /* successful request */ 2814 vf->num_req_queues = req_pairs; 2815 i40e_vc_reset_vf(vf, true); 2816 return 0; 2817 } 2818 2819 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0, 2820 (u8 *)vfres, sizeof(*vfres)); 2821 } 2822 2823 /** 2824 * i40e_vc_get_stats_msg 2825 * @vf: pointer to the VF info 2826 * @msg: pointer to the msg buffer 2827 * 2828 * called from the VF to get vsi stats 2829 **/ 2830 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) 2831 { 2832 struct virtchnl_queue_select *vqs = 2833 (struct virtchnl_queue_select *)msg; 2834 struct i40e_pf *pf = vf->pf; 2835 struct i40e_eth_stats stats; 2836 int aq_ret = 0; 2837 struct i40e_vsi *vsi; 2838 2839 memset(&stats, 0, sizeof(struct i40e_eth_stats)); 2840 2841 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 2842 aq_ret = -EINVAL; 2843 goto error_param; 2844 } 2845 2846 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2847 aq_ret = -EINVAL; 2848 goto error_param; 2849 } 2850 2851 vsi = pf->vsi[vf->lan_vsi_idx]; 2852 if (!vsi) { 2853 aq_ret = -EINVAL; 2854 goto error_param; 2855 } 2856 i40e_update_eth_stats(vsi); 2857 stats = vsi->eth_stats; 2858 2859 error_param: 2860 /* send the response back to the VF */ 2861 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret, 2862 (u8 *)&stats, sizeof(stats)); 2863 } 2864 2865 #define I40E_MAX_MACVLAN_PER_HW 3072 2866 #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \ 2867 (num_ports)) 2868 /* If the VF is not trusted restrict the number of MAC/VLAN it can program 2869 * MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast 2870 */ 2871 #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1) 2872 #define I40E_VC_MAX_VLAN_PER_VF 16 2873 2874 #define I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(vf_num, num_ports) \ 2875 ({ typeof(vf_num) vf_num_ = (vf_num); \ 2876 typeof(num_ports) num_ports_ = (num_ports); \ 2877 ((I40E_MAX_MACVLAN_PER_PF(num_ports_) - vf_num_ * \ 2878 I40E_VC_MAX_MAC_ADDR_PER_VF) / vf_num_) + \ 2879 I40E_VC_MAX_MAC_ADDR_PER_VF; }) 2880 /** 2881 * i40e_check_vf_permission 2882 * @vf: pointer to the VF info 2883 * @al: MAC address list from virtchnl 2884 * 2885 * Check that the given list of MAC addresses is allowed. Will return -EPERM 2886 * if any address in the list is not valid. 
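 * (More precisely, malformed addresses such as broadcast or all-zero are
 * rejected with -EINVAL, while permission and capacity violations return
 * -EPERM.)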
Checks the following conditions: 2887 * 2888 * 1) broadcast and zero addresses are never valid 2889 * 2) unicast addresses are not allowed if the VMM has administratively set 2890 * the VF MAC address, unless the VF is marked as privileged. 2891 * 3) There is enough space to add all the addresses. 2892 * 2893 * Note that to guarantee consistency, it is expected this function be called 2894 * while holding the mac_filter_hash_lock, as otherwise the current number of 2895 * addresses might not be accurate. 2896 **/ 2897 static inline int i40e_check_vf_permission(struct i40e_vf *vf, 2898 struct virtchnl_ether_addr_list *al) 2899 { 2900 struct i40e_pf *pf = vf->pf; 2901 struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx]; 2902 struct i40e_hw *hw = &pf->hw; 2903 int i, mac_add_max, mac_add_cnt = 0; 2904 bool vf_trusted; 2905 2906 vf_trusted = test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 2907 2908 for (i = 0; i < al->num_elements; i++) { 2909 struct i40e_mac_filter *f; 2910 u8 *addr = al->list[i].addr; 2911 2912 if (is_broadcast_ether_addr(addr) || 2913 is_zero_ether_addr(addr)) { 2914 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", 2915 addr); 2916 return -EINVAL; 2917 } 2918 2919 /* If the host VMM administrator has set the VF MAC address 2920 * administratively via the ndo_set_vf_mac command then deny 2921 * permission to the VF to add or delete unicast MAC addresses. 2922 * Unless the VF is privileged and then it can do whatever. 2923 * The VF may request to set the MAC address filter already 2924 * assigned to it so do not return an error in that case. 2925 */ 2926 if (!vf_trusted && !is_multicast_ether_addr(addr) && 2927 vf->pf_set_mac && !ether_addr_equal(addr, vf->default_lan_addr.addr)) { 2928 dev_err(&pf->pdev->dev, 2929 "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); 2930 return -EPERM; 2931 } 2932 2933 /*count filters that really will be added*/ 2934 f = i40e_find_mac(vsi, addr); 2935 if (!f) 2936 ++mac_add_cnt; 2937 } 2938 2939 /* If this VF is not privileged, then we can't add more than a limited 2940 * number of addresses. 2941 * 2942 * If this VF is trusted, it can use more resources than untrusted. 2943 * However to ensure that every trusted VF has appropriate number of 2944 * resources, divide whole pool of resources per port and then across 2945 * all VFs. 2946 */ 2947 if (!vf_trusted) 2948 mac_add_max = I40E_VC_MAX_MAC_ADDR_PER_VF; 2949 else 2950 mac_add_max = I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs, hw->num_ports); 2951 2952 /* VF can replace all its filters in one step, in this case mac_add_max 2953 * will be added as active and another mac_add_max will be in 2954 * a to-be-removed state. Account for that. 
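 * For example, at the untrusted limit of 18 filters (16 multicast +
 * 1 unicast MAC + 1 broadcast), a wholesale replace may briefly leave up
 * to 18 active entries plus 18 entries pending removal, hence the
 * 2 * mac_add_max bound below.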
2955  */
2956     if ((i40e_count_active_filters(vsi) + mac_add_cnt) > mac_add_max ||
2957         (i40e_count_all_filters(vsi) + mac_add_cnt) > 2 * mac_add_max) {
2958         if (!vf_trusted) {
2959             dev_err(&pf->pdev->dev,
2960                 "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2961             return -EPERM;
2962         } else {
2963             dev_err(&pf->pdev->dev,
2964                 "Cannot add more MAC addresses, trusted VF exhausted its resources\n");
2965             return -EPERM;
2966         }
2967     }
2968     return 0;
2969 }
2970 
2971 /**
2972  * i40e_vc_ether_addr_type - get type of virtchnl_ether_addr
2973  * @vc_ether_addr: used to extract the type
2974  **/
2975 static u8
2976 i40e_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
2977 {
2978     return vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK;
2979 }
2980 
2981 /**
2982  * i40e_is_vc_addr_legacy
2983  * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
2984  *
2985  * check if the MAC address is from an older VF
2986  **/
2987 static bool
2988 i40e_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
2989 {
2990     return i40e_vc_ether_addr_type(vc_ether_addr) ==
2991         VIRTCHNL_ETHER_ADDR_LEGACY;
2992 }
2993 
2994 /**
2995  * i40e_is_vc_addr_primary
2996  * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
2997  *
2998  * check if the MAC address is the VF's primary MAC
2999  * This function should only be called when the MAC address in
3000  * virtchnl_ether_addr is a valid unicast MAC
3001  **/
3002 static bool
3003 i40e_is_vc_addr_primary(struct virtchnl_ether_addr *vc_ether_addr)
3004 {
3005     return i40e_vc_ether_addr_type(vc_ether_addr) ==
3006         VIRTCHNL_ETHER_ADDR_PRIMARY;
3007 }
3008 
3009 /**
3010  * i40e_update_vf_mac_addr
3011  * @vf: VF to update
3012  * @vc_ether_addr: structure from VIRTCHNL with MAC to add
3013  *
3014  * update the VF's cached hardware MAC if allowed
3015  **/
3016 static void
3017 i40e_update_vf_mac_addr(struct i40e_vf *vf,
3018                         struct virtchnl_ether_addr *vc_ether_addr)
3019 {
3020     u8 *mac_addr = vc_ether_addr->addr;
3021 
3022     if (!is_valid_ether_addr(mac_addr))
3023         return;
3024 
3025     /* If the request to add a MAC filter is a primary request, update the
3026      * cached default MAC address with the requested one. If it is a legacy
3027      * request, update the default MAC only if it is currently empty.
3028      */
3029     if (i40e_is_vc_addr_primary(vc_ether_addr)) {
3030         ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
3031     } else if (i40e_is_vc_addr_legacy(vc_ether_addr)) {
3032         if (is_zero_ether_addr(vf->default_lan_addr.addr))
3033             ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
3034     }
3035 }
3036 
3037 /**
3038  * i40e_vc_add_mac_addr_msg
3039  * @vf: pointer to the VF info
3040  * @msg: pointer to the msg buffer
3041  *
3042  * add guest mac address filter
3043  **/
3044 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
3045 {
3046     struct virtchnl_ether_addr_list *al =
3047         (struct virtchnl_ether_addr_list *)msg;
3048     struct i40e_pf *pf = vf->pf;
3049     struct i40e_vsi *vsi = NULL;
3050     int ret = 0;
3051     int i;
3052 
3053     if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3054         !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3055         ret = -EINVAL;
3056         goto error_param;
3057     }
3058 
3059     vsi = pf->vsi[vf->lan_vsi_idx];
3060 
3061     /* Lock once, because every function inside the for loop accesses the
3062      * VSI's MAC filter list, which must be protected by the same lock.
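     * Taking spin_lock_bh() once for the whole batch also keeps the
     * permission check and the subsequent additions atomic with respect
     * to other filter-list writers.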
3063 */ 3064 spin_lock_bh(&vsi->mac_filter_hash_lock); 3065 3066 ret = i40e_check_vf_permission(vf, al); 3067 if (ret) { 3068 spin_unlock_bh(&vsi->mac_filter_hash_lock); 3069 goto error_param; 3070 } 3071 3072 /* add new addresses to the list */ 3073 for (i = 0; i < al->num_elements; i++) { 3074 struct i40e_mac_filter *f; 3075 3076 f = i40e_find_mac(vsi, al->list[i].addr); 3077 if (!f) { 3078 f = i40e_add_mac_filter(vsi, al->list[i].addr); 3079 3080 if (!f) { 3081 dev_err(&pf->pdev->dev, 3082 "Unable to add MAC filter %pM for VF %d\n", 3083 al->list[i].addr, vf->vf_id); 3084 ret = -EINVAL; 3085 spin_unlock_bh(&vsi->mac_filter_hash_lock); 3086 goto error_param; 3087 } 3088 } 3089 i40e_update_vf_mac_addr(vf, &al->list[i]); 3090 } 3091 spin_unlock_bh(&vsi->mac_filter_hash_lock); 3092 3093 /* program the updated filter list */ 3094 ret = i40e_sync_vsi_filters(vsi); 3095 if (ret) 3096 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", 3097 vf->vf_id, ret); 3098 3099 error_param: 3100 /* send the response to the VF */ 3101 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, 3102 ret, NULL, 0); 3103 } 3104 3105 /** 3106 * i40e_vc_del_mac_addr_msg 3107 * @vf: pointer to the VF info 3108 * @msg: pointer to the msg buffer 3109 * 3110 * remove guest mac address filter 3111 **/ 3112 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) 3113 { 3114 struct virtchnl_ether_addr_list *al = 3115 (struct virtchnl_ether_addr_list *)msg; 3116 bool was_unimac_deleted = false; 3117 struct i40e_pf *pf = vf->pf; 3118 struct i40e_vsi *vsi = NULL; 3119 int ret = 0; 3120 int i; 3121 3122 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || 3123 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { 3124 ret = -EINVAL; 3125 goto error_param; 3126 } 3127 3128 for (i = 0; i < al->num_elements; i++) { 3129 if (is_broadcast_ether_addr(al->list[i].addr) || 3130 is_zero_ether_addr(al->list[i].addr)) { 3131 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n", 3132 al->list[i].addr, vf->vf_id); 3133 ret = -EINVAL; 3134 goto error_param; 3135 } 3136 } 3137 vsi = pf->vsi[vf->lan_vsi_idx]; 3138 3139 spin_lock_bh(&vsi->mac_filter_hash_lock); 3140 /* delete addresses from the list */ 3141 for (i = 0; i < al->num_elements; i++) { 3142 const u8 *addr = al->list[i].addr; 3143 3144 /* Allow to delete VF primary MAC only if it was not set 3145 * administratively by PF. 
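     * A PF-assigned MAC is sticky by design: the host administrator set
     * it via ndo_set_vf_mac, so the matching entry is silently skipped
     * (continue) rather than failing the whole delete request.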
3146 */ 3147 if (ether_addr_equal(addr, vf->default_lan_addr.addr)) { 3148 if (!vf->pf_set_mac) 3149 was_unimac_deleted = true; 3150 else 3151 continue; 3152 } 3153 3154 if (i40e_del_mac_filter(vsi, al->list[i].addr)) { 3155 ret = -EINVAL; 3156 spin_unlock_bh(&vsi->mac_filter_hash_lock); 3157 goto error_param; 3158 } 3159 } 3160 3161 spin_unlock_bh(&vsi->mac_filter_hash_lock); 3162 3163 if (was_unimac_deleted) 3164 eth_zero_addr(vf->default_lan_addr.addr); 3165 3166 /* program the updated filter list */ 3167 ret = i40e_sync_vsi_filters(vsi); 3168 if (ret) 3169 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", 3170 vf->vf_id, ret); 3171 3172 if (vf->trusted && was_unimac_deleted) { 3173 struct i40e_mac_filter *f; 3174 struct hlist_node *h; 3175 u8 *macaddr = NULL; 3176 int bkt; 3177 3178 /* set last unicast mac address as default */ 3179 spin_lock_bh(&vsi->mac_filter_hash_lock); 3180 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { 3181 if (is_valid_ether_addr(f->macaddr)) 3182 macaddr = f->macaddr; 3183 } 3184 if (macaddr) 3185 ether_addr_copy(vf->default_lan_addr.addr, macaddr); 3186 spin_unlock_bh(&vsi->mac_filter_hash_lock); 3187 } 3188 error_param: 3189 /* send the response to the VF */ 3190 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret); 3191 } 3192 3193 /** 3194 * i40e_vc_add_vlan_msg 3195 * @vf: pointer to the VF info 3196 * @msg: pointer to the msg buffer 3197 * 3198 * program guest vlan id 3199 **/ 3200 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg) 3201 { 3202 struct virtchnl_vlan_filter_list *vfl = 3203 (struct virtchnl_vlan_filter_list *)msg; 3204 struct i40e_pf *pf = vf->pf; 3205 struct i40e_vsi *vsi = NULL; 3206 int aq_ret = 0; 3207 int i; 3208 3209 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) && 3210 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 3211 dev_err(&pf->pdev->dev, 3212 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n"); 3213 goto error_param; 3214 } 3215 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 3216 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { 3217 aq_ret = -EINVAL; 3218 goto error_param; 3219 } 3220 3221 for (i = 0; i < vfl->num_elements; i++) { 3222 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 3223 aq_ret = -EINVAL; 3224 dev_err(&pf->pdev->dev, 3225 "invalid VF VLAN id %d\n", vfl->vlan_id[i]); 3226 goto error_param; 3227 } 3228 } 3229 vsi = pf->vsi[vf->lan_vsi_idx]; 3230 if (vsi->info.pvid) { 3231 aq_ret = -EINVAL; 3232 goto error_param; 3233 } 3234 3235 i40e_vlan_stripping_enable(vsi); 3236 for (i = 0; i < vfl->num_elements; i++) { 3237 /* add new VLAN filter */ 3238 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]); 3239 if (!ret) 3240 vf->num_vlan++; 3241 3242 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 3243 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 3244 true, 3245 vfl->vlan_id[i], 3246 NULL); 3247 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 3248 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 3249 true, 3250 vfl->vlan_id[i], 3251 NULL); 3252 3253 if (ret) 3254 dev_err(&pf->pdev->dev, 3255 "Unable to add VLAN filter %d for VF %d, error %d\n", 3256 vfl->vlan_id[i], vf->vf_id, ret); 3257 } 3258 3259 error_param: 3260 /* send the response to the VF */ 3261 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret); 3262 } 3263 3264 /** 3265 * i40e_vc_remove_vlan_msg 3266 * @vf: pointer to the VF info 3267 * @msg: pointer to the msg buffer 3268 * 3269 * remove programmed guest vlan id 3270 **/ 3271 
static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) 3272 { 3273 struct virtchnl_vlan_filter_list *vfl = 3274 (struct virtchnl_vlan_filter_list *)msg; 3275 struct i40e_pf *pf = vf->pf; 3276 struct i40e_vsi *vsi = NULL; 3277 int aq_ret = 0; 3278 int i; 3279 3280 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || 3281 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { 3282 aq_ret = -EINVAL; 3283 goto error_param; 3284 } 3285 3286 for (i = 0; i < vfl->num_elements; i++) { 3287 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 3288 aq_ret = -EINVAL; 3289 goto error_param; 3290 } 3291 } 3292 3293 vsi = pf->vsi[vf->lan_vsi_idx]; 3294 if (vsi->info.pvid) { 3295 if (vfl->num_elements > 1 || vfl->vlan_id[0]) 3296 aq_ret = -EINVAL; 3297 goto error_param; 3298 } 3299 3300 for (i = 0; i < vfl->num_elements; i++) { 3301 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); 3302 vf->num_vlan--; 3303 3304 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 3305 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 3306 false, 3307 vfl->vlan_id[i], 3308 NULL); 3309 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 3310 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 3311 false, 3312 vfl->vlan_id[i], 3313 NULL); 3314 } 3315 3316 error_param: 3317 /* send the response to the VF */ 3318 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret); 3319 } 3320 3321 /** 3322 * i40e_vc_rdma_msg 3323 * @vf: pointer to the VF info 3324 * @msg: pointer to the msg buffer 3325 * @msglen: msg length 3326 * 3327 * called from the VF for the iwarp msgs 3328 **/ 3329 static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 3330 { 3331 struct i40e_pf *pf = vf->pf; 3332 struct i40e_vsi *main_vsi; 3333 int aq_ret = 0; 3334 int abs_vf_id; 3335 3336 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 3337 !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) { 3338 aq_ret = -EINVAL; 3339 goto error_param; 3340 } 3341 3342 main_vsi = i40e_pf_get_main_vsi(pf); 3343 abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; 3344 i40e_notify_client_of_vf_msg(main_vsi, abs_vf_id, msg, msglen); 3345 3346 error_param: 3347 /* send the response to the VF */ 3348 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_RDMA, 3349 aq_ret); 3350 } 3351 3352 /** 3353 * i40e_vc_rdma_qvmap_msg 3354 * @vf: pointer to the VF info 3355 * @msg: pointer to the msg buffer 3356 * @config: config qvmap or release it 3357 * 3358 * called from the VF for the iwarp msgs 3359 **/ 3360 static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config) 3361 { 3362 struct virtchnl_rdma_qvlist_info *qvlist_info = 3363 (struct virtchnl_rdma_qvlist_info *)msg; 3364 int aq_ret = 0; 3365 3366 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 3367 !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) { 3368 aq_ret = -EINVAL; 3369 goto error_param; 3370 } 3371 3372 if (config) { 3373 if (i40e_config_rdma_qvlist(vf, qvlist_info)) 3374 aq_ret = -EINVAL; 3375 } else { 3376 i40e_release_rdma_qvlist(vf); 3377 } 3378 3379 error_param: 3380 /* send the response to the VF */ 3381 return i40e_vc_send_resp_to_vf(vf, 3382 config ? 
VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP : 3383 VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP, 3384 aq_ret); 3385 } 3386 3387 /** 3388 * i40e_vc_config_rss_key 3389 * @vf: pointer to the VF info 3390 * @msg: pointer to the msg buffer 3391 * 3392 * Configure the VF's RSS key 3393 **/ 3394 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg) 3395 { 3396 struct virtchnl_rss_key *vrk = 3397 (struct virtchnl_rss_key *)msg; 3398 struct i40e_pf *pf = vf->pf; 3399 struct i40e_vsi *vsi = NULL; 3400 int aq_ret = 0; 3401 3402 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || 3403 !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) || 3404 vrk->key_len != I40E_HKEY_ARRAY_SIZE) { 3405 aq_ret = -EINVAL; 3406 goto err; 3407 } 3408 3409 vsi = pf->vsi[vf->lan_vsi_idx]; 3410 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0); 3411 err: 3412 /* send the response to the VF */ 3413 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, 3414 aq_ret); 3415 } 3416 3417 /** 3418 * i40e_vc_config_rss_lut 3419 * @vf: pointer to the VF info 3420 * @msg: pointer to the msg buffer 3421 * 3422 * Configure the VF's RSS LUT 3423 **/ 3424 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg) 3425 { 3426 struct virtchnl_rss_lut *vrl = 3427 (struct virtchnl_rss_lut *)msg; 3428 struct i40e_pf *pf = vf->pf; 3429 struct i40e_vsi *vsi = NULL; 3430 int aq_ret = 0; 3431 u16 i; 3432 3433 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || 3434 !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) || 3435 vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) { 3436 aq_ret = -EINVAL; 3437 goto err; 3438 } 3439 3440 for (i = 0; i < vrl->lut_entries; i++) 3441 if (vrl->lut[i] >= vf->num_queue_pairs) { 3442 aq_ret = -EINVAL; 3443 goto err; 3444 } 3445 3446 vsi = pf->vsi[vf->lan_vsi_idx]; 3447 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE); 3448 /* send the response to the VF */ 3449 err: 3450 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, 3451 aq_ret); 3452 } 3453 3454 /** 3455 * i40e_vc_get_rss_hashcfg 3456 * @vf: pointer to the VF info 3457 * @msg: pointer to the msg buffer 3458 * 3459 * Return the RSS Hash configuration bits allowed by the hardware 3460 **/ 3461 static int i40e_vc_get_rss_hashcfg(struct i40e_vf *vf, u8 *msg) 3462 { 3463 struct virtchnl_rss_hashcfg *vrh = NULL; 3464 struct i40e_pf *pf = vf->pf; 3465 int aq_ret = 0; 3466 int len = 0; 3467 3468 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 3469 aq_ret = -EINVAL; 3470 goto err; 3471 } 3472 len = sizeof(struct virtchnl_rss_hashcfg); 3473 3474 vrh = kzalloc(len, GFP_KERNEL); 3475 if (!vrh) { 3476 aq_ret = -ENOMEM; 3477 len = 0; 3478 goto err; 3479 } 3480 vrh->hashcfg = i40e_pf_get_default_rss_hashcfg(pf); 3481 err: 3482 /* send the response back to the VF */ 3483 aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, 3484 aq_ret, (u8 *)vrh, len); 3485 kfree(vrh); 3486 return aq_ret; 3487 } 3488 3489 /** 3490 * i40e_vc_set_rss_hashcfg 3491 * @vf: pointer to the VF info 3492 * @msg: pointer to the msg buffer 3493 * 3494 * Set the RSS Hash configuration bits for the VF 3495 **/ 3496 static int i40e_vc_set_rss_hashcfg(struct i40e_vf *vf, u8 *msg) 3497 { 3498 struct virtchnl_rss_hashcfg *vrh = 3499 (struct virtchnl_rss_hashcfg *)msg; 3500 struct i40e_pf *pf = vf->pf; 3501 struct i40e_hw *hw = &pf->hw; 3502 int aq_ret = 0; 3503 3504 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 3505 aq_ret = -EINVAL; 3506 goto err; 3507 } 3508 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), 3509 (u32)vrh->hashcfg); 3510 i40e_write_rx_ctl(hw, 
I40E_VFQF_HENA1(1, vf->vf_id), 3511 (u32)(vrh->hashcfg >> 32)); 3512 3513 /* send the response to the VF */ 3514 err: 3515 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, aq_ret); 3516 } 3517 3518 /** 3519 * i40e_vc_enable_vlan_stripping 3520 * @vf: pointer to the VF info 3521 * @msg: pointer to the msg buffer 3522 * 3523 * Enable vlan header stripping for the VF 3524 **/ 3525 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg) 3526 { 3527 struct i40e_vsi *vsi; 3528 int aq_ret = 0; 3529 3530 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 3531 aq_ret = -EINVAL; 3532 goto err; 3533 } 3534 3535 vsi = vf->pf->vsi[vf->lan_vsi_idx]; 3536 i40e_vlan_stripping_enable(vsi); 3537 3538 /* send the response to the VF */ 3539 err: 3540 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, 3541 aq_ret); 3542 } 3543 3544 /** 3545 * i40e_vc_disable_vlan_stripping 3546 * @vf: pointer to the VF info 3547 * @msg: pointer to the msg buffer 3548 * 3549 * Disable vlan header stripping for the VF 3550 **/ 3551 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg) 3552 { 3553 struct i40e_vsi *vsi; 3554 int aq_ret = 0; 3555 3556 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 3557 aq_ret = -EINVAL; 3558 goto err; 3559 } 3560 3561 vsi = vf->pf->vsi[vf->lan_vsi_idx]; 3562 i40e_vlan_stripping_disable(vsi); 3563 3564 /* send the response to the VF */ 3565 err: 3566 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 3567 aq_ret); 3568 } 3569 3570 /** 3571 * i40e_validate_cloud_filter 3572 * @vf: pointer to VF structure 3573 * @tc_filter: pointer to filter requested 3574 * 3575 * This function validates cloud filter programmed as TC filter for ADq 3576 **/ 3577 static int i40e_validate_cloud_filter(struct i40e_vf *vf, 3578 struct virtchnl_filter *tc_filter) 3579 { 3580 struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec; 3581 struct virtchnl_l4_spec data = tc_filter->data.tcp_spec; 3582 struct i40e_pf *pf = vf->pf; 3583 struct i40e_vsi *vsi = NULL; 3584 struct i40e_mac_filter *f; 3585 struct hlist_node *h; 3586 bool found = false; 3587 int bkt; 3588 3589 if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) { 3590 dev_info(&pf->pdev->dev, 3591 "VF %d: ADQ doesn't support this action (%d)\n", 3592 vf->vf_id, tc_filter->action); 3593 goto err; 3594 } 3595 3596 /* action_meta is TC number here to which the filter is applied */ 3597 if (!tc_filter->action_meta || 3598 tc_filter->action_meta >= vf->num_tc) { 3599 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n", 3600 vf->vf_id, tc_filter->action_meta); 3601 goto err; 3602 } 3603 3604 /* Check filter if it's programmed for advanced mode or basic mode. 3605 * There are two ADq modes (for VF only), 3606 * 1. Basic mode: intended to allow as many filter options as possible 3607 * to be added to a VF in Non-trusted mode. Main goal is 3608 * to add filters to its own MAC and VLAN id. 3609 * 2. Advanced mode: is for allowing filters to be applied other than 3610 * its own MAC or VLAN. This mode requires the VF to be 3611 * Trusted. 
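 * E.g. an untrusted VF steering TCP flows addressed to its own MAC/VLAN
 * into one of its TCs is basic mode, while matching on an arbitrary
 * destination IP with no MAC ownership falls into advanced mode and is
 * rejected below unless the VF is trusted.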
3612 */ 3613 if (mask.dst_mac[0] && !mask.dst_ip[0]) { 3614 vsi = pf->vsi[vf->lan_vsi_idx]; 3615 f = i40e_find_mac(vsi, data.dst_mac); 3616 3617 if (!f) { 3618 dev_info(&pf->pdev->dev, 3619 "Destination MAC %pM doesn't belong to VF %d\n", 3620 data.dst_mac, vf->vf_id); 3621 goto err; 3622 } 3623 3624 if (mask.vlan_id) { 3625 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, 3626 hlist) { 3627 if (f->vlan == ntohs(data.vlan_id)) { 3628 found = true; 3629 break; 3630 } 3631 } 3632 if (!found) { 3633 dev_info(&pf->pdev->dev, 3634 "VF %d doesn't have any VLAN id %u\n", 3635 vf->vf_id, ntohs(data.vlan_id)); 3636 goto err; 3637 } 3638 } 3639 } else { 3640 /* Check if VF is trusted */ 3641 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 3642 dev_err(&pf->pdev->dev, 3643 "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n", 3644 vf->vf_id); 3645 return -EIO; 3646 } 3647 } 3648 3649 if (mask.dst_mac[0] & data.dst_mac[0]) { 3650 if (is_broadcast_ether_addr(data.dst_mac) || 3651 is_zero_ether_addr(data.dst_mac)) { 3652 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n", 3653 vf->vf_id, data.dst_mac); 3654 goto err; 3655 } 3656 } 3657 3658 if (mask.src_mac[0] & data.src_mac[0]) { 3659 if (is_broadcast_ether_addr(data.src_mac) || 3660 is_zero_ether_addr(data.src_mac)) { 3661 dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n", 3662 vf->vf_id, data.src_mac); 3663 goto err; 3664 } 3665 } 3666 3667 if (mask.dst_port & data.dst_port) { 3668 if (!data.dst_port) { 3669 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n", 3670 vf->vf_id); 3671 goto err; 3672 } 3673 } 3674 3675 if (mask.src_port & data.src_port) { 3676 if (!data.src_port) { 3677 dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n", 3678 vf->vf_id); 3679 goto err; 3680 } 3681 } 3682 3683 if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW && 3684 tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) { 3685 dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n", 3686 vf->vf_id); 3687 goto err; 3688 } 3689 3690 if (mask.vlan_id & data.vlan_id) { 3691 if (ntohs(data.vlan_id) > I40E_MAX_VLANID) { 3692 dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n", 3693 vf->vf_id); 3694 goto err; 3695 } 3696 } 3697 3698 return 0; 3699 err: 3700 return -EIO; 3701 } 3702 3703 /** 3704 * i40e_find_vsi_from_seid - searches for the vsi with the given seid 3705 * @vf: pointer to the VF info 3706 * @seid: seid of the vsi it is searching for 3707 **/ 3708 static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid) 3709 { 3710 struct i40e_pf *pf = vf->pf; 3711 struct i40e_vsi *vsi = NULL; 3712 int i; 3713 3714 for (i = 0; i < vf->num_tc ; i++) { 3715 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id); 3716 if (vsi && vsi->seid == seid) 3717 return vsi; 3718 } 3719 return NULL; 3720 } 3721 3722 /** 3723 * i40e_del_all_cloud_filters 3724 * @vf: pointer to the VF info 3725 * 3726 * This function deletes all cloud filters 3727 **/ 3728 static void i40e_del_all_cloud_filters(struct i40e_vf *vf) 3729 { 3730 struct i40e_cloud_filter *cfilter = NULL; 3731 struct i40e_pf *pf = vf->pf; 3732 struct i40e_vsi *vsi = NULL; 3733 struct hlist_node *node; 3734 int ret; 3735 3736 hlist_for_each_entry_safe(cfilter, node, 3737 &vf->cloud_filter_list, cloud_node) { 3738 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid); 3739 3740 if (!vsi) { 3741 dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n", 3742 vf->vf_id, cfilter->seid); 3743 continue; 3744 } 3745 3746 if 
(cfilter->dst_port)
3747             ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3748                                                     false);
3749         else
3750             ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3751         if (ret)
3752             dev_err(&pf->pdev->dev,
3753                 "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
3754                 vf->vf_id, ERR_PTR(ret),
3755                 libie_aq_str(pf->hw.aq.asq_last_status));
3756 
3757         hlist_del(&cfilter->cloud_node);
3758         kfree(cfilter);
3759         vf->num_cloud_filters--;
3760     }
3761 }
3762 
3763 /**
3764  * i40e_vc_del_cloud_filter
3765  * @vf: pointer to the VF info
3766  * @msg: pointer to the msg buffer
3767  *
3768  * This function deletes a cloud filter programmed as TC filter for ADq
3769  **/
3770 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3771 {
3772     struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3773     struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3774     struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3775     struct i40e_cloud_filter cfilter, *cf = NULL;
3776     struct i40e_pf *pf = vf->pf;
3777     struct i40e_vsi *vsi = NULL;
3778     struct hlist_node *node;
3779     int aq_ret = 0;
3780     int i, ret;
3781 
3782     if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3783         aq_ret = -EINVAL;
3784         goto err;
3785     }
3786 
3787     if (!vf->adq_enabled) {
3788         dev_info(&pf->pdev->dev,
3789                  "VF %d: ADq not enabled, can't apply cloud filter\n",
3790                  vf->vf_id);
3791         aq_ret = -EINVAL;
3792         goto err;
3793     }
3794 
3795     if (i40e_validate_cloud_filter(vf, vcf)) {
3796         dev_info(&pf->pdev->dev,
3797                  "VF %d: Invalid input, can't apply cloud filter\n",
3798                  vf->vf_id);
3799         aq_ret = -EINVAL;
3800         goto err;
3801     }
3802 
3803     memset(&cfilter, 0, sizeof(cfilter));
3804     /* parse destination mac address */
3805     for (i = 0; i < ETH_ALEN; i++)
3806         cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3807 
3808     /* parse source mac address */
3809     for (i = 0; i < ETH_ALEN; i++)
3810         cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3811 
3812     cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3813     cfilter.dst_port = mask.dst_port & tcf.dst_port;
3814     cfilter.src_port = mask.src_port & tcf.src_port;
3815 
3816     switch (vcf->flow_type) {
3817     case VIRTCHNL_TCP_V4_FLOW:
3818         cfilter.n_proto = ETH_P_IP;
3819         if (mask.dst_ip[0] & tcf.dst_ip[0])
3820             memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3821                    ARRAY_SIZE(tcf.dst_ip));
3822         else if (mask.src_ip[0] & tcf.src_ip[0])
3823             memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3824                    ARRAY_SIZE(tcf.dst_ip));
3825         break;
3826     case VIRTCHNL_TCP_V6_FLOW:
3827         cfilter.n_proto = ETH_P_IPV6;
3828         if (mask.dst_ip[3] & tcf.dst_ip[3])
3829             memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3830                    sizeof(cfilter.ip.v6.dst_ip6));
3831         if (mask.src_ip[3] & tcf.src_ip[3])
3832             memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3833                    sizeof(cfilter.ip.v6.src_ip6));
3834         break;
3835     default:
3836         /* TC filter can be configured based on different combinations
3837          * and in this case IP is not a part of filter config
3838          */
3839         dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3840                  vf->vf_id);
3841     }
3842 
3843     /* get the VSI to which the TC belongs */
3844     vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3845     cfilter.seid = vsi->seid;
3846     cfilter.flags = vcf->field_flags;
3847 
3848     /* Deleting TC filter */
3849     if (tcf.dst_port)
3850         ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3851     else
3852         ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3853     if (ret) {
3854         dev_err(&pf->pdev->dev,
3855             "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
3856             vf->vf_id, ERR_PTR(ret),
3857             libie_aq_str(pf->hw.aq.asq_last_status));
3858         goto err;
3859     }
3860 
3861     hlist_for_each_entry_safe(cf, node,
3862                               &vf->cloud_filter_list, cloud_node) {
3863         if (cf->seid != cfilter.seid)
3864             continue;
3865         if (mask.dst_port)
3866             if (cfilter.dst_port != cf->dst_port)
3867                 continue;
3868         if (mask.dst_mac[0])
3869             if (!ether_addr_equal(cf->dst_mac, cfilter.dst_mac))
3870                 continue;
3871         /* for ipv4 data to be valid, only first byte of mask is set */
3872         if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3873             if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3874                        ARRAY_SIZE(tcf.dst_ip)))
3875                 continue;
3876         /* for ipv6, mask is set for all sixteen bytes (4 words) */
3877         if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3878             if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3879                        sizeof(cfilter.ip.v6.dst_ip6)))
3880                 continue;
3881         if (mask.vlan_id)
3882             if (cfilter.vlan_id != cf->vlan_id)
3883                 continue;
3884 
3885         hlist_del(&cf->cloud_node);
3886         kfree(cf);
3887         vf->num_cloud_filters--;
3888     }
3889 
3890 err:
3891     return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3892                                    aq_ret);
3893 }
3894 
3895 #define I40E_MAX_VF_CLOUD_FILTER 0xFF00
3896 
3897 /**
3898  * i40e_vc_add_cloud_filter
3899  * @vf: pointer to the VF info
3900  * @msg: pointer to the msg buffer
3901  *
3902  * This function adds a cloud filter programmed as TC filter for ADq
3903  **/
3904 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3905 {
3906     struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3907     struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3908     struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3909     struct i40e_cloud_filter *cfilter = NULL;
3910     struct i40e_pf *pf = vf->pf;
3911     struct i40e_vsi *vsi = NULL;
3912     int aq_ret = 0;
3913     int i;
3914 
3915     if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3916         aq_ret = -EINVAL;
3917         goto err_out;
3918     }
3919 
3920     if (!vf->adq_enabled) {
3921         dev_info(&pf->pdev->dev,
3922                  "VF %d: ADq is not enabled, can't apply cloud filter\n",
3923                  vf->vf_id);
3924         aq_ret = -EINVAL;
3925         goto err_out;
3926     }
3927 
3928     if (i40e_validate_cloud_filter(vf, vcf)) {
3929         dev_info(&pf->pdev->dev,
3930                  "VF %d: Invalid input, can't apply cloud filter\n",
3931                  vf->vf_id);
3932         aq_ret = -EINVAL;
3933         goto err_out;
3934     }
3935 
3936     if (vf->num_cloud_filters >= I40E_MAX_VF_CLOUD_FILTER) {
3937         dev_warn(&pf->pdev->dev,
3938                  "VF %d: Max number of filters reached, can't apply cloud filter\n",
3939                  vf->vf_id);
3940         aq_ret = -ENOSPC;
3941         goto err_out;
3942     }
3943 
3944     cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3945     if (!cfilter) {
3946         aq_ret = -ENOMEM;
3947         goto err_out;
3948     }
3949 
3950     /* parse destination mac address */
3951     for (i = 0; i < ETH_ALEN; i++)
3952         cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3953 
3954     /* parse source mac address */
3955     for (i = 0; i < ETH_ALEN; i++)
3956         cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3957 
3958     cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3959     cfilter->dst_port = mask.dst_port & tcf.dst_port;
3960     cfilter->src_port = mask.src_port & tcf.src_port;
3961 
3962     switch (vcf->flow_type) {
3963     case VIRTCHNL_TCP_V4_FLOW:
3964         cfilter->n_proto = ETH_P_IP;
3965         if (mask.dst_ip[0] & tcf.dst_ip[0])
3966             memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3967                    ARRAY_SIZE(tcf.dst_ip));
3968         else if (mask.src_ip[0] & tcf.src_ip[0])
3969             memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3970                    ARRAY_SIZE(tcf.dst_ip));
3971         break;
3972     case VIRTCHNL_TCP_V6_FLOW:
3973         cfilter->n_proto = ETH_P_IPV6;
3974         if
/**
 * i40e_vc_add_qch_msg: Add queue channel and enable ADq
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 **/
static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_tc_info *tci = (struct virtchnl_tc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int i, adq_request_qps = 0;
	int aq_ret = 0;
	u64 speed = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err;
	}

	/* ADq cannot be applied if spoof check is ON */
	if (vf->spoofchk) {
		dev_err(&pf->pdev->dev,
			"Spoof check is ON, turn it OFF to enable ADq\n");
		aq_ret = -EINVAL;
		goto err;
	}

	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
		dev_err(&pf->pdev->dev,
			"VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
			vf->vf_id);
		aq_ret = -EINVAL;
		goto err;
	}

	/* max number of traffic classes for VF currently capped at 4 */
	if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
		dev_err(&pf->pdev->dev,
			"VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
			vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
		aq_ret = -EINVAL;
		goto err;
	}

	/* validate queues for each TC */
	for (i = 0; i < tci->num_tc; i++)
		if (!tci->list[i].count ||
		    tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
			dev_err(&pf->pdev->dev,
				"VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
				vf->vf_id, i, tci->list[i].count,
				I40E_DEFAULT_QUEUES_PER_VF);
			aq_ret = -EINVAL;
			goto err;
		}

	/* need Max VF queues but already have default number of queues */
	adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;

	if (pf->queues_left < adq_request_qps) {
		dev_err(&pf->pdev->dev,
			"No queues left to allocate to VF %d\n",
			vf->vf_id);
		aq_ret = -EINVAL;
		goto err;
	} else {
		/* we need to allocate max VF queues to enable ADq so as to
		 * make sure ADq enabled VF always gets back queues when it
		 * goes through a reset.
		 */
		vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
	}

	/* get link speed in Mbps to validate rate limit */
	speed = i40e_vc_link_speed2mbps(ls->link_speed);
	if (speed == SPEED_UNKNOWN) {
		dev_err(&pf->pdev->dev,
			"Cannot detect link speed\n");
		aq_ret = -EINVAL;
		goto err;
	}

	/* parse data from the queue channel info */
	vf->num_tc = tci->num_tc;
	for (i = 0; i < vf->num_tc; i++) {
		if (tci->list[i].max_tx_rate) {
			if (tci->list[i].max_tx_rate > speed) {
				dev_err(&pf->pdev->dev,
					"Invalid max tx rate %llu specified for VF %d.",
					tci->list[i].max_tx_rate,
					vf->vf_id);
				aq_ret = -EINVAL;
				goto err;
			} else {
				vf->ch[i].max_tx_rate =
					tci->list[i].max_tx_rate;
			}
		}
		vf->ch[i].num_qps = tci->list[i].count;
	}

	/* set this flag only after making sure all inputs are sane */
	vf->adq_enabled = true;

	/* reset the VF in order to allocate resources */
	i40e_vc_reset_vf(vf, true);

	return 0;

	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
				       aq_ret);
}
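/* Worked example (not compiled): a VF requesting two TCs,
 * { count = 4, max_tx_rate = 1000 } and { count = 2, max_tx_rate = 0 },
 * on a 10G link passes every check above: num_tc = 2 is within
 * 1..I40E_MAX_VF_VSI, each count is within 1..I40E_DEFAULT_QUEUES_PER_VF,
 * and 1000 Mbps <= 10000 Mbps.  The VF is then sized to I40E_MAX_VF_QUEUES
 * queue pairs and reset so the new queue layout takes effect.
 */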
/**
 * i40e_vc_del_qch_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 **/
static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
{
	struct i40e_pf *pf = vf->pf;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err;
	}

	if (vf->adq_enabled) {
		i40e_del_all_cloud_filters(vf);
		i40e_del_qch(vf);
		vf->adq_enabled = false;
		vf->num_tc = 0;
		dev_info(&pf->pdev->dev,
			 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
			 vf->vf_id);
	} else {
		dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
			 vf->vf_id);
		aq_ret = -EINVAL;
	}

	/* reset the VF in order to allocate resources */
	i40e_vc_reset_vf(vf, true);

	return 0;

err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
				       aq_ret);
}
/**
 * i40e_vc_process_vf_msg
 * @pf: pointer to the PF structure
 * @vf_id: source VF id
 * @v_opcode: operation code
 * @v_retval: unused return value code
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the common aeq/arq handler to
 * process requests from VFs
 **/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
			   u32 __always_unused v_retval, u8 *msg, u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
	struct i40e_vf *vf;
	int ret;

	pf->vf_aq_requests++;
	if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
		return -EINVAL;
	vf = &pf->vf[local_vf_id];

	/* Check if VF is disabled. */
	if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
		return -EINVAL;

	/* perform basic checks on the msg */
	ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);

	if (ret) {
		i40e_vc_send_resp_to_vf(vf, v_opcode, -EINVAL);
		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
			local_vf_id, v_opcode, msglen);
		return ret;
	}

	switch (v_opcode) {
	case VIRTCHNL_OP_VERSION:
		ret = i40e_vc_get_version_msg(vf, msg);
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		ret = i40e_vc_get_vf_resources_msg(vf, msg);
		i40e_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_RESET_VF:
		i40e_vc_reset_vf(vf, false);
		ret = 0;
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ret = i40e_vc_config_queues_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ret = i40e_vc_config_irq_map_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		ret = i40e_vc_enable_queues_msg(vf, msg);
		i40e_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		ret = i40e_vc_disable_queues_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		ret = i40e_vc_add_mac_addr_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		ret = i40e_vc_del_mac_addr_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		ret = i40e_vc_add_vlan_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		ret = i40e_vc_remove_vlan_msg(vf, msg);
		break;
	case VIRTCHNL_OP_GET_STATS:
		ret = i40e_vc_get_stats_msg(vf, msg);
		break;
	case VIRTCHNL_OP_RDMA:
		ret = i40e_vc_rdma_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
		ret = i40e_vc_rdma_qvmap_msg(vf, msg, true);
		break;
	case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
		ret = i40e_vc_rdma_qvmap_msg(vf, msg, false);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		ret = i40e_vc_config_rss_key(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		ret = i40e_vc_config_rss_lut(vf, msg);
		break;
	case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
		ret = i40e_vc_get_rss_hashcfg(vf, msg);
		break;
	case VIRTCHNL_OP_SET_RSS_HASHCFG:
		ret = i40e_vc_set_rss_hashcfg(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
		ret = i40e_vc_enable_vlan_stripping(vf, msg);
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		ret = i40e_vc_disable_vlan_stripping(vf, msg);
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES:
		ret = i40e_vc_request_queues_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_CHANNELS:
		ret = i40e_vc_add_qch_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DISABLE_CHANNELS:
		ret = i40e_vc_del_qch_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
		ret = i40e_vc_add_cloud_filter(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
		ret = i40e_vc_del_cloud_filter(vf, msg);
		break;
	case VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
			v_opcode, local_vf_id);
		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
					      -EOPNOTSUPP);
		break;
	}

	return ret;
}
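/* Illustrative note: vf_id here is the absolute VF id taken from the admin
 * queue event.  With hw->func_caps.vf_base_id == 64 (an example value), a
 * message from absolute VF 66 yields local_vf_id = 66 - 64 = 2, which is
 * the index into pf->vf[].
 */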
/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the PF structure
 *
 * called from the VFLR irq handler to
 * free up VF resources and state variables
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;
	struct i40e_vf *vf;
	int vf_id;

	if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
		return 0;

	/* Re-enable the VFLR interrupt cause here, before looking for which
	 * VF got reset. Otherwise, if another VF gets a reset while the
	 * first one is being processed, that interrupt will be lost, and
	 * that VF will be stuck in reset forever.
	 */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);

	clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out which VFs got reset */
		vf = &pf->vf[vf_id];
		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
			if (!i40e_reset_vf(vf, true)) {
				/* At least one VF did not finish resetting, retry next time */
				set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
			}
	}

	return 0;
}

/**
 * i40e_validate_vf
 * @pf: the physical function
 * @vf_id: VF identifier
 *
 * Check that the VF is enabled and the VSI exists.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
{
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev,
			"Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto err_out;
	}
	vf = &pf->vf[vf_id];
	vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
	if (!vsi)
		ret = -EINVAL;
err_out:
	return ret;
}

/**
 * i40e_check_vf_init_timeout
 * @vf: the virtual function
 *
 * Check whether the VF's initialization is complete and, if not, wait up
 * to 300 ms for it to finish.
 *
 * Returns true when the VF is initialized, false on timeout
 **/
static bool i40e_check_vf_init_timeout(struct i40e_vf *vf)
{
	int i;

	/* When the VF is resetting wait until it is done.
	 * It can take up to 200 milliseconds, but wait for
	 * up to 300 milliseconds to be safe.
	 */
	for (i = 0; i < 15; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
			return true;
		msleep(20);
	}

	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&vf->pf->pdev->dev,
			"VF %d still in reset. Try again.\n", vf->vf_id);
		return false;
	}

	return true;
}
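/* Design note for the .ndo_set_vf_* handlers below: each entry point first
 * serializes against other virtchnl operations via
 * test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state) and returns
 * -EAGAIN when one is in flight, then validates vf_id and, where a VSI is
 * modified, waits for VF init with i40e_check_vf_init_timeout().  Every
 * exit path must clear the pending bit.
 */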
/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: MAC address
 *
 * program VF MAC address
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;
	struct hlist_node *h;
	int bkt;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error_param;

	vf = &pf->vf[vf_id];
	if (!i40e_check_vf_init_timeout(vf)) {
		ret = -EAGAIN;
		goto error_param;
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	if (is_multicast_ether_addr(mac)) {
		dev_err(&pf->pdev->dev,
			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	/* Lock once because the add/del_filter functions invoked below
	 * require mac_filter_hash_lock to be held
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* delete the temporary mac address */
	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);

	/* Delete all the filters for this VSI - we're going to kill it
	 * anyway.
	 */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		__i40e_del_filter(vsi, f);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program mac filter */
	if (i40e_sync_vsi_filters(vsi)) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		ret = -EIO;
		goto error_param;
	}
	ether_addr_copy(vf->default_lan_addr.addr, mac);

	if (is_zero_ether_addr(mac)) {
		vf->pf_set_mac = false;
		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
	} else {
		vf->pf_set_mac = true;
		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
			 mac, vf_id);
	}

	/* Force the VF interface down so it has to bring up with new MAC
	 * address
	 */
	i40e_vc_reset_vf(vf, true);
	dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");

error_param:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
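/* Usage note: this handler is normally reached from the PF host through
 * iproute2, e.g.
 *
 *	ip link set <pf-ifname> vf 0 mac 02:11:22:33:44:55
 *
 * (interface name and address are illustrative).  Passing an all-zero MAC
 * clears the administratively set address again.
 */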
/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID
 * @qos: priority setting
 * @vlan_proto: VLAN protocol
 *
 * program VF VLAN id and/or QoS
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
			      u16 vlan_id, u8 qos, __be16 vlan_proto)
{
	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	bool allmulti = false, alluni = false;
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error_pvid;

	if (vlan_id > I40E_MAX_VLANID || qos > 7) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	if (vlan_proto != htons(ETH_P_8021Q)) {
		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
		ret = -EPROTONOSUPPORT;
		goto error_pvid;
	}

	vf = &pf->vf[vf_id];
	if (!i40e_check_vf_init_timeout(vf)) {
		ret = -EAGAIN;
		goto error_pvid;
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
		/* duplicate request, so just return success */
		goto error_pvid;

	i40e_vlan_stripping_enable(vsi);

	/* Lock once because multiple functions below iterate the list */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* Check for condition where there was already a port VLAN ID
	 * filter set and now it is being deleted by setting it to zero.
	 * Additionally check for the condition where there was a port
	 * VLAN but now there is a new and different port VLAN being set.
	 * Before deleting all the old VLAN filters we must add new ones
	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
	 * MAC addresses deleted.
	 */
	if ((!(vlan_id || qos) ||
	     vlanprio != le16_to_cpu(vsi->info.pvid)) &&
	    vsi->info.pvid) {
		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}
	}

	if (vsi->info.pvid) {
		/* remove all filters on the old VLAN */
		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
					   VLAN_VID_MASK));
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* disable promisc modes in case they were enabled */
	ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
					      allmulti, alluni);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
		goto error_pvid;
	}

	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi, vlanprio);
	else
		i40e_vsi_remove_pvid(vsi);
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter for each MAC */
		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}

		/* remove the previously added non-VLAN MAC filters */
		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
		alluni = true;

	if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
		allmulti = true;

	/* Schedule the worker thread to take care of applying changes */
	i40e_service_event_schedule(vsi->back);

	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}

	/* The Port VLAN needs to be saved across resets the same as the
	 * default LAN MAC address.
	 */
	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);

	i40e_vc_reset_vf(vf, true);
	/* During reset the VF got a new VSI, so refresh the pointer. */
	vsi = pf->vsi[vf->lan_vsi_idx];

	ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
		goto error_pvid;
	}

	ret = 0;

error_pvid:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
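/* Worked example (assuming I40E_VLAN_PRIORITY_SHIFT is the standard 802.1Q
 * PCP offset of 13): vlan_id = 5 with qos = 3 encodes as
 *
 *	vlanprio = 5 | (3 << 13) = 0x6005
 *
 * which is the TCI-style value compared against vsi->info.pvid above to
 * short-circuit duplicate requests.
 */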
/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: Minimum Tx rate
 * @max_tx_rate: Maximum Tx rate
 *
 * configure VF Tx rate
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
		       int max_tx_rate)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error;

	if (min_tx_rate) {
		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) specified for VF %d; only 0 is supported.\n",
			min_tx_rate, vf_id);
		ret = -EINVAL;
		goto error;
	}

	vf = &pf->vf[vf_id];
	if (!i40e_check_vf_init_timeout(vf)) {
		ret = -EAGAIN;
		goto error;
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
	if (ret)
		goto error;

	vf->tx_rate = max_tx_rate;
error:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
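/* Usage note: reached via iproute2 on the PF host, e.g.
 *
 *	ip link set <pf-ifname> vf 0 max_tx_rate 1000
 *
 * (interface name illustrative; rates are in Mbps).  Any non-zero
 * min_tx_rate is rejected above because the driver implements only a
 * maximum-rate limit, not a minimum guarantee.
 */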
/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error_param;

	vf = &pf->vf[vf_id];
	/* first vsi is always the LAN vsi */
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		ret = -ENOENT;
		goto error_param;
	}

	ivi->vf = vf_id;

	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);

	ivi->max_tx_rate = vf->tx_rate;
	ivi->min_tx_rate = 0;
	ivi->vlan = le16_get_bits(vsi->info.pvid, I40E_VLAN_MASK);
	ivi->qos = le16_get_bits(vsi->info.pvid, I40E_PRIORITY_MASK);
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->spoofchk = vf->spoofchk;
	ivi->trusted = vf->trusted;
	ret = 0;

error_param:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
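/* Design note: the reported link state is a tri-state.  When link_forced is
 * false the VF follows the physical link (IFLA_VF_LINK_STATE_AUTO); when
 * forced, link_up selects ENABLE or DISABLE.  i40e_ndo_set_vf_link_state()
 * below writes these fields, and i40e_set_vf_link_state() folds them into
 * the virtchnl link event delivered to the VF.
 */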
/**
 * i40e_ndo_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link: required link state
 *
 * Set the link state of a specified VF, regardless of physical link state
 **/
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	struct virtchnl_pf_event pfe;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi *vsi;
	unsigned long q_map;
	struct i40e_vf *vf;
	int abs_vf_id;
	int ret = 0;
	int tmp;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_out;
	}

	vf = &pf->vf[vf_id];
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		vf->is_disabled_from_host = false;
		/* reset needed to reinit VF resources */
		i40e_vc_reset_vf(vf, true);
		i40e_set_vf_link_state(vf, &pfe, ls);
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		vf->is_disabled_from_host = false;
		/* reset needed to reinit VF resources */
		i40e_vc_reset_vf(vf, true);
		i40e_set_vf_link_state(vf, &pfe, ls);
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		i40e_set_vf_link_state(vf, &pfe, ls);

		vsi = pf->vsi[vf->lan_vsi_idx];
		q_map = BIT(vsi->num_queue_pairs) - 1;

		vf->is_disabled_from_host = true;

		/* Try to stop both Tx & Rx rings even if one of the calls
		 * fails, to ensure the rings are stopped even on error.
		 * If either returns an error, the first error that
		 * occurred is returned.
		 */
		tmp = i40e_ctrl_vf_tx_rings(vsi, q_map, false);
		ret = i40e_ctrl_vf_rx_rings(vsi, q_map, false);

		ret = tmp ? tmp : ret;
		break;
	default:
		ret = -EINVAL;
		goto error_out;
	}
	/* Notify the VF of its new link state */
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);

error_out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
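/* Worked example (not compiled): in the DISABLE branch above, a VSI with
 * num_queue_pairs == 4 gives q_map = BIT(4) - 1 = 0xf, a bitmask selecting
 * queue pairs 0-3 for the Tx/Rx ring stop calls.
 */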
"" : "un"); 4966 4967 if (vf->adq_enabled) { 4968 if (!vf->trusted) { 4969 dev_info(&pf->pdev->dev, 4970 "VF %u no longer Trusted, deleting all cloud filters\n", 4971 vf_id); 4972 i40e_del_all_cloud_filters(vf); 4973 } 4974 } 4975 4976 out: 4977 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4978 return ret; 4979 } 4980 4981 /** 4982 * i40e_get_vf_stats - populate some stats for the VF 4983 * @netdev: the netdev of the PF 4984 * @vf_id: the host OS identifier (0-127) 4985 * @vf_stats: pointer to the OS memory to be initialized 4986 */ 4987 int i40e_get_vf_stats(struct net_device *netdev, int vf_id, 4988 struct ifla_vf_stats *vf_stats) 4989 { 4990 struct i40e_netdev_priv *np = netdev_priv(netdev); 4991 struct i40e_pf *pf = np->vsi->back; 4992 struct i40e_eth_stats *stats; 4993 struct i40e_vsi *vsi; 4994 struct i40e_vf *vf; 4995 4996 /* validate the request */ 4997 if (i40e_validate_vf(pf, vf_id)) 4998 return -EINVAL; 4999 5000 vf = &pf->vf[vf_id]; 5001 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { 5002 dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id); 5003 return -EBUSY; 5004 } 5005 5006 vsi = pf->vsi[vf->lan_vsi_idx]; 5007 if (!vsi) 5008 return -EINVAL; 5009 5010 i40e_update_eth_stats(vsi); 5011 stats = &vsi->eth_stats; 5012 5013 memset(vf_stats, 0, sizeof(*vf_stats)); 5014 5015 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast + 5016 stats->rx_multicast; 5017 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast + 5018 stats->tx_multicast; 5019 vf_stats->rx_bytes = stats->rx_bytes; 5020 vf_stats->tx_bytes = stats->tx_bytes; 5021 vf_stats->broadcast = stats->rx_broadcast; 5022 vf_stats->multicast = stats->rx_multicast; 5023 vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other; 5024 vf_stats->tx_dropped = stats->tx_errors; 5025 5026 return 0; 5027 } 5028