1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2013 - 2018 Intel Corporation. */ 3 4 #include "i40e.h" 5 #include "i40e_lan_hmc.h" 6 #include "i40e_virtchnl_pf.h" 7 8 /*********************notification routines***********************/ 9 10 /** 11 * i40e_vc_vf_broadcast 12 * @pf: pointer to the PF structure 13 * @v_opcode: operation code 14 * @v_retval: return value 15 * @msg: pointer to the msg buffer 16 * @msglen: msg length 17 * 18 * send a message to all VFs on a given PF 19 **/ 20 static void i40e_vc_vf_broadcast(struct i40e_pf *pf, 21 enum virtchnl_ops v_opcode, 22 int v_retval, u8 *msg, 23 u16 msglen) 24 { 25 struct i40e_hw *hw = &pf->hw; 26 struct i40e_vf *vf = pf->vf; 27 int i; 28 29 for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { 30 int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; 31 /* Not all vfs are enabled so skip the ones that are not */ 32 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) && 33 !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 34 continue; 35 36 /* Ignore return value on purpose - a given VF may fail, but 37 * we need to keep going and send to all of them 38 */ 39 i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, 40 msg, msglen, NULL); 41 } 42 } 43 44 /** 45 * i40e_vc_link_speed2mbps 46 * converts i40e_aq_link_speed to integer value of Mbps 47 * @link_speed: the speed to convert 48 * 49 * return the speed as direct value of Mbps. 50 **/ 51 static u32 52 i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed) 53 { 54 switch (link_speed) { 55 case I40E_LINK_SPEED_100MB: 56 return SPEED_100; 57 case I40E_LINK_SPEED_1GB: 58 return SPEED_1000; 59 case I40E_LINK_SPEED_2_5GB: 60 return SPEED_2500; 61 case I40E_LINK_SPEED_5GB: 62 return SPEED_5000; 63 case I40E_LINK_SPEED_10GB: 64 return SPEED_10000; 65 case I40E_LINK_SPEED_20GB: 66 return SPEED_20000; 67 case I40E_LINK_SPEED_25GB: 68 return SPEED_25000; 69 case I40E_LINK_SPEED_40GB: 70 return SPEED_40000; 71 case I40E_LINK_SPEED_UNKNOWN: 72 return SPEED_UNKNOWN; 73 } 74 return SPEED_UNKNOWN; 75 } 76 77 /** 78 * i40e_set_vf_link_state 79 * @vf: pointer to the VF structure 80 * @pfe: pointer to PF event structure 81 * @ls: pointer to link status structure 82 * 83 * set a link state on a single vf 84 **/ 85 static void i40e_set_vf_link_state(struct i40e_vf *vf, 86 struct virtchnl_pf_event *pfe, struct i40e_link_status *ls) 87 { 88 u8 link_status = ls->link_info & I40E_AQ_LINK_UP; 89 90 if (vf->link_forced) 91 link_status = vf->link_up; 92 93 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) { 94 pfe->event_data.link_event_adv.link_speed = link_status ? 95 i40e_vc_link_speed2mbps(ls->link_speed) : 0; 96 pfe->event_data.link_event_adv.link_status = link_status; 97 } else { 98 pfe->event_data.link_event.link_speed = link_status ? 99 i40e_virtchnl_link_speed(ls->link_speed) : 0; 100 pfe->event_data.link_event.link_status = link_status; 101 } 102 } 103 104 /** 105 * i40e_vc_notify_vf_link_state 106 * @vf: pointer to the VF structure 107 * 108 * send a link status message to a single VF 109 **/ 110 static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) 111 { 112 struct virtchnl_pf_event pfe; 113 struct i40e_pf *pf = vf->pf; 114 struct i40e_hw *hw = &pf->hw; 115 struct i40e_link_status *ls = &pf->hw.phy.link_info; 116 int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; 117 118 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; 119 pfe.severity = PF_EVENT_SEVERITY_INFO; 120 121 i40e_set_vf_link_state(vf, &pfe, ls); 122 123 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 124 0, (u8 *)&pfe, sizeof(pfe), NULL); 125 } 126 127 /** 128 * i40e_vc_notify_link_state 129 * @pf: pointer to the PF structure 130 * 131 * send a link status message to all VFs on a given PF 132 **/ 133 void i40e_vc_notify_link_state(struct i40e_pf *pf) 134 { 135 int i; 136 137 for (i = 0; i < pf->num_alloc_vfs; i++) 138 i40e_vc_notify_vf_link_state(&pf->vf[i]); 139 } 140 141 /** 142 * i40e_vc_notify_reset 143 * @pf: pointer to the PF structure 144 * 145 * indicate a pending reset to all VFs on a given PF 146 **/ 147 void i40e_vc_notify_reset(struct i40e_pf *pf) 148 { 149 struct virtchnl_pf_event pfe; 150 151 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; 152 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; 153 i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0, 154 (u8 *)&pfe, sizeof(struct virtchnl_pf_event)); 155 } 156 157 #ifdef CONFIG_PCI_IOV 158 void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev) 159 { 160 u16 vf_id; 161 u16 pos; 162 163 /* Continue only if this is a PF */ 164 if (!pdev->is_physfn) 165 return; 166 167 if (!pci_num_vf(pdev)) 168 return; 169 170 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 171 if (pos) { 172 struct pci_dev *vf_dev = NULL; 173 174 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); 175 while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) { 176 if (vf_dev->is_virtfn && vf_dev->physfn == pdev) 177 pci_restore_msi_state(vf_dev); 178 } 179 } 180 } 181 #endif /* CONFIG_PCI_IOV */ 182 183 /** 184 * i40e_vc_notify_vf_reset 185 * @vf: pointer to the VF structure 186 * 187 * indicate a pending reset to the given VF 188 **/ 189 void i40e_vc_notify_vf_reset(struct i40e_vf *vf) 190 { 191 struct virtchnl_pf_event pfe; 192 int abs_vf_id; 193 194 /* validate the request */ 195 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) 196 return; 197 198 /* verify if the VF is in either init or active before proceeding */ 199 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) && 200 !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 201 return; 202 203 abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id; 204 205 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; 206 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; 207 i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT, 208 0, (u8 *)&pfe, 209 sizeof(struct virtchnl_pf_event), NULL); 210 } 211 /***********************misc routines*****************************/ 212 213 /** 214 * i40e_vc_reset_vf 215 * @vf: pointer to the VF info 216 * @notify_vf: notify vf about reset or not 217 * Reset VF handler. 218 **/ 219 void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf) 220 { 221 struct i40e_pf *pf = vf->pf; 222 int i; 223 224 if (notify_vf) 225 i40e_vc_notify_vf_reset(vf); 226 227 /* We want to ensure that an actual reset occurs initiated after this 228 * function was called. However, we do not want to wait forever, so 229 * we'll give a reasonable time and print a message if we failed to 230 * ensure a reset. 231 */ 232 for (i = 0; i < 20; i++) { 233 /* If PF is in VFs releasing state reset VF is impossible, 234 * so leave it. 235 */ 236 if (test_bit(__I40E_VFS_RELEASING, pf->state)) 237 return; 238 if (i40e_reset_vf(vf, false)) 239 return; 240 usleep_range(10000, 20000); 241 } 242 243 if (notify_vf) 244 dev_warn(&vf->pf->pdev->dev, 245 "Failed to initiate reset for VF %d after 200 milliseconds\n", 246 vf->vf_id); 247 else 248 dev_dbg(&vf->pf->pdev->dev, 249 "Failed to initiate reset for VF %d after 200 milliseconds\n", 250 vf->vf_id); 251 } 252 253 /** 254 * i40e_vc_isvalid_vsi_id 255 * @vf: pointer to the VF info 256 * @vsi_id: VF relative VSI id 257 * 258 * check for the valid VSI id 259 **/ 260 static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id) 261 { 262 struct i40e_pf *pf = vf->pf; 263 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); 264 265 return (vsi && (vsi->vf_id == vf->vf_id)); 266 } 267 268 /** 269 * i40e_vc_isvalid_queue_id 270 * @vf: pointer to the VF info 271 * @vsi_id: vsi id 272 * @qid: vsi relative queue id 273 * 274 * check for the valid queue id 275 **/ 276 static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id, 277 u16 qid) 278 { 279 struct i40e_pf *pf = vf->pf; 280 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); 281 282 return (vsi && (qid < vsi->alloc_queue_pairs)); 283 } 284 285 /** 286 * i40e_vc_isvalid_vector_id 287 * @vf: pointer to the VF info 288 * @vector_id: VF relative vector id 289 * 290 * check for the valid vector id 291 **/ 292 static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id) 293 { 294 struct i40e_pf *pf = vf->pf; 295 296 return vector_id < pf->hw.func_caps.num_msix_vectors_vf; 297 } 298 299 /***********************vf resource mgmt routines*****************/ 300 301 /** 302 * i40e_vc_get_pf_queue_id 303 * @vf: pointer to the VF info 304 * @vsi_id: id of VSI as provided by the FW 305 * @vsi_queue_id: vsi relative queue id 306 * 307 * return PF relative queue id 308 **/ 309 static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id, 310 u8 vsi_queue_id) 311 { 312 struct i40e_pf *pf = vf->pf; 313 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); 314 u16 pf_queue_id = I40E_QUEUE_END_OF_LIST; 315 316 if (!vsi) 317 return pf_queue_id; 318 319 if (le16_to_cpu(vsi->info.mapping_flags) & 320 I40E_AQ_VSI_QUE_MAP_NONCONTIG) 321 pf_queue_id = 322 le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]); 323 else 324 pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) + 325 vsi_queue_id; 326 327 return pf_queue_id; 328 } 329 330 /** 331 * i40e_get_real_pf_qid 332 * @vf: pointer to the VF info 333 * @vsi_id: vsi id 334 * @queue_id: queue number 335 * 336 * wrapper function to get pf_queue_id handling ADq code as well 337 **/ 338 static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id) 339 { 340 int i; 341 342 if (vf->adq_enabled) { 343 /* Although VF considers all the queues(can be 1 to 16) as its 344 * own but they may actually belong to different VSIs(up to 4). 345 * We need to find which queues belongs to which VSI. 346 */ 347 for (i = 0; i < vf->num_tc; i++) { 348 if (queue_id < vf->ch[i].num_qps) { 349 vsi_id = vf->ch[i].vsi_id; 350 break; 351 } 352 /* find right queue id which is relative to a 353 * given VSI. 354 */ 355 queue_id -= vf->ch[i].num_qps; 356 } 357 } 358 359 return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id); 360 } 361 362 /** 363 * i40e_config_irq_link_list 364 * @vf: pointer to the VF info 365 * @vsi_id: id of VSI as given by the FW 366 * @vecmap: irq map info 367 * 368 * configure irq link list from the map 369 **/ 370 static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, 371 struct virtchnl_vector_map *vecmap) 372 { 373 unsigned long linklistmap = 0, tempmap; 374 struct i40e_pf *pf = vf->pf; 375 struct i40e_hw *hw = &pf->hw; 376 u16 vsi_queue_id, pf_queue_id; 377 enum i40e_queue_type qtype; 378 u16 next_q, vector_id, size; 379 u32 reg, reg_idx; 380 u16 itr_idx = 0; 381 382 vector_id = vecmap->vector_id; 383 /* setup the head */ 384 if (0 == vector_id) 385 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); 386 else 387 reg_idx = I40E_VPINT_LNKLSTN( 388 ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) + 389 (vector_id - 1)); 390 391 if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) { 392 /* Special case - No queues mapped on this vector */ 393 wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK); 394 goto irq_list_done; 395 } 396 tempmap = vecmap->rxq_map; 397 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { 398 linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES * 399 vsi_queue_id)); 400 } 401 402 tempmap = vecmap->txq_map; 403 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { 404 linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES * 405 vsi_queue_id + 1)); 406 } 407 408 size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES; 409 next_q = find_first_bit(&linklistmap, size); 410 if (unlikely(next_q == size)) 411 goto irq_list_done; 412 413 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; 414 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; 415 pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id); 416 reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id); 417 418 wr32(hw, reg_idx, reg); 419 420 while (next_q < size) { 421 switch (qtype) { 422 case I40E_QUEUE_TYPE_RX: 423 reg_idx = I40E_QINT_RQCTL(pf_queue_id); 424 itr_idx = vecmap->rxitr_idx; 425 break; 426 case I40E_QUEUE_TYPE_TX: 427 reg_idx = I40E_QINT_TQCTL(pf_queue_id); 428 itr_idx = vecmap->txitr_idx; 429 break; 430 default: 431 break; 432 } 433 434 next_q = find_next_bit(&linklistmap, size, next_q + 1); 435 if (next_q < size) { 436 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; 437 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; 438 pf_queue_id = i40e_get_real_pf_qid(vf, 439 vsi_id, 440 vsi_queue_id); 441 } else { 442 pf_queue_id = I40E_QUEUE_END_OF_LIST; 443 qtype = 0; 444 } 445 446 /* format for the RQCTL & TQCTL regs is same */ 447 reg = (vector_id) | 448 (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | 449 (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | 450 BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) | 451 FIELD_PREP(I40E_QINT_RQCTL_ITR_INDX_MASK, itr_idx); 452 wr32(hw, reg_idx, reg); 453 } 454 455 /* if the vf is running in polling mode and using interrupt zero, 456 * need to disable auto-mask on enabling zero interrupt for VFs. 457 */ 458 if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) && 459 (vector_id == 0)) { 460 reg = rd32(hw, I40E_GLINT_CTL); 461 if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) { 462 reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK; 463 wr32(hw, I40E_GLINT_CTL, reg); 464 } 465 } 466 467 irq_list_done: 468 i40e_flush(hw); 469 } 470 471 /** 472 * i40e_release_rdma_qvlist 473 * @vf: pointer to the VF. 474 * 475 **/ 476 static void i40e_release_rdma_qvlist(struct i40e_vf *vf) 477 { 478 struct i40e_pf *pf = vf->pf; 479 struct virtchnl_rdma_qvlist_info *qvlist_info = vf->qvlist_info; 480 u32 msix_vf; 481 u32 i; 482 483 if (!vf->qvlist_info) 484 return; 485 486 msix_vf = pf->hw.func_caps.num_msix_vectors_vf; 487 for (i = 0; i < qvlist_info->num_vectors; i++) { 488 struct virtchnl_rdma_qv_info *qv_info; 489 u32 next_q_index, next_q_type; 490 struct i40e_hw *hw = &pf->hw; 491 u32 v_idx, reg_idx, reg; 492 493 qv_info = &qvlist_info->qv_info[i]; 494 v_idx = qv_info->v_idx; 495 if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) { 496 /* Figure out the queue after CEQ and make that the 497 * first queue. 498 */ 499 reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx; 500 reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx)); 501 next_q_index = FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK, 502 reg); 503 next_q_type = FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK, 504 reg); 505 506 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); 507 reg = (next_q_index & 508 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) | 509 (next_q_type << 510 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); 511 512 wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg); 513 } 514 } 515 kfree(vf->qvlist_info); 516 vf->qvlist_info = NULL; 517 } 518 519 /** 520 * i40e_config_rdma_qvlist 521 * @vf: pointer to the VF info 522 * @qvlist_info: queue and vector list 523 * 524 * Return 0 on success or < 0 on error 525 **/ 526 static int 527 i40e_config_rdma_qvlist(struct i40e_vf *vf, 528 struct virtchnl_rdma_qvlist_info *qvlist_info) 529 { 530 struct i40e_pf *pf = vf->pf; 531 struct i40e_hw *hw = &pf->hw; 532 struct virtchnl_rdma_qv_info *qv_info; 533 u32 v_idx, i, reg_idx, reg; 534 u32 next_q_idx, next_q_type; 535 size_t size; 536 u32 msix_vf; 537 int ret = 0; 538 539 msix_vf = pf->hw.func_caps.num_msix_vectors_vf; 540 541 if (qvlist_info->num_vectors > msix_vf) { 542 dev_warn(&pf->pdev->dev, 543 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n", 544 qvlist_info->num_vectors, 545 msix_vf); 546 ret = -EINVAL; 547 goto err_out; 548 } 549 550 kfree(vf->qvlist_info); 551 size = virtchnl_struct_size(vf->qvlist_info, qv_info, 552 qvlist_info->num_vectors); 553 vf->qvlist_info = kzalloc(size, GFP_KERNEL); 554 if (!vf->qvlist_info) { 555 ret = -ENOMEM; 556 goto err_out; 557 } 558 vf->qvlist_info->num_vectors = qvlist_info->num_vectors; 559 560 msix_vf = pf->hw.func_caps.num_msix_vectors_vf; 561 for (i = 0; i < qvlist_info->num_vectors; i++) { 562 qv_info = &qvlist_info->qv_info[i]; 563 564 /* Validate vector id belongs to this vf */ 565 if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) { 566 ret = -EINVAL; 567 goto err_free; 568 } 569 570 v_idx = qv_info->v_idx; 571 572 vf->qvlist_info->qv_info[i] = *qv_info; 573 574 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); 575 /* We might be sharing the interrupt, so get the first queue 576 * index and type, push it down the list by adding the new 577 * queue on top. Also link it with the new queue in CEQCTL. 578 */ 579 reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx)); 580 next_q_idx = FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK, 581 reg); 582 next_q_type = FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK, 583 reg); 584 585 if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) { 586 reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx; 587 reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK | 588 (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) | 589 (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) | 590 (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) | 591 (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)); 592 wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg); 593 594 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); 595 reg = (qv_info->ceq_idx & 596 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) | 597 (I40E_QUEUE_TYPE_PE_CEQ << 598 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); 599 wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg); 600 } 601 602 if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) { 603 reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK | 604 (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) | 605 (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)); 606 607 wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg); 608 } 609 } 610 611 return 0; 612 err_free: 613 kfree(vf->qvlist_info); 614 vf->qvlist_info = NULL; 615 err_out: 616 return ret; 617 } 618 619 /** 620 * i40e_config_vsi_tx_queue 621 * @vf: pointer to the VF info 622 * @vsi_id: id of VSI as provided by the FW 623 * @vsi_queue_id: vsi relative queue index 624 * @info: config. info 625 * 626 * configure tx queue 627 **/ 628 static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id, 629 u16 vsi_queue_id, 630 struct virtchnl_txq_info *info) 631 { 632 struct i40e_pf *pf = vf->pf; 633 struct i40e_hw *hw = &pf->hw; 634 struct i40e_hmc_obj_txq tx_ctx; 635 struct i40e_vsi *vsi; 636 u16 pf_queue_id; 637 u32 qtx_ctl; 638 int ret = 0; 639 640 if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) { 641 ret = -ENOENT; 642 goto error_context; 643 } 644 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); 645 vsi = i40e_find_vsi_from_id(pf, vsi_id); 646 if (!vsi) { 647 ret = -ENOENT; 648 goto error_context; 649 } 650 651 /* clear the context structure first */ 652 memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq)); 653 654 /* only set the required fields */ 655 tx_ctx.base = info->dma_ring_addr / 128; 656 657 /* ring_len has to be multiple of 8 */ 658 if (!IS_ALIGNED(info->ring_len, 8) || 659 info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) { 660 ret = -EINVAL; 661 goto error_context; 662 } 663 tx_ctx.qlen = info->ring_len; 664 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]); 665 tx_ctx.rdylist_act = 0; 666 tx_ctx.head_wb_ena = info->headwb_enabled; 667 tx_ctx.head_wb_addr = info->dma_headwb_addr; 668 669 /* clear the context in the HMC */ 670 ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id); 671 if (ret) { 672 dev_err(&pf->pdev->dev, 673 "Failed to clear VF LAN Tx queue context %d, error: %d\n", 674 pf_queue_id, ret); 675 ret = -ENOENT; 676 goto error_context; 677 } 678 679 /* set the context in the HMC */ 680 ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx); 681 if (ret) { 682 dev_err(&pf->pdev->dev, 683 "Failed to set VF LAN Tx queue context %d error: %d\n", 684 pf_queue_id, ret); 685 ret = -ENOENT; 686 goto error_context; 687 } 688 689 /* associate this queue with the PCI VF function */ 690 qtx_ctl = I40E_QTX_CTL_VF_QUEUE; 691 qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_PF_INDX_MASK, hw->pf_id); 692 qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK, 693 vf->vf_id + hw->func_caps.vf_base_id); 694 wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl); 695 i40e_flush(hw); 696 697 error_context: 698 return ret; 699 } 700 701 /** 702 * i40e_config_vsi_rx_queue 703 * @vf: pointer to the VF info 704 * @vsi_id: id of VSI as provided by the FW 705 * @vsi_queue_id: vsi relative queue index 706 * @info: config. info 707 * 708 * configure rx queue 709 **/ 710 static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, 711 u16 vsi_queue_id, 712 struct virtchnl_rxq_info *info) 713 { 714 u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); 715 struct i40e_pf *pf = vf->pf; 716 struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx]; 717 struct i40e_hw *hw = &pf->hw; 718 struct i40e_hmc_obj_rxq rx_ctx; 719 int ret = 0; 720 721 /* clear the context structure first */ 722 memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq)); 723 724 /* only set the required fields */ 725 rx_ctx.base = info->dma_ring_addr / 128; 726 727 /* ring_len has to be multiple of 32 */ 728 if (!IS_ALIGNED(info->ring_len, 32) || 729 info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) { 730 ret = -EINVAL; 731 goto error_param; 732 } 733 rx_ctx.qlen = info->ring_len; 734 735 if (info->splithdr_enabled) { 736 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | 737 I40E_RX_SPLIT_IP | 738 I40E_RX_SPLIT_TCP_UDP | 739 I40E_RX_SPLIT_SCTP; 740 /* header length validation */ 741 if (info->hdr_size > ((2 * 1024) - 64)) { 742 ret = -EINVAL; 743 goto error_param; 744 } 745 rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT; 746 747 /* set split mode 10b */ 748 rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT; 749 } 750 751 /* databuffer length validation */ 752 if (info->databuffer_size > ((16 * 1024) - 128)) { 753 ret = -EINVAL; 754 goto error_param; 755 } 756 rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT; 757 758 /* max pkt. length validation */ 759 if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) { 760 ret = -EINVAL; 761 goto error_param; 762 } 763 rx_ctx.rxmax = info->max_pkt_size; 764 765 /* if port VLAN is configured increase the max packet size */ 766 if (vsi->info.pvid) 767 rx_ctx.rxmax += VLAN_HLEN; 768 769 /* enable 32bytes desc always */ 770 rx_ctx.dsize = 1; 771 772 /* default values */ 773 rx_ctx.lrxqthresh = 1; 774 rx_ctx.crcstrip = 1; 775 rx_ctx.prefena = 1; 776 rx_ctx.l2tsel = 1; 777 778 /* clear the context in the HMC */ 779 ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id); 780 if (ret) { 781 dev_err(&pf->pdev->dev, 782 "Failed to clear VF LAN Rx queue context %d, error: %d\n", 783 pf_queue_id, ret); 784 ret = -ENOENT; 785 goto error_param; 786 } 787 788 /* set the context in the HMC */ 789 ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx); 790 if (ret) { 791 dev_err(&pf->pdev->dev, 792 "Failed to set VF LAN Rx queue context %d error: %d\n", 793 pf_queue_id, ret); 794 ret = -ENOENT; 795 goto error_param; 796 } 797 798 error_param: 799 return ret; 800 } 801 802 /** 803 * i40e_alloc_vsi_res 804 * @vf: pointer to the VF info 805 * @idx: VSI index, applies only for ADq mode, zero otherwise 806 * 807 * alloc VF vsi context & resources 808 **/ 809 static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx) 810 { 811 struct i40e_mac_filter *f = NULL; 812 struct i40e_vsi *main_vsi, *vsi; 813 struct i40e_pf *pf = vf->pf; 814 u64 max_tx_rate = 0; 815 int ret = 0; 816 817 main_vsi = i40e_pf_get_main_vsi(pf); 818 vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, main_vsi->seid, vf->vf_id); 819 820 if (!vsi) { 821 dev_err(&pf->pdev->dev, 822 "add vsi failed for VF %d, aq_err %d\n", 823 vf->vf_id, pf->hw.aq.asq_last_status); 824 ret = -ENOENT; 825 goto error_alloc_vsi_res; 826 } 827 828 if (!idx) { 829 u64 hashcfg = i40e_pf_get_default_rss_hashcfg(pf); 830 u8 broadcast[ETH_ALEN]; 831 832 vf->lan_vsi_idx = vsi->idx; 833 vf->lan_vsi_id = vsi->id; 834 /* If the port VLAN has been configured and then the 835 * VF driver was removed then the VSI port VLAN 836 * configuration was destroyed. Check if there is 837 * a port VLAN and restore the VSI configuration if 838 * needed. 839 */ 840 if (vf->port_vlan_id) 841 i40e_vsi_add_pvid(vsi, vf->port_vlan_id); 842 843 spin_lock_bh(&vsi->mac_filter_hash_lock); 844 if (is_valid_ether_addr(vf->default_lan_addr.addr)) { 845 f = i40e_add_mac_filter(vsi, 846 vf->default_lan_addr.addr); 847 if (!f) 848 dev_info(&pf->pdev->dev, 849 "Could not add MAC filter %pM for VF %d\n", 850 vf->default_lan_addr.addr, vf->vf_id); 851 } 852 eth_broadcast_addr(broadcast); 853 f = i40e_add_mac_filter(vsi, broadcast); 854 if (!f) 855 dev_info(&pf->pdev->dev, 856 "Could not allocate VF broadcast filter\n"); 857 spin_unlock_bh(&vsi->mac_filter_hash_lock); 858 wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hashcfg); 859 wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), 860 (u32)(hashcfg >> 32)); 861 /* program mac filter only for VF VSI */ 862 ret = i40e_sync_vsi_filters(vsi); 863 if (ret) 864 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); 865 } 866 867 /* storing VSI index and id for ADq and don't apply the mac filter */ 868 if (vf->adq_enabled) { 869 vf->ch[idx].vsi_idx = vsi->idx; 870 vf->ch[idx].vsi_id = vsi->id; 871 } 872 873 /* Set VF bandwidth if specified */ 874 if (vf->tx_rate) { 875 max_tx_rate = vf->tx_rate; 876 } else if (vf->ch[idx].max_tx_rate) { 877 max_tx_rate = vf->ch[idx].max_tx_rate; 878 } 879 880 if (max_tx_rate) { 881 max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR); 882 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, 883 max_tx_rate, 0, NULL); 884 if (ret) 885 dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n", 886 vf->vf_id, ret); 887 } 888 889 error_alloc_vsi_res: 890 return ret; 891 } 892 893 /** 894 * i40e_map_pf_queues_to_vsi 895 * @vf: pointer to the VF info 896 * 897 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This 898 * function takes care of first part VSILAN_QTABLE, mapping pf queues to VSI. 899 **/ 900 static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf) 901 { 902 struct i40e_pf *pf = vf->pf; 903 struct i40e_hw *hw = &pf->hw; 904 u32 reg, num_tc = 1; /* VF has at least one traffic class */ 905 u16 vsi_id, qps; 906 int i, j; 907 908 if (vf->adq_enabled) 909 num_tc = vf->num_tc; 910 911 for (i = 0; i < num_tc; i++) { 912 if (vf->adq_enabled) { 913 qps = vf->ch[i].num_qps; 914 vsi_id = vf->ch[i].vsi_id; 915 } else { 916 qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; 917 vsi_id = vf->lan_vsi_id; 918 } 919 920 for (j = 0; j < 7; j++) { 921 if (j * 2 >= qps) { 922 /* end of list */ 923 reg = 0x07FF07FF; 924 } else { 925 u16 qid = i40e_vc_get_pf_queue_id(vf, 926 vsi_id, 927 j * 2); 928 reg = qid; 929 qid = i40e_vc_get_pf_queue_id(vf, vsi_id, 930 (j * 2) + 1); 931 reg |= qid << 16; 932 } 933 i40e_write_rx_ctl(hw, 934 I40E_VSILAN_QTABLE(j, vsi_id), 935 reg); 936 } 937 } 938 } 939 940 /** 941 * i40e_map_pf_to_vf_queues 942 * @vf: pointer to the VF info 943 * 944 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This 945 * function takes care of the second part VPLAN_QTABLE & completes VF mappings. 946 **/ 947 static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf) 948 { 949 struct i40e_pf *pf = vf->pf; 950 struct i40e_hw *hw = &pf->hw; 951 u32 reg, total_qps = 0; 952 u32 qps, num_tc = 1; /* VF has at least one traffic class */ 953 u16 vsi_id, qid; 954 int i, j; 955 956 if (vf->adq_enabled) 957 num_tc = vf->num_tc; 958 959 for (i = 0; i < num_tc; i++) { 960 if (vf->adq_enabled) { 961 qps = vf->ch[i].num_qps; 962 vsi_id = vf->ch[i].vsi_id; 963 } else { 964 qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; 965 vsi_id = vf->lan_vsi_id; 966 } 967 968 for (j = 0; j < qps; j++) { 969 qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j); 970 971 reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK); 972 wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id), 973 reg); 974 total_qps++; 975 } 976 } 977 } 978 979 /** 980 * i40e_enable_vf_mappings 981 * @vf: pointer to the VF info 982 * 983 * enable VF mappings 984 **/ 985 static void i40e_enable_vf_mappings(struct i40e_vf *vf) 986 { 987 struct i40e_pf *pf = vf->pf; 988 struct i40e_hw *hw = &pf->hw; 989 u32 reg; 990 991 /* Tell the hardware we're using noncontiguous mapping. HW requires 992 * that VF queues be mapped using this method, even when they are 993 * contiguous in real life 994 */ 995 i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id), 996 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); 997 998 /* enable VF vplan_qtable mappings */ 999 reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK; 1000 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg); 1001 1002 i40e_map_pf_to_vf_queues(vf); 1003 i40e_map_pf_queues_to_vsi(vf); 1004 1005 i40e_flush(hw); 1006 } 1007 1008 /** 1009 * i40e_disable_vf_mappings 1010 * @vf: pointer to the VF info 1011 * 1012 * disable VF mappings 1013 **/ 1014 static void i40e_disable_vf_mappings(struct i40e_vf *vf) 1015 { 1016 struct i40e_pf *pf = vf->pf; 1017 struct i40e_hw *hw = &pf->hw; 1018 int i; 1019 1020 /* disable qp mappings */ 1021 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0); 1022 for (i = 0; i < I40E_MAX_VSI_QP; i++) 1023 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id), 1024 I40E_QUEUE_END_OF_LIST); 1025 i40e_flush(hw); 1026 } 1027 1028 /** 1029 * i40e_free_vf_res 1030 * @vf: pointer to the VF info 1031 * 1032 * free VF resources 1033 **/ 1034 static void i40e_free_vf_res(struct i40e_vf *vf) 1035 { 1036 struct i40e_pf *pf = vf->pf; 1037 struct i40e_hw *hw = &pf->hw; 1038 u32 reg_idx, reg; 1039 int i, j, msix_vf; 1040 1041 /* Start by disabling VF's configuration API to prevent the OS from 1042 * accessing the VF's VSI after it's freed / invalidated. 1043 */ 1044 clear_bit(I40E_VF_STATE_INIT, &vf->vf_states); 1045 1046 /* It's possible the VF had requeuested more queues than the default so 1047 * do the accounting here when we're about to free them. 1048 */ 1049 if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) { 1050 pf->queues_left += vf->num_queue_pairs - 1051 I40E_DEFAULT_QUEUES_PER_VF; 1052 } 1053 1054 /* free vsi & disconnect it from the parent uplink */ 1055 if (vf->lan_vsi_idx) { 1056 i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]); 1057 vf->lan_vsi_idx = 0; 1058 vf->lan_vsi_id = 0; 1059 } 1060 1061 /* do the accounting and remove additional ADq VSI's */ 1062 if (vf->adq_enabled && vf->ch[0].vsi_idx) { 1063 for (j = 0; j < vf->num_tc; j++) { 1064 /* At this point VSI0 is already released so don't 1065 * release it again and only clear their values in 1066 * structure variables 1067 */ 1068 if (j) 1069 i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]); 1070 vf->ch[j].vsi_idx = 0; 1071 vf->ch[j].vsi_id = 0; 1072 } 1073 } 1074 msix_vf = pf->hw.func_caps.num_msix_vectors_vf; 1075 1076 /* disable interrupts so the VF starts in a known state */ 1077 for (i = 0; i < msix_vf; i++) { 1078 /* format is same for both registers */ 1079 if (0 == i) 1080 reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id); 1081 else 1082 reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) * 1083 (vf->vf_id)) 1084 + (i - 1)); 1085 wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); 1086 i40e_flush(hw); 1087 } 1088 1089 /* clear the irq settings */ 1090 for (i = 0; i < msix_vf; i++) { 1091 /* format is same for both registers */ 1092 if (0 == i) 1093 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); 1094 else 1095 reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) * 1096 (vf->vf_id)) 1097 + (i - 1)); 1098 reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK | 1099 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK); 1100 wr32(hw, reg_idx, reg); 1101 i40e_flush(hw); 1102 } 1103 /* reset some of the state variables keeping track of the resources */ 1104 vf->num_queue_pairs = 0; 1105 clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); 1106 clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); 1107 } 1108 1109 /** 1110 * i40e_alloc_vf_res 1111 * @vf: pointer to the VF info 1112 * 1113 * allocate VF resources 1114 **/ 1115 static int i40e_alloc_vf_res(struct i40e_vf *vf) 1116 { 1117 struct i40e_pf *pf = vf->pf; 1118 int total_queue_pairs = 0; 1119 int ret, idx; 1120 1121 if (vf->num_req_queues && 1122 vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF) 1123 pf->num_vf_qps = vf->num_req_queues; 1124 else 1125 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; 1126 1127 /* allocate hw vsi context & associated resources */ 1128 ret = i40e_alloc_vsi_res(vf, 0); 1129 if (ret) 1130 goto error_alloc; 1131 total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; 1132 1133 /* allocate additional VSIs based on tc information for ADq */ 1134 if (vf->adq_enabled) { 1135 if (pf->queues_left >= 1136 (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) { 1137 /* TC 0 always belongs to VF VSI */ 1138 for (idx = 1; idx < vf->num_tc; idx++) { 1139 ret = i40e_alloc_vsi_res(vf, idx); 1140 if (ret) 1141 goto error_alloc; 1142 } 1143 /* send correct number of queues */ 1144 total_queue_pairs = I40E_MAX_VF_QUEUES; 1145 } else { 1146 dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n", 1147 vf->vf_id); 1148 vf->adq_enabled = false; 1149 } 1150 } 1151 1152 /* We account for each VF to get a default number of queue pairs. If 1153 * the VF has now requested more, we need to account for that to make 1154 * certain we never request more queues than we actually have left in 1155 * HW. 1156 */ 1157 if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) 1158 pf->queues_left -= 1159 total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF; 1160 1161 if (vf->trusted) 1162 set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 1163 else 1164 clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 1165 1166 /* store the total qps number for the runtime 1167 * VF req validation 1168 */ 1169 vf->num_queue_pairs = total_queue_pairs; 1170 1171 /* VF is now completely initialized */ 1172 set_bit(I40E_VF_STATE_INIT, &vf->vf_states); 1173 1174 error_alloc: 1175 if (ret) 1176 i40e_free_vf_res(vf); 1177 1178 return ret; 1179 } 1180 1181 #define VF_DEVICE_STATUS 0xAA 1182 #define VF_TRANS_PENDING_MASK 0x20 1183 /** 1184 * i40e_quiesce_vf_pci 1185 * @vf: pointer to the VF structure 1186 * 1187 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO 1188 * if the transactions never clear. 1189 **/ 1190 static int i40e_quiesce_vf_pci(struct i40e_vf *vf) 1191 { 1192 struct i40e_pf *pf = vf->pf; 1193 struct i40e_hw *hw = &pf->hw; 1194 int vf_abs_id, i; 1195 u32 reg; 1196 1197 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id; 1198 1199 wr32(hw, I40E_PF_PCI_CIAA, 1200 VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)); 1201 for (i = 0; i < 100; i++) { 1202 reg = rd32(hw, I40E_PF_PCI_CIAD); 1203 if ((reg & VF_TRANS_PENDING_MASK) == 0) 1204 return 0; 1205 udelay(1); 1206 } 1207 return -EIO; 1208 } 1209 1210 /** 1211 * __i40e_getnum_vf_vsi_vlan_filters 1212 * @vsi: pointer to the vsi 1213 * 1214 * called to get the number of VLANs offloaded on this VF 1215 **/ 1216 static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) 1217 { 1218 struct i40e_mac_filter *f; 1219 u16 num_vlans = 0, bkt; 1220 1221 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { 1222 if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) 1223 num_vlans++; 1224 } 1225 1226 return num_vlans; 1227 } 1228 1229 /** 1230 * i40e_getnum_vf_vsi_vlan_filters 1231 * @vsi: pointer to the vsi 1232 * 1233 * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held 1234 **/ 1235 static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) 1236 { 1237 int num_vlans; 1238 1239 spin_lock_bh(&vsi->mac_filter_hash_lock); 1240 num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi); 1241 spin_unlock_bh(&vsi->mac_filter_hash_lock); 1242 1243 return num_vlans; 1244 } 1245 1246 /** 1247 * i40e_get_vlan_list_sync 1248 * @vsi: pointer to the VSI 1249 * @num_vlans: number of VLANs in mac_filter_hash, returned to caller 1250 * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller. 1251 * This array is allocated here, but has to be freed in caller. 1252 * 1253 * Called to get number of VLANs and VLAN list present in mac_filter_hash. 1254 **/ 1255 static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans, 1256 s16 **vlan_list) 1257 { 1258 struct i40e_mac_filter *f; 1259 int i = 0; 1260 int bkt; 1261 1262 spin_lock_bh(&vsi->mac_filter_hash_lock); 1263 *num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi); 1264 *vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC); 1265 if (!(*vlan_list)) 1266 goto err; 1267 1268 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { 1269 if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID) 1270 continue; 1271 (*vlan_list)[i++] = f->vlan; 1272 } 1273 err: 1274 spin_unlock_bh(&vsi->mac_filter_hash_lock); 1275 } 1276 1277 /** 1278 * i40e_set_vsi_promisc 1279 * @vf: pointer to the VF struct 1280 * @seid: VSI number 1281 * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable 1282 * for a given VLAN 1283 * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable 1284 * for a given VLAN 1285 * @vl: List of VLANs - apply filter for given VLANs 1286 * @num_vlans: Number of elements in @vl 1287 **/ 1288 static int 1289 i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable, 1290 bool unicast_enable, s16 *vl, u16 num_vlans) 1291 { 1292 struct i40e_pf *pf = vf->pf; 1293 struct i40e_hw *hw = &pf->hw; 1294 int aq_ret, aq_tmp = 0; 1295 int i; 1296 1297 /* No VLAN to set promisc on, set on VSI */ 1298 if (!num_vlans || !vl) { 1299 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid, 1300 multi_enable, 1301 NULL); 1302 if (aq_ret) { 1303 int aq_err = pf->hw.aq.asq_last_status; 1304 1305 dev_err(&pf->pdev->dev, 1306 "VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n", 1307 vf->vf_id, ERR_PTR(aq_ret), 1308 libie_aq_str(aq_err)); 1309 1310 return aq_ret; 1311 } 1312 1313 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid, 1314 unicast_enable, 1315 NULL, true); 1316 1317 if (aq_ret) { 1318 int aq_err = pf->hw.aq.asq_last_status; 1319 1320 dev_err(&pf->pdev->dev, 1321 "VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n", 1322 vf->vf_id, ERR_PTR(aq_ret), 1323 libie_aq_str(aq_err)); 1324 } 1325 1326 return aq_ret; 1327 } 1328 1329 for (i = 0; i < num_vlans; i++) { 1330 aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid, 1331 multi_enable, 1332 vl[i], NULL); 1333 if (aq_ret) { 1334 int aq_err = pf->hw.aq.asq_last_status; 1335 1336 dev_err(&pf->pdev->dev, 1337 "VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n", 1338 vf->vf_id, ERR_PTR(aq_ret), 1339 libie_aq_str(aq_err)); 1340 1341 if (!aq_tmp) 1342 aq_tmp = aq_ret; 1343 } 1344 1345 aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid, 1346 unicast_enable, 1347 vl[i], NULL); 1348 if (aq_ret) { 1349 int aq_err = pf->hw.aq.asq_last_status; 1350 1351 dev_err(&pf->pdev->dev, 1352 "VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n", 1353 vf->vf_id, ERR_PTR(aq_ret), 1354 libie_aq_str(aq_err)); 1355 1356 if (!aq_tmp) 1357 aq_tmp = aq_ret; 1358 } 1359 } 1360 1361 if (aq_tmp) 1362 aq_ret = aq_tmp; 1363 1364 return aq_ret; 1365 } 1366 1367 /** 1368 * i40e_config_vf_promiscuous_mode 1369 * @vf: pointer to the VF info 1370 * @vsi_id: VSI id 1371 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable 1372 * @alluni: set MAC L2 layer unicast promiscuous enable/disable 1373 * 1374 * Called from the VF to configure the promiscuous mode of 1375 * VF vsis and from the VF reset path to reset promiscuous mode. 1376 **/ 1377 static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf, 1378 u16 vsi_id, 1379 bool allmulti, 1380 bool alluni) 1381 { 1382 struct i40e_pf *pf = vf->pf; 1383 struct i40e_vsi *vsi; 1384 int aq_ret = 0; 1385 u16 num_vlans; 1386 s16 *vl; 1387 1388 vsi = i40e_find_vsi_from_id(pf, vsi_id); 1389 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi) 1390 return -EINVAL; 1391 1392 if (vf->port_vlan_id) { 1393 aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, 1394 alluni, &vf->port_vlan_id, 1); 1395 return aq_ret; 1396 } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { 1397 i40e_get_vlan_list_sync(vsi, &num_vlans, &vl); 1398 1399 if (!vl) 1400 return -ENOMEM; 1401 1402 aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni, 1403 vl, num_vlans); 1404 kfree(vl); 1405 return aq_ret; 1406 } 1407 1408 /* no VLANs to set on, set on VSI */ 1409 aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni, 1410 NULL, 0); 1411 return aq_ret; 1412 } 1413 1414 /** 1415 * i40e_sync_vfr_reset 1416 * @hw: pointer to hw struct 1417 * @vf_id: VF identifier 1418 * 1419 * Before trigger hardware reset, we need to know if no other process has 1420 * reserved the hardware for any reset operations. This check is done by 1421 * examining the status of the RSTAT1 register used to signal the reset. 1422 **/ 1423 static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id) 1424 { 1425 u32 reg; 1426 int i; 1427 1428 for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) { 1429 reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) & 1430 I40E_VFINT_ICR0_ADMINQ_MASK; 1431 if (reg) 1432 return 0; 1433 1434 usleep_range(100, 200); 1435 } 1436 1437 return -EAGAIN; 1438 } 1439 1440 /** 1441 * i40e_trigger_vf_reset 1442 * @vf: pointer to the VF structure 1443 * @flr: VFLR was issued or not 1444 * 1445 * Trigger hardware to start a reset for a particular VF. Expects the caller 1446 * to wait the proper amount of time to allow hardware to reset the VF before 1447 * it cleans up and restores VF functionality. 1448 **/ 1449 static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr) 1450 { 1451 struct i40e_pf *pf = vf->pf; 1452 struct i40e_hw *hw = &pf->hw; 1453 u32 reg, reg_idx, bit_idx; 1454 bool vf_active; 1455 u32 radq; 1456 1457 /* warn the VF */ 1458 vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); 1459 1460 /* Disable VF's configuration API during reset. The flag is re-enabled 1461 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI. 1462 * It's normally disabled in i40e_free_vf_res(), but it's safer 1463 * to do it earlier to give some time to finish to any VF config 1464 * functions that may still be running at this point. 1465 */ 1466 clear_bit(I40E_VF_STATE_INIT, &vf->vf_states); 1467 clear_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states); 1468 1469 /* In the case of a VFLR, the HW has already reset the VF and we 1470 * just need to clean up, so don't hit the VFRTRIG register. 1471 */ 1472 if (!flr) { 1473 /* Sync VFR reset before trigger next one */ 1474 radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) & 1475 I40E_VFINT_ICR0_ADMINQ_MASK; 1476 if (vf_active && !radq) 1477 /* waiting for finish reset by virtual driver */ 1478 if (i40e_sync_vfr_reset(hw, vf->vf_id)) 1479 dev_info(&pf->pdev->dev, 1480 "Reset VF %d never finished\n", 1481 vf->vf_id); 1482 1483 /* Reset VF using VPGEN_VFRTRIG reg. It is also setting 1484 * in progress state in rstat1 register. 1485 */ 1486 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); 1487 reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; 1488 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); 1489 i40e_flush(hw); 1490 } 1491 /* clear the VFLR bit in GLGEN_VFLRSTAT */ 1492 reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; 1493 bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32; 1494 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); 1495 i40e_flush(hw); 1496 1497 if (i40e_quiesce_vf_pci(vf)) 1498 dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n", 1499 vf->vf_id); 1500 } 1501 1502 /** 1503 * i40e_cleanup_reset_vf 1504 * @vf: pointer to the VF structure 1505 * 1506 * Cleanup a VF after the hardware reset is finished. Expects the caller to 1507 * have verified whether the reset is finished properly, and ensure the 1508 * minimum amount of wait time has passed. 1509 **/ 1510 static void i40e_cleanup_reset_vf(struct i40e_vf *vf) 1511 { 1512 struct i40e_pf *pf = vf->pf; 1513 struct i40e_hw *hw = &pf->hw; 1514 u32 reg; 1515 1516 /* disable promisc modes in case they were enabled */ 1517 i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false); 1518 1519 /* free VF resources to begin resetting the VSI state */ 1520 i40e_free_vf_res(vf); 1521 1522 /* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg. 1523 * By doing this we allow HW to access VF memory at any point. If we 1524 * did it any sooner, HW could access memory while it was being freed 1525 * in i40e_free_vf_res(), causing an IOMMU fault. 1526 * 1527 * On the other hand, this needs to be done ASAP, because the VF driver 1528 * is waiting for this to happen and may report a timeout. It's 1529 * harmless, but it gets logged into Guest OS kernel log, so best avoid 1530 * it. 1531 */ 1532 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); 1533 reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; 1534 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); 1535 1536 /* reallocate VF resources to finish resetting the VSI state */ 1537 if (!i40e_alloc_vf_res(vf)) { 1538 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 1539 i40e_enable_vf_mappings(vf); 1540 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); 1541 clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); 1542 /* Do not notify the client during VF init */ 1543 if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE, 1544 &vf->vf_states)) 1545 i40e_notify_client_of_vf_reset(pf, abs_vf_id); 1546 vf->num_vlan = 0; 1547 } 1548 1549 /* Tell the VF driver the reset is done. This needs to be done only 1550 * after VF has been fully initialized, because the VF driver may 1551 * request resources immediately after setting this flag. 1552 */ 1553 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); 1554 } 1555 1556 /** 1557 * i40e_reset_vf 1558 * @vf: pointer to the VF structure 1559 * @flr: VFLR was issued or not 1560 * 1561 * Return: True if reset was performed successfully or if resets are disabled. 1562 * False if reset is already in progress. 1563 **/ 1564 bool i40e_reset_vf(struct i40e_vf *vf, bool flr) 1565 { 1566 struct i40e_pf *pf = vf->pf; 1567 struct i40e_hw *hw = &pf->hw; 1568 bool rsd = false; 1569 u32 reg; 1570 int i; 1571 1572 if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) 1573 return true; 1574 1575 /* Bail out if VFs are disabled. */ 1576 if (test_bit(__I40E_VF_DISABLE, pf->state)) 1577 return true; 1578 1579 /* If VF is being reset already we don't need to continue. */ 1580 if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) 1581 return false; 1582 1583 i40e_trigger_vf_reset(vf, flr); 1584 1585 /* poll VPGEN_VFRSTAT reg to make sure 1586 * that reset is complete 1587 */ 1588 for (i = 0; i < 10; i++) { 1589 /* VF reset requires driver to first reset the VF and then 1590 * poll the status register to make sure that the reset 1591 * completed successfully. Due to internal HW FIFO flushes, 1592 * we must wait 10ms before the register will be valid. 1593 */ 1594 usleep_range(10000, 20000); 1595 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); 1596 if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) { 1597 rsd = true; 1598 break; 1599 } 1600 } 1601 1602 if (flr) 1603 usleep_range(10000, 20000); 1604 1605 if (!rsd) 1606 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", 1607 vf->vf_id); 1608 usleep_range(10000, 20000); 1609 1610 /* On initial reset, we don't have any queues to disable */ 1611 if (vf->lan_vsi_idx != 0) 1612 i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]); 1613 1614 i40e_cleanup_reset_vf(vf); 1615 1616 i40e_flush(hw); 1617 usleep_range(20000, 40000); 1618 clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states); 1619 1620 return true; 1621 } 1622 1623 /** 1624 * i40e_reset_all_vfs 1625 * @pf: pointer to the PF structure 1626 * @flr: VFLR was issued or not 1627 * 1628 * Reset all allocated VFs in one go. First, tell the hardware to reset each 1629 * VF, then do all the waiting in one chunk, and finally finish restoring each 1630 * VF after the wait. This is useful during PF routines which need to reset 1631 * all VFs, as otherwise it must perform these resets in a serialized fashion. 1632 * 1633 * Returns true if any VFs were reset, and false otherwise. 1634 **/ 1635 bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) 1636 { 1637 struct i40e_hw *hw = &pf->hw; 1638 struct i40e_vf *vf; 1639 u32 reg; 1640 int i; 1641 1642 /* If we don't have any VFs, then there is nothing to reset */ 1643 if (!pf->num_alloc_vfs) 1644 return false; 1645 1646 /* If VFs have been disabled, there is no need to reset */ 1647 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) 1648 return false; 1649 1650 /* Begin reset on all VFs at once */ 1651 for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) { 1652 /* If VF is being reset no need to trigger reset again */ 1653 if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) 1654 i40e_trigger_vf_reset(vf, flr); 1655 } 1656 1657 /* HW requires some time to make sure it can flush the FIFO for a VF 1658 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in 1659 * sequence to make sure that it has completed. We'll keep track of 1660 * the VFs using a simple iterator that increments once that VF has 1661 * finished resetting. 1662 */ 1663 for (i = 0, vf = &pf->vf[0]; i < 10 && vf < &pf->vf[pf->num_alloc_vfs]; ++i) { 1664 usleep_range(10000, 20000); 1665 1666 /* Check each VF in sequence, beginning with the VF to fail 1667 * the previous check. 1668 */ 1669 while (vf < &pf->vf[pf->num_alloc_vfs]) { 1670 if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) { 1671 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); 1672 if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK)) 1673 break; 1674 } 1675 1676 /* If the current VF has finished resetting, move on 1677 * to the next VF in sequence. 1678 */ 1679 ++vf; 1680 } 1681 } 1682 1683 if (flr) 1684 usleep_range(10000, 20000); 1685 1686 /* Display a warning if at least one VF didn't manage to reset in 1687 * time, but continue on with the operation. 1688 */ 1689 if (vf < &pf->vf[pf->num_alloc_vfs]) 1690 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", 1691 vf->vf_id); 1692 usleep_range(10000, 20000); 1693 1694 /* Begin disabling all the rings associated with VFs, but do not wait 1695 * between each VF. 1696 */ 1697 for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) { 1698 /* On initial reset, we don't have any queues to disable */ 1699 if (vf->lan_vsi_idx == 0) 1700 continue; 1701 1702 /* If VF is reset in another thread just continue */ 1703 if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) 1704 continue; 1705 1706 i40e_vsi_stop_rings_no_wait(pf->vsi[vf->lan_vsi_idx]); 1707 } 1708 1709 /* Now that we've notified HW to disable all of the VF rings, wait 1710 * until they finish. 1711 */ 1712 for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) { 1713 /* On initial reset, we don't have any queues to disable */ 1714 if (vf->lan_vsi_idx == 0) 1715 continue; 1716 1717 /* If VF is reset in another thread just continue */ 1718 if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) 1719 continue; 1720 1721 i40e_vsi_wait_queues_disabled(pf->vsi[vf->lan_vsi_idx]); 1722 } 1723 1724 /* Hw may need up to 50ms to finish disabling the RX queues. We 1725 * minimize the wait by delaying only once for all VFs. 1726 */ 1727 mdelay(50); 1728 1729 /* Finish the reset on each VF */ 1730 for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) { 1731 /* If VF is reset in another thread just continue */ 1732 if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) 1733 continue; 1734 1735 i40e_cleanup_reset_vf(vf); 1736 } 1737 1738 i40e_flush(hw); 1739 usleep_range(20000, 40000); 1740 clear_bit(__I40E_VF_DISABLE, pf->state); 1741 1742 return true; 1743 } 1744 1745 /** 1746 * i40e_free_vfs 1747 * @pf: pointer to the PF structure 1748 * 1749 * free VF resources 1750 **/ 1751 void i40e_free_vfs(struct i40e_pf *pf) 1752 { 1753 struct i40e_hw *hw = &pf->hw; 1754 u32 reg_idx, bit_idx; 1755 int i, tmp, vf_id; 1756 1757 if (!pf->vf) 1758 return; 1759 1760 set_bit(__I40E_VFS_RELEASING, pf->state); 1761 while (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) 1762 usleep_range(1000, 2000); 1763 1764 i40e_notify_client_of_vf_enable(pf, 0); 1765 1766 /* Disable IOV before freeing resources. This lets any VF drivers 1767 * running in the host get themselves cleaned up before we yank 1768 * the carpet out from underneath their feet. 1769 */ 1770 if (!pci_vfs_assigned(pf->pdev)) 1771 pci_disable_sriov(pf->pdev); 1772 else 1773 dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); 1774 1775 /* Amortize wait time by stopping all VFs at the same time */ 1776 for (i = 0; i < pf->num_alloc_vfs; i++) { 1777 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) 1778 continue; 1779 1780 i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]); 1781 } 1782 1783 for (i = 0; i < pf->num_alloc_vfs; i++) { 1784 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) 1785 continue; 1786 1787 i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]); 1788 } 1789 1790 /* free up VF resources */ 1791 tmp = pf->num_alloc_vfs; 1792 pf->num_alloc_vfs = 0; 1793 for (i = 0; i < tmp; i++) { 1794 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) 1795 i40e_free_vf_res(&pf->vf[i]); 1796 /* disable qp mappings */ 1797 i40e_disable_vf_mappings(&pf->vf[i]); 1798 } 1799 1800 kfree(pf->vf); 1801 pf->vf = NULL; 1802 1803 /* This check is for when the driver is unloaded while VFs are 1804 * assigned. Setting the number of VFs to 0 through sysfs is caught 1805 * before this function ever gets called. 1806 */ 1807 if (!pci_vfs_assigned(pf->pdev)) { 1808 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to 1809 * work correctly when SR-IOV gets re-enabled. 1810 */ 1811 for (vf_id = 0; vf_id < tmp; vf_id++) { 1812 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; 1813 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; 1814 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); 1815 } 1816 } 1817 clear_bit(__I40E_VF_DISABLE, pf->state); 1818 clear_bit(__I40E_VFS_RELEASING, pf->state); 1819 } 1820 1821 #ifdef CONFIG_PCI_IOV 1822 /** 1823 * i40e_alloc_vfs 1824 * @pf: pointer to the PF structure 1825 * @num_alloc_vfs: number of VFs to allocate 1826 * 1827 * allocate VF resources 1828 **/ 1829 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) 1830 { 1831 struct i40e_vf *vfs; 1832 int i, ret = 0; 1833 1834 /* Disable interrupt 0 so we don't try to handle the VFLR. */ 1835 i40e_irq_dynamic_disable_icr0(pf); 1836 1837 /* Check to see if we're just allocating resources for extant VFs */ 1838 if (pci_num_vf(pf->pdev) != num_alloc_vfs) { 1839 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); 1840 if (ret) { 1841 clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); 1842 pf->num_alloc_vfs = 0; 1843 goto err_iov; 1844 } 1845 } 1846 /* allocate memory */ 1847 vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL); 1848 if (!vfs) { 1849 ret = -ENOMEM; 1850 goto err_alloc; 1851 } 1852 pf->vf = vfs; 1853 1854 /* apply default profile */ 1855 for (i = 0; i < num_alloc_vfs; i++) { 1856 vfs[i].pf = pf; 1857 vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB; 1858 vfs[i].vf_id = i; 1859 1860 /* assign default capabilities */ 1861 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); 1862 vfs[i].spoofchk = true; 1863 1864 set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states); 1865 1866 } 1867 pf->num_alloc_vfs = num_alloc_vfs; 1868 1869 /* VF resources get allocated during reset */ 1870 i40e_reset_all_vfs(pf, false); 1871 1872 i40e_notify_client_of_vf_enable(pf, num_alloc_vfs); 1873 1874 err_alloc: 1875 if (ret) 1876 i40e_free_vfs(pf); 1877 err_iov: 1878 /* Re-enable interrupt 0. */ 1879 i40e_irq_dynamic_enable_icr0(pf); 1880 return ret; 1881 } 1882 1883 #endif 1884 /** 1885 * i40e_pci_sriov_enable 1886 * @pdev: pointer to a pci_dev structure 1887 * @num_vfs: number of VFs to allocate 1888 * 1889 * Enable or change the number of VFs 1890 **/ 1891 static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs) 1892 { 1893 #ifdef CONFIG_PCI_IOV 1894 struct i40e_pf *pf = pci_get_drvdata(pdev); 1895 int pre_existing_vfs = pci_num_vf(pdev); 1896 int err = 0; 1897 1898 if (test_bit(__I40E_TESTING, pf->state)) { 1899 dev_warn(&pdev->dev, 1900 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n"); 1901 err = -EPERM; 1902 goto err_out; 1903 } 1904 1905 if (pre_existing_vfs && pre_existing_vfs != num_vfs) 1906 i40e_free_vfs(pf); 1907 else if (pre_existing_vfs && pre_existing_vfs == num_vfs) 1908 goto out; 1909 1910 if (num_vfs > pf->num_req_vfs) { 1911 dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n", 1912 num_vfs, pf->num_req_vfs); 1913 err = -EPERM; 1914 goto err_out; 1915 } 1916 1917 dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs); 1918 err = i40e_alloc_vfs(pf, num_vfs); 1919 if (err) { 1920 dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err); 1921 goto err_out; 1922 } 1923 1924 out: 1925 return num_vfs; 1926 1927 err_out: 1928 return err; 1929 #endif 1930 return 0; 1931 } 1932 1933 /** 1934 * i40e_pci_sriov_configure 1935 * @pdev: pointer to a pci_dev structure 1936 * @num_vfs: number of VFs to allocate 1937 * 1938 * Enable or change the number of VFs. Called when the user updates the number 1939 * of VFs in sysfs. 1940 **/ 1941 int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) 1942 { 1943 struct i40e_pf *pf = pci_get_drvdata(pdev); 1944 int ret = 0; 1945 1946 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 1947 dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 1948 return -EAGAIN; 1949 } 1950 1951 if (num_vfs) { 1952 if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) { 1953 set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); 1954 i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG); 1955 } 1956 ret = i40e_pci_sriov_enable(pdev, num_vfs); 1957 goto sriov_configure_out; 1958 } 1959 1960 if (!pci_vfs_assigned(pf->pdev)) { 1961 i40e_free_vfs(pf); 1962 clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); 1963 i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG); 1964 } else { 1965 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); 1966 ret = -EINVAL; 1967 goto sriov_configure_out; 1968 } 1969 sriov_configure_out: 1970 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 1971 return ret; 1972 } 1973 1974 /***********************virtual channel routines******************/ 1975 1976 /** 1977 * i40e_vc_send_msg_to_vf 1978 * @vf: pointer to the VF info 1979 * @v_opcode: virtual channel opcode 1980 * @v_retval: virtual channel return value 1981 * @msg: pointer to the msg buffer 1982 * @msglen: msg length 1983 * 1984 * send msg to VF 1985 **/ 1986 static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, 1987 u32 v_retval, u8 *msg, u16 msglen) 1988 { 1989 struct i40e_pf *pf; 1990 struct i40e_hw *hw; 1991 int abs_vf_id; 1992 int aq_ret; 1993 1994 /* validate the request */ 1995 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) 1996 return -EINVAL; 1997 1998 pf = vf->pf; 1999 hw = &pf->hw; 2000 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 2001 2002 aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, 2003 msg, msglen, NULL); 2004 if (aq_ret) { 2005 dev_info(&pf->pdev->dev, 2006 "Unable to send the message to VF %d aq_err %d\n", 2007 vf->vf_id, pf->hw.aq.asq_last_status); 2008 return -EIO; 2009 } 2010 2011 return 0; 2012 } 2013 2014 /** 2015 * i40e_vc_send_resp_to_vf 2016 * @vf: pointer to the VF info 2017 * @opcode: operation code 2018 * @retval: return value 2019 * 2020 * send resp msg to VF 2021 **/ 2022 static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, 2023 enum virtchnl_ops opcode, 2024 int retval) 2025 { 2026 return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0); 2027 } 2028 2029 /** 2030 * i40e_sync_vf_state 2031 * @vf: pointer to the VF info 2032 * @state: VF state 2033 * 2034 * Called from a VF message to synchronize the service with a potential 2035 * VF reset state 2036 **/ 2037 static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state) 2038 { 2039 int i; 2040 2041 /* When handling some messages, it needs VF state to be set. 2042 * It is possible that this flag is cleared during VF reset, 2043 * so there is a need to wait until the end of the reset to 2044 * handle the request message correctly. 2045 */ 2046 for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) { 2047 if (test_bit(state, &vf->vf_states)) 2048 return true; 2049 usleep_range(10000, 20000); 2050 } 2051 2052 return test_bit(state, &vf->vf_states); 2053 } 2054 2055 /** 2056 * i40e_vc_get_version_msg 2057 * @vf: pointer to the VF info 2058 * @msg: pointer to the msg buffer 2059 * 2060 * called from the VF to request the API version used by the PF 2061 **/ 2062 static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg) 2063 { 2064 struct virtchnl_version_info info = { 2065 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR 2066 }; 2067 2068 vf->vf_ver = *(struct virtchnl_version_info *)msg; 2069 /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */ 2070 if (VF_IS_V10(&vf->vf_ver)) 2071 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; 2072 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, 2073 0, (u8 *)&info, 2074 sizeof(struct virtchnl_version_info)); 2075 } 2076 2077 /** 2078 * i40e_del_qch - delete all the additional VSIs created as a part of ADq 2079 * @vf: pointer to VF structure 2080 **/ 2081 static void i40e_del_qch(struct i40e_vf *vf) 2082 { 2083 struct i40e_pf *pf = vf->pf; 2084 int i; 2085 2086 /* first element in the array belongs to primary VF VSI and we shouldn't 2087 * delete it. We should however delete the rest of the VSIs created 2088 */ 2089 for (i = 1; i < vf->num_tc; i++) { 2090 if (vf->ch[i].vsi_idx) { 2091 i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]); 2092 vf->ch[i].vsi_idx = 0; 2093 vf->ch[i].vsi_id = 0; 2094 } 2095 } 2096 } 2097 2098 /** 2099 * i40e_vc_get_max_frame_size 2100 * @vf: pointer to the VF 2101 * 2102 * Max frame size is determined based on the current port's max frame size and 2103 * whether a port VLAN is configured on this VF. The VF is not aware whether 2104 * it's in a port VLAN so the PF needs to account for this in max frame size 2105 * checks and sending the max frame size to the VF. 2106 **/ 2107 static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf) 2108 { 2109 u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size; 2110 2111 if (vf->port_vlan_id) 2112 max_frame_size -= VLAN_HLEN; 2113 2114 return max_frame_size; 2115 } 2116 2117 /** 2118 * i40e_vc_get_vf_resources_msg 2119 * @vf: pointer to the VF info 2120 * @msg: pointer to the msg buffer 2121 * 2122 * called from the VF to request its resources 2123 **/ 2124 static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) 2125 { 2126 struct virtchnl_vf_resource *vfres = NULL; 2127 struct i40e_pf *pf = vf->pf; 2128 struct i40e_vsi *vsi; 2129 int num_vsis = 1; 2130 int aq_ret = 0; 2131 size_t len = 0; 2132 int ret; 2133 2134 i40e_sync_vf_state(vf, I40E_VF_STATE_INIT); 2135 2136 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) || 2137 test_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states)) { 2138 aq_ret = -EINVAL; 2139 goto err; 2140 } 2141 2142 len = virtchnl_struct_size(vfres, vsi_res, num_vsis); 2143 vfres = kzalloc(len, GFP_KERNEL); 2144 if (!vfres) { 2145 aq_ret = -ENOMEM; 2146 len = 0; 2147 goto err; 2148 } 2149 if (VF_IS_V11(&vf->vf_ver)) 2150 vf->driver_caps = *(u32 *)msg; 2151 else 2152 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 | 2153 VIRTCHNL_VF_OFFLOAD_RSS_REG | 2154 VIRTCHNL_VF_OFFLOAD_VLAN; 2155 2156 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2; 2157 vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED; 2158 vsi = pf->vsi[vf->lan_vsi_idx]; 2159 if (!vsi->info.pvid) 2160 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; 2161 2162 if (i40e_vf_client_capable(pf, vf->vf_id) && 2163 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RDMA)) { 2164 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RDMA; 2165 set_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states); 2166 } else { 2167 clear_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states); 2168 } 2169 2170 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { 2171 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; 2172 } else { 2173 if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps) && 2174 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)) 2175 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ; 2176 else 2177 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; 2178 } 2179 2180 if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, pf->hw.caps)) { 2181 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) 2182 vfres->vf_cap_flags |= 2183 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; 2184 } 2185 2186 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP) 2187 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP; 2188 2189 if (test_bit(I40E_HW_CAP_OUTER_UDP_CSUM, pf->hw.caps) && 2190 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) 2191 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; 2192 2193 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) { 2194 if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { 2195 dev_err(&pf->pdev->dev, 2196 "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n", 2197 vf->vf_id); 2198 aq_ret = -EINVAL; 2199 goto err; 2200 } 2201 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; 2202 } 2203 2204 if (test_bit(I40E_HW_CAP_WB_ON_ITR, pf->hw.caps)) { 2205 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) 2206 vfres->vf_cap_flags |= 2207 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; 2208 } 2209 2210 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES) 2211 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; 2212 2213 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ) 2214 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ; 2215 2216 vfres->num_vsis = num_vsis; 2217 vfres->num_queue_pairs = vf->num_queue_pairs; 2218 vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; 2219 vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE; 2220 vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE; 2221 vfres->max_mtu = i40e_vc_get_max_frame_size(vf); 2222 2223 if (vf->lan_vsi_idx) { 2224 vfres->vsi_res[0].vsi_id = vf->lan_vsi_id; 2225 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; 2226 vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs; 2227 /* VFs only use TC 0 */ 2228 vfres->vsi_res[0].qset_handle 2229 = le16_to_cpu(vsi->info.qs_handle[0]); 2230 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) { 2231 spin_lock_bh(&vsi->mac_filter_hash_lock); 2232 i40e_del_mac_filter(vsi, vf->default_lan_addr.addr); 2233 eth_zero_addr(vf->default_lan_addr.addr); 2234 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2235 } 2236 ether_addr_copy(vfres->vsi_res[0].default_mac_addr, 2237 vf->default_lan_addr.addr); 2238 } 2239 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); 2240 set_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states); 2241 2242 err: 2243 /* send the response back to the VF */ 2244 ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, 2245 aq_ret, (u8 *)vfres, len); 2246 2247 kfree(vfres); 2248 return ret; 2249 } 2250 2251 /** 2252 * i40e_vc_config_promiscuous_mode_msg 2253 * @vf: pointer to the VF info 2254 * @msg: pointer to the msg buffer 2255 * 2256 * called from the VF to configure the promiscuous mode of 2257 * VF vsis 2258 **/ 2259 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) 2260 { 2261 struct virtchnl_promisc_info *info = 2262 (struct virtchnl_promisc_info *)msg; 2263 struct i40e_pf *pf = vf->pf; 2264 bool allmulti = false; 2265 bool alluni = false; 2266 int aq_ret = 0; 2267 2268 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 2269 aq_ret = -EINVAL; 2270 goto err_out; 2271 } 2272 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 2273 dev_err(&pf->pdev->dev, 2274 "Unprivileged VF %d is attempting to configure promiscuous mode\n", 2275 vf->vf_id); 2276 2277 /* Lie to the VF on purpose, because this is an error we can 2278 * ignore. Unprivileged VF is not a virtual channel error. 2279 */ 2280 aq_ret = 0; 2281 goto err_out; 2282 } 2283 2284 if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) { 2285 aq_ret = -EINVAL; 2286 goto err_out; 2287 } 2288 2289 if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) { 2290 aq_ret = -EINVAL; 2291 goto err_out; 2292 } 2293 2294 /* Multicast promiscuous handling*/ 2295 if (info->flags & FLAG_VF_MULTICAST_PROMISC) 2296 allmulti = true; 2297 2298 if (info->flags & FLAG_VF_UNICAST_PROMISC) 2299 alluni = true; 2300 aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti, 2301 alluni); 2302 if (aq_ret) 2303 goto err_out; 2304 2305 if (allmulti) { 2306 if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC, 2307 &vf->vf_states)) 2308 dev_info(&pf->pdev->dev, 2309 "VF %d successfully set multicast promiscuous mode\n", 2310 vf->vf_id); 2311 } else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC, 2312 &vf->vf_states)) 2313 dev_info(&pf->pdev->dev, 2314 "VF %d successfully unset multicast promiscuous mode\n", 2315 vf->vf_id); 2316 2317 if (alluni) { 2318 if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC, 2319 &vf->vf_states)) 2320 dev_info(&pf->pdev->dev, 2321 "VF %d successfully set unicast promiscuous mode\n", 2322 vf->vf_id); 2323 } else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC, 2324 &vf->vf_states)) 2325 dev_info(&pf->pdev->dev, 2326 "VF %d successfully unset unicast promiscuous mode\n", 2327 vf->vf_id); 2328 2329 err_out: 2330 /* send the response to the VF */ 2331 return i40e_vc_send_resp_to_vf(vf, 2332 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, 2333 aq_ret); 2334 } 2335 2336 /** 2337 * i40e_vc_config_queues_msg 2338 * @vf: pointer to the VF info 2339 * @msg: pointer to the msg buffer 2340 * 2341 * called from the VF to configure the rx/tx 2342 * queues 2343 **/ 2344 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) 2345 { 2346 struct virtchnl_vsi_queue_config_info *qci = 2347 (struct virtchnl_vsi_queue_config_info *)msg; 2348 struct virtchnl_queue_pair_info *qpi; 2349 u16 vsi_id, vsi_queue_id = 0; 2350 struct i40e_pf *pf = vf->pf; 2351 int i, j = 0, idx = 0; 2352 struct i40e_vsi *vsi; 2353 u16 num_qps_all = 0; 2354 int aq_ret = 0; 2355 2356 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 2357 aq_ret = -EINVAL; 2358 goto error_param; 2359 } 2360 2361 if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) { 2362 aq_ret = -EINVAL; 2363 goto error_param; 2364 } 2365 2366 if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) { 2367 aq_ret = -EINVAL; 2368 goto error_param; 2369 } 2370 2371 if (vf->adq_enabled) { 2372 for (i = 0; i < vf->num_tc; i++) 2373 num_qps_all += vf->ch[i].num_qps; 2374 if (num_qps_all != qci->num_queue_pairs) { 2375 aq_ret = -EINVAL; 2376 goto error_param; 2377 } 2378 } 2379 2380 vsi_id = qci->vsi_id; 2381 2382 for (i = 0; i < qci->num_queue_pairs; i++) { 2383 qpi = &qci->qpair[i]; 2384 2385 if (!vf->adq_enabled) { 2386 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, 2387 qpi->txq.queue_id)) { 2388 aq_ret = -EINVAL; 2389 goto error_param; 2390 } 2391 2392 vsi_queue_id = qpi->txq.queue_id; 2393 2394 if (qpi->txq.vsi_id != qci->vsi_id || 2395 qpi->rxq.vsi_id != qci->vsi_id || 2396 qpi->rxq.queue_id != vsi_queue_id) { 2397 aq_ret = -EINVAL; 2398 goto error_param; 2399 } 2400 } 2401 2402 if (vf->adq_enabled) { 2403 if (idx >= vf->num_tc) { 2404 aq_ret = -ENODEV; 2405 goto error_param; 2406 } 2407 vsi_id = vf->ch[idx].vsi_id; 2408 } 2409 2410 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id, 2411 &qpi->rxq) || 2412 i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id, 2413 &qpi->txq)) { 2414 aq_ret = -EINVAL; 2415 goto error_param; 2416 } 2417 2418 /* For ADq there can be up to 4 VSIs with max 4 queues each. 2419 * VF does not know about these additional VSIs and all 2420 * it cares is about its own queues. PF configures these queues 2421 * to its appropriate VSIs based on TC mapping 2422 */ 2423 if (vf->adq_enabled) { 2424 if (idx >= vf->num_tc) { 2425 aq_ret = -ENODEV; 2426 goto error_param; 2427 } 2428 if (j == (vf->ch[idx].num_qps - 1)) { 2429 idx++; 2430 j = 0; /* resetting the queue count */ 2431 vsi_queue_id = 0; 2432 } else { 2433 j++; 2434 vsi_queue_id++; 2435 } 2436 } 2437 } 2438 /* set vsi num_queue_pairs in use to num configured by VF */ 2439 if (!vf->adq_enabled) { 2440 pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = 2441 qci->num_queue_pairs; 2442 } else { 2443 for (i = 0; i < vf->num_tc; i++) { 2444 vsi = pf->vsi[vf->ch[i].vsi_idx]; 2445 vsi->num_queue_pairs = vf->ch[i].num_qps; 2446 2447 if (i40e_update_adq_vsi_queues(vsi, i)) { 2448 aq_ret = -EIO; 2449 goto error_param; 2450 } 2451 } 2452 } 2453 2454 error_param: 2455 /* send the response to the VF */ 2456 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, 2457 aq_ret); 2458 } 2459 2460 /** 2461 * i40e_validate_queue_map - check queue map is valid 2462 * @vf: the VF structure pointer 2463 * @vsi_id: vsi id 2464 * @queuemap: Tx or Rx queue map 2465 * 2466 * check if Tx or Rx queue map is valid 2467 **/ 2468 static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id, 2469 unsigned long queuemap) 2470 { 2471 u16 vsi_queue_id, queue_id; 2472 2473 for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) { 2474 u16 idx = vsi_queue_id / I40E_MAX_VF_VSI; 2475 2476 if (vf->adq_enabled && idx < vf->num_tc) { 2477 vsi_id = vf->ch[idx].vsi_id; 2478 queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF); 2479 } else { 2480 queue_id = vsi_queue_id; 2481 } 2482 2483 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) 2484 return -EINVAL; 2485 } 2486 2487 return 0; 2488 } 2489 2490 /** 2491 * i40e_vc_config_irq_map_msg 2492 * @vf: pointer to the VF info 2493 * @msg: pointer to the msg buffer 2494 * 2495 * called from the VF to configure the irq to 2496 * queue map 2497 **/ 2498 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg) 2499 { 2500 struct virtchnl_irq_map_info *irqmap_info = 2501 (struct virtchnl_irq_map_info *)msg; 2502 struct virtchnl_vector_map *map; 2503 int aq_ret = 0; 2504 u16 vsi_id; 2505 int i; 2506 2507 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 2508 aq_ret = -EINVAL; 2509 goto error_param; 2510 } 2511 2512 if (irqmap_info->num_vectors > 2513 vf->pf->hw.func_caps.num_msix_vectors_vf) { 2514 aq_ret = -EINVAL; 2515 goto error_param; 2516 } 2517 2518 for (i = 0; i < irqmap_info->num_vectors; i++) { 2519 map = &irqmap_info->vecmap[i]; 2520 /* validate msg params */ 2521 if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) || 2522 !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) { 2523 aq_ret = -EINVAL; 2524 goto error_param; 2525 } 2526 vsi_id = map->vsi_id; 2527 2528 if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) { 2529 aq_ret = -EINVAL; 2530 goto error_param; 2531 } 2532 2533 if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) { 2534 aq_ret = -EINVAL; 2535 goto error_param; 2536 } 2537 2538 i40e_config_irq_link_list(vf, vsi_id, map); 2539 } 2540 error_param: 2541 /* send the response to the VF */ 2542 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, 2543 aq_ret); 2544 } 2545 2546 /** 2547 * i40e_ctrl_vf_tx_rings 2548 * @vsi: the SRIOV VSI being configured 2549 * @q_map: bit map of the queues to be enabled 2550 * @enable: start or stop the queue 2551 **/ 2552 static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map, 2553 bool enable) 2554 { 2555 struct i40e_pf *pf = vsi->back; 2556 int ret = 0; 2557 u16 q_id; 2558 2559 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { 2560 ret = i40e_control_wait_tx_q(vsi->seid, pf, 2561 vsi->base_queue + q_id, 2562 false /*is xdp*/, enable); 2563 if (ret) 2564 break; 2565 } 2566 return ret; 2567 } 2568 2569 /** 2570 * i40e_ctrl_vf_rx_rings 2571 * @vsi: the SRIOV VSI being configured 2572 * @q_map: bit map of the queues to be enabled 2573 * @enable: start or stop the queue 2574 **/ 2575 static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map, 2576 bool enable) 2577 { 2578 struct i40e_pf *pf = vsi->back; 2579 int ret = 0; 2580 u16 q_id; 2581 2582 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { 2583 ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id, 2584 enable); 2585 if (ret) 2586 break; 2587 } 2588 return ret; 2589 } 2590 2591 /** 2592 * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTHCHNL 2593 * @vqs: virtchnl_queue_select structure containing bitmaps to validate 2594 * 2595 * Returns true if validation was successful, else false. 2596 */ 2597 static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs) 2598 { 2599 if ((!vqs->rx_queues && !vqs->tx_queues) || 2600 vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) || 2601 vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES)) 2602 return false; 2603 2604 return true; 2605 } 2606 2607 /** 2608 * i40e_vc_enable_queues_msg 2609 * @vf: pointer to the VF info 2610 * @msg: pointer to the msg buffer 2611 * 2612 * called from the VF to enable all or specific queue(s) 2613 **/ 2614 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) 2615 { 2616 struct virtchnl_queue_select *vqs = 2617 (struct virtchnl_queue_select *)msg; 2618 struct i40e_pf *pf = vf->pf; 2619 int aq_ret = 0; 2620 int i; 2621 2622 if (vf->is_disabled_from_host) { 2623 aq_ret = -EPERM; 2624 dev_info(&pf->pdev->dev, 2625 "Admin has disabled VF %d, will not enable queues\n", 2626 vf->vf_id); 2627 goto error_param; 2628 } 2629 2630 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2631 aq_ret = -EINVAL; 2632 goto error_param; 2633 } 2634 2635 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2636 aq_ret = -EINVAL; 2637 goto error_param; 2638 } 2639 2640 if (!i40e_vc_validate_vqs_bitmaps(vqs)) { 2641 aq_ret = -EINVAL; 2642 goto error_param; 2643 } 2644 2645 /* Use the queue bit map sent by the VF */ 2646 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, 2647 true)) { 2648 aq_ret = -EIO; 2649 goto error_param; 2650 } 2651 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, 2652 true)) { 2653 aq_ret = -EIO; 2654 goto error_param; 2655 } 2656 2657 /* need to start the rings for additional ADq VSI's as well */ 2658 if (vf->adq_enabled) { 2659 /* zero belongs to LAN VSI */ 2660 for (i = 1; i < vf->num_tc; i++) { 2661 if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx])) 2662 aq_ret = -EIO; 2663 } 2664 } 2665 2666 error_param: 2667 /* send the response to the VF */ 2668 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, 2669 aq_ret); 2670 } 2671 2672 /** 2673 * i40e_vc_disable_queues_msg 2674 * @vf: pointer to the VF info 2675 * @msg: pointer to the msg buffer 2676 * 2677 * called from the VF to disable all or specific 2678 * queue(s) 2679 **/ 2680 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) 2681 { 2682 struct virtchnl_queue_select *vqs = 2683 (struct virtchnl_queue_select *)msg; 2684 struct i40e_pf *pf = vf->pf; 2685 int aq_ret = 0; 2686 2687 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 2688 aq_ret = -EINVAL; 2689 goto error_param; 2690 } 2691 2692 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2693 aq_ret = -EINVAL; 2694 goto error_param; 2695 } 2696 2697 if (!i40e_vc_validate_vqs_bitmaps(vqs)) { 2698 aq_ret = -EINVAL; 2699 goto error_param; 2700 } 2701 2702 /* Use the queue bit map sent by the VF */ 2703 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, 2704 false)) { 2705 aq_ret = -EIO; 2706 goto error_param; 2707 } 2708 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, 2709 false)) { 2710 aq_ret = -EIO; 2711 goto error_param; 2712 } 2713 error_param: 2714 /* send the response to the VF */ 2715 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, 2716 aq_ret); 2717 } 2718 2719 /** 2720 * i40e_check_enough_queue - find big enough queue number 2721 * @vf: pointer to the VF info 2722 * @needed: the number of items needed 2723 * 2724 * Returns the base item index of the queue, or negative for error 2725 **/ 2726 static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed) 2727 { 2728 unsigned int i, cur_queues, more, pool_size; 2729 struct i40e_lump_tracking *pile; 2730 struct i40e_pf *pf = vf->pf; 2731 struct i40e_vsi *vsi; 2732 2733 vsi = pf->vsi[vf->lan_vsi_idx]; 2734 cur_queues = vsi->alloc_queue_pairs; 2735 2736 /* if current allocated queues are enough for need */ 2737 if (cur_queues >= needed) 2738 return vsi->base_queue; 2739 2740 pile = pf->qp_pile; 2741 if (cur_queues > 0) { 2742 /* if the allocated queues are not zero 2743 * just check if there are enough queues for more 2744 * behind the allocated queues. 2745 */ 2746 more = needed - cur_queues; 2747 for (i = vsi->base_queue + cur_queues; 2748 i < pile->num_entries; i++) { 2749 if (pile->list[i] & I40E_PILE_VALID_BIT) 2750 break; 2751 2752 if (more-- == 1) 2753 /* there is enough */ 2754 return vsi->base_queue; 2755 } 2756 } 2757 2758 pool_size = 0; 2759 for (i = 0; i < pile->num_entries; i++) { 2760 if (pile->list[i] & I40E_PILE_VALID_BIT) { 2761 pool_size = 0; 2762 continue; 2763 } 2764 if (needed <= ++pool_size) 2765 /* there is enough */ 2766 return i; 2767 } 2768 2769 return -ENOMEM; 2770 } 2771 2772 /** 2773 * i40e_vc_request_queues_msg 2774 * @vf: pointer to the VF info 2775 * @msg: pointer to the msg buffer 2776 * 2777 * VFs get a default number of queues but can use this message to request a 2778 * different number. If the request is successful, PF will reset the VF and 2779 * return 0. If unsuccessful, PF will send message informing VF of number of 2780 * available queues and return result of sending VF a message. 2781 **/ 2782 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) 2783 { 2784 struct virtchnl_vf_res_request *vfres = 2785 (struct virtchnl_vf_res_request *)msg; 2786 u16 req_pairs = vfres->num_queue_pairs; 2787 u8 cur_pairs = vf->num_queue_pairs; 2788 struct i40e_pf *pf = vf->pf; 2789 2790 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) 2791 return -EINVAL; 2792 2793 if (req_pairs > I40E_MAX_VF_QUEUES) { 2794 dev_err(&pf->pdev->dev, 2795 "VF %d tried to request more than %d queues.\n", 2796 vf->vf_id, 2797 I40E_MAX_VF_QUEUES); 2798 vfres->num_queue_pairs = I40E_MAX_VF_QUEUES; 2799 } else if (req_pairs - cur_pairs > pf->queues_left) { 2800 dev_warn(&pf->pdev->dev, 2801 "VF %d requested %d more queues, but only %d left.\n", 2802 vf->vf_id, 2803 req_pairs - cur_pairs, 2804 pf->queues_left); 2805 vfres->num_queue_pairs = pf->queues_left + cur_pairs; 2806 } else if (i40e_check_enough_queue(vf, req_pairs) < 0) { 2807 dev_warn(&pf->pdev->dev, 2808 "VF %d requested %d more queues, but there is not enough for it.\n", 2809 vf->vf_id, 2810 req_pairs - cur_pairs); 2811 vfres->num_queue_pairs = cur_pairs; 2812 } else { 2813 /* successful request */ 2814 vf->num_req_queues = req_pairs; 2815 i40e_vc_reset_vf(vf, true); 2816 return 0; 2817 } 2818 2819 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0, 2820 (u8 *)vfres, sizeof(*vfres)); 2821 } 2822 2823 /** 2824 * i40e_vc_get_stats_msg 2825 * @vf: pointer to the VF info 2826 * @msg: pointer to the msg buffer 2827 * 2828 * called from the VF to get vsi stats 2829 **/ 2830 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) 2831 { 2832 struct virtchnl_queue_select *vqs = 2833 (struct virtchnl_queue_select *)msg; 2834 struct i40e_pf *pf = vf->pf; 2835 struct i40e_eth_stats stats; 2836 int aq_ret = 0; 2837 struct i40e_vsi *vsi; 2838 2839 memset(&stats, 0, sizeof(struct i40e_eth_stats)); 2840 2841 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 2842 aq_ret = -EINVAL; 2843 goto error_param; 2844 } 2845 2846 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2847 aq_ret = -EINVAL; 2848 goto error_param; 2849 } 2850 2851 vsi = pf->vsi[vf->lan_vsi_idx]; 2852 if (!vsi) { 2853 aq_ret = -EINVAL; 2854 goto error_param; 2855 } 2856 i40e_update_eth_stats(vsi); 2857 stats = vsi->eth_stats; 2858 2859 error_param: 2860 /* send the response back to the VF */ 2861 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret, 2862 (u8 *)&stats, sizeof(stats)); 2863 } 2864 2865 #define I40E_MAX_MACVLAN_PER_HW 3072 2866 #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \ 2867 (num_ports)) 2868 /* If the VF is not trusted restrict the number of MAC/VLAN it can program 2869 * MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast 2870 */ 2871 #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1) 2872 #define I40E_VC_MAX_VLAN_PER_VF 16 2873 2874 #define I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(vf_num, num_ports) \ 2875 ({ typeof(vf_num) vf_num_ = (vf_num); \ 2876 typeof(num_ports) num_ports_ = (num_ports); \ 2877 ((I40E_MAX_MACVLAN_PER_PF(num_ports_) - vf_num_ * \ 2878 I40E_VC_MAX_MAC_ADDR_PER_VF) / vf_num_) + \ 2879 I40E_VC_MAX_MAC_ADDR_PER_VF; }) 2880 /** 2881 * i40e_check_vf_permission 2882 * @vf: pointer to the VF info 2883 * @al: MAC address list from virtchnl 2884 * 2885 * Check that the given list of MAC addresses is allowed. Will return -EPERM 2886 * if any address in the list is not valid. Checks the following conditions: 2887 * 2888 * 1) broadcast and zero addresses are never valid 2889 * 2) unicast addresses are not allowed if the VMM has administratively set 2890 * the VF MAC address, unless the VF is marked as privileged. 2891 * 3) There is enough space to add all the addresses. 2892 * 2893 * Note that to guarantee consistency, it is expected this function be called 2894 * while holding the mac_filter_hash_lock, as otherwise the current number of 2895 * addresses might not be accurate. 2896 **/ 2897 static inline int i40e_check_vf_permission(struct i40e_vf *vf, 2898 struct virtchnl_ether_addr_list *al) 2899 { 2900 struct i40e_pf *pf = vf->pf; 2901 struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx]; 2902 struct i40e_hw *hw = &pf->hw; 2903 int i, mac_add_max, mac_add_cnt = 0; 2904 bool vf_trusted; 2905 2906 vf_trusted = test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 2907 2908 for (i = 0; i < al->num_elements; i++) { 2909 struct i40e_mac_filter *f; 2910 u8 *addr = al->list[i].addr; 2911 2912 if (is_broadcast_ether_addr(addr) || 2913 is_zero_ether_addr(addr)) { 2914 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", 2915 addr); 2916 return -EINVAL; 2917 } 2918 2919 /* If the host VMM administrator has set the VF MAC address 2920 * administratively via the ndo_set_vf_mac command then deny 2921 * permission to the VF to add or delete unicast MAC addresses. 2922 * Unless the VF is privileged and then it can do whatever. 2923 * The VF may request to set the MAC address filter already 2924 * assigned to it so do not return an error in that case. 2925 */ 2926 if (!vf_trusted && !is_multicast_ether_addr(addr) && 2927 vf->pf_set_mac && !ether_addr_equal(addr, vf->default_lan_addr.addr)) { 2928 dev_err(&pf->pdev->dev, 2929 "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); 2930 return -EPERM; 2931 } 2932 2933 /*count filters that really will be added*/ 2934 f = i40e_find_mac(vsi, addr); 2935 if (!f) 2936 ++mac_add_cnt; 2937 } 2938 /* Determine the maximum number of MAC addresses this VF may use. 2939 * 2940 * - For untrusted VFs: use a fixed small limit. 2941 * 2942 * - For trusted VFs: limit is calculated by dividing total MAC 2943 * filter pool across all VFs/ports. 2944 * 2945 * - User can override this by devlink param "max_mac_per_vf". 2946 * If set its value is used as a strict cap for both trusted and 2947 * untrusted VFs. 2948 * Note: 2949 * even when overridden, this is a theoretical maximum; hardware 2950 * may reject additional MACs if the absolute HW limit is reached. 2951 */ 2952 if (!vf_trusted) 2953 mac_add_max = I40E_VC_MAX_MAC_ADDR_PER_VF; 2954 else 2955 mac_add_max = I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs, hw->num_ports); 2956 2957 if (pf->max_mac_per_vf > 0) 2958 mac_add_max = pf->max_mac_per_vf; 2959 2960 /* VF can replace all its filters in one step, in this case mac_add_max 2961 * will be added as active and another mac_add_max will be in 2962 * a to-be-removed state. Account for that. 2963 */ 2964 if ((i40e_count_active_filters(vsi) + mac_add_cnt) > mac_add_max || 2965 (i40e_count_all_filters(vsi) + mac_add_cnt) > 2 * mac_add_max) { 2966 if (pf->max_mac_per_vf == mac_add_max && mac_add_max > 0) { 2967 dev_err(&pf->pdev->dev, 2968 "Cannot add more MAC addresses: VF reached its maximum allowed limit (%d)\n", 2969 mac_add_max); 2970 return -EPERM; 2971 } 2972 if (!vf_trusted) { 2973 dev_err(&pf->pdev->dev, 2974 "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n"); 2975 return -EPERM; 2976 } else { 2977 dev_err(&pf->pdev->dev, 2978 "Cannot add more MAC addresses: trusted VF reached its maximum allowed limit (%d)\n", 2979 mac_add_max); 2980 return -EPERM; 2981 } 2982 } 2983 return 0; 2984 } 2985 2986 /** 2987 * i40e_vc_ether_addr_type - get type of virtchnl_ether_addr 2988 * @vc_ether_addr: used to extract the type 2989 **/ 2990 static u8 2991 i40e_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr) 2992 { 2993 return vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK; 2994 } 2995 2996 /** 2997 * i40e_is_vc_addr_legacy 2998 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type 2999 * 3000 * check if the MAC address is from an older VF 3001 **/ 3002 static bool 3003 i40e_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr) 3004 { 3005 return i40e_vc_ether_addr_type(vc_ether_addr) == 3006 VIRTCHNL_ETHER_ADDR_LEGACY; 3007 } 3008 3009 /** 3010 * i40e_is_vc_addr_primary 3011 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type 3012 * 3013 * check if the MAC address is the VF's primary MAC 3014 * This function should only be called when the MAC address in 3015 * virtchnl_ether_addr is a valid unicast MAC 3016 **/ 3017 static bool 3018 i40e_is_vc_addr_primary(struct virtchnl_ether_addr *vc_ether_addr) 3019 { 3020 return i40e_vc_ether_addr_type(vc_ether_addr) == 3021 VIRTCHNL_ETHER_ADDR_PRIMARY; 3022 } 3023 3024 /** 3025 * i40e_update_vf_mac_addr 3026 * @vf: VF to update 3027 * @vc_ether_addr: structure from VIRTCHNL with MAC to add 3028 * 3029 * update the VF's cached hardware MAC if allowed 3030 **/ 3031 static void 3032 i40e_update_vf_mac_addr(struct i40e_vf *vf, 3033 struct virtchnl_ether_addr *vc_ether_addr) 3034 { 3035 u8 *mac_addr = vc_ether_addr->addr; 3036 3037 if (!is_valid_ether_addr(mac_addr)) 3038 return; 3039 3040 /* If request to add MAC filter is a primary request update its default 3041 * MAC address with the requested one. If it is a legacy request then 3042 * check if current default is empty if so update the default MAC 3043 */ 3044 if (i40e_is_vc_addr_primary(vc_ether_addr)) { 3045 ether_addr_copy(vf->default_lan_addr.addr, mac_addr); 3046 } else if (i40e_is_vc_addr_legacy(vc_ether_addr)) { 3047 if (is_zero_ether_addr(vf->default_lan_addr.addr)) 3048 ether_addr_copy(vf->default_lan_addr.addr, mac_addr); 3049 } 3050 } 3051 3052 /** 3053 * i40e_vc_add_mac_addr_msg 3054 * @vf: pointer to the VF info 3055 * @msg: pointer to the msg buffer 3056 * 3057 * add guest mac address filter 3058 **/ 3059 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) 3060 { 3061 struct virtchnl_ether_addr_list *al = 3062 (struct virtchnl_ether_addr_list *)msg; 3063 struct i40e_pf *pf = vf->pf; 3064 struct i40e_vsi *vsi = NULL; 3065 int ret = 0; 3066 int i; 3067 3068 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || 3069 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { 3070 ret = -EINVAL; 3071 goto error_param; 3072 } 3073 3074 vsi = pf->vsi[vf->lan_vsi_idx]; 3075 3076 /* Lock once, because all function inside for loop accesses VSI's 3077 * MAC filter list which needs to be protected using same lock. 3078 */ 3079 spin_lock_bh(&vsi->mac_filter_hash_lock); 3080 3081 ret = i40e_check_vf_permission(vf, al); 3082 if (ret) { 3083 spin_unlock_bh(&vsi->mac_filter_hash_lock); 3084 goto error_param; 3085 } 3086 3087 /* add new addresses to the list */ 3088 for (i = 0; i < al->num_elements; i++) { 3089 struct i40e_mac_filter *f; 3090 3091 f = i40e_find_mac(vsi, al->list[i].addr); 3092 if (!f) { 3093 f = i40e_add_mac_filter(vsi, al->list[i].addr); 3094 3095 if (!f) { 3096 dev_err(&pf->pdev->dev, 3097 "Unable to add MAC filter %pM for VF %d\n", 3098 al->list[i].addr, vf->vf_id); 3099 ret = -EINVAL; 3100 spin_unlock_bh(&vsi->mac_filter_hash_lock); 3101 goto error_param; 3102 } 3103 } 3104 i40e_update_vf_mac_addr(vf, &al->list[i]); 3105 } 3106 spin_unlock_bh(&vsi->mac_filter_hash_lock); 3107 3108 /* program the updated filter list */ 3109 ret = i40e_sync_vsi_filters(vsi); 3110 if (ret) 3111 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", 3112 vf->vf_id, ret); 3113 3114 error_param: 3115 /* send the response to the VF */ 3116 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, 3117 ret, NULL, 0); 3118 } 3119 3120 /** 3121 * i40e_vc_del_mac_addr_msg 3122 * @vf: pointer to the VF info 3123 * @msg: pointer to the msg buffer 3124 * 3125 * remove guest mac address filter 3126 **/ 3127 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) 3128 { 3129 struct virtchnl_ether_addr_list *al = 3130 (struct virtchnl_ether_addr_list *)msg; 3131 bool was_unimac_deleted = false; 3132 struct i40e_pf *pf = vf->pf; 3133 struct i40e_vsi *vsi = NULL; 3134 int ret = 0; 3135 int i; 3136 3137 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || 3138 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { 3139 ret = -EINVAL; 3140 goto error_param; 3141 } 3142 3143 for (i = 0; i < al->num_elements; i++) { 3144 if (is_broadcast_ether_addr(al->list[i].addr) || 3145 is_zero_ether_addr(al->list[i].addr)) { 3146 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n", 3147 al->list[i].addr, vf->vf_id); 3148 ret = -EINVAL; 3149 goto error_param; 3150 } 3151 } 3152 vsi = pf->vsi[vf->lan_vsi_idx]; 3153 3154 spin_lock_bh(&vsi->mac_filter_hash_lock); 3155 /* delete addresses from the list */ 3156 for (i = 0; i < al->num_elements; i++) { 3157 const u8 *addr = al->list[i].addr; 3158 3159 /* Allow to delete VF primary MAC only if it was not set 3160 * administratively by PF. 3161 */ 3162 if (ether_addr_equal(addr, vf->default_lan_addr.addr)) { 3163 if (!vf->pf_set_mac) 3164 was_unimac_deleted = true; 3165 else 3166 continue; 3167 } 3168 3169 if (i40e_del_mac_filter(vsi, al->list[i].addr)) { 3170 ret = -EINVAL; 3171 spin_unlock_bh(&vsi->mac_filter_hash_lock); 3172 goto error_param; 3173 } 3174 } 3175 3176 spin_unlock_bh(&vsi->mac_filter_hash_lock); 3177 3178 if (was_unimac_deleted) 3179 eth_zero_addr(vf->default_lan_addr.addr); 3180 3181 /* program the updated filter list */ 3182 ret = i40e_sync_vsi_filters(vsi); 3183 if (ret) 3184 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", 3185 vf->vf_id, ret); 3186 3187 if (vf->trusted && was_unimac_deleted) { 3188 struct i40e_mac_filter *f; 3189 struct hlist_node *h; 3190 u8 *macaddr = NULL; 3191 int bkt; 3192 3193 /* set last unicast mac address as default */ 3194 spin_lock_bh(&vsi->mac_filter_hash_lock); 3195 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { 3196 if (is_valid_ether_addr(f->macaddr)) 3197 macaddr = f->macaddr; 3198 } 3199 if (macaddr) 3200 ether_addr_copy(vf->default_lan_addr.addr, macaddr); 3201 spin_unlock_bh(&vsi->mac_filter_hash_lock); 3202 } 3203 error_param: 3204 /* send the response to the VF */ 3205 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret); 3206 } 3207 3208 /** 3209 * i40e_vc_add_vlan_msg 3210 * @vf: pointer to the VF info 3211 * @msg: pointer to the msg buffer 3212 * 3213 * program guest vlan id 3214 **/ 3215 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg) 3216 { 3217 struct virtchnl_vlan_filter_list *vfl = 3218 (struct virtchnl_vlan_filter_list *)msg; 3219 struct i40e_pf *pf = vf->pf; 3220 struct i40e_vsi *vsi = NULL; 3221 int aq_ret = 0; 3222 int i; 3223 3224 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) && 3225 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 3226 dev_err(&pf->pdev->dev, 3227 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n"); 3228 goto error_param; 3229 } 3230 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 3231 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { 3232 aq_ret = -EINVAL; 3233 goto error_param; 3234 } 3235 3236 for (i = 0; i < vfl->num_elements; i++) { 3237 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 3238 aq_ret = -EINVAL; 3239 dev_err(&pf->pdev->dev, 3240 "invalid VF VLAN id %d\n", vfl->vlan_id[i]); 3241 goto error_param; 3242 } 3243 } 3244 vsi = pf->vsi[vf->lan_vsi_idx]; 3245 if (vsi->info.pvid) { 3246 aq_ret = -EINVAL; 3247 goto error_param; 3248 } 3249 3250 i40e_vlan_stripping_enable(vsi); 3251 for (i = 0; i < vfl->num_elements; i++) { 3252 /* add new VLAN filter */ 3253 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]); 3254 if (!ret) 3255 vf->num_vlan++; 3256 3257 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 3258 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 3259 true, 3260 vfl->vlan_id[i], 3261 NULL); 3262 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 3263 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 3264 true, 3265 vfl->vlan_id[i], 3266 NULL); 3267 3268 if (ret) 3269 dev_err(&pf->pdev->dev, 3270 "Unable to add VLAN filter %d for VF %d, error %d\n", 3271 vfl->vlan_id[i], vf->vf_id, ret); 3272 } 3273 3274 error_param: 3275 /* send the response to the VF */ 3276 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret); 3277 } 3278 3279 /** 3280 * i40e_vc_remove_vlan_msg 3281 * @vf: pointer to the VF info 3282 * @msg: pointer to the msg buffer 3283 * 3284 * remove programmed guest vlan id 3285 **/ 3286 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) 3287 { 3288 struct virtchnl_vlan_filter_list *vfl = 3289 (struct virtchnl_vlan_filter_list *)msg; 3290 struct i40e_pf *pf = vf->pf; 3291 struct i40e_vsi *vsi = NULL; 3292 int aq_ret = 0; 3293 int i; 3294 3295 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || 3296 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { 3297 aq_ret = -EINVAL; 3298 goto error_param; 3299 } 3300 3301 for (i = 0; i < vfl->num_elements; i++) { 3302 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 3303 aq_ret = -EINVAL; 3304 goto error_param; 3305 } 3306 } 3307 3308 vsi = pf->vsi[vf->lan_vsi_idx]; 3309 if (vsi->info.pvid) { 3310 if (vfl->num_elements > 1 || vfl->vlan_id[0]) 3311 aq_ret = -EINVAL; 3312 goto error_param; 3313 } 3314 3315 for (i = 0; i < vfl->num_elements; i++) { 3316 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); 3317 vf->num_vlan--; 3318 3319 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 3320 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 3321 false, 3322 vfl->vlan_id[i], 3323 NULL); 3324 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 3325 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 3326 false, 3327 vfl->vlan_id[i], 3328 NULL); 3329 } 3330 3331 error_param: 3332 /* send the response to the VF */ 3333 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret); 3334 } 3335 3336 /** 3337 * i40e_vc_rdma_msg 3338 * @vf: pointer to the VF info 3339 * @msg: pointer to the msg buffer 3340 * @msglen: msg length 3341 * 3342 * called from the VF for the iwarp msgs 3343 **/ 3344 static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 3345 { 3346 struct i40e_pf *pf = vf->pf; 3347 struct i40e_vsi *main_vsi; 3348 int aq_ret = 0; 3349 int abs_vf_id; 3350 3351 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 3352 !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) { 3353 aq_ret = -EINVAL; 3354 goto error_param; 3355 } 3356 3357 main_vsi = i40e_pf_get_main_vsi(pf); 3358 abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; 3359 i40e_notify_client_of_vf_msg(main_vsi, abs_vf_id, msg, msglen); 3360 3361 error_param: 3362 /* send the response to the VF */ 3363 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_RDMA, 3364 aq_ret); 3365 } 3366 3367 /** 3368 * i40e_vc_rdma_qvmap_msg 3369 * @vf: pointer to the VF info 3370 * @msg: pointer to the msg buffer 3371 * @config: config qvmap or release it 3372 * 3373 * called from the VF for the iwarp msgs 3374 **/ 3375 static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config) 3376 { 3377 struct virtchnl_rdma_qvlist_info *qvlist_info = 3378 (struct virtchnl_rdma_qvlist_info *)msg; 3379 int aq_ret = 0; 3380 3381 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 3382 !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) { 3383 aq_ret = -EINVAL; 3384 goto error_param; 3385 } 3386 3387 if (config) { 3388 if (i40e_config_rdma_qvlist(vf, qvlist_info)) 3389 aq_ret = -EINVAL; 3390 } else { 3391 i40e_release_rdma_qvlist(vf); 3392 } 3393 3394 error_param: 3395 /* send the response to the VF */ 3396 return i40e_vc_send_resp_to_vf(vf, 3397 config ? VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP : 3398 VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP, 3399 aq_ret); 3400 } 3401 3402 /** 3403 * i40e_vc_config_rss_key 3404 * @vf: pointer to the VF info 3405 * @msg: pointer to the msg buffer 3406 * 3407 * Configure the VF's RSS key 3408 **/ 3409 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg) 3410 { 3411 struct virtchnl_rss_key *vrk = 3412 (struct virtchnl_rss_key *)msg; 3413 struct i40e_pf *pf = vf->pf; 3414 struct i40e_vsi *vsi = NULL; 3415 int aq_ret = 0; 3416 3417 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || 3418 !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) || 3419 vrk->key_len != I40E_HKEY_ARRAY_SIZE) { 3420 aq_ret = -EINVAL; 3421 goto err; 3422 } 3423 3424 vsi = pf->vsi[vf->lan_vsi_idx]; 3425 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0); 3426 err: 3427 /* send the response to the VF */ 3428 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, 3429 aq_ret); 3430 } 3431 3432 /** 3433 * i40e_vc_config_rss_lut 3434 * @vf: pointer to the VF info 3435 * @msg: pointer to the msg buffer 3436 * 3437 * Configure the VF's RSS LUT 3438 **/ 3439 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg) 3440 { 3441 struct virtchnl_rss_lut *vrl = 3442 (struct virtchnl_rss_lut *)msg; 3443 struct i40e_pf *pf = vf->pf; 3444 struct i40e_vsi *vsi = NULL; 3445 int aq_ret = 0; 3446 u16 i; 3447 3448 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || 3449 !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) || 3450 vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) { 3451 aq_ret = -EINVAL; 3452 goto err; 3453 } 3454 3455 for (i = 0; i < vrl->lut_entries; i++) 3456 if (vrl->lut[i] >= vf->num_queue_pairs) { 3457 aq_ret = -EINVAL; 3458 goto err; 3459 } 3460 3461 vsi = pf->vsi[vf->lan_vsi_idx]; 3462 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE); 3463 /* send the response to the VF */ 3464 err: 3465 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, 3466 aq_ret); 3467 } 3468 3469 /** 3470 * i40e_vc_get_rss_hashcfg 3471 * @vf: pointer to the VF info 3472 * @msg: pointer to the msg buffer 3473 * 3474 * Return the RSS Hash configuration bits allowed by the hardware 3475 **/ 3476 static int i40e_vc_get_rss_hashcfg(struct i40e_vf *vf, u8 *msg) 3477 { 3478 struct virtchnl_rss_hashcfg *vrh = NULL; 3479 struct i40e_pf *pf = vf->pf; 3480 int aq_ret = 0; 3481 int len = 0; 3482 3483 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 3484 aq_ret = -EINVAL; 3485 goto err; 3486 } 3487 len = sizeof(struct virtchnl_rss_hashcfg); 3488 3489 vrh = kzalloc(len, GFP_KERNEL); 3490 if (!vrh) { 3491 aq_ret = -ENOMEM; 3492 len = 0; 3493 goto err; 3494 } 3495 vrh->hashcfg = i40e_pf_get_default_rss_hashcfg(pf); 3496 err: 3497 /* send the response back to the VF */ 3498 aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, 3499 aq_ret, (u8 *)vrh, len); 3500 kfree(vrh); 3501 return aq_ret; 3502 } 3503 3504 /** 3505 * i40e_vc_set_rss_hashcfg 3506 * @vf: pointer to the VF info 3507 * @msg: pointer to the msg buffer 3508 * 3509 * Set the RSS Hash configuration bits for the VF 3510 **/ 3511 static int i40e_vc_set_rss_hashcfg(struct i40e_vf *vf, u8 *msg) 3512 { 3513 struct virtchnl_rss_hashcfg *vrh = 3514 (struct virtchnl_rss_hashcfg *)msg; 3515 struct i40e_pf *pf = vf->pf; 3516 struct i40e_hw *hw = &pf->hw; 3517 int aq_ret = 0; 3518 3519 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 3520 aq_ret = -EINVAL; 3521 goto err; 3522 } 3523 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), 3524 (u32)vrh->hashcfg); 3525 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id), 3526 (u32)(vrh->hashcfg >> 32)); 3527 3528 /* send the response to the VF */ 3529 err: 3530 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, aq_ret); 3531 } 3532 3533 /** 3534 * i40e_vc_enable_vlan_stripping 3535 * @vf: pointer to the VF info 3536 * @msg: pointer to the msg buffer 3537 * 3538 * Enable vlan header stripping for the VF 3539 **/ 3540 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg) 3541 { 3542 struct i40e_vsi *vsi; 3543 int aq_ret = 0; 3544 3545 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 3546 aq_ret = -EINVAL; 3547 goto err; 3548 } 3549 3550 vsi = vf->pf->vsi[vf->lan_vsi_idx]; 3551 i40e_vlan_stripping_enable(vsi); 3552 3553 /* send the response to the VF */ 3554 err: 3555 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, 3556 aq_ret); 3557 } 3558 3559 /** 3560 * i40e_vc_disable_vlan_stripping 3561 * @vf: pointer to the VF info 3562 * @msg: pointer to the msg buffer 3563 * 3564 * Disable vlan header stripping for the VF 3565 **/ 3566 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg) 3567 { 3568 struct i40e_vsi *vsi; 3569 int aq_ret = 0; 3570 3571 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 3572 aq_ret = -EINVAL; 3573 goto err; 3574 } 3575 3576 vsi = vf->pf->vsi[vf->lan_vsi_idx]; 3577 i40e_vlan_stripping_disable(vsi); 3578 3579 /* send the response to the VF */ 3580 err: 3581 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 3582 aq_ret); 3583 } 3584 3585 /** 3586 * i40e_validate_cloud_filter 3587 * @vf: pointer to VF structure 3588 * @tc_filter: pointer to filter requested 3589 * 3590 * This function validates cloud filter programmed as TC filter for ADq 3591 **/ 3592 static int i40e_validate_cloud_filter(struct i40e_vf *vf, 3593 struct virtchnl_filter *tc_filter) 3594 { 3595 struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec; 3596 struct virtchnl_l4_spec data = tc_filter->data.tcp_spec; 3597 struct i40e_pf *pf = vf->pf; 3598 struct i40e_vsi *vsi = NULL; 3599 struct i40e_mac_filter *f; 3600 struct hlist_node *h; 3601 bool found = false; 3602 int bkt; 3603 3604 if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) { 3605 dev_info(&pf->pdev->dev, 3606 "VF %d: ADQ doesn't support this action (%d)\n", 3607 vf->vf_id, tc_filter->action); 3608 goto err; 3609 } 3610 3611 /* action_meta is TC number here to which the filter is applied */ 3612 if (!tc_filter->action_meta || 3613 tc_filter->action_meta >= vf->num_tc) { 3614 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n", 3615 vf->vf_id, tc_filter->action_meta); 3616 goto err; 3617 } 3618 3619 /* Check filter if it's programmed for advanced mode or basic mode. 3620 * There are two ADq modes (for VF only), 3621 * 1. Basic mode: intended to allow as many filter options as possible 3622 * to be added to a VF in Non-trusted mode. Main goal is 3623 * to add filters to its own MAC and VLAN id. 3624 * 2. Advanced mode: is for allowing filters to be applied other than 3625 * its own MAC or VLAN. This mode requires the VF to be 3626 * Trusted. 3627 */ 3628 if (mask.dst_mac[0] && !mask.dst_ip[0]) { 3629 vsi = pf->vsi[vf->lan_vsi_idx]; 3630 f = i40e_find_mac(vsi, data.dst_mac); 3631 3632 if (!f) { 3633 dev_info(&pf->pdev->dev, 3634 "Destination MAC %pM doesn't belong to VF %d\n", 3635 data.dst_mac, vf->vf_id); 3636 goto err; 3637 } 3638 3639 if (mask.vlan_id) { 3640 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, 3641 hlist) { 3642 if (f->vlan == ntohs(data.vlan_id)) { 3643 found = true; 3644 break; 3645 } 3646 } 3647 if (!found) { 3648 dev_info(&pf->pdev->dev, 3649 "VF %d doesn't have any VLAN id %u\n", 3650 vf->vf_id, ntohs(data.vlan_id)); 3651 goto err; 3652 } 3653 } 3654 } else { 3655 /* Check if VF is trusted */ 3656 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 3657 dev_err(&pf->pdev->dev, 3658 "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n", 3659 vf->vf_id); 3660 return -EIO; 3661 } 3662 } 3663 3664 if (mask.dst_mac[0] & data.dst_mac[0]) { 3665 if (is_broadcast_ether_addr(data.dst_mac) || 3666 is_zero_ether_addr(data.dst_mac)) { 3667 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n", 3668 vf->vf_id, data.dst_mac); 3669 goto err; 3670 } 3671 } 3672 3673 if (mask.src_mac[0] & data.src_mac[0]) { 3674 if (is_broadcast_ether_addr(data.src_mac) || 3675 is_zero_ether_addr(data.src_mac)) { 3676 dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n", 3677 vf->vf_id, data.src_mac); 3678 goto err; 3679 } 3680 } 3681 3682 if (mask.dst_port & data.dst_port) { 3683 if (!data.dst_port) { 3684 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n", 3685 vf->vf_id); 3686 goto err; 3687 } 3688 } 3689 3690 if (mask.src_port & data.src_port) { 3691 if (!data.src_port) { 3692 dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n", 3693 vf->vf_id); 3694 goto err; 3695 } 3696 } 3697 3698 if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW && 3699 tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) { 3700 dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n", 3701 vf->vf_id); 3702 goto err; 3703 } 3704 3705 if (mask.vlan_id & data.vlan_id) { 3706 if (ntohs(data.vlan_id) > I40E_MAX_VLANID) { 3707 dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n", 3708 vf->vf_id); 3709 goto err; 3710 } 3711 } 3712 3713 return 0; 3714 err: 3715 return -EIO; 3716 } 3717 3718 /** 3719 * i40e_find_vsi_from_seid - searches for the vsi with the given seid 3720 * @vf: pointer to the VF info 3721 * @seid: seid of the vsi it is searching for 3722 **/ 3723 static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid) 3724 { 3725 struct i40e_pf *pf = vf->pf; 3726 struct i40e_vsi *vsi = NULL; 3727 int i; 3728 3729 for (i = 0; i < vf->num_tc ; i++) { 3730 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id); 3731 if (vsi && vsi->seid == seid) 3732 return vsi; 3733 } 3734 return NULL; 3735 } 3736 3737 /** 3738 * i40e_del_all_cloud_filters 3739 * @vf: pointer to the VF info 3740 * 3741 * This function deletes all cloud filters 3742 **/ 3743 static void i40e_del_all_cloud_filters(struct i40e_vf *vf) 3744 { 3745 struct i40e_cloud_filter *cfilter = NULL; 3746 struct i40e_pf *pf = vf->pf; 3747 struct i40e_vsi *vsi = NULL; 3748 struct hlist_node *node; 3749 int ret; 3750 3751 hlist_for_each_entry_safe(cfilter, node, 3752 &vf->cloud_filter_list, cloud_node) { 3753 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid); 3754 3755 if (!vsi) { 3756 dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n", 3757 vf->vf_id, cfilter->seid); 3758 continue; 3759 } 3760 3761 if (cfilter->dst_port) 3762 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, 3763 false); 3764 else 3765 ret = i40e_add_del_cloud_filter(vsi, cfilter, false); 3766 if (ret) 3767 dev_err(&pf->pdev->dev, 3768 "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n", 3769 vf->vf_id, ERR_PTR(ret), 3770 libie_aq_str(pf->hw.aq.asq_last_status)); 3771 3772 hlist_del(&cfilter->cloud_node); 3773 kfree(cfilter); 3774 vf->num_cloud_filters--; 3775 } 3776 } 3777 3778 /** 3779 * i40e_vc_del_cloud_filter 3780 * @vf: pointer to the VF info 3781 * @msg: pointer to the msg buffer 3782 * 3783 * This function deletes a cloud filter programmed as TC filter for ADq 3784 **/ 3785 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg) 3786 { 3787 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; 3788 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; 3789 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; 3790 struct i40e_cloud_filter cfilter, *cf = NULL; 3791 struct i40e_pf *pf = vf->pf; 3792 struct i40e_vsi *vsi = NULL; 3793 struct hlist_node *node; 3794 int aq_ret = 0; 3795 int i, ret; 3796 3797 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 3798 aq_ret = -EINVAL; 3799 goto err; 3800 } 3801 3802 if (!vf->adq_enabled) { 3803 dev_info(&pf->pdev->dev, 3804 "VF %d: ADq not enabled, can't apply cloud filter\n", 3805 vf->vf_id); 3806 aq_ret = -EINVAL; 3807 goto err; 3808 } 3809 3810 if (i40e_validate_cloud_filter(vf, vcf)) { 3811 dev_info(&pf->pdev->dev, 3812 "VF %d: Invalid input, can't apply cloud filter\n", 3813 vf->vf_id); 3814 aq_ret = -EINVAL; 3815 goto err; 3816 } 3817 3818 memset(&cfilter, 0, sizeof(cfilter)); 3819 /* parse destination mac address */ 3820 for (i = 0; i < ETH_ALEN; i++) 3821 cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; 3822 3823 /* parse source mac address */ 3824 for (i = 0; i < ETH_ALEN; i++) 3825 cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; 3826 3827 cfilter.vlan_id = mask.vlan_id & tcf.vlan_id; 3828 cfilter.dst_port = mask.dst_port & tcf.dst_port; 3829 cfilter.src_port = mask.src_port & tcf.src_port; 3830 3831 switch (vcf->flow_type) { 3832 case VIRTCHNL_TCP_V4_FLOW: 3833 cfilter.n_proto = ETH_P_IP; 3834 if (mask.dst_ip[0] & tcf.dst_ip[0]) 3835 memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip, 3836 ARRAY_SIZE(tcf.dst_ip)); 3837 else if (mask.src_ip[0] & tcf.dst_ip[0]) 3838 memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip, 3839 ARRAY_SIZE(tcf.dst_ip)); 3840 break; 3841 case VIRTCHNL_TCP_V6_FLOW: 3842 cfilter.n_proto = ETH_P_IPV6; 3843 if (mask.dst_ip[3] & tcf.dst_ip[3]) 3844 memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip, 3845 sizeof(cfilter.ip.v6.dst_ip6)); 3846 if (mask.src_ip[3] & tcf.src_ip[3]) 3847 memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip, 3848 sizeof(cfilter.ip.v6.src_ip6)); 3849 break; 3850 default: 3851 /* TC filter can be configured based on different combinations 3852 * and in this case IP is not a part of filter config 3853 */ 3854 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", 3855 vf->vf_id); 3856 } 3857 3858 /* get the vsi to which the tc belongs to */ 3859 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; 3860 cfilter.seid = vsi->seid; 3861 cfilter.flags = vcf->field_flags; 3862 3863 /* Deleting TC filter */ 3864 if (tcf.dst_port) 3865 ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false); 3866 else 3867 ret = i40e_add_del_cloud_filter(vsi, &cfilter, false); 3868 if (ret) { 3869 dev_err(&pf->pdev->dev, 3870 "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n", 3871 vf->vf_id, ERR_PTR(ret), 3872 libie_aq_str(pf->hw.aq.asq_last_status)); 3873 goto err; 3874 } 3875 3876 hlist_for_each_entry_safe(cf, node, 3877 &vf->cloud_filter_list, cloud_node) { 3878 if (cf->seid != cfilter.seid) 3879 continue; 3880 if (mask.dst_port) 3881 if (cfilter.dst_port != cf->dst_port) 3882 continue; 3883 if (mask.dst_mac[0]) 3884 if (!ether_addr_equal(cf->src_mac, cfilter.src_mac)) 3885 continue; 3886 /* for ipv4 data to be valid, only first byte of mask is set */ 3887 if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0]) 3888 if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip, 3889 ARRAY_SIZE(tcf.dst_ip))) 3890 continue; 3891 /* for ipv6, mask is set for all sixteen bytes (4 words) */ 3892 if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3]) 3893 if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6, 3894 sizeof(cfilter.ip.v6.src_ip6))) 3895 continue; 3896 if (mask.vlan_id) 3897 if (cfilter.vlan_id != cf->vlan_id) 3898 continue; 3899 3900 hlist_del(&cf->cloud_node); 3901 kfree(cf); 3902 vf->num_cloud_filters--; 3903 } 3904 3905 err: 3906 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER, 3907 aq_ret); 3908 } 3909 3910 #define I40E_MAX_VF_CLOUD_FILTER 0xFF00 3911 3912 /** 3913 * i40e_vc_add_cloud_filter 3914 * @vf: pointer to the VF info 3915 * @msg: pointer to the msg buffer 3916 * 3917 * This function adds a cloud filter programmed as TC filter for ADq 3918 **/ 3919 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) 3920 { 3921 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; 3922 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; 3923 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; 3924 struct i40e_cloud_filter *cfilter = NULL; 3925 struct i40e_pf *pf = vf->pf; 3926 struct i40e_vsi *vsi = NULL; 3927 int aq_ret = 0; 3928 int i; 3929 3930 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 3931 aq_ret = -EINVAL; 3932 goto err_out; 3933 } 3934 3935 if (!vf->adq_enabled) { 3936 dev_info(&pf->pdev->dev, 3937 "VF %d: ADq is not enabled, can't apply cloud filter\n", 3938 vf->vf_id); 3939 aq_ret = -EINVAL; 3940 goto err_out; 3941 } 3942 3943 if (i40e_validate_cloud_filter(vf, vcf)) { 3944 dev_info(&pf->pdev->dev, 3945 "VF %d: Invalid input/s, can't apply cloud filter\n", 3946 vf->vf_id); 3947 aq_ret = -EINVAL; 3948 goto err_out; 3949 } 3950 3951 if (vf->num_cloud_filters >= I40E_MAX_VF_CLOUD_FILTER) { 3952 dev_warn(&pf->pdev->dev, 3953 "VF %d: Max number of filters reached, can't apply cloud filter\n", 3954 vf->vf_id); 3955 aq_ret = -ENOSPC; 3956 goto err_out; 3957 } 3958 3959 cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL); 3960 if (!cfilter) { 3961 aq_ret = -ENOMEM; 3962 goto err_out; 3963 } 3964 3965 /* parse destination mac address */ 3966 for (i = 0; i < ETH_ALEN; i++) 3967 cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; 3968 3969 /* parse source mac address */ 3970 for (i = 0; i < ETH_ALEN; i++) 3971 cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; 3972 3973 cfilter->vlan_id = mask.vlan_id & tcf.vlan_id; 3974 cfilter->dst_port = mask.dst_port & tcf.dst_port; 3975 cfilter->src_port = mask.src_port & tcf.src_port; 3976 3977 switch (vcf->flow_type) { 3978 case VIRTCHNL_TCP_V4_FLOW: 3979 cfilter->n_proto = ETH_P_IP; 3980 if (mask.dst_ip[0] & tcf.dst_ip[0]) 3981 memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip, 3982 ARRAY_SIZE(tcf.dst_ip)); 3983 else if (mask.src_ip[0] & tcf.dst_ip[0]) 3984 memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip, 3985 ARRAY_SIZE(tcf.dst_ip)); 3986 break; 3987 case VIRTCHNL_TCP_V6_FLOW: 3988 cfilter->n_proto = ETH_P_IPV6; 3989 if (mask.dst_ip[3] & tcf.dst_ip[3]) 3990 memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip, 3991 sizeof(cfilter->ip.v6.dst_ip6)); 3992 if (mask.src_ip[3] & tcf.src_ip[3]) 3993 memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip, 3994 sizeof(cfilter->ip.v6.src_ip6)); 3995 break; 3996 default: 3997 /* TC filter can be configured based on different combinations 3998 * and in this case IP is not a part of filter config 3999 */ 4000 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", 4001 vf->vf_id); 4002 } 4003 4004 /* get the VSI to which the TC belongs to */ 4005 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; 4006 cfilter->seid = vsi->seid; 4007 cfilter->flags = vcf->field_flags; 4008 4009 /* Adding cloud filter programmed as TC filter */ 4010 if (tcf.dst_port) 4011 aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true); 4012 else 4013 aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true); 4014 if (aq_ret) { 4015 dev_err(&pf->pdev->dev, 4016 "VF %d: Failed to add cloud filter, err %pe aq_err %s\n", 4017 vf->vf_id, ERR_PTR(aq_ret), 4018 libie_aq_str(pf->hw.aq.asq_last_status)); 4019 goto err_free; 4020 } 4021 4022 INIT_HLIST_NODE(&cfilter->cloud_node); 4023 hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list); 4024 /* release the pointer passing it to the collection */ 4025 cfilter = NULL; 4026 vf->num_cloud_filters++; 4027 err_free: 4028 kfree(cfilter); 4029 err_out: 4030 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER, 4031 aq_ret); 4032 } 4033 4034 /** 4035 * i40e_vc_add_qch_msg: Add queue channel and enable ADq 4036 * @vf: pointer to the VF info 4037 * @msg: pointer to the msg buffer 4038 **/ 4039 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) 4040 { 4041 struct virtchnl_tc_info *tci = 4042 (struct virtchnl_tc_info *)msg; 4043 struct i40e_pf *pf = vf->pf; 4044 struct i40e_link_status *ls = &pf->hw.phy.link_info; 4045 int i, adq_request_qps = 0; 4046 int aq_ret = 0; 4047 u64 speed = 0; 4048 4049 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 4050 aq_ret = -EINVAL; 4051 goto err; 4052 } 4053 4054 /* ADq cannot be applied if spoof check is ON */ 4055 if (vf->spoofchk) { 4056 dev_err(&pf->pdev->dev, 4057 "Spoof check is ON, turn it OFF to enable ADq\n"); 4058 aq_ret = -EINVAL; 4059 goto err; 4060 } 4061 4062 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) { 4063 dev_err(&pf->pdev->dev, 4064 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n", 4065 vf->vf_id); 4066 aq_ret = -EINVAL; 4067 goto err; 4068 } 4069 4070 /* max number of traffic classes for VF currently capped at 4 */ 4071 if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) { 4072 dev_err(&pf->pdev->dev, 4073 "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n", 4074 vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI); 4075 aq_ret = -EINVAL; 4076 goto err; 4077 } 4078 4079 /* validate queues for each TC */ 4080 for (i = 0; i < tci->num_tc; i++) 4081 if (!tci->list[i].count || 4082 tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) { 4083 dev_err(&pf->pdev->dev, 4084 "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n", 4085 vf->vf_id, i, tci->list[i].count, 4086 I40E_DEFAULT_QUEUES_PER_VF); 4087 aq_ret = -EINVAL; 4088 goto err; 4089 } 4090 4091 /* need Max VF queues but already have default number of queues */ 4092 adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF; 4093 4094 if (pf->queues_left < adq_request_qps) { 4095 dev_err(&pf->pdev->dev, 4096 "No queues left to allocate to VF %d\n", 4097 vf->vf_id); 4098 aq_ret = -EINVAL; 4099 goto err; 4100 } else { 4101 /* we need to allocate max VF queues to enable ADq so as to 4102 * make sure ADq enabled VF always gets back queues when it 4103 * goes through a reset. 4104 */ 4105 vf->num_queue_pairs = I40E_MAX_VF_QUEUES; 4106 } 4107 4108 /* get link speed in MB to validate rate limit */ 4109 speed = i40e_vc_link_speed2mbps(ls->link_speed); 4110 if (speed == SPEED_UNKNOWN) { 4111 dev_err(&pf->pdev->dev, 4112 "Cannot detect link speed\n"); 4113 aq_ret = -EINVAL; 4114 goto err; 4115 } 4116 4117 /* parse data from the queue channel info */ 4118 vf->num_tc = tci->num_tc; 4119 for (i = 0; i < vf->num_tc; i++) { 4120 if (tci->list[i].max_tx_rate) { 4121 if (tci->list[i].max_tx_rate > speed) { 4122 dev_err(&pf->pdev->dev, 4123 "Invalid max tx rate %llu specified for VF %d.", 4124 tci->list[i].max_tx_rate, 4125 vf->vf_id); 4126 aq_ret = -EINVAL; 4127 goto err; 4128 } else { 4129 vf->ch[i].max_tx_rate = 4130 tci->list[i].max_tx_rate; 4131 } 4132 } 4133 vf->ch[i].num_qps = tci->list[i].count; 4134 } 4135 4136 /* set this flag only after making sure all inputs are sane */ 4137 vf->adq_enabled = true; 4138 4139 /* reset the VF in order to allocate resources */ 4140 i40e_vc_reset_vf(vf, true); 4141 4142 return 0; 4143 4144 /* send the response to the VF */ 4145 err: 4146 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS, 4147 aq_ret); 4148 } 4149 4150 /** 4151 * i40e_vc_del_qch_msg 4152 * @vf: pointer to the VF info 4153 * @msg: pointer to the msg buffer 4154 **/ 4155 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) 4156 { 4157 struct i40e_pf *pf = vf->pf; 4158 int aq_ret = 0; 4159 4160 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { 4161 aq_ret = -EINVAL; 4162 goto err; 4163 } 4164 4165 if (vf->adq_enabled) { 4166 i40e_del_all_cloud_filters(vf); 4167 i40e_del_qch(vf); 4168 vf->adq_enabled = false; 4169 vf->num_tc = 0; 4170 dev_info(&pf->pdev->dev, 4171 "Deleting Queue Channels and cloud filters for ADq on VF %d\n", 4172 vf->vf_id); 4173 } else { 4174 dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n", 4175 vf->vf_id); 4176 aq_ret = -EINVAL; 4177 } 4178 4179 /* reset the VF in order to allocate resources */ 4180 i40e_vc_reset_vf(vf, true); 4181 4182 return 0; 4183 4184 err: 4185 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS, 4186 aq_ret); 4187 } 4188 4189 /** 4190 * i40e_vc_process_vf_msg 4191 * @pf: pointer to the PF structure 4192 * @vf_id: source VF id 4193 * @v_opcode: operation code 4194 * @v_retval: unused return value code 4195 * @msg: pointer to the msg buffer 4196 * @msglen: msg length 4197 * 4198 * called from the common aeq/arq handler to 4199 * process request from VF 4200 **/ 4201 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, 4202 u32 __always_unused v_retval, u8 *msg, u16 msglen) 4203 { 4204 struct i40e_hw *hw = &pf->hw; 4205 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id; 4206 struct i40e_vf *vf; 4207 int ret; 4208 4209 pf->vf_aq_requests++; 4210 if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs) 4211 return -EINVAL; 4212 vf = &(pf->vf[local_vf_id]); 4213 4214 /* Check if VF is disabled. */ 4215 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states)) 4216 return -EINVAL; 4217 4218 /* perform basic checks on the msg */ 4219 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); 4220 4221 if (ret) { 4222 i40e_vc_send_resp_to_vf(vf, v_opcode, -EINVAL); 4223 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", 4224 local_vf_id, v_opcode, msglen); 4225 return ret; 4226 } 4227 4228 switch (v_opcode) { 4229 case VIRTCHNL_OP_VERSION: 4230 ret = i40e_vc_get_version_msg(vf, msg); 4231 break; 4232 case VIRTCHNL_OP_GET_VF_RESOURCES: 4233 ret = i40e_vc_get_vf_resources_msg(vf, msg); 4234 i40e_vc_notify_vf_link_state(vf); 4235 break; 4236 case VIRTCHNL_OP_RESET_VF: 4237 i40e_vc_reset_vf(vf, false); 4238 ret = 0; 4239 break; 4240 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: 4241 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg); 4242 break; 4243 case VIRTCHNL_OP_CONFIG_VSI_QUEUES: 4244 ret = i40e_vc_config_queues_msg(vf, msg); 4245 break; 4246 case VIRTCHNL_OP_CONFIG_IRQ_MAP: 4247 ret = i40e_vc_config_irq_map_msg(vf, msg); 4248 break; 4249 case VIRTCHNL_OP_ENABLE_QUEUES: 4250 ret = i40e_vc_enable_queues_msg(vf, msg); 4251 i40e_vc_notify_vf_link_state(vf); 4252 break; 4253 case VIRTCHNL_OP_DISABLE_QUEUES: 4254 ret = i40e_vc_disable_queues_msg(vf, msg); 4255 break; 4256 case VIRTCHNL_OP_ADD_ETH_ADDR: 4257 ret = i40e_vc_add_mac_addr_msg(vf, msg); 4258 break; 4259 case VIRTCHNL_OP_DEL_ETH_ADDR: 4260 ret = i40e_vc_del_mac_addr_msg(vf, msg); 4261 break; 4262 case VIRTCHNL_OP_ADD_VLAN: 4263 ret = i40e_vc_add_vlan_msg(vf, msg); 4264 break; 4265 case VIRTCHNL_OP_DEL_VLAN: 4266 ret = i40e_vc_remove_vlan_msg(vf, msg); 4267 break; 4268 case VIRTCHNL_OP_GET_STATS: 4269 ret = i40e_vc_get_stats_msg(vf, msg); 4270 break; 4271 case VIRTCHNL_OP_RDMA: 4272 ret = i40e_vc_rdma_msg(vf, msg, msglen); 4273 break; 4274 case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP: 4275 ret = i40e_vc_rdma_qvmap_msg(vf, msg, true); 4276 break; 4277 case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP: 4278 ret = i40e_vc_rdma_qvmap_msg(vf, msg, false); 4279 break; 4280 case VIRTCHNL_OP_CONFIG_RSS_KEY: 4281 ret = i40e_vc_config_rss_key(vf, msg); 4282 break; 4283 case VIRTCHNL_OP_CONFIG_RSS_LUT: 4284 ret = i40e_vc_config_rss_lut(vf, msg); 4285 break; 4286 case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS: 4287 ret = i40e_vc_get_rss_hashcfg(vf, msg); 4288 break; 4289 case VIRTCHNL_OP_SET_RSS_HASHCFG: 4290 ret = i40e_vc_set_rss_hashcfg(vf, msg); 4291 break; 4292 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 4293 ret = i40e_vc_enable_vlan_stripping(vf, msg); 4294 break; 4295 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 4296 ret = i40e_vc_disable_vlan_stripping(vf, msg); 4297 break; 4298 case VIRTCHNL_OP_REQUEST_QUEUES: 4299 ret = i40e_vc_request_queues_msg(vf, msg); 4300 break; 4301 case VIRTCHNL_OP_ENABLE_CHANNELS: 4302 ret = i40e_vc_add_qch_msg(vf, msg); 4303 break; 4304 case VIRTCHNL_OP_DISABLE_CHANNELS: 4305 ret = i40e_vc_del_qch_msg(vf, msg); 4306 break; 4307 case VIRTCHNL_OP_ADD_CLOUD_FILTER: 4308 ret = i40e_vc_add_cloud_filter(vf, msg); 4309 break; 4310 case VIRTCHNL_OP_DEL_CLOUD_FILTER: 4311 ret = i40e_vc_del_cloud_filter(vf, msg); 4312 break; 4313 case VIRTCHNL_OP_UNKNOWN: 4314 default: 4315 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", 4316 v_opcode, local_vf_id); 4317 ret = i40e_vc_send_resp_to_vf(vf, v_opcode, 4318 -EOPNOTSUPP); 4319 break; 4320 } 4321 4322 return ret; 4323 } 4324 4325 /** 4326 * i40e_vc_process_vflr_event 4327 * @pf: pointer to the PF structure 4328 * 4329 * called from the vlfr irq handler to 4330 * free up VF resources and state variables 4331 **/ 4332 int i40e_vc_process_vflr_event(struct i40e_pf *pf) 4333 { 4334 struct i40e_hw *hw = &pf->hw; 4335 u32 reg, reg_idx, bit_idx; 4336 struct i40e_vf *vf; 4337 int vf_id; 4338 4339 if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state)) 4340 return 0; 4341 4342 /* Re-enable the VFLR interrupt cause here, before looking for which 4343 * VF got reset. Otherwise, if another VF gets a reset while the 4344 * first one is being processed, that interrupt will be lost, and 4345 * that VF will be stuck in reset forever. 4346 */ 4347 reg = rd32(hw, I40E_PFINT_ICR0_ENA); 4348 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK; 4349 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 4350 i40e_flush(hw); 4351 4352 clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state); 4353 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { 4354 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; 4355 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; 4356 /* read GLGEN_VFLRSTAT register to find out the flr VFs */ 4357 vf = &pf->vf[vf_id]; 4358 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); 4359 if (reg & BIT(bit_idx)) 4360 /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */ 4361 if (!i40e_reset_vf(vf, true)) { 4362 /* At least one VF did not finish resetting, retry next time */ 4363 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state); 4364 } 4365 } 4366 4367 return 0; 4368 } 4369 4370 /** 4371 * i40e_validate_vf 4372 * @pf: the physical function 4373 * @vf_id: VF identifier 4374 * 4375 * Check that the VF is enabled and the VSI exists. 4376 * 4377 * Returns 0 on success, negative on failure 4378 **/ 4379 static int i40e_validate_vf(struct i40e_pf *pf, int vf_id) 4380 { 4381 struct i40e_vsi *vsi; 4382 struct i40e_vf *vf; 4383 int ret = 0; 4384 4385 if (vf_id >= pf->num_alloc_vfs) { 4386 dev_err(&pf->pdev->dev, 4387 "Invalid VF Identifier %d\n", vf_id); 4388 ret = -EINVAL; 4389 goto err_out; 4390 } 4391 vf = &pf->vf[vf_id]; 4392 vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id); 4393 if (!vsi) 4394 ret = -EINVAL; 4395 err_out: 4396 return ret; 4397 } 4398 4399 /** 4400 * i40e_check_vf_init_timeout 4401 * @vf: the virtual function 4402 * 4403 * Check that the VF's initialization was successfully done and if not 4404 * wait up to 300ms for its finish. 4405 * 4406 * Returns true when VF is initialized, false on timeout 4407 **/ 4408 static bool i40e_check_vf_init_timeout(struct i40e_vf *vf) 4409 { 4410 int i; 4411 4412 /* When the VF is resetting wait until it is done. 4413 * It can take up to 200 milliseconds, but wait for 4414 * up to 300 milliseconds to be safe. 4415 */ 4416 for (i = 0; i < 15; i++) { 4417 if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) 4418 return true; 4419 msleep(20); 4420 } 4421 4422 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { 4423 dev_err(&vf->pf->pdev->dev, 4424 "VF %d still in reset. Try again.\n", vf->vf_id); 4425 return false; 4426 } 4427 4428 return true; 4429 } 4430 4431 /** 4432 * i40e_ndo_set_vf_mac 4433 * @netdev: network interface device structure 4434 * @vf_id: VF identifier 4435 * @mac: mac address 4436 * 4437 * program VF mac address 4438 **/ 4439 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) 4440 { 4441 struct i40e_netdev_priv *np = netdev_priv(netdev); 4442 struct i40e_vsi *vsi = np->vsi; 4443 struct i40e_pf *pf = vsi->back; 4444 struct i40e_mac_filter *f; 4445 struct i40e_vf *vf; 4446 int ret = 0; 4447 struct hlist_node *h; 4448 int bkt; 4449 4450 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4451 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4452 return -EAGAIN; 4453 } 4454 4455 /* validate the request */ 4456 ret = i40e_validate_vf(pf, vf_id); 4457 if (ret) 4458 goto error_param; 4459 4460 vf = &pf->vf[vf_id]; 4461 if (!i40e_check_vf_init_timeout(vf)) { 4462 ret = -EAGAIN; 4463 goto error_param; 4464 } 4465 vsi = pf->vsi[vf->lan_vsi_idx]; 4466 4467 if (is_multicast_ether_addr(mac)) { 4468 dev_err(&pf->pdev->dev, 4469 "Invalid Ethernet address %pM for VF %d\n", mac, vf_id); 4470 ret = -EINVAL; 4471 goto error_param; 4472 } 4473 4474 /* Lock once because below invoked function add/del_filter requires 4475 * mac_filter_hash_lock to be held 4476 */ 4477 spin_lock_bh(&vsi->mac_filter_hash_lock); 4478 4479 /* delete the temporary mac address */ 4480 if (!is_zero_ether_addr(vf->default_lan_addr.addr)) 4481 i40e_del_mac_filter(vsi, vf->default_lan_addr.addr); 4482 4483 /* Delete all the filters for this VSI - we're going to kill it 4484 * anyway. 4485 */ 4486 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) 4487 __i40e_del_filter(vsi, f); 4488 4489 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4490 4491 /* program mac filter */ 4492 if (i40e_sync_vsi_filters(vsi)) { 4493 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); 4494 ret = -EIO; 4495 goto error_param; 4496 } 4497 ether_addr_copy(vf->default_lan_addr.addr, mac); 4498 4499 if (is_zero_ether_addr(mac)) { 4500 vf->pf_set_mac = false; 4501 dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id); 4502 } else { 4503 vf->pf_set_mac = true; 4504 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", 4505 mac, vf_id); 4506 } 4507 4508 /* Force the VF interface down so it has to bring up with new MAC 4509 * address 4510 */ 4511 i40e_vc_reset_vf(vf, true); 4512 dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n"); 4513 4514 error_param: 4515 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4516 return ret; 4517 } 4518 4519 /** 4520 * i40e_ndo_set_vf_port_vlan 4521 * @netdev: network interface device structure 4522 * @vf_id: VF identifier 4523 * @vlan_id: mac address 4524 * @qos: priority setting 4525 * @vlan_proto: vlan protocol 4526 * 4527 * program VF vlan id and/or qos 4528 **/ 4529 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, 4530 u16 vlan_id, u8 qos, __be16 vlan_proto) 4531 { 4532 u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT); 4533 struct i40e_netdev_priv *np = netdev_priv(netdev); 4534 bool allmulti = false, alluni = false; 4535 struct i40e_pf *pf = np->vsi->back; 4536 struct i40e_vsi *vsi; 4537 struct i40e_vf *vf; 4538 int ret = 0; 4539 4540 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4541 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4542 return -EAGAIN; 4543 } 4544 4545 /* validate the request */ 4546 ret = i40e_validate_vf(pf, vf_id); 4547 if (ret) 4548 goto error_pvid; 4549 4550 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) { 4551 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n"); 4552 ret = -EINVAL; 4553 goto error_pvid; 4554 } 4555 4556 if (vlan_proto != htons(ETH_P_8021Q)) { 4557 dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n"); 4558 ret = -EPROTONOSUPPORT; 4559 goto error_pvid; 4560 } 4561 4562 vf = &pf->vf[vf_id]; 4563 if (!i40e_check_vf_init_timeout(vf)) { 4564 ret = -EAGAIN; 4565 goto error_pvid; 4566 } 4567 vsi = pf->vsi[vf->lan_vsi_idx]; 4568 4569 if (le16_to_cpu(vsi->info.pvid) == vlanprio) 4570 /* duplicate request, so just return success */ 4571 goto error_pvid; 4572 4573 i40e_vlan_stripping_enable(vsi); 4574 4575 /* Locked once because multiple functions below iterate list */ 4576 spin_lock_bh(&vsi->mac_filter_hash_lock); 4577 4578 /* Check for condition where there was already a port VLAN ID 4579 * filter set and now it is being deleted by setting it to zero. 4580 * Additionally check for the condition where there was a port 4581 * VLAN but now there is a new and different port VLAN being set. 4582 * Before deleting all the old VLAN filters we must add new ones 4583 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our 4584 * MAC addresses deleted. 4585 */ 4586 if ((!(vlan_id || qos) || 4587 vlanprio != le16_to_cpu(vsi->info.pvid)) && 4588 vsi->info.pvid) { 4589 ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY); 4590 if (ret) { 4591 dev_info(&vsi->back->pdev->dev, 4592 "add VF VLAN failed, ret=%d aq_err=%d\n", ret, 4593 vsi->back->hw.aq.asq_last_status); 4594 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4595 goto error_pvid; 4596 } 4597 } 4598 4599 if (vsi->info.pvid) { 4600 /* remove all filters on the old VLAN */ 4601 i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) & 4602 VLAN_VID_MASK)); 4603 } 4604 4605 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4606 4607 /* disable promisc modes in case they were enabled */ 4608 ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, 4609 allmulti, alluni); 4610 if (ret) { 4611 dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n"); 4612 goto error_pvid; 4613 } 4614 4615 if (vlan_id || qos) 4616 ret = i40e_vsi_add_pvid(vsi, vlanprio); 4617 else 4618 i40e_vsi_remove_pvid(vsi); 4619 spin_lock_bh(&vsi->mac_filter_hash_lock); 4620 4621 if (vlan_id) { 4622 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", 4623 vlan_id, qos, vf_id); 4624 4625 /* add new VLAN filter for each MAC */ 4626 ret = i40e_add_vlan_all_mac(vsi, vlan_id); 4627 if (ret) { 4628 dev_info(&vsi->back->pdev->dev, 4629 "add VF VLAN failed, ret=%d aq_err=%d\n", ret, 4630 vsi->back->hw.aq.asq_last_status); 4631 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4632 goto error_pvid; 4633 } 4634 4635 /* remove the previously added non-VLAN MAC filters */ 4636 i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY); 4637 } 4638 4639 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4640 4641 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 4642 alluni = true; 4643 4644 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 4645 allmulti = true; 4646 4647 /* Schedule the worker thread to take care of applying changes */ 4648 i40e_service_event_schedule(vsi->back); 4649 4650 if (ret) { 4651 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n"); 4652 goto error_pvid; 4653 } 4654 4655 /* The Port VLAN needs to be saved across resets the same as the 4656 * default LAN MAC address. 4657 */ 4658 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid); 4659 4660 i40e_vc_reset_vf(vf, true); 4661 /* During reset the VF got a new VSI, so refresh a pointer. */ 4662 vsi = pf->vsi[vf->lan_vsi_idx]; 4663 4664 ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni); 4665 if (ret) { 4666 dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n"); 4667 goto error_pvid; 4668 } 4669 4670 ret = 0; 4671 4672 error_pvid: 4673 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4674 return ret; 4675 } 4676 4677 /** 4678 * i40e_ndo_set_vf_bw 4679 * @netdev: network interface device structure 4680 * @vf_id: VF identifier 4681 * @min_tx_rate: Minimum Tx rate 4682 * @max_tx_rate: Maximum Tx rate 4683 * 4684 * configure VF Tx rate 4685 **/ 4686 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, 4687 int max_tx_rate) 4688 { 4689 struct i40e_netdev_priv *np = netdev_priv(netdev); 4690 struct i40e_pf *pf = np->vsi->back; 4691 struct i40e_vsi *vsi; 4692 struct i40e_vf *vf; 4693 int ret = 0; 4694 4695 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4696 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4697 return -EAGAIN; 4698 } 4699 4700 /* validate the request */ 4701 ret = i40e_validate_vf(pf, vf_id); 4702 if (ret) 4703 goto error; 4704 4705 if (min_tx_rate) { 4706 dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n", 4707 min_tx_rate, vf_id); 4708 ret = -EINVAL; 4709 goto error; 4710 } 4711 4712 vf = &pf->vf[vf_id]; 4713 if (!i40e_check_vf_init_timeout(vf)) { 4714 ret = -EAGAIN; 4715 goto error; 4716 } 4717 vsi = pf->vsi[vf->lan_vsi_idx]; 4718 4719 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); 4720 if (ret) 4721 goto error; 4722 4723 vf->tx_rate = max_tx_rate; 4724 error: 4725 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4726 return ret; 4727 } 4728 4729 /** 4730 * i40e_ndo_get_vf_config 4731 * @netdev: network interface device structure 4732 * @vf_id: VF identifier 4733 * @ivi: VF configuration structure 4734 * 4735 * return VF configuration 4736 **/ 4737 int i40e_ndo_get_vf_config(struct net_device *netdev, 4738 int vf_id, struct ifla_vf_info *ivi) 4739 { 4740 struct i40e_netdev_priv *np = netdev_priv(netdev); 4741 struct i40e_vsi *vsi = np->vsi; 4742 struct i40e_pf *pf = vsi->back; 4743 struct i40e_vf *vf; 4744 int ret = 0; 4745 4746 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4747 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4748 return -EAGAIN; 4749 } 4750 4751 /* validate the request */ 4752 ret = i40e_validate_vf(pf, vf_id); 4753 if (ret) 4754 goto error_param; 4755 4756 vf = &pf->vf[vf_id]; 4757 /* first vsi is always the LAN vsi */ 4758 vsi = pf->vsi[vf->lan_vsi_idx]; 4759 if (!vsi) { 4760 ret = -ENOENT; 4761 goto error_param; 4762 } 4763 4764 ivi->vf = vf_id; 4765 4766 ether_addr_copy(ivi->mac, vf->default_lan_addr.addr); 4767 4768 ivi->max_tx_rate = vf->tx_rate; 4769 ivi->min_tx_rate = 0; 4770 ivi->vlan = le16_get_bits(vsi->info.pvid, I40E_VLAN_MASK); 4771 ivi->qos = le16_get_bits(vsi->info.pvid, I40E_PRIORITY_MASK); 4772 if (vf->link_forced == false) 4773 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; 4774 else if (vf->link_up == true) 4775 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; 4776 else 4777 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; 4778 ivi->spoofchk = vf->spoofchk; 4779 ivi->trusted = vf->trusted; 4780 ret = 0; 4781 4782 error_param: 4783 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4784 return ret; 4785 } 4786 4787 /** 4788 * i40e_ndo_set_vf_link_state 4789 * @netdev: network interface device structure 4790 * @vf_id: VF identifier 4791 * @link: required link state 4792 * 4793 * Set the link state of a specified VF, regardless of physical link state 4794 **/ 4795 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) 4796 { 4797 struct i40e_netdev_priv *np = netdev_priv(netdev); 4798 struct i40e_pf *pf = np->vsi->back; 4799 struct i40e_link_status *ls = &pf->hw.phy.link_info; 4800 struct virtchnl_pf_event pfe; 4801 struct i40e_hw *hw = &pf->hw; 4802 struct i40e_vsi *vsi; 4803 unsigned long q_map; 4804 struct i40e_vf *vf; 4805 int abs_vf_id; 4806 int old_link; 4807 int ret = 0; 4808 int tmp; 4809 4810 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4811 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4812 return -EAGAIN; 4813 } 4814 4815 /* validate the request */ 4816 if (vf_id >= pf->num_alloc_vfs) { 4817 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 4818 ret = -EINVAL; 4819 goto error_out; 4820 } 4821 4822 vf = &pf->vf[vf_id]; 4823 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 4824 4825 /* skip VF link state change if requested state is already set */ 4826 if (!vf->link_forced) 4827 old_link = IFLA_VF_LINK_STATE_AUTO; 4828 else if (vf->link_up) 4829 old_link = IFLA_VF_LINK_STATE_ENABLE; 4830 else 4831 old_link = IFLA_VF_LINK_STATE_DISABLE; 4832 4833 if (link == old_link) 4834 goto error_out; 4835 4836 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; 4837 pfe.severity = PF_EVENT_SEVERITY_INFO; 4838 4839 switch (link) { 4840 case IFLA_VF_LINK_STATE_AUTO: 4841 vf->link_forced = false; 4842 vf->is_disabled_from_host = false; 4843 /* reset needed to reinit VF resources */ 4844 i40e_vc_reset_vf(vf, true); 4845 i40e_set_vf_link_state(vf, &pfe, ls); 4846 break; 4847 case IFLA_VF_LINK_STATE_ENABLE: 4848 vf->link_forced = true; 4849 vf->link_up = true; 4850 vf->is_disabled_from_host = false; 4851 /* reset needed to reinit VF resources */ 4852 i40e_vc_reset_vf(vf, true); 4853 i40e_set_vf_link_state(vf, &pfe, ls); 4854 break; 4855 case IFLA_VF_LINK_STATE_DISABLE: 4856 vf->link_forced = true; 4857 vf->link_up = false; 4858 i40e_set_vf_link_state(vf, &pfe, ls); 4859 4860 vsi = pf->vsi[vf->lan_vsi_idx]; 4861 q_map = BIT(vsi->num_queue_pairs) - 1; 4862 4863 vf->is_disabled_from_host = true; 4864 4865 /* Try to stop both Tx&Rx rings even if one of the calls fails 4866 * to ensure we stop the rings even in case of errors. 4867 * If any of them returns with an error then the first 4868 * error that occurred will be returned. 4869 */ 4870 tmp = i40e_ctrl_vf_tx_rings(vsi, q_map, false); 4871 ret = i40e_ctrl_vf_rx_rings(vsi, q_map, false); 4872 4873 ret = tmp ? tmp : ret; 4874 break; 4875 default: 4876 ret = -EINVAL; 4877 goto error_out; 4878 } 4879 /* Notify the VF of its new link state */ 4880 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 4881 0, (u8 *)&pfe, sizeof(pfe), NULL); 4882 4883 error_out: 4884 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4885 return ret; 4886 } 4887 4888 /** 4889 * i40e_ndo_set_vf_spoofchk 4890 * @netdev: network interface device structure 4891 * @vf_id: VF identifier 4892 * @enable: flag to enable or disable feature 4893 * 4894 * Enable or disable VF spoof checking 4895 **/ 4896 int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) 4897 { 4898 struct i40e_netdev_priv *np = netdev_priv(netdev); 4899 struct i40e_vsi *vsi = np->vsi; 4900 struct i40e_pf *pf = vsi->back; 4901 struct i40e_vsi_context ctxt; 4902 struct i40e_hw *hw = &pf->hw; 4903 struct i40e_vf *vf; 4904 int ret = 0; 4905 4906 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4907 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4908 return -EAGAIN; 4909 } 4910 4911 /* validate the request */ 4912 if (vf_id >= pf->num_alloc_vfs) { 4913 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 4914 ret = -EINVAL; 4915 goto out; 4916 } 4917 4918 vf = &(pf->vf[vf_id]); 4919 if (!i40e_check_vf_init_timeout(vf)) { 4920 ret = -EAGAIN; 4921 goto out; 4922 } 4923 4924 if (enable == vf->spoofchk) 4925 goto out; 4926 4927 vf->spoofchk = enable; 4928 memset(&ctxt, 0, sizeof(ctxt)); 4929 ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid; 4930 ctxt.pf_num = pf->hw.pf_id; 4931 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); 4932 if (enable) 4933 ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | 4934 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); 4935 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); 4936 if (ret) { 4937 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n", 4938 ret); 4939 ret = -EIO; 4940 } 4941 out: 4942 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4943 return ret; 4944 } 4945 4946 /** 4947 * i40e_ndo_set_vf_trust 4948 * @netdev: network interface device structure of the pf 4949 * @vf_id: VF identifier 4950 * @setting: trust setting 4951 * 4952 * Enable or disable VF trust setting 4953 **/ 4954 int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) 4955 { 4956 struct i40e_netdev_priv *np = netdev_priv(netdev); 4957 struct i40e_pf *pf = np->vsi->back; 4958 struct i40e_vf *vf; 4959 int ret = 0; 4960 4961 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4962 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4963 return -EAGAIN; 4964 } 4965 4966 /* validate the request */ 4967 if (vf_id >= pf->num_alloc_vfs) { 4968 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 4969 ret = -EINVAL; 4970 goto out; 4971 } 4972 4973 if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { 4974 dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n"); 4975 ret = -EINVAL; 4976 goto out; 4977 } 4978 4979 vf = &pf->vf[vf_id]; 4980 4981 if (setting == vf->trusted) 4982 goto out; 4983 4984 vf->trusted = setting; 4985 4986 /* request PF to sync mac/vlan filters for the VF */ 4987 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); 4988 pf->vsi[vf->lan_vsi_idx]->flags |= I40E_VSI_FLAG_FILTER_CHANGED; 4989 4990 i40e_vc_reset_vf(vf, true); 4991 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n", 4992 vf_id, setting ? "" : "un"); 4993 4994 if (vf->adq_enabled) { 4995 if (!vf->trusted) { 4996 dev_info(&pf->pdev->dev, 4997 "VF %u no longer Trusted, deleting all cloud filters\n", 4998 vf_id); 4999 i40e_del_all_cloud_filters(vf); 5000 } 5001 } 5002 5003 out: 5004 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 5005 return ret; 5006 } 5007 5008 /** 5009 * i40e_get_vf_stats - populate some stats for the VF 5010 * @netdev: the netdev of the PF 5011 * @vf_id: the host OS identifier (0-127) 5012 * @vf_stats: pointer to the OS memory to be initialized 5013 */ 5014 int i40e_get_vf_stats(struct net_device *netdev, int vf_id, 5015 struct ifla_vf_stats *vf_stats) 5016 { 5017 struct i40e_netdev_priv *np = netdev_priv(netdev); 5018 struct i40e_pf *pf = np->vsi->back; 5019 struct i40e_eth_stats *stats; 5020 struct i40e_vsi *vsi; 5021 struct i40e_vf *vf; 5022 5023 /* validate the request */ 5024 if (i40e_validate_vf(pf, vf_id)) 5025 return -EINVAL; 5026 5027 vf = &pf->vf[vf_id]; 5028 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { 5029 dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id); 5030 return -EBUSY; 5031 } 5032 5033 vsi = pf->vsi[vf->lan_vsi_idx]; 5034 if (!vsi) 5035 return -EINVAL; 5036 5037 i40e_update_eth_stats(vsi); 5038 stats = &vsi->eth_stats; 5039 5040 memset(vf_stats, 0, sizeof(*vf_stats)); 5041 5042 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast + 5043 stats->rx_multicast; 5044 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast + 5045 stats->tx_multicast; 5046 vf_stats->rx_bytes = stats->rx_bytes; 5047 vf_stats->tx_bytes = stats->tx_bytes; 5048 vf_stats->broadcast = stats->rx_broadcast; 5049 vf_stats->multicast = stats->rx_multicast; 5050 vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other; 5051 vf_stats->tx_dropped = stats->tx_errors; 5052 5053 return 0; 5054 } 5055