// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
		/* Not all VFs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}

/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;
	if (vf->link_forced) {
		pfe.event_data.link_event.link_status = vf->link_up;
		pfe.event_data.link_event.link_speed =
			(vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
	} else {
		pfe.event_data.link_event.link_status =
			ls->link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			i40e_virtchnl_link_speed(ls->link_speed);
	}
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}
/***********************misc routines*****************************/

/**
 * i40e_vc_disable_vf
 * @vf: pointer to the VF info
 *
 * Disable the VF through a SW reset.
 **/
static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
{
	int i;

	i40e_vc_notify_vf_reset(vf);

	/* We want to ensure that an actual reset is initiated after this
	 * function is called. However, we do not want to wait forever, so
	 * we'll give a reasonable time and print a message if we failed to
	 * ensure a reset.
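	 * Each retry below waits 10-20ms, so in the worst case we poll for
	 * several hundred milliseconds before giving up and warning.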
	 */
	for (i = 0; i < 20; i++) {
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

	dev_warn(&vf->pf->pdev->dev,
		 "Failed to initiate reset for VF %d after 200 milliseconds\n",
		 vf->vf_id);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u16 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}

/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queue_id: queue number
 *
 * wrapper function to get pf_queue_id handling ADq code as well
 **/
static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
{
	int i;

	if (vf->adq_enabled) {
		/* Although the VF considers all the queues (1 to 16) as its
		 * own, they may actually belong to different VSIs (up to 4).
		 * We need to find which queue belongs to which VSI.
		 */
		for (i = 0; i < vf->num_tc; i++) {
			if (queue_id < vf->ch[i].num_qps) {
				vsi_id = vf->ch[i].vsi_id;
				break;
			}
			/* find the right queue id which is relative to a
			 * given VSI.
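			 * e.g. if TC0 owns 4 queue pairs, VF-relative
			 * queue 5 becomes queue 1 of the next TC's VSI.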
			 */
			queue_id -= vf->ch[i].num_qps;
		}
	}

	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
}

/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_get_real_pf_qid(vf,
							   vsi_id,
							   vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* if the VF is running in polling mode and using interrupt zero,
	 * we need to disable auto-mask on enabling zero interrupt for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}

/**
 * i40e_release_iwarp_qvlist
 * @vf: pointer to the VF.
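 *
 * Release the iWARP queue-vector list: point each vector's LNKLSTN back
 * at the queue that followed its CEQ, then free the cached qvlist_info.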
 **/
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_iwarp_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}

/**
 * i40e_config_iwarp_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
				    struct virtchnl_iwarp_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_iwarp_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	u32 msix_vf, size;
	int ret = 0;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	if (qvlist_info->num_vectors > msix_vf) {
		dev_warn(&pf->pdev->dev,
			 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
			 qvlist_info->num_vectors,
			 msix_vf);
		ret = -EINVAL;
		goto err_out;
	}

	size = sizeof(struct virtchnl_iwarp_qvlist_info) +
	       (sizeof(struct virtchnl_iwarp_qv_info) *
	       (qvlist_info->num_vectors - 1));
	kfree(vf->qvlist_info);
	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
	if (!vf->qvlist_info) {
		ret = -ENOMEM;
		goto err_out;
	}
	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
			ret = -EINVAL;
			goto err_free;
		}

		v_idx = qv_info->v_idx;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
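		 * The LNKLSTN FIRSTQ and CEQCTL NEXTQ fields together form a
		 * singly linked list of queues hanging off each vector.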
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
			      I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err_free:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
err_out:
	return ret;
}

/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}

/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u64 max_tx_rate = 0;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
			     vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}

	if (!idx) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed.
		 * Check if there is a port VLAN and restore the VSI
		 * configuration if needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
		/* program mac filter only for VF VSI */
		ret = i40e_sync_vsi_filters(vsi);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
	}

	/* store the VSI index and id for ADq; the MAC filter is not applied
	 * for these additional VSIs
	 */
	if (vf->adq_enabled) {
		vf->ch[idx].vsi_idx = vsi->idx;
		vf->ch[idx].vsi_id = vsi->id;
	}

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		max_tx_rate = vf->tx_rate;
	} else if (vf->ch[idx].max_tx_rate) {
		max_tx_rate = vf->ch[idx].max_tx_rate;
	}

	if (max_tx_rate) {
		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  max_tx_rate, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}

/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the first part, VSILAN_QTABLE, mapping PF queues
 * to the VSI.
 **/
static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qps;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < 7; j++) {
			if (j * 2 >= qps) {
				/* end of list */
				reg = 0x07FF07FF;
			} else {
				u16 qid = i40e_vc_get_pf_queue_id(vf,
								  vsi_id,
								  j * 2);
				reg = qid;
				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      (j * 2) + 1);
				reg |= qid << 16;
			}
			i40e_write_rx_ctl(hw,
					  I40E_VSILAN_QTABLE(j, vsi_id),
					  reg);
		}
	}
}

/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the second part, VPLAN_QTABLE, & completes VF
 * mappings.
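 *
 * VPLAN_QTABLE is indexed by the VF relative queue number and holds the
 * absolute PF queue id, letting the VF address its queues as 0..N-1.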
 **/
static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_qps = 0;
	u32 qps, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qid;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < qps; j++) {
			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);

			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
			     reg);
			total_qps++;
		}
	}
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	i40e_map_pf_to_vf_queues(vf);
	i40e_map_pf_queues_to_vsi(vf);

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, j, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default so
	 * do the accounting here when we're about to free them.
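	 * Only queues above the default allocation were charged against
	 * pf->queues_left, so only that difference is returned here.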
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
		vf->num_mac = 0;
	}

	/* do the accounting and remove additional ADq VSI's */
	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
		for (j = 0; j < vf->num_tc; j++) {
			/* At this point VSI0 is already released so don't
			 * release it again and only clear their values in
			 * structure variables
			 */
			if (j)
				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
			vf->ch[j].vsi_idx = 0;
			vf->ch[j].vsi_id = 0;
		}
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						     (vf->vf_id))
						    + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret, idx;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, 0);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* allocate additional VSIs based on tc information for ADq */
	if (vf->adq_enabled) {
		if (pf->queues_left >=
		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
			/* TC 0 always belongs to VF VSI */
			for (idx = 1; idx < vf->num_tc; idx++) {
				ret = i40e_alloc_vsi_res(vf, idx);
				if (ret)
					goto error_alloc;
			}
			/* send correct number of queues */
			total_queue_pairs = I40E_MAX_VF_QUEUES;
		} else {
			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
				 vf->vf_id);
			vf->adq_enabled = false;
		}
	}

	/* We account for each VF to get a default number of queue pairs. If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * HW.
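	 * This is the mirror image of the refund done in i40e_free_vf_res().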
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}

static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi);

/**
 * i40e_config_vf_promiscuous_mode
 * @vf: pointer to the VF info
 * @vsi_id: VSI id
 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
 * @alluni: set MAC L2 layer unicast promiscuous enable/disable
 *
 * Called from the VF to configure the promiscuous mode of
 * VF vsis and from the VF reset path to reset promiscuous mode.
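 *
 * Returns I40E_SUCCESS (0) on success, otherwise an i40e_status error.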
 **/
static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
						   u16 vsi_id,
						   bool allmulti,
						   bool alluni)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_mac_filter *f;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int bkt;

	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
		return I40E_ERR_PARAM;

	if (vf->port_vlan_id) {
		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
							    allmulti,
							    vf->port_vlan_id,
							    NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
			return aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
							    alluni,
							    vf->port_vlan_id,
							    NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
		}
		return aq_ret;
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
				continue;
			aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
								    vsi->seid,
								    allmulti,
								    f->vlan,
								    NULL);
			if (aq_ret) {
				int aq_err = pf->hw.aq.asq_last_status;

				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
			}

			aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
								    vsi->seid,
								    alluni,
								    f->vlan,
								    NULL);
			if (aq_ret) {
				int aq_err = pf->hw.aq.asq_last_status;

				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
			}
		}
		return aq_ret;
	}
	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, allmulti,
						       NULL);
	if (aq_ret) {
		int aq_err = pf->hw.aq.asq_last_status;

		dev_err(&pf->pdev->dev,
			"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
			vf->vf_id,
			i40e_stat_str(&pf->hw, aq_ret),
			i40e_aq_str(&pf->hw, aq_err));
		return aq_ret;
	}

	aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, alluni,
						     NULL, true);
	if (aq_ret) {
		int aq_err = pf->hw.aq.asq_last_status;

		dev_err(&pf->pdev->dev,
			"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
			vf->vf_id,
			i40e_stat_str(&pf->hw, aq_ret),
			i40e_aq_str(&pf->hw, aq_err));
	}

	return aq_ret;
}

/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
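 *
 * See i40e_reset_vf() and i40e_reset_all_vfs(), which poll VPGEN_VFRSTAT
 * in 10ms steps until the reset is reported complete.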
 **/
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;

	/* warn the VF */
	clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer
	 * to do it earlier to give some time to finish to any VF config
	 * functions that may still be running at this point.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}

/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* disable promisc modes in case they were enabled */
	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
					&vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
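	 * The VF driver is expected to poll VFGEN_RSTAT for
	 * VIRTCHNL_VFR_VFACTIVE before resuming virtchnl requests.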
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Returns true if the VF is reset, false otherwise.
 **/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	/* If the VFs have been disabled, this means something else is
	 * resetting the VF, so we shouldn't continue.
	 */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise they must perform these resets in a serialized
 * fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 **/
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int i, v;
	u32 reg;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_trigger_vf_reset(&pf->vf[v], flr);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence, beginning with the VF that
		 * failed the previous check.
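		 * A VF that has not finished resetting stalls the scan here
		 * until the next 10ms poll of the outer loop.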
		 */
		while (v < pf->num_alloc_vfs) {
			vf = &pf->vf[v];
			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
				break;

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			pf->vf[v].vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* HW may need up to 50ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_cleanup_reset_vf(&pf->vf[v]);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;
	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		usleep_range(1000, 2000);

	i40e_notify_client_of_vf_enable(pf, 0);

	/* Amortize wait time by stopping all VFs at the same time */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned.
	 * Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, pf->state);
}

#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;

		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* VF resources get allocated during reset */
	i40e_reset_all_vfs(pf, false);

	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}

#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
			 num_vfs, pf->num_req_vfs);
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}

/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	if (num_vfs) {
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
		}
		ret = i40e_pci_sriov_enable(pdev, num_vfs);
		goto sriov_configure_out;
	}

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
		i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		ret = -EINVAL;
		goto sriov_configure_out;
	}
sriov_configure_out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}

/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	i40e_status aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
			 vf->vf_id, v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
		/* reset the invalid counter if a valid message is received.
		 */
		vf->num_invalid_msgs = 0;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d aq_err %d\n",
			 vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}

/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct virtchnl_version_info));
}

/**
 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
 * @vf: pointer to VF structure
 **/
static void i40e_del_qch(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	/* first element in the array belongs to the primary VF VSI and we
	 * shouldn't delete it.
	 * We should however delete the rest of the VSIs created.
	 */
	for (i = 1; i < vf->num_tc; i++) {
		if (vf->ch[i].vsi_idx) {
			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
			vf->ch[i].vsi_idx = 0;
			vf->ch[i].vsi_id = 0;
		}
	}
}

/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int num_vsis = 1;
	int len = 0;
	int ret;

	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = (sizeof(struct virtchnl_vf_resource) +
	       sizeof(struct virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	if (i40e_vf_client_capable(pf, vf->vf_id) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	} else {
		clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
			dev_err(&pf->pdev->dev,
				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
				vf->vf_id);
			aq_ret = I40E_ERR_PARAM;
			goto err;
		}
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
	}

	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
			vfres->vf_cap_flags |=
					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
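	/* Vector 0 maps to VPINT_LNKLST0 (see i40e_config_irq_link_list());
	 * the remaining vectors serve as the VF's queue vectors.
	 */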
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; 1935 vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE; 1936 vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE; 1937 1938 if (vf->lan_vsi_idx) { 1939 vfres->vsi_res[0].vsi_id = vf->lan_vsi_id; 1940 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; 1941 vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs; 1942 /* VFs only use TC 0 */ 1943 vfres->vsi_res[0].qset_handle 1944 = le16_to_cpu(vsi->info.qs_handle[0]); 1945 ether_addr_copy(vfres->vsi_res[0].default_mac_addr, 1946 vf->default_lan_addr.addr); 1947 } 1948 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); 1949 1950 err: 1951 /* send the response back to the VF */ 1952 ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, 1953 aq_ret, (u8 *)vfres, len); 1954 1955 kfree(vfres); 1956 return ret; 1957 } 1958 1959 /** 1960 * i40e_vc_reset_vf_msg 1961 * @vf: pointer to the VF info 1962 * 1963 * called from the VF to reset itself, 1964 * unlike other virtchnl messages, PF driver 1965 * doesn't send the response back to the VF 1966 **/ 1967 static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) 1968 { 1969 if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 1970 i40e_reset_vf(vf, false); 1971 } 1972 1973 /** 1974 * i40e_getnum_vf_vsi_vlan_filters 1975 * @vsi: pointer to the vsi 1976 * 1977 * called to get the number of VLANs offloaded on this VF 1978 **/ 1979 static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) 1980 { 1981 struct i40e_mac_filter *f; 1982 int num_vlans = 0, bkt; 1983 1984 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { 1985 if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) 1986 num_vlans++; 1987 } 1988 1989 return num_vlans; 1990 } 1991 1992 /** 1993 * i40e_vc_config_promiscuous_mode_msg 1994 * @vf: pointer to the VF info 1995 * @msg: pointer to the msg buffer 1996 * 1997 * called from the VF to configure the promiscuous mode of 1998 * VF vsis 1999 **/ 2000 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) 2001 { 2002 struct virtchnl_promisc_info *info = 2003 (struct virtchnl_promisc_info *)msg; 2004 struct i40e_pf *pf = vf->pf; 2005 i40e_status aq_ret = 0; 2006 bool allmulti = false; 2007 bool alluni = false; 2008 2009 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2010 aq_ret = I40E_ERR_PARAM; 2011 goto err_out; 2012 } 2013 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 2014 dev_err(&pf->pdev->dev, 2015 "Unprivileged VF %d is attempting to configure promiscuous mode\n", 2016 vf->vf_id); 2017 2018 /* Lie to the VF on purpose, because this is an error we can 2019 * ignore. Unprivileged VF is not a virtual channel error. 
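 * In effect the reply below reports success (aq_ret = 0) while leaving the
 * promiscuous state untouched, so an unprivileged VF driver is not pushed
 * into a virtchnl error path it cannot act on.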
2020 */ 2021 aq_ret = 0; 2022 goto err_out; 2023 } 2024 2025 if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) { 2026 aq_ret = I40E_ERR_PARAM; 2027 goto err_out; 2028 } 2029 2030 if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) { 2031 aq_ret = I40E_ERR_PARAM; 2032 goto err_out; 2033 } 2034 2035 /* Multicast promiscuous handling*/ 2036 if (info->flags & FLAG_VF_MULTICAST_PROMISC) 2037 allmulti = true; 2038 2039 if (info->flags & FLAG_VF_UNICAST_PROMISC) 2040 alluni = true; 2041 aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti, 2042 alluni); 2043 if (!aq_ret) { 2044 if (allmulti) { 2045 dev_info(&pf->pdev->dev, 2046 "VF %d successfully set multicast promiscuous mode\n", 2047 vf->vf_id); 2048 set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); 2049 } else { 2050 dev_info(&pf->pdev->dev, 2051 "VF %d successfully unset multicast promiscuous mode\n", 2052 vf->vf_id); 2053 clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); 2054 } 2055 if (alluni) { 2056 dev_info(&pf->pdev->dev, 2057 "VF %d successfully set unicast promiscuous mode\n", 2058 vf->vf_id); 2059 set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); 2060 } else { 2061 dev_info(&pf->pdev->dev, 2062 "VF %d successfully unset unicast promiscuous mode\n", 2063 vf->vf_id); 2064 clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); 2065 } 2066 } 2067 err_out: 2068 /* send the response to the VF */ 2069 return i40e_vc_send_resp_to_vf(vf, 2070 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, 2071 aq_ret); 2072 } 2073 2074 /** 2075 * i40e_vc_config_queues_msg 2076 * @vf: pointer to the VF info 2077 * @msg: pointer to the msg buffer 2078 * 2079 * called from the VF to configure the rx/tx 2080 * queues 2081 **/ 2082 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) 2083 { 2084 struct virtchnl_vsi_queue_config_info *qci = 2085 (struct virtchnl_vsi_queue_config_info *)msg; 2086 struct virtchnl_queue_pair_info *qpi; 2087 struct i40e_pf *pf = vf->pf; 2088 u16 vsi_id, vsi_queue_id = 0; 2089 u16 num_qps_all = 0; 2090 i40e_status aq_ret = 0; 2091 int i, j = 0, idx = 0; 2092 2093 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2094 aq_ret = I40E_ERR_PARAM; 2095 goto error_param; 2096 } 2097 2098 if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) { 2099 aq_ret = I40E_ERR_PARAM; 2100 goto error_param; 2101 } 2102 2103 if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) { 2104 aq_ret = I40E_ERR_PARAM; 2105 goto error_param; 2106 } 2107 2108 if (vf->adq_enabled) { 2109 for (i = 0; i < I40E_MAX_VF_VSI; i++) 2110 num_qps_all += vf->ch[i].num_qps; 2111 if (num_qps_all != qci->num_queue_pairs) { 2112 aq_ret = I40E_ERR_PARAM; 2113 goto error_param; 2114 } 2115 } 2116 2117 vsi_id = qci->vsi_id; 2118 2119 for (i = 0; i < qci->num_queue_pairs; i++) { 2120 qpi = &qci->qpair[i]; 2121 2122 if (!vf->adq_enabled) { 2123 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, 2124 qpi->txq.queue_id)) { 2125 aq_ret = I40E_ERR_PARAM; 2126 goto error_param; 2127 } 2128 2129 vsi_queue_id = qpi->txq.queue_id; 2130 2131 if (qpi->txq.vsi_id != qci->vsi_id || 2132 qpi->rxq.vsi_id != qci->vsi_id || 2133 qpi->rxq.queue_id != vsi_queue_id) { 2134 aq_ret = I40E_ERR_PARAM; 2135 goto error_param; 2136 } 2137 } 2138 2139 if (vf->adq_enabled) { 2140 if (idx >= ARRAY_SIZE(vf->ch)) { 2141 aq_ret = I40E_ERR_NO_AVAILABLE_VSI; 2142 goto error_param; 2143 } 2144 vsi_id = vf->ch[idx].vsi_id; 2145 } 2146 2147 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id, 2148 &qpi->rxq) || 2149 i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id, 2150 &qpi->txq)) { 2151 aq_ret = I40E_ERR_PARAM; 2152 goto 
error_param; 2153 } 2154 2155 /* For ADq there can be up to 4 VSIs with max 4 queues each. 2156 * VF does not know about these additional VSIs and all 2157 * it cares is about its own queues. PF configures these queues 2158 * to its appropriate VSIs based on TC mapping 2159 **/ 2160 if (vf->adq_enabled) { 2161 if (idx >= ARRAY_SIZE(vf->ch)) { 2162 aq_ret = I40E_ERR_NO_AVAILABLE_VSI; 2163 goto error_param; 2164 } 2165 if (j == (vf->ch[idx].num_qps - 1)) { 2166 idx++; 2167 j = 0; /* resetting the queue count */ 2168 vsi_queue_id = 0; 2169 } else { 2170 j++; 2171 vsi_queue_id++; 2172 } 2173 } 2174 } 2175 /* set vsi num_queue_pairs in use to num configured by VF */ 2176 if (!vf->adq_enabled) { 2177 pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = 2178 qci->num_queue_pairs; 2179 } else { 2180 for (i = 0; i < vf->num_tc; i++) 2181 pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs = 2182 vf->ch[i].num_qps; 2183 } 2184 2185 error_param: 2186 /* send the response to the VF */ 2187 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, 2188 aq_ret); 2189 } 2190 2191 /** 2192 * i40e_validate_queue_map 2193 * @vsi_id: vsi id 2194 * @queuemap: Tx or Rx queue map 2195 * 2196 * check if Tx or Rx queue map is valid 2197 **/ 2198 static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id, 2199 unsigned long queuemap) 2200 { 2201 u16 vsi_queue_id, queue_id; 2202 2203 for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) { 2204 if (vf->adq_enabled) { 2205 vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id; 2206 queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF); 2207 } else { 2208 queue_id = vsi_queue_id; 2209 } 2210 2211 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) 2212 return -EINVAL; 2213 } 2214 2215 return 0; 2216 } 2217 2218 /** 2219 * i40e_vc_config_irq_map_msg 2220 * @vf: pointer to the VF info 2221 * @msg: pointer to the msg buffer 2222 * 2223 * called from the VF to configure the irq to 2224 * queue map 2225 **/ 2226 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg) 2227 { 2228 struct virtchnl_irq_map_info *irqmap_info = 2229 (struct virtchnl_irq_map_info *)msg; 2230 struct virtchnl_vector_map *map; 2231 u16 vsi_id; 2232 i40e_status aq_ret = 0; 2233 int i; 2234 2235 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2236 aq_ret = I40E_ERR_PARAM; 2237 goto error_param; 2238 } 2239 2240 if (irqmap_info->num_vectors > 2241 vf->pf->hw.func_caps.num_msix_vectors_vf) { 2242 aq_ret = I40E_ERR_PARAM; 2243 goto error_param; 2244 } 2245 2246 for (i = 0; i < irqmap_info->num_vectors; i++) { 2247 map = &irqmap_info->vecmap[i]; 2248 /* validate msg params */ 2249 if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) || 2250 !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) { 2251 aq_ret = I40E_ERR_PARAM; 2252 goto error_param; 2253 } 2254 vsi_id = map->vsi_id; 2255 2256 if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) { 2257 aq_ret = I40E_ERR_PARAM; 2258 goto error_param; 2259 } 2260 2261 if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) { 2262 aq_ret = I40E_ERR_PARAM; 2263 goto error_param; 2264 } 2265 2266 i40e_config_irq_link_list(vf, vsi_id, map); 2267 } 2268 error_param: 2269 /* send the response to the VF */ 2270 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, 2271 aq_ret); 2272 } 2273 2274 /** 2275 * i40e_ctrl_vf_tx_rings 2276 * @vsi: the SRIOV VSI being configured 2277 * @q_map: bit map of the queues to be enabled 2278 * @enable: start or stop the queue 2279 **/ 2280 static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map, 
2281 bool enable) 2282 { 2283 struct i40e_pf *pf = vsi->back; 2284 int ret = 0; 2285 u16 q_id; 2286 2287 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { 2288 ret = i40e_control_wait_tx_q(vsi->seid, pf, 2289 vsi->base_queue + q_id, 2290 false /*is xdp*/, enable); 2291 if (ret) 2292 break; 2293 } 2294 return ret; 2295 } 2296 2297 /** 2298 * i40e_ctrl_vf_rx_rings 2299 * @vsi: the SRIOV VSI being configured 2300 * @q_map: bit map of the queues to be enabled 2301 * @enable: start or stop the queue 2302 **/ 2303 static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map, 2304 bool enable) 2305 { 2306 struct i40e_pf *pf = vsi->back; 2307 int ret = 0; 2308 u16 q_id; 2309 2310 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { 2311 ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id, 2312 enable); 2313 if (ret) 2314 break; 2315 } 2316 return ret; 2317 } 2318 2319 /** 2320 * i40e_vc_enable_queues_msg 2321 * @vf: pointer to the VF info 2322 * @msg: pointer to the msg buffer 2323 * 2324 * called from the VF to enable all or specific queue(s) 2325 **/ 2326 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) 2327 { 2328 struct virtchnl_queue_select *vqs = 2329 (struct virtchnl_queue_select *)msg; 2330 struct i40e_pf *pf = vf->pf; 2331 i40e_status aq_ret = 0; 2332 int i; 2333 2334 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2335 aq_ret = I40E_ERR_PARAM; 2336 goto error_param; 2337 } 2338 2339 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2340 aq_ret = I40E_ERR_PARAM; 2341 goto error_param; 2342 } 2343 2344 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { 2345 aq_ret = I40E_ERR_PARAM; 2346 goto error_param; 2347 } 2348 2349 /* Use the queue bit map sent by the VF */ 2350 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, 2351 true)) { 2352 aq_ret = I40E_ERR_TIMEOUT; 2353 goto error_param; 2354 } 2355 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, 2356 true)) { 2357 aq_ret = I40E_ERR_TIMEOUT; 2358 goto error_param; 2359 } 2360 2361 /* need to start the rings for additional ADq VSI's as well */ 2362 if (vf->adq_enabled) { 2363 /* zero belongs to LAN VSI */ 2364 for (i = 1; i < vf->num_tc; i++) { 2365 if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx])) 2366 aq_ret = I40E_ERR_TIMEOUT; 2367 } 2368 } 2369 2370 error_param: 2371 /* send the response to the VF */ 2372 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, 2373 aq_ret); 2374 } 2375 2376 /** 2377 * i40e_vc_disable_queues_msg 2378 * @vf: pointer to the VF info 2379 * @msg: pointer to the msg buffer 2380 * 2381 * called from the VF to disable all or specific 2382 * queue(s) 2383 **/ 2384 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) 2385 { 2386 struct virtchnl_queue_select *vqs = 2387 (struct virtchnl_queue_select *)msg; 2388 struct i40e_pf *pf = vf->pf; 2389 i40e_status aq_ret = 0; 2390 2391 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2392 aq_ret = I40E_ERR_PARAM; 2393 goto error_param; 2394 } 2395 2396 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2397 aq_ret = I40E_ERR_PARAM; 2398 goto error_param; 2399 } 2400 2401 if ((vqs->rx_queues == 0 && vqs->tx_queues == 0) || 2402 vqs->rx_queues > I40E_MAX_VF_QUEUES || 2403 vqs->tx_queues > I40E_MAX_VF_QUEUES) { 2404 aq_ret = I40E_ERR_PARAM; 2405 goto error_param; 2406 } 2407 2408 /* Use the queue bit map sent by the VF */ 2409 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, 2410 false)) { 2411 aq_ret = I40E_ERR_TIMEOUT; 2412 goto error_param; 2413 } 
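	/* Note the ordering: this disable path quiesces Tx rings before Rx
	 * rings, the mirror image of the enable path above, which started
	 * the Rx side first.
	 */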
2414 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, 2415 false)) { 2416 aq_ret = I40E_ERR_TIMEOUT; 2417 goto error_param; 2418 } 2419 error_param: 2420 /* send the response to the VF */ 2421 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, 2422 aq_ret); 2423 } 2424 2425 /** 2426 * i40e_vc_request_queues_msg 2427 * @vf: pointer to the VF info 2428 * @msg: pointer to the msg buffer 2429 * 2430 * VFs get a default number of queues but can use this message to request a 2431 * different number. If the request is successful, PF will reset the VF and 2432 * return 0. If unsuccessful, PF will send message informing VF of number of 2433 * available queues and return result of sending VF a message. 2434 **/ 2435 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) 2436 { 2437 struct virtchnl_vf_res_request *vfres = 2438 (struct virtchnl_vf_res_request *)msg; 2439 u16 req_pairs = vfres->num_queue_pairs; 2440 u8 cur_pairs = vf->num_queue_pairs; 2441 struct i40e_pf *pf = vf->pf; 2442 2443 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 2444 return -EINVAL; 2445 2446 if (req_pairs > I40E_MAX_VF_QUEUES) { 2447 dev_err(&pf->pdev->dev, 2448 "VF %d tried to request more than %d queues.\n", 2449 vf->vf_id, 2450 I40E_MAX_VF_QUEUES); 2451 vfres->num_queue_pairs = I40E_MAX_VF_QUEUES; 2452 } else if (req_pairs - cur_pairs > pf->queues_left) { 2453 dev_warn(&pf->pdev->dev, 2454 "VF %d requested %d more queues, but only %d left.\n", 2455 vf->vf_id, 2456 req_pairs - cur_pairs, 2457 pf->queues_left); 2458 vfres->num_queue_pairs = pf->queues_left + cur_pairs; 2459 } else { 2460 /* successful request */ 2461 vf->num_req_queues = req_pairs; 2462 i40e_vc_notify_vf_reset(vf); 2463 i40e_reset_vf(vf, false); 2464 return 0; 2465 } 2466 2467 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0, 2468 (u8 *)vfres, sizeof(*vfres)); 2469 } 2470 2471 /** 2472 * i40e_vc_get_stats_msg 2473 * @vf: pointer to the VF info 2474 * @msg: pointer to the msg buffer 2475 * 2476 * called from the VF to get vsi stats 2477 **/ 2478 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) 2479 { 2480 struct virtchnl_queue_select *vqs = 2481 (struct virtchnl_queue_select *)msg; 2482 struct i40e_pf *pf = vf->pf; 2483 struct i40e_eth_stats stats; 2484 i40e_status aq_ret = 0; 2485 struct i40e_vsi *vsi; 2486 2487 memset(&stats, 0, sizeof(struct i40e_eth_stats)); 2488 2489 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2490 aq_ret = I40E_ERR_PARAM; 2491 goto error_param; 2492 } 2493 2494 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2495 aq_ret = I40E_ERR_PARAM; 2496 goto error_param; 2497 } 2498 2499 vsi = pf->vsi[vf->lan_vsi_idx]; 2500 if (!vsi) { 2501 aq_ret = I40E_ERR_PARAM; 2502 goto error_param; 2503 } 2504 i40e_update_eth_stats(vsi); 2505 stats = vsi->eth_stats; 2506 2507 error_param: 2508 /* send the response back to the VF */ 2509 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret, 2510 (u8 *)&stats, sizeof(stats)); 2511 } 2512 2513 /* If the VF is not trusted restrict the number of MAC/VLAN it can program 2514 * MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast 2515 */ 2516 #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1) 2517 #define I40E_VC_MAX_VLAN_PER_VF 16 2518 2519 /** 2520 * i40e_check_vf_permission 2521 * @vf: pointer to the VF info 2522 * @al: MAC address list from virtchnl 2523 * 2524 * Check that the given list of MAC addresses is allowed. Will return -EPERM 2525 * if any address in the list is not valid. 
Checks the following conditions:
 *
 * 1) broadcast and zero addresses are never valid
 * 2) unicast addresses are not allowed if the VMM has administratively set
 *    the VF MAC address, unless the VF is marked as privileged.
 * 3) There is enough space to add all the addresses.
 *
 * Note that to guarantee consistency, this function is expected to be called
 * while holding the mac_filter_hash_lock, as otherwise the current number of
 * addresses might not be accurate.
 **/
static inline int i40e_check_vf_permission(struct i40e_vf *vf,
					   struct virtchnl_ether_addr_list *al)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	/* If this VF is not privileged, then we can't add more than a limited
	 * number of addresses. Check to make sure that the additions do not
	 * push us over the limit.
	 */
	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
	    (vf->num_mac + al->num_elements) > I40E_VC_MAX_MAC_ADDR_PER_VF) {
		dev_err(&pf->pdev->dev,
			"Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
		return -EPERM;
	}

	for (i = 0; i < al->num_elements; i++) {
		u8 *addr = al->list[i].addr;

		if (is_broadcast_ether_addr(addr) ||
		    is_zero_ether_addr(addr)) {
			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
				addr);
			return I40E_ERR_INVALID_MAC_ADDR;
		}

		/* If the host VMM administrator has set the VF MAC address
		 * administratively via the ndo_set_vf_mac command then deny
		 * permission to the VF to add or delete unicast MAC addresses,
		 * unless the VF is privileged, in which case it may do as it
		 * pleases. The VF may request to set the MAC address filter
		 * already assigned to it, so do not return an error in that
		 * case.
		 */
		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
		    !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
		    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
			dev_err(&pf->pdev->dev,
				"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
			return -EPERM;
		}
	}

	return 0;
}

/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * add guest mac address filter
 **/
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_ether_addr_list *al =
		(struct virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];

	/* Lock once, because every function called inside the loop below
	 * accesses the VSI's MAC filter list, which must be protected by the
	 * same lock.
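	 *
	 * The pattern, in sketch form:
	 *
	 *   spin_lock_bh(&vsi->mac_filter_hash_lock);
	 *   i40e_check_vf_permission(vf, al);      <- counts held filters
	 *   i40e_find_mac()/i40e_add_mac_filter(); <- per list element
	 *   spin_unlock_bh(&vsi->mac_filter_hash_lock);
	 *
	 * i40e_check_vf_permission() documents the same locking requirement
	 * in its kernel-doc above.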
2608 */ 2609 spin_lock_bh(&vsi->mac_filter_hash_lock); 2610 2611 ret = i40e_check_vf_permission(vf, al); 2612 if (ret) { 2613 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2614 goto error_param; 2615 } 2616 2617 /* add new addresses to the list */ 2618 for (i = 0; i < al->num_elements; i++) { 2619 struct i40e_mac_filter *f; 2620 2621 f = i40e_find_mac(vsi, al->list[i].addr); 2622 if (!f) { 2623 f = i40e_add_mac_filter(vsi, al->list[i].addr); 2624 2625 if (!f) { 2626 dev_err(&pf->pdev->dev, 2627 "Unable to add MAC filter %pM for VF %d\n", 2628 al->list[i].addr, vf->vf_id); 2629 ret = I40E_ERR_PARAM; 2630 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2631 goto error_param; 2632 } else { 2633 vf->num_mac++; 2634 } 2635 } 2636 } 2637 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2638 2639 /* program the updated filter list */ 2640 ret = i40e_sync_vsi_filters(vsi); 2641 if (ret) 2642 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", 2643 vf->vf_id, ret); 2644 2645 error_param: 2646 /* send the response to the VF */ 2647 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, 2648 ret); 2649 } 2650 2651 /** 2652 * i40e_vc_del_mac_addr_msg 2653 * @vf: pointer to the VF info 2654 * @msg: pointer to the msg buffer 2655 * 2656 * remove guest mac address filter 2657 **/ 2658 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) 2659 { 2660 struct virtchnl_ether_addr_list *al = 2661 (struct virtchnl_ether_addr_list *)msg; 2662 struct i40e_pf *pf = vf->pf; 2663 struct i40e_vsi *vsi = NULL; 2664 i40e_status ret = 0; 2665 int i; 2666 2667 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2668 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { 2669 ret = I40E_ERR_PARAM; 2670 goto error_param; 2671 } 2672 2673 for (i = 0; i < al->num_elements; i++) { 2674 if (is_broadcast_ether_addr(al->list[i].addr) || 2675 is_zero_ether_addr(al->list[i].addr)) { 2676 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n", 2677 al->list[i].addr, vf->vf_id); 2678 ret = I40E_ERR_INVALID_MAC_ADDR; 2679 goto error_param; 2680 } 2681 2682 if (vf->pf_set_mac && 2683 ether_addr_equal(al->list[i].addr, 2684 vf->default_lan_addr.addr)) { 2685 dev_err(&pf->pdev->dev, 2686 "MAC addr %pM has been set by PF, cannot delete it for VF %d, reset VF to change MAC addr\n", 2687 vf->default_lan_addr.addr, vf->vf_id); 2688 ret = I40E_ERR_PARAM; 2689 goto error_param; 2690 } 2691 } 2692 vsi = pf->vsi[vf->lan_vsi_idx]; 2693 2694 spin_lock_bh(&vsi->mac_filter_hash_lock); 2695 /* delete addresses from the list */ 2696 for (i = 0; i < al->num_elements; i++) 2697 if (i40e_del_mac_filter(vsi, al->list[i].addr)) { 2698 ret = I40E_ERR_INVALID_MAC_ADDR; 2699 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2700 goto error_param; 2701 } else { 2702 vf->num_mac--; 2703 } 2704 2705 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2706 2707 /* program the updated filter list */ 2708 ret = i40e_sync_vsi_filters(vsi); 2709 if (ret) 2710 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", 2711 vf->vf_id, ret); 2712 2713 error_param: 2714 /* send the response to the VF */ 2715 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, 2716 ret); 2717 } 2718 2719 /** 2720 * i40e_vc_add_vlan_msg 2721 * @vf: pointer to the VF info 2722 * @msg: pointer to the msg buffer 2723 * 2724 * program guest vlan id 2725 **/ 2726 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg) 2727 { 2728 struct virtchnl_vlan_filter_list *vfl = 2729 (struct virtchnl_vlan_filter_list *)msg; 2730 struct i40e_pf *pf = vf->pf; 2731 
struct i40e_vsi *vsi = NULL; 2732 i40e_status aq_ret = 0; 2733 int i; 2734 2735 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) && 2736 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 2737 dev_err(&pf->pdev->dev, 2738 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n"); 2739 goto error_param; 2740 } 2741 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2742 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { 2743 aq_ret = I40E_ERR_PARAM; 2744 goto error_param; 2745 } 2746 2747 for (i = 0; i < vfl->num_elements; i++) { 2748 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 2749 aq_ret = I40E_ERR_PARAM; 2750 dev_err(&pf->pdev->dev, 2751 "invalid VF VLAN id %d\n", vfl->vlan_id[i]); 2752 goto error_param; 2753 } 2754 } 2755 vsi = pf->vsi[vf->lan_vsi_idx]; 2756 if (vsi->info.pvid) { 2757 aq_ret = I40E_ERR_PARAM; 2758 goto error_param; 2759 } 2760 2761 i40e_vlan_stripping_enable(vsi); 2762 for (i = 0; i < vfl->num_elements; i++) { 2763 /* add new VLAN filter */ 2764 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]); 2765 if (!ret) 2766 vf->num_vlan++; 2767 2768 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 2769 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 2770 true, 2771 vfl->vlan_id[i], 2772 NULL); 2773 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 2774 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 2775 true, 2776 vfl->vlan_id[i], 2777 NULL); 2778 2779 if (ret) 2780 dev_err(&pf->pdev->dev, 2781 "Unable to add VLAN filter %d for VF %d, error %d\n", 2782 vfl->vlan_id[i], vf->vf_id, ret); 2783 } 2784 2785 error_param: 2786 /* send the response to the VF */ 2787 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret); 2788 } 2789 2790 /** 2791 * i40e_vc_remove_vlan_msg 2792 * @vf: pointer to the VF info 2793 * @msg: pointer to the msg buffer 2794 * 2795 * remove programmed guest vlan id 2796 **/ 2797 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) 2798 { 2799 struct virtchnl_vlan_filter_list *vfl = 2800 (struct virtchnl_vlan_filter_list *)msg; 2801 struct i40e_pf *pf = vf->pf; 2802 struct i40e_vsi *vsi = NULL; 2803 i40e_status aq_ret = 0; 2804 int i; 2805 2806 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2807 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { 2808 aq_ret = I40E_ERR_PARAM; 2809 goto error_param; 2810 } 2811 2812 for (i = 0; i < vfl->num_elements; i++) { 2813 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 2814 aq_ret = I40E_ERR_PARAM; 2815 goto error_param; 2816 } 2817 } 2818 2819 vsi = pf->vsi[vf->lan_vsi_idx]; 2820 if (vsi->info.pvid) { 2821 if (vfl->num_elements > 1 || vfl->vlan_id[0]) 2822 aq_ret = I40E_ERR_PARAM; 2823 goto error_param; 2824 } 2825 2826 for (i = 0; i < vfl->num_elements; i++) { 2827 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); 2828 vf->num_vlan--; 2829 2830 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 2831 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 2832 false, 2833 vfl->vlan_id[i], 2834 NULL); 2835 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 2836 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 2837 false, 2838 vfl->vlan_id[i], 2839 NULL); 2840 } 2841 2842 error_param: 2843 /* send the response to the VF */ 2844 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret); 2845 } 2846 2847 /** 2848 * i40e_vc_iwarp_msg 2849 * @vf: pointer to the VF info 2850 * @msg: pointer to the msg buffer 2851 * @msglen: msg length 2852 * 2853 * called from the VF for the iwarp msgs 2854 **/ 2855 static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, 
u16 msglen) 2856 { 2857 struct i40e_pf *pf = vf->pf; 2858 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; 2859 i40e_status aq_ret = 0; 2860 2861 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2862 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { 2863 aq_ret = I40E_ERR_PARAM; 2864 goto error_param; 2865 } 2866 2867 i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id, 2868 msg, msglen); 2869 2870 error_param: 2871 /* send the response to the VF */ 2872 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP, 2873 aq_ret); 2874 } 2875 2876 /** 2877 * i40e_vc_iwarp_qvmap_msg 2878 * @vf: pointer to the VF info 2879 * @msg: pointer to the msg buffer 2880 * @config: config qvmap or release it 2881 * 2882 * called from the VF for the iwarp msgs 2883 **/ 2884 static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config) 2885 { 2886 struct virtchnl_iwarp_qvlist_info *qvlist_info = 2887 (struct virtchnl_iwarp_qvlist_info *)msg; 2888 i40e_status aq_ret = 0; 2889 2890 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2891 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { 2892 aq_ret = I40E_ERR_PARAM; 2893 goto error_param; 2894 } 2895 2896 if (config) { 2897 if (i40e_config_iwarp_qvlist(vf, qvlist_info)) 2898 aq_ret = I40E_ERR_PARAM; 2899 } else { 2900 i40e_release_iwarp_qvlist(vf); 2901 } 2902 2903 error_param: 2904 /* send the response to the VF */ 2905 return i40e_vc_send_resp_to_vf(vf, 2906 config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP : 2907 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, 2908 aq_ret); 2909 } 2910 2911 /** 2912 * i40e_vc_config_rss_key 2913 * @vf: pointer to the VF info 2914 * @msg: pointer to the msg buffer 2915 * 2916 * Configure the VF's RSS key 2917 **/ 2918 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg) 2919 { 2920 struct virtchnl_rss_key *vrk = 2921 (struct virtchnl_rss_key *)msg; 2922 struct i40e_pf *pf = vf->pf; 2923 struct i40e_vsi *vsi = NULL; 2924 i40e_status aq_ret = 0; 2925 2926 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2927 !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) || 2928 (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) { 2929 aq_ret = I40E_ERR_PARAM; 2930 goto err; 2931 } 2932 2933 vsi = pf->vsi[vf->lan_vsi_idx]; 2934 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0); 2935 err: 2936 /* send the response to the VF */ 2937 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, 2938 aq_ret); 2939 } 2940 2941 /** 2942 * i40e_vc_config_rss_lut 2943 * @vf: pointer to the VF info 2944 * @msg: pointer to the msg buffer 2945 * 2946 * Configure the VF's RSS LUT 2947 **/ 2948 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg) 2949 { 2950 struct virtchnl_rss_lut *vrl = 2951 (struct virtchnl_rss_lut *)msg; 2952 struct i40e_pf *pf = vf->pf; 2953 struct i40e_vsi *vsi = NULL; 2954 i40e_status aq_ret = 0; 2955 u16 i; 2956 2957 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2958 !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) || 2959 (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) { 2960 aq_ret = I40E_ERR_PARAM; 2961 goto err; 2962 } 2963 2964 for (i = 0; i < vrl->lut_entries; i++) 2965 if (vrl->lut[i] >= vf->num_queue_pairs) { 2966 aq_ret = I40E_ERR_PARAM; 2967 goto err; 2968 } 2969 2970 vsi = pf->vsi[vf->lan_vsi_idx]; 2971 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE); 2972 /* send the response to the VF */ 2973 err: 2974 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, 2975 aq_ret); 2976 } 2977 2978 /** 2979 * i40e_vc_get_rss_hena 2980 * @vf: pointer to the VF info 2981 * 
@msg: pointer to the msg buffer
 *
 * Return the RSS HENA bits allowed by the hardware
 **/
static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_rss_hena *vrh = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	int len = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}
	len = sizeof(struct virtchnl_rss_hena);

	vrh = kzalloc(len, GFP_KERNEL);
	if (!vrh) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	vrh->hena = i40e_pf_get_default_rss_hena(pf);
err:
	/* send the response back to the VF */
	aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
					aq_ret, (u8 *)vrh, len);
	kfree(vrh);
	return aq_ret;
}

/**
 * i40e_vc_set_rss_hena
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Set the RSS HENA bits for the VF
 **/
static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_rss_hena *vrh =
		(struct virtchnl_rss_hena *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
			  (u32)(vrh->hena >> 32));

	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
}

/**
 * i40e_vc_enable_vlan_stripping
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Enable vlan header stripping for the VF
 **/
static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
{
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	vsi = vf->pf->vsi[vf->lan_vsi_idx];
	i40e_vlan_stripping_enable(vsi);

	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
				       aq_ret);
}

/**
 * i40e_vc_disable_vlan_stripping
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Disable vlan header stripping for the VF
 **/
static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
{
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	vsi = vf->pf->vsi[vf->lan_vsi_idx];
	i40e_vlan_stripping_disable(vsi);

	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
				       aq_ret);
}

/**
 * i40e_validate_cloud_filter
 * @vf: pointer to the VF info
 * @tc_filter: pointer to the virtchnl_filter to validate
 *
 * This function validates a cloud filter programmed as a TC filter for ADq
 **/
static int i40e_validate_cloud_filter(struct i40e_vf *vf,
				      struct virtchnl_filter *tc_filter)
{
	struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
	struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
struct i40e_mac_filter *f; 3108 struct hlist_node *h; 3109 bool found = false; 3110 int bkt; 3111 3112 if (!tc_filter->action) { 3113 dev_info(&pf->pdev->dev, 3114 "VF %d: Currently ADq doesn't support Drop Action\n", 3115 vf->vf_id); 3116 goto err; 3117 } 3118 3119 /* action_meta is TC number here to which the filter is applied */ 3120 if (!tc_filter->action_meta || 3121 tc_filter->action_meta > I40E_MAX_VF_VSI) { 3122 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n", 3123 vf->vf_id, tc_filter->action_meta); 3124 goto err; 3125 } 3126 3127 /* Check filter if it's programmed for advanced mode or basic mode. 3128 * There are two ADq modes (for VF only), 3129 * 1. Basic mode: intended to allow as many filter options as possible 3130 * to be added to a VF in Non-trusted mode. Main goal is 3131 * to add filters to its own MAC and VLAN id. 3132 * 2. Advanced mode: is for allowing filters to be applied other than 3133 * its own MAC or VLAN. This mode requires the VF to be 3134 * Trusted. 3135 */ 3136 if (mask.dst_mac[0] && !mask.dst_ip[0]) { 3137 vsi = pf->vsi[vf->lan_vsi_idx]; 3138 f = i40e_find_mac(vsi, data.dst_mac); 3139 3140 if (!f) { 3141 dev_info(&pf->pdev->dev, 3142 "Destination MAC %pM doesn't belong to VF %d\n", 3143 data.dst_mac, vf->vf_id); 3144 goto err; 3145 } 3146 3147 if (mask.vlan_id) { 3148 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, 3149 hlist) { 3150 if (f->vlan == ntohs(data.vlan_id)) { 3151 found = true; 3152 break; 3153 } 3154 } 3155 if (!found) { 3156 dev_info(&pf->pdev->dev, 3157 "VF %d doesn't have any VLAN id %u\n", 3158 vf->vf_id, ntohs(data.vlan_id)); 3159 goto err; 3160 } 3161 } 3162 } else { 3163 /* Check if VF is trusted */ 3164 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 3165 dev_err(&pf->pdev->dev, 3166 "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n", 3167 vf->vf_id); 3168 return I40E_ERR_CONFIG; 3169 } 3170 } 3171 3172 if (mask.dst_mac[0] & data.dst_mac[0]) { 3173 if (is_broadcast_ether_addr(data.dst_mac) || 3174 is_zero_ether_addr(data.dst_mac)) { 3175 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n", 3176 vf->vf_id, data.dst_mac); 3177 goto err; 3178 } 3179 } 3180 3181 if (mask.src_mac[0] & data.src_mac[0]) { 3182 if (is_broadcast_ether_addr(data.src_mac) || 3183 is_zero_ether_addr(data.src_mac)) { 3184 dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n", 3185 vf->vf_id, data.src_mac); 3186 goto err; 3187 } 3188 } 3189 3190 if (mask.dst_port & data.dst_port) { 3191 if (!data.dst_port) { 3192 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n", 3193 vf->vf_id); 3194 goto err; 3195 } 3196 } 3197 3198 if (mask.src_port & data.src_port) { 3199 if (!data.src_port) { 3200 dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n", 3201 vf->vf_id); 3202 goto err; 3203 } 3204 } 3205 3206 if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW && 3207 tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) { 3208 dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n", 3209 vf->vf_id); 3210 goto err; 3211 } 3212 3213 if (mask.vlan_id & data.vlan_id) { 3214 if (ntohs(data.vlan_id) > I40E_MAX_VLANID) { 3215 dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n", 3216 vf->vf_id); 3217 goto err; 3218 } 3219 } 3220 3221 return I40E_SUCCESS; 3222 err: 3223 return I40E_ERR_CONFIG; 3224 } 3225 3226 /** 3227 * i40e_find_vsi_from_seid - searches for the vsi with the given seid 3228 * @vf: pointer to the VF info 3229 * @seid - seid of the vsi it is searching for 3230 **/ 3231 static struct i40e_vsi 
*i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid) 3232 { 3233 struct i40e_pf *pf = vf->pf; 3234 struct i40e_vsi *vsi = NULL; 3235 int i; 3236 3237 for (i = 0; i < vf->num_tc ; i++) { 3238 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id); 3239 if (vsi && vsi->seid == seid) 3240 return vsi; 3241 } 3242 return NULL; 3243 } 3244 3245 /** 3246 * i40e_del_all_cloud_filters 3247 * @vf: pointer to the VF info 3248 * 3249 * This function deletes all cloud filters 3250 **/ 3251 static void i40e_del_all_cloud_filters(struct i40e_vf *vf) 3252 { 3253 struct i40e_cloud_filter *cfilter = NULL; 3254 struct i40e_pf *pf = vf->pf; 3255 struct i40e_vsi *vsi = NULL; 3256 struct hlist_node *node; 3257 int ret; 3258 3259 hlist_for_each_entry_safe(cfilter, node, 3260 &vf->cloud_filter_list, cloud_node) { 3261 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid); 3262 3263 if (!vsi) { 3264 dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n", 3265 vf->vf_id, cfilter->seid); 3266 continue; 3267 } 3268 3269 if (cfilter->dst_port) 3270 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, 3271 false); 3272 else 3273 ret = i40e_add_del_cloud_filter(vsi, cfilter, false); 3274 if (ret) 3275 dev_err(&pf->pdev->dev, 3276 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n", 3277 vf->vf_id, i40e_stat_str(&pf->hw, ret), 3278 i40e_aq_str(&pf->hw, 3279 pf->hw.aq.asq_last_status)); 3280 3281 hlist_del(&cfilter->cloud_node); 3282 kfree(cfilter); 3283 vf->num_cloud_filters--; 3284 } 3285 } 3286 3287 /** 3288 * i40e_vc_del_cloud_filter 3289 * @vf: pointer to the VF info 3290 * @msg: pointer to the msg buffer 3291 * 3292 * This function deletes a cloud filter programmed as TC filter for ADq 3293 **/ 3294 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg) 3295 { 3296 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; 3297 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; 3298 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; 3299 struct i40e_cloud_filter cfilter, *cf = NULL; 3300 struct i40e_pf *pf = vf->pf; 3301 struct i40e_vsi *vsi = NULL; 3302 struct hlist_node *node; 3303 i40e_status aq_ret = 0; 3304 int i, ret; 3305 3306 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3307 aq_ret = I40E_ERR_PARAM; 3308 goto err; 3309 } 3310 3311 if (!vf->adq_enabled) { 3312 dev_info(&pf->pdev->dev, 3313 "VF %d: ADq not enabled, can't apply cloud filter\n", 3314 vf->vf_id); 3315 aq_ret = I40E_ERR_PARAM; 3316 goto err; 3317 } 3318 3319 if (i40e_validate_cloud_filter(vf, vcf)) { 3320 dev_info(&pf->pdev->dev, 3321 "VF %d: Invalid input, can't apply cloud filter\n", 3322 vf->vf_id); 3323 aq_ret = I40E_ERR_PARAM; 3324 goto err; 3325 } 3326 3327 memset(&cfilter, 0, sizeof(cfilter)); 3328 /* parse destination mac address */ 3329 for (i = 0; i < ETH_ALEN; i++) 3330 cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; 3331 3332 /* parse source mac address */ 3333 for (i = 0; i < ETH_ALEN; i++) 3334 cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; 3335 3336 cfilter.vlan_id = mask.vlan_id & tcf.vlan_id; 3337 cfilter.dst_port = mask.dst_port & tcf.dst_port; 3338 cfilter.src_port = mask.src_port & tcf.src_port; 3339 3340 switch (vcf->flow_type) { 3341 case VIRTCHNL_TCP_V4_FLOW: 3342 cfilter.n_proto = ETH_P_IP; 3343 if (mask.dst_ip[0] & tcf.dst_ip[0]) 3344 memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip, 3345 ARRAY_SIZE(tcf.dst_ip)); 3346 else if (mask.src_ip[0] & tcf.dst_ip[0]) 3347 memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip, 3348 ARRAY_SIZE(tcf.dst_ip)); 3349 break; 3350 case 
VIRTCHNL_TCP_V6_FLOW: 3351 cfilter.n_proto = ETH_P_IPV6; 3352 if (mask.dst_ip[3] & tcf.dst_ip[3]) 3353 memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip, 3354 sizeof(cfilter.ip.v6.dst_ip6)); 3355 if (mask.src_ip[3] & tcf.src_ip[3]) 3356 memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip, 3357 sizeof(cfilter.ip.v6.src_ip6)); 3358 break; 3359 default: 3360 /* TC filter can be configured based on different combinations 3361 * and in this case IP is not a part of filter config 3362 */ 3363 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", 3364 vf->vf_id); 3365 } 3366 3367 /* get the vsi to which the tc belongs to */ 3368 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; 3369 cfilter.seid = vsi->seid; 3370 cfilter.flags = vcf->field_flags; 3371 3372 /* Deleting TC filter */ 3373 if (tcf.dst_port) 3374 ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false); 3375 else 3376 ret = i40e_add_del_cloud_filter(vsi, &cfilter, false); 3377 if (ret) { 3378 dev_err(&pf->pdev->dev, 3379 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n", 3380 vf->vf_id, i40e_stat_str(&pf->hw, ret), 3381 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 3382 goto err; 3383 } 3384 3385 hlist_for_each_entry_safe(cf, node, 3386 &vf->cloud_filter_list, cloud_node) { 3387 if (cf->seid != cfilter.seid) 3388 continue; 3389 if (mask.dst_port) 3390 if (cfilter.dst_port != cf->dst_port) 3391 continue; 3392 if (mask.dst_mac[0]) 3393 if (!ether_addr_equal(cf->src_mac, cfilter.src_mac)) 3394 continue; 3395 /* for ipv4 data to be valid, only first byte of mask is set */ 3396 if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0]) 3397 if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip, 3398 ARRAY_SIZE(tcf.dst_ip))) 3399 continue; 3400 /* for ipv6, mask is set for all sixteen bytes (4 words) */ 3401 if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3]) 3402 if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6, 3403 sizeof(cfilter.ip.v6.src_ip6))) 3404 continue; 3405 if (mask.vlan_id) 3406 if (cfilter.vlan_id != cf->vlan_id) 3407 continue; 3408 3409 hlist_del(&cf->cloud_node); 3410 kfree(cf); 3411 vf->num_cloud_filters--; 3412 } 3413 3414 err: 3415 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER, 3416 aq_ret); 3417 } 3418 3419 /** 3420 * i40e_vc_add_cloud_filter 3421 * @vf: pointer to the VF info 3422 * @msg: pointer to the msg buffer 3423 * 3424 * This function adds a cloud filter programmed as TC filter for ADq 3425 **/ 3426 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) 3427 { 3428 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; 3429 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; 3430 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; 3431 struct i40e_cloud_filter *cfilter = NULL; 3432 struct i40e_pf *pf = vf->pf; 3433 struct i40e_vsi *vsi = NULL; 3434 i40e_status aq_ret = 0; 3435 int i, ret; 3436 3437 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3438 aq_ret = I40E_ERR_PARAM; 3439 goto err_out; 3440 } 3441 3442 if (!vf->adq_enabled) { 3443 dev_info(&pf->pdev->dev, 3444 "VF %d: ADq is not enabled, can't apply cloud filter\n", 3445 vf->vf_id); 3446 aq_ret = I40E_ERR_PARAM; 3447 goto err_out; 3448 } 3449 3450 if (i40e_validate_cloud_filter(vf, vcf)) { 3451 dev_info(&pf->pdev->dev, 3452 "VF %d: Invalid input/s, can't apply cloud filter\n", 3453 vf->vf_id); 3454 aq_ret = I40E_ERR_PARAM; 3455 goto err_out; 3456 } 3457 3458 cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL); 3459 if (!cfilter) 3460 return -ENOMEM; 3461 3462 /* parse destination mac address */ 3463 for (i = 0; i < 
ETH_ALEN; i++) 3464 cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; 3465 3466 /* parse source mac address */ 3467 for (i = 0; i < ETH_ALEN; i++) 3468 cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; 3469 3470 cfilter->vlan_id = mask.vlan_id & tcf.vlan_id; 3471 cfilter->dst_port = mask.dst_port & tcf.dst_port; 3472 cfilter->src_port = mask.src_port & tcf.src_port; 3473 3474 switch (vcf->flow_type) { 3475 case VIRTCHNL_TCP_V4_FLOW: 3476 cfilter->n_proto = ETH_P_IP; 3477 if (mask.dst_ip[0] & tcf.dst_ip[0]) 3478 memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip, 3479 ARRAY_SIZE(tcf.dst_ip)); 3480 else if (mask.src_ip[0] & tcf.dst_ip[0]) 3481 memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip, 3482 ARRAY_SIZE(tcf.dst_ip)); 3483 break; 3484 case VIRTCHNL_TCP_V6_FLOW: 3485 cfilter->n_proto = ETH_P_IPV6; 3486 if (mask.dst_ip[3] & tcf.dst_ip[3]) 3487 memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip, 3488 sizeof(cfilter->ip.v6.dst_ip6)); 3489 if (mask.src_ip[3] & tcf.src_ip[3]) 3490 memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip, 3491 sizeof(cfilter->ip.v6.src_ip6)); 3492 break; 3493 default: 3494 /* TC filter can be configured based on different combinations 3495 * and in this case IP is not a part of filter config 3496 */ 3497 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", 3498 vf->vf_id); 3499 } 3500 3501 /* get the VSI to which the TC belongs to */ 3502 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; 3503 cfilter->seid = vsi->seid; 3504 cfilter->flags = vcf->field_flags; 3505 3506 /* Adding cloud filter programmed as TC filter */ 3507 if (tcf.dst_port) 3508 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true); 3509 else 3510 ret = i40e_add_del_cloud_filter(vsi, cfilter, true); 3511 if (ret) { 3512 dev_err(&pf->pdev->dev, 3513 "VF %d: Failed to add cloud filter, err %s aq_err %s\n", 3514 vf->vf_id, i40e_stat_str(&pf->hw, ret), 3515 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 3516 goto err_free; 3517 } 3518 3519 INIT_HLIST_NODE(&cfilter->cloud_node); 3520 hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list); 3521 /* release the pointer passing it to the collection */ 3522 cfilter = NULL; 3523 vf->num_cloud_filters++; 3524 err_free: 3525 kfree(cfilter); 3526 err_out: 3527 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER, 3528 aq_ret); 3529 } 3530 3531 /** 3532 * i40e_vc_add_qch_msg: Add queue channel and enable ADq 3533 * @vf: pointer to the VF info 3534 * @msg: pointer to the msg buffer 3535 **/ 3536 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) 3537 { 3538 struct virtchnl_tc_info *tci = 3539 (struct virtchnl_tc_info *)msg; 3540 struct i40e_pf *pf = vf->pf; 3541 struct i40e_link_status *ls = &pf->hw.phy.link_info; 3542 int i, adq_request_qps = 0; 3543 i40e_status aq_ret = 0; 3544 u64 speed = 0; 3545 3546 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3547 aq_ret = I40E_ERR_PARAM; 3548 goto err; 3549 } 3550 3551 /* ADq cannot be applied if spoof check is ON */ 3552 if (vf->spoofchk) { 3553 dev_err(&pf->pdev->dev, 3554 "Spoof check is ON, turn it OFF to enable ADq\n"); 3555 aq_ret = I40E_ERR_PARAM; 3556 goto err; 3557 } 3558 3559 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) { 3560 dev_err(&pf->pdev->dev, 3561 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n", 3562 vf->vf_id); 3563 aq_ret = I40E_ERR_PARAM; 3564 goto err; 3565 } 3566 3567 /* max number of traffic classes for VF currently capped at 4 */ 3568 if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) { 3569 dev_err(&pf->pdev->dev, 3570 "VF %d 
trying to set %u TCs, valid range 1-%u TCs per VF\n", 3571 vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI); 3572 aq_ret = I40E_ERR_PARAM; 3573 goto err; 3574 } 3575 3576 /* validate queues for each TC */ 3577 for (i = 0; i < tci->num_tc; i++) 3578 if (!tci->list[i].count || 3579 tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) { 3580 dev_err(&pf->pdev->dev, 3581 "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n", 3582 vf->vf_id, i, tci->list[i].count, 3583 I40E_DEFAULT_QUEUES_PER_VF); 3584 aq_ret = I40E_ERR_PARAM; 3585 goto err; 3586 } 3587 3588 /* need Max VF queues but already have default number of queues */ 3589 adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF; 3590 3591 if (pf->queues_left < adq_request_qps) { 3592 dev_err(&pf->pdev->dev, 3593 "No queues left to allocate to VF %d\n", 3594 vf->vf_id); 3595 aq_ret = I40E_ERR_PARAM; 3596 goto err; 3597 } else { 3598 /* we need to allocate max VF queues to enable ADq so as to 3599 * make sure ADq enabled VF always gets back queues when it 3600 * goes through a reset. 3601 */ 3602 vf->num_queue_pairs = I40E_MAX_VF_QUEUES; 3603 } 3604 3605 /* get link speed in MB to validate rate limit */ 3606 switch (ls->link_speed) { 3607 case VIRTCHNL_LINK_SPEED_100MB: 3608 speed = SPEED_100; 3609 break; 3610 case VIRTCHNL_LINK_SPEED_1GB: 3611 speed = SPEED_1000; 3612 break; 3613 case VIRTCHNL_LINK_SPEED_10GB: 3614 speed = SPEED_10000; 3615 break; 3616 case VIRTCHNL_LINK_SPEED_20GB: 3617 speed = SPEED_20000; 3618 break; 3619 case VIRTCHNL_LINK_SPEED_25GB: 3620 speed = SPEED_25000; 3621 break; 3622 case VIRTCHNL_LINK_SPEED_40GB: 3623 speed = SPEED_40000; 3624 break; 3625 default: 3626 dev_err(&pf->pdev->dev, 3627 "Cannot detect link speed\n"); 3628 aq_ret = I40E_ERR_PARAM; 3629 goto err; 3630 } 3631 3632 /* parse data from the queue channel info */ 3633 vf->num_tc = tci->num_tc; 3634 for (i = 0; i < vf->num_tc; i++) { 3635 if (tci->list[i].max_tx_rate) { 3636 if (tci->list[i].max_tx_rate > speed) { 3637 dev_err(&pf->pdev->dev, 3638 "Invalid max tx rate %llu specified for VF %d.", 3639 tci->list[i].max_tx_rate, 3640 vf->vf_id); 3641 aq_ret = I40E_ERR_PARAM; 3642 goto err; 3643 } else { 3644 vf->ch[i].max_tx_rate = 3645 tci->list[i].max_tx_rate; 3646 } 3647 } 3648 vf->ch[i].num_qps = tci->list[i].count; 3649 } 3650 3651 /* set this flag only after making sure all inputs are sane */ 3652 vf->adq_enabled = true; 3653 /* num_req_queues is set when user changes number of queues via ethtool 3654 * and this causes issue for default VSI(which depends on this variable) 3655 * when ADq is enabled, hence reset it. 
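 * Once ADq is active, queue sizing is expected to be driven by vf->num_tc
 * and vf->ch[i].num_qps (parsed above) rather than by num_req_queues.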
3656 */ 3657 vf->num_req_queues = 0; 3658 3659 /* reset the VF in order to allocate resources */ 3660 i40e_vc_notify_vf_reset(vf); 3661 i40e_reset_vf(vf, false); 3662 3663 return I40E_SUCCESS; 3664 3665 /* send the response to the VF */ 3666 err: 3667 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS, 3668 aq_ret); 3669 } 3670 3671 /** 3672 * i40e_vc_del_qch_msg 3673 * @vf: pointer to the VF info 3674 * @msg: pointer to the msg buffer 3675 **/ 3676 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) 3677 { 3678 struct i40e_pf *pf = vf->pf; 3679 i40e_status aq_ret = 0; 3680 3681 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3682 aq_ret = I40E_ERR_PARAM; 3683 goto err; 3684 } 3685 3686 if (vf->adq_enabled) { 3687 i40e_del_all_cloud_filters(vf); 3688 i40e_del_qch(vf); 3689 vf->adq_enabled = false; 3690 vf->num_tc = 0; 3691 dev_info(&pf->pdev->dev, 3692 "Deleting Queue Channels and cloud filters for ADq on VF %d\n", 3693 vf->vf_id); 3694 } else { 3695 dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n", 3696 vf->vf_id); 3697 aq_ret = I40E_ERR_PARAM; 3698 } 3699 3700 /* reset the VF in order to allocate resources */ 3701 i40e_vc_notify_vf_reset(vf); 3702 i40e_reset_vf(vf, false); 3703 3704 return I40E_SUCCESS; 3705 3706 err: 3707 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS, 3708 aq_ret); 3709 } 3710 3711 /** 3712 * i40e_vc_process_vf_msg 3713 * @pf: pointer to the PF structure 3714 * @vf_id: source VF id 3715 * @v_opcode: operation code 3716 * @v_retval: unused return value code 3717 * @msg: pointer to the msg buffer 3718 * @msglen: msg length 3719 * 3720 * called from the common aeq/arq handler to 3721 * process request from VF 3722 **/ 3723 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, 3724 u32 __always_unused v_retval, u8 *msg, u16 msglen) 3725 { 3726 struct i40e_hw *hw = &pf->hw; 3727 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id; 3728 struct i40e_vf *vf; 3729 int ret; 3730 3731 pf->vf_aq_requests++; 3732 if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs) 3733 return -EINVAL; 3734 vf = &(pf->vf[local_vf_id]); 3735 3736 /* Check if VF is disabled. 
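 * A VF typically sits in I40E_VF_STATE_DISABLED while a reset is in
 * flight, so its message is rejected here rather than racing the reset
 * path.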
*/ 3737 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states)) 3738 return I40E_ERR_PARAM; 3739 3740 /* perform basic checks on the msg */ 3741 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); 3742 3743 if (ret) { 3744 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM); 3745 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", 3746 local_vf_id, v_opcode, msglen); 3747 switch (ret) { 3748 case VIRTCHNL_STATUS_ERR_PARAM: 3749 return -EPERM; 3750 default: 3751 return -EINVAL; 3752 } 3753 } 3754 3755 switch (v_opcode) { 3756 case VIRTCHNL_OP_VERSION: 3757 ret = i40e_vc_get_version_msg(vf, msg); 3758 break; 3759 case VIRTCHNL_OP_GET_VF_RESOURCES: 3760 ret = i40e_vc_get_vf_resources_msg(vf, msg); 3761 i40e_vc_notify_vf_link_state(vf); 3762 break; 3763 case VIRTCHNL_OP_RESET_VF: 3764 i40e_vc_reset_vf_msg(vf); 3765 ret = 0; 3766 break; 3767 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: 3768 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg); 3769 break; 3770 case VIRTCHNL_OP_CONFIG_VSI_QUEUES: 3771 ret = i40e_vc_config_queues_msg(vf, msg); 3772 break; 3773 case VIRTCHNL_OP_CONFIG_IRQ_MAP: 3774 ret = i40e_vc_config_irq_map_msg(vf, msg); 3775 break; 3776 case VIRTCHNL_OP_ENABLE_QUEUES: 3777 ret = i40e_vc_enable_queues_msg(vf, msg); 3778 i40e_vc_notify_vf_link_state(vf); 3779 break; 3780 case VIRTCHNL_OP_DISABLE_QUEUES: 3781 ret = i40e_vc_disable_queues_msg(vf, msg); 3782 break; 3783 case VIRTCHNL_OP_ADD_ETH_ADDR: 3784 ret = i40e_vc_add_mac_addr_msg(vf, msg); 3785 break; 3786 case VIRTCHNL_OP_DEL_ETH_ADDR: 3787 ret = i40e_vc_del_mac_addr_msg(vf, msg); 3788 break; 3789 case VIRTCHNL_OP_ADD_VLAN: 3790 ret = i40e_vc_add_vlan_msg(vf, msg); 3791 break; 3792 case VIRTCHNL_OP_DEL_VLAN: 3793 ret = i40e_vc_remove_vlan_msg(vf, msg); 3794 break; 3795 case VIRTCHNL_OP_GET_STATS: 3796 ret = i40e_vc_get_stats_msg(vf, msg); 3797 break; 3798 case VIRTCHNL_OP_IWARP: 3799 ret = i40e_vc_iwarp_msg(vf, msg, msglen); 3800 break; 3801 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: 3802 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true); 3803 break; 3804 case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: 3805 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false); 3806 break; 3807 case VIRTCHNL_OP_CONFIG_RSS_KEY: 3808 ret = i40e_vc_config_rss_key(vf, msg); 3809 break; 3810 case VIRTCHNL_OP_CONFIG_RSS_LUT: 3811 ret = i40e_vc_config_rss_lut(vf, msg); 3812 break; 3813 case VIRTCHNL_OP_GET_RSS_HENA_CAPS: 3814 ret = i40e_vc_get_rss_hena(vf, msg); 3815 break; 3816 case VIRTCHNL_OP_SET_RSS_HENA: 3817 ret = i40e_vc_set_rss_hena(vf, msg); 3818 break; 3819 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 3820 ret = i40e_vc_enable_vlan_stripping(vf, msg); 3821 break; 3822 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 3823 ret = i40e_vc_disable_vlan_stripping(vf, msg); 3824 break; 3825 case VIRTCHNL_OP_REQUEST_QUEUES: 3826 ret = i40e_vc_request_queues_msg(vf, msg); 3827 break; 3828 case VIRTCHNL_OP_ENABLE_CHANNELS: 3829 ret = i40e_vc_add_qch_msg(vf, msg); 3830 break; 3831 case VIRTCHNL_OP_DISABLE_CHANNELS: 3832 ret = i40e_vc_del_qch_msg(vf, msg); 3833 break; 3834 case VIRTCHNL_OP_ADD_CLOUD_FILTER: 3835 ret = i40e_vc_add_cloud_filter(vf, msg); 3836 break; 3837 case VIRTCHNL_OP_DEL_CLOUD_FILTER: 3838 ret = i40e_vc_del_cloud_filter(vf, msg); 3839 break; 3840 case VIRTCHNL_OP_UNKNOWN: 3841 default: 3842 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", 3843 v_opcode, local_vf_id); 3844 ret = i40e_vc_send_resp_to_vf(vf, v_opcode, 3845 I40E_ERR_NOT_IMPLEMENTED); 3846 break; 3847 } 3848 3849 return ret; 3850 } 3851 3852 /** 3853 * 
i40e_vc_process_vflr_event
 * @pf: pointer to the PF structure
 *
 * called from the VFLR irq handler to
 * free up VF resources and state variables
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;
	struct i40e_vf *vf;
	int vf_id;

	if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
		return 0;

	/* Re-enable the VFLR interrupt cause here, before looking for which
	 * VF got reset. Otherwise, if another VF gets a reset while the
	 * first one is being processed, that interrupt will be lost, and
	 * that VF will be stuck in reset forever.
	 */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);

	clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		vf = &pf->vf[vf_id];
		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
			i40e_reset_vf(vf, true);
	}

	return 0;
}

/**
 * i40e_validate_vf
 * @pf: the physical function
 * @vf_id: VF identifier
 *
 * Check that the VF is enabled and the VSI exists.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
{
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev,
			"Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto err_out;
	}
	vf = &pf->vf[vf_id];
	vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
	if (!vsi)
		ret = -EINVAL;
err_out:
	return ret;
}

/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: mac address
 *
 * program VF mac address
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;
	struct hlist_node *h;
	int bkt;
	u8 i;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error_param;

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_idx];

	/* When the VF is resetting wait until it is done.
	 * It can take up to 200 milliseconds,
	 * but wait for up to 300 milliseconds to be safe.
	 */
	for (i = 0; i < 15; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
			break;
		msleep(20);
	}
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset.

/**
 * i40e_validate_vf
 * @pf: the physical function
 * @vf_id: VF identifier
 *
 * Check that the VF is enabled and the VSI exists.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
{
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev,
			"Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto err_out;
	}
	vf = &pf->vf[vf_id];
	vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
	if (!vsi)
		ret = -EINVAL;
err_out:
	return ret;
}

/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: mac address
 *
 * program VF mac address
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;
	struct hlist_node *h;
	int bkt;
	u8 i;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error_param;

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_idx];

	/* When the VF is resetting wait until it is done.
	 * It can take up to 200 milliseconds,
	 * but wait for up to 300 milliseconds to be safe.
	 */
	for (i = 0; i < 15; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
			break;
		msleep(20);
	}
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_param;
	}

	if (is_multicast_ether_addr(mac)) {
		dev_err(&pf->pdev->dev,
			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	/* Lock once because below invoked function add/del_filter requires
	 * mac_filter_hash_lock to be held
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* delete the temporary mac address */
	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);

	/* Delete all the filters for this VSI - we're going to kill it
	 * anyway.
	 */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		__i40e_del_filter(vsi, f);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program mac filter */
	if (i40e_sync_vsi_filters(vsi)) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		ret = -EIO;
		goto error_param;
	}
	ether_addr_copy(vf->default_lan_addr.addr, mac);

	if (is_zero_ether_addr(mac)) {
		vf->pf_set_mac = false;
		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
	} else {
		vf->pf_set_mac = true;
		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
			 mac, vf_id);
	}

	/* Force the VF interface down so it has to come back up with the
	 * new MAC address
	 */
	i40e_vc_disable_vf(vf);
	dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");

error_param:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
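
/* Usage note (assumption: standard iproute2 tooling): this ndo is what
 * runs when an administrator types
 *
 *	ip link set <pf-netdev> vf <n> mac <address>
 *
 * on the PF. Passing an all-zero address clears the PF-administered
 * MAC (pf_set_mac is dropped), allowing the VF to set its own address
 * again after the forced reset that ends the function above.
 */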

/**
 * i40e_vsi_has_vlans - True if VSI has configured VLANs
 * @vsi: pointer to the vsi
 *
 * Check if a VSI has configured any VLANs. False if we have a port VLAN or if
 * we have no configured VLANs. Do not call while holding the
 * mac_filter_hash_lock.
 */
static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
{
	bool have_vlans;

	/* If we have a port VLAN, then the VSI cannot have any VLANs
	 * configured, as all MAC/VLAN filters will be assigned to the PVID.
	 */
	if (vsi->info.pvid)
		return false;

	/* Since we don't have a PVID, we know that if the device is in VLAN
	 * mode it must be because of a VLAN filter configured on this VSI.
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	have_vlans = i40e_is_vsi_in_vlan(vsi);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	return have_vlans;
}
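
/* A minimal sketch (not in the driver) of the VLAN-tag packing used by
 * i40e_ndo_set_vf_port_vlan() below: per 802.1Q, the 16-bit tag control
 * word carries the priority above the 12-bit VLAN id, which is exactly
 * what "vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT)" builds and what
 * i40e_ndo_get_vf_config() later unpacks with I40E_VLAN_MASK and
 * I40E_PRIORITY_MASK. The helper names here are hypothetical.
 */
static u16 __maybe_unused i40e_example_pack_vlanprio(u16 vlan_id, u8 qos)
{
	return vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
}

static void __maybe_unused
i40e_example_unpack_vlanprio(u16 vlanprio, u16 *vlan_id, u8 *qos)
{
	*vlan_id = vlanprio & I40E_VLAN_MASK;
	*qos = (vlanprio & I40E_PRIORITY_MASK) >> I40E_VLAN_PRIORITY_SHIFT;
}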

/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN identifier
 * @qos: priority setting
 * @vlan_proto: vlan protocol
 *
 * program VF vlan id and/or qos
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
			      u16 vlan_id, u8 qos, __be16 vlan_proto)
{
	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	bool allmulti = false, alluni = false;
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error_pvid;

	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	if (vlan_proto != htons(ETH_P_8021Q)) {
		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
		ret = -EPROTONOSUPPORT;
		goto error_pvid;
	}

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_pvid;
	}

	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
		/* duplicate request, so just return success */
		goto error_pvid;

	if (i40e_vsi_has_vlans(vsi)) {
		dev_err(&pf->pdev->dev,
			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
			vf_id);
		/* Administrator error - knock the VF offline until the
		 * network is reconfigured correctly and the VF driver is
		 * reloaded.
		 */
		i40e_vc_disable_vf(vf);
		/* During reset the VF got a new VSI, so refresh the pointer. */
		vsi = pf->vsi[vf->lan_vsi_idx];
	}

	/* Locked once because multiple functions below iterate list */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* Check for condition where there was already a port VLAN ID
	 * filter set and now it is being deleted by setting it to zero.
	 * Additionally check for the condition where there was a port
	 * VLAN but now there is a new and different port VLAN being set.
	 * Before deleting all the old VLAN filters we must add new ones
	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
	 * MAC addresses deleted.
	 */
	if ((!(vlan_id || qos) ||
	     vlanprio != le16_to_cpu(vsi->info.pvid)) &&
	    vsi->info.pvid) {
		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}
	}

	if (vsi->info.pvid) {
		/* remove all filters on the old VLAN */
		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
					   VLAN_VID_MASK));
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* disable promisc modes in case they were enabled */
	ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
					      allmulti, alluni);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
		goto error_pvid;
	}

	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi, vlanprio);
	else
		i40e_vsi_remove_pvid(vsi);
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter for each MAC */
		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}

		/* remove the previously added non-VLAN MAC filters */
		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
		alluni = true;

	if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
		allmulti = true;

	/* Schedule the worker thread to take care of applying changes */
	i40e_service_event_schedule(vsi->back);

	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}

	/* The Port VLAN needs to be saved across resets the same as the
	 * default LAN MAC address.
	 */
	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);

	ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
		goto error_pvid;
	}

	ret = 0;

error_pvid:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}

/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: Minimum Tx rate
 * @max_tx_rate: Maximum Tx rate
 *
 * configure VF Tx rate
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
		       int max_tx_rate)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error;

	if (min_tx_rate) {
		dev_err(&pf->pdev->dev, "Invalid min Tx rate (%d) specified for VF %d; only 0 is supported.\n",
			min_tx_rate, vf_id);
		ret = -EINVAL;
		goto error;
	}

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error;
	}

	ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
	if (ret)
		goto error;

	vf->tx_rate = max_tx_rate;
error:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}

/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error_param;

	vf = &pf->vf[vf_id];
	/* first vsi is always the LAN vsi */
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		ret = -ENOENT;
		goto error_param;
	}

	ivi->vf = vf_id;

	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);

	ivi->max_tx_rate = vf->tx_rate;
	ivi->min_tx_rate = 0;
	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
		   I40E_VLAN_PRIORITY_SHIFT;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->spoofchk = vf->spoofchk;
	ivi->trusted = vf->trusted;
	ret = 0;

error_param:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
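
/* Usage note (assumption: standard iproute2 tooling): the two ndo
 * callbacks above are typically driven by commands such as
 *
 *	ip link set <pf-netdev> vf <n> max_tx_rate <Mbps>
 *	ip link show <pf-netdev>
 *
 * As i40e_ndo_set_vf_bw() shows, only a maximum rate can be
 * programmed; any non-zero min_tx_rate is rejected with -EINVAL.
 */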

/**
 * i40e_ndo_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link: required link state
 *
 * Set the link state of a specified VF, regardless of physical link state
 **/
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct virtchnl_pf_event pfe;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int abs_vf_id;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_out;
	}

	vf = &pf->vf[vf_id];
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		pfe.event_data.link_event.link_status =
			pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			(enum virtchnl_link_speed)
			pf->hw.phy.link_info.link_speed;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		pfe.event_data.link_event.link_status = true;
		pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		pfe.event_data.link_event.link_status = false;
		pfe.event_data.link_event.link_speed = 0;
		break;
	default:
		ret = -EINVAL;
		goto error_out;
	}
	/* Notify the VF of its new link state */
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);

error_out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
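
/* Usage note (assumption: standard iproute2 tooling): link forcing is
 * requested with
 *
 *	ip link set <pf-netdev> vf <n> state auto|enable|disable
 *
 * When the state is forced up, the code above has no real per-VF link
 * speed to report, so it advertises a nominal VIRTCHNL_LINK_SPEED_40GB
 * to the VF.
 */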

/**
 * i40e_ndo_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @enable: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 **/
int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vsi_context ctxt;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	vf = &pf->vf[vf_id];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto out;
	}

	if (enable == vf->spoofchk)
		goto out;

	vf->spoofchk = enable;
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	if (enable)
		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
			ret);
		ret = -EIO;
	}
out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}

/**
 * i40e_ndo_set_vf_trust
 * @netdev: network interface device structure of the pf
 * @vf_id: VF identifier
 * @setting: trust setting
 *
 * Enable or disable VF trust setting
 **/
int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
		ret = -EINVAL;
		goto out;
	}

	vf = &pf->vf[vf_id];

	if (setting == vf->trusted)
		goto out;

	vf->trusted = setting;
	i40e_vc_disable_vf(vf);
	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
		 vf_id, setting ? "" : "un");

	if (vf->adq_enabled && !vf->trusted) {
		dev_info(&pf->pdev->dev,
			 "VF %u no longer Trusted, deleting all cloud filters\n",
			 vf_id);
		i40e_del_all_cloud_filters(vf);
	}

out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
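
/* Context sketch (illustration): the i40e_ndo_* handlers in this file
 * are exported through the PF netdev's operations table in
 * i40e_main.c, roughly:
 *
 *	static const struct net_device_ops i40e_netdev_ops = {
 *		...
 *		.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
 *		.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
 *		.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
 *		.ndo_get_vf_config	= i40e_ndo_get_vf_config,
 *		.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
 *		.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
 *		.ndo_set_vf_trust	= i40e_ndo_set_vf_trust,
 *		...
 *	};
 *
 * which is how the "ip link set ... vf ..." family of commands reaches
 * this code.
 */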