// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
		/* Not all VFs are enabled, so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}
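/* Note: the admin queue addresses VFs by their absolute id, i.e. the
 * PF-relative vf_id plus the PF's vf_base_id from the function
 * capabilities. Every i40e_aq_send_msg_to_vf() call in this file
 * performs that translation first.
 */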
/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;
	if (vf->link_forced) {
		pfe.event_data.link_event.link_status = vf->link_up;
		pfe.event_data.link_event.link_speed =
			(vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
	} else {
		pfe.event_data.link_event.link_status =
			ls->link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			i40e_virtchnl_link_speed(ls->link_speed);
	}
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}
/***********************misc routines*****************************/
/**
 * i40e_vc_disable_vf
 * @vf: pointer to the VF info
 *
 * Disable the VF through a SW reset.
 **/
static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
{
	int i;

	i40e_vc_notify_vf_reset(vf);

	/* We want to ensure that an actual reset is initiated after this
	 * function is called. However, we do not want to wait forever, so
	 * we'll give a reasonable amount of time and print a message if we
	 * failed to ensure a reset.
	 */
	for (i = 0; i < 20; i++) {
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

	dev_warn(&vf->pf->pdev->dev,
		 "Failed to initiate reset for VF %d after 200 milliseconds\n",
		 vf->vf_id);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u16 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}
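/* For example, with a contiguous mapping where queue_mapping[0] is 64
 * (a hypothetical PF queue base), VSI-relative queue 3 resolves to PF
 * queue 67; with a noncontiguous mapping each VSI queue is instead
 * looked up individually in the queue_mapping[] table.
 */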
/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queue_id: queue number
 *
 * wrapper function to get pf_queue_id handling ADq code as well
 **/
static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
{
	int i;

	if (vf->adq_enabled) {
		/* Although the VF considers all the queues (which can be 1
		 * to 16) as its own, they may actually belong to different
		 * VSIs (up to 4). We need to find which queue belongs to
		 * which VSI.
		 */
		for (i = 0; i < vf->num_tc; i++) {
			if (queue_id < vf->ch[i].num_qps) {
				vsi_id = vf->ch[i].vsi_id;
				break;
			}
			/* find the right queue id which is relative to a
			 * given VSI.
			 */
			queue_id -= vf->ch[i].num_qps;
		}
	}

	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
}

/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_get_real_pf_qid(vf,
							   vsi_id,
							   vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		      (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		      (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		      BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* if the vf is running in polling mode and using interrupt zero,
	 * need to disable auto-mask on enabling zero interrupt for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}
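/* The linked list above is built from a bitmap in which each VSI queue
 * occupies I40E_VIRTCHNL_SUPPORTED_QTYPES (two) consecutive bits: bit
 * 2 * q for the RX queue and bit 2 * q + 1 for the TX queue. Walking
 * the set bits in order therefore interleaves RX and TX entries into a
 * single hardware queue list headed by VPINT_LNKLST0/LNKLSTN.
 */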
/**
 * i40e_release_iwarp_qvlist
 * @vf: pointer to the VF.
 *
 **/
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_iwarp_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
				       >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}
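/* virtchnl_iwarp_qvlist_info ends in a one-element qv_info[] array, so
 * the allocation in i40e_config_iwarp_qvlist() below sizes the copy as
 * the struct itself plus (num_vectors - 1) additional qv_info entries.
 */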
/**
 * i40e_config_iwarp_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
				    struct virtchnl_iwarp_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_iwarp_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	u32 msix_vf, size;
	int ret = 0;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	if (qvlist_info->num_vectors > msix_vf) {
		dev_warn(&pf->pdev->dev,
			 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
			 qvlist_info->num_vectors,
			 msix_vf);
		ret = -EINVAL;
		goto err_out;
	}

	size = sizeof(struct virtchnl_iwarp_qvlist_info) +
	       (sizeof(struct virtchnl_iwarp_qv_info) *
	       (qvlist_info->num_vectors - 1));
	kfree(vf->qvlist_info);
	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
	if (!vf->qvlist_info) {
		ret = -ENOMEM;
		goto err_out;
	}
	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, v_idx)) {
			ret = -EINVAL;
			goto err_free;
		}

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
			     I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
			      I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err_free:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
err_out:
	return ret;
}

/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}
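/* Note: the HMC queue contexts store ring base addresses in units of
 * 128 bytes, which is why dma_ring_addr is divided by 128 above and in
 * the RX path below; the rings the VF provides must therefore be
 * 128-byte aligned.
 */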
/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u64 max_tx_rate = 0;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
			     vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}

	if (!idx) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed, the VSI port VLAN configuration
		 * was destroyed. Check if there is a port VLAN and
		 * restore the VSI configuration if needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
		/* program mac filter only for VF VSI */
		ret = i40e_sync_vsi_filters(vsi);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
	}

	/* store the VSI index and id for ADq; don't apply the mac filter
	 * for these additional VSIs
	 */
	if (vf->adq_enabled) {
		vf->ch[idx].vsi_idx = vsi->idx;
		vf->ch[idx].vsi_id = vsi->id;
	}

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		max_tx_rate = vf->tx_rate;
	} else if (vf->ch[idx].max_tx_rate) {
		max_tx_rate = vf->ch[idx].max_tx_rate;
	}

	if (max_tx_rate) {
		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  max_tx_rate, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}
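/* The VF's max_tx_rate is kept in Mbps; dividing by
 * I40E_BW_CREDIT_DIVISOR converts it into the bandwidth-credit units
 * (50 Mbps per credit in this driver) that
 * i40e_aq_config_vsi_bw_limit() expects.
 */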
/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the first part, VSILAN_QTABLE, mapping pf queues
 * to the VSI.
 **/
static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qps;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < 7; j++) {
			if (j * 2 >= qps) {
				/* end of list */
				reg = 0x07FF07FF;
			} else {
				u16 qid = i40e_vc_get_pf_queue_id(vf,
								  vsi_id,
								  j * 2);
				reg = qid;
				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      (j * 2) + 1);
				reg |= qid << 16;
			}
			i40e_write_rx_ctl(hw,
					  I40E_VSILAN_QTABLE(j, vsi_id),
					  reg);
		}
	}
}
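/* Each VSILAN_QTABLE register packs two PF queue indices: one in the
 * low 16 bits and one starting at bit 16, so seven registers cover up
 * to 14 queues per VSI. 0x07FF in either slot marks end-of-list, hence
 * the 0x07FF07FF fill above once j * 2 reaches the allocated count.
 */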
/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the second part, VPLAN_QTABLE, and completes the
 * VF mappings.
 **/
static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_qps = 0;
	u32 qps, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qid;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < qps; j++) {
			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);

			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
			     reg);
			total_qps++;
		}
	}
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	i40e_map_pf_to_vf_queues(vf);
	i40e_map_pf_queues_to_vsi(vf);

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}
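/* Taken together, the two tables above provide both halves of the
 * mapping: VPLAN_QTABLE translates a VF-relative queue number to a PF
 * queue, while VSILAN_QTABLE ties those PF queues back to the VSI.
 * i40e_enable_vf_mappings() programs both and then flushes the writes.
 */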
/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, j, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default so
	 * do the accounting here when we're about to free them.
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
		vf->num_mac = 0;
	}

	/* do the accounting and remove additional ADq VSI's */
	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
		for (j = 0; j < vf->num_tc; j++) {
			/* At this point VSI0 is already released so don't
			 * release it again and only clear their values in
			 * structure variables
			 */
			if (j)
				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
			vf->ch[j].vsi_idx = 0;
			vf->ch[j].vsi_id = 0;
		}
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						     (vf->vf_id))
						    + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret, idx;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, 0);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* allocate additional VSIs based on tc information for ADq */
	if (vf->adq_enabled) {
		if (pf->queues_left >=
		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
			/* TC 0 always belongs to VF VSI */
			for (idx = 1; idx < vf->num_tc; idx++) {
				ret = i40e_alloc_vsi_res(vf, idx);
				if (ret)
					goto error_alloc;
			}
			/* send correct number of queues */
			total_queue_pairs = I40E_MAX_VF_QUEUES;
		} else {
			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
				 vf->vf_id);
			vf->adq_enabled = false;
		}
	}

	/* We account for each VF to get a default number of queue pairs. If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * HW.
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}
#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}

static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi);
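/* The loop above peeks at the VF's PCI config space through the
 * PF_PCI_CIAA/CIAD window: CIAA selects the register at offset
 * VF_DEVICE_STATUS for the given absolute VF, and CIAD returns its
 * contents. Bit 0x20 corresponds to the PCIe "Transactions Pending"
 * flag, so the poll exits once no VF transactions remain in flight.
 */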
/**
 * i40e_config_vf_promiscuous_mode
 * @vf: pointer to the VF info
 * @vsi_id: VSI id
 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
 * @alluni: set MAC L2 layer unicast promiscuous enable/disable
 *
 * Called from the VF to configure the promiscuous mode of
 * VF vsis and from the VF reset path to reset promiscuous mode.
 **/
static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
						   u16 vsi_id,
						   bool allmulti,
						   bool alluni)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_mac_filter *f;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int bkt;

	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
		return I40E_ERR_PARAM;

	if (vf->port_vlan_id) {
		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
							    allmulti,
							    vf->port_vlan_id,
							    NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
			return aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
							    alluni,
							    vf->port_vlan_id,
							    NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
		}
		return aq_ret;
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
				continue;
			aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
								    vsi->seid,
								    allmulti,
								    f->vlan,
								    NULL);
			if (aq_ret) {
				int aq_err = pf->hw.aq.asq_last_status;

				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
			}

			aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
								    vsi->seid,
								    alluni,
								    f->vlan,
								    NULL);
			if (aq_ret) {
				int aq_err = pf->hw.aq.asq_last_status;

				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
			}
		}
		return aq_ret;
	}
	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, allmulti,
						       NULL);
	if (aq_ret) {
		int aq_err = pf->hw.aq.asq_last_status;

		dev_err(&pf->pdev->dev,
			"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
			vf->vf_id,
			i40e_stat_str(&pf->hw, aq_ret),
			i40e_aq_str(&pf->hw, aq_err));
		return aq_ret;
	}

	aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, alluni,
						     NULL, true);
	if (aq_ret) {
		int aq_err = pf->hw.aq.asq_last_status;

		dev_err(&pf->pdev->dev,
			"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
			vf->vf_id,
			i40e_stat_str(&pf->hw, aq_ret),
			i40e_aq_str(&pf->hw, aq_err));
	}

	return aq_ret;
}
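/* The function above applies promiscuous settings with a precedence:
 * if a port VLAN is assigned, promiscuous mode is scoped to that VLAN
 * only; otherwise, if the VF has VLAN filters, it is applied per VLAN;
 * only a VF with neither gets VSI-wide unicast/multicast promiscuous
 * mode.
 */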
/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;

	/* warn the VF */
	clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer
	 * to do it earlier to give any VF config functions that may still
	 * be running at this point some time to finish.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}

/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* disable promisc modes in case they were enabled */
	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
					&vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}
/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Returns true if the VF is reset, false otherwise.
 **/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	/* If the VFs have been disabled, this means something else is
	 * resetting the VF, so we shouldn't continue.
	 */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}
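/* Both the single-VF and all-VF reset paths follow the same
 * trigger/wait/cleanup sequence and serialize against each other with
 * the __I40E_VF_DISABLE state bit: whichever path sets the bit first
 * owns the reset, and the other backs off (or, in i40e_free_vfs(),
 * spins until the bit is released).
 */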
/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 **/
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int i, v;
	u32 reg;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_trigger_vf_reset(&pf->vf[v], flr);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence, beginning with the VF which
		 * failed the previous check.
		 */
		while (v < pf->num_alloc_vfs) {
			vf = &pf->vf[v];
			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
				break;

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			pf->vf[v].vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* HW may need up to 50ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_cleanup_reset_vf(&pf->vf[v]);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;
	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		usleep_range(1000, 2000);

	i40e_notify_client_of_vf_enable(pf, 0);

	/* Amortize wait time by stopping all VFs at the same time */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, pf->state);
}
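/* GLGEN_VFLRSTAT behaves as a write-1-to-clear status register: writing
 * BIT(bit_idx) above acknowledges the VFLR event for that VF. Stale,
 * unacknowledged VFLR bits would otherwise be misinterpreted when
 * SR-IOV is re-enabled later.
 */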
#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;

		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* VF resources get allocated during reset */
	i40e_reset_all_vfs(pf, false);

	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}

#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
			 num_vfs, pf->num_req_vfs);
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}

/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	if (num_vfs) {
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
		}
		ret = i40e_pci_sriov_enable(pdev, num_vfs);
		goto sriov_configure_out;
	}

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
		i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		ret = -EINVAL;
		goto sriov_configure_out;
	}
sriov_configure_out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}

/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	i40e_status aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
			 vf->vf_id, v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
		/* reset the invalid counter, if a valid message is received. */
		vf->num_invalid_msgs = 0;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d aq_err %d\n",
			 vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}
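/* Every response funnels through i40e_vc_send_msg_to_vf(), which also
 * acts as a crude misbehavior throttle: once a VF accumulates more than
 * I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED consecutive failed requests it
 * is marked I40E_VF_STATE_DISABLED, and a single successful request
 * resets the counter.
 */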
/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct virtchnl_version_info));
}

/**
 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
 * @vf: pointer to VF structure
 **/
static void i40e_del_qch(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	/* The first element in the array belongs to the primary VF VSI and
	 * we shouldn't delete it. We should, however, delete the rest of
	 * the VSIs created.
	 */
	for (i = 1; i < vf->num_tc; i++) {
		if (vf->ch[i].vsi_idx) {
			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
			vf->ch[i].vsi_idx = 0;
			vf->ch[i].vsi_id = 0;
		}
	}
}
/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int num_vsis = 1;
	int len = 0;
	int ret;

	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = (sizeof(struct virtchnl_vf_resource) +
	       sizeof(struct virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	if (i40e_vf_client_capable(pf, vf->vf_id) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	} else {
		clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
			dev_err(&pf->pdev->dev,
				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
				vf->vf_id);
			aq_ret = I40E_ERR_PARAM;
			goto err;
		}
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
	}

	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
			vfres->vf_cap_flags |=
					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;

	if (vf->lan_vsi_idx) {
		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
		/* VFs only use TC 0 */
		vfres->vsi_res[0].qset_handle
					  = le16_to_cpu(vsi->info.qs_handle[0]);
		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
				vf->default_lan_addr.addr);
	}
	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the VF */
	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; 1934 vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE; 1935 vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE; 1936 1937 if (vf->lan_vsi_idx) { 1938 vfres->vsi_res[0].vsi_id = vf->lan_vsi_id; 1939 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; 1940 vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs; 1941 /* VFs only use TC 0 */ 1942 vfres->vsi_res[0].qset_handle 1943 = le16_to_cpu(vsi->info.qs_handle[0]); 1944 ether_addr_copy(vfres->vsi_res[0].default_mac_addr, 1945 vf->default_lan_addr.addr); 1946 } 1947 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); 1948 1949 err: 1950 /* send the response back to the VF */ 1951 ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, 1952 aq_ret, (u8 *)vfres, len); 1953 1954 kfree(vfres); 1955 return ret; 1956 } 1957 1958 /** 1959 * i40e_vc_reset_vf_msg 1960 * @vf: pointer to the VF info 1961 * 1962 * called from the VF to reset itself; 1963 * unlike other virtchnl messages, the PF driver 1964 * doesn't send a response back to the VF 1965 **/ 1966 static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) 1967 { 1968 if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 1969 i40e_reset_vf(vf, false); 1970 } 1971 1972 /** 1973 * i40e_getnum_vf_vsi_vlan_filters 1974 * @vsi: pointer to the VSI 1975 * 1976 * called to get the number of VLANs offloaded on this VF 1977 **/ 1978 static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) 1979 { 1980 struct i40e_mac_filter *f; 1981 int num_vlans = 0, bkt; 1982 1983 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { 1984 if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) 1985 num_vlans++; 1986 } 1987 1988 return num_vlans; 1989 } 1990 1991 /** 1992 * i40e_vc_config_promiscuous_mode_msg 1993 * @vf: pointer to the VF info 1994 * @msg: pointer to the msg buffer 1995 * 1996 * called from the VF to configure the promiscuous mode of 1997 * VF VSIs 1998 **/ 1999 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) 2000 { 2001 struct virtchnl_promisc_info *info = 2002 (struct virtchnl_promisc_info *)msg; 2003 struct i40e_pf *pf = vf->pf; 2004 i40e_status aq_ret = 0; 2005 bool allmulti = false; 2006 bool alluni = false; 2007 2008 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2009 aq_ret = I40E_ERR_PARAM; 2010 goto err_out; 2011 } 2012 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 2013 dev_err(&pf->pdev->dev, 2014 "Unprivileged VF %d is attempting to configure promiscuous mode\n", 2015 vf->vf_id); 2016 2017 /* Lie to the VF on purpose, because this is an error we can 2018 * ignore. An unprivileged VF is a host policy matter, not a virtual channel error.
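* Replying with success simply leaves the VF's promiscuous state unchanged and avoids pointless retries from the VF driver.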
2019 */ 2020 aq_ret = 0; 2021 goto err_out; 2022 } 2023 2024 if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) { 2025 aq_ret = I40E_ERR_PARAM; 2026 goto err_out; 2027 } 2028 2029 if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) { 2030 aq_ret = I40E_ERR_PARAM; 2031 goto err_out; 2032 } 2033 2034 /* Multicast promiscuous handling */ 2035 if (info->flags & FLAG_VF_MULTICAST_PROMISC) 2036 allmulti = true; 2037 2038 if (info->flags & FLAG_VF_UNICAST_PROMISC) 2039 alluni = true; 2040 aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti, 2041 alluni); 2042 if (!aq_ret) { 2043 if (allmulti) { 2044 dev_info(&pf->pdev->dev, 2045 "VF %d successfully set multicast promiscuous mode\n", 2046 vf->vf_id); 2047 set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); 2048 } else { 2049 dev_info(&pf->pdev->dev, 2050 "VF %d successfully unset multicast promiscuous mode\n", 2051 vf->vf_id); 2052 clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); 2053 } 2054 if (alluni) { 2055 dev_info(&pf->pdev->dev, 2056 "VF %d successfully set unicast promiscuous mode\n", 2057 vf->vf_id); 2058 set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); 2059 } else { 2060 dev_info(&pf->pdev->dev, 2061 "VF %d successfully unset unicast promiscuous mode\n", 2062 vf->vf_id); 2063 clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); 2064 } 2065 } 2066 err_out: 2067 /* send the response to the VF */ 2068 return i40e_vc_send_resp_to_vf(vf, 2069 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, 2070 aq_ret); 2071 } 2072 2073 /** 2074 * i40e_vc_config_queues_msg 2075 * @vf: pointer to the VF info 2076 * @msg: pointer to the msg buffer 2077 * 2078 * called from the VF to configure the rx/tx 2079 * queues 2080 **/ 2081 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) 2082 { 2083 struct virtchnl_vsi_queue_config_info *qci = 2084 (struct virtchnl_vsi_queue_config_info *)msg; 2085 struct virtchnl_queue_pair_info *qpi; 2086 struct i40e_pf *pf = vf->pf; 2087 u16 vsi_id, vsi_queue_id = 0; 2088 u16 num_qps_all = 0; 2089 i40e_status aq_ret = 0; 2090 int i, j = 0, idx = 0; 2091 2092 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2093 aq_ret = I40E_ERR_PARAM; 2094 goto error_param; 2095 } 2096 2097 if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) { 2098 aq_ret = I40E_ERR_PARAM; 2099 goto error_param; 2100 } 2101 2102 if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) { 2103 aq_ret = I40E_ERR_PARAM; 2104 goto error_param; 2105 } 2106 2107 if (vf->adq_enabled) { 2108 for (i = 0; i < I40E_MAX_VF_VSI; i++) 2109 num_qps_all += vf->ch[i].num_qps; 2110 if (num_qps_all != qci->num_queue_pairs) { 2111 aq_ret = I40E_ERR_PARAM; 2112 goto error_param; 2113 } 2114 } 2115 2116 vsi_id = qci->vsi_id; 2117 2118 for (i = 0; i < qci->num_queue_pairs; i++) { 2119 qpi = &qci->qpair[i]; 2120 2121 if (!vf->adq_enabled) { 2122 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, 2123 qpi->txq.queue_id)) { 2124 aq_ret = I40E_ERR_PARAM; 2125 goto error_param; 2126 } 2127 2128 vsi_queue_id = qpi->txq.queue_id; 2129 2130 if (qpi->txq.vsi_id != qci->vsi_id || 2131 qpi->rxq.vsi_id != qci->vsi_id || 2132 qpi->rxq.queue_id != vsi_queue_id) { 2133 aq_ret = I40E_ERR_PARAM; 2134 goto error_param; 2135 } 2136 } 2137 2138 if (vf->adq_enabled) 2139 vsi_id = vf->ch[idx].vsi_id; 2140 2141 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id, 2142 &qpi->rxq) || 2143 i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id, 2144 &qpi->txq)) { 2145 aq_ret = I40E_ERR_PARAM; 2146 goto error_param; 2147 } 2148 2149 /* For ADq there can be up to 4 VSIs with max 4 queues each.
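* (I40E_MAX_VF_VSI VSIs times I40E_DEFAULT_QUEUES_PER_VF queues, i.e. I40E_MAX_VF_QUEUES queue pairs in total.)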
2150 * The VF does not know about these additional VSIs and all 2151 * it cares about is its own queues. The PF configures these queues 2152 * to the appropriate VSIs based on the TC mapping 2153 */ 2154 if (vf->adq_enabled) { 2155 if (j == (vf->ch[idx].num_qps - 1)) { 2156 idx++; 2157 j = 0; /* resetting the queue count */ 2158 vsi_queue_id = 0; 2159 } else { 2160 j++; 2161 vsi_queue_id++; 2162 } 2163 } 2164 } 2165 /* set VSI num_queue_pairs in use to the number configured by the VF */ 2166 if (!vf->adq_enabled) { 2167 pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = 2168 qci->num_queue_pairs; 2169 } else { 2170 for (i = 0; i < vf->num_tc; i++) 2171 pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs = 2172 vf->ch[i].num_qps; 2173 } 2174 2175 error_param: 2176 /* send the response to the VF */ 2177 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, 2178 aq_ret); 2179 } 2180 2181 /** 2182 * i40e_validate_queue_map 2183 * @vf: pointer to the VF info * @vsi_id: vsi id 2184 * @queuemap: Tx or Rx queue map 2185 * 2186 * check if Tx or Rx queue map is valid 2187 **/ 2188 static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id, 2189 unsigned long queuemap) 2190 { 2191 u16 vsi_queue_id, queue_id; 2192 2193 for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) { 2194 if (vf->adq_enabled) { 2195 vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id; 2196 queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF); 2197 } else { 2198 queue_id = vsi_queue_id; 2199 } 2200 2201 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) 2202 return -EINVAL; 2203 } 2204 2205 return 0; 2206 } 2207 2208 /** 2209 * i40e_vc_config_irq_map_msg 2210 * @vf: pointer to the VF info 2211 * @msg: pointer to the msg buffer 2212 * 2213 * called from the VF to configure the irq to 2214 * queue map 2215 **/ 2216 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg) 2217 { 2218 struct virtchnl_irq_map_info *irqmap_info = 2219 (struct virtchnl_irq_map_info *)msg; 2220 struct virtchnl_vector_map *map; 2221 u16 vsi_id; 2222 i40e_status aq_ret = 0; 2223 int i; 2224 2225 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2226 aq_ret = I40E_ERR_PARAM; 2227 goto error_param; 2228 } 2229 2230 if (irqmap_info->num_vectors > 2231 vf->pf->hw.func_caps.num_msix_vectors_vf) { 2232 aq_ret = I40E_ERR_PARAM; 2233 goto error_param; 2234 } 2235 2236 for (i = 0; i < irqmap_info->num_vectors; i++) { 2237 map = &irqmap_info->vecmap[i]; 2238 /* validate msg params */ 2239 if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) || 2240 !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) { 2241 aq_ret = I40E_ERR_PARAM; 2242 goto error_param; 2243 } 2244 vsi_id = map->vsi_id; 2245 2246 if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) { 2247 aq_ret = I40E_ERR_PARAM; 2248 goto error_param; 2249 } 2250 2251 if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) { 2252 aq_ret = I40E_ERR_PARAM; 2253 goto error_param; 2254 } 2255 2256 i40e_config_irq_link_list(vf, vsi_id, map); 2257 } 2258 error_param: 2259 /* send the response to the VF */ 2260 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, 2261 aq_ret); 2262 } 2263 2264 /** 2265 * i40e_ctrl_vf_tx_rings 2266 * @vsi: the SRIOV VSI being configured 2267 * @q_map: bit map of the queues to be enabled 2268 * @enable: start or stop the queue 2269 **/ 2270 static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map, 2271 bool enable) 2272 { 2273 struct i40e_pf *pf = vsi->back; 2274 int ret = 0; 2275 u16 q_id; 2276 2277 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { 2278 ret =
i40e_control_wait_tx_q(vsi->seid, pf, 2279 vsi->base_queue + q_id, 2280 false /* is xdp */, enable); 2281 if (ret) 2282 break; 2283 } 2284 return ret; 2285 } 2286 2287 /** 2288 * i40e_ctrl_vf_rx_rings 2289 * @vsi: the SRIOV VSI being configured 2290 * @q_map: bit map of the queues to be enabled 2291 * @enable: start or stop the queue 2292 **/ 2293 static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map, 2294 bool enable) 2295 { 2296 struct i40e_pf *pf = vsi->back; 2297 int ret = 0; 2298 u16 q_id; 2299 2300 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { 2301 ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id, 2302 enable); 2303 if (ret) 2304 break; 2305 } 2306 return ret; 2307 } 2308 2309 /** 2310 * i40e_vc_enable_queues_msg 2311 * @vf: pointer to the VF info 2312 * @msg: pointer to the msg buffer 2313 * 2314 * called from the VF to enable all or specific queue(s) 2315 **/ 2316 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) 2317 { 2318 struct virtchnl_queue_select *vqs = 2319 (struct virtchnl_queue_select *)msg; 2320 struct i40e_pf *pf = vf->pf; 2321 u16 vsi_id = vqs->vsi_id; 2322 i40e_status aq_ret = 0; 2323 int i; 2324 2325 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2326 aq_ret = I40E_ERR_PARAM; 2327 goto error_param; 2328 } 2329 2330 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2331 aq_ret = I40E_ERR_PARAM; 2332 goto error_param; 2333 } 2334 2335 if (vqs->rx_queues == 0 && vqs->tx_queues == 0) { 2336 aq_ret = I40E_ERR_PARAM; 2337 goto error_param; 2338 } 2339 2340 /* Use the queue bit map sent by the VF */ 2341 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, 2342 true)) { 2343 aq_ret = I40E_ERR_TIMEOUT; 2344 goto error_param; 2345 } 2346 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, 2347 true)) { 2348 aq_ret = I40E_ERR_TIMEOUT; 2349 goto error_param; 2350 } 2351 2352 /* need to start the rings for additional ADq VSIs as well */ 2353 if (vf->adq_enabled) { 2354 /* zero belongs to LAN VSI */ 2355 for (i = 1; i < vf->num_tc; i++) { 2356 if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx])) 2357 aq_ret = I40E_ERR_TIMEOUT; 2358 } 2359 } 2360 2361 error_param: 2362 /* send the response to the VF */ 2363 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, 2364 aq_ret); 2365 } 2366 2367 /** 2368 * i40e_vc_disable_queues_msg 2369 * @vf: pointer to the VF info 2370 * @msg: pointer to the msg buffer 2371 * 2372 * called from the VF to disable all or specific 2373 * queue(s) 2374 **/ 2375 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) 2376 { 2377 struct virtchnl_queue_select *vqs = 2378 (struct virtchnl_queue_select *)msg; 2379 struct i40e_pf *pf = vf->pf; 2380 i40e_status aq_ret = 0; 2381 2382 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2383 aq_ret = I40E_ERR_PARAM; 2384 goto error_param; 2385 } 2386 2387 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2388 aq_ret = I40E_ERR_PARAM; 2389 goto error_param; 2390 } 2391 2392 if ((vqs->rx_queues == 0 && vqs->tx_queues == 0) || 2393 vqs->rx_queues > I40E_MAX_VF_QUEUES || 2394 vqs->tx_queues > I40E_MAX_VF_QUEUES) { 2395 aq_ret = I40E_ERR_PARAM; 2396 goto error_param; 2397 } 2398 2399 /* Use the queue bit map sent by the VF */ 2400 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, 2401 false)) { 2402 aq_ret = I40E_ERR_TIMEOUT; 2403 goto error_param; 2404 } 2405 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, 2406 false)) { 2407 aq_ret = I40E_ERR_TIMEOUT; 2408 goto error_param;
2409 } 2410 error_param: 2411 /* send the response to the VF */ 2412 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, 2413 aq_ret); 2414 } 2415 2416 /** 2417 * i40e_vc_request_queues_msg 2418 * @vf: pointer to the VF info 2419 * @msg: pointer to the msg buffer 2420 * 2421 * VFs get a default number of queues but can use this message to request a 2422 * different number. If the request is successful, the PF will reset the VF and 2423 * return 0. If unsuccessful, the PF will send a message informing the VF of the 2424 * number of available queues and return the result of sending the VF a message. 2425 **/ 2426 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) 2427 { 2428 struct virtchnl_vf_res_request *vfres = 2429 (struct virtchnl_vf_res_request *)msg; 2430 int req_pairs = vfres->num_queue_pairs; 2431 int cur_pairs = vf->num_queue_pairs; 2432 struct i40e_pf *pf = vf->pf; 2433 2434 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 2435 return -EINVAL; 2436 2437 if (req_pairs <= 0) { 2438 dev_err(&pf->pdev->dev, 2439 "VF %d tried to request %d queues. Ignoring.\n", 2440 vf->vf_id, req_pairs); 2441 } else if (req_pairs > I40E_MAX_VF_QUEUES) { 2442 dev_err(&pf->pdev->dev, 2443 "VF %d tried to request more than %d queues.\n", 2444 vf->vf_id, 2445 I40E_MAX_VF_QUEUES); 2446 vfres->num_queue_pairs = I40E_MAX_VF_QUEUES; 2447 } else if (req_pairs - cur_pairs > pf->queues_left) { 2448 dev_warn(&pf->pdev->dev, 2449 "VF %d requested %d more queues, but only %d left.\n", 2450 vf->vf_id, 2451 req_pairs - cur_pairs, 2452 pf->queues_left); 2453 vfres->num_queue_pairs = pf->queues_left + cur_pairs; 2454 } else { 2455 /* successful request */ 2456 vf->num_req_queues = req_pairs; 2457 i40e_vc_notify_vf_reset(vf); 2458 i40e_reset_vf(vf, false); 2459 return 0; 2460 } 2461 2462 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0, 2463 (u8 *)vfres, sizeof(*vfres)); 2464 } 2465 2466 /** 2467 * i40e_vc_get_stats_msg 2468 * @vf: pointer to the VF info 2469 * @msg: pointer to the msg buffer 2470 * 2471 * called from the VF to get VSI stats 2472 **/ 2473 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) 2474 { 2475 struct virtchnl_queue_select *vqs = 2476 (struct virtchnl_queue_select *)msg; 2477 struct i40e_pf *pf = vf->pf; 2478 struct i40e_eth_stats stats; 2479 i40e_status aq_ret = 0; 2480 struct i40e_vsi *vsi; 2481 2482 memset(&stats, 0, sizeof(struct i40e_eth_stats)); 2483 2484 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2485 aq_ret = I40E_ERR_PARAM; 2486 goto error_param; 2487 } 2488 2489 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2490 aq_ret = I40E_ERR_PARAM; 2491 goto error_param; 2492 } 2493 2494 vsi = pf->vsi[vf->lan_vsi_idx]; 2495 if (!vsi) { 2496 aq_ret = I40E_ERR_PARAM; 2497 goto error_param; 2498 } 2499 i40e_update_eth_stats(vsi); 2500 stats = vsi->eth_stats; 2501 2502 error_param: 2503 /* send the response back to the VF */ 2504 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret, 2505 (u8 *)&stats, sizeof(stats)); 2506 } 2507 2508 /* If the VF is not trusted, restrict the number of MAC/VLAN filters it can 2509 * program: 16 for multicast, 1 for its own MAC, 1 for broadcast 2510 */ 2511 #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1) 2512 #define I40E_VC_MAX_VLAN_PER_VF 8 2513 2514 /** 2515 * i40e_check_vf_permission 2516 * @vf: pointer to the VF info 2517 * @al: MAC address list from virtchnl 2518 * 2519 * Check that the given list of MAC addresses is allowed. Will return -EPERM 2520 * if any address in the list is not valid.
Checks the following conditions: 2521 * 2522 * 1) broadcast and zero addresses are never valid 2523 * 2) unicast addresses are not allowed if the VMM has administratively set 2524 * the VF MAC address, unless the VF is marked as privileged. 2525 * 3) There is enough space to add all the addresses. 2526 * 2527 * Note that to guarantee consistency, it is expected that this function be 2528 * called while holding the mac_filter_hash_lock, as otherwise the current 2529 * number of addresses might not be accurate. 2530 **/ 2531 static inline int i40e_check_vf_permission(struct i40e_vf *vf, 2532 struct virtchnl_ether_addr_list *al) 2533 { 2534 struct i40e_pf *pf = vf->pf; 2535 int i; 2536 2537 /* If this VF is not privileged, then we can't add more than a limited 2538 * number of addresses. Check to make sure that the additions do not 2539 * push us over the limit. 2540 */ 2541 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && 2542 (vf->num_mac + al->num_elements) > I40E_VC_MAX_MAC_ADDR_PER_VF) { 2543 dev_err(&pf->pdev->dev, 2544 "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n"); 2545 return -EPERM; 2546 } 2547 2548 for (i = 0; i < al->num_elements; i++) { 2549 u8 *addr = al->list[i].addr; 2550 2551 if (is_broadcast_ether_addr(addr) || 2552 is_zero_ether_addr(addr)) { 2553 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", 2554 addr); 2555 return I40E_ERR_INVALID_MAC_ADDR; 2556 } 2557 2558 /* If the host VMM administrator has set the VF MAC address 2559 * administratively via the ndo_set_vf_mac command then deny 2560 * permission to the VF to add or delete unicast MAC addresses, 2561 * unless the VF is privileged, in which case it can do whatever it wants. 2562 * The VF may request to set the MAC address filter already 2563 * assigned to it so do not return an error in that case. 2564 */ 2565 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && 2566 !is_multicast_ether_addr(addr) && vf->pf_set_mac && 2567 !ether_addr_equal(addr, vf->default_lan_addr.addr)) { 2568 dev_err(&pf->pdev->dev, 2569 "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); 2570 return -EPERM; 2571 } 2572 } 2573 2574 return 0; 2575 } 2576 2577 /** 2578 * i40e_vc_add_mac_addr_msg 2579 * @vf: pointer to the VF info 2580 * @msg: pointer to the msg buffer 2581 * 2582 * add guest mac address filter 2583 **/ 2584 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) 2585 { 2586 struct virtchnl_ether_addr_list *al = 2587 (struct virtchnl_ether_addr_list *)msg; 2588 struct i40e_pf *pf = vf->pf; 2589 struct i40e_vsi *vsi = NULL; 2590 u16 vsi_id = al->vsi_id; 2591 i40e_status ret = 0; 2592 int i; 2593 2594 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2595 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2596 ret = I40E_ERR_PARAM; 2597 goto error_param; 2598 } 2599 2600 vsi = pf->vsi[vf->lan_vsi_idx]; 2601 2602 /* Lock once, because every function inside the for loop accesses the 2603 * VSI's MAC filter list, which needs to be protected by the same lock.
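* (i40e_find_mac() and i40e_add_mac_filter() below both walk mac_filter_hash and expect this lock to be held.)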
2604 */ 2605 spin_lock_bh(&vsi->mac_filter_hash_lock); 2606 2607 ret = i40e_check_vf_permission(vf, al); 2608 if (ret) { 2609 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2610 goto error_param; 2611 } 2612 2613 /* add new addresses to the list */ 2614 for (i = 0; i < al->num_elements; i++) { 2615 struct i40e_mac_filter *f; 2616 2617 f = i40e_find_mac(vsi, al->list[i].addr); 2618 if (!f) { 2619 f = i40e_add_mac_filter(vsi, al->list[i].addr); 2620 2621 if (!f) { 2622 dev_err(&pf->pdev->dev, 2623 "Unable to add MAC filter %pM for VF %d\n", 2624 al->list[i].addr, vf->vf_id); 2625 ret = I40E_ERR_PARAM; 2626 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2627 goto error_param; 2628 } else { 2629 vf->num_mac++; 2630 } 2631 } 2632 } 2633 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2634 2635 /* program the updated filter list */ 2636 ret = i40e_sync_vsi_filters(vsi); 2637 if (ret) 2638 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", 2639 vf->vf_id, ret); 2640 2641 error_param: 2642 /* send the response to the VF */ 2643 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, 2644 ret); 2645 } 2646 2647 /** 2648 * i40e_vc_del_mac_addr_msg 2649 * @vf: pointer to the VF info 2650 * @msg: pointer to the msg buffer 2651 * 2652 * remove guest mac address filter 2653 **/ 2654 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) 2655 { 2656 struct virtchnl_ether_addr_list *al = 2657 (struct virtchnl_ether_addr_list *)msg; 2658 struct i40e_pf *pf = vf->pf; 2659 struct i40e_vsi *vsi = NULL; 2660 u16 vsi_id = al->vsi_id; 2661 i40e_status ret = 0; 2662 int i; 2663 2664 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2665 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2666 ret = I40E_ERR_PARAM; 2667 goto error_param; 2668 } 2669 2670 for (i = 0; i < al->num_elements; i++) { 2671 if (is_broadcast_ether_addr(al->list[i].addr) || 2672 is_zero_ether_addr(al->list[i].addr)) { 2673 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n", 2674 al->list[i].addr, vf->vf_id); 2675 ret = I40E_ERR_INVALID_MAC_ADDR; 2676 goto error_param; 2677 } 2678 2679 if (vf->pf_set_mac && 2680 ether_addr_equal(al->list[i].addr, 2681 vf->default_lan_addr.addr)) { 2682 dev_err(&pf->pdev->dev, 2683 "MAC addr %pM has been set by PF, cannot delete it for VF %d, reset VF to change MAC addr\n", 2684 vf->default_lan_addr.addr, vf->vf_id); 2685 ret = I40E_ERR_PARAM; 2686 goto error_param; 2687 } 2688 } 2689 vsi = pf->vsi[vf->lan_vsi_idx]; 2690 2691 spin_lock_bh(&vsi->mac_filter_hash_lock); 2692 /* delete addresses from the list */ 2693 for (i = 0; i < al->num_elements; i++) 2694 if (i40e_del_mac_filter(vsi, al->list[i].addr)) { 2695 ret = I40E_ERR_INVALID_MAC_ADDR; 2696 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2697 goto error_param; 2698 } else { 2699 vf->num_mac--; 2700 } 2701 2702 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2703 2704 /* program the updated filter list */ 2705 ret = i40e_sync_vsi_filters(vsi); 2706 if (ret) 2707 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", 2708 vf->vf_id, ret); 2709 2710 error_param: 2711 /* send the response to the VF */ 2712 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, 2713 ret); 2714 } 2715 2716 /** 2717 * i40e_vc_add_vlan_msg 2718 * @vf: pointer to the VF info 2719 * @msg: pointer to the msg buffer 2720 * 2721 * program guest vlan id 2722 **/ 2723 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg) 2724 { 2725 struct virtchnl_vlan_filter_list *vfl = 2726 (struct virtchnl_vlan_filter_list *)msg; 2727 struct 
i40e_pf *pf = vf->pf; 2728 struct i40e_vsi *vsi = NULL; 2729 u16 vsi_id = vfl->vsi_id; 2730 i40e_status aq_ret = 0; 2731 int i; 2732 2733 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) && 2734 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 2735 dev_err(&pf->pdev->dev, 2736 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n"); 2737 goto error_param; 2738 } 2739 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2740 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2741 aq_ret = I40E_ERR_PARAM; 2742 goto error_param; 2743 } 2744 2745 for (i = 0; i < vfl->num_elements; i++) { 2746 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 2747 aq_ret = I40E_ERR_PARAM; 2748 dev_err(&pf->pdev->dev, 2749 "invalid VF VLAN id %d\n", vfl->vlan_id[i]); 2750 goto error_param; 2751 } 2752 } 2753 vsi = pf->vsi[vf->lan_vsi_idx]; 2754 if (vsi->info.pvid) { 2755 aq_ret = I40E_ERR_PARAM; 2756 goto error_param; 2757 } 2758 2759 i40e_vlan_stripping_enable(vsi); 2760 for (i = 0; i < vfl->num_elements; i++) { 2761 /* add new VLAN filter */ 2762 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]); 2763 if (!ret) 2764 vf->num_vlan++; 2765 2766 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 2767 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 2768 true, 2769 vfl->vlan_id[i], 2770 NULL); 2771 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 2772 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 2773 true, 2774 vfl->vlan_id[i], 2775 NULL); 2776 2777 if (ret) 2778 dev_err(&pf->pdev->dev, 2779 "Unable to add VLAN filter %d for VF %d, error %d\n", 2780 vfl->vlan_id[i], vf->vf_id, ret); 2781 } 2782 2783 error_param: 2784 /* send the response to the VF */ 2785 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret); 2786 } 2787 2788 /** 2789 * i40e_vc_remove_vlan_msg 2790 * @vf: pointer to the VF info 2791 * @msg: pointer to the msg buffer 2792 * 2793 * remove programmed guest vlan id 2794 **/ 2795 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) 2796 { 2797 struct virtchnl_vlan_filter_list *vfl = 2798 (struct virtchnl_vlan_filter_list *)msg; 2799 struct i40e_pf *pf = vf->pf; 2800 struct i40e_vsi *vsi = NULL; 2801 u16 vsi_id = vfl->vsi_id; 2802 i40e_status aq_ret = 0; 2803 int i; 2804 2805 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2806 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2807 aq_ret = I40E_ERR_PARAM; 2808 goto error_param; 2809 } 2810 2811 for (i = 0; i < vfl->num_elements; i++) { 2812 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 2813 aq_ret = I40E_ERR_PARAM; 2814 goto error_param; 2815 } 2816 } 2817 2818 vsi = pf->vsi[vf->lan_vsi_idx]; 2819 if (vsi->info.pvid) { 2820 if (vfl->num_elements > 1 || vfl->vlan_id[0]) 2821 aq_ret = I40E_ERR_PARAM; 2822 goto error_param; 2823 } 2824 2825 for (i = 0; i < vfl->num_elements; i++) { 2826 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); 2827 vf->num_vlan--; 2828 2829 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 2830 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 2831 false, 2832 vfl->vlan_id[i], 2833 NULL); 2834 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 2835 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 2836 false, 2837 vfl->vlan_id[i], 2838 NULL); 2839 } 2840 2841 error_param: 2842 /* send the response to the VF */ 2843 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret); 2844 } 2845 2846 /** 2847 * i40e_vc_iwarp_msg 2848 * @vf: pointer to the VF info 2849 * @msg: pointer to the msg buffer 2850 * @msglen: msg length 2851 * 2852 * called from the VF for the iwarp 
msgs 2853 **/ 2854 static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 2855 { 2856 struct i40e_pf *pf = vf->pf; 2857 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; 2858 i40e_status aq_ret = 0; 2859 2860 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2861 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { 2862 aq_ret = I40E_ERR_PARAM; 2863 goto error_param; 2864 } 2865 2866 i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id, 2867 msg, msglen); 2868 2869 error_param: 2870 /* send the response to the VF */ 2871 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP, 2872 aq_ret); 2873 } 2874 2875 /** 2876 * i40e_vc_iwarp_qvmap_msg 2877 * @vf: pointer to the VF info 2878 * @msg: pointer to the msg buffer 2879 * @config: config qvmap or release it 2880 * 2881 * called from the VF for the iwarp msgs 2882 **/ 2883 static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config) 2884 { 2885 struct virtchnl_iwarp_qvlist_info *qvlist_info = 2886 (struct virtchnl_iwarp_qvlist_info *)msg; 2887 i40e_status aq_ret = 0; 2888 2889 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2890 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { 2891 aq_ret = I40E_ERR_PARAM; 2892 goto error_param; 2893 } 2894 2895 if (config) { 2896 if (i40e_config_iwarp_qvlist(vf, qvlist_info)) 2897 aq_ret = I40E_ERR_PARAM; 2898 } else { 2899 i40e_release_iwarp_qvlist(vf); 2900 } 2901 2902 error_param: 2903 /* send the response to the VF */ 2904 return i40e_vc_send_resp_to_vf(vf, 2905 config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP : 2906 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, 2907 aq_ret); 2908 } 2909 2910 /** 2911 * i40e_vc_config_rss_key 2912 * @vf: pointer to the VF info 2913 * @msg: pointer to the msg buffer 2914 * 2915 * Configure the VF's RSS key 2916 **/ 2917 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg) 2918 { 2919 struct virtchnl_rss_key *vrk = 2920 (struct virtchnl_rss_key *)msg; 2921 struct i40e_pf *pf = vf->pf; 2922 struct i40e_vsi *vsi = NULL; 2923 u16 vsi_id = vrk->vsi_id; 2924 i40e_status aq_ret = 0; 2925 2926 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2927 !i40e_vc_isvalid_vsi_id(vf, vsi_id) || 2928 (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) { 2929 aq_ret = I40E_ERR_PARAM; 2930 goto err; 2931 } 2932 2933 vsi = pf->vsi[vf->lan_vsi_idx]; 2934 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0); 2935 err: 2936 /* send the response to the VF */ 2937 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, 2938 aq_ret); 2939 } 2940 2941 /** 2942 * i40e_vc_config_rss_lut 2943 * @vf: pointer to the VF info 2944 * @msg: pointer to the msg buffer 2945 * 2946 * Configure the VF's RSS LUT 2947 **/ 2948 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg) 2949 { 2950 struct virtchnl_rss_lut *vrl = 2951 (struct virtchnl_rss_lut *)msg; 2952 struct i40e_pf *pf = vf->pf; 2953 struct i40e_vsi *vsi = NULL; 2954 u16 vsi_id = vrl->vsi_id; 2955 i40e_status aq_ret = 0; 2956 2957 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2958 !i40e_vc_isvalid_vsi_id(vf, vsi_id) || 2959 (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) { 2960 aq_ret = I40E_ERR_PARAM; 2961 goto err; 2962 } 2963 2964 vsi = pf->vsi[vf->lan_vsi_idx]; 2965 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE); 2966 /* send the response to the VF */ 2967 err: 2968 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, 2969 aq_ret); 2970 } 2971 2972 /** 2973 * i40e_vc_get_rss_hena 2974 * @vf: pointer to the VF info 2975 * @msg: pointer to the msg 
buffer 2976 * 2977 * Return the RSS HENA bits allowed by the hardware 2978 **/ 2979 static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg) 2980 { 2981 struct virtchnl_rss_hena *vrh = NULL; 2982 struct i40e_pf *pf = vf->pf; 2983 i40e_status aq_ret = 0; 2984 int len = 0; 2985 2986 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2987 aq_ret = I40E_ERR_PARAM; 2988 goto err; 2989 } 2990 len = sizeof(struct virtchnl_rss_hena); 2991 2992 vrh = kzalloc(len, GFP_KERNEL); 2993 if (!vrh) { 2994 aq_ret = I40E_ERR_NO_MEMORY; 2995 len = 0; 2996 goto err; 2997 } 2998 vrh->hena = i40e_pf_get_default_rss_hena(pf); 2999 err: 3000 /* send the response back to the VF */ 3001 aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS, 3002 aq_ret, (u8 *)vrh, len); 3003 kfree(vrh); 3004 return aq_ret; 3005 } 3006 3007 /** 3008 * i40e_vc_set_rss_hena 3009 * @vf: pointer to the VF info 3010 * @msg: pointer to the msg buffer 3011 * 3012 * Set the RSS HENA bits for the VF 3013 **/ 3014 static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg) 3015 { 3016 struct virtchnl_rss_hena *vrh = 3017 (struct virtchnl_rss_hena *)msg; 3018 struct i40e_pf *pf = vf->pf; 3019 struct i40e_hw *hw = &pf->hw; 3020 i40e_status aq_ret = 0; 3021 3022 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3023 aq_ret = I40E_ERR_PARAM; 3024 goto err; 3025 } 3026 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena); 3027 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id), 3028 (u32)(vrh->hena >> 32)); 3029 3030 /* send the response to the VF */ 3031 err: 3032 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret); 3033 } 3034 3035 /** 3036 * i40e_vc_enable_vlan_stripping 3037 * @vf: pointer to the VF info 3038 * @msg: pointer to the msg buffer 3039 * 3040 * Enable vlan header stripping for the VF 3041 **/ 3042 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg) 3043 { 3044 struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; 3045 i40e_status aq_ret = 0; 3046 3047 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3048 aq_ret = I40E_ERR_PARAM; 3049 goto err; 3050 } 3051 3052 i40e_vlan_stripping_enable(vsi); 3053 3054 /* send the response to the VF */ 3055 err: 3056 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, 3057 aq_ret); 3058 } 3059 3060 /** 3061 * i40e_vc_disable_vlan_stripping 3062 * @vf: pointer to the VF info 3063 * @msg: pointer to the msg buffer 3064 * 3065 * Disable vlan header stripping for the VF 3066 **/ 3067 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg) 3068 { 3069 struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; 3070 i40e_status aq_ret = 0; 3071 3072 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3073 aq_ret = I40E_ERR_PARAM; 3074 goto err; 3075 } 3076 3077 i40e_vlan_stripping_disable(vsi); 3078 3079 /* send the response to the VF */ 3080 err: 3081 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 3082 aq_ret); 3083 } 3084 3085 /** 3086 * i40e_validate_cloud_filter 3087 * @vf: pointer to the VF info 3088 * @tc_filter: pointer to the TC filter from virtchnl 3089 * 3090 * This function validates a cloud filter programmed as a TC filter for ADq 3091 **/ 3092 static int i40e_validate_cloud_filter(struct i40e_vf *vf, 3093 struct virtchnl_filter *tc_filter) 3094 { 3095 struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec; 3096 struct virtchnl_l4_spec data = tc_filter->data.tcp_spec; 3097 struct i40e_pf *pf = vf->pf; 3098 struct i40e_vsi *vsi = NULL; 3099 struct i40e_mac_filter *f; 3100 struct hlist_node
*h; 3101 bool found = false; 3102 int bkt; 3103 3104 if (!tc_filter->action) { 3105 dev_info(&pf->pdev->dev, 3106 "VF %d: Currently ADq doesn't support Drop Action\n", 3107 vf->vf_id); 3108 goto err; 3109 } 3110 3111 /* action_meta is TC number here to which the filter is applied */ 3112 if (!tc_filter->action_meta || 3113 tc_filter->action_meta > I40E_MAX_VF_VSI) { 3114 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n", 3115 vf->vf_id, tc_filter->action_meta); 3116 goto err; 3117 } 3118 3119 /* Check whether the filter is programmed for advanced mode or basic mode. 3120 * There are two ADq modes (for VF only): 3121 * 1. Basic mode: intended to allow as many filter options as possible 3122 * to be added to a VF in Non-trusted mode. The main goal is 3123 * to add filters for its own MAC and VLAN id. 3124 * 2. Advanced mode: allows filters to be applied to a MAC or VLAN 3125 * other than the VF's own. This mode requires the VF to be 3126 * Trusted. 3127 */ 3128 if (mask.dst_mac[0] && !mask.dst_ip[0]) { 3129 vsi = pf->vsi[vf->lan_vsi_idx]; 3130 f = i40e_find_mac(vsi, data.dst_mac); 3131 3132 if (!f) { 3133 dev_info(&pf->pdev->dev, 3134 "Destination MAC %pM doesn't belong to VF %d\n", 3135 data.dst_mac, vf->vf_id); 3136 goto err; 3137 } 3138 3139 if (mask.vlan_id) { 3140 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, 3141 hlist) { 3142 if (f->vlan == ntohs(data.vlan_id)) { 3143 found = true; 3144 break; 3145 } 3146 } 3147 if (!found) { 3148 dev_info(&pf->pdev->dev, 3149 "VF %d doesn't have any VLAN id %u\n", 3150 vf->vf_id, ntohs(data.vlan_id)); 3151 goto err; 3152 } 3153 } 3154 } else { 3155 /* Check if VF is trusted */ 3156 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 3157 dev_err(&pf->pdev->dev, 3158 "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n", 3159 vf->vf_id); 3160 return I40E_ERR_CONFIG; 3161 } 3162 } 3163 3164 if (mask.dst_mac[0] & data.dst_mac[0]) { 3165 if (is_broadcast_ether_addr(data.dst_mac) || 3166 is_zero_ether_addr(data.dst_mac)) { 3167 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n", 3168 vf->vf_id, data.dst_mac); 3169 goto err; 3170 } 3171 } 3172 3173 if (mask.src_mac[0] & data.src_mac[0]) { 3174 if (is_broadcast_ether_addr(data.src_mac) || 3175 is_zero_ether_addr(data.src_mac)) { 3176 dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n", 3177 vf->vf_id, data.src_mac); 3178 goto err; 3179 } 3180 } 3181 3182 if (mask.dst_port & data.dst_port) { 3183 if (!data.dst_port) { 3184 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n", 3185 vf->vf_id); 3186 goto err; 3187 } 3188 } 3189 3190 if (mask.src_port & data.src_port) { 3191 if (!data.src_port) { 3192 dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n", 3193 vf->vf_id); 3194 goto err; 3195 } 3196 } 3197 3198 if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW && 3199 tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) { 3200 dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n", 3201 vf->vf_id); 3202 goto err; 3203 } 3204 3205 if (mask.vlan_id & data.vlan_id) { 3206 if (ntohs(data.vlan_id) > I40E_MAX_VLANID) { 3207 dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n", 3208 vf->vf_id); 3209 goto err; 3210 } 3211 } 3212 3213 return I40E_SUCCESS; 3214 err: 3215 return I40E_ERR_CONFIG; 3216 } 3217 3218 /** 3219 * i40e_find_vsi_from_seid - searches for the VSI with the given seid 3220 * @vf: pointer to the VF info 3221 * @seid: seid of the VSI being searched for 3222 **/ 3223 static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid) 3224 {
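/* Each ADq traffic class is backed by its own channel VSI, so walk the VF's per-TC channel list and match on the switch element ID (seid). */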
3225 struct i40e_pf *pf = vf->pf; 3226 struct i40e_vsi *vsi = NULL; 3227 int i; 3228 3229 for (i = 0; i < vf->num_tc; i++) { 3230 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id); 3231 if (vsi && vsi->seid == seid) 3232 return vsi; 3233 } 3234 return NULL; 3235 } 3236 3237 /** 3238 * i40e_del_all_cloud_filters 3239 * @vf: pointer to the VF info 3240 * 3241 * This function deletes all cloud filters 3242 **/ 3243 static void i40e_del_all_cloud_filters(struct i40e_vf *vf) 3244 { 3245 struct i40e_cloud_filter *cfilter = NULL; 3246 struct i40e_pf *pf = vf->pf; 3247 struct i40e_vsi *vsi = NULL; 3248 struct hlist_node *node; 3249 int ret; 3250 3251 hlist_for_each_entry_safe(cfilter, node, 3252 &vf->cloud_filter_list, cloud_node) { 3253 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid); 3254 3255 if (!vsi) { 3256 dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n", 3257 vf->vf_id, cfilter->seid); 3258 continue; 3259 } 3260 3261 if (cfilter->dst_port) 3262 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, 3263 false); 3264 else 3265 ret = i40e_add_del_cloud_filter(vsi, cfilter, false); 3266 if (ret) 3267 dev_err(&pf->pdev->dev, 3268 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n", 3269 vf->vf_id, i40e_stat_str(&pf->hw, ret), 3270 i40e_aq_str(&pf->hw, 3271 pf->hw.aq.asq_last_status)); 3272 3273 hlist_del(&cfilter->cloud_node); 3274 kfree(cfilter); 3275 vf->num_cloud_filters--; 3276 } 3277 } 3278 3279 /** 3280 * i40e_vc_del_cloud_filter 3281 * @vf: pointer to the VF info 3282 * @msg: pointer to the msg buffer 3283 * 3284 * This function deletes a cloud filter programmed as TC filter for ADq 3285 **/ 3286 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg) 3287 { 3288 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; 3289 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; 3290 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; 3291 struct i40e_cloud_filter cfilter, *cf = NULL; 3292 struct i40e_pf *pf = vf->pf; 3293 struct i40e_vsi *vsi = NULL; 3294 struct hlist_node *node; 3295 i40e_status aq_ret = 0; 3296 int i, ret; 3297 3298 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3299 aq_ret = I40E_ERR_PARAM; 3300 goto err; 3301 } 3302 3303 if (!vf->adq_enabled) { 3304 dev_info(&pf->pdev->dev, 3305 "VF %d: ADq not enabled, can't delete cloud filter\n", 3306 vf->vf_id); 3307 aq_ret = I40E_ERR_PARAM; 3308 goto err; 3309 } 3310 3311 if (i40e_validate_cloud_filter(vf, vcf)) { 3312 dev_info(&pf->pdev->dev, 3313 "VF %d: Invalid input, can't delete cloud filter\n", 3314 vf->vf_id); 3315 aq_ret = I40E_ERR_PARAM; 3316 goto err; 3317 } 3318 3319 memset(&cfilter, 0, sizeof(cfilter)); 3320 /* parse destination mac address */ 3321 for (i = 0; i < ETH_ALEN; i++) 3322 cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; 3323 3324 /* parse source mac address */ 3325 for (i = 0; i < ETH_ALEN; i++) 3326 cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; 3327 3328 cfilter.vlan_id = mask.vlan_id & tcf.vlan_id; 3329 cfilter.dst_port = mask.dst_port & tcf.dst_port; 3330 cfilter.src_port = mask.src_port & tcf.src_port; 3331 3332 switch (vcf->flow_type) { 3333 case VIRTCHNL_TCP_V4_FLOW: 3334 cfilter.n_proto = ETH_P_IP; 3335 if (mask.dst_ip[0] & tcf.dst_ip[0]) 3336 memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip, 3337 ARRAY_SIZE(tcf.dst_ip)); 3338 else if (mask.src_ip[0] & tcf.dst_ip[0]) 3339 memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip, 3340 ARRAY_SIZE(tcf.dst_ip)); 3341 break; 3342 case VIRTCHNL_TCP_V6_FLOW: 3343 cfilter.n_proto = ETH_P_IPV6; 3344
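/* for ipv6 the mask covers all sixteen bytes (four words), so checking the last word is sufficient */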
if (mask.dst_ip[3] & tcf.dst_ip[3]) 3345 memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip, 3346 sizeof(cfilter.ip.v6.dst_ip6)); 3347 if (mask.src_ip[3] & tcf.src_ip[3]) 3348 memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip, 3349 sizeof(cfilter.ip.v6.src_ip6)); 3350 break; 3351 default: 3352 /* TC filter can be configured based on different combinations 3353 * and in this case IP is not a part of filter config 3354 */ 3355 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", 3356 vf->vf_id); 3357 } 3358 3359 /* get the VSI to which the TC belongs */ 3360 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; 3361 cfilter.seid = vsi->seid; 3362 cfilter.flags = vcf->field_flags; 3363 3364 /* Deleting TC filter */ 3365 if (tcf.dst_port) 3366 ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false); 3367 else 3368 ret = i40e_add_del_cloud_filter(vsi, &cfilter, false); 3369 if (ret) { 3370 dev_err(&pf->pdev->dev, 3371 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n", 3372 vf->vf_id, i40e_stat_str(&pf->hw, ret), 3373 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 3374 goto err; 3375 } 3376 3377 hlist_for_each_entry_safe(cf, node, 3378 &vf->cloud_filter_list, cloud_node) { 3379 if (cf->seid != cfilter.seid) 3380 continue; 3381 if (mask.dst_port) 3382 if (cfilter.dst_port != cf->dst_port) 3383 continue; 3384 if (mask.dst_mac[0]) 3385 if (!ether_addr_equal(cf->src_mac, cfilter.src_mac)) 3386 continue; 3387 /* for ipv4 data to be valid, only first byte of mask is set */ 3388 if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0]) 3389 if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip, 3390 ARRAY_SIZE(tcf.dst_ip))) 3391 continue; 3392 /* for ipv6, mask is set for all sixteen bytes (4 words) */ 3393 if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3]) 3394 if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6, 3395 sizeof(cfilter.ip.v6.src_ip6))) 3396 continue; 3397 if (mask.vlan_id) 3398 if (cfilter.vlan_id != cf->vlan_id) 3399 continue; 3400 3401 hlist_del(&cf->cloud_node); 3402 kfree(cf); 3403 vf->num_cloud_filters--; 3404 } 3405 3406 err: 3407 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER, 3408 aq_ret); 3409 } 3410 3411 /** 3412 * i40e_vc_add_cloud_filter 3413 * @vf: pointer to the VF info 3414 * @msg: pointer to the msg buffer 3415 * 3416 * This function adds a cloud filter programmed as TC filter for ADq 3417 **/ 3418 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) 3419 { 3420 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; 3421 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; 3422 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; 3423 struct i40e_cloud_filter *cfilter = NULL; 3424 struct i40e_pf *pf = vf->pf; 3425 struct i40e_vsi *vsi = NULL; 3426 i40e_status aq_ret = 0; 3427 int i, ret; 3428 3429 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3430 aq_ret = I40E_ERR_PARAM; 3431 goto err_out; 3432 } 3433 3434 if (!vf->adq_enabled) { 3435 dev_info(&pf->pdev->dev, 3436 "VF %d: ADq is not enabled, can't apply cloud filter\n", 3437 vf->vf_id); 3438 aq_ret = I40E_ERR_PARAM; 3439 goto err_out; 3440 } 3441 3442 if (i40e_validate_cloud_filter(vf, vcf)) { 3443 dev_info(&pf->pdev->dev, 3444 "VF %d: Invalid input(s), can't apply cloud filter\n", 3445 vf->vf_id); 3446 aq_ret = I40E_ERR_PARAM; 3447 goto err_out; 3448 } 3449 3450 cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL); 3451 if (!cfilter) 3452 return -ENOMEM; 3453 3454 /* parse destination mac address */ 3455 for (i = 0; i < ETH_ALEN; i++) 3456 cfilter->dst_mac[i] = mask.dst_mac[i] &
tcf.dst_mac[i]; 3457 3458 /* parse source mac address */ 3459 for (i = 0; i < ETH_ALEN; i++) 3460 cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; 3461 3462 cfilter->vlan_id = mask.vlan_id & tcf.vlan_id; 3463 cfilter->dst_port = mask.dst_port & tcf.dst_port; 3464 cfilter->src_port = mask.src_port & tcf.src_port; 3465 3466 switch (vcf->flow_type) { 3467 case VIRTCHNL_TCP_V4_FLOW: 3468 cfilter->n_proto = ETH_P_IP; 3469 if (mask.dst_ip[0] & tcf.dst_ip[0]) 3470 memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip, 3471 ARRAY_SIZE(tcf.dst_ip)); 3472 else if (mask.src_ip[0] & tcf.dst_ip[0]) 3473 memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip, 3474 ARRAY_SIZE(tcf.dst_ip)); 3475 break; 3476 case VIRTCHNL_TCP_V6_FLOW: 3477 cfilter->n_proto = ETH_P_IPV6; 3478 if (mask.dst_ip[3] & tcf.dst_ip[3]) 3479 memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip, 3480 sizeof(cfilter->ip.v6.dst_ip6)); 3481 if (mask.src_ip[3] & tcf.src_ip[3]) 3482 memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip, 3483 sizeof(cfilter->ip.v6.src_ip6)); 3484 break; 3485 default: 3486 /* TC filter can be configured based on different combinations 3487 * and in this case IP is not a part of filter config 3488 */ 3489 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", 3490 vf->vf_id); 3491 } 3492 3493 /* get the VSI to which the TC belongs */ 3494 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; 3495 cfilter->seid = vsi->seid; 3496 cfilter->flags = vcf->field_flags; 3497 3498 /* Adding cloud filter programmed as TC filter */ 3499 if (tcf.dst_port) 3500 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true); 3501 else 3502 ret = i40e_add_del_cloud_filter(vsi, cfilter, true); 3503 if (ret) { 3504 dev_err(&pf->pdev->dev, 3505 "VF %d: Failed to add cloud filter, err %s aq_err %s\n", 3506 vf->vf_id, i40e_stat_str(&pf->hw, ret), 3507 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 3508 goto err_free; 3509 } 3510 3511 INIT_HLIST_NODE(&cfilter->cloud_node); 3512 hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list); 3513 /* ownership of the filter passed to the list; clear the local pointer so the err_free kfree() is a no-op */ 3514 cfilter = NULL; 3515 vf->num_cloud_filters++; 3516 err_free: 3517 kfree(cfilter); 3518 err_out: 3519 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER, 3520 aq_ret); 3521 } 3522 3523 /** 3524 * i40e_vc_add_qch_msg: Add queue channel and enable ADq 3525 * @vf: pointer to the VF info 3526 * @msg: pointer to the msg buffer 3527 **/ 3528 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) 3529 { 3530 struct virtchnl_tc_info *tci = 3531 (struct virtchnl_tc_info *)msg; 3532 struct i40e_pf *pf = vf->pf; 3533 struct i40e_link_status *ls = &pf->hw.phy.link_info; 3534 int i, adq_request_qps = 0, speed = 0; 3535 i40e_status aq_ret = 0; 3536 3537 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3538 aq_ret = I40E_ERR_PARAM; 3539 goto err; 3540 } 3541 3542 /* ADq cannot be applied if spoof check is ON */ 3543 if (vf->spoofchk) { 3544 dev_err(&pf->pdev->dev, 3545 "Spoof check is ON, turn it OFF to enable ADq\n"); 3546 aq_ret = I40E_ERR_PARAM; 3547 goto err; 3548 } 3549 3550 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) { 3551 dev_err(&pf->pdev->dev, 3552 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n", 3553 vf->vf_id); 3554 aq_ret = I40E_ERR_PARAM; 3555 goto err; 3556 } 3557 3558 /* max number of traffic classes for VF currently capped at 4 */ 3559 if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) { 3560 dev_err(&pf->pdev->dev, 3561 "VF %d trying to set %u TCs, valid range 1-4 TCs per VF\n", 3562 vf->vf_id,
tci->num_tc); 3563 aq_ret = I40E_ERR_PARAM; 3564 goto err; 3565 } 3566 3567 /* validate queues for each TC */ 3568 for (i = 0; i < tci->num_tc; i++) 3569 if (!tci->list[i].count || 3570 tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) { 3571 dev_err(&pf->pdev->dev, 3572 "VF %d: TC %d trying to set %u queues, valid range 1-4 queues per TC\n", 3573 vf->vf_id, i, tci->list[i].count); 3574 aq_ret = I40E_ERR_PARAM; 3575 goto err; 3576 } 3577 3578 /* ADq needs the max VF queues, but the default number is already allocated, so request only the difference */ 3579 adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF; 3580 3581 if (pf->queues_left < adq_request_qps) { 3582 dev_err(&pf->pdev->dev, 3583 "No queues left to allocate to VF %d\n", 3584 vf->vf_id); 3585 aq_ret = I40E_ERR_PARAM; 3586 goto err; 3587 } else { 3588 /* we need to allocate max VF queues to enable ADq so as to 3589 * make sure ADq enabled VF always gets back queues when it 3590 * goes through a reset. 3591 */ 3592 vf->num_queue_pairs = I40E_MAX_VF_QUEUES; 3593 } 3594 3595 /* get link speed in Mbps to validate the rate limit */ 3596 switch (ls->link_speed) { 3597 case VIRTCHNL_LINK_SPEED_100MB: 3598 speed = SPEED_100; 3599 break; 3600 case VIRTCHNL_LINK_SPEED_1GB: 3601 speed = SPEED_1000; 3602 break; 3603 case VIRTCHNL_LINK_SPEED_10GB: 3604 speed = SPEED_10000; 3605 break; 3606 case VIRTCHNL_LINK_SPEED_20GB: 3607 speed = SPEED_20000; 3608 break; 3609 case VIRTCHNL_LINK_SPEED_25GB: 3610 speed = SPEED_25000; 3611 break; 3612 case VIRTCHNL_LINK_SPEED_40GB: 3613 speed = SPEED_40000; 3614 break; 3615 default: 3616 dev_err(&pf->pdev->dev, 3617 "Cannot detect link speed\n"); 3618 aq_ret = I40E_ERR_PARAM; 3619 goto err; 3620 } 3621 3622 /* parse data from the queue channel info */ 3623 vf->num_tc = tci->num_tc; 3624 for (i = 0; i < vf->num_tc; i++) { 3625 if (tci->list[i].max_tx_rate) { 3626 if (tci->list[i].max_tx_rate > speed) { 3627 dev_err(&pf->pdev->dev, 3628 "Invalid max tx rate %llu specified for VF %d.\n", 3629 tci->list[i].max_tx_rate, 3630 vf->vf_id); 3631 aq_ret = I40E_ERR_PARAM; 3632 goto err; 3633 } else { 3634 vf->ch[i].max_tx_rate = 3635 tci->list[i].max_tx_rate; 3636 } 3637 } 3638 vf->ch[i].num_qps = tci->list[i].count; 3639 } 3640 3641 /* set this flag only after making sure all inputs are sane */ 3642 vf->adq_enabled = true; 3643 /* num_req_queues is set when the user changes the number of queues via 3644 * ethtool, and this causes an issue for the default VSI (which depends 3645 * on this variable) when ADq is enabled, hence reset it.
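* The VF renegotiates its resources, including the new per-TC queue layout, when it comes back from the reset below.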
3646 */ 3647 vf->num_req_queues = 0; 3648 3649 /* reset the VF in order to allocate resources */ 3650 i40e_vc_notify_vf_reset(vf); 3651 i40e_reset_vf(vf, false); 3652 3653 return I40E_SUCCESS; 3654 3655 /* send the response to the VF */ 3656 err: 3657 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS, 3658 aq_ret); 3659 } 3660 3661 /** 3662 * i40e_vc_del_qch_msg 3663 * @vf: pointer to the VF info 3664 * @msg: pointer to the msg buffer 3665 **/ 3666 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) 3667 { 3668 struct i40e_pf *pf = vf->pf; 3669 i40e_status aq_ret = 0; 3670 3671 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3672 aq_ret = I40E_ERR_PARAM; 3673 goto err; 3674 } 3675 3676 if (vf->adq_enabled) { 3677 i40e_del_all_cloud_filters(vf); 3678 i40e_del_qch(vf); 3679 vf->adq_enabled = false; 3680 vf->num_tc = 0; 3681 dev_info(&pf->pdev->dev, 3682 "Deleting Queue Channels and cloud filters for ADq on VF %d\n", 3683 vf->vf_id); 3684 } else { 3685 dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n", 3686 vf->vf_id); 3687 aq_ret = I40E_ERR_PARAM; 3688 } 3689 3690 /* reset the VF in order to allocate resources */ 3691 i40e_vc_notify_vf_reset(vf); 3692 i40e_reset_vf(vf, false); 3693 3694 return I40E_SUCCESS; 3695 3696 err: 3697 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS, 3698 aq_ret); 3699 } 3700 3701 /** 3702 * i40e_vc_process_vf_msg 3703 * @pf: pointer to the PF structure 3704 * @vf_id: source VF id 3705 * @v_opcode: operation code 3706 * @v_retval: unused return value code 3707 * @msg: pointer to the msg buffer 3708 * @msglen: msg length 3709 * 3710 * called from the common aeq/arq handler to 3711 * process request from VF 3712 **/ 3713 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, 3714 u32 __always_unused v_retval, u8 *msg, u16 msglen) 3715 { 3716 struct i40e_hw *hw = &pf->hw; 3717 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id; 3718 struct i40e_vf *vf; 3719 int ret; 3720 3721 pf->vf_aq_requests++; 3722 if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs) 3723 return -EINVAL; 3724 vf = &(pf->vf[local_vf_id]); 3725 3726 /* Check if VF is disabled. 
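* DISABLED is set while the VF is being reset or torn down; drop the message rather than act on state that is about to be reinitialized.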
*/ 3727 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states)) 3728 return I40E_ERR_PARAM; 3729 3730 /* perform basic checks on the msg */ 3731 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); 3732 3733 /* perform additional checks specific to this driver */ 3734 if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) { 3735 struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg; 3736 3737 if (vrk->key_len != I40E_HKEY_ARRAY_SIZE) 3738 ret = -EINVAL; 3739 } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) { 3740 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; 3741 3742 if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) 3743 ret = -EINVAL; 3744 } 3745 3746 if (ret) { 3747 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM); 3748 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", 3749 local_vf_id, v_opcode, msglen); 3750 switch (ret) { 3751 case VIRTCHNL_STATUS_ERR_PARAM: 3752 return -EPERM; 3753 default: 3754 return -EINVAL; 3755 } 3756 } 3757 3758 switch (v_opcode) { 3759 case VIRTCHNL_OP_VERSION: 3760 ret = i40e_vc_get_version_msg(vf, msg); 3761 break; 3762 case VIRTCHNL_OP_GET_VF_RESOURCES: 3763 ret = i40e_vc_get_vf_resources_msg(vf, msg); 3764 i40e_vc_notify_vf_link_state(vf); 3765 break; 3766 case VIRTCHNL_OP_RESET_VF: 3767 i40e_vc_reset_vf_msg(vf); 3768 ret = 0; 3769 break; 3770 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: 3771 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg); 3772 break; 3773 case VIRTCHNL_OP_CONFIG_VSI_QUEUES: 3774 ret = i40e_vc_config_queues_msg(vf, msg); 3775 break; 3776 case VIRTCHNL_OP_CONFIG_IRQ_MAP: 3777 ret = i40e_vc_config_irq_map_msg(vf, msg); 3778 break; 3779 case VIRTCHNL_OP_ENABLE_QUEUES: 3780 ret = i40e_vc_enable_queues_msg(vf, msg); 3781 i40e_vc_notify_vf_link_state(vf); 3782 break; 3783 case VIRTCHNL_OP_DISABLE_QUEUES: 3784 ret = i40e_vc_disable_queues_msg(vf, msg); 3785 break; 3786 case VIRTCHNL_OP_ADD_ETH_ADDR: 3787 ret = i40e_vc_add_mac_addr_msg(vf, msg); 3788 break; 3789 case VIRTCHNL_OP_DEL_ETH_ADDR: 3790 ret = i40e_vc_del_mac_addr_msg(vf, msg); 3791 break; 3792 case VIRTCHNL_OP_ADD_VLAN: 3793 ret = i40e_vc_add_vlan_msg(vf, msg); 3794 break; 3795 case VIRTCHNL_OP_DEL_VLAN: 3796 ret = i40e_vc_remove_vlan_msg(vf, msg); 3797 break; 3798 case VIRTCHNL_OP_GET_STATS: 3799 ret = i40e_vc_get_stats_msg(vf, msg); 3800 break; 3801 case VIRTCHNL_OP_IWARP: 3802 ret = i40e_vc_iwarp_msg(vf, msg, msglen); 3803 break; 3804 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: 3805 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true); 3806 break; 3807 case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: 3808 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false); 3809 break; 3810 case VIRTCHNL_OP_CONFIG_RSS_KEY: 3811 ret = i40e_vc_config_rss_key(vf, msg); 3812 break; 3813 case VIRTCHNL_OP_CONFIG_RSS_LUT: 3814 ret = i40e_vc_config_rss_lut(vf, msg); 3815 break; 3816 case VIRTCHNL_OP_GET_RSS_HENA_CAPS: 3817 ret = i40e_vc_get_rss_hena(vf, msg); 3818 break; 3819 case VIRTCHNL_OP_SET_RSS_HENA: 3820 ret = i40e_vc_set_rss_hena(vf, msg); 3821 break; 3822 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 3823 ret = i40e_vc_enable_vlan_stripping(vf, msg); 3824 break; 3825 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 3826 ret = i40e_vc_disable_vlan_stripping(vf, msg); 3827 break; 3828 case VIRTCHNL_OP_REQUEST_QUEUES: 3829 ret = i40e_vc_request_queues_msg(vf, msg); 3830 break; 3831 case VIRTCHNL_OP_ENABLE_CHANNELS: 3832 ret = i40e_vc_add_qch_msg(vf, msg); 3833 break; 3834 case VIRTCHNL_OP_DISABLE_CHANNELS: 3835 ret = i40e_vc_del_qch_msg(vf, msg); 3836 break; 3837 case 
VIRTCHNL_OP_ADD_CLOUD_FILTER: 3838 ret = i40e_vc_add_cloud_filter(vf, msg); 3839 break; 3840 case VIRTCHNL_OP_DEL_CLOUD_FILTER: 3841 ret = i40e_vc_del_cloud_filter(vf, msg); 3842 break; 3843 case VIRTCHNL_OP_UNKNOWN: 3844 default: 3845 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", 3846 v_opcode, local_vf_id); 3847 ret = i40e_vc_send_resp_to_vf(vf, v_opcode, 3848 I40E_ERR_NOT_IMPLEMENTED); 3849 break; 3850 } 3851 3852 return ret; 3853 } 3854 3855 /** 3856 * i40e_vc_process_vflr_event 3857 * @pf: pointer to the PF structure 3858 * 3859 * called from the VFLR irq handler to 3860 * free up VF resources and state variables 3861 **/ 3862 int i40e_vc_process_vflr_event(struct i40e_pf *pf) 3863 { 3864 struct i40e_hw *hw = &pf->hw; 3865 u32 reg, reg_idx, bit_idx; 3866 struct i40e_vf *vf; 3867 int vf_id; 3868 3869 if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state)) 3870 return 0; 3871 3872 /* Re-enable the VFLR interrupt cause here, before looking for which 3873 * VF got reset. Otherwise, if another VF gets a reset while the 3874 * first one is being processed, that interrupt will be lost, and 3875 * that VF will be stuck in reset forever. 3876 */ 3877 reg = rd32(hw, I40E_PFINT_ICR0_ENA); 3878 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK; 3879 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 3880 i40e_flush(hw); 3881 3882 clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state); 3883 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { 3884 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; 3885 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; 3886 /* read GLGEN_VFLRSTAT register to find out the FLR'd VFs */ 3887 vf = &pf->vf[vf_id]; 3888 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); 3889 if (reg & BIT(bit_idx)) 3890 /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */ 3891 i40e_reset_vf(vf, true); 3892 } 3893 3894 return 0; 3895 } 3896 3897 /** 3898 * i40e_validate_vf 3899 * @pf: the physical function 3900 * @vf_id: VF identifier 3901 * 3902 * Check that the VF is enabled and the VSI exists. 3903 * 3904 * Returns 0 on success, negative on failure 3905 **/ 3906 static int i40e_validate_vf(struct i40e_pf *pf, int vf_id) 3907 { 3908 struct i40e_vsi *vsi; 3909 struct i40e_vf *vf; 3910 int ret = 0; 3911 3912 if (vf_id >= pf->num_alloc_vfs) { 3913 dev_err(&pf->pdev->dev, 3914 "Invalid VF Identifier %d\n", vf_id); 3915 ret = -EINVAL; 3916 goto err_out; 3917 } 3918 vf = &pf->vf[vf_id]; 3919 vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id); 3920 if (!vsi) 3921 ret = -EINVAL; 3922 err_out: 3923 return ret; 3924 } 3925 3926 /** 3927 * i40e_ndo_set_vf_mac 3928 * @netdev: network interface device structure 3929 * @vf_id: VF identifier 3930 * @mac: mac address 3931 * 3932 * program VF mac address 3933 **/ 3934 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) 3935 { 3936 struct i40e_netdev_priv *np = netdev_priv(netdev); 3937 struct i40e_vsi *vsi = np->vsi; 3938 struct i40e_pf *pf = vsi->back; 3939 struct i40e_mac_filter *f; 3940 struct i40e_vf *vf; 3941 int ret = 0; 3942 struct hlist_node *h; 3943 int bkt; 3944 u8 i; 3945 3946 /* validate the request */ 3947 ret = i40e_validate_vf(pf, vf_id); 3948 if (ret) 3949 goto error_param; 3950 3951 vf = &pf->vf[vf_id]; 3952 vsi = pf->vsi[vf->lan_vsi_idx]; 3953 3954 /* When the VF is resetting, wait until it is done. 3955 * It can take up to 200 milliseconds, 3956 * but wait for up to 300 milliseconds to be safe.
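* (The loop below makes 15 passes of msleep(20), giving the 300 ms bound.)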
/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: mac address
 *
 * program VF mac address
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;
	struct hlist_node *h;
	int bkt;
	u8 i;

	/* Take the pending-op bit before any path that can reach error_param,
	 * since error_param clears it unconditionally.
	 */
	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error_param;

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_idx];

	/* When the VF is resetting wait until it is done.
	 * It can take up to 200 milliseconds,
	 * but wait for up to 300 milliseconds to be safe.
	 */
	for (i = 0; i < 15; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
			break;
		msleep(20);
	}
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_param;
	}

	if (is_multicast_ether_addr(mac)) {
		dev_err(&pf->pdev->dev,
			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	/* Lock once because the add/del_filter functions invoked below
	 * require mac_filter_hash_lock to be held
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* delete the temporary mac address */
	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);

	/* Delete all the filters for this VSI - we're going to kill it
	 * anyway.
	 */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		__i40e_del_filter(vsi, f);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program mac filter */
	if (i40e_sync_vsi_filters(vsi)) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		ret = -EIO;
		goto error_param;
	}
	ether_addr_copy(vf->default_lan_addr.addr, mac);

	if (is_zero_ether_addr(mac)) {
		vf->pf_set_mac = false;
		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
	} else {
		vf->pf_set_mac = true;
		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
			 mac, vf_id);
	}

	/* Force the VF interface down so it has to come back up with the
	 * new MAC address
	 */
	i40e_vc_disable_vf(vf);
	dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");

error_param:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
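/* Illustrative host-side usage (device name and address are placeholders):
 * the callback above is reached through the ndo_set_vf_mac hook, e.g.
 *
 *   ip link set dev eth0 vf 0 mac 02:aa:bb:cc:dd:ee
 *
 * Passing an all-zero address clears pf_set_mac so the VF may once again
 * choose its own MAC.
 */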
/**
 * i40e_vsi_has_vlans - True if VSI has configured VLANs
 * @vsi: pointer to the vsi
 *
 * Check if a VSI has configured any VLANs. False if we have a port VLAN or if
 * we have no configured VLANs. Do not call while holding the
 * mac_filter_hash_lock.
 */
static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
{
	bool have_vlans;

	/* If we have a port VLAN, then the VSI cannot have any VLANs
	 * configured, as all MAC/VLAN filters will be assigned to the PVID.
	 */
	if (vsi->info.pvid)
		return false;

	/* Since we don't have a PVID, we know that if the device is in VLAN
	 * mode it must be because of a VLAN filter configured on this VSI.
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	have_vlans = i40e_is_vsi_in_vlan(vsi);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	return have_vlans;
}

/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN identifier
 * @qos: priority setting
 * @vlan_proto: vlan protocol
 *
 * program VF vlan id and/or qos
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
			      u16 vlan_id, u8 qos, __be16 vlan_proto)
{
	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	bool allmulti = false, alluni = false;
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error_pvid;

	if (vlan_id > I40E_MAX_VLANID || qos > 7) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	if (vlan_proto != htons(ETH_P_8021Q)) {
		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
		ret = -EPROTONOSUPPORT;
		goto error_pvid;
	}

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_pvid;
	}

	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
		/* duplicate request, so just return success */
		goto error_pvid;

	if (i40e_vsi_has_vlans(vsi)) {
		dev_err(&pf->pdev->dev,
			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
			vf_id);
		/* Administrator error - knock the VF offline until the
		 * network inside the VF is reconfigured and the VF driver
		 * is reloaded.
		 */
		i40e_vc_disable_vf(vf);
		/* During reset the VF got a new VSI, so refresh the pointer. */
		vsi = pf->vsi[vf->lan_vsi_idx];
	}
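	/* What follows re-plumbs the VSI's filters: MAC filters are first
	 * duplicated onto I40E_VLAN_ANY (or the new VLAN) before the old ones
	 * are removed, so the VF never loses its MAC filters mid-change. As a
	 * worked example of the vlanprio encoding computed above - assuming
	 * I40E_VLAN_PRIORITY_SHIFT places qos in the 802.1p PCP field
	 * (bits 13-15 of the VLAN TCI) - a request such as
	 *
	 *   ip link set dev eth0 vf 0 vlan 100 qos 3
	 *
	 * (device name illustrative) yields vlanprio = 100 | (3 << 13) =
	 * 0x6064, which is compared against the current PVID to short-circuit
	 * duplicate requests.
	 */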
	/* Locked once because multiple functions below iterate list */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* Check for condition where there was already a port VLAN ID
	 * filter set and now it is being deleted by setting it to zero.
	 * Additionally check for the condition where there was a port
	 * VLAN but now there is a new and different port VLAN being set.
	 * Before deleting all the old VLAN filters we must add new ones
	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
	 * MAC addresses deleted.
	 */
	if ((!(vlan_id || qos) ||
	     vlanprio != le16_to_cpu(vsi->info.pvid)) &&
	    vsi->info.pvid) {
		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}
	}

	if (vsi->info.pvid) {
		/* remove all filters on the old VLAN */
		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
					   VLAN_VID_MASK));
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* disable promisc modes in case they were enabled */
	ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
					      allmulti, alluni);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
		goto error_pvid;
	}

	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi, vlanprio);
	else
		i40e_vsi_remove_pvid(vsi);
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter for each MAC */
		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}

		/* remove the previously added non-VLAN MAC filters */
		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
		alluni = true;

	if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
		allmulti = true;

	/* Schedule the worker thread to take care of applying changes */
	i40e_service_event_schedule(vsi->back);

	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}
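	/* Promiscuous state is restored in two steps: it was unconditionally
	 * disabled above while the filters were rewritten, and the
	 * alluni/allmulti flags just captured from vf_states are re-applied
	 * once the port VLAN below has been recorded.
	 */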
	/* The Port VLAN needs to be saved across resets the same as the
	 * default LAN MAC address.
	 */
	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);

	ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
		goto error_pvid;
	}

	ret = 0;

error_pvid:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}

/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: Minimum Tx rate
 * @max_tx_rate: Maximum Tx rate
 *
 * configure VF Tx rate
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
		       int max_tx_rate)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error;

	if (min_tx_rate) {
		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
			min_tx_rate, vf_id);
		ret = -EINVAL;
		goto error;
	}

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error;
	}

	ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
	if (ret)
		goto error;

	vf->tx_rate = max_tx_rate;
error:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
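/* Illustrative host-side usage (device name is a placeholder): the callback
 * above is reached through ndo_set_vf_bw, e.g.
 *
 *   ip link set dev eth0 vf 0 max_tx_rate 1000
 *
 * with the rate given in Mbps. A nonzero min_tx_rate is rejected because
 * the driver only programs a per-VF maximum bandwidth limit.
 */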
Try again.\n", 4307 vf_id); 4308 ret = -EAGAIN; 4309 goto error_param; 4310 } 4311 4312 ivi->vf = vf_id; 4313 4314 ether_addr_copy(ivi->mac, vf->default_lan_addr.addr); 4315 4316 ivi->max_tx_rate = vf->tx_rate; 4317 ivi->min_tx_rate = 0; 4318 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK; 4319 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >> 4320 I40E_VLAN_PRIORITY_SHIFT; 4321 if (vf->link_forced == false) 4322 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; 4323 else if (vf->link_up == true) 4324 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; 4325 else 4326 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; 4327 ivi->spoofchk = vf->spoofchk; 4328 ivi->trusted = vf->trusted; 4329 ret = 0; 4330 4331 error_param: 4332 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4333 return ret; 4334 } 4335 4336 /** 4337 * i40e_ndo_set_vf_link_state 4338 * @netdev: network interface device structure 4339 * @vf_id: VF identifier 4340 * @link: required link state 4341 * 4342 * Set the link state of a specified VF, regardless of physical link state 4343 **/ 4344 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) 4345 { 4346 struct i40e_netdev_priv *np = netdev_priv(netdev); 4347 struct i40e_pf *pf = np->vsi->back; 4348 struct virtchnl_pf_event pfe; 4349 struct i40e_hw *hw = &pf->hw; 4350 struct i40e_vf *vf; 4351 int abs_vf_id; 4352 int ret = 0; 4353 4354 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4355 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4356 return -EAGAIN; 4357 } 4358 4359 /* validate the request */ 4360 if (vf_id >= pf->num_alloc_vfs) { 4361 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 4362 ret = -EINVAL; 4363 goto error_out; 4364 } 4365 4366 vf = &pf->vf[vf_id]; 4367 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 4368 4369 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; 4370 pfe.severity = PF_EVENT_SEVERITY_INFO; 4371 4372 switch (link) { 4373 case IFLA_VF_LINK_STATE_AUTO: 4374 vf->link_forced = false; 4375 pfe.event_data.link_event.link_status = 4376 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; 4377 pfe.event_data.link_event.link_speed = 4378 (enum virtchnl_link_speed) 4379 pf->hw.phy.link_info.link_speed; 4380 break; 4381 case IFLA_VF_LINK_STATE_ENABLE: 4382 vf->link_forced = true; 4383 vf->link_up = true; 4384 pfe.event_data.link_event.link_status = true; 4385 pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB; 4386 break; 4387 case IFLA_VF_LINK_STATE_DISABLE: 4388 vf->link_forced = true; 4389 vf->link_up = false; 4390 pfe.event_data.link_event.link_status = false; 4391 pfe.event_data.link_event.link_speed = 0; 4392 break; 4393 default: 4394 ret = -EINVAL; 4395 goto error_out; 4396 } 4397 /* Notify the VF of its new link state */ 4398 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 4399 0, (u8 *)&pfe, sizeof(pfe), NULL); 4400 4401 error_out: 4402 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4403 return ret; 4404 } 4405 4406 /** 4407 * i40e_ndo_set_vf_spoofchk 4408 * @netdev: network interface device structure 4409 * @vf_id: VF identifier 4410 * @enable: flag to enable or disable feature 4411 * 4412 * Enable or disable VF spoof checking 4413 **/ 4414 int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) 4415 { 4416 struct i40e_netdev_priv *np = netdev_priv(netdev); 4417 struct i40e_vsi *vsi = np->vsi; 4418 struct i40e_pf *pf = vsi->back; 4419 struct i40e_vsi_context ctxt; 4420 struct i40e_hw *hw = &pf->hw; 4421 struct i40e_vf 
/**
 * i40e_ndo_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @enable: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 **/
int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vsi_context ctxt;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	vf = &pf->vf[vf_id];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto out;
	}

	if (enable == vf->spoofchk)
		goto out;

	vf->spoofchk = enable;
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	if (enable)
		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
			ret);
		ret = -EIO;
	}
out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}

/**
 * i40e_ndo_set_vf_trust
 * @netdev: network interface device structure of the pf
 * @vf_id: VF identifier
 * @setting: trust setting
 *
 * Enable or disable VF trust setting
 **/
int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
		ret = -EINVAL;
		goto out;
	}

	vf = &pf->vf[vf_id];

	if (setting == vf->trusted)
		goto out;

	vf->trusted = setting;
	i40e_vc_disable_vf(vf);
	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
		 vf_id, setting ? "" : "un");

	if (vf->adq_enabled && !vf->trusted) {
		dev_info(&pf->pdev->dev,
			 "VF %u no longer Trusted, deleting all cloud filters\n",
			 vf_id);
		i40e_del_all_cloud_filters(vf);
	}

out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
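/* Illustrative host-side usage for the two callbacks above (device name is
 * a placeholder):
 *
 *   ip link set dev eth0 vf 0 spoofchk on
 *   ip link set dev eth0 vf 0 trust on
 *
 * Toggling trust resets the VF, and untrusting a VF with ADq enabled also
 * deletes all of its cloud filters, as the code above shows.
 */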