// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
		/* Not all vfs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}

/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;
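	/* If the host administrator has forced this VF's link state (via
	 * i40e_ndo_set_vf_link_state), report the forced status with a
	 * nominal 40GbE speed instead of the physical link information.
	 */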
	if (vf->link_forced) {
		pfe.event_data.link_event.link_status = vf->link_up;
		pfe.event_data.link_event.link_speed =
			(vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
	} else {
		pfe.event_data.link_event.link_status =
			ls->link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			i40e_virtchnl_link_speed(ls->link_speed);
	}
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}
/***********************misc routines*****************************/

/**
 * i40e_vc_disable_vf
 * @vf: pointer to the VF info
 *
 * Disable the VF through a SW reset.
 **/
static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
{
	int i;

	i40e_vc_notify_vf_reset(vf);

	/* We want to ensure that an actual reset is initiated after this
	 * function is called. However, we do not want to wait forever, so
	 * we'll give a reasonable amount of time and print a message if we
	 * fail to ensure a reset.
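	 * Twenty attempts with a 10-20 ms sleep between them bounds the
	 * wait to roughly 200-400 ms before we give up and warn.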
	 */
	for (i = 0; i < 20; i++) {
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

	dev_warn(&vf->pf->pdev->dev,
		 "Failed to initiate reset for VF %d after 200 milliseconds\n",
		 vf->vf_id);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u8 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}

/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queue_id: queue number
 *
 * wrapper function to get pf_queue_id handling ADq code as well
 **/
static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
{
	int i;

	if (vf->adq_enabled) {
		/* Although the VF considers all of its queues (from 1 to 16)
		 * as its own, they may actually belong to different VSIs (up
		 * to 4). We need to find out which queue belongs to which
		 * VSI.
		 */
		for (i = 0; i < vf->num_tc; i++) {
			if (queue_id < vf->ch[i].num_qps) {
				vsi_id = vf->ch[i].vsi_id;
				break;
			}
			/* find the right queue id, relative to a
			 * given VSI.
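			 * Each pass subtracts the current channel's queue
			 * count, so queue_id ends up relative to the channel
			 * that actually owns it.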
			 */
			queue_id -= vf->ch[i].num_qps;
		}
	}

	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
}

/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
			  ((pf->hw.func_caps.num_msix_vectors_vf - 1) *
			   vf->vf_id) + (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_get_real_pf_qid(vf,
							   vsi_id,
							   vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		      (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		      (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		      BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* If the VF is running in polling mode and using interrupt zero,
	 * we need to disable auto-masking on interrupt zero for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}

/**
 * i40e_release_iwarp_qvlist
 * @vf: pointer to the VF.
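 *
 * Release the iWARP queue-vector list previously programmed for this VF,
 * unlinking any CEQs from the per-vector interrupt linked lists before
 * freeing the tracking structure.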
 **/
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_iwarp_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}

/**
 * i40e_config_iwarp_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
				    struct virtchnl_iwarp_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_iwarp_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	u32 msix_vf, size;

	size = sizeof(struct virtchnl_iwarp_qvlist_info) +
	       (sizeof(struct virtchnl_iwarp_qv_info) *
	       (qvlist_info->num_vectors - 1));
	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
	if (!vf->qvlist_info)
		return -ENOMEM;

	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, v_idx))
			goto err;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
			      I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			       (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			       (qv_info->itr_idx <<
				I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			       (next_q_type <<
				I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			       (next_q_idx <<
				I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			      (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			       (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			       (qv_info->itr_idx <<
				I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
	return -EINVAL;
}

/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
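	/* register writes are posted, so read back through i40e_flush() to
	 * make sure the queue-to-VF association has taken effect
	 */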
	i40e_flush(hw);

error_context:
	return ret;
}

/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
				  I40E_RX_SPLIT_IP |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u64 max_tx_rate = 0;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
			     vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}

	if (!idx) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed. Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
		/* program mac filter only for VF VSI */
		ret = i40e_sync_vsi_filters(vsi);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
	}

	/* store the VSI index and id for ADq; don't apply the mac filter
	 * for these additional VSIs
	 */
	if (vf->adq_enabled) {
		vf->ch[idx].vsi_idx = vsi->idx;
		vf->ch[idx].vsi_id = vsi->id;
	}

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		max_tx_rate = vf->tx_rate;
	} else if (vf->ch[idx].max_tx_rate) {
		max_tx_rate = vf->ch[idx].max_tx_rate;
	}

	if (max_tx_rate) {
		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  max_tx_rate, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}

/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the first part, VSILAN_QTABLE, mapping PF queues
 * to the VSI.
 **/
static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qps;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < 7; j++) {
			if (j * 2 >= qps) {
				/* end of list */
				reg = 0x07FF07FF;
			} else {
				u16 qid = i40e_vc_get_pf_queue_id(vf,
								  vsi_id,
								  j * 2);
				reg = qid;
				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      (j * 2) + 1);
				reg |= qid << 16;
			}
			i40e_write_rx_ctl(hw,
					  I40E_VSILAN_QTABLE(j, vsi_id),
					  reg);
		}
	}
}

/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the second part, VPLAN_QTABLE, and completes the
 * VF mappings.
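 * VPLAN_QTABLE is indexed by (VF-relative queue, VF id), and each entry
 * holds the absolute PF queue number backing that VF queue.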
 **/
static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_qps = 0;
	u32 qps, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qid;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < qps; j++) {
			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);

			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
			     reg);
			total_qps++;
		}
	}
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	i40e_map_pf_to_vf_queues(vf);
	i40e_map_pf_queues_to_vsi(vf);

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, j, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default,
	 * so do the accounting here when we're about to free them.
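	 * (pf->queues_left is the PF-wide pool shared by all VFs; only the
	 * pairs above the per-VF default are returned to it.)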
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
		vf->num_mac = 0;
	}

	/* do the accounting and remove additional ADq VSI's */
	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
		for (j = 0; j < vf->num_tc; j++) {
			/* At this point VSI0 is already released, so don't
			 * release it again; only clear the corresponding
			 * values in the structure variables
			 */
			if (j)
				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
			vf->ch[j].vsi_idx = 0;
			vf->ch[j].vsi_id = 0;
		}
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						       (vf->vf_id))
						      + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret, idx;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, 0);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* allocate additional VSIs based on tc information for ADq */
	if (vf->adq_enabled) {
		if (pf->queues_left >=
		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
			/* TC 0 always belongs to VF VSI */
			for (idx = 1; idx < vf->num_tc; idx++) {
				ret = i40e_alloc_vsi_res(vf, idx);
				if (ret)
					goto error_alloc;
			}
			/* send correct number of queues */
			total_queue_pairs = I40E_MAX_VF_QUEUES;
		} else {
			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
				 vf->vf_id);
			vf->adq_enabled = false;
		}
	}

	/* We account for each VF to get a default number of queue pairs. If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * HW.
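	 * For example, assuming the default of 4 queue pairs per VF, a VF
	 * that was granted 16 pairs debits 12 from pf->queues_left here.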
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}

static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi);

/**
 * i40e_config_vf_promiscuous_mode
 * @vf: pointer to the VF info
 * @vsi_id: VSI id
 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
 * @alluni: set MAC L2 layer unicast promiscuous enable/disable
 *
 * Called from the VF to configure the promiscuous mode of
 * VF vsis and from the VF reset path to reset promiscuous mode.
 **/
static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
						   u16 vsi_id,
						   bool allmulti,
						   bool alluni)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_mac_filter *f;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int bkt;

	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
		return I40E_ERR_PARAM;

	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
	    (allmulti || alluni)) {
		dev_err(&pf->pdev->dev,
			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
			vf->vf_id);
		/* Lie to the VF on purpose.
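		 * Returning success keeps an unprivileged VF driver from
		 * endlessly retrying while the request itself is quietly
		 * ignored.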
		 */
		return 0;
	}

	if (vf->port_vlan_id) {
		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
							    allmulti,
							    vf->port_vlan_id,
							    NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
			return aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
							    alluni,
							    vf->port_vlan_id,
							    NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
		}
		return aq_ret;
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
				continue;
			aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
								    vsi->seid,
								    allmulti,
								    f->vlan,
								    NULL);
			if (aq_ret) {
				int aq_err = pf->hw.aq.asq_last_status;

				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
			}

			aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
								    vsi->seid,
								    alluni,
								    f->vlan,
								    NULL);
			if (aq_ret) {
				int aq_err = pf->hw.aq.asq_last_status;

				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
			}
		}
		return aq_ret;
	}
	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, allmulti,
						       NULL);
	if (aq_ret) {
		int aq_err = pf->hw.aq.asq_last_status;

		dev_err(&pf->pdev->dev,
			"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
			vf->vf_id,
			i40e_stat_str(&pf->hw, aq_ret),
			i40e_aq_str(&pf->hw, aq_err));
		return aq_ret;
	}

	aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, alluni,
						     NULL, true);
	if (aq_ret) {
		int aq_err = pf->hw.aq.asq_last_status;

		dev_err(&pf->pdev->dev,
			"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
			vf->vf_id,
			i40e_stat_str(&pf->hw, aq_ret),
			i40e_aq_str(&pf->hw, aq_err));
	}

	return aq_ret;
}

/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;

	/* warn the VF */
	clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer
	 * to do it earlier, giving any VF config functions still running at
	 * this point time to finish.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}

/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* disable promisc modes in case they were enabled */
	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
					&vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Returns true if the VF is reset, false otherwise.
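 * If another reset is already in progress (__I40E_VF_DISABLE is set), it
 * returns false without touching the VF.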
 **/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	/* If the VFs have been disabled, this means something else is
	 * resetting the VF, so we shouldn't continue.
	 */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 **/
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int i, v;
	u32 reg;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_trigger_vf_reset(&pf->vf[v], flr);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence, beginning with the VF that
		 * failed the previous check.
		 */
		while (v < pf->num_alloc_vfs) {
			vf = &pf->vf[v];
			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
				break;

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			pf->vf[v].vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* HW may need up to 50 ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_cleanup_reset_vf(&pf->vf[v]);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;
	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		usleep_range(1000, 2000);

	i40e_notify_client_of_vf_enable(pf, 0);

	/* Amortize wait time by stopping all VFs at the same time */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFs. Without this, VFs will fail
		 * to work correctly when SR-IOV gets re-enabled.
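		 * (VFLR is the PCIe VF function-level reset; the status bits
		 * in GLGEN_VFLRSTAT appear to be write-1-to-clear, which is
		 * what the writes below assume.)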
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, pf->state);
}

#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;

		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* VF resources get allocated during reset */
	i40e_reset_all_vfs(pf, false);

	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}

#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
			 num_vfs, pf->num_req_vfs);
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}

/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	if (num_vfs) {
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
		}
		ret = i40e_pci_sriov_enable(pdev, num_vfs);
		goto sriov_configure_out;
	}

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
		i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		ret = -EINVAL;
		goto sriov_configure_out;
	}
sriov_configure_out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}

/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	i40e_status aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
			 vf->vf_id, v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
		/* reset the invalid counter if a valid message is received
		 */
		vf->num_invalid_msgs = 0;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d aq_err %d\n",
			 vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}

/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct virtchnl_version_info));
}

/**
 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
 * @vf: pointer to VF structure
 **/
static void i40e_del_qch(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	/* The first element in the array belongs to the primary VF VSI and
	 * we shouldn't delete it. We should, however, delete the rest of the
	 * VSIs created.
	 */
	for (i = 1; i < vf->num_tc; i++) {
		if (vf->ch[i].vsi_idx) {
			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
			vf->ch[i].vsi_idx = 0;
			vf->ch[i].vsi_id = 0;
		}
	}
}

/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int num_vsis = 1;
	int len = 0;
	int ret;

	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = (sizeof(struct virtchnl_vf_resource) +
	       sizeof(struct virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	if (i40e_vf_client_capable(pf, vf->vf_id) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	} else {
		clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
			dev_err(&pf->pdev->dev,
				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
				vf->vf_id);
			aq_ret = I40E_ERR_PARAM;
			goto err;
		}
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
	}

	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
			vfres->vf_cap_flags |=
					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
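	/* the vector count and RSS key/LUT sizes below are fixed by PF and
	 * device capabilities and are not negotiated with the VF
	 */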
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; 1926 vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE; 1927 vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE; 1928 1929 if (vf->lan_vsi_idx) { 1930 vfres->vsi_res[0].vsi_id = vf->lan_vsi_id; 1931 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; 1932 vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs; 1933 /* VFs only use TC 0 */ 1934 vfres->vsi_res[0].qset_handle 1935 = le16_to_cpu(vsi->info.qs_handle[0]); 1936 ether_addr_copy(vfres->vsi_res[0].default_mac_addr, 1937 vf->default_lan_addr.addr); 1938 } 1939 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); 1940 1941 err: 1942 /* send the response back to the VF */ 1943 ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, 1944 aq_ret, (u8 *)vfres, len); 1945 1946 kfree(vfres); 1947 return ret; 1948 } 1949 1950 /** 1951 * i40e_vc_reset_vf_msg 1952 * @vf: pointer to the VF info 1953 * 1954 * called from the VF to reset itself; 1955 * unlike other virtchnl messages, the PF driver 1956 * doesn't send a response back to the VF 1957 **/ 1958 static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) 1959 { 1960 if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 1961 i40e_reset_vf(vf, false); 1962 } 1963 1964 /** 1965 * i40e_getnum_vf_vsi_vlan_filters 1966 * @vsi: pointer to the vsi 1967 * 1968 * called to get the number of VLANs offloaded on this VF 1969 **/ 1970 static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) 1971 { 1972 struct i40e_mac_filter *f; 1973 int num_vlans = 0, bkt; 1974 1975 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { 1976 if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) 1977 num_vlans++; 1978 } 1979 1980 return num_vlans; 1981 } 1982 1983 /** 1984 * i40e_vc_config_promiscuous_mode_msg 1985 * @vf: pointer to the VF info 1986 * @msg: pointer to the msg buffer 1987 * 1988 * called from the VF to configure the promiscuous mode of 1989 * VF VSIs 1990 **/ 1991 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) 1992 { 1993 struct virtchnl_promisc_info *info = 1994 (struct virtchnl_promisc_info *)msg; 1995 struct i40e_pf *pf = vf->pf; 1996 i40e_status aq_ret = 0; 1997 bool allmulti = false; 1998 bool alluni = false; 1999 2000 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 2001 return I40E_ERR_PARAM; 2002 2003 /* Multicast promiscuous handling */ 2004 if (info->flags & FLAG_VF_MULTICAST_PROMISC) 2005 allmulti = true; 2006 2007 if (info->flags & FLAG_VF_UNICAST_PROMISC) 2008 alluni = true; 2009 aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti, 2010 alluni); 2011 if (!aq_ret) { 2012 if (allmulti) { 2013 dev_info(&pf->pdev->dev, 2014 "VF %d successfully set multicast promiscuous mode\n", 2015 vf->vf_id); 2016 set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); 2017 } else { 2018 dev_info(&pf->pdev->dev, 2019 "VF %d successfully unset multicast promiscuous mode\n", 2020 vf->vf_id); 2021 clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); 2022 } 2023 if (alluni) { 2024 dev_info(&pf->pdev->dev, 2025 "VF %d successfully set unicast promiscuous mode\n", 2026 vf->vf_id); 2027 set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); 2028 } else { 2029 dev_info(&pf->pdev->dev, 2030 "VF %d successfully unset unicast promiscuous mode\n", 2031 vf->vf_id); 2032 clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); 2033 } 2034 } 2035 2036 /* send the response to the VF */ 2037 return i40e_vc_send_resp_to_vf(vf, 2038 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, 2039 aq_ret); 2040 } 2041 2042 /** 2043 * i40e_vc_config_queues_msg 2044 *
@vf: pointer to the VF info 2045 * @msg: pointer to the msg buffer 2046 * 2047 * called from the VF to configure the rx/tx 2048 * queues 2049 **/ 2050 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) 2051 { 2052 struct virtchnl_vsi_queue_config_info *qci = 2053 (struct virtchnl_vsi_queue_config_info *)msg; 2054 struct virtchnl_queue_pair_info *qpi; 2055 struct i40e_pf *pf = vf->pf; 2056 u16 vsi_id, vsi_queue_id = 0; 2057 i40e_status aq_ret = 0; 2058 int i, j = 0, idx = 0; 2059 2060 vsi_id = qci->vsi_id; 2061 2062 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2063 aq_ret = I40E_ERR_PARAM; 2064 goto error_param; 2065 } 2066 2067 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2068 aq_ret = I40E_ERR_PARAM; 2069 goto error_param; 2070 } 2071 2072 if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) { 2073 aq_ret = I40E_ERR_PARAM; 2074 goto error_param; 2075 } 2076 2077 for (i = 0; i < qci->num_queue_pairs; i++) { 2078 qpi = &qci->qpair[i]; 2079 2080 if (!vf->adq_enabled) { 2081 vsi_queue_id = qpi->txq.queue_id; 2082 2083 if (qpi->txq.vsi_id != qci->vsi_id || 2084 qpi->rxq.vsi_id != qci->vsi_id || 2085 qpi->rxq.queue_id != vsi_queue_id) { 2086 aq_ret = I40E_ERR_PARAM; 2087 goto error_param; 2088 } 2089 } 2090 2091 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) { 2092 aq_ret = I40E_ERR_PARAM; 2093 goto error_param; 2094 } 2095 2096 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id, 2097 &qpi->rxq) || 2098 i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id, 2099 &qpi->txq)) { 2100 aq_ret = I40E_ERR_PARAM; 2101 goto error_param; 2102 } 2103 2104 /* For ADq there can be up to 4 VSIs with max 4 queues each. 2105 * VF does not know about these additional VSIs and all 2106 * it cares about is its own queues. PF configures these queues 2107 * to the appropriate VSIs based on TC mapping 2108 */ 2109 if (vf->adq_enabled) { 2110 if (j == (vf->ch[idx].num_qps - 1)) { 2111 idx++; 2112 j = 0; /* resetting the queue count */ 2113 vsi_queue_id = 0; 2114 } else { 2115 j++; 2116 vsi_queue_id++; 2117 } 2118 vsi_id = vf->ch[idx].vsi_id; 2119 } 2120 } 2121 /* set vsi num_queue_pairs in use to num configured by VF */ 2122 if (!vf->adq_enabled) { 2123 pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = 2124 qci->num_queue_pairs; 2125 } else { 2126 for (i = 0; i < vf->num_tc; i++) 2127 pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs = 2128 vf->ch[i].num_qps; 2129 } 2130 2131 error_param: 2132 /* send the response to the VF */ 2133 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, 2134 aq_ret); 2135 } 2136 2137 /** 2138 * i40e_validate_queue_map * @vf: pointer to the VF info 2139 * @vsi_id: vsi id 2140 * @queuemap: Tx or Rx queue map 2141 * 2142 * check if Tx or Rx queue map is valid 2143 **/ 2144 static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id, 2145 unsigned long queuemap) 2146 { 2147 u16 vsi_queue_id, queue_id; 2148 2149 for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) { 2150 if (vf->adq_enabled) { 2151 vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id; 2152 queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF); 2153 } else { 2154 queue_id = vsi_queue_id; 2155 } 2156 2157 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) 2158 return -EINVAL; 2159 } 2160 2161 return 0; 2162 } 2163 2164 /** 2165 * i40e_vc_config_irq_map_msg 2166 * @vf: pointer to the VF info 2167 * @msg: pointer to the msg buffer 2168 * 2169 * called from the VF to configure the irq to 2170 * queue map 2171 **/ 2172 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg) 2173 { 2174 struct
virtchnl_irq_map_info *irqmap_info = 2175 (struct virtchnl_irq_map_info *)msg; 2176 struct virtchnl_vector_map *map; 2177 u16 vsi_id, vector_id; 2178 i40e_status aq_ret = 0; 2179 int i; 2180 2181 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2182 aq_ret = I40E_ERR_PARAM; 2183 goto error_param; 2184 } 2185 2186 for (i = 0; i < irqmap_info->num_vectors; i++) { 2187 map = &irqmap_info->vecmap[i]; 2188 vector_id = map->vector_id; 2189 vsi_id = map->vsi_id; 2190 /* validate msg params */ 2191 if (!i40e_vc_isvalid_vector_id(vf, vector_id) || 2192 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2193 aq_ret = I40E_ERR_PARAM; 2194 goto error_param; 2195 } 2196 2197 if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) { 2198 aq_ret = I40E_ERR_PARAM; 2199 goto error_param; 2200 } 2201 2202 if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) { 2203 aq_ret = I40E_ERR_PARAM; 2204 goto error_param; 2205 } 2206 2207 i40e_config_irq_link_list(vf, vsi_id, map); 2208 } 2209 error_param: 2210 /* send the response to the VF */ 2211 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, 2212 aq_ret); 2213 } 2214 2215 /** 2216 * i40e_ctrl_vf_tx_rings 2217 * @vsi: the SRIOV VSI being configured 2218 * @q_map: bit map of the queues to be enabled 2219 * @enable: start or stop the queue 2220 **/ 2221 static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map, 2222 bool enable) 2223 { 2224 struct i40e_pf *pf = vsi->back; 2225 int ret = 0; 2226 u16 q_id; 2227 2228 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { 2229 ret = i40e_control_wait_tx_q(vsi->seid, pf, 2230 vsi->base_queue + q_id, 2231 false /*is xdp*/, enable); 2232 if (ret) 2233 break; 2234 } 2235 return ret; 2236 } 2237 2238 /** 2239 * i40e_ctrl_vf_rx_rings 2240 * @vsi: the SRIOV VSI being configured 2241 * @q_map: bit map of the queues to be enabled 2242 * @enable: start or stop the queue 2243 **/ 2244 static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map, 2245 bool enable) 2246 { 2247 struct i40e_pf *pf = vsi->back; 2248 int ret = 0; 2249 u16 q_id; 2250 2251 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { 2252 ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id, 2253 enable); 2254 if (ret) 2255 break; 2256 } 2257 return ret; 2258 } 2259 2260 /** 2261 * i40e_vc_enable_queues_msg 2262 * @vf: pointer to the VF info 2263 * @msg: pointer to the msg buffer 2264 * 2265 * called from the VF to enable all or specific queue(s) 2266 **/ 2267 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) 2268 { 2269 struct virtchnl_queue_select *vqs = 2270 (struct virtchnl_queue_select *)msg; 2271 struct i40e_pf *pf = vf->pf; 2272 u16 vsi_id = vqs->vsi_id; 2273 i40e_status aq_ret = 0; 2274 int i; 2275 2276 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2277 aq_ret = I40E_ERR_PARAM; 2278 goto error_param; 2279 } 2280 2281 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2282 aq_ret = I40E_ERR_PARAM; 2283 goto error_param; 2284 } 2285 2286 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { 2287 aq_ret = I40E_ERR_PARAM; 2288 goto error_param; 2289 } 2290 2291 /* Use the queue bit map sent by the VF */ 2292 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, 2293 true)) { 2294 aq_ret = I40E_ERR_TIMEOUT; 2295 goto error_param; 2296 } 2297 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, 2298 true)) { 2299 aq_ret = I40E_ERR_TIMEOUT; 2300 goto error_param; 2301 } 2302 2303 /* need to start the rings for additional ADq VSI's as well */ 2304 if (vf->adq_enabled) { 2305 /* 
zero belongs to LAN VSI */ 2306 for (i = 1; i < vf->num_tc; i++) { 2307 if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx])) 2308 aq_ret = I40E_ERR_TIMEOUT; 2309 } 2310 } 2311 2312 error_param: 2313 /* send the response to the VF */ 2314 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, 2315 aq_ret); 2316 } 2317 2318 /** 2319 * i40e_vc_disable_queues_msg 2320 * @vf: pointer to the VF info 2321 * @msg: pointer to the msg buffer 2322 * 2323 * called from the VF to disable all or specific 2324 * queue(s) 2325 **/ 2326 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) 2327 { 2328 struct virtchnl_queue_select *vqs = 2329 (struct virtchnl_queue_select *)msg; 2330 struct i40e_pf *pf = vf->pf; 2331 i40e_status aq_ret = 0; 2332 2333 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2334 aq_ret = I40E_ERR_PARAM; 2335 goto error_param; 2336 } 2337 2338 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2339 aq_ret = I40E_ERR_PARAM; 2340 goto error_param; 2341 } 2342 2343 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { 2344 aq_ret = I40E_ERR_PARAM; 2345 goto error_param; 2346 } 2347 2348 /* Use the queue bit map sent by the VF */ 2349 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, 2350 false)) { 2351 aq_ret = I40E_ERR_TIMEOUT; 2352 goto error_param; 2353 } 2354 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, 2355 false)) { 2356 aq_ret = I40E_ERR_TIMEOUT; 2357 goto error_param; 2358 } 2359 error_param: 2360 /* send the response to the VF */ 2361 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, 2362 aq_ret); 2363 } 2364 2365 /** 2366 * i40e_vc_request_queues_msg 2367 * @vf: pointer to the VF info 2368 * @msg: pointer to the msg buffer 2369 * 2370 * VFs get a default number of queues but can use this message to request a 2371 * different number. If the request is successful, the PF will reset the VF and 2372 * return 0. If unsuccessful, the PF will send a message informing the VF of the 2373 * number of available queues and return the result of sending that message. 2374 **/ 2375 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) 2376 { 2377 struct virtchnl_vf_res_request *vfres = 2378 (struct virtchnl_vf_res_request *)msg; 2379 int req_pairs = vfres->num_queue_pairs; 2380 int cur_pairs = vf->num_queue_pairs; 2381 struct i40e_pf *pf = vf->pf; 2382 2383 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 2384 return -EINVAL; 2385 2386 if (req_pairs <= 0) { 2387 dev_err(&pf->pdev->dev, 2388 "VF %d tried to request %d queues. 
Ignoring.\n", 2389 vf->vf_id, req_pairs); 2390 } else if (req_pairs > I40E_MAX_VF_QUEUES) { 2391 dev_err(&pf->pdev->dev, 2392 "VF %d tried to request more than %d queues.\n", 2393 vf->vf_id, 2394 I40E_MAX_VF_QUEUES); 2395 vfres->num_queue_pairs = I40E_MAX_VF_QUEUES; 2396 } else if (req_pairs - cur_pairs > pf->queues_left) { 2397 dev_warn(&pf->pdev->dev, 2398 "VF %d requested %d more queues, but only %d left.\n", 2399 vf->vf_id, 2400 req_pairs - cur_pairs, 2401 pf->queues_left); 2402 vfres->num_queue_pairs = pf->queues_left + cur_pairs; 2403 } else { 2404 /* successful request */ 2405 vf->num_req_queues = req_pairs; 2406 i40e_vc_notify_vf_reset(vf); 2407 i40e_reset_vf(vf, false); 2408 return 0; 2409 } 2410 2411 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0, 2412 (u8 *)vfres, sizeof(*vfres)); 2413 } 2414 2415 /** 2416 * i40e_vc_get_stats_msg 2417 * @vf: pointer to the VF info 2418 * @msg: pointer to the msg buffer 2419 * 2420 * called from the VF to get vsi stats 2421 **/ 2422 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) 2423 { 2424 struct virtchnl_queue_select *vqs = 2425 (struct virtchnl_queue_select *)msg; 2426 struct i40e_pf *pf = vf->pf; 2427 struct i40e_eth_stats stats; 2428 i40e_status aq_ret = 0; 2429 struct i40e_vsi *vsi; 2430 2431 memset(&stats, 0, sizeof(struct i40e_eth_stats)); 2432 2433 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2434 aq_ret = I40E_ERR_PARAM; 2435 goto error_param; 2436 } 2437 2438 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2439 aq_ret = I40E_ERR_PARAM; 2440 goto error_param; 2441 } 2442 2443 vsi = pf->vsi[vf->lan_vsi_idx]; 2444 if (!vsi) { 2445 aq_ret = I40E_ERR_PARAM; 2446 goto error_param; 2447 } 2448 i40e_update_eth_stats(vsi); 2449 stats = vsi->eth_stats; 2450 2451 error_param: 2452 /* send the response back to the VF */ 2453 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret, 2454 (u8 *)&stats, sizeof(stats)); 2455 } 2456 2457 /* If the VF is not trusted restrict the number of MAC/VLAN it can program */ 2458 #define I40E_VC_MAX_MAC_ADDR_PER_VF 12 2459 #define I40E_VC_MAX_VLAN_PER_VF 8 2460 2461 /** 2462 * i40e_check_vf_permission 2463 * @vf: pointer to the VF info 2464 * @al: MAC address list from virtchnl 2465 * 2466 * Check that the given list of MAC addresses is allowed. Will return -EPERM 2467 * if any address in the list is not valid. Checks the following conditions: 2468 * 2469 * 1) broadcast and zero addresses are never valid 2470 * 2) unicast addresses are not allowed if the VMM has administratively set 2471 * the VF MAC address, unless the VF is marked as privileged. 2472 * 3) There is enough space to add all the addresses. 2473 * 2474 * Note that to guarantee consistency, it is expected this function be called 2475 * while holding the mac_filter_hash_lock, as otherwise the current number of 2476 * addresses might not be accurate. 2477 **/ 2478 static inline int i40e_check_vf_permission(struct i40e_vf *vf, 2479 struct virtchnl_ether_addr_list *al) 2480 { 2481 struct i40e_pf *pf = vf->pf; 2482 int i; 2483 2484 /* If this VF is not privileged, then we can't add more than a limited 2485 * number of addresses. Check to make sure that the additions do not 2486 * push us over the limit. 
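 * For example, an untrusted VF already holding I40E_VC_MAX_MAC_ADDR_PER_VF
 * (12) addresses will have any further add request rejected with -EPERM.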
2487 */ 2488 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && 2489 (vf->num_mac + al->num_elements) > I40E_VC_MAX_MAC_ADDR_PER_VF) { 2490 dev_err(&pf->pdev->dev, 2491 "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n"); 2492 return -EPERM; 2493 } 2494 2495 for (i = 0; i < al->num_elements; i++) { 2496 u8 *addr = al->list[i].addr; 2497 2498 if (is_broadcast_ether_addr(addr) || 2499 is_zero_ether_addr(addr)) { 2500 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", 2501 addr); 2502 return I40E_ERR_INVALID_MAC_ADDR; 2503 } 2504 2505 /* If the host VMM administrator has set the VF MAC address 2506 * administratively via the ndo_set_vf_mac command then deny 2507 * permission to the VF to add or delete unicast MAC addresses. 2508 * Unless the VF is privileged, in which case it can do whatever. 2509 * The VF may request to set the MAC address filter already 2510 * assigned to it, so do not return an error in that case. 2511 */ 2512 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && 2513 !is_multicast_ether_addr(addr) && vf->pf_set_mac && 2514 !ether_addr_equal(addr, vf->default_lan_addr.addr)) { 2515 dev_err(&pf->pdev->dev, 2516 "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); 2517 return -EPERM; 2518 } 2519 } 2520 2521 return 0; 2522 } 2523 2524 /** 2525 * i40e_vc_add_mac_addr_msg 2526 * @vf: pointer to the VF info 2527 * @msg: pointer to the msg buffer 2528 * 2529 * add guest mac address filter 2530 **/ 2531 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) 2532 { 2533 struct virtchnl_ether_addr_list *al = 2534 (struct virtchnl_ether_addr_list *)msg; 2535 struct i40e_pf *pf = vf->pf; 2536 struct i40e_vsi *vsi = NULL; 2537 u16 vsi_id = al->vsi_id; 2538 i40e_status ret = 0; 2539 int i; 2540 2541 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2542 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2543 ret = I40E_ERR_PARAM; 2544 goto error_param; 2545 } 2546 2547 vsi = pf->vsi[vf->lan_vsi_idx]; 2548 2549 /* Lock once, because all functions inside the for loop access the 2550 * VSI's MAC filter list, which needs to be protected by the same lock.
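 * Taking and dropping the lock per address would let the list change
 * between the permission check and the adds below.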
2551 */ 2552 spin_lock_bh(&vsi->mac_filter_hash_lock); 2553 2554 ret = i40e_check_vf_permission(vf, al); 2555 if (ret) { 2556 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2557 goto error_param; 2558 } 2559 2560 /* add new addresses to the list */ 2561 for (i = 0; i < al->num_elements; i++) { 2562 struct i40e_mac_filter *f; 2563 2564 f = i40e_find_mac(vsi, al->list[i].addr); 2565 if (!f) { 2566 f = i40e_add_mac_filter(vsi, al->list[i].addr); 2567 2568 if (!f) { 2569 dev_err(&pf->pdev->dev, 2570 "Unable to add MAC filter %pM for VF %d\n", 2571 al->list[i].addr, vf->vf_id); 2572 ret = I40E_ERR_PARAM; 2573 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2574 goto error_param; 2575 } else { 2576 vf->num_mac++; 2577 } 2578 } 2579 } 2580 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2581 2582 /* program the updated filter list */ 2583 ret = i40e_sync_vsi_filters(vsi); 2584 if (ret) 2585 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", 2586 vf->vf_id, ret); 2587 2588 error_param: 2589 /* send the response to the VF */ 2590 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, 2591 ret); 2592 } 2593 2594 /** 2595 * i40e_vc_del_mac_addr_msg 2596 * @vf: pointer to the VF info 2597 * @msg: pointer to the msg buffer 2598 * 2599 * remove guest mac address filter 2600 **/ 2601 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) 2602 { 2603 struct virtchnl_ether_addr_list *al = 2604 (struct virtchnl_ether_addr_list *)msg; 2605 struct i40e_pf *pf = vf->pf; 2606 struct i40e_vsi *vsi = NULL; 2607 u16 vsi_id = al->vsi_id; 2608 i40e_status ret = 0; 2609 int i; 2610 2611 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2612 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2613 ret = I40E_ERR_PARAM; 2614 goto error_param; 2615 } 2616 2617 for (i = 0; i < al->num_elements; i++) { 2618 if (is_broadcast_ether_addr(al->list[i].addr) || 2619 is_zero_ether_addr(al->list[i].addr)) { 2620 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n", 2621 al->list[i].addr, vf->vf_id); 2622 ret = I40E_ERR_INVALID_MAC_ADDR; 2623 goto error_param; 2624 } 2625 2626 if (vf->pf_set_mac && 2627 ether_addr_equal(al->list[i].addr, 2628 vf->default_lan_addr.addr)) { 2629 dev_err(&pf->pdev->dev, 2630 "MAC addr %pM has been set by PF, cannot delete it for VF %d, reset VF to change MAC addr\n", 2631 vf->default_lan_addr.addr, vf->vf_id); 2632 ret = I40E_ERR_PARAM; 2633 goto error_param; 2634 } 2635 } 2636 vsi = pf->vsi[vf->lan_vsi_idx]; 2637 2638 spin_lock_bh(&vsi->mac_filter_hash_lock); 2639 /* delete addresses from the list */ 2640 for (i = 0; i < al->num_elements; i++) 2641 if (i40e_del_mac_filter(vsi, al->list[i].addr)) { 2642 ret = I40E_ERR_INVALID_MAC_ADDR; 2643 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2644 goto error_param; 2645 } else { 2646 vf->num_mac--; 2647 } 2648 2649 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2650 2651 /* program the updated filter list */ 2652 ret = i40e_sync_vsi_filters(vsi); 2653 if (ret) 2654 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", 2655 vf->vf_id, ret); 2656 2657 error_param: 2658 /* send the response to the VF */ 2659 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, 2660 ret); 2661 } 2662 2663 /** 2664 * i40e_vc_add_vlan_msg 2665 * @vf: pointer to the VF info 2666 * @msg: pointer to the msg buffer 2667 * 2668 * program guest vlan id 2669 **/ 2670 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg) 2671 { 2672 struct virtchnl_vlan_filter_list *vfl = 2673 (struct virtchnl_vlan_filter_list *)msg; 2674 struct 
i40e_pf *pf = vf->pf; 2675 struct i40e_vsi *vsi = NULL; 2676 u16 vsi_id = vfl->vsi_id; 2677 i40e_status aq_ret = 0; 2678 int i; 2679 2680 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) && 2681 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 2682 dev_err(&pf->pdev->dev, 2683 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n"); aq_ret = I40E_ERR_PARAM; /* report the rejection rather than a silent success */ 2684 goto error_param; 2685 } 2686 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2687 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2688 aq_ret = I40E_ERR_PARAM; 2689 goto error_param; 2690 } 2691 2692 for (i = 0; i < vfl->num_elements; i++) { 2693 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 2694 aq_ret = I40E_ERR_PARAM; 2695 dev_err(&pf->pdev->dev, 2696 "invalid VF VLAN id %d\n", vfl->vlan_id[i]); 2697 goto error_param; 2698 } 2699 } 2700 vsi = pf->vsi[vf->lan_vsi_idx]; 2701 if (vsi->info.pvid) { 2702 aq_ret = I40E_ERR_PARAM; 2703 goto error_param; 2704 } 2705 2706 i40e_vlan_stripping_enable(vsi); 2707 for (i = 0; i < vfl->num_elements; i++) { 2708 /* add new VLAN filter */ 2709 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]); 2710 if (!ret) 2711 vf->num_vlan++; 2712 2713 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 2714 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 2715 true, 2716 vfl->vlan_id[i], 2717 NULL); 2718 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 2719 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 2720 true, 2721 vfl->vlan_id[i], 2722 NULL); 2723 2724 if (ret) 2725 dev_err(&pf->pdev->dev, 2726 "Unable to add VLAN filter %d for VF %d, error %d\n", 2727 vfl->vlan_id[i], vf->vf_id, ret); 2728 } 2729 2730 error_param: 2731 /* send the response to the VF */ 2732 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret); 2733 } 2734 2735 /** 2736 * i40e_vc_remove_vlan_msg 2737 * @vf: pointer to the VF info 2738 * @msg: pointer to the msg buffer 2739 * 2740 * remove programmed guest vlan id 2741 **/ 2742 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) 2743 { 2744 struct virtchnl_vlan_filter_list *vfl = 2745 (struct virtchnl_vlan_filter_list *)msg; 2746 struct i40e_pf *pf = vf->pf; 2747 struct i40e_vsi *vsi = NULL; 2748 u16 vsi_id = vfl->vsi_id; 2749 i40e_status aq_ret = 0; 2750 int i; 2751 2752 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2753 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2754 aq_ret = I40E_ERR_PARAM; 2755 goto error_param; 2756 } 2757 2758 for (i = 0; i < vfl->num_elements; i++) { 2759 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 2760 aq_ret = I40E_ERR_PARAM; 2761 goto error_param; 2762 } 2763 } 2764 2765 vsi = pf->vsi[vf->lan_vsi_idx]; 2766 if (vsi->info.pvid) { 2767 aq_ret = I40E_ERR_PARAM; 2768 goto error_param; 2769 } 2770 2771 for (i = 0; i < vfl->num_elements; i++) { 2772 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); 2773 vf->num_vlan--; 2774 2775 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 2776 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 2777 false, 2778 vfl->vlan_id[i], 2779 NULL); 2780 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 2781 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 2782 false, 2783 vfl->vlan_id[i], 2784 NULL); 2785 } 2786 2787 error_param: 2788 /* send the response to the VF */ 2789 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret); 2790 } 2791 2792 /** 2793 * i40e_vc_iwarp_msg 2794 * @vf: pointer to the VF info 2795 * @msg: pointer to the msg buffer 2796 * @msglen: msg length 2797 * 2798 * called from the VF for the iwarp msgs 2799 **/ 2800 static int
i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 2801 { 2802 struct i40e_pf *pf = vf->pf; 2803 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; 2804 i40e_status aq_ret = 0; 2805 2806 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2807 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { 2808 aq_ret = I40E_ERR_PARAM; 2809 goto error_param; 2810 } 2811 2812 i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id, 2813 msg, msglen); 2814 2815 error_param: 2816 /* send the response to the VF */ 2817 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP, 2818 aq_ret); 2819 } 2820 2821 /** 2822 * i40e_vc_iwarp_qvmap_msg 2823 * @vf: pointer to the VF info 2824 * @msg: pointer to the msg buffer 2825 * @config: config qvmap or release it 2826 * 2827 * called from the VF for the iwarp msgs 2828 **/ 2829 static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config) 2830 { 2831 struct virtchnl_iwarp_qvlist_info *qvlist_info = 2832 (struct virtchnl_iwarp_qvlist_info *)msg; 2833 i40e_status aq_ret = 0; 2834 2835 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2836 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { 2837 aq_ret = I40E_ERR_PARAM; 2838 goto error_param; 2839 } 2840 2841 if (config) { 2842 if (i40e_config_iwarp_qvlist(vf, qvlist_info)) 2843 aq_ret = I40E_ERR_PARAM; 2844 } else { 2845 i40e_release_iwarp_qvlist(vf); 2846 } 2847 2848 error_param: 2849 /* send the response to the VF */ 2850 return i40e_vc_send_resp_to_vf(vf, 2851 config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP : 2852 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, 2853 aq_ret); 2854 } 2855 2856 /** 2857 * i40e_vc_config_rss_key 2858 * @vf: pointer to the VF info 2859 * @msg: pointer to the msg buffer 2860 * 2861 * Configure the VF's RSS key 2862 **/ 2863 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg) 2864 { 2865 struct virtchnl_rss_key *vrk = 2866 (struct virtchnl_rss_key *)msg; 2867 struct i40e_pf *pf = vf->pf; 2868 struct i40e_vsi *vsi = NULL; 2869 u16 vsi_id = vrk->vsi_id; 2870 i40e_status aq_ret = 0; 2871 2872 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2873 !i40e_vc_isvalid_vsi_id(vf, vsi_id) || 2874 (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) { 2875 aq_ret = I40E_ERR_PARAM; 2876 goto err; 2877 } 2878 2879 vsi = pf->vsi[vf->lan_vsi_idx]; 2880 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0); 2881 err: 2882 /* send the response to the VF */ 2883 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, 2884 aq_ret); 2885 } 2886 2887 /** 2888 * i40e_vc_config_rss_lut 2889 * @vf: pointer to the VF info 2890 * @msg: pointer to the msg buffer 2891 * 2892 * Configure the VF's RSS LUT 2893 **/ 2894 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg) 2895 { 2896 struct virtchnl_rss_lut *vrl = 2897 (struct virtchnl_rss_lut *)msg; 2898 struct i40e_pf *pf = vf->pf; 2899 struct i40e_vsi *vsi = NULL; 2900 u16 vsi_id = vrl->vsi_id; 2901 i40e_status aq_ret = 0; 2902 2903 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2904 !i40e_vc_isvalid_vsi_id(vf, vsi_id) || 2905 (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) { 2906 aq_ret = I40E_ERR_PARAM; 2907 goto err; 2908 } 2909 2910 vsi = pf->vsi[vf->lan_vsi_idx]; 2911 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE); 2912 /* send the response to the VF */ 2913 err: 2914 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, 2915 aq_ret); 2916 } 2917 2918 /** 2919 * i40e_vc_get_rss_hena 2920 * @vf: pointer to the VF info 2921 * @msg: pointer to the msg buffer 2922 * 2923 * Return the RSS 
HENA bits allowed by the hardware 2924 **/ 2925 static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg) 2926 { 2927 struct virtchnl_rss_hena *vrh = NULL; 2928 struct i40e_pf *pf = vf->pf; 2929 i40e_status aq_ret = 0; 2930 int len = 0; 2931 2932 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2933 aq_ret = I40E_ERR_PARAM; 2934 goto err; 2935 } 2936 len = sizeof(struct virtchnl_rss_hena); 2937 2938 vrh = kzalloc(len, GFP_KERNEL); 2939 if (!vrh) { 2940 aq_ret = I40E_ERR_NO_MEMORY; 2941 len = 0; 2942 goto err; 2943 } 2944 vrh->hena = i40e_pf_get_default_rss_hena(pf); 2945 err: 2946 /* send the response back to the VF */ 2947 aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS, 2948 aq_ret, (u8 *)vrh, len); 2949 kfree(vrh); 2950 return aq_ret; 2951 } 2952 2953 /** 2954 * i40e_vc_set_rss_hena 2955 * @vf: pointer to the VF info 2956 * @msg: pointer to the msg buffer 2957 * 2958 * Set the RSS HENA bits for the VF 2959 **/ 2960 static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg) 2961 { 2962 struct virtchnl_rss_hena *vrh = 2963 (struct virtchnl_rss_hena *)msg; 2964 struct i40e_pf *pf = vf->pf; 2965 struct i40e_hw *hw = &pf->hw; 2966 i40e_status aq_ret = 0; 2967 2968 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2969 aq_ret = I40E_ERR_PARAM; 2970 goto err; 2971 } 2972 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena); 2973 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id), 2974 (u32)(vrh->hena >> 32)); 2975 2976 /* send the response to the VF */ 2977 err: 2978 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret); 2979 } 2980 2981 /** 2982 * i40e_vc_enable_vlan_stripping 2983 * @vf: pointer to the VF info 2984 * @msg: pointer to the msg buffer 2985 * 2986 * Enable vlan header stripping for the VF 2987 **/ 2988 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg) 2989 { 2990 struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; 2991 i40e_status aq_ret = 0; 2992 2993 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2994 aq_ret = I40E_ERR_PARAM; 2995 goto err; 2996 } 2997 2998 i40e_vlan_stripping_enable(vsi); 2999 3000 /* send the response to the VF */ 3001 err: 3002 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, 3003 aq_ret); 3004 } 3005 3006 /** 3007 * i40e_vc_disable_vlan_stripping 3008 * @vf: pointer to the VF info 3009 * @msg: pointer to the msg buffer 3010 * 3011 * Disable vlan header stripping for the VF 3012 **/ 3013 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg) 3014 { 3015 struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; 3016 i40e_status aq_ret = 0; 3017 3018 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3019 aq_ret = I40E_ERR_PARAM; 3020 goto err; 3021 } 3022 3023 i40e_vlan_stripping_disable(vsi); 3024 3025 /* send the response to the VF */ 3026 err: 3027 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 3028 aq_ret); 3029 } 3030 3031 /** 3032 * i40e_validate_cloud_filter 3033 * @vf: pointer to the VF info 3034 * @tc_filter: pointer to the TC filter from the virtchnl message 3035 * 3036 * This function validates a cloud filter programmed as a TC filter for ADq 3037 **/ 3038 static int i40e_validate_cloud_filter(struct i40e_vf *vf, 3039 struct virtchnl_filter *tc_filter) 3040 { 3041 struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec; 3042 struct virtchnl_l4_spec data = tc_filter->data.tcp_spec; 3043 struct i40e_pf *pf = vf->pf; 3044 struct i40e_vsi *vsi = NULL; 3045 struct i40e_mac_filter *f; 3046 struct hlist_node *h; 3047 bool found = false; 3048
int bkt; 3049 3050 if (!tc_filter->action) { 3051 dev_info(&pf->pdev->dev, 3052 "VF %d: Currently ADq doesn't support Drop Action\n", 3053 vf->vf_id); 3054 goto err; 3055 } 3056 3057 /* action_meta is TC number here to which the filter is applied */ 3058 if (!tc_filter->action_meta || 3059 tc_filter->action_meta > I40E_MAX_VF_VSI) { 3060 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n", 3061 vf->vf_id, tc_filter->action_meta); 3062 goto err; 3063 } 3064 3065 /* Check whether the filter is programmed for advanced mode or basic mode. 3066 * There are two ADq modes (for VF only), 3067 * 1. Basic mode: intended to allow as many filter options as possible 3068 * to be added to a VF in Non-trusted mode. Main goal is 3069 * to add filters to its own MAC and VLAN id. 3070 * 2. Advanced mode: allows filters to be applied other than 3071 * its own MAC or VLAN. This mode requires the VF to be 3072 * Trusted. 3073 */ 3074 if (mask.dst_mac[0] && !mask.dst_ip[0]) { 3075 vsi = pf->vsi[vf->lan_vsi_idx]; 3076 f = i40e_find_mac(vsi, data.dst_mac); 3077 3078 if (!f) { 3079 dev_info(&pf->pdev->dev, 3080 "Destination MAC %pM doesn't belong to VF %d\n", 3081 data.dst_mac, vf->vf_id); 3082 goto err; 3083 } 3084 3085 if (mask.vlan_id) { 3086 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, 3087 hlist) { 3088 if (f->vlan == ntohs(data.vlan_id)) { 3089 found = true; 3090 break; 3091 } 3092 } 3093 if (!found) { 3094 dev_info(&pf->pdev->dev, 3095 "VF %d doesn't have any VLAN id %u\n", 3096 vf->vf_id, ntohs(data.vlan_id)); 3097 goto err; 3098 } 3099 } 3100 } else { 3101 /* Check if VF is trusted */ 3102 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 3103 dev_err(&pf->pdev->dev, 3104 "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n", 3105 vf->vf_id); 3106 return I40E_ERR_CONFIG; 3107 } 3108 } 3109 3110 if (mask.dst_mac[0] & data.dst_mac[0]) { 3111 if (is_broadcast_ether_addr(data.dst_mac) || 3112 is_zero_ether_addr(data.dst_mac)) { 3113 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n", 3114 vf->vf_id, data.dst_mac); 3115 goto err; 3116 } 3117 } 3118 3119 if (mask.src_mac[0] & data.src_mac[0]) { 3120 if (is_broadcast_ether_addr(data.src_mac) || 3121 is_zero_ether_addr(data.src_mac)) { 3122 dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n", 3123 vf->vf_id, data.src_mac); 3124 goto err; 3125 } 3126 } 3127 3128 if (mask.dst_port & data.dst_port) { 3129 if (!data.dst_port) { 3130 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n", 3131 vf->vf_id); 3132 goto err; 3133 } 3134 } 3135 3136 if (mask.src_port & data.src_port) { 3137 if (!data.src_port) { 3138 dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n", 3139 vf->vf_id); 3140 goto err; 3141 } 3142 } 3143 3144 if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW && 3145 tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) { 3146 dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n", 3147 vf->vf_id); 3148 goto err; 3149 } 3150 3151 if (mask.vlan_id & data.vlan_id) { 3152 if (ntohs(data.vlan_id) > I40E_MAX_VLANID) { 3153 dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n", 3154 vf->vf_id); 3155 goto err; 3156 } 3157 } 3158 3159 return I40E_SUCCESS; 3160 err: 3161 return I40E_ERR_CONFIG; 3162 } 3163 3164 /** 3165 * i40e_find_vsi_from_seid - searches for the vsi with the given seid 3166 * @vf: pointer to the VF info 3167 * @seid: seid of the VSI it is searching for 3168 **/ 3169 static struct i40e_vsi
*i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid) 3170 { 3171 struct i40e_pf *pf = vf->pf; 3172 struct i40e_vsi *vsi = NULL; 3173 int i; 3174 3175 for (i = 0; i < vf->num_tc; i++) { 3176 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id); 3177 if (vsi && vsi->seid == seid) 3178 return vsi; 3179 } 3180 return NULL; 3181 } 3182 3183 /** 3184 * i40e_del_all_cloud_filters 3185 * @vf: pointer to the VF info 3186 * 3187 * This function deletes all cloud filters 3188 **/ 3189 static void i40e_del_all_cloud_filters(struct i40e_vf *vf) 3190 { 3191 struct i40e_cloud_filter *cfilter = NULL; 3192 struct i40e_pf *pf = vf->pf; 3193 struct i40e_vsi *vsi = NULL; 3194 struct hlist_node *node; 3195 int ret; 3196 3197 hlist_for_each_entry_safe(cfilter, node, 3198 &vf->cloud_filter_list, cloud_node) { 3199 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid); 3200 3201 if (!vsi) { 3202 dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n", 3203 vf->vf_id, cfilter->seid); 3204 continue; 3205 } 3206 3207 if (cfilter->dst_port) 3208 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, 3209 false); 3210 else 3211 ret = i40e_add_del_cloud_filter(vsi, cfilter, false); 3212 if (ret) 3213 dev_err(&pf->pdev->dev, 3214 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n", 3215 vf->vf_id, i40e_stat_str(&pf->hw, ret), 3216 i40e_aq_str(&pf->hw, 3217 pf->hw.aq.asq_last_status)); 3218 3219 hlist_del(&cfilter->cloud_node); 3220 kfree(cfilter); 3221 vf->num_cloud_filters--; 3222 } 3223 } 3224 3225 /** 3226 * i40e_vc_del_cloud_filter 3227 * @vf: pointer to the VF info 3228 * @msg: pointer to the msg buffer 3229 * 3230 * This function deletes a cloud filter programmed as TC filter for ADq 3231 **/ 3232 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg) 3233 { 3234 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; 3235 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; 3236 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; 3237 struct i40e_cloud_filter cfilter, *cf = NULL; 3238 struct i40e_pf *pf = vf->pf; 3239 struct i40e_vsi *vsi = NULL; 3240 struct hlist_node *node; 3241 i40e_status aq_ret = 0; 3242 int i, ret; 3243 3244 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3245 aq_ret = I40E_ERR_PARAM; 3246 goto err; 3247 } 3248 3249 if (!vf->adq_enabled) { 3250 dev_info(&pf->pdev->dev, 3251 "VF %d: ADq not enabled, can't apply cloud filter\n", 3252 vf->vf_id); 3253 aq_ret = I40E_ERR_PARAM; 3254 goto err; 3255 } 3256 3257 if (i40e_validate_cloud_filter(vf, vcf)) { 3258 dev_info(&pf->pdev->dev, 3259 "VF %d: Invalid input, can't apply cloud filter\n", 3260 vf->vf_id); 3261 aq_ret = I40E_ERR_PARAM; 3262 goto err; 3263 } 3264 3265 memset(&cfilter, 0, sizeof(cfilter)); 3266 /* parse destination mac address */ 3267 for (i = 0; i < ETH_ALEN; i++) 3268 cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; 3269 3270 /* parse source mac address */ 3271 for (i = 0; i < ETH_ALEN; i++) 3272 cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; 3273 3274 cfilter.vlan_id = mask.vlan_id & tcf.vlan_id; 3275 cfilter.dst_port = mask.dst_port & tcf.dst_port; 3276 cfilter.src_port = mask.src_port & tcf.src_port; 3277 3278 switch (vcf->flow_type) { 3279 case VIRTCHNL_TCP_V4_FLOW: 3280 cfilter.n_proto = ETH_P_IP; 3281 if (mask.dst_ip[0] & tcf.dst_ip[0]) 3282 memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip, 3283 ARRAY_SIZE(tcf.dst_ip)); 3284 else if (mask.src_ip[0] & tcf.src_ip[0]) 3285 memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip, 3286 ARRAY_SIZE(tcf.dst_ip)); 3287 break; 3288 case
VIRTCHNL_TCP_V6_FLOW: 3289 cfilter.n_proto = ETH_P_IPV6; 3290 if (mask.dst_ip[3] & tcf.dst_ip[3]) 3291 memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip, 3292 sizeof(cfilter.ip.v6.dst_ip6)); 3293 if (mask.src_ip[3] & tcf.src_ip[3]) 3294 memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip, 3295 sizeof(cfilter.ip.v6.src_ip6)); 3296 break; 3297 default: 3298 /* TC filter can be configured based on different combinations 3299 * and in this case IP is not a part of filter config 3300 */ 3301 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", 3302 vf->vf_id); 3303 } 3304 3305 /* get the VSI to which the TC belongs */ 3306 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; 3307 cfilter.seid = vsi->seid; 3308 cfilter.flags = vcf->field_flags; 3309 3310 /* Deleting TC filter */ 3311 if (tcf.dst_port) 3312 ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false); 3313 else 3314 ret = i40e_add_del_cloud_filter(vsi, &cfilter, false); 3315 if (ret) { 3316 dev_err(&pf->pdev->dev, 3317 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n", 3318 vf->vf_id, i40e_stat_str(&pf->hw, ret), 3319 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 3320 goto err; 3321 } 3322 3323 hlist_for_each_entry_safe(cf, node, 3324 &vf->cloud_filter_list, cloud_node) { 3325 if (cf->seid != cfilter.seid) 3326 continue; 3327 if (mask.dst_port) 3328 if (cfilter.dst_port != cf->dst_port) 3329 continue; 3330 if (mask.dst_mac[0]) 3331 if (!ether_addr_equal(cf->dst_mac, cfilter.dst_mac)) 3332 continue; 3333 /* for ipv4 data to be valid, only first byte of mask is set */ 3334 if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0]) 3335 if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip, 3336 ARRAY_SIZE(tcf.dst_ip))) 3337 continue; 3338 /* for ipv6, mask is set for all sixteen bytes (4 words) */ 3339 if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3]) 3340 if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6, 3341 sizeof(cfilter.ip.v6.dst_ip6))) 3342 continue; 3343 if (mask.vlan_id) 3344 if (cfilter.vlan_id != cf->vlan_id) 3345 continue; 3346 3347 hlist_del(&cf->cloud_node); 3348 kfree(cf); 3349 vf->num_cloud_filters--; 3350 } 3351 3352 err: 3353 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER, 3354 aq_ret); 3355 } 3356 3357 /** 3358 * i40e_vc_add_cloud_filter 3359 * @vf: pointer to the VF info 3360 * @msg: pointer to the msg buffer 3361 * 3362 * This function adds a cloud filter programmed as TC filter for ADq 3363 **/ 3364 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) 3365 { 3366 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; 3367 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; 3368 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; 3369 struct i40e_cloud_filter *cfilter = NULL; 3370 struct i40e_pf *pf = vf->pf; 3371 struct i40e_vsi *vsi = NULL; 3372 i40e_status aq_ret = 0; 3373 int i, ret; 3374 3375 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3376 aq_ret = I40E_ERR_PARAM; 3377 goto err; 3378 } 3379 3380 if (!vf->adq_enabled) { 3381 dev_info(&pf->pdev->dev, 3382 "VF %d: ADq is not enabled, can't apply cloud filter\n", 3383 vf->vf_id); 3384 aq_ret = I40E_ERR_PARAM; 3385 goto err; 3386 } 3387 3388 if (i40e_validate_cloud_filter(vf, vcf)) { 3389 dev_info(&pf->pdev->dev, 3390 "VF %d: Invalid input/s, can't apply cloud filter\n", 3391 vf->vf_id); 3392 aq_ret = I40E_ERR_PARAM; 3393 goto err; 3394 } 3395 3396 cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL); 3397 if (!cfilter) 3398 return -ENOMEM; 3399 3400 /* parse destination mac address */ 3401 for (i = 0; i < ETH_ALEN; i++)
3402 cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; 3403 3404 /* parse source mac address */ 3405 for (i = 0; i < ETH_ALEN; i++) 3406 cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; 3407 3408 cfilter->vlan_id = mask.vlan_id & tcf.vlan_id; 3409 cfilter->dst_port = mask.dst_port & tcf.dst_port; 3410 cfilter->src_port = mask.src_port & tcf.src_port; 3411 3412 switch (vcf->flow_type) { 3413 case VIRTCHNL_TCP_V4_FLOW: 3414 cfilter->n_proto = ETH_P_IP; 3415 if (mask.dst_ip[0] & tcf.dst_ip[0]) 3416 memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip, 3417 ARRAY_SIZE(tcf.dst_ip)); 3418 else if (mask.src_ip[0] & tcf.src_ip[0]) 3419 memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip, 3420 ARRAY_SIZE(tcf.dst_ip)); 3421 break; 3422 case VIRTCHNL_TCP_V6_FLOW: 3423 cfilter->n_proto = ETH_P_IPV6; 3424 if (mask.dst_ip[3] & tcf.dst_ip[3]) 3425 memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip, 3426 sizeof(cfilter->ip.v6.dst_ip6)); 3427 if (mask.src_ip[3] & tcf.src_ip[3]) 3428 memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip, 3429 sizeof(cfilter->ip.v6.src_ip6)); 3430 break; 3431 default: 3432 /* TC filter can be configured based on different combinations 3433 * and in this case IP is not a part of filter config 3434 */ 3435 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", 3436 vf->vf_id); 3437 } 3438 3439 /* get the VSI to which the TC belongs */ 3440 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; 3441 cfilter->seid = vsi->seid; 3442 cfilter->flags = vcf->field_flags; 3443 3444 /* Adding cloud filter programmed as TC filter */ 3445 if (tcf.dst_port) 3446 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true); 3447 else 3448 ret = i40e_add_del_cloud_filter(vsi, cfilter, true); 3449 if (ret) { 3450 dev_err(&pf->pdev->dev, 3451 "VF %d: Failed to add cloud filter, err %s aq_err %s\n", 3452 vf->vf_id, i40e_stat_str(&pf->hw, ret), 3453 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 3454 goto err; 3455 } 3456 3457 INIT_HLIST_NODE(&cfilter->cloud_node); 3458 hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list); 3459 vf->num_cloud_filters++; 3460 err: 3461 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER, 3462 aq_ret); 3463 } 3464 3465 /** 3466 * i40e_vc_add_qch_msg: Add queue channel and enable ADq 3467 * @vf: pointer to the VF info 3468 * @msg: pointer to the msg buffer 3469 **/ 3470 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) 3471 { 3472 struct virtchnl_tc_info *tci = 3473 (struct virtchnl_tc_info *)msg; 3474 struct i40e_pf *pf = vf->pf; 3475 struct i40e_link_status *ls = &pf->hw.phy.link_info; 3476 int i, adq_request_qps = 0, speed = 0; 3477 i40e_status aq_ret = 0; 3478 3479 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3480 aq_ret = I40E_ERR_PARAM; 3481 goto err; 3482 } 3483 3484 /* ADq cannot be applied if spoof check is ON */ 3485 if (vf->spoofchk) { 3486 dev_err(&pf->pdev->dev, 3487 "Spoof check is ON, turn it OFF to enable ADq\n"); 3488 aq_ret = I40E_ERR_PARAM; 3489 goto err; 3490 } 3491 3492 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) { 3493 dev_err(&pf->pdev->dev, 3494 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n", 3495 vf->vf_id); 3496 aq_ret = I40E_ERR_PARAM; 3497 goto err; 3498 } 3499 3500 /* max number of traffic classes for VF currently capped at 4 */ 3501 if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) { 3502 dev_err(&pf->pdev->dev, 3503 "VF %d trying to set %u TCs, valid range 1-4 TCs per VF\n", 3504 vf->vf_id, tci->num_tc); 3505 aq_ret = I40E_ERR_PARAM; 3506 goto err; 3507 } 3508 3509 /*
validate queues for each TC */ 3510 for (i = 0; i < tci->num_tc; i++) 3511 if (!tci->list[i].count || 3512 tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) { 3513 dev_err(&pf->pdev->dev, 3514 "VF %d: TC %d trying to set %u queues, valid range 1-4 queues per TC\n", 3515 vf->vf_id, i, tci->list[i].count); 3516 aq_ret = I40E_ERR_PARAM; 3517 goto err; 3518 } 3519 3520 /* need Max VF queues but already have default number of queues */ 3521 adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF; 3522 3523 if (pf->queues_left < adq_request_qps) { 3524 dev_err(&pf->pdev->dev, 3525 "No queues left to allocate to VF %d\n", 3526 vf->vf_id); 3527 aq_ret = I40E_ERR_PARAM; 3528 goto err; 3529 } else { 3530 /* we need to allocate max VF queues to enable ADq so as to 3531 * make sure ADq enabled VF always gets back queues when it 3532 * goes through a reset. 3533 */ 3534 vf->num_queue_pairs = I40E_MAX_VF_QUEUES; 3535 } 3536 3537 /* get link speed in Mbps to validate rate limit */ 3538 switch (ls->link_speed) { 3539 case VIRTCHNL_LINK_SPEED_100MB: 3540 speed = SPEED_100; 3541 break; 3542 case VIRTCHNL_LINK_SPEED_1GB: 3543 speed = SPEED_1000; 3544 break; 3545 case VIRTCHNL_LINK_SPEED_10GB: 3546 speed = SPEED_10000; 3547 break; 3548 case VIRTCHNL_LINK_SPEED_20GB: 3549 speed = SPEED_20000; 3550 break; 3551 case VIRTCHNL_LINK_SPEED_25GB: 3552 speed = SPEED_25000; 3553 break; 3554 case VIRTCHNL_LINK_SPEED_40GB: 3555 speed = SPEED_40000; 3556 break; 3557 default: 3558 dev_err(&pf->pdev->dev, 3559 "Cannot detect link speed\n"); 3560 aq_ret = I40E_ERR_PARAM; 3561 goto err; 3562 } 3563 3564 /* parse data from the queue channel info */ 3565 vf->num_tc = tci->num_tc; 3566 for (i = 0; i < vf->num_tc; i++) { 3567 if (tci->list[i].max_tx_rate) { 3568 if (tci->list[i].max_tx_rate > speed) { 3569 dev_err(&pf->pdev->dev, 3570 "Invalid max tx rate %llu specified for VF %d.\n", 3571 tci->list[i].max_tx_rate, 3572 vf->vf_id); 3573 aq_ret = I40E_ERR_PARAM; 3574 goto err; 3575 } else { 3576 vf->ch[i].max_tx_rate = 3577 tci->list[i].max_tx_rate; 3578 } 3579 } 3580 vf->ch[i].num_qps = tci->list[i].count; 3581 } 3582 3583 /* set this flag only after making sure all inputs are sane */ 3584 vf->adq_enabled = true; 3585 /* num_req_queues is set when the user changes the number of queues via 3586 * ethtool and this causes an issue for the default VSI (which depends 3587 * on this variable) when ADq is enabled, hence reset it. 
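 * The reset below reallocates queue resources, after which the VF
 * renegotiates its configuration.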
3588 */ 3589 vf->num_req_queues = 0; 3590 3591 /* reset the VF in order to allocate resources */ 3592 i40e_vc_notify_vf_reset(vf); 3593 i40e_reset_vf(vf, false); 3594 3595 return I40E_SUCCESS; 3596 3597 /* send the response to the VF */ 3598 err: 3599 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS, 3600 aq_ret); 3601 } 3602 3603 /** 3604 * i40e_vc_del_qch_msg 3605 * @vf: pointer to the VF info 3606 * @msg: pointer to the msg buffer 3607 **/ 3608 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) 3609 { 3610 struct i40e_pf *pf = vf->pf; 3611 i40e_status aq_ret = 0; 3612 3613 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3614 aq_ret = I40E_ERR_PARAM; 3615 goto err; 3616 } 3617 3618 if (vf->adq_enabled) { 3619 i40e_del_all_cloud_filters(vf); 3620 i40e_del_qch(vf); 3621 vf->adq_enabled = false; 3622 vf->num_tc = 0; 3623 dev_info(&pf->pdev->dev, 3624 "Deleting Queue Channels and cloud filters for ADq on VF %d\n", 3625 vf->vf_id); 3626 } else { 3627 dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n", 3628 vf->vf_id); 3629 aq_ret = I40E_ERR_PARAM; 3630 } 3631 3632 /* reset the VF in order to allocate resources */ 3633 i40e_vc_notify_vf_reset(vf); 3634 i40e_reset_vf(vf, false); 3635 3636 return I40E_SUCCESS; 3637 3638 err: 3639 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS, 3640 aq_ret); 3641 } 3642 3643 /** 3644 * i40e_vc_process_vf_msg 3645 * @pf: pointer to the PF structure 3646 * @vf_id: source VF id 3647 * @v_opcode: operation code 3648 * @v_retval: unused return value code 3649 * @msg: pointer to the msg buffer 3650 * @msglen: msg length 3651 * 3652 * called from the common aeq/arq handler to 3653 * process request from VF 3654 **/ 3655 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, 3656 u32 __always_unused v_retval, u8 *msg, u16 msglen) 3657 { 3658 struct i40e_hw *hw = &pf->hw; 3659 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id; 3660 struct i40e_vf *vf; 3661 int ret; 3662 3663 pf->vf_aq_requests++; 3664 if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs) 3665 return -EINVAL; 3666 vf = &(pf->vf[local_vf_id]); 3667 3668 /* Check if VF is disabled. 
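 * A disabled VF is typically in the middle of a reset; drop the message
 * rather than act on stale state.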
*/ 3669 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states)) 3670 return I40E_ERR_PARAM; 3671 3672 /* perform basic checks on the msg */ 3673 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); 3674 3675 /* perform additional checks specific to this driver */ 3676 if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) { 3677 struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg; 3678 3679 if (vrk->key_len != I40E_HKEY_ARRAY_SIZE) 3680 ret = -EINVAL; 3681 } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) { 3682 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; 3683 3684 if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) 3685 ret = -EINVAL; 3686 } 3687 3688 if (ret) { 3689 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM); 3690 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", 3691 local_vf_id, v_opcode, msglen); 3692 switch (ret) { 3693 case VIRTCHNL_STATUS_ERR_PARAM: 3694 return -EPERM; 3695 default: 3696 return -EINVAL; 3697 } 3698 } 3699 3700 switch (v_opcode) { 3701 case VIRTCHNL_OP_VERSION: 3702 ret = i40e_vc_get_version_msg(vf, msg); 3703 break; 3704 case VIRTCHNL_OP_GET_VF_RESOURCES: 3705 ret = i40e_vc_get_vf_resources_msg(vf, msg); 3706 i40e_vc_notify_vf_link_state(vf); 3707 break; 3708 case VIRTCHNL_OP_RESET_VF: 3709 i40e_vc_reset_vf_msg(vf); 3710 ret = 0; 3711 break; 3712 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: 3713 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg); 3714 break; 3715 case VIRTCHNL_OP_CONFIG_VSI_QUEUES: 3716 ret = i40e_vc_config_queues_msg(vf, msg); 3717 break; 3718 case VIRTCHNL_OP_CONFIG_IRQ_MAP: 3719 ret = i40e_vc_config_irq_map_msg(vf, msg); 3720 break; 3721 case VIRTCHNL_OP_ENABLE_QUEUES: 3722 ret = i40e_vc_enable_queues_msg(vf, msg); 3723 i40e_vc_notify_vf_link_state(vf); 3724 break; 3725 case VIRTCHNL_OP_DISABLE_QUEUES: 3726 ret = i40e_vc_disable_queues_msg(vf, msg); 3727 break; 3728 case VIRTCHNL_OP_ADD_ETH_ADDR: 3729 ret = i40e_vc_add_mac_addr_msg(vf, msg); 3730 break; 3731 case VIRTCHNL_OP_DEL_ETH_ADDR: 3732 ret = i40e_vc_del_mac_addr_msg(vf, msg); 3733 break; 3734 case VIRTCHNL_OP_ADD_VLAN: 3735 ret = i40e_vc_add_vlan_msg(vf, msg); 3736 break; 3737 case VIRTCHNL_OP_DEL_VLAN: 3738 ret = i40e_vc_remove_vlan_msg(vf, msg); 3739 break; 3740 case VIRTCHNL_OP_GET_STATS: 3741 ret = i40e_vc_get_stats_msg(vf, msg); 3742 break; 3743 case VIRTCHNL_OP_IWARP: 3744 ret = i40e_vc_iwarp_msg(vf, msg, msglen); 3745 break; 3746 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: 3747 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true); 3748 break; 3749 case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: 3750 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false); 3751 break; 3752 case VIRTCHNL_OP_CONFIG_RSS_KEY: 3753 ret = i40e_vc_config_rss_key(vf, msg); 3754 break; 3755 case VIRTCHNL_OP_CONFIG_RSS_LUT: 3756 ret = i40e_vc_config_rss_lut(vf, msg); 3757 break; 3758 case VIRTCHNL_OP_GET_RSS_HENA_CAPS: 3759 ret = i40e_vc_get_rss_hena(vf, msg); 3760 break; 3761 case VIRTCHNL_OP_SET_RSS_HENA: 3762 ret = i40e_vc_set_rss_hena(vf, msg); 3763 break; 3764 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 3765 ret = i40e_vc_enable_vlan_stripping(vf, msg); 3766 break; 3767 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 3768 ret = i40e_vc_disable_vlan_stripping(vf, msg); 3769 break; 3770 case VIRTCHNL_OP_REQUEST_QUEUES: 3771 ret = i40e_vc_request_queues_msg(vf, msg); 3772 break; 3773 case VIRTCHNL_OP_ENABLE_CHANNELS: 3774 ret = i40e_vc_add_qch_msg(vf, msg); 3775 break; 3776 case VIRTCHNL_OP_DISABLE_CHANNELS: 3777 ret = i40e_vc_del_qch_msg(vf, msg); 3778 break; 3779 case 
VIRTCHNL_OP_ADD_CLOUD_FILTER: 3780 ret = i40e_vc_add_cloud_filter(vf, msg); 3781 break; 3782 case VIRTCHNL_OP_DEL_CLOUD_FILTER: 3783 ret = i40e_vc_del_cloud_filter(vf, msg); 3784 break; 3785 case VIRTCHNL_OP_UNKNOWN: 3786 default: 3787 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", 3788 v_opcode, local_vf_id); 3789 ret = i40e_vc_send_resp_to_vf(vf, v_opcode, 3790 I40E_ERR_NOT_IMPLEMENTED); 3791 break; 3792 } 3793 3794 return ret; 3795 } 3796 3797 /** 3798 * i40e_vc_process_vflr_event 3799 * @pf: pointer to the PF structure 3800 * 3801 * called from the VFLR interrupt handler to 3802 * free up VF resources and state variables 3803 **/ 3804 int i40e_vc_process_vflr_event(struct i40e_pf *pf) 3805 { 3806 struct i40e_hw *hw = &pf->hw; 3807 u32 reg, reg_idx, bit_idx; 3808 struct i40e_vf *vf; 3809 int vf_id; 3810 3811 if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state)) 3812 return 0; 3813 3814 /* Re-enable the VFLR interrupt cause here, before looking for which 3815 * VF got reset. Otherwise, if another VF gets a reset while the 3816 * first one is being processed, that interrupt will be lost, and 3817 * that VF will be stuck in reset forever. 3818 */ 3819 reg = rd32(hw, I40E_PFINT_ICR0_ENA); 3820 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK; 3821 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 3822 i40e_flush(hw); 3823 3824 clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state); 3825 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { 3826 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; 3827 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; 3828 /* read GLGEN_VFLRSTAT register to find out the flr VFs */ 3829 vf = &pf->vf[vf_id]; 3830 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); 3831 if (reg & BIT(bit_idx)) 3832 /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */ 3833 i40e_reset_vf(vf, true); 3834 } 3835 3836 return 0; 3837 } 3838 3839 /** 3840 * i40e_validate_vf 3841 * @pf: the physical function 3842 * @vf_id: VF identifier 3843 * 3844 * Check that the VF is enabled and the VSI exists. 3845 * 3846 * Returns 0 on success, negative on failure 3847 **/ 3848 static int i40e_validate_vf(struct i40e_pf *pf, int vf_id) 3849 { 3850 struct i40e_vsi *vsi; 3851 struct i40e_vf *vf; 3852 int ret = 0; 3853 3854 if (vf_id >= pf->num_alloc_vfs) { 3855 dev_err(&pf->pdev->dev, 3856 "Invalid VF Identifier %d\n", vf_id); 3857 ret = -EINVAL; 3858 goto err_out; 3859 } 3860 vf = &pf->vf[vf_id]; 3861 vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id); 3862 if (!vsi) 3863 ret = -EINVAL; 3864 err_out: 3865 return ret; 3866 } 3867 3868 /** 3869 * i40e_ndo_set_vf_mac 3870 * @netdev: network interface device structure 3871 * @vf_id: VF identifier 3872 * @mac: mac address 3873 * 3874 * program VF mac address 3875 **/ 3876 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) 3877 { 3878 struct i40e_netdev_priv *np = netdev_priv(netdev); 3879 struct i40e_vsi *vsi = np->vsi; 3880 struct i40e_pf *pf = vsi->back; 3881 struct i40e_mac_filter *f; 3882 struct i40e_vf *vf; 3883 int ret = 0; 3884 struct hlist_node *h; 3885 int bkt; 3886 u8 i; 3887 3888 /* validate the request */ 3889 ret = i40e_validate_vf(pf, vf_id); 3890 if (ret) 3891 return ret; /* don't clear __I40E_VIRTCHNL_OP_PENDING, this path never set it */ 3892 3893 vf = &pf->vf[vf_id]; 3894 vsi = pf->vsi[vf->lan_vsi_idx]; 3895 3896 /* When the VF is resetting wait until it is done. 3897 * It can take up to 200 milliseconds, 3898 * but wait for up to 300 milliseconds to be safe. 
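 * (15 iterations of msleep(20) in the loop below gives the 300 ms bound.)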
/**
 * i40e_validate_vf
 * @pf: the physical function
 * @vf_id: VF identifier
 *
 * Check that the VF is enabled and the VSI exists.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
{
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev,
			"Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto err_out;
	}
	vf = &pf->vf[vf_id];
	vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
	if (!vsi)
		ret = -EINVAL;
err_out:
	return ret;
}

/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: mac address
 *
 * program VF mac address
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;
	struct hlist_node *h;
	int bkt;
	u8 i;

	/* Take the op-pending bit before any early exit so that the
	 * error_param path below never clears a bit set by someone else.
	 */
	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error_param;

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_idx];

	/* When the VF is resetting wait until it is done.
	 * It can take up to 200 milliseconds,
	 * but wait for up to 300 milliseconds to be safe.
	 */
	for (i = 0; i < 15; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
			break;
		msleep(20);
	}
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_param;
	}

	if (is_multicast_ether_addr(mac)) {
		dev_err(&pf->pdev->dev,
			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	/* Lock once because below invoked function add/del_filter requires
	 * mac_filter_hash_lock to be held
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* delete the temporary mac address */
	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);

	/* Delete all the filters for this VSI - we're going to kill it
	 * anyway.
	 */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		__i40e_del_filter(vsi, f);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program mac filter */
	if (i40e_sync_vsi_filters(vsi)) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		ret = -EIO;
		goto error_param;
	}
	ether_addr_copy(vf->default_lan_addr.addr, mac);

	if (is_zero_ether_addr(mac)) {
		vf->pf_set_mac = false;
		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
	} else {
		vf->pf_set_mac = true;
		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
			 mac, vf_id);
	}

	/* Force the VF interface down so it has to bring up with new MAC
	 * address
	 */
	i40e_vc_disable_vf(vf);
	dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");

error_param:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
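/* Example (assuming the standard iproute2 tooling):
 *
 *   ip link set <pf-netdev> vf 0 mac 00:11:22:33:44:55
 *
 * reaches this handler through the ndo_set_vf_mac callback; setting an
 * all-zero MAC clears the PF-administered address again.
 */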
/**
 * i40e_vsi_has_vlans - True if VSI has configured VLANs
 * @vsi: pointer to the vsi
 *
 * Check if a VSI has configured any VLANs. False if we have a port VLAN or if
 * we have no configured VLANs. Do not call while holding the
 * mac_filter_hash_lock.
 */
static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
{
	bool have_vlans;

	/* If we have a port VLAN, then the VSI cannot have any VLANs
	 * configured, as all MAC/VLAN filters will be assigned to the PVID.
	 */
	if (vsi->info.pvid)
		return false;

	/* Since we don't have a PVID, we know that if the device is in VLAN
	 * mode it must be because of a VLAN filter configured on this VSI.
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	have_vlans = i40e_is_vsi_in_vlan(vsi);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	return have_vlans;
}
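/* Design note: a port VLAN (PVID) and VF-configured VLAN filters are
 * mutually exclusive here, since all MAC/VLAN filters get assigned to the
 * PVID; that is what lets the helper above answer "false" without taking
 * mac_filter_hash_lock whenever a PVID is set.
 */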
/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN identifier
 * @qos: priority setting
 * @vlan_proto: vlan protocol
 *
 * program VF vlan id and/or qos
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
			      u16 vlan_id, u8 qos, __be16 vlan_proto)
{
	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error_pvid;

	if (vlan_id > I40E_MAX_VLANID || qos > 7) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	if (vlan_proto != htons(ETH_P_8021Q)) {
		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
		ret = -EPROTONOSUPPORT;
		goto error_pvid;
	}

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_pvid;
	}

	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
		/* duplicate request, so just return success */
		goto error_pvid;

	if (i40e_vsi_has_vlans(vsi)) {
		dev_err(&pf->pdev->dev,
			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
			vf_id);
		/* Administrator Error - knock the VF offline until it does
		 * the right thing by reconfiguring its network correctly
		 * and then reloading the VF driver.
		 */
		i40e_vc_disable_vf(vf);
		/* During reset the VF got a new VSI, so refresh the pointer. */
		vsi = pf->vsi[vf->lan_vsi_idx];
	}

	/* Locked once because multiple functions below iterate list */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* Check for condition where there was already a port VLAN ID
	 * filter set and now it is being deleted by setting it to zero.
	 * Additionally check for the condition where there was a port
	 * VLAN but now there is a new and different port VLAN being set.
	 * Before deleting all the old VLAN filters we must add new ones
	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
	 * MAC addresses deleted.
	 */
	if ((!(vlan_id || qos) ||
	     vlanprio != le16_to_cpu(vsi->info.pvid)) &&
	    vsi->info.pvid) {
		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}
	}

	if (vsi->info.pvid) {
		/* remove all filters on the old VLAN */
		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
					   VLAN_VID_MASK));
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi, vlanprio);
	else
		i40e_vsi_remove_pvid(vsi);
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter for each MAC */
		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}

		/* remove the previously added non-VLAN MAC filters */
		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* Schedule the worker thread to take care of applying changes */
	i40e_service_event_schedule(vsi->back);

	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}

	/* The Port VLAN needs to be saved across resets the same as the
	 * default LAN MAC address.
	 */
	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
	ret = 0;

error_pvid:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
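/* Example (assuming iproute2):
 *
 *   ip link set <pf-netdev> vf 0 vlan 100 qos 3
 *
 * programs port VLAN 100 with priority 3 for VF 0, while
 * "vf 0 vlan 0 qos 0" removes the port VLAN again.
 */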
Try again.\n", 4179 vf_id); 4180 ret = -EAGAIN; 4181 goto error; 4182 } 4183 4184 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); 4185 if (ret) 4186 goto error; 4187 4188 vf->tx_rate = max_tx_rate; 4189 error: 4190 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4191 return ret; 4192 } 4193 4194 /** 4195 * i40e_ndo_get_vf_config 4196 * @netdev: network interface device structure 4197 * @vf_id: VF identifier 4198 * @ivi: VF configuration structure 4199 * 4200 * return VF configuration 4201 **/ 4202 int i40e_ndo_get_vf_config(struct net_device *netdev, 4203 int vf_id, struct ifla_vf_info *ivi) 4204 { 4205 struct i40e_netdev_priv *np = netdev_priv(netdev); 4206 struct i40e_vsi *vsi = np->vsi; 4207 struct i40e_pf *pf = vsi->back; 4208 struct i40e_vf *vf; 4209 int ret = 0; 4210 4211 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4212 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4213 return -EAGAIN; 4214 } 4215 4216 /* validate the request */ 4217 ret = i40e_validate_vf(pf, vf_id); 4218 if (ret) 4219 goto error_param; 4220 4221 vf = &pf->vf[vf_id]; 4222 /* first vsi is always the LAN vsi */ 4223 vsi = pf->vsi[vf->lan_vsi_idx]; 4224 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { 4225 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", 4226 vf_id); 4227 ret = -EAGAIN; 4228 goto error_param; 4229 } 4230 4231 ivi->vf = vf_id; 4232 4233 ether_addr_copy(ivi->mac, vf->default_lan_addr.addr); 4234 4235 ivi->max_tx_rate = vf->tx_rate; 4236 ivi->min_tx_rate = 0; 4237 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK; 4238 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >> 4239 I40E_VLAN_PRIORITY_SHIFT; 4240 if (vf->link_forced == false) 4241 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; 4242 else if (vf->link_up == true) 4243 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; 4244 else 4245 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; 4246 ivi->spoofchk = vf->spoofchk; 4247 ivi->trusted = vf->trusted; 4248 ret = 0; 4249 4250 error_param: 4251 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4252 return ret; 4253 } 4254 4255 /** 4256 * i40e_ndo_set_vf_link_state 4257 * @netdev: network interface device structure 4258 * @vf_id: VF identifier 4259 * @link: required link state 4260 * 4261 * Set the link state of a specified VF, regardless of physical link state 4262 **/ 4263 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) 4264 { 4265 struct i40e_netdev_priv *np = netdev_priv(netdev); 4266 struct i40e_pf *pf = np->vsi->back; 4267 struct virtchnl_pf_event pfe; 4268 struct i40e_hw *hw = &pf->hw; 4269 struct i40e_vf *vf; 4270 int abs_vf_id; 4271 int ret = 0; 4272 4273 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4274 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4275 return -EAGAIN; 4276 } 4277 4278 /* validate the request */ 4279 if (vf_id >= pf->num_alloc_vfs) { 4280 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 4281 ret = -EINVAL; 4282 goto error_out; 4283 } 4284 4285 vf = &pf->vf[vf_id]; 4286 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 4287 4288 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; 4289 pfe.severity = PF_EVENT_SEVERITY_INFO; 4290 4291 switch (link) { 4292 case IFLA_VF_LINK_STATE_AUTO: 4293 vf->link_forced = false; 4294 pfe.event_data.link_event.link_status = 4295 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; 4296 pfe.event_data.link_event.link_speed = 4297 (enum virtchnl_link_speed) 4298 
/**
 * i40e_ndo_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link: required link state
 *
 * Set the link state of a specified VF, regardless of physical link state
 **/
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct virtchnl_pf_event pfe;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int abs_vf_id;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_out;
	}

	vf = &pf->vf[vf_id];
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		pfe.event_data.link_event.link_status =
			pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			(enum virtchnl_link_speed)
			pf->hw.phy.link_info.link_speed;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		pfe.event_data.link_event.link_status = true;
		pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		pfe.event_data.link_event.link_status = false;
		pfe.event_data.link_event.link_speed = 0;
		break;
	default:
		ret = -EINVAL;
		goto error_out;
	}
	/* Notify the VF of its new link state */
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);

error_out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
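/* Example (assuming iproute2):
 *
 *   ip link set <pf-netdev> vf 0 state disable
 *
 * forces the VF link down regardless of physical link, while
 * "state auto" hands control back to the real PHY status reported in
 * pf->hw.phy.link_info.
 */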
Try again.\n", 4358 vf_id); 4359 ret = -EAGAIN; 4360 goto out; 4361 } 4362 4363 if (enable == vf->spoofchk) 4364 goto out; 4365 4366 vf->spoofchk = enable; 4367 memset(&ctxt, 0, sizeof(ctxt)); 4368 ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid; 4369 ctxt.pf_num = pf->hw.pf_id; 4370 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); 4371 if (enable) 4372 ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | 4373 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); 4374 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); 4375 if (ret) { 4376 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n", 4377 ret); 4378 ret = -EIO; 4379 } 4380 out: 4381 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4382 return ret; 4383 } 4384 4385 /** 4386 * i40e_ndo_set_vf_trust 4387 * @netdev: network interface device structure of the pf 4388 * @vf_id: VF identifier 4389 * @setting: trust setting 4390 * 4391 * Enable or disable VF trust setting 4392 **/ 4393 int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) 4394 { 4395 struct i40e_netdev_priv *np = netdev_priv(netdev); 4396 struct i40e_pf *pf = np->vsi->back; 4397 struct i40e_vf *vf; 4398 int ret = 0; 4399 4400 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4401 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4402 return -EAGAIN; 4403 } 4404 4405 /* validate the request */ 4406 if (vf_id >= pf->num_alloc_vfs) { 4407 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 4408 ret = -EINVAL; 4409 goto out; 4410 } 4411 4412 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 4413 dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n"); 4414 ret = -EINVAL; 4415 goto out; 4416 } 4417 4418 vf = &pf->vf[vf_id]; 4419 4420 if (setting == vf->trusted) 4421 goto out; 4422 4423 vf->trusted = setting; 4424 i40e_vc_disable_vf(vf); 4425 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n", 4426 vf_id, setting ? "" : "un"); 4427 4428 if (vf->adq_enabled) { 4429 if (!vf->trusted) { 4430 dev_info(&pf->pdev->dev, 4431 "VF %u no longer Trusted, deleting all cloud filters\n", 4432 vf_id); 4433 i40e_del_all_cloud_filters(vf); 4434 } 4435 } 4436 4437 out: 4438 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4439 return ret; 4440 } 4441