1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2013 - 2018 Intel Corporation. */ 3 4 #include "i40e.h" 5 6 /*********************notification routines***********************/ 7 8 /** 9 * i40e_vc_vf_broadcast 10 * @pf: pointer to the PF structure 11 * @v_opcode: operation code 12 * @v_retval: return value 13 * @msg: pointer to the msg buffer 14 * @msglen: msg length 15 * 16 * send a message to all VFs on a given PF 17 **/ 18 static void i40e_vc_vf_broadcast(struct i40e_pf *pf, 19 enum virtchnl_ops v_opcode, 20 i40e_status v_retval, u8 *msg, 21 u16 msglen) 22 { 23 struct i40e_hw *hw = &pf->hw; 24 struct i40e_vf *vf = pf->vf; 25 int i; 26 27 for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { 28 int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; 29 /* Not all vfs are enabled so skip the ones that are not */ 30 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) && 31 !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 32 continue; 33 34 /* Ignore return value on purpose - a given VF may fail, but 35 * we need to keep going and send to all of them 36 */ 37 i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, 38 msg, msglen, NULL); 39 } 40 } 41 42 /** 43 * i40e_vc_notify_vf_link_state 44 * @vf: pointer to the VF structure 45 * 46 * send a link status message to a single VF 47 **/ 48 static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) 49 { 50 struct virtchnl_pf_event pfe; 51 struct i40e_pf *pf = vf->pf; 52 struct i40e_hw *hw = &pf->hw; 53 struct i40e_link_status *ls = &pf->hw.phy.link_info; 54 int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; 55 56 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; 57 pfe.severity = PF_EVENT_SEVERITY_INFO; 58 if (vf->link_forced) { 59 pfe.event_data.link_event.link_status = vf->link_up; 60 pfe.event_data.link_event.link_speed = 61 (vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0); 62 } else { 63 pfe.event_data.link_event.link_status = 64 ls->link_info & I40E_AQ_LINK_UP; 65 pfe.event_data.link_event.link_speed = 66 i40e_virtchnl_link_speed(ls->link_speed); 67 } 68 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 69 0, (u8 *)&pfe, sizeof(pfe), NULL); 70 } 71 72 /** 73 * i40e_vc_notify_link_state 74 * @pf: pointer to the PF structure 75 * 76 * send a link status message to all VFs on a given PF 77 **/ 78 void i40e_vc_notify_link_state(struct i40e_pf *pf) 79 { 80 int i; 81 82 for (i = 0; i < pf->num_alloc_vfs; i++) 83 i40e_vc_notify_vf_link_state(&pf->vf[i]); 84 } 85 86 /** 87 * i40e_vc_notify_reset 88 * @pf: pointer to the PF structure 89 * 90 * indicate a pending reset to all VFs on a given PF 91 **/ 92 void i40e_vc_notify_reset(struct i40e_pf *pf) 93 { 94 struct virtchnl_pf_event pfe; 95 96 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; 97 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; 98 i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0, 99 (u8 *)&pfe, sizeof(struct virtchnl_pf_event)); 100 } 101 102 /** 103 * i40e_vc_notify_vf_reset 104 * @vf: pointer to the VF structure 105 * 106 * indicate a pending reset to the given VF 107 **/ 108 void i40e_vc_notify_vf_reset(struct i40e_vf *vf) 109 { 110 struct virtchnl_pf_event pfe; 111 int abs_vf_id; 112 113 /* validate the request */ 114 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) 115 return; 116 117 /* verify if the VF is in either init or active before proceeding */ 118 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) && 119 !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 120 return; 121 122 abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id; 123 124 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; 125 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; 126 i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT, 127 0, (u8 *)&pfe, 128 sizeof(struct virtchnl_pf_event), NULL); 129 } 130 /***********************misc routines*****************************/ 131 132 /** 133 * i40e_vc_disable_vf 134 * @vf: pointer to the VF info 135 * 136 * Disable the VF through a SW reset. 137 **/ 138 static inline void i40e_vc_disable_vf(struct i40e_vf *vf) 139 { 140 int i; 141 142 i40e_vc_notify_vf_reset(vf); 143 144 /* We want to ensure that an actual reset occurs initiated after this 145 * function was called. However, we do not want to wait forever, so 146 * we'll give a reasonable time and print a message if we failed to 147 * ensure a reset. 148 */ 149 for (i = 0; i < 20; i++) { 150 if (i40e_reset_vf(vf, false)) 151 return; 152 usleep_range(10000, 20000); 153 } 154 155 dev_warn(&vf->pf->pdev->dev, 156 "Failed to initiate reset for VF %d after 200 milliseconds\n", 157 vf->vf_id); 158 } 159 160 /** 161 * i40e_vc_isvalid_vsi_id 162 * @vf: pointer to the VF info 163 * @vsi_id: VF relative VSI id 164 * 165 * check for the valid VSI id 166 **/ 167 static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id) 168 { 169 struct i40e_pf *pf = vf->pf; 170 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); 171 172 return (vsi && (vsi->vf_id == vf->vf_id)); 173 } 174 175 /** 176 * i40e_vc_isvalid_queue_id 177 * @vf: pointer to the VF info 178 * @vsi_id: vsi id 179 * @qid: vsi relative queue id 180 * 181 * check for the valid queue id 182 **/ 183 static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id, 184 u8 qid) 185 { 186 struct i40e_pf *pf = vf->pf; 187 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); 188 189 return (vsi && (qid < vsi->alloc_queue_pairs)); 190 } 191 192 /** 193 * i40e_vc_isvalid_vector_id 194 * @vf: pointer to the VF info 195 * @vector_id: VF relative vector id 196 * 197 * check for the valid vector id 198 **/ 199 static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id) 200 { 201 struct i40e_pf *pf = vf->pf; 202 203 return vector_id < pf->hw.func_caps.num_msix_vectors_vf; 204 } 205 206 /***********************vf resource mgmt routines*****************/ 207 208 /** 209 * i40e_vc_get_pf_queue_id 210 * @vf: pointer to the VF info 211 * @vsi_id: id of VSI as provided by the FW 212 * @vsi_queue_id: vsi relative queue id 213 * 214 * return PF relative queue id 215 **/ 216 static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id, 217 u8 vsi_queue_id) 218 { 219 struct i40e_pf *pf = vf->pf; 220 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); 221 u16 pf_queue_id = I40E_QUEUE_END_OF_LIST; 222 223 if (!vsi) 224 return pf_queue_id; 225 226 if (le16_to_cpu(vsi->info.mapping_flags) & 227 I40E_AQ_VSI_QUE_MAP_NONCONTIG) 228 pf_queue_id = 229 le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]); 230 else 231 pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) + 232 vsi_queue_id; 233 234 return pf_queue_id; 235 } 236 237 /** 238 * i40e_get_real_pf_qid 239 * @vf: pointer to the VF info 240 * @vsi_id: vsi id 241 * @queue_id: queue number 242 * 243 * wrapper function to get pf_queue_id handling ADq code as well 244 **/ 245 static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id) 246 { 247 int i; 248 249 if (vf->adq_enabled) { 250 /* Although VF considers all the queues(can be 1 to 16) as its 251 * own but they may actually belong to different VSIs(up to 4). 252 * We need to find which queues belongs to which VSI. 253 */ 254 for (i = 0; i < vf->num_tc; i++) { 255 if (queue_id < vf->ch[i].num_qps) { 256 vsi_id = vf->ch[i].vsi_id; 257 break; 258 } 259 /* find right queue id which is relative to a 260 * given VSI. 261 */ 262 queue_id -= vf->ch[i].num_qps; 263 } 264 } 265 266 return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id); 267 } 268 269 /** 270 * i40e_config_irq_link_list 271 * @vf: pointer to the VF info 272 * @vsi_id: id of VSI as given by the FW 273 * @vecmap: irq map info 274 * 275 * configure irq link list from the map 276 **/ 277 static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, 278 struct virtchnl_vector_map *vecmap) 279 { 280 unsigned long linklistmap = 0, tempmap; 281 struct i40e_pf *pf = vf->pf; 282 struct i40e_hw *hw = &pf->hw; 283 u16 vsi_queue_id, pf_queue_id; 284 enum i40e_queue_type qtype; 285 u16 next_q, vector_id, size; 286 u32 reg, reg_idx; 287 u16 itr_idx = 0; 288 289 vector_id = vecmap->vector_id; 290 /* setup the head */ 291 if (0 == vector_id) 292 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); 293 else 294 reg_idx = I40E_VPINT_LNKLSTN( 295 ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) + 296 (vector_id - 1)); 297 298 if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) { 299 /* Special case - No queues mapped on this vector */ 300 wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK); 301 goto irq_list_done; 302 } 303 tempmap = vecmap->rxq_map; 304 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { 305 linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES * 306 vsi_queue_id)); 307 } 308 309 tempmap = vecmap->txq_map; 310 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { 311 linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES * 312 vsi_queue_id + 1)); 313 } 314 315 size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES; 316 next_q = find_first_bit(&linklistmap, size); 317 if (unlikely(next_q == size)) 318 goto irq_list_done; 319 320 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; 321 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; 322 pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id); 323 reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id); 324 325 wr32(hw, reg_idx, reg); 326 327 while (next_q < size) { 328 switch (qtype) { 329 case I40E_QUEUE_TYPE_RX: 330 reg_idx = I40E_QINT_RQCTL(pf_queue_id); 331 itr_idx = vecmap->rxitr_idx; 332 break; 333 case I40E_QUEUE_TYPE_TX: 334 reg_idx = I40E_QINT_TQCTL(pf_queue_id); 335 itr_idx = vecmap->txitr_idx; 336 break; 337 default: 338 break; 339 } 340 341 next_q = find_next_bit(&linklistmap, size, next_q + 1); 342 if (next_q < size) { 343 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; 344 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; 345 pf_queue_id = i40e_get_real_pf_qid(vf, 346 vsi_id, 347 vsi_queue_id); 348 } else { 349 pf_queue_id = I40E_QUEUE_END_OF_LIST; 350 qtype = 0; 351 } 352 353 /* format for the RQCTL & TQCTL regs is same */ 354 reg = (vector_id) | 355 (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | 356 (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | 357 BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) | 358 (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT); 359 wr32(hw, reg_idx, reg); 360 } 361 362 /* if the vf is running in polling mode and using interrupt zero, 363 * need to disable auto-mask on enabling zero interrupt for VFs. 364 */ 365 if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) && 366 (vector_id == 0)) { 367 reg = rd32(hw, I40E_GLINT_CTL); 368 if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) { 369 reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK; 370 wr32(hw, I40E_GLINT_CTL, reg); 371 } 372 } 373 374 irq_list_done: 375 i40e_flush(hw); 376 } 377 378 /** 379 * i40e_release_iwarp_qvlist 380 * @vf: pointer to the VF. 381 * 382 **/ 383 static void i40e_release_iwarp_qvlist(struct i40e_vf *vf) 384 { 385 struct i40e_pf *pf = vf->pf; 386 struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info; 387 u32 msix_vf; 388 u32 i; 389 390 if (!vf->qvlist_info) 391 return; 392 393 msix_vf = pf->hw.func_caps.num_msix_vectors_vf; 394 for (i = 0; i < qvlist_info->num_vectors; i++) { 395 struct virtchnl_iwarp_qv_info *qv_info; 396 u32 next_q_index, next_q_type; 397 struct i40e_hw *hw = &pf->hw; 398 u32 v_idx, reg_idx, reg; 399 400 qv_info = &qvlist_info->qv_info[i]; 401 if (!qv_info) 402 continue; 403 v_idx = qv_info->v_idx; 404 if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) { 405 /* Figure out the queue after CEQ and make that the 406 * first queue. 407 */ 408 reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx; 409 reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx)); 410 next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK) 411 >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT; 412 next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK) 413 >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT; 414 415 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); 416 reg = (next_q_index & 417 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) | 418 (next_q_type << 419 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); 420 421 wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg); 422 } 423 } 424 kfree(vf->qvlist_info); 425 vf->qvlist_info = NULL; 426 } 427 428 /** 429 * i40e_config_iwarp_qvlist 430 * @vf: pointer to the VF info 431 * @qvlist_info: queue and vector list 432 * 433 * Return 0 on success or < 0 on error 434 **/ 435 static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, 436 struct virtchnl_iwarp_qvlist_info *qvlist_info) 437 { 438 struct i40e_pf *pf = vf->pf; 439 struct i40e_hw *hw = &pf->hw; 440 struct virtchnl_iwarp_qv_info *qv_info; 441 u32 v_idx, i, reg_idx, reg; 442 u32 next_q_idx, next_q_type; 443 u32 msix_vf, size; 444 445 size = sizeof(struct virtchnl_iwarp_qvlist_info) + 446 (sizeof(struct virtchnl_iwarp_qv_info) * 447 (qvlist_info->num_vectors - 1)); 448 vf->qvlist_info = kzalloc(size, GFP_KERNEL); 449 if (!vf->qvlist_info) 450 return -ENOMEM; 451 452 vf->qvlist_info->num_vectors = qvlist_info->num_vectors; 453 454 msix_vf = pf->hw.func_caps.num_msix_vectors_vf; 455 for (i = 0; i < qvlist_info->num_vectors; i++) { 456 qv_info = &qvlist_info->qv_info[i]; 457 if (!qv_info) 458 continue; 459 v_idx = qv_info->v_idx; 460 461 /* Validate vector id belongs to this vf */ 462 if (!i40e_vc_isvalid_vector_id(vf, v_idx)) 463 goto err; 464 465 vf->qvlist_info->qv_info[i] = *qv_info; 466 467 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); 468 /* We might be sharing the interrupt, so get the first queue 469 * index and type, push it down the list by adding the new 470 * queue on top. Also link it with the new queue in CEQCTL. 471 */ 472 reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx)); 473 next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >> 474 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT); 475 next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >> 476 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); 477 478 if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) { 479 reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx; 480 reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK | 481 (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) | 482 (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) | 483 (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) | 484 (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)); 485 wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg); 486 487 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); 488 reg = (qv_info->ceq_idx & 489 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) | 490 (I40E_QUEUE_TYPE_PE_CEQ << 491 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); 492 wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg); 493 } 494 495 if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) { 496 reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK | 497 (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) | 498 (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)); 499 500 wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg); 501 } 502 } 503 504 return 0; 505 err: 506 kfree(vf->qvlist_info); 507 vf->qvlist_info = NULL; 508 return -EINVAL; 509 } 510 511 /** 512 * i40e_config_vsi_tx_queue 513 * @vf: pointer to the VF info 514 * @vsi_id: id of VSI as provided by the FW 515 * @vsi_queue_id: vsi relative queue index 516 * @info: config. info 517 * 518 * configure tx queue 519 **/ 520 static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id, 521 u16 vsi_queue_id, 522 struct virtchnl_txq_info *info) 523 { 524 struct i40e_pf *pf = vf->pf; 525 struct i40e_hw *hw = &pf->hw; 526 struct i40e_hmc_obj_txq tx_ctx; 527 struct i40e_vsi *vsi; 528 u16 pf_queue_id; 529 u32 qtx_ctl; 530 int ret = 0; 531 532 if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) { 533 ret = -ENOENT; 534 goto error_context; 535 } 536 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); 537 vsi = i40e_find_vsi_from_id(pf, vsi_id); 538 if (!vsi) { 539 ret = -ENOENT; 540 goto error_context; 541 } 542 543 /* clear the context structure first */ 544 memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq)); 545 546 /* only set the required fields */ 547 tx_ctx.base = info->dma_ring_addr / 128; 548 tx_ctx.qlen = info->ring_len; 549 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]); 550 tx_ctx.rdylist_act = 0; 551 tx_ctx.head_wb_ena = info->headwb_enabled; 552 tx_ctx.head_wb_addr = info->dma_headwb_addr; 553 554 /* clear the context in the HMC */ 555 ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id); 556 if (ret) { 557 dev_err(&pf->pdev->dev, 558 "Failed to clear VF LAN Tx queue context %d, error: %d\n", 559 pf_queue_id, ret); 560 ret = -ENOENT; 561 goto error_context; 562 } 563 564 /* set the context in the HMC */ 565 ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx); 566 if (ret) { 567 dev_err(&pf->pdev->dev, 568 "Failed to set VF LAN Tx queue context %d error: %d\n", 569 pf_queue_id, ret); 570 ret = -ENOENT; 571 goto error_context; 572 } 573 574 /* associate this queue with the PCI VF function */ 575 qtx_ctl = I40E_QTX_CTL_VF_QUEUE; 576 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) 577 & I40E_QTX_CTL_PF_INDX_MASK); 578 qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id) 579 << I40E_QTX_CTL_VFVM_INDX_SHIFT) 580 & I40E_QTX_CTL_VFVM_INDX_MASK); 581 wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl); 582 i40e_flush(hw); 583 584 error_context: 585 return ret; 586 } 587 588 /** 589 * i40e_config_vsi_rx_queue 590 * @vf: pointer to the VF info 591 * @vsi_id: id of VSI as provided by the FW 592 * @vsi_queue_id: vsi relative queue index 593 * @info: config. info 594 * 595 * configure rx queue 596 **/ 597 static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, 598 u16 vsi_queue_id, 599 struct virtchnl_rxq_info *info) 600 { 601 struct i40e_pf *pf = vf->pf; 602 struct i40e_hw *hw = &pf->hw; 603 struct i40e_hmc_obj_rxq rx_ctx; 604 u16 pf_queue_id; 605 int ret = 0; 606 607 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); 608 609 /* clear the context structure first */ 610 memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq)); 611 612 /* only set the required fields */ 613 rx_ctx.base = info->dma_ring_addr / 128; 614 rx_ctx.qlen = info->ring_len; 615 616 if (info->splithdr_enabled) { 617 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | 618 I40E_RX_SPLIT_IP | 619 I40E_RX_SPLIT_TCP_UDP | 620 I40E_RX_SPLIT_SCTP; 621 /* header length validation */ 622 if (info->hdr_size > ((2 * 1024) - 64)) { 623 ret = -EINVAL; 624 goto error_param; 625 } 626 rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT; 627 628 /* set split mode 10b */ 629 rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT; 630 } 631 632 /* databuffer length validation */ 633 if (info->databuffer_size > ((16 * 1024) - 128)) { 634 ret = -EINVAL; 635 goto error_param; 636 } 637 rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT; 638 639 /* max pkt. length validation */ 640 if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) { 641 ret = -EINVAL; 642 goto error_param; 643 } 644 rx_ctx.rxmax = info->max_pkt_size; 645 646 /* enable 32bytes desc always */ 647 rx_ctx.dsize = 1; 648 649 /* default values */ 650 rx_ctx.lrxqthresh = 1; 651 rx_ctx.crcstrip = 1; 652 rx_ctx.prefena = 1; 653 rx_ctx.l2tsel = 1; 654 655 /* clear the context in the HMC */ 656 ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id); 657 if (ret) { 658 dev_err(&pf->pdev->dev, 659 "Failed to clear VF LAN Rx queue context %d, error: %d\n", 660 pf_queue_id, ret); 661 ret = -ENOENT; 662 goto error_param; 663 } 664 665 /* set the context in the HMC */ 666 ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx); 667 if (ret) { 668 dev_err(&pf->pdev->dev, 669 "Failed to set VF LAN Rx queue context %d error: %d\n", 670 pf_queue_id, ret); 671 ret = -ENOENT; 672 goto error_param; 673 } 674 675 error_param: 676 return ret; 677 } 678 679 /** 680 * i40e_alloc_vsi_res 681 * @vf: pointer to the VF info 682 * @idx: VSI index, applies only for ADq mode, zero otherwise 683 * 684 * alloc VF vsi context & resources 685 **/ 686 static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx) 687 { 688 struct i40e_mac_filter *f = NULL; 689 struct i40e_pf *pf = vf->pf; 690 struct i40e_vsi *vsi; 691 u64 max_tx_rate = 0; 692 int ret = 0; 693 694 vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid, 695 vf->vf_id); 696 697 if (!vsi) { 698 dev_err(&pf->pdev->dev, 699 "add vsi failed for VF %d, aq_err %d\n", 700 vf->vf_id, pf->hw.aq.asq_last_status); 701 ret = -ENOENT; 702 goto error_alloc_vsi_res; 703 } 704 705 if (!idx) { 706 u64 hena = i40e_pf_get_default_rss_hena(pf); 707 u8 broadcast[ETH_ALEN]; 708 709 vf->lan_vsi_idx = vsi->idx; 710 vf->lan_vsi_id = vsi->id; 711 /* If the port VLAN has been configured and then the 712 * VF driver was removed then the VSI port VLAN 713 * configuration was destroyed. Check if there is 714 * a port VLAN and restore the VSI configuration if 715 * needed. 716 */ 717 if (vf->port_vlan_id) 718 i40e_vsi_add_pvid(vsi, vf->port_vlan_id); 719 720 spin_lock_bh(&vsi->mac_filter_hash_lock); 721 if (is_valid_ether_addr(vf->default_lan_addr.addr)) { 722 f = i40e_add_mac_filter(vsi, 723 vf->default_lan_addr.addr); 724 if (!f) 725 dev_info(&pf->pdev->dev, 726 "Could not add MAC filter %pM for VF %d\n", 727 vf->default_lan_addr.addr, vf->vf_id); 728 } 729 eth_broadcast_addr(broadcast); 730 f = i40e_add_mac_filter(vsi, broadcast); 731 if (!f) 732 dev_info(&pf->pdev->dev, 733 "Could not allocate VF broadcast filter\n"); 734 spin_unlock_bh(&vsi->mac_filter_hash_lock); 735 wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena); 736 wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32)); 737 /* program mac filter only for VF VSI */ 738 ret = i40e_sync_vsi_filters(vsi); 739 if (ret) 740 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); 741 } 742 743 /* storing VSI index and id for ADq and don't apply the mac filter */ 744 if (vf->adq_enabled) { 745 vf->ch[idx].vsi_idx = vsi->idx; 746 vf->ch[idx].vsi_id = vsi->id; 747 } 748 749 /* Set VF bandwidth if specified */ 750 if (vf->tx_rate) { 751 max_tx_rate = vf->tx_rate; 752 } else if (vf->ch[idx].max_tx_rate) { 753 max_tx_rate = vf->ch[idx].max_tx_rate; 754 } 755 756 if (max_tx_rate) { 757 max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR); 758 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, 759 max_tx_rate, 0, NULL); 760 if (ret) 761 dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n", 762 vf->vf_id, ret); 763 } 764 765 error_alloc_vsi_res: 766 return ret; 767 } 768 769 /** 770 * i40e_map_pf_queues_to_vsi 771 * @vf: pointer to the VF info 772 * 773 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This 774 * function takes care of first part VSILAN_QTABLE, mapping pf queues to VSI. 775 **/ 776 static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf) 777 { 778 struct i40e_pf *pf = vf->pf; 779 struct i40e_hw *hw = &pf->hw; 780 u32 reg, num_tc = 1; /* VF has at least one traffic class */ 781 u16 vsi_id, qps; 782 int i, j; 783 784 if (vf->adq_enabled) 785 num_tc = vf->num_tc; 786 787 for (i = 0; i < num_tc; i++) { 788 if (vf->adq_enabled) { 789 qps = vf->ch[i].num_qps; 790 vsi_id = vf->ch[i].vsi_id; 791 } else { 792 qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; 793 vsi_id = vf->lan_vsi_id; 794 } 795 796 for (j = 0; j < 7; j++) { 797 if (j * 2 >= qps) { 798 /* end of list */ 799 reg = 0x07FF07FF; 800 } else { 801 u16 qid = i40e_vc_get_pf_queue_id(vf, 802 vsi_id, 803 j * 2); 804 reg = qid; 805 qid = i40e_vc_get_pf_queue_id(vf, vsi_id, 806 (j * 2) + 1); 807 reg |= qid << 16; 808 } 809 i40e_write_rx_ctl(hw, 810 I40E_VSILAN_QTABLE(j, vsi_id), 811 reg); 812 } 813 } 814 } 815 816 /** 817 * i40e_map_pf_to_vf_queues 818 * @vf: pointer to the VF info 819 * 820 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This 821 * function takes care of the second part VPLAN_QTABLE & completes VF mappings. 822 **/ 823 static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf) 824 { 825 struct i40e_pf *pf = vf->pf; 826 struct i40e_hw *hw = &pf->hw; 827 u32 reg, total_qps = 0; 828 u32 qps, num_tc = 1; /* VF has at least one traffic class */ 829 u16 vsi_id, qid; 830 int i, j; 831 832 if (vf->adq_enabled) 833 num_tc = vf->num_tc; 834 835 for (i = 0; i < num_tc; i++) { 836 if (vf->adq_enabled) { 837 qps = vf->ch[i].num_qps; 838 vsi_id = vf->ch[i].vsi_id; 839 } else { 840 qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; 841 vsi_id = vf->lan_vsi_id; 842 } 843 844 for (j = 0; j < qps; j++) { 845 qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j); 846 847 reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK); 848 wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id), 849 reg); 850 total_qps++; 851 } 852 } 853 } 854 855 /** 856 * i40e_enable_vf_mappings 857 * @vf: pointer to the VF info 858 * 859 * enable VF mappings 860 **/ 861 static void i40e_enable_vf_mappings(struct i40e_vf *vf) 862 { 863 struct i40e_pf *pf = vf->pf; 864 struct i40e_hw *hw = &pf->hw; 865 u32 reg; 866 867 /* Tell the hardware we're using noncontiguous mapping. HW requires 868 * that VF queues be mapped using this method, even when they are 869 * contiguous in real life 870 */ 871 i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id), 872 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); 873 874 /* enable VF vplan_qtable mappings */ 875 reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK; 876 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg); 877 878 i40e_map_pf_to_vf_queues(vf); 879 i40e_map_pf_queues_to_vsi(vf); 880 881 i40e_flush(hw); 882 } 883 884 /** 885 * i40e_disable_vf_mappings 886 * @vf: pointer to the VF info 887 * 888 * disable VF mappings 889 **/ 890 static void i40e_disable_vf_mappings(struct i40e_vf *vf) 891 { 892 struct i40e_pf *pf = vf->pf; 893 struct i40e_hw *hw = &pf->hw; 894 int i; 895 896 /* disable qp mappings */ 897 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0); 898 for (i = 0; i < I40E_MAX_VSI_QP; i++) 899 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id), 900 I40E_QUEUE_END_OF_LIST); 901 i40e_flush(hw); 902 } 903 904 /** 905 * i40e_free_vf_res 906 * @vf: pointer to the VF info 907 * 908 * free VF resources 909 **/ 910 static void i40e_free_vf_res(struct i40e_vf *vf) 911 { 912 struct i40e_pf *pf = vf->pf; 913 struct i40e_hw *hw = &pf->hw; 914 u32 reg_idx, reg; 915 int i, j, msix_vf; 916 917 /* Start by disabling VF's configuration API to prevent the OS from 918 * accessing the VF's VSI after it's freed / invalidated. 919 */ 920 clear_bit(I40E_VF_STATE_INIT, &vf->vf_states); 921 922 /* It's possible the VF had requeuested more queues than the default so 923 * do the accounting here when we're about to free them. 924 */ 925 if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) { 926 pf->queues_left += vf->num_queue_pairs - 927 I40E_DEFAULT_QUEUES_PER_VF; 928 } 929 930 /* free vsi & disconnect it from the parent uplink */ 931 if (vf->lan_vsi_idx) { 932 i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]); 933 vf->lan_vsi_idx = 0; 934 vf->lan_vsi_id = 0; 935 vf->num_mac = 0; 936 } 937 938 /* do the accounting and remove additional ADq VSI's */ 939 if (vf->adq_enabled && vf->ch[0].vsi_idx) { 940 for (j = 0; j < vf->num_tc; j++) { 941 /* At this point VSI0 is already released so don't 942 * release it again and only clear their values in 943 * structure variables 944 */ 945 if (j) 946 i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]); 947 vf->ch[j].vsi_idx = 0; 948 vf->ch[j].vsi_id = 0; 949 } 950 } 951 msix_vf = pf->hw.func_caps.num_msix_vectors_vf; 952 953 /* disable interrupts so the VF starts in a known state */ 954 for (i = 0; i < msix_vf; i++) { 955 /* format is same for both registers */ 956 if (0 == i) 957 reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id); 958 else 959 reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) * 960 (vf->vf_id)) 961 + (i - 1)); 962 wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); 963 i40e_flush(hw); 964 } 965 966 /* clear the irq settings */ 967 for (i = 0; i < msix_vf; i++) { 968 /* format is same for both registers */ 969 if (0 == i) 970 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); 971 else 972 reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) * 973 (vf->vf_id)) 974 + (i - 1)); 975 reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK | 976 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK); 977 wr32(hw, reg_idx, reg); 978 i40e_flush(hw); 979 } 980 /* reset some of the state variables keeping track of the resources */ 981 vf->num_queue_pairs = 0; 982 clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); 983 clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); 984 } 985 986 /** 987 * i40e_alloc_vf_res 988 * @vf: pointer to the VF info 989 * 990 * allocate VF resources 991 **/ 992 static int i40e_alloc_vf_res(struct i40e_vf *vf) 993 { 994 struct i40e_pf *pf = vf->pf; 995 int total_queue_pairs = 0; 996 int ret, idx; 997 998 if (vf->num_req_queues && 999 vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF) 1000 pf->num_vf_qps = vf->num_req_queues; 1001 else 1002 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; 1003 1004 /* allocate hw vsi context & associated resources */ 1005 ret = i40e_alloc_vsi_res(vf, 0); 1006 if (ret) 1007 goto error_alloc; 1008 total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; 1009 1010 /* allocate additional VSIs based on tc information for ADq */ 1011 if (vf->adq_enabled) { 1012 if (pf->queues_left >= 1013 (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) { 1014 /* TC 0 always belongs to VF VSI */ 1015 for (idx = 1; idx < vf->num_tc; idx++) { 1016 ret = i40e_alloc_vsi_res(vf, idx); 1017 if (ret) 1018 goto error_alloc; 1019 } 1020 /* send correct number of queues */ 1021 total_queue_pairs = I40E_MAX_VF_QUEUES; 1022 } else { 1023 dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n", 1024 vf->vf_id); 1025 vf->adq_enabled = false; 1026 } 1027 } 1028 1029 /* We account for each VF to get a default number of queue pairs. If 1030 * the VF has now requested more, we need to account for that to make 1031 * certain we never request more queues than we actually have left in 1032 * HW. 1033 */ 1034 if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) 1035 pf->queues_left -= 1036 total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF; 1037 1038 if (vf->trusted) 1039 set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 1040 else 1041 clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 1042 1043 /* store the total qps number for the runtime 1044 * VF req validation 1045 */ 1046 vf->num_queue_pairs = total_queue_pairs; 1047 1048 /* VF is now completely initialized */ 1049 set_bit(I40E_VF_STATE_INIT, &vf->vf_states); 1050 1051 error_alloc: 1052 if (ret) 1053 i40e_free_vf_res(vf); 1054 1055 return ret; 1056 } 1057 1058 #define VF_DEVICE_STATUS 0xAA 1059 #define VF_TRANS_PENDING_MASK 0x20 1060 /** 1061 * i40e_quiesce_vf_pci 1062 * @vf: pointer to the VF structure 1063 * 1064 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO 1065 * if the transactions never clear. 1066 **/ 1067 static int i40e_quiesce_vf_pci(struct i40e_vf *vf) 1068 { 1069 struct i40e_pf *pf = vf->pf; 1070 struct i40e_hw *hw = &pf->hw; 1071 int vf_abs_id, i; 1072 u32 reg; 1073 1074 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id; 1075 1076 wr32(hw, I40E_PF_PCI_CIAA, 1077 VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)); 1078 for (i = 0; i < 100; i++) { 1079 reg = rd32(hw, I40E_PF_PCI_CIAD); 1080 if ((reg & VF_TRANS_PENDING_MASK) == 0) 1081 return 0; 1082 udelay(1); 1083 } 1084 return -EIO; 1085 } 1086 1087 static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi); 1088 1089 /** 1090 * i40e_config_vf_promiscuous_mode 1091 * @vf: pointer to the VF info 1092 * @vsi_id: VSI id 1093 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable 1094 * @alluni: set MAC L2 layer unicast promiscuous enable/disable 1095 * 1096 * Called from the VF to configure the promiscuous mode of 1097 * VF vsis and from the VF reset path to reset promiscuous mode. 1098 **/ 1099 static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf, 1100 u16 vsi_id, 1101 bool allmulti, 1102 bool alluni) 1103 { 1104 struct i40e_pf *pf = vf->pf; 1105 struct i40e_hw *hw = &pf->hw; 1106 struct i40e_mac_filter *f; 1107 i40e_status aq_ret = 0; 1108 struct i40e_vsi *vsi; 1109 int bkt; 1110 1111 vsi = i40e_find_vsi_from_id(pf, vsi_id); 1112 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi) 1113 return I40E_ERR_PARAM; 1114 1115 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && 1116 (allmulti || alluni)) { 1117 dev_err(&pf->pdev->dev, 1118 "Unprivileged VF %d is attempting to configure promiscuous mode\n", 1119 vf->vf_id); 1120 /* Lie to the VF on purpose. */ 1121 return 0; 1122 } 1123 1124 if (vf->port_vlan_id) { 1125 aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid, 1126 allmulti, 1127 vf->port_vlan_id, 1128 NULL); 1129 if (aq_ret) { 1130 int aq_err = pf->hw.aq.asq_last_status; 1131 1132 dev_err(&pf->pdev->dev, 1133 "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n", 1134 vf->vf_id, 1135 i40e_stat_str(&pf->hw, aq_ret), 1136 i40e_aq_str(&pf->hw, aq_err)); 1137 return aq_ret; 1138 } 1139 1140 aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid, 1141 alluni, 1142 vf->port_vlan_id, 1143 NULL); 1144 if (aq_ret) { 1145 int aq_err = pf->hw.aq.asq_last_status; 1146 1147 dev_err(&pf->pdev->dev, 1148 "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n", 1149 vf->vf_id, 1150 i40e_stat_str(&pf->hw, aq_ret), 1151 i40e_aq_str(&pf->hw, aq_err)); 1152 } 1153 return aq_ret; 1154 } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { 1155 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { 1156 if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID) 1157 continue; 1158 aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, 1159 vsi->seid, 1160 allmulti, 1161 f->vlan, 1162 NULL); 1163 if (aq_ret) { 1164 int aq_err = pf->hw.aq.asq_last_status; 1165 1166 dev_err(&pf->pdev->dev, 1167 "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n", 1168 f->vlan, 1169 i40e_stat_str(&pf->hw, aq_ret), 1170 i40e_aq_str(&pf->hw, aq_err)); 1171 } 1172 1173 aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, 1174 vsi->seid, 1175 alluni, 1176 f->vlan, 1177 NULL); 1178 if (aq_ret) { 1179 int aq_err = pf->hw.aq.asq_last_status; 1180 1181 dev_err(&pf->pdev->dev, 1182 "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n", 1183 f->vlan, 1184 i40e_stat_str(&pf->hw, aq_ret), 1185 i40e_aq_str(&pf->hw, aq_err)); 1186 } 1187 } 1188 return aq_ret; 1189 } 1190 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, allmulti, 1191 NULL); 1192 if (aq_ret) { 1193 int aq_err = pf->hw.aq.asq_last_status; 1194 1195 dev_err(&pf->pdev->dev, 1196 "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n", 1197 vf->vf_id, 1198 i40e_stat_str(&pf->hw, aq_ret), 1199 i40e_aq_str(&pf->hw, aq_err)); 1200 return aq_ret; 1201 } 1202 1203 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, alluni, 1204 NULL, true); 1205 if (aq_ret) { 1206 int aq_err = pf->hw.aq.asq_last_status; 1207 1208 dev_err(&pf->pdev->dev, 1209 "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n", 1210 vf->vf_id, 1211 i40e_stat_str(&pf->hw, aq_ret), 1212 i40e_aq_str(&pf->hw, aq_err)); 1213 } 1214 1215 return aq_ret; 1216 } 1217 1218 /** 1219 * i40e_trigger_vf_reset 1220 * @vf: pointer to the VF structure 1221 * @flr: VFLR was issued or not 1222 * 1223 * Trigger hardware to start a reset for a particular VF. Expects the caller 1224 * to wait the proper amount of time to allow hardware to reset the VF before 1225 * it cleans up and restores VF functionality. 1226 **/ 1227 static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr) 1228 { 1229 struct i40e_pf *pf = vf->pf; 1230 struct i40e_hw *hw = &pf->hw; 1231 u32 reg, reg_idx, bit_idx; 1232 1233 /* warn the VF */ 1234 clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); 1235 1236 /* Disable VF's configuration API during reset. The flag is re-enabled 1237 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI. 1238 * It's normally disabled in i40e_free_vf_res(), but it's safer 1239 * to do it earlier to give some time to finish to any VF config 1240 * functions that may still be running at this point. 1241 */ 1242 clear_bit(I40E_VF_STATE_INIT, &vf->vf_states); 1243 1244 /* In the case of a VFLR, the HW has already reset the VF and we 1245 * just need to clean up, so don't hit the VFRTRIG register. 1246 */ 1247 if (!flr) { 1248 /* reset VF using VPGEN_VFRTRIG reg */ 1249 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); 1250 reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; 1251 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); 1252 i40e_flush(hw); 1253 } 1254 /* clear the VFLR bit in GLGEN_VFLRSTAT */ 1255 reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; 1256 bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32; 1257 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); 1258 i40e_flush(hw); 1259 1260 if (i40e_quiesce_vf_pci(vf)) 1261 dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n", 1262 vf->vf_id); 1263 } 1264 1265 /** 1266 * i40e_cleanup_reset_vf 1267 * @vf: pointer to the VF structure 1268 * 1269 * Cleanup a VF after the hardware reset is finished. Expects the caller to 1270 * have verified whether the reset is finished properly, and ensure the 1271 * minimum amount of wait time has passed. 1272 **/ 1273 static void i40e_cleanup_reset_vf(struct i40e_vf *vf) 1274 { 1275 struct i40e_pf *pf = vf->pf; 1276 struct i40e_hw *hw = &pf->hw; 1277 u32 reg; 1278 1279 /* disable promisc modes in case they were enabled */ 1280 i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false); 1281 1282 /* free VF resources to begin resetting the VSI state */ 1283 i40e_free_vf_res(vf); 1284 1285 /* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg. 1286 * By doing this we allow HW to access VF memory at any point. If we 1287 * did it any sooner, HW could access memory while it was being freed 1288 * in i40e_free_vf_res(), causing an IOMMU fault. 1289 * 1290 * On the other hand, this needs to be done ASAP, because the VF driver 1291 * is waiting for this to happen and may report a timeout. It's 1292 * harmless, but it gets logged into Guest OS kernel log, so best avoid 1293 * it. 1294 */ 1295 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); 1296 reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; 1297 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); 1298 1299 /* reallocate VF resources to finish resetting the VSI state */ 1300 if (!i40e_alloc_vf_res(vf)) { 1301 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 1302 i40e_enable_vf_mappings(vf); 1303 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); 1304 clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); 1305 /* Do not notify the client during VF init */ 1306 if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE, 1307 &vf->vf_states)) 1308 i40e_notify_client_of_vf_reset(pf, abs_vf_id); 1309 vf->num_vlan = 0; 1310 } 1311 1312 /* Tell the VF driver the reset is done. This needs to be done only 1313 * after VF has been fully initialized, because the VF driver may 1314 * request resources immediately after setting this flag. 1315 */ 1316 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); 1317 } 1318 1319 /** 1320 * i40e_reset_vf 1321 * @vf: pointer to the VF structure 1322 * @flr: VFLR was issued or not 1323 * 1324 * Returns true if the VF is reset, false otherwise. 1325 **/ 1326 bool i40e_reset_vf(struct i40e_vf *vf, bool flr) 1327 { 1328 struct i40e_pf *pf = vf->pf; 1329 struct i40e_hw *hw = &pf->hw; 1330 bool rsd = false; 1331 u32 reg; 1332 int i; 1333 1334 /* If the VFs have been disabled, this means something else is 1335 * resetting the VF, so we shouldn't continue. 1336 */ 1337 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) 1338 return false; 1339 1340 i40e_trigger_vf_reset(vf, flr); 1341 1342 /* poll VPGEN_VFRSTAT reg to make sure 1343 * that reset is complete 1344 */ 1345 for (i = 0; i < 10; i++) { 1346 /* VF reset requires driver to first reset the VF and then 1347 * poll the status register to make sure that the reset 1348 * completed successfully. Due to internal HW FIFO flushes, 1349 * we must wait 10ms before the register will be valid. 1350 */ 1351 usleep_range(10000, 20000); 1352 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); 1353 if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) { 1354 rsd = true; 1355 break; 1356 } 1357 } 1358 1359 if (flr) 1360 usleep_range(10000, 20000); 1361 1362 if (!rsd) 1363 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", 1364 vf->vf_id); 1365 usleep_range(10000, 20000); 1366 1367 /* On initial reset, we don't have any queues to disable */ 1368 if (vf->lan_vsi_idx != 0) 1369 i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]); 1370 1371 i40e_cleanup_reset_vf(vf); 1372 1373 i40e_flush(hw); 1374 clear_bit(__I40E_VF_DISABLE, pf->state); 1375 1376 return true; 1377 } 1378 1379 /** 1380 * i40e_reset_all_vfs 1381 * @pf: pointer to the PF structure 1382 * @flr: VFLR was issued or not 1383 * 1384 * Reset all allocated VFs in one go. First, tell the hardware to reset each 1385 * VF, then do all the waiting in one chunk, and finally finish restoring each 1386 * VF after the wait. This is useful during PF routines which need to reset 1387 * all VFs, as otherwise it must perform these resets in a serialized fashion. 1388 * 1389 * Returns true if any VFs were reset, and false otherwise. 1390 **/ 1391 bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) 1392 { 1393 struct i40e_hw *hw = &pf->hw; 1394 struct i40e_vf *vf; 1395 int i, v; 1396 u32 reg; 1397 1398 /* If we don't have any VFs, then there is nothing to reset */ 1399 if (!pf->num_alloc_vfs) 1400 return false; 1401 1402 /* If VFs have been disabled, there is no need to reset */ 1403 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) 1404 return false; 1405 1406 /* Begin reset on all VFs at once */ 1407 for (v = 0; v < pf->num_alloc_vfs; v++) 1408 i40e_trigger_vf_reset(&pf->vf[v], flr); 1409 1410 /* HW requires some time to make sure it can flush the FIFO for a VF 1411 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in 1412 * sequence to make sure that it has completed. We'll keep track of 1413 * the VFs using a simple iterator that increments once that VF has 1414 * finished resetting. 1415 */ 1416 for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) { 1417 usleep_range(10000, 20000); 1418 1419 /* Check each VF in sequence, beginning with the VF to fail 1420 * the previous check. 1421 */ 1422 while (v < pf->num_alloc_vfs) { 1423 vf = &pf->vf[v]; 1424 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); 1425 if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK)) 1426 break; 1427 1428 /* If the current VF has finished resetting, move on 1429 * to the next VF in sequence. 1430 */ 1431 v++; 1432 } 1433 } 1434 1435 if (flr) 1436 usleep_range(10000, 20000); 1437 1438 /* Display a warning if at least one VF didn't manage to reset in 1439 * time, but continue on with the operation. 1440 */ 1441 if (v < pf->num_alloc_vfs) 1442 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", 1443 pf->vf[v].vf_id); 1444 usleep_range(10000, 20000); 1445 1446 /* Begin disabling all the rings associated with VFs, but do not wait 1447 * between each VF. 1448 */ 1449 for (v = 0; v < pf->num_alloc_vfs; v++) { 1450 /* On initial reset, we don't have any queues to disable */ 1451 if (pf->vf[v].lan_vsi_idx == 0) 1452 continue; 1453 1454 i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]); 1455 } 1456 1457 /* Now that we've notified HW to disable all of the VF rings, wait 1458 * until they finish. 1459 */ 1460 for (v = 0; v < pf->num_alloc_vfs; v++) { 1461 /* On initial reset, we don't have any queues to disable */ 1462 if (pf->vf[v].lan_vsi_idx == 0) 1463 continue; 1464 1465 i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]); 1466 } 1467 1468 /* Hw may need up to 50ms to finish disabling the RX queues. We 1469 * minimize the wait by delaying only once for all VFs. 1470 */ 1471 mdelay(50); 1472 1473 /* Finish the reset on each VF */ 1474 for (v = 0; v < pf->num_alloc_vfs; v++) 1475 i40e_cleanup_reset_vf(&pf->vf[v]); 1476 1477 i40e_flush(hw); 1478 clear_bit(__I40E_VF_DISABLE, pf->state); 1479 1480 return true; 1481 } 1482 1483 /** 1484 * i40e_free_vfs 1485 * @pf: pointer to the PF structure 1486 * 1487 * free VF resources 1488 **/ 1489 void i40e_free_vfs(struct i40e_pf *pf) 1490 { 1491 struct i40e_hw *hw = &pf->hw; 1492 u32 reg_idx, bit_idx; 1493 int i, tmp, vf_id; 1494 1495 if (!pf->vf) 1496 return; 1497 while (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) 1498 usleep_range(1000, 2000); 1499 1500 i40e_notify_client_of_vf_enable(pf, 0); 1501 1502 /* Amortize wait time by stopping all VFs at the same time */ 1503 for (i = 0; i < pf->num_alloc_vfs; i++) { 1504 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) 1505 continue; 1506 1507 i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]); 1508 } 1509 1510 for (i = 0; i < pf->num_alloc_vfs; i++) { 1511 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) 1512 continue; 1513 1514 i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]); 1515 } 1516 1517 /* Disable IOV before freeing resources. This lets any VF drivers 1518 * running in the host get themselves cleaned up before we yank 1519 * the carpet out from underneath their feet. 1520 */ 1521 if (!pci_vfs_assigned(pf->pdev)) 1522 pci_disable_sriov(pf->pdev); 1523 else 1524 dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); 1525 1526 /* free up VF resources */ 1527 tmp = pf->num_alloc_vfs; 1528 pf->num_alloc_vfs = 0; 1529 for (i = 0; i < tmp; i++) { 1530 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) 1531 i40e_free_vf_res(&pf->vf[i]); 1532 /* disable qp mappings */ 1533 i40e_disable_vf_mappings(&pf->vf[i]); 1534 } 1535 1536 kfree(pf->vf); 1537 pf->vf = NULL; 1538 1539 /* This check is for when the driver is unloaded while VFs are 1540 * assigned. Setting the number of VFs to 0 through sysfs is caught 1541 * before this function ever gets called. 1542 */ 1543 if (!pci_vfs_assigned(pf->pdev)) { 1544 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to 1545 * work correctly when SR-IOV gets re-enabled. 1546 */ 1547 for (vf_id = 0; vf_id < tmp; vf_id++) { 1548 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; 1549 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; 1550 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); 1551 } 1552 } 1553 clear_bit(__I40E_VF_DISABLE, pf->state); 1554 } 1555 1556 #ifdef CONFIG_PCI_IOV 1557 /** 1558 * i40e_alloc_vfs 1559 * @pf: pointer to the PF structure 1560 * @num_alloc_vfs: number of VFs to allocate 1561 * 1562 * allocate VF resources 1563 **/ 1564 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) 1565 { 1566 struct i40e_vf *vfs; 1567 int i, ret = 0; 1568 1569 /* Disable interrupt 0 so we don't try to handle the VFLR. */ 1570 i40e_irq_dynamic_disable_icr0(pf); 1571 1572 /* Check to see if we're just allocating resources for extant VFs */ 1573 if (pci_num_vf(pf->pdev) != num_alloc_vfs) { 1574 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); 1575 if (ret) { 1576 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; 1577 pf->num_alloc_vfs = 0; 1578 goto err_iov; 1579 } 1580 } 1581 /* allocate memory */ 1582 vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL); 1583 if (!vfs) { 1584 ret = -ENOMEM; 1585 goto err_alloc; 1586 } 1587 pf->vf = vfs; 1588 1589 /* apply default profile */ 1590 for (i = 0; i < num_alloc_vfs; i++) { 1591 vfs[i].pf = pf; 1592 vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB; 1593 vfs[i].vf_id = i; 1594 1595 /* assign default capabilities */ 1596 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); 1597 vfs[i].spoofchk = true; 1598 1599 set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states); 1600 1601 } 1602 pf->num_alloc_vfs = num_alloc_vfs; 1603 1604 /* VF resources get allocated during reset */ 1605 i40e_reset_all_vfs(pf, false); 1606 1607 i40e_notify_client_of_vf_enable(pf, num_alloc_vfs); 1608 1609 err_alloc: 1610 if (ret) 1611 i40e_free_vfs(pf); 1612 err_iov: 1613 /* Re-enable interrupt 0. */ 1614 i40e_irq_dynamic_enable_icr0(pf); 1615 return ret; 1616 } 1617 1618 #endif 1619 /** 1620 * i40e_pci_sriov_enable 1621 * @pdev: pointer to a pci_dev structure 1622 * @num_vfs: number of VFs to allocate 1623 * 1624 * Enable or change the number of VFs 1625 **/ 1626 static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs) 1627 { 1628 #ifdef CONFIG_PCI_IOV 1629 struct i40e_pf *pf = pci_get_drvdata(pdev); 1630 int pre_existing_vfs = pci_num_vf(pdev); 1631 int err = 0; 1632 1633 if (test_bit(__I40E_TESTING, pf->state)) { 1634 dev_warn(&pdev->dev, 1635 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n"); 1636 err = -EPERM; 1637 goto err_out; 1638 } 1639 1640 if (pre_existing_vfs && pre_existing_vfs != num_vfs) 1641 i40e_free_vfs(pf); 1642 else if (pre_existing_vfs && pre_existing_vfs == num_vfs) 1643 goto out; 1644 1645 if (num_vfs > pf->num_req_vfs) { 1646 dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n", 1647 num_vfs, pf->num_req_vfs); 1648 err = -EPERM; 1649 goto err_out; 1650 } 1651 1652 dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs); 1653 err = i40e_alloc_vfs(pf, num_vfs); 1654 if (err) { 1655 dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err); 1656 goto err_out; 1657 } 1658 1659 out: 1660 return num_vfs; 1661 1662 err_out: 1663 return err; 1664 #endif 1665 return 0; 1666 } 1667 1668 /** 1669 * i40e_pci_sriov_configure 1670 * @pdev: pointer to a pci_dev structure 1671 * @num_vfs: number of VFs to allocate 1672 * 1673 * Enable or change the number of VFs. Called when the user updates the number 1674 * of VFs in sysfs. 1675 **/ 1676 int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) 1677 { 1678 struct i40e_pf *pf = pci_get_drvdata(pdev); 1679 int ret = 0; 1680 1681 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 1682 dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 1683 return -EAGAIN; 1684 } 1685 1686 if (num_vfs) { 1687 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { 1688 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; 1689 i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG); 1690 } 1691 ret = i40e_pci_sriov_enable(pdev, num_vfs); 1692 goto sriov_configure_out; 1693 } 1694 1695 if (!pci_vfs_assigned(pf->pdev)) { 1696 i40e_free_vfs(pf); 1697 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; 1698 i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG); 1699 } else { 1700 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); 1701 ret = -EINVAL; 1702 goto sriov_configure_out; 1703 } 1704 sriov_configure_out: 1705 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 1706 return ret; 1707 } 1708 1709 /***********************virtual channel routines******************/ 1710 1711 /** 1712 * i40e_vc_send_msg_to_vf 1713 * @vf: pointer to the VF info 1714 * @v_opcode: virtual channel opcode 1715 * @v_retval: virtual channel return value 1716 * @msg: pointer to the msg buffer 1717 * @msglen: msg length 1718 * 1719 * send msg to VF 1720 **/ 1721 static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, 1722 u32 v_retval, u8 *msg, u16 msglen) 1723 { 1724 struct i40e_pf *pf; 1725 struct i40e_hw *hw; 1726 int abs_vf_id; 1727 i40e_status aq_ret; 1728 1729 /* validate the request */ 1730 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) 1731 return -EINVAL; 1732 1733 pf = vf->pf; 1734 hw = &pf->hw; 1735 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 1736 1737 /* single place to detect unsuccessful return values */ 1738 if (v_retval) { 1739 vf->num_invalid_msgs++; 1740 dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n", 1741 vf->vf_id, v_opcode, v_retval); 1742 if (vf->num_invalid_msgs > 1743 I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) { 1744 dev_err(&pf->pdev->dev, 1745 "Number of invalid messages exceeded for VF %d\n", 1746 vf->vf_id); 1747 dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n"); 1748 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); 1749 } 1750 } else { 1751 vf->num_valid_msgs++; 1752 /* reset the invalid counter, if a valid message is received. */ 1753 vf->num_invalid_msgs = 0; 1754 } 1755 1756 aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, 1757 msg, msglen, NULL); 1758 if (aq_ret) { 1759 dev_info(&pf->pdev->dev, 1760 "Unable to send the message to VF %d aq_err %d\n", 1761 vf->vf_id, pf->hw.aq.asq_last_status); 1762 return -EIO; 1763 } 1764 1765 return 0; 1766 } 1767 1768 /** 1769 * i40e_vc_send_resp_to_vf 1770 * @vf: pointer to the VF info 1771 * @opcode: operation code 1772 * @retval: return value 1773 * 1774 * send resp msg to VF 1775 **/ 1776 static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, 1777 enum virtchnl_ops opcode, 1778 i40e_status retval) 1779 { 1780 return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0); 1781 } 1782 1783 /** 1784 * i40e_vc_get_version_msg 1785 * @vf: pointer to the VF info 1786 * @msg: pointer to the msg buffer 1787 * 1788 * called from the VF to request the API version used by the PF 1789 **/ 1790 static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg) 1791 { 1792 struct virtchnl_version_info info = { 1793 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR 1794 }; 1795 1796 vf->vf_ver = *(struct virtchnl_version_info *)msg; 1797 /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */ 1798 if (VF_IS_V10(&vf->vf_ver)) 1799 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; 1800 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, 1801 I40E_SUCCESS, (u8 *)&info, 1802 sizeof(struct virtchnl_version_info)); 1803 } 1804 1805 /** 1806 * i40e_del_qch - delete all the additional VSIs created as a part of ADq 1807 * @vf: pointer to VF structure 1808 **/ 1809 static void i40e_del_qch(struct i40e_vf *vf) 1810 { 1811 struct i40e_pf *pf = vf->pf; 1812 int i; 1813 1814 /* first element in the array belongs to primary VF VSI and we shouldn't 1815 * delete it. We should however delete the rest of the VSIs created 1816 */ 1817 for (i = 1; i < vf->num_tc; i++) { 1818 if (vf->ch[i].vsi_idx) { 1819 i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]); 1820 vf->ch[i].vsi_idx = 0; 1821 vf->ch[i].vsi_id = 0; 1822 } 1823 } 1824 } 1825 1826 /** 1827 * i40e_vc_get_vf_resources_msg 1828 * @vf: pointer to the VF info 1829 * @msg: pointer to the msg buffer 1830 * 1831 * called from the VF to request its resources 1832 **/ 1833 static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) 1834 { 1835 struct virtchnl_vf_resource *vfres = NULL; 1836 struct i40e_pf *pf = vf->pf; 1837 i40e_status aq_ret = 0; 1838 struct i40e_vsi *vsi; 1839 int num_vsis = 1; 1840 int len = 0; 1841 int ret; 1842 1843 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { 1844 aq_ret = I40E_ERR_PARAM; 1845 goto err; 1846 } 1847 1848 len = (sizeof(struct virtchnl_vf_resource) + 1849 sizeof(struct virtchnl_vsi_resource) * num_vsis); 1850 1851 vfres = kzalloc(len, GFP_KERNEL); 1852 if (!vfres) { 1853 aq_ret = I40E_ERR_NO_MEMORY; 1854 len = 0; 1855 goto err; 1856 } 1857 if (VF_IS_V11(&vf->vf_ver)) 1858 vf->driver_caps = *(u32 *)msg; 1859 else 1860 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 | 1861 VIRTCHNL_VF_OFFLOAD_RSS_REG | 1862 VIRTCHNL_VF_OFFLOAD_VLAN; 1863 1864 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2; 1865 vsi = pf->vsi[vf->lan_vsi_idx]; 1866 if (!vsi->info.pvid) 1867 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; 1868 1869 if (i40e_vf_client_capable(pf, vf->vf_id) && 1870 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) { 1871 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP; 1872 set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states); 1873 } else { 1874 clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states); 1875 } 1876 1877 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { 1878 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; 1879 } else { 1880 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) && 1881 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)) 1882 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ; 1883 else 1884 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; 1885 } 1886 1887 if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) { 1888 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) 1889 vfres->vf_cap_flags |= 1890 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; 1891 } 1892 1893 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP) 1894 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP; 1895 1896 if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) && 1897 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) 1898 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; 1899 1900 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) { 1901 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 1902 dev_err(&pf->pdev->dev, 1903 "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n", 1904 vf->vf_id); 1905 aq_ret = I40E_ERR_PARAM; 1906 goto err; 1907 } 1908 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; 1909 } 1910 1911 if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) { 1912 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) 1913 vfres->vf_cap_flags |= 1914 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; 1915 } 1916 1917 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES) 1918 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; 1919 1920 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ) 1921 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ; 1922 1923 vfres->num_vsis = num_vsis; 1924 vfres->num_queue_pairs = vf->num_queue_pairs; 1925 vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; 1926 vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE; 1927 vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE; 1928 1929 if (vf->lan_vsi_idx) { 1930 vfres->vsi_res[0].vsi_id = vf->lan_vsi_id; 1931 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; 1932 vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs; 1933 /* VFs only use TC 0 */ 1934 vfres->vsi_res[0].qset_handle 1935 = le16_to_cpu(vsi->info.qs_handle[0]); 1936 ether_addr_copy(vfres->vsi_res[0].default_mac_addr, 1937 vf->default_lan_addr.addr); 1938 } 1939 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); 1940 1941 err: 1942 /* send the response back to the VF */ 1943 ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, 1944 aq_ret, (u8 *)vfres, len); 1945 1946 kfree(vfres); 1947 return ret; 1948 } 1949 1950 /** 1951 * i40e_vc_reset_vf_msg 1952 * @vf: pointer to the VF info 1953 * 1954 * called from the VF to reset itself, 1955 * unlike other virtchnl messages, PF driver 1956 * doesn't send the response back to the VF 1957 **/ 1958 static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) 1959 { 1960 if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 1961 i40e_reset_vf(vf, false); 1962 } 1963 1964 /** 1965 * i40e_getnum_vf_vsi_vlan_filters 1966 * @vsi: pointer to the vsi 1967 * 1968 * called to get the number of VLANs offloaded on this VF 1969 **/ 1970 static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) 1971 { 1972 struct i40e_mac_filter *f; 1973 int num_vlans = 0, bkt; 1974 1975 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { 1976 if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) 1977 num_vlans++; 1978 } 1979 1980 return num_vlans; 1981 } 1982 1983 /** 1984 * i40e_vc_config_promiscuous_mode_msg 1985 * @vf: pointer to the VF info 1986 * @msg: pointer to the msg buffer 1987 * 1988 * called from the VF to configure the promiscuous mode of 1989 * VF vsis 1990 **/ 1991 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) 1992 { 1993 struct virtchnl_promisc_info *info = 1994 (struct virtchnl_promisc_info *)msg; 1995 struct i40e_pf *pf = vf->pf; 1996 i40e_status aq_ret = 0; 1997 bool allmulti = false; 1998 bool alluni = false; 1999 2000 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 2001 return I40E_ERR_PARAM; 2002 2003 /* Multicast promiscuous handling*/ 2004 if (info->flags & FLAG_VF_MULTICAST_PROMISC) 2005 allmulti = true; 2006 2007 if (info->flags & FLAG_VF_UNICAST_PROMISC) 2008 alluni = true; 2009 aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti, 2010 alluni); 2011 if (!aq_ret) { 2012 if (allmulti) { 2013 dev_info(&pf->pdev->dev, 2014 "VF %d successfully set multicast promiscuous mode\n", 2015 vf->vf_id); 2016 set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); 2017 } else { 2018 dev_info(&pf->pdev->dev, 2019 "VF %d successfully unset multicast promiscuous mode\n", 2020 vf->vf_id); 2021 clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); 2022 } 2023 if (alluni) { 2024 dev_info(&pf->pdev->dev, 2025 "VF %d successfully set unicast promiscuous mode\n", 2026 vf->vf_id); 2027 set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); 2028 } else { 2029 dev_info(&pf->pdev->dev, 2030 "VF %d successfully unset unicast promiscuous mode\n", 2031 vf->vf_id); 2032 clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); 2033 } 2034 } 2035 2036 /* send the response to the VF */ 2037 return i40e_vc_send_resp_to_vf(vf, 2038 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, 2039 aq_ret); 2040 } 2041 2042 /** 2043 * i40e_vc_config_queues_msg 2044 * @vf: pointer to the VF info 2045 * @msg: pointer to the msg buffer 2046 * 2047 * called from the VF to configure the rx/tx 2048 * queues 2049 **/ 2050 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) 2051 { 2052 struct virtchnl_vsi_queue_config_info *qci = 2053 (struct virtchnl_vsi_queue_config_info *)msg; 2054 struct virtchnl_queue_pair_info *qpi; 2055 struct i40e_pf *pf = vf->pf; 2056 u16 vsi_id, vsi_queue_id = 0; 2057 i40e_status aq_ret = 0; 2058 int i, j = 0, idx = 0; 2059 2060 vsi_id = qci->vsi_id; 2061 2062 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2063 aq_ret = I40E_ERR_PARAM; 2064 goto error_param; 2065 } 2066 2067 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2068 aq_ret = I40E_ERR_PARAM; 2069 goto error_param; 2070 } 2071 2072 for (i = 0; i < qci->num_queue_pairs; i++) { 2073 qpi = &qci->qpair[i]; 2074 2075 if (!vf->adq_enabled) { 2076 vsi_queue_id = qpi->txq.queue_id; 2077 2078 if (qpi->txq.vsi_id != qci->vsi_id || 2079 qpi->rxq.vsi_id != qci->vsi_id || 2080 qpi->rxq.queue_id != vsi_queue_id) { 2081 aq_ret = I40E_ERR_PARAM; 2082 goto error_param; 2083 } 2084 } 2085 2086 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) { 2087 aq_ret = I40E_ERR_PARAM; 2088 goto error_param; 2089 } 2090 2091 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id, 2092 &qpi->rxq) || 2093 i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id, 2094 &qpi->txq)) { 2095 aq_ret = I40E_ERR_PARAM; 2096 goto error_param; 2097 } 2098 2099 /* For ADq there can be up to 4 VSIs with max 4 queues each. 2100 * VF does not know about these additional VSIs and all 2101 * it cares is about its own queues. PF configures these queues 2102 * to its appropriate VSIs based on TC mapping 2103 **/ 2104 if (vf->adq_enabled) { 2105 if (j == (vf->ch[idx].num_qps - 1)) { 2106 idx++; 2107 j = 0; /* resetting the queue count */ 2108 vsi_queue_id = 0; 2109 } else { 2110 j++; 2111 vsi_queue_id++; 2112 } 2113 vsi_id = vf->ch[idx].vsi_id; 2114 } 2115 } 2116 /* set vsi num_queue_pairs in use to num configured by VF */ 2117 if (!vf->adq_enabled) { 2118 pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = 2119 qci->num_queue_pairs; 2120 } else { 2121 for (i = 0; i < vf->num_tc; i++) 2122 pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs = 2123 vf->ch[i].num_qps; 2124 } 2125 2126 error_param: 2127 /* send the response to the VF */ 2128 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, 2129 aq_ret); 2130 } 2131 2132 /** 2133 * i40e_validate_queue_map 2134 * @vsi_id: vsi id 2135 * @queuemap: Tx or Rx queue map 2136 * 2137 * check if Tx or Rx queue map is valid 2138 **/ 2139 static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id, 2140 unsigned long queuemap) 2141 { 2142 u16 vsi_queue_id, queue_id; 2143 2144 for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) { 2145 if (vf->adq_enabled) { 2146 vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id; 2147 queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF); 2148 } else { 2149 queue_id = vsi_queue_id; 2150 } 2151 2152 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) 2153 return -EINVAL; 2154 } 2155 2156 return 0; 2157 } 2158 2159 /** 2160 * i40e_vc_config_irq_map_msg 2161 * @vf: pointer to the VF info 2162 * @msg: pointer to the msg buffer 2163 * 2164 * called from the VF to configure the irq to 2165 * queue map 2166 **/ 2167 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg) 2168 { 2169 struct virtchnl_irq_map_info *irqmap_info = 2170 (struct virtchnl_irq_map_info *)msg; 2171 struct virtchnl_vector_map *map; 2172 u16 vsi_id, vector_id; 2173 i40e_status aq_ret = 0; 2174 int i; 2175 2176 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2177 aq_ret = I40E_ERR_PARAM; 2178 goto error_param; 2179 } 2180 2181 for (i = 0; i < irqmap_info->num_vectors; i++) { 2182 map = &irqmap_info->vecmap[i]; 2183 vector_id = map->vector_id; 2184 vsi_id = map->vsi_id; 2185 /* validate msg params */ 2186 if (!i40e_vc_isvalid_vector_id(vf, vector_id) || 2187 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2188 aq_ret = I40E_ERR_PARAM; 2189 goto error_param; 2190 } 2191 2192 if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) { 2193 aq_ret = I40E_ERR_PARAM; 2194 goto error_param; 2195 } 2196 2197 if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) { 2198 aq_ret = I40E_ERR_PARAM; 2199 goto error_param; 2200 } 2201 2202 i40e_config_irq_link_list(vf, vsi_id, map); 2203 } 2204 error_param: 2205 /* send the response to the VF */ 2206 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, 2207 aq_ret); 2208 } 2209 2210 /** 2211 * i40e_ctrl_vf_tx_rings 2212 * @vsi: the SRIOV VSI being configured 2213 * @q_map: bit map of the queues to be enabled 2214 * @enable: start or stop the queue 2215 **/ 2216 static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map, 2217 bool enable) 2218 { 2219 struct i40e_pf *pf = vsi->back; 2220 int ret = 0; 2221 u16 q_id; 2222 2223 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { 2224 ret = i40e_control_wait_tx_q(vsi->seid, pf, 2225 vsi->base_queue + q_id, 2226 false /*is xdp*/, enable); 2227 if (ret) 2228 break; 2229 } 2230 return ret; 2231 } 2232 2233 /** 2234 * i40e_ctrl_vf_rx_rings 2235 * @vsi: the SRIOV VSI being configured 2236 * @q_map: bit map of the queues to be enabled 2237 * @enable: start or stop the queue 2238 **/ 2239 static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map, 2240 bool enable) 2241 { 2242 struct i40e_pf *pf = vsi->back; 2243 int ret = 0; 2244 u16 q_id; 2245 2246 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { 2247 ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id, 2248 enable); 2249 if (ret) 2250 break; 2251 } 2252 return ret; 2253 } 2254 2255 /** 2256 * i40e_vc_enable_queues_msg 2257 * @vf: pointer to the VF info 2258 * @msg: pointer to the msg buffer 2259 * 2260 * called from the VF to enable all or specific queue(s) 2261 **/ 2262 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) 2263 { 2264 struct virtchnl_queue_select *vqs = 2265 (struct virtchnl_queue_select *)msg; 2266 struct i40e_pf *pf = vf->pf; 2267 u16 vsi_id = vqs->vsi_id; 2268 i40e_status aq_ret = 0; 2269 int i; 2270 2271 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2272 aq_ret = I40E_ERR_PARAM; 2273 goto error_param; 2274 } 2275 2276 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2277 aq_ret = I40E_ERR_PARAM; 2278 goto error_param; 2279 } 2280 2281 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { 2282 aq_ret = I40E_ERR_PARAM; 2283 goto error_param; 2284 } 2285 2286 /* Use the queue bit map sent by the VF */ 2287 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, 2288 true)) { 2289 aq_ret = I40E_ERR_TIMEOUT; 2290 goto error_param; 2291 } 2292 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, 2293 true)) { 2294 aq_ret = I40E_ERR_TIMEOUT; 2295 goto error_param; 2296 } 2297 2298 /* need to start the rings for additional ADq VSI's as well */ 2299 if (vf->adq_enabled) { 2300 /* zero belongs to LAN VSI */ 2301 for (i = 1; i < vf->num_tc; i++) { 2302 if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx])) 2303 aq_ret = I40E_ERR_TIMEOUT; 2304 } 2305 } 2306 2307 error_param: 2308 /* send the response to the VF */ 2309 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, 2310 aq_ret); 2311 } 2312 2313 /** 2314 * i40e_vc_disable_queues_msg 2315 * @vf: pointer to the VF info 2316 * @msg: pointer to the msg buffer 2317 * 2318 * called from the VF to disable all or specific 2319 * queue(s) 2320 **/ 2321 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) 2322 { 2323 struct virtchnl_queue_select *vqs = 2324 (struct virtchnl_queue_select *)msg; 2325 struct i40e_pf *pf = vf->pf; 2326 i40e_status aq_ret = 0; 2327 2328 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2329 aq_ret = I40E_ERR_PARAM; 2330 goto error_param; 2331 } 2332 2333 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2334 aq_ret = I40E_ERR_PARAM; 2335 goto error_param; 2336 } 2337 2338 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { 2339 aq_ret = I40E_ERR_PARAM; 2340 goto error_param; 2341 } 2342 2343 /* Use the queue bit map sent by the VF */ 2344 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, 2345 false)) { 2346 aq_ret = I40E_ERR_TIMEOUT; 2347 goto error_param; 2348 } 2349 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, 2350 false)) { 2351 aq_ret = I40E_ERR_TIMEOUT; 2352 goto error_param; 2353 } 2354 error_param: 2355 /* send the response to the VF */ 2356 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, 2357 aq_ret); 2358 } 2359 2360 /** 2361 * i40e_vc_request_queues_msg 2362 * @vf: pointer to the VF info 2363 * @msg: pointer to the msg buffer 2364 * 2365 * VFs get a default number of queues but can use this message to request a 2366 * different number. If the request is successful, PF will reset the VF and 2367 * return 0. If unsuccessful, PF will send message informing VF of number of 2368 * available queues and return result of sending VF a message. 2369 **/ 2370 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) 2371 { 2372 struct virtchnl_vf_res_request *vfres = 2373 (struct virtchnl_vf_res_request *)msg; 2374 int req_pairs = vfres->num_queue_pairs; 2375 int cur_pairs = vf->num_queue_pairs; 2376 struct i40e_pf *pf = vf->pf; 2377 2378 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 2379 return -EINVAL; 2380 2381 if (req_pairs <= 0) { 2382 dev_err(&pf->pdev->dev, 2383 "VF %d tried to request %d queues. Ignoring.\n", 2384 vf->vf_id, req_pairs); 2385 } else if (req_pairs > I40E_MAX_VF_QUEUES) { 2386 dev_err(&pf->pdev->dev, 2387 "VF %d tried to request more than %d queues.\n", 2388 vf->vf_id, 2389 I40E_MAX_VF_QUEUES); 2390 vfres->num_queue_pairs = I40E_MAX_VF_QUEUES; 2391 } else if (req_pairs - cur_pairs > pf->queues_left) { 2392 dev_warn(&pf->pdev->dev, 2393 "VF %d requested %d more queues, but only %d left.\n", 2394 vf->vf_id, 2395 req_pairs - cur_pairs, 2396 pf->queues_left); 2397 vfres->num_queue_pairs = pf->queues_left + cur_pairs; 2398 } else { 2399 /* successful request */ 2400 vf->num_req_queues = req_pairs; 2401 i40e_vc_notify_vf_reset(vf); 2402 i40e_reset_vf(vf, false); 2403 return 0; 2404 } 2405 2406 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0, 2407 (u8 *)vfres, sizeof(*vfres)); 2408 } 2409 2410 /** 2411 * i40e_vc_get_stats_msg 2412 * @vf: pointer to the VF info 2413 * @msg: pointer to the msg buffer 2414 * 2415 * called from the VF to get vsi stats 2416 **/ 2417 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) 2418 { 2419 struct virtchnl_queue_select *vqs = 2420 (struct virtchnl_queue_select *)msg; 2421 struct i40e_pf *pf = vf->pf; 2422 struct i40e_eth_stats stats; 2423 i40e_status aq_ret = 0; 2424 struct i40e_vsi *vsi; 2425 2426 memset(&stats, 0, sizeof(struct i40e_eth_stats)); 2427 2428 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2429 aq_ret = I40E_ERR_PARAM; 2430 goto error_param; 2431 } 2432 2433 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2434 aq_ret = I40E_ERR_PARAM; 2435 goto error_param; 2436 } 2437 2438 vsi = pf->vsi[vf->lan_vsi_idx]; 2439 if (!vsi) { 2440 aq_ret = I40E_ERR_PARAM; 2441 goto error_param; 2442 } 2443 i40e_update_eth_stats(vsi); 2444 stats = vsi->eth_stats; 2445 2446 error_param: 2447 /* send the response back to the VF */ 2448 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret, 2449 (u8 *)&stats, sizeof(stats)); 2450 } 2451 2452 /* If the VF is not trusted restrict the number of MAC/VLAN it can program */ 2453 #define I40E_VC_MAX_MAC_ADDR_PER_VF 12 2454 #define I40E_VC_MAX_VLAN_PER_VF 8 2455 2456 /** 2457 * i40e_check_vf_permission 2458 * @vf: pointer to the VF info 2459 * @al: MAC address list from virtchnl 2460 * 2461 * Check that the given list of MAC addresses is allowed. Will return -EPERM 2462 * if any address in the list is not valid. Checks the following conditions: 2463 * 2464 * 1) broadcast and zero addresses are never valid 2465 * 2) unicast addresses are not allowed if the VMM has administratively set 2466 * the VF MAC address, unless the VF is marked as privileged. 2467 * 3) There is enough space to add all the addresses. 2468 * 2469 * Note that to guarantee consistency, it is expected this function be called 2470 * while holding the mac_filter_hash_lock, as otherwise the current number of 2471 * addresses might not be accurate. 2472 **/ 2473 static inline int i40e_check_vf_permission(struct i40e_vf *vf, 2474 struct virtchnl_ether_addr_list *al) 2475 { 2476 struct i40e_pf *pf = vf->pf; 2477 int i; 2478 2479 /* If this VF is not privileged, then we can't add more than a limited 2480 * number of addresses. Check to make sure that the additions do not 2481 * push us over the limit. 2482 */ 2483 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && 2484 (vf->num_mac + al->num_elements) > I40E_VC_MAX_MAC_ADDR_PER_VF) { 2485 dev_err(&pf->pdev->dev, 2486 "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n"); 2487 return -EPERM; 2488 } 2489 2490 for (i = 0; i < al->num_elements; i++) { 2491 u8 *addr = al->list[i].addr; 2492 2493 if (is_broadcast_ether_addr(addr) || 2494 is_zero_ether_addr(addr)) { 2495 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", 2496 addr); 2497 return I40E_ERR_INVALID_MAC_ADDR; 2498 } 2499 2500 /* If the host VMM administrator has set the VF MAC address 2501 * administratively via the ndo_set_vf_mac command then deny 2502 * permission to the VF to add or delete unicast MAC addresses. 2503 * Unless the VF is privileged and then it can do whatever. 2504 * The VF may request to set the MAC address filter already 2505 * assigned to it so do not return an error in that case. 2506 */ 2507 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && 2508 !is_multicast_ether_addr(addr) && vf->pf_set_mac && 2509 !ether_addr_equal(addr, vf->default_lan_addr.addr)) { 2510 dev_err(&pf->pdev->dev, 2511 "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); 2512 return -EPERM; 2513 } 2514 } 2515 2516 return 0; 2517 } 2518 2519 /** 2520 * i40e_vc_add_mac_addr_msg 2521 * @vf: pointer to the VF info 2522 * @msg: pointer to the msg buffer 2523 * 2524 * add guest mac address filter 2525 **/ 2526 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) 2527 { 2528 struct virtchnl_ether_addr_list *al = 2529 (struct virtchnl_ether_addr_list *)msg; 2530 struct i40e_pf *pf = vf->pf; 2531 struct i40e_vsi *vsi = NULL; 2532 u16 vsi_id = al->vsi_id; 2533 i40e_status ret = 0; 2534 int i; 2535 2536 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2537 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2538 ret = I40E_ERR_PARAM; 2539 goto error_param; 2540 } 2541 2542 vsi = pf->vsi[vf->lan_vsi_idx]; 2543 2544 /* Lock once, because all function inside for loop accesses VSI's 2545 * MAC filter list which needs to be protected using same lock. 2546 */ 2547 spin_lock_bh(&vsi->mac_filter_hash_lock); 2548 2549 ret = i40e_check_vf_permission(vf, al); 2550 if (ret) { 2551 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2552 goto error_param; 2553 } 2554 2555 /* add new addresses to the list */ 2556 for (i = 0; i < al->num_elements; i++) { 2557 struct i40e_mac_filter *f; 2558 2559 f = i40e_find_mac(vsi, al->list[i].addr); 2560 if (!f) { 2561 f = i40e_add_mac_filter(vsi, al->list[i].addr); 2562 2563 if (!f) { 2564 dev_err(&pf->pdev->dev, 2565 "Unable to add MAC filter %pM for VF %d\n", 2566 al->list[i].addr, vf->vf_id); 2567 ret = I40E_ERR_PARAM; 2568 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2569 goto error_param; 2570 } else { 2571 vf->num_mac++; 2572 } 2573 } 2574 } 2575 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2576 2577 /* program the updated filter list */ 2578 ret = i40e_sync_vsi_filters(vsi); 2579 if (ret) 2580 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", 2581 vf->vf_id, ret); 2582 2583 error_param: 2584 /* send the response to the VF */ 2585 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, 2586 ret); 2587 } 2588 2589 /** 2590 * i40e_vc_del_mac_addr_msg 2591 * @vf: pointer to the VF info 2592 * @msg: pointer to the msg buffer 2593 * 2594 * remove guest mac address filter 2595 **/ 2596 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) 2597 { 2598 struct virtchnl_ether_addr_list *al = 2599 (struct virtchnl_ether_addr_list *)msg; 2600 struct i40e_pf *pf = vf->pf; 2601 struct i40e_vsi *vsi = NULL; 2602 u16 vsi_id = al->vsi_id; 2603 i40e_status ret = 0; 2604 int i; 2605 2606 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2607 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2608 ret = I40E_ERR_PARAM; 2609 goto error_param; 2610 } 2611 2612 for (i = 0; i < al->num_elements; i++) { 2613 if (is_broadcast_ether_addr(al->list[i].addr) || 2614 is_zero_ether_addr(al->list[i].addr)) { 2615 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n", 2616 al->list[i].addr, vf->vf_id); 2617 ret = I40E_ERR_INVALID_MAC_ADDR; 2618 goto error_param; 2619 } 2620 2621 if (vf->pf_set_mac && 2622 ether_addr_equal(al->list[i].addr, 2623 vf->default_lan_addr.addr)) { 2624 dev_err(&pf->pdev->dev, 2625 "MAC addr %pM has been set by PF, cannot delete it for VF %d, reset VF to change MAC addr\n", 2626 vf->default_lan_addr.addr, vf->vf_id); 2627 ret = I40E_ERR_PARAM; 2628 goto error_param; 2629 } 2630 } 2631 vsi = pf->vsi[vf->lan_vsi_idx]; 2632 2633 spin_lock_bh(&vsi->mac_filter_hash_lock); 2634 /* delete addresses from the list */ 2635 for (i = 0; i < al->num_elements; i++) 2636 if (i40e_del_mac_filter(vsi, al->list[i].addr)) { 2637 ret = I40E_ERR_INVALID_MAC_ADDR; 2638 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2639 goto error_param; 2640 } else { 2641 vf->num_mac--; 2642 } 2643 2644 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2645 2646 /* program the updated filter list */ 2647 ret = i40e_sync_vsi_filters(vsi); 2648 if (ret) 2649 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", 2650 vf->vf_id, ret); 2651 2652 error_param: 2653 /* send the response to the VF */ 2654 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, 2655 ret); 2656 } 2657 2658 /** 2659 * i40e_vc_add_vlan_msg 2660 * @vf: pointer to the VF info 2661 * @msg: pointer to the msg buffer 2662 * 2663 * program guest vlan id 2664 **/ 2665 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg) 2666 { 2667 struct virtchnl_vlan_filter_list *vfl = 2668 (struct virtchnl_vlan_filter_list *)msg; 2669 struct i40e_pf *pf = vf->pf; 2670 struct i40e_vsi *vsi = NULL; 2671 u16 vsi_id = vfl->vsi_id; 2672 i40e_status aq_ret = 0; 2673 int i; 2674 2675 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) && 2676 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 2677 dev_err(&pf->pdev->dev, 2678 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n"); 2679 goto error_param; 2680 } 2681 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2682 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2683 aq_ret = I40E_ERR_PARAM; 2684 goto error_param; 2685 } 2686 2687 for (i = 0; i < vfl->num_elements; i++) { 2688 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 2689 aq_ret = I40E_ERR_PARAM; 2690 dev_err(&pf->pdev->dev, 2691 "invalid VF VLAN id %d\n", vfl->vlan_id[i]); 2692 goto error_param; 2693 } 2694 } 2695 vsi = pf->vsi[vf->lan_vsi_idx]; 2696 if (vsi->info.pvid) { 2697 aq_ret = I40E_ERR_PARAM; 2698 goto error_param; 2699 } 2700 2701 i40e_vlan_stripping_enable(vsi); 2702 for (i = 0; i < vfl->num_elements; i++) { 2703 /* add new VLAN filter */ 2704 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]); 2705 if (!ret) 2706 vf->num_vlan++; 2707 2708 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 2709 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 2710 true, 2711 vfl->vlan_id[i], 2712 NULL); 2713 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 2714 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 2715 true, 2716 vfl->vlan_id[i], 2717 NULL); 2718 2719 if (ret) 2720 dev_err(&pf->pdev->dev, 2721 "Unable to add VLAN filter %d for VF %d, error %d\n", 2722 vfl->vlan_id[i], vf->vf_id, ret); 2723 } 2724 2725 error_param: 2726 /* send the response to the VF */ 2727 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret); 2728 } 2729 2730 /** 2731 * i40e_vc_remove_vlan_msg 2732 * @vf: pointer to the VF info 2733 * @msg: pointer to the msg buffer 2734 * 2735 * remove programmed guest vlan id 2736 **/ 2737 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) 2738 { 2739 struct virtchnl_vlan_filter_list *vfl = 2740 (struct virtchnl_vlan_filter_list *)msg; 2741 struct i40e_pf *pf = vf->pf; 2742 struct i40e_vsi *vsi = NULL; 2743 u16 vsi_id = vfl->vsi_id; 2744 i40e_status aq_ret = 0; 2745 int i; 2746 2747 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2748 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2749 aq_ret = I40E_ERR_PARAM; 2750 goto error_param; 2751 } 2752 2753 for (i = 0; i < vfl->num_elements; i++) { 2754 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 2755 aq_ret = I40E_ERR_PARAM; 2756 goto error_param; 2757 } 2758 } 2759 2760 vsi = pf->vsi[vf->lan_vsi_idx]; 2761 if (vsi->info.pvid) { 2762 aq_ret = I40E_ERR_PARAM; 2763 goto error_param; 2764 } 2765 2766 for (i = 0; i < vfl->num_elements; i++) { 2767 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); 2768 vf->num_vlan--; 2769 2770 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 2771 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 2772 false, 2773 vfl->vlan_id[i], 2774 NULL); 2775 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 2776 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 2777 false, 2778 vfl->vlan_id[i], 2779 NULL); 2780 } 2781 2782 error_param: 2783 /* send the response to the VF */ 2784 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret); 2785 } 2786 2787 /** 2788 * i40e_vc_iwarp_msg 2789 * @vf: pointer to the VF info 2790 * @msg: pointer to the msg buffer 2791 * @msglen: msg length 2792 * 2793 * called from the VF for the iwarp msgs 2794 **/ 2795 static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 2796 { 2797 struct i40e_pf *pf = vf->pf; 2798 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; 2799 i40e_status aq_ret = 0; 2800 2801 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2802 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { 2803 aq_ret = I40E_ERR_PARAM; 2804 goto error_param; 2805 } 2806 2807 i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id, 2808 msg, msglen); 2809 2810 error_param: 2811 /* send the response to the VF */ 2812 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP, 2813 aq_ret); 2814 } 2815 2816 /** 2817 * i40e_vc_iwarp_qvmap_msg 2818 * @vf: pointer to the VF info 2819 * @msg: pointer to the msg buffer 2820 * @config: config qvmap or release it 2821 * 2822 * called from the VF for the iwarp msgs 2823 **/ 2824 static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config) 2825 { 2826 struct virtchnl_iwarp_qvlist_info *qvlist_info = 2827 (struct virtchnl_iwarp_qvlist_info *)msg; 2828 i40e_status aq_ret = 0; 2829 2830 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2831 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { 2832 aq_ret = I40E_ERR_PARAM; 2833 goto error_param; 2834 } 2835 2836 if (config) { 2837 if (i40e_config_iwarp_qvlist(vf, qvlist_info)) 2838 aq_ret = I40E_ERR_PARAM; 2839 } else { 2840 i40e_release_iwarp_qvlist(vf); 2841 } 2842 2843 error_param: 2844 /* send the response to the VF */ 2845 return i40e_vc_send_resp_to_vf(vf, 2846 config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP : 2847 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, 2848 aq_ret); 2849 } 2850 2851 /** 2852 * i40e_vc_config_rss_key 2853 * @vf: pointer to the VF info 2854 * @msg: pointer to the msg buffer 2855 * 2856 * Configure the VF's RSS key 2857 **/ 2858 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg) 2859 { 2860 struct virtchnl_rss_key *vrk = 2861 (struct virtchnl_rss_key *)msg; 2862 struct i40e_pf *pf = vf->pf; 2863 struct i40e_vsi *vsi = NULL; 2864 u16 vsi_id = vrk->vsi_id; 2865 i40e_status aq_ret = 0; 2866 2867 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2868 !i40e_vc_isvalid_vsi_id(vf, vsi_id) || 2869 (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) { 2870 aq_ret = I40E_ERR_PARAM; 2871 goto err; 2872 } 2873 2874 vsi = pf->vsi[vf->lan_vsi_idx]; 2875 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0); 2876 err: 2877 /* send the response to the VF */ 2878 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, 2879 aq_ret); 2880 } 2881 2882 /** 2883 * i40e_vc_config_rss_lut 2884 * @vf: pointer to the VF info 2885 * @msg: pointer to the msg buffer 2886 * 2887 * Configure the VF's RSS LUT 2888 **/ 2889 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg) 2890 { 2891 struct virtchnl_rss_lut *vrl = 2892 (struct virtchnl_rss_lut *)msg; 2893 struct i40e_pf *pf = vf->pf; 2894 struct i40e_vsi *vsi = NULL; 2895 u16 vsi_id = vrl->vsi_id; 2896 i40e_status aq_ret = 0; 2897 2898 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2899 !i40e_vc_isvalid_vsi_id(vf, vsi_id) || 2900 (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) { 2901 aq_ret = I40E_ERR_PARAM; 2902 goto err; 2903 } 2904 2905 vsi = pf->vsi[vf->lan_vsi_idx]; 2906 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE); 2907 /* send the response to the VF */ 2908 err: 2909 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, 2910 aq_ret); 2911 } 2912 2913 /** 2914 * i40e_vc_get_rss_hena 2915 * @vf: pointer to the VF info 2916 * @msg: pointer to the msg buffer 2917 * 2918 * Return the RSS HENA bits allowed by the hardware 2919 **/ 2920 static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg) 2921 { 2922 struct virtchnl_rss_hena *vrh = NULL; 2923 struct i40e_pf *pf = vf->pf; 2924 i40e_status aq_ret = 0; 2925 int len = 0; 2926 2927 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2928 aq_ret = I40E_ERR_PARAM; 2929 goto err; 2930 } 2931 len = sizeof(struct virtchnl_rss_hena); 2932 2933 vrh = kzalloc(len, GFP_KERNEL); 2934 if (!vrh) { 2935 aq_ret = I40E_ERR_NO_MEMORY; 2936 len = 0; 2937 goto err; 2938 } 2939 vrh->hena = i40e_pf_get_default_rss_hena(pf); 2940 err: 2941 /* send the response back to the VF */ 2942 aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS, 2943 aq_ret, (u8 *)vrh, len); 2944 kfree(vrh); 2945 return aq_ret; 2946 } 2947 2948 /** 2949 * i40e_vc_set_rss_hena 2950 * @vf: pointer to the VF info 2951 * @msg: pointer to the msg buffer 2952 * 2953 * Set the RSS HENA bits for the VF 2954 **/ 2955 static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg) 2956 { 2957 struct virtchnl_rss_hena *vrh = 2958 (struct virtchnl_rss_hena *)msg; 2959 struct i40e_pf *pf = vf->pf; 2960 struct i40e_hw *hw = &pf->hw; 2961 i40e_status aq_ret = 0; 2962 2963 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2964 aq_ret = I40E_ERR_PARAM; 2965 goto err; 2966 } 2967 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena); 2968 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id), 2969 (u32)(vrh->hena >> 32)); 2970 2971 /* send the response to the VF */ 2972 err: 2973 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret); 2974 } 2975 2976 /** 2977 * i40e_vc_enable_vlan_stripping 2978 * @vf: pointer to the VF info 2979 * @msg: pointer to the msg buffer 2980 * 2981 * Enable vlan header stripping for the VF 2982 **/ 2983 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg) 2984 { 2985 struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; 2986 i40e_status aq_ret = 0; 2987 2988 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2989 aq_ret = I40E_ERR_PARAM; 2990 goto err; 2991 } 2992 2993 i40e_vlan_stripping_enable(vsi); 2994 2995 /* send the response to the VF */ 2996 err: 2997 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, 2998 aq_ret); 2999 } 3000 3001 /** 3002 * i40e_vc_disable_vlan_stripping 3003 * @vf: pointer to the VF info 3004 * @msg: pointer to the msg buffer 3005 * 3006 * Disable vlan header stripping for the VF 3007 **/ 3008 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg) 3009 { 3010 struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; 3011 i40e_status aq_ret = 0; 3012 3013 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3014 aq_ret = I40E_ERR_PARAM; 3015 goto err; 3016 } 3017 3018 i40e_vlan_stripping_disable(vsi); 3019 3020 /* send the response to the VF */ 3021 err: 3022 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 3023 aq_ret); 3024 } 3025 3026 /** 3027 * i40e_validate_cloud_filter 3028 * @mask: mask for TC filter 3029 * @data: data for TC filter 3030 * 3031 * This function validates cloud filter programmed as TC filter for ADq 3032 **/ 3033 static int i40e_validate_cloud_filter(struct i40e_vf *vf, 3034 struct virtchnl_filter *tc_filter) 3035 { 3036 struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec; 3037 struct virtchnl_l4_spec data = tc_filter->data.tcp_spec; 3038 struct i40e_pf *pf = vf->pf; 3039 struct i40e_vsi *vsi = NULL; 3040 struct i40e_mac_filter *f; 3041 struct hlist_node *h; 3042 bool found = false; 3043 int bkt; 3044 3045 if (!tc_filter->action) { 3046 dev_info(&pf->pdev->dev, 3047 "VF %d: Currently ADq doesn't support Drop Action\n", 3048 vf->vf_id); 3049 goto err; 3050 } 3051 3052 /* action_meta is TC number here to which the filter is applied */ 3053 if (!tc_filter->action_meta || 3054 tc_filter->action_meta > I40E_MAX_VF_VSI) { 3055 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n", 3056 vf->vf_id, tc_filter->action_meta); 3057 goto err; 3058 } 3059 3060 /* Check filter if it's programmed for advanced mode or basic mode. 3061 * There are two ADq modes (for VF only), 3062 * 1. Basic mode: intended to allow as many filter options as possible 3063 * to be added to a VF in Non-trusted mode. Main goal is 3064 * to add filters to its own MAC and VLAN id. 3065 * 2. Advanced mode: is for allowing filters to be applied other than 3066 * its own MAC or VLAN. This mode requires the VF to be 3067 * Trusted. 3068 */ 3069 if (mask.dst_mac[0] && !mask.dst_ip[0]) { 3070 vsi = pf->vsi[vf->lan_vsi_idx]; 3071 f = i40e_find_mac(vsi, data.dst_mac); 3072 3073 if (!f) { 3074 dev_info(&pf->pdev->dev, 3075 "Destination MAC %pM doesn't belong to VF %d\n", 3076 data.dst_mac, vf->vf_id); 3077 goto err; 3078 } 3079 3080 if (mask.vlan_id) { 3081 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, 3082 hlist) { 3083 if (f->vlan == ntohs(data.vlan_id)) { 3084 found = true; 3085 break; 3086 } 3087 } 3088 if (!found) { 3089 dev_info(&pf->pdev->dev, 3090 "VF %d doesn't have any VLAN id %u\n", 3091 vf->vf_id, ntohs(data.vlan_id)); 3092 goto err; 3093 } 3094 } 3095 } else { 3096 /* Check if VF is trusted */ 3097 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 3098 dev_err(&pf->pdev->dev, 3099 "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n", 3100 vf->vf_id); 3101 return I40E_ERR_CONFIG; 3102 } 3103 } 3104 3105 if (mask.dst_mac[0] & data.dst_mac[0]) { 3106 if (is_broadcast_ether_addr(data.dst_mac) || 3107 is_zero_ether_addr(data.dst_mac)) { 3108 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n", 3109 vf->vf_id, data.dst_mac); 3110 goto err; 3111 } 3112 } 3113 3114 if (mask.src_mac[0] & data.src_mac[0]) { 3115 if (is_broadcast_ether_addr(data.src_mac) || 3116 is_zero_ether_addr(data.src_mac)) { 3117 dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n", 3118 vf->vf_id, data.src_mac); 3119 goto err; 3120 } 3121 } 3122 3123 if (mask.dst_port & data.dst_port) { 3124 if (!data.dst_port || be16_to_cpu(data.dst_port) > 0xFFFF) { 3125 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n", 3126 vf->vf_id); 3127 goto err; 3128 } 3129 } 3130 3131 if (mask.src_port & data.src_port) { 3132 if (!data.src_port || be16_to_cpu(data.src_port) > 0xFFFF) { 3133 dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n", 3134 vf->vf_id); 3135 goto err; 3136 } 3137 } 3138 3139 if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW && 3140 tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) { 3141 dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n", 3142 vf->vf_id); 3143 goto err; 3144 } 3145 3146 if (mask.vlan_id & data.vlan_id) { 3147 if (ntohs(data.vlan_id) > I40E_MAX_VLANID) { 3148 dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n", 3149 vf->vf_id); 3150 goto err; 3151 } 3152 } 3153 3154 return I40E_SUCCESS; 3155 err: 3156 return I40E_ERR_CONFIG; 3157 } 3158 3159 /** 3160 * i40e_find_vsi_from_seid - searches for the vsi with the given seid 3161 * @vf: pointer to the VF info 3162 * @seid - seid of the vsi it is searching for 3163 **/ 3164 static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid) 3165 { 3166 struct i40e_pf *pf = vf->pf; 3167 struct i40e_vsi *vsi = NULL; 3168 int i; 3169 3170 for (i = 0; i < vf->num_tc ; i++) { 3171 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id); 3172 if (vsi && vsi->seid == seid) 3173 return vsi; 3174 } 3175 return NULL; 3176 } 3177 3178 /** 3179 * i40e_del_all_cloud_filters 3180 * @vf: pointer to the VF info 3181 * 3182 * This function deletes all cloud filters 3183 **/ 3184 static void i40e_del_all_cloud_filters(struct i40e_vf *vf) 3185 { 3186 struct i40e_cloud_filter *cfilter = NULL; 3187 struct i40e_pf *pf = vf->pf; 3188 struct i40e_vsi *vsi = NULL; 3189 struct hlist_node *node; 3190 int ret; 3191 3192 hlist_for_each_entry_safe(cfilter, node, 3193 &vf->cloud_filter_list, cloud_node) { 3194 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid); 3195 3196 if (!vsi) { 3197 dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n", 3198 vf->vf_id, cfilter->seid); 3199 continue; 3200 } 3201 3202 if (cfilter->dst_port) 3203 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, 3204 false); 3205 else 3206 ret = i40e_add_del_cloud_filter(vsi, cfilter, false); 3207 if (ret) 3208 dev_err(&pf->pdev->dev, 3209 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n", 3210 vf->vf_id, i40e_stat_str(&pf->hw, ret), 3211 i40e_aq_str(&pf->hw, 3212 pf->hw.aq.asq_last_status)); 3213 3214 hlist_del(&cfilter->cloud_node); 3215 kfree(cfilter); 3216 vf->num_cloud_filters--; 3217 } 3218 } 3219 3220 /** 3221 * i40e_vc_del_cloud_filter 3222 * @vf: pointer to the VF info 3223 * @msg: pointer to the msg buffer 3224 * 3225 * This function deletes a cloud filter programmed as TC filter for ADq 3226 **/ 3227 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg) 3228 { 3229 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; 3230 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; 3231 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; 3232 struct i40e_cloud_filter cfilter, *cf = NULL; 3233 struct i40e_pf *pf = vf->pf; 3234 struct i40e_vsi *vsi = NULL; 3235 struct hlist_node *node; 3236 i40e_status aq_ret = 0; 3237 int i, ret; 3238 3239 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3240 aq_ret = I40E_ERR_PARAM; 3241 goto err; 3242 } 3243 3244 if (!vf->adq_enabled) { 3245 dev_info(&pf->pdev->dev, 3246 "VF %d: ADq not enabled, can't apply cloud filter\n", 3247 vf->vf_id); 3248 aq_ret = I40E_ERR_PARAM; 3249 goto err; 3250 } 3251 3252 if (i40e_validate_cloud_filter(vf, vcf)) { 3253 dev_info(&pf->pdev->dev, 3254 "VF %d: Invalid input, can't apply cloud filter\n", 3255 vf->vf_id); 3256 aq_ret = I40E_ERR_PARAM; 3257 goto err; 3258 } 3259 3260 memset(&cfilter, 0, sizeof(cfilter)); 3261 /* parse destination mac address */ 3262 for (i = 0; i < ETH_ALEN; i++) 3263 cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; 3264 3265 /* parse source mac address */ 3266 for (i = 0; i < ETH_ALEN; i++) 3267 cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; 3268 3269 cfilter.vlan_id = mask.vlan_id & tcf.vlan_id; 3270 cfilter.dst_port = mask.dst_port & tcf.dst_port; 3271 cfilter.src_port = mask.src_port & tcf.src_port; 3272 3273 switch (vcf->flow_type) { 3274 case VIRTCHNL_TCP_V4_FLOW: 3275 cfilter.n_proto = ETH_P_IP; 3276 if (mask.dst_ip[0] & tcf.dst_ip[0]) 3277 memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip, 3278 ARRAY_SIZE(tcf.dst_ip)); 3279 else if (mask.src_ip[0] & tcf.dst_ip[0]) 3280 memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip, 3281 ARRAY_SIZE(tcf.dst_ip)); 3282 break; 3283 case VIRTCHNL_TCP_V6_FLOW: 3284 cfilter.n_proto = ETH_P_IPV6; 3285 if (mask.dst_ip[3] & tcf.dst_ip[3]) 3286 memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip, 3287 sizeof(cfilter.ip.v6.dst_ip6)); 3288 if (mask.src_ip[3] & tcf.src_ip[3]) 3289 memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip, 3290 sizeof(cfilter.ip.v6.src_ip6)); 3291 break; 3292 default: 3293 /* TC filter can be configured based on different combinations 3294 * and in this case IP is not a part of filter config 3295 */ 3296 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", 3297 vf->vf_id); 3298 } 3299 3300 /* get the vsi to which the tc belongs to */ 3301 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; 3302 cfilter.seid = vsi->seid; 3303 cfilter.flags = vcf->field_flags; 3304 3305 /* Deleting TC filter */ 3306 if (tcf.dst_port) 3307 ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false); 3308 else 3309 ret = i40e_add_del_cloud_filter(vsi, &cfilter, false); 3310 if (ret) { 3311 dev_err(&pf->pdev->dev, 3312 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n", 3313 vf->vf_id, i40e_stat_str(&pf->hw, ret), 3314 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 3315 goto err; 3316 } 3317 3318 hlist_for_each_entry_safe(cf, node, 3319 &vf->cloud_filter_list, cloud_node) { 3320 if (cf->seid != cfilter.seid) 3321 continue; 3322 if (mask.dst_port) 3323 if (cfilter.dst_port != cf->dst_port) 3324 continue; 3325 if (mask.dst_mac[0]) 3326 if (!ether_addr_equal(cf->src_mac, cfilter.src_mac)) 3327 continue; 3328 /* for ipv4 data to be valid, only first byte of mask is set */ 3329 if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0]) 3330 if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip, 3331 ARRAY_SIZE(tcf.dst_ip))) 3332 continue; 3333 /* for ipv6, mask is set for all sixteen bytes (4 words) */ 3334 if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3]) 3335 if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6, 3336 sizeof(cfilter.ip.v6.src_ip6))) 3337 continue; 3338 if (mask.vlan_id) 3339 if (cfilter.vlan_id != cf->vlan_id) 3340 continue; 3341 3342 hlist_del(&cf->cloud_node); 3343 kfree(cf); 3344 vf->num_cloud_filters--; 3345 } 3346 3347 err: 3348 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER, 3349 aq_ret); 3350 } 3351 3352 /** 3353 * i40e_vc_add_cloud_filter 3354 * @vf: pointer to the VF info 3355 * @msg: pointer to the msg buffer 3356 * 3357 * This function adds a cloud filter programmed as TC filter for ADq 3358 **/ 3359 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) 3360 { 3361 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; 3362 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; 3363 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; 3364 struct i40e_cloud_filter *cfilter = NULL; 3365 struct i40e_pf *pf = vf->pf; 3366 struct i40e_vsi *vsi = NULL; 3367 i40e_status aq_ret = 0; 3368 int i, ret; 3369 3370 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3371 aq_ret = I40E_ERR_PARAM; 3372 goto err; 3373 } 3374 3375 if (!vf->adq_enabled) { 3376 dev_info(&pf->pdev->dev, 3377 "VF %d: ADq is not enabled, can't apply cloud filter\n", 3378 vf->vf_id); 3379 aq_ret = I40E_ERR_PARAM; 3380 goto err; 3381 } 3382 3383 if (i40e_validate_cloud_filter(vf, vcf)) { 3384 dev_info(&pf->pdev->dev, 3385 "VF %d: Invalid input/s, can't apply cloud filter\n", 3386 vf->vf_id); 3387 aq_ret = I40E_ERR_PARAM; 3388 goto err; 3389 } 3390 3391 cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL); 3392 if (!cfilter) 3393 return -ENOMEM; 3394 3395 /* parse destination mac address */ 3396 for (i = 0; i < ETH_ALEN; i++) 3397 cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; 3398 3399 /* parse source mac address */ 3400 for (i = 0; i < ETH_ALEN; i++) 3401 cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; 3402 3403 cfilter->vlan_id = mask.vlan_id & tcf.vlan_id; 3404 cfilter->dst_port = mask.dst_port & tcf.dst_port; 3405 cfilter->src_port = mask.src_port & tcf.src_port; 3406 3407 switch (vcf->flow_type) { 3408 case VIRTCHNL_TCP_V4_FLOW: 3409 cfilter->n_proto = ETH_P_IP; 3410 if (mask.dst_ip[0] & tcf.dst_ip[0]) 3411 memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip, 3412 ARRAY_SIZE(tcf.dst_ip)); 3413 else if (mask.src_ip[0] & tcf.dst_ip[0]) 3414 memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip, 3415 ARRAY_SIZE(tcf.dst_ip)); 3416 break; 3417 case VIRTCHNL_TCP_V6_FLOW: 3418 cfilter->n_proto = ETH_P_IPV6; 3419 if (mask.dst_ip[3] & tcf.dst_ip[3]) 3420 memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip, 3421 sizeof(cfilter->ip.v6.dst_ip6)); 3422 if (mask.src_ip[3] & tcf.src_ip[3]) 3423 memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip, 3424 sizeof(cfilter->ip.v6.src_ip6)); 3425 break; 3426 default: 3427 /* TC filter can be configured based on different combinations 3428 * and in this case IP is not a part of filter config 3429 */ 3430 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", 3431 vf->vf_id); 3432 } 3433 3434 /* get the VSI to which the TC belongs to */ 3435 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; 3436 cfilter->seid = vsi->seid; 3437 cfilter->flags = vcf->field_flags; 3438 3439 /* Adding cloud filter programmed as TC filter */ 3440 if (tcf.dst_port) 3441 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true); 3442 else 3443 ret = i40e_add_del_cloud_filter(vsi, cfilter, true); 3444 if (ret) { 3445 dev_err(&pf->pdev->dev, 3446 "VF %d: Failed to add cloud filter, err %s aq_err %s\n", 3447 vf->vf_id, i40e_stat_str(&pf->hw, ret), 3448 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 3449 goto err; 3450 } 3451 3452 INIT_HLIST_NODE(&cfilter->cloud_node); 3453 hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list); 3454 vf->num_cloud_filters++; 3455 err: 3456 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER, 3457 aq_ret); 3458 } 3459 3460 /** 3461 * i40e_vc_add_qch_msg: Add queue channel and enable ADq 3462 * @vf: pointer to the VF info 3463 * @msg: pointer to the msg buffer 3464 **/ 3465 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) 3466 { 3467 struct virtchnl_tc_info *tci = 3468 (struct virtchnl_tc_info *)msg; 3469 struct i40e_pf *pf = vf->pf; 3470 struct i40e_link_status *ls = &pf->hw.phy.link_info; 3471 int i, adq_request_qps = 0, speed = 0; 3472 i40e_status aq_ret = 0; 3473 3474 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3475 aq_ret = I40E_ERR_PARAM; 3476 goto err; 3477 } 3478 3479 /* ADq cannot be applied if spoof check is ON */ 3480 if (vf->spoofchk) { 3481 dev_err(&pf->pdev->dev, 3482 "Spoof check is ON, turn it OFF to enable ADq\n"); 3483 aq_ret = I40E_ERR_PARAM; 3484 goto err; 3485 } 3486 3487 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) { 3488 dev_err(&pf->pdev->dev, 3489 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n", 3490 vf->vf_id); 3491 aq_ret = I40E_ERR_PARAM; 3492 goto err; 3493 } 3494 3495 /* max number of traffic classes for VF currently capped at 4 */ 3496 if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) { 3497 dev_err(&pf->pdev->dev, 3498 "VF %d trying to set %u TCs, valid range 1-4 TCs per VF\n", 3499 vf->vf_id, tci->num_tc); 3500 aq_ret = I40E_ERR_PARAM; 3501 goto err; 3502 } 3503 3504 /* validate queues for each TC */ 3505 for (i = 0; i < tci->num_tc; i++) 3506 if (!tci->list[i].count || 3507 tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) { 3508 dev_err(&pf->pdev->dev, 3509 "VF %d: TC %d trying to set %u queues, valid range 1-4 queues per TC\n", 3510 vf->vf_id, i, tci->list[i].count); 3511 aq_ret = I40E_ERR_PARAM; 3512 goto err; 3513 } 3514 3515 /* need Max VF queues but already have default number of queues */ 3516 adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF; 3517 3518 if (pf->queues_left < adq_request_qps) { 3519 dev_err(&pf->pdev->dev, 3520 "No queues left to allocate to VF %d\n", 3521 vf->vf_id); 3522 aq_ret = I40E_ERR_PARAM; 3523 goto err; 3524 } else { 3525 /* we need to allocate max VF queues to enable ADq so as to 3526 * make sure ADq enabled VF always gets back queues when it 3527 * goes through a reset. 3528 */ 3529 vf->num_queue_pairs = I40E_MAX_VF_QUEUES; 3530 } 3531 3532 /* get link speed in MB to validate rate limit */ 3533 switch (ls->link_speed) { 3534 case VIRTCHNL_LINK_SPEED_100MB: 3535 speed = SPEED_100; 3536 break; 3537 case VIRTCHNL_LINK_SPEED_1GB: 3538 speed = SPEED_1000; 3539 break; 3540 case VIRTCHNL_LINK_SPEED_10GB: 3541 speed = SPEED_10000; 3542 break; 3543 case VIRTCHNL_LINK_SPEED_20GB: 3544 speed = SPEED_20000; 3545 break; 3546 case VIRTCHNL_LINK_SPEED_25GB: 3547 speed = SPEED_25000; 3548 break; 3549 case VIRTCHNL_LINK_SPEED_40GB: 3550 speed = SPEED_40000; 3551 break; 3552 default: 3553 dev_err(&pf->pdev->dev, 3554 "Cannot detect link speed\n"); 3555 aq_ret = I40E_ERR_PARAM; 3556 goto err; 3557 } 3558 3559 /* parse data from the queue channel info */ 3560 vf->num_tc = tci->num_tc; 3561 for (i = 0; i < vf->num_tc; i++) { 3562 if (tci->list[i].max_tx_rate) { 3563 if (tci->list[i].max_tx_rate > speed) { 3564 dev_err(&pf->pdev->dev, 3565 "Invalid max tx rate %llu specified for VF %d.", 3566 tci->list[i].max_tx_rate, 3567 vf->vf_id); 3568 aq_ret = I40E_ERR_PARAM; 3569 goto err; 3570 } else { 3571 vf->ch[i].max_tx_rate = 3572 tci->list[i].max_tx_rate; 3573 } 3574 } 3575 vf->ch[i].num_qps = tci->list[i].count; 3576 } 3577 3578 /* set this flag only after making sure all inputs are sane */ 3579 vf->adq_enabled = true; 3580 /* num_req_queues is set when user changes number of queues via ethtool 3581 * and this causes issue for default VSI(which depends on this variable) 3582 * when ADq is enabled, hence reset it. 3583 */ 3584 vf->num_req_queues = 0; 3585 3586 /* reset the VF in order to allocate resources */ 3587 i40e_vc_notify_vf_reset(vf); 3588 i40e_reset_vf(vf, false); 3589 3590 return I40E_SUCCESS; 3591 3592 /* send the response to the VF */ 3593 err: 3594 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS, 3595 aq_ret); 3596 } 3597 3598 /** 3599 * i40e_vc_del_qch_msg 3600 * @vf: pointer to the VF info 3601 * @msg: pointer to the msg buffer 3602 **/ 3603 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) 3604 { 3605 struct i40e_pf *pf = vf->pf; 3606 i40e_status aq_ret = 0; 3607 3608 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3609 aq_ret = I40E_ERR_PARAM; 3610 goto err; 3611 } 3612 3613 if (vf->adq_enabled) { 3614 i40e_del_all_cloud_filters(vf); 3615 i40e_del_qch(vf); 3616 vf->adq_enabled = false; 3617 vf->num_tc = 0; 3618 dev_info(&pf->pdev->dev, 3619 "Deleting Queue Channels and cloud filters for ADq on VF %d\n", 3620 vf->vf_id); 3621 } else { 3622 dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n", 3623 vf->vf_id); 3624 aq_ret = I40E_ERR_PARAM; 3625 } 3626 3627 /* reset the VF in order to allocate resources */ 3628 i40e_vc_notify_vf_reset(vf); 3629 i40e_reset_vf(vf, false); 3630 3631 return I40E_SUCCESS; 3632 3633 err: 3634 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS, 3635 aq_ret); 3636 } 3637 3638 /** 3639 * i40e_vc_process_vf_msg 3640 * @pf: pointer to the PF structure 3641 * @vf_id: source VF id 3642 * @v_opcode: operation code 3643 * @v_retval: unused return value code 3644 * @msg: pointer to the msg buffer 3645 * @msglen: msg length 3646 * 3647 * called from the common aeq/arq handler to 3648 * process request from VF 3649 **/ 3650 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, 3651 u32 __always_unused v_retval, u8 *msg, u16 msglen) 3652 { 3653 struct i40e_hw *hw = &pf->hw; 3654 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id; 3655 struct i40e_vf *vf; 3656 int ret; 3657 3658 pf->vf_aq_requests++; 3659 if (local_vf_id >= pf->num_alloc_vfs) 3660 return -EINVAL; 3661 vf = &(pf->vf[local_vf_id]); 3662 3663 /* Check if VF is disabled. */ 3664 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states)) 3665 return I40E_ERR_PARAM; 3666 3667 /* perform basic checks on the msg */ 3668 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); 3669 3670 /* perform additional checks specific to this driver */ 3671 if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) { 3672 struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg; 3673 3674 if (vrk->key_len != I40E_HKEY_ARRAY_SIZE) 3675 ret = -EINVAL; 3676 } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) { 3677 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; 3678 3679 if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) 3680 ret = -EINVAL; 3681 } 3682 3683 if (ret) { 3684 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM); 3685 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", 3686 local_vf_id, v_opcode, msglen); 3687 switch (ret) { 3688 case VIRTCHNL_STATUS_ERR_PARAM: 3689 return -EPERM; 3690 default: 3691 return -EINVAL; 3692 } 3693 } 3694 3695 switch (v_opcode) { 3696 case VIRTCHNL_OP_VERSION: 3697 ret = i40e_vc_get_version_msg(vf, msg); 3698 break; 3699 case VIRTCHNL_OP_GET_VF_RESOURCES: 3700 ret = i40e_vc_get_vf_resources_msg(vf, msg); 3701 i40e_vc_notify_vf_link_state(vf); 3702 break; 3703 case VIRTCHNL_OP_RESET_VF: 3704 i40e_vc_reset_vf_msg(vf); 3705 ret = 0; 3706 break; 3707 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: 3708 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg); 3709 break; 3710 case VIRTCHNL_OP_CONFIG_VSI_QUEUES: 3711 ret = i40e_vc_config_queues_msg(vf, msg); 3712 break; 3713 case VIRTCHNL_OP_CONFIG_IRQ_MAP: 3714 ret = i40e_vc_config_irq_map_msg(vf, msg); 3715 break; 3716 case VIRTCHNL_OP_ENABLE_QUEUES: 3717 ret = i40e_vc_enable_queues_msg(vf, msg); 3718 i40e_vc_notify_vf_link_state(vf); 3719 break; 3720 case VIRTCHNL_OP_DISABLE_QUEUES: 3721 ret = i40e_vc_disable_queues_msg(vf, msg); 3722 break; 3723 case VIRTCHNL_OP_ADD_ETH_ADDR: 3724 ret = i40e_vc_add_mac_addr_msg(vf, msg); 3725 break; 3726 case VIRTCHNL_OP_DEL_ETH_ADDR: 3727 ret = i40e_vc_del_mac_addr_msg(vf, msg); 3728 break; 3729 case VIRTCHNL_OP_ADD_VLAN: 3730 ret = i40e_vc_add_vlan_msg(vf, msg); 3731 break; 3732 case VIRTCHNL_OP_DEL_VLAN: 3733 ret = i40e_vc_remove_vlan_msg(vf, msg); 3734 break; 3735 case VIRTCHNL_OP_GET_STATS: 3736 ret = i40e_vc_get_stats_msg(vf, msg); 3737 break; 3738 case VIRTCHNL_OP_IWARP: 3739 ret = i40e_vc_iwarp_msg(vf, msg, msglen); 3740 break; 3741 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: 3742 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true); 3743 break; 3744 case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: 3745 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false); 3746 break; 3747 case VIRTCHNL_OP_CONFIG_RSS_KEY: 3748 ret = i40e_vc_config_rss_key(vf, msg); 3749 break; 3750 case VIRTCHNL_OP_CONFIG_RSS_LUT: 3751 ret = i40e_vc_config_rss_lut(vf, msg); 3752 break; 3753 case VIRTCHNL_OP_GET_RSS_HENA_CAPS: 3754 ret = i40e_vc_get_rss_hena(vf, msg); 3755 break; 3756 case VIRTCHNL_OP_SET_RSS_HENA: 3757 ret = i40e_vc_set_rss_hena(vf, msg); 3758 break; 3759 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 3760 ret = i40e_vc_enable_vlan_stripping(vf, msg); 3761 break; 3762 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 3763 ret = i40e_vc_disable_vlan_stripping(vf, msg); 3764 break; 3765 case VIRTCHNL_OP_REQUEST_QUEUES: 3766 ret = i40e_vc_request_queues_msg(vf, msg); 3767 break; 3768 case VIRTCHNL_OP_ENABLE_CHANNELS: 3769 ret = i40e_vc_add_qch_msg(vf, msg); 3770 break; 3771 case VIRTCHNL_OP_DISABLE_CHANNELS: 3772 ret = i40e_vc_del_qch_msg(vf, msg); 3773 break; 3774 case VIRTCHNL_OP_ADD_CLOUD_FILTER: 3775 ret = i40e_vc_add_cloud_filter(vf, msg); 3776 break; 3777 case VIRTCHNL_OP_DEL_CLOUD_FILTER: 3778 ret = i40e_vc_del_cloud_filter(vf, msg); 3779 break; 3780 case VIRTCHNL_OP_UNKNOWN: 3781 default: 3782 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", 3783 v_opcode, local_vf_id); 3784 ret = i40e_vc_send_resp_to_vf(vf, v_opcode, 3785 I40E_ERR_NOT_IMPLEMENTED); 3786 break; 3787 } 3788 3789 return ret; 3790 } 3791 3792 /** 3793 * i40e_vc_process_vflr_event 3794 * @pf: pointer to the PF structure 3795 * 3796 * called from the vlfr irq handler to 3797 * free up VF resources and state variables 3798 **/ 3799 int i40e_vc_process_vflr_event(struct i40e_pf *pf) 3800 { 3801 struct i40e_hw *hw = &pf->hw; 3802 u32 reg, reg_idx, bit_idx; 3803 struct i40e_vf *vf; 3804 int vf_id; 3805 3806 if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state)) 3807 return 0; 3808 3809 /* Re-enable the VFLR interrupt cause here, before looking for which 3810 * VF got reset. Otherwise, if another VF gets a reset while the 3811 * first one is being processed, that interrupt will be lost, and 3812 * that VF will be stuck in reset forever. 3813 */ 3814 reg = rd32(hw, I40E_PFINT_ICR0_ENA); 3815 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK; 3816 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 3817 i40e_flush(hw); 3818 3819 clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state); 3820 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { 3821 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; 3822 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; 3823 /* read GLGEN_VFLRSTAT register to find out the flr VFs */ 3824 vf = &pf->vf[vf_id]; 3825 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); 3826 if (reg & BIT(bit_idx)) 3827 /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */ 3828 i40e_reset_vf(vf, true); 3829 } 3830 3831 return 0; 3832 } 3833 3834 /** 3835 * i40e_validate_vf 3836 * @pf: the physical function 3837 * @vf_id: VF identifier 3838 * 3839 * Check that the VF is enabled and the VSI exists. 3840 * 3841 * Returns 0 on success, negative on failure 3842 **/ 3843 static int i40e_validate_vf(struct i40e_pf *pf, int vf_id) 3844 { 3845 struct i40e_vsi *vsi; 3846 struct i40e_vf *vf; 3847 int ret = 0; 3848 3849 if (vf_id >= pf->num_alloc_vfs) { 3850 dev_err(&pf->pdev->dev, 3851 "Invalid VF Identifier %d\n", vf_id); 3852 ret = -EINVAL; 3853 goto err_out; 3854 } 3855 vf = &pf->vf[vf_id]; 3856 vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id); 3857 if (!vsi) 3858 ret = -EINVAL; 3859 err_out: 3860 return ret; 3861 } 3862 3863 /** 3864 * i40e_ndo_set_vf_mac 3865 * @netdev: network interface device structure 3866 * @vf_id: VF identifier 3867 * @mac: mac address 3868 * 3869 * program VF mac address 3870 **/ 3871 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) 3872 { 3873 struct i40e_netdev_priv *np = netdev_priv(netdev); 3874 struct i40e_vsi *vsi = np->vsi; 3875 struct i40e_pf *pf = vsi->back; 3876 struct i40e_mac_filter *f; 3877 struct i40e_vf *vf; 3878 int ret = 0; 3879 struct hlist_node *h; 3880 int bkt; 3881 u8 i; 3882 3883 /* validate the request */ 3884 ret = i40e_validate_vf(pf, vf_id); 3885 if (ret) 3886 goto error_param; 3887 3888 vf = &pf->vf[vf_id]; 3889 vsi = pf->vsi[vf->lan_vsi_idx]; 3890 3891 /* When the VF is resetting wait until it is done. 3892 * It can take up to 200 milliseconds, 3893 * but wait for up to 300 milliseconds to be safe. 3894 */ 3895 for (i = 0; i < 15; i++) { 3896 if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) 3897 break; 3898 msleep(20); 3899 } 3900 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { 3901 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", 3902 vf_id); 3903 ret = -EAGAIN; 3904 goto error_param; 3905 } 3906 3907 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 3908 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 3909 return -EAGAIN; 3910 } 3911 3912 if (is_multicast_ether_addr(mac)) { 3913 dev_err(&pf->pdev->dev, 3914 "Invalid Ethernet address %pM for VF %d\n", mac, vf_id); 3915 ret = -EINVAL; 3916 goto error_param; 3917 } 3918 3919 /* Lock once because below invoked function add/del_filter requires 3920 * mac_filter_hash_lock to be held 3921 */ 3922 spin_lock_bh(&vsi->mac_filter_hash_lock); 3923 3924 /* delete the temporary mac address */ 3925 if (!is_zero_ether_addr(vf->default_lan_addr.addr)) 3926 i40e_del_mac_filter(vsi, vf->default_lan_addr.addr); 3927 3928 /* Delete all the filters for this VSI - we're going to kill it 3929 * anyway. 3930 */ 3931 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) 3932 __i40e_del_filter(vsi, f); 3933 3934 spin_unlock_bh(&vsi->mac_filter_hash_lock); 3935 3936 /* program mac filter */ 3937 if (i40e_sync_vsi_filters(vsi)) { 3938 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); 3939 ret = -EIO; 3940 goto error_param; 3941 } 3942 ether_addr_copy(vf->default_lan_addr.addr, mac); 3943 3944 if (is_zero_ether_addr(mac)) { 3945 vf->pf_set_mac = false; 3946 dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id); 3947 } else { 3948 vf->pf_set_mac = true; 3949 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", 3950 mac, vf_id); 3951 } 3952 3953 /* Force the VF interface down so it has to bring up with new MAC 3954 * address 3955 */ 3956 i40e_vc_disable_vf(vf); 3957 dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n"); 3958 3959 error_param: 3960 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 3961 return ret; 3962 } 3963 3964 /** 3965 * i40e_vsi_has_vlans - True if VSI has configured VLANs 3966 * @vsi: pointer to the vsi 3967 * 3968 * Check if a VSI has configured any VLANs. False if we have a port VLAN or if 3969 * we have no configured VLANs. Do not call while holding the 3970 * mac_filter_hash_lock. 3971 */ 3972 static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi) 3973 { 3974 bool have_vlans; 3975 3976 /* If we have a port VLAN, then the VSI cannot have any VLANs 3977 * configured, as all MAC/VLAN filters will be assigned to the PVID. 3978 */ 3979 if (vsi->info.pvid) 3980 return false; 3981 3982 /* Since we don't have a PVID, we know that if the device is in VLAN 3983 * mode it must be because of a VLAN filter configured on this VSI. 3984 */ 3985 spin_lock_bh(&vsi->mac_filter_hash_lock); 3986 have_vlans = i40e_is_vsi_in_vlan(vsi); 3987 spin_unlock_bh(&vsi->mac_filter_hash_lock); 3988 3989 return have_vlans; 3990 } 3991 3992 /** 3993 * i40e_ndo_set_vf_port_vlan 3994 * @netdev: network interface device structure 3995 * @vf_id: VF identifier 3996 * @vlan_id: mac address 3997 * @qos: priority setting 3998 * @vlan_proto: vlan protocol 3999 * 4000 * program VF vlan id and/or qos 4001 **/ 4002 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, 4003 u16 vlan_id, u8 qos, __be16 vlan_proto) 4004 { 4005 u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT); 4006 struct i40e_netdev_priv *np = netdev_priv(netdev); 4007 struct i40e_pf *pf = np->vsi->back; 4008 struct i40e_vsi *vsi; 4009 struct i40e_vf *vf; 4010 int ret = 0; 4011 4012 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4013 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4014 return -EAGAIN; 4015 } 4016 4017 /* validate the request */ 4018 ret = i40e_validate_vf(pf, vf_id); 4019 if (ret) 4020 goto error_pvid; 4021 4022 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) { 4023 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n"); 4024 ret = -EINVAL; 4025 goto error_pvid; 4026 } 4027 4028 if (vlan_proto != htons(ETH_P_8021Q)) { 4029 dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n"); 4030 ret = -EPROTONOSUPPORT; 4031 goto error_pvid; 4032 } 4033 4034 vf = &pf->vf[vf_id]; 4035 vsi = pf->vsi[vf->lan_vsi_idx]; 4036 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { 4037 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", 4038 vf_id); 4039 ret = -EAGAIN; 4040 goto error_pvid; 4041 } 4042 4043 if (le16_to_cpu(vsi->info.pvid) == vlanprio) 4044 /* duplicate request, so just return success */ 4045 goto error_pvid; 4046 4047 if (i40e_vsi_has_vlans(vsi)) { 4048 dev_err(&pf->pdev->dev, 4049 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", 4050 vf_id); 4051 /* Administrator Error - knock the VF offline until he does 4052 * the right thing by reconfiguring his network correctly 4053 * and then reloading the VF driver. 4054 */ 4055 i40e_vc_disable_vf(vf); 4056 /* During reset the VF got a new VSI, so refresh the pointer. */ 4057 vsi = pf->vsi[vf->lan_vsi_idx]; 4058 } 4059 4060 /* Locked once because multiple functions below iterate list */ 4061 spin_lock_bh(&vsi->mac_filter_hash_lock); 4062 4063 /* Check for condition where there was already a port VLAN ID 4064 * filter set and now it is being deleted by setting it to zero. 4065 * Additionally check for the condition where there was a port 4066 * VLAN but now there is a new and different port VLAN being set. 4067 * Before deleting all the old VLAN filters we must add new ones 4068 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our 4069 * MAC addresses deleted. 4070 */ 4071 if ((!(vlan_id || qos) || 4072 vlanprio != le16_to_cpu(vsi->info.pvid)) && 4073 vsi->info.pvid) { 4074 ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY); 4075 if (ret) { 4076 dev_info(&vsi->back->pdev->dev, 4077 "add VF VLAN failed, ret=%d aq_err=%d\n", ret, 4078 vsi->back->hw.aq.asq_last_status); 4079 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4080 goto error_pvid; 4081 } 4082 } 4083 4084 if (vsi->info.pvid) { 4085 /* remove all filters on the old VLAN */ 4086 i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) & 4087 VLAN_VID_MASK)); 4088 } 4089 4090 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4091 if (vlan_id || qos) 4092 ret = i40e_vsi_add_pvid(vsi, vlanprio); 4093 else 4094 i40e_vsi_remove_pvid(vsi); 4095 spin_lock_bh(&vsi->mac_filter_hash_lock); 4096 4097 if (vlan_id) { 4098 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", 4099 vlan_id, qos, vf_id); 4100 4101 /* add new VLAN filter for each MAC */ 4102 ret = i40e_add_vlan_all_mac(vsi, vlan_id); 4103 if (ret) { 4104 dev_info(&vsi->back->pdev->dev, 4105 "add VF VLAN failed, ret=%d aq_err=%d\n", ret, 4106 vsi->back->hw.aq.asq_last_status); 4107 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4108 goto error_pvid; 4109 } 4110 4111 /* remove the previously added non-VLAN MAC filters */ 4112 i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY); 4113 } 4114 4115 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4116 4117 /* Schedule the worker thread to take care of applying changes */ 4118 i40e_service_event_schedule(vsi->back); 4119 4120 if (ret) { 4121 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n"); 4122 goto error_pvid; 4123 } 4124 4125 /* The Port VLAN needs to be saved across resets the same as the 4126 * default LAN MAC address. 4127 */ 4128 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid); 4129 ret = 0; 4130 4131 error_pvid: 4132 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4133 return ret; 4134 } 4135 4136 /** 4137 * i40e_ndo_set_vf_bw 4138 * @netdev: network interface device structure 4139 * @vf_id: VF identifier 4140 * @min_tx_rate: Minimum Tx rate 4141 * @max_tx_rate: Maximum Tx rate 4142 * 4143 * configure VF Tx rate 4144 **/ 4145 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, 4146 int max_tx_rate) 4147 { 4148 struct i40e_netdev_priv *np = netdev_priv(netdev); 4149 struct i40e_pf *pf = np->vsi->back; 4150 struct i40e_vsi *vsi; 4151 struct i40e_vf *vf; 4152 int ret = 0; 4153 4154 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4155 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4156 return -EAGAIN; 4157 } 4158 4159 /* validate the request */ 4160 ret = i40e_validate_vf(pf, vf_id); 4161 if (ret) 4162 goto error; 4163 4164 if (min_tx_rate) { 4165 dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n", 4166 min_tx_rate, vf_id); 4167 return -EINVAL; 4168 } 4169 4170 vf = &pf->vf[vf_id]; 4171 vsi = pf->vsi[vf->lan_vsi_idx]; 4172 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { 4173 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", 4174 vf_id); 4175 ret = -EAGAIN; 4176 goto error; 4177 } 4178 4179 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); 4180 if (ret) 4181 goto error; 4182 4183 vf->tx_rate = max_tx_rate; 4184 error: 4185 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4186 return ret; 4187 } 4188 4189 /** 4190 * i40e_ndo_get_vf_config 4191 * @netdev: network interface device structure 4192 * @vf_id: VF identifier 4193 * @ivi: VF configuration structure 4194 * 4195 * return VF configuration 4196 **/ 4197 int i40e_ndo_get_vf_config(struct net_device *netdev, 4198 int vf_id, struct ifla_vf_info *ivi) 4199 { 4200 struct i40e_netdev_priv *np = netdev_priv(netdev); 4201 struct i40e_vsi *vsi = np->vsi; 4202 struct i40e_pf *pf = vsi->back; 4203 struct i40e_vf *vf; 4204 int ret = 0; 4205 4206 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4207 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4208 return -EAGAIN; 4209 } 4210 4211 /* validate the request */ 4212 ret = i40e_validate_vf(pf, vf_id); 4213 if (ret) 4214 goto error_param; 4215 4216 vf = &pf->vf[vf_id]; 4217 /* first vsi is always the LAN vsi */ 4218 vsi = pf->vsi[vf->lan_vsi_idx]; 4219 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { 4220 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", 4221 vf_id); 4222 ret = -EAGAIN; 4223 goto error_param; 4224 } 4225 4226 ivi->vf = vf_id; 4227 4228 ether_addr_copy(ivi->mac, vf->default_lan_addr.addr); 4229 4230 ivi->max_tx_rate = vf->tx_rate; 4231 ivi->min_tx_rate = 0; 4232 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK; 4233 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >> 4234 I40E_VLAN_PRIORITY_SHIFT; 4235 if (vf->link_forced == false) 4236 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; 4237 else if (vf->link_up == true) 4238 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; 4239 else 4240 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; 4241 ivi->spoofchk = vf->spoofchk; 4242 ivi->trusted = vf->trusted; 4243 ret = 0; 4244 4245 error_param: 4246 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4247 return ret; 4248 } 4249 4250 /** 4251 * i40e_ndo_set_vf_link_state 4252 * @netdev: network interface device structure 4253 * @vf_id: VF identifier 4254 * @link: required link state 4255 * 4256 * Set the link state of a specified VF, regardless of physical link state 4257 **/ 4258 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) 4259 { 4260 struct i40e_netdev_priv *np = netdev_priv(netdev); 4261 struct i40e_pf *pf = np->vsi->back; 4262 struct virtchnl_pf_event pfe; 4263 struct i40e_hw *hw = &pf->hw; 4264 struct i40e_vf *vf; 4265 int abs_vf_id; 4266 int ret = 0; 4267 4268 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4269 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4270 return -EAGAIN; 4271 } 4272 4273 /* validate the request */ 4274 if (vf_id >= pf->num_alloc_vfs) { 4275 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 4276 ret = -EINVAL; 4277 goto error_out; 4278 } 4279 4280 vf = &pf->vf[vf_id]; 4281 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 4282 4283 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; 4284 pfe.severity = PF_EVENT_SEVERITY_INFO; 4285 4286 switch (link) { 4287 case IFLA_VF_LINK_STATE_AUTO: 4288 vf->link_forced = false; 4289 pfe.event_data.link_event.link_status = 4290 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; 4291 pfe.event_data.link_event.link_speed = 4292 (enum virtchnl_link_speed) 4293 pf->hw.phy.link_info.link_speed; 4294 break; 4295 case IFLA_VF_LINK_STATE_ENABLE: 4296 vf->link_forced = true; 4297 vf->link_up = true; 4298 pfe.event_data.link_event.link_status = true; 4299 pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB; 4300 break; 4301 case IFLA_VF_LINK_STATE_DISABLE: 4302 vf->link_forced = true; 4303 vf->link_up = false; 4304 pfe.event_data.link_event.link_status = false; 4305 pfe.event_data.link_event.link_speed = 0; 4306 break; 4307 default: 4308 ret = -EINVAL; 4309 goto error_out; 4310 } 4311 /* Notify the VF of its new link state */ 4312 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 4313 0, (u8 *)&pfe, sizeof(pfe), NULL); 4314 4315 error_out: 4316 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4317 return ret; 4318 } 4319 4320 /** 4321 * i40e_ndo_set_vf_spoofchk 4322 * @netdev: network interface device structure 4323 * @vf_id: VF identifier 4324 * @enable: flag to enable or disable feature 4325 * 4326 * Enable or disable VF spoof checking 4327 **/ 4328 int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) 4329 { 4330 struct i40e_netdev_priv *np = netdev_priv(netdev); 4331 struct i40e_vsi *vsi = np->vsi; 4332 struct i40e_pf *pf = vsi->back; 4333 struct i40e_vsi_context ctxt; 4334 struct i40e_hw *hw = &pf->hw; 4335 struct i40e_vf *vf; 4336 int ret = 0; 4337 4338 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4339 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4340 return -EAGAIN; 4341 } 4342 4343 /* validate the request */ 4344 if (vf_id >= pf->num_alloc_vfs) { 4345 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 4346 ret = -EINVAL; 4347 goto out; 4348 } 4349 4350 vf = &(pf->vf[vf_id]); 4351 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { 4352 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", 4353 vf_id); 4354 ret = -EAGAIN; 4355 goto out; 4356 } 4357 4358 if (enable == vf->spoofchk) 4359 goto out; 4360 4361 vf->spoofchk = enable; 4362 memset(&ctxt, 0, sizeof(ctxt)); 4363 ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid; 4364 ctxt.pf_num = pf->hw.pf_id; 4365 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); 4366 if (enable) 4367 ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | 4368 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); 4369 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); 4370 if (ret) { 4371 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n", 4372 ret); 4373 ret = -EIO; 4374 } 4375 out: 4376 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4377 return ret; 4378 } 4379 4380 /** 4381 * i40e_ndo_set_vf_trust 4382 * @netdev: network interface device structure of the pf 4383 * @vf_id: VF identifier 4384 * @setting: trust setting 4385 * 4386 * Enable or disable VF trust setting 4387 **/ 4388 int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) 4389 { 4390 struct i40e_netdev_priv *np = netdev_priv(netdev); 4391 struct i40e_pf *pf = np->vsi->back; 4392 struct i40e_vf *vf; 4393 int ret = 0; 4394 4395 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4396 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4397 return -EAGAIN; 4398 } 4399 4400 /* validate the request */ 4401 if (vf_id >= pf->num_alloc_vfs) { 4402 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 4403 ret = -EINVAL; 4404 goto out; 4405 } 4406 4407 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 4408 dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n"); 4409 ret = -EINVAL; 4410 goto out; 4411 } 4412 4413 vf = &pf->vf[vf_id]; 4414 4415 if (setting == vf->trusted) 4416 goto out; 4417 4418 vf->trusted = setting; 4419 i40e_vc_disable_vf(vf); 4420 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n", 4421 vf_id, setting ? "" : "un"); 4422 4423 if (vf->adq_enabled) { 4424 if (!vf->trusted) { 4425 dev_info(&pf->pdev->dev, 4426 "VF %u no longer Trusted, deleting all cloud filters\n", 4427 vf_id); 4428 i40e_del_all_cloud_filters(vf); 4429 } 4430 } 4431 4432 out: 4433 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4434 return ret; 4435 } 4436