/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e.h"

/***********************misc routines*****************************/

/**
 * i40e_vc_disable_vf
 * @pf: pointer to the PF info
 * @vf: pointer to the VF info
 *
 * Disable the VF through a SW reset
 **/
static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
	i40e_flush(hw);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u8 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}

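/* Worked example for the mapping above (illustrative values only, not from
 * the datasheet): with a contiguous map where queue_mapping[0] == 64, VSI
 * queue 3 resolves to PF queue 64 + 3 = 67; with
 * I40E_AQ_VSI_QUE_MAP_NONCONTIG set, the same VSI queue is instead read
 * straight out of queue_mapping[3].
 */
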
/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct i40e_virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (1 <<
				(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				 vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (1 <<
				(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
				 + 1));
	}

	next_q = find_first_bit(&linklistmap,
				(I40E_MAX_VSI_QP *
				 I40E_VIRTCHNL_SUPPORTED_QTYPES));
	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap,
				       (I40E_MAX_VSI_QP *
					I40E_VIRTCHNL_SUPPORTED_QTYPES),
				       next_q + 1);
		if (next_q <
		    (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

irq_list_done:
	i40e_flush(hw);
}

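/* Layout assumed by the linklistmap bookkeeping above: each VSI queue pair
 * owns I40E_VIRTCHNL_SUPPORTED_QTYPES (i.e. 2) adjacent bits, RX at
 * bit (2 * qid) and TX at bit (2 * qid + 1), so walking the set bits in
 * ascending order yields the RX/TX chain that is written into the hardware
 * linked list through the QINT_RQCTL/QINT_TQCTL registers.
 */
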
/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}

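/* Note on the context programmed above: tx_ctx.base is expressed in
 * 128-byte units, so (as an illustrative value) a ring at DMA address
 * 0x2000 is programmed as 0x2000 / 128 = 0x40.
 */
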
/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set splitalways mode 10b */
		rx_ctx.dtype = 0x2;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}

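/* Buffer sizing used above, spelled out: hdr_size is capped at 2KB - 64
 * and programmed in units of (1 << I40E_RXQ_CTX_HBUFF_SHIFT),
 * databuffer_size is capped at 16KB - 128 and programmed in units of
 * (1 << I40E_RXQ_CTX_DBUFF_SHIFT), and max_pkt_size must fall in
 * [64, 16KB).
 */
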
/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @type: type of VSI to allocate
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}
	if (type == I40E_VSI_SRIOV) {
		u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed.  Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
		f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
				    vf->port_vlan_id, true, false);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF MAC addr\n");
		f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id,
				    true, false);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
	}

	/* program mac filter */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  vf->tx_rate / 50, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_queue_pairs = 0;
	int j;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	/* map PF queues to VF queues */
	for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);

		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
		total_queue_pairs++;
	}

	/* map PF queues to VSI */
	for (j = 0; j < 7; j++) {
		if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) {
			reg = 0x07FF07FF;	/* unused */
		} else {
			u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
							  j * 2);
			reg = qid;
			qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
						      (j * 2) + 1);
			reg |= qid << 16;
		}
		wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
	}

	i40e_flush(hw);
}

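/* VSILAN_QTABLE packing used above: each 32-bit register carries two PF
 * queue ids, the even VSI queue in bits 15:0 and the odd one in bits 31:16,
 * with 0x07FF in a halfword marking an unused slot.
 */
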
/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, msix_vf;

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						       (vf->vf_id))
						      + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping
	 * track of the resources
	 */
	vf->num_queue_pairs = 0;
	vf->vf_states = 0;
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
	set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STAT_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

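/* The two defines below select, going by their names, the PCIe Device
 * Status register in the VF's config space (offset 0xAA) and its
 * Transactions Pending bit (0x20); they are consumed through the
 * PF_PCI_CIAA/CIAD indirect config-access window in i40e_quiesce_vf_pci().
 */
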
#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}

/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * reset the VF
 **/
void i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	int i;
	u32 reg;

	if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
		return;

	/* warn the VF */
	clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
	/* clear the reset bit in the VPGEN_VFRTRIG reg */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* On initial reset, we won't have any queues */
	if (vf->lan_vsi_idx == 0)
		goto complete_reset;

	i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false);
complete_reset:
	/* reallocate VF resources to reset the VSI state */
	i40e_free_vf_res(vf);
	i40e_alloc_vf_res(vf);
	i40e_enable_vf_mappings(vf);
	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

	/* tell the VF the reset is done */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, &pf->state);
}

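/* Reset handshake recap: i40e_reset_vf() reports I40E_VFR_COMPLETED once
 * the hardware sets VFRD in VPGEN_VFRSTAT, rebuilds the VF's VSI and queue
 * mappings, and only then reports I40E_VFR_VFACTIVE so the VF driver knows
 * it is safe to reinitialize.
 */
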
/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;
	while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
		usleep_range(1000, 2000);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	msleep(20); /* let any messages in transit get finished up */

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, &pf->state);
}

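/* GLGEN_VFLRSTAT is a flat bit array indexed by absolute VF id, hence the
 * / 32 and % 32 arithmetic above: with (illustrative numbers) vf_base_id 64
 * and vf_id 3, the bit lives in register (64 + 3) / 32 = 2 at position
 * (64 + 3) % 32 = 3.
 */
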
#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			dev_err(&pf->pdev->dev,
				"Failed to enable SR-IOV, error %d.\n", ret);
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;
		/* VF resources get allocated during reset */
		i40e_reset_vf(&vfs[i], false);

		/* enable VF vplan_qtable mappings */
		i40e_enable_vf_mappings(&vfs[i]);
	}
	pf->num_alloc_vfs = num_alloc_vfs;

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}

#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		err = -EPERM;
		goto err_out;
	}

	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}

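/* Writing N to the sriov_numvfs sysfs attribute lands in
 * i40e_pci_sriov_configure() below, which either provisions N VFs through
 * i40e_pci_sriov_enable() or, for N == 0, tears everything down with
 * i40e_free_vfs().
 */
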
/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (num_vfs)
		return i40e_pci_sriov_enable(pdev, num_vfs);

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		return -EINVAL;
	}
	return 0;
}

/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	i40e_status aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n",
			v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_err(&pf->pdev->dev,
			"Unable to send the message to VF %d aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum i40e_virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}

/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 *
 * called from the VF to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf)
{
	struct i40e_virtchnl_version_info info = {
		I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
	};

	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct
					     i40e_virtchnl_version_info));
}

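/* The request handlers that follow all share one shape: validate the VF
 * state and the ids carried in the message, perform the operation, then
 * report the i40e_status result back over the admin queue via
 * i40e_vc_send_resp_to_vf().
 */
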
/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 *
 * called from the VF to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
{
	struct i40e_virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int i = 0, len = 0;
	int num_vsis = 1;
	int ret;

	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = (sizeof(struct i40e_virtchnl_vf_resource) +
	       sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}

	vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi->info.pvid)
		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	if (vf->lan_vsi_idx) {
		vfres->vsi_res[i].vsi_id = vf->lan_vsi_id;
		vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
		vfres->vsi_res[i].num_queue_pairs =
		    pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
		memcpy(vfres->vsi_res[i].default_mac_addr,
		       vf->default_lan_addr.addr, ETH_ALEN);
		i++;
	}
	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the VF */
	ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}

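/* The resource reply above is sized for exactly one VSI (num_vsis == 1)
 * because each VF currently owns a single LAN VSI; the vsi_res[] array
 * would simply grow with num_vsis if that ever changed.
 */
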
/**
 * i40e_vc_reset_vf_msg
 * @vf: pointer to the VF info
 *
 * called from the VF to reset itself,
 * unlike other virtchnl messages, PF driver
 * doesn't send the response back to the VF
 **/
static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
	if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
		i40e_reset_vf(vf, false);
}

/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the promiscuous mode of
 * VF vsis
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
					       u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_promisc_info *info =
	    (struct i40e_virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi *vsi;
	bool allmulti = false;
	i40e_status aq_ret;

	vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
	    (vsi->type != I40E_VSI_FCOE)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;
	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
						       allmulti, NULL);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
				       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}

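/* Only multicast promiscuous is ever programmed above, and only for VFs
 * holding I40E_VIRTCHNL_VF_CAP_PRIVILEGE; the unicast bits of info->flags
 * are ignored on this path.
 */
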
/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the rx/tx
 * queues
 **/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vsi_queue_config_info *qci =
	    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
	struct i40e_virtchnl_queue_pair_info *qpi;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id, vsi_queue_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi_id = qci->vsi_id;
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];
		vsi_queue_id = qpi->txq.queue_id;
		if ((qpi->txq.vsi_id != vsi_id) ||
		    (qpi->rxq.vsi_id != vsi_id) ||
		    (qpi->rxq.queue_id != vsi_queue_id) ||
		    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->rxq) ||
		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->txq)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}
	/* set vsi num_queue_pairs in use to num configured by VF */
	pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs;

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				       aq_ret);
}

/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the irq to
 * queue map
 **/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_irq_map_info *irqmap_info =
	    (struct i40e_virtchnl_irq_map_info *)msg;
	struct i40e_virtchnl_vector_map *map;
	u16 vsi_id, vsi_queue_id, vector_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* lookout for the invalid queue index */
		tempmap = map->rxq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		tempmap = map->txq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		i40e_config_irq_link_list(vf, vsi_id, map);
	}
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}

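/* An (illustrative) map accepted by the handler above: vector_id 1 with
 * rxq_map 0x1 and txq_map 0x1 links RX queue 0 and TX queue 0 of the VSI
 * into vector 1's interrupt linked list.
 */
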
/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to enable all or specific queue(s)
 **/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], true))
		aq_ret = I40E_ERR_TIMEOUT;
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
				       aq_ret);
}

/**
 * i40e_vc_disable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to disable all or specific
 * queue(s)
 **/
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false))
		aq_ret = I40E_ERR_TIMEOUT;

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
				       aq_ret);
}

/**
 * i40e_vc_get_stats_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to get vsi stats
 **/
static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_eth_stats stats;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;

	memset(&stats, 0, sizeof(struct i40e_eth_stats));

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	i40e_update_eth_stats(vsi);
	stats = vsi->eth_stats;

error_param:
	/* send the response back to the VF */
	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
				      (u8 *)&stats, sizeof(stats));
}

/**
 * i40e_check_vf_permission
 * @vf: pointer to the VF info
 * @macaddr: pointer to the MAC Address being checked
 *
 * Check if the VF has permission to add or delete unicast MAC address
 * filters and return error code -EPERM if not.  Then check if the
 * address filter requested is broadcast or zero and if so return
 * an invalid MAC address error code.
 **/
static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
{
	struct i40e_pf *pf = vf->pf;
	int ret = 0;

	if (is_broadcast_ether_addr(macaddr) ||
	    is_zero_ether_addr(macaddr)) {
		dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
		ret = I40E_ERR_INVALID_MAC_ADDR;
	} else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
		   !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
		/* If the host VMM administrator has set the VF MAC address
		 * administratively via the ndo_set_vf_mac command then deny
		 * permission to the VF to add or delete unicast MAC addresses.
		 * The VF may request to set the MAC address filter already
		 * assigned to it so do not return an error in that case.
		 */
		dev_err(&pf->pdev->dev,
			"VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
		ret = -EPERM;
	}
	return ret;
}

/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * add guest mac address filter
 **/
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_ether_addr_list *al =
	    (struct i40e_virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		ret = i40e_check_vf_permission(vf, al->list[i].addr);
		if (ret)
			goto error_param;
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	/* add new addresses to the list */
	for (i = 0; i < al->num_elements; i++) {
		struct i40e_mac_filter *f;

		f = i40e_find_mac(vsi, al->list[i].addr, true, false);
		if (!f) {
			if (i40e_is_vsi_in_vlan(vsi))
				f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
							 true, false);
			else
				f = i40e_add_filter(vsi, al->list[i].addr, -1,
						    true, false);
		}

		if (!f) {
			dev_err(&pf->pdev->dev,
				"Unable to add VF MAC filter\n");
			ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	/* program the updated filter list */
	if (i40e_sync_vsi_filters(vsi))
		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
				       ret);
}

/**
 * i40e_vc_del_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove guest mac address filter
 **/
static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_ether_addr_list *al =
	    (struct i40e_virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		if (is_broadcast_ether_addr(al->list[i].addr) ||
		    is_zero_ether_addr(al->list[i].addr)) {
			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
				al->list[i].addr);
			ret = I40E_ERR_INVALID_MAC_ADDR;
			goto error_param;
		}
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	/* delete addresses from the list */
	for (i = 0; i < al->num_elements; i++)
		i40e_del_filter(vsi, al->list[i].addr,
				I40E_VLAN_ANY, true, false);

	/* program the updated filter list */
	if (i40e_sync_vsi_filters(vsi))
		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
				       ret);
}

/**
 * i40e_vc_add_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * program guest vlan id
 **/
static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vlan_filter_list *vfl =
	    (struct i40e_virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			dev_err(&pf->pdev->dev,
				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
			goto error_param;
		}
	}
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	i40e_vlan_stripping_enable(vsi);
	for (i = 0; i < vfl->num_elements; i++) {
		/* add new VLAN filter */
		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);

		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to add VF vlan filter %d, error %d\n",
				vfl->vlan_id[i], ret);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
}

/**
 * i40e_vc_remove_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove programmed guest vlan id
 **/
static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vlan_filter_list *vfl =
	    (struct i40e_virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);

		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to delete VF vlan filter %d, error %d\n",
				vfl->vlan_id[i], ret);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
}

/**
 * i40e_vc_validate_vf_msg
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * validate msg
 **/
static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
				   u32 v_retval, u8 *msg, u16 msglen)
{
	bool err_msg_format = false;
	int valid_len;

	/* Check if VF is disabled. */
	if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
		return I40E_ERR_PARAM;

	/* Validate message length. */
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		valid_len = sizeof(struct i40e_virtchnl_version_info);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		valid_len = 0;
		break;
	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_txq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_rxq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vsi_queue_config_info *vqc =
			    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
			valid_len += (vqc->num_queue_pairs *
				      sizeof(struct
					     i40e_virtchnl_queue_pair_info));
			if (vqc->num_queue_pairs == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_irq_map_info *vimi =
			    (struct i40e_virtchnl_irq_map_info *)msg;
			valid_len += (vimi->num_vectors *
				      sizeof(struct i40e_virtchnl_vector_map));
			if (vimi->num_vectors == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_ether_addr_list *veal =
			    (struct i40e_virtchnl_ether_addr_list *)msg;
			valid_len += veal->num_elements *
			    sizeof(struct i40e_virtchnl_ether_addr);
			if (veal->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vlan_filter_list *vfl =
			    (struct i40e_virtchnl_vlan_filter_list *)msg;
			valid_len += vfl->num_elements * sizeof(u16);
			if (vfl->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		valid_len = sizeof(struct i40e_virtchnl_promisc_info);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	/* These are always errors coming from the VF. */
	case I40E_VIRTCHNL_OP_EVENT:
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		return -EPERM;
	}
	/* few more checks */
	if ((valid_len != msglen) || (err_msg_format)) {
		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
		return -EINVAL;
	} else {
		return 0;
	}
}

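/* Variable-length check above, worked through: an I40E_VIRTCHNL_OP_ADD_VLAN
 * message carrying num_elements == 3 must be exactly
 * sizeof(struct i40e_virtchnl_vlan_filter_list) + 3 * sizeof(u16) bytes
 * long; anything shorter or longer is bounced with I40E_ERR_PARAM before
 * the opcode handler ever runs.
 */
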
/**
 * i40e_vc_process_vf_msg
 * @pf: pointer to the PF structure
 * @vf_id: source VF id
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the common aeq/arq handler to
 * process request from VF
 **/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
			   u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id;
	struct i40e_vf *vf;
	int ret;

	pf->vf_aq_requests++;
	if (local_vf_id >= pf->num_alloc_vfs)
		return -EINVAL;
	vf = &(pf->vf[local_vf_id]);
	/* perform basic checks on the msg */
	ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);

	if (ret) {
		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
			local_vf_id, v_opcode, msglen);
		return ret;
	}

	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		ret = i40e_vc_get_version_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		ret = i40e_vc_get_vf_resources_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
		i40e_vc_reset_vf_msg(vf);
		ret = 0;
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
			v_opcode, local_vf_id);
		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
					      I40E_ERR_NOT_IMPLEMENTED);
		break;
	}

	return ret;
}

/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the PF structure
 *
 * called from the VFLR irq handler to
 * free up VF resources and state variables
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
	u32 reg, reg_idx, bit_idx, vf_id;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;

	if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		return 0;

	/* re-enable vflr interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);

	clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		vf = &pf->vf[vf_id];
		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
		if (reg & (1 << bit_idx)) {
			/* clear the bit in GLGEN_VFLRSTAT */
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));

			if (!test_bit(__I40E_DOWN, &pf->state))
				i40e_reset_vf(vf, true);
		}
	}

	return 0;
}

/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN identifier
 * @qos: priority setting
 *
 * program VF vlan id and/or qos
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
			      int vf_id, u16 vlan_id, u8 qos)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) {
		dev_err(&pf->pdev->dev,
			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
			vf_id);
		/* Administrator error - knock the VF offline until the VF
		 * driver is reloaded with a correct network configuration.
		 */
		i40e_vc_disable_vf(pf, vf);
	}

	/* Check for the condition where there was already a port VLAN ID
	 * filter set and now it is being deleted by setting it to zero.
	 * Additionally check for the condition where there was a port
	 * VLAN but now there is a new and different port VLAN being set.
	 * Before deleting all the old VLAN filters we must add new ones
	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
	 * MAC addresses deleted.
	 */
	if ((!(vlan_id || qos) ||
	     (vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT)) !=
	      le16_to_cpu(vsi->info.pvid)) &&
	    vsi->info.pvid)
		ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);

	if (vsi->info.pvid) {
		/* kill old VLAN */
		ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
					       VLAN_VID_MASK));
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "remove VLAN failed, ret=%d, aq_err=%d\n",
				 ret, pf->hw.aq.asq_last_status);
		}
	}
	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi,
				vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
	else
		i40e_vsi_remove_pvid(vsi);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter */
		ret = i40e_vsi_add_vlan(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			goto error_pvid;
		}
		/* Kill non-vlan MAC filters - ignore error return since
		 * there might not be any non-vlan MAC filters.
		 */
		i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY);
	}

	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}
	/* The Port VLAN needs to be saved across resets the same as the
	 * default LAN MAC address.
	 */
	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
	ret = 0;

error_pvid:
	return ret;
}
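/* Usage sketch (illustrative, not driver code): reached through the PF
 * netdev's .ndo_set_vf_port_vlan callback, e.g.:
 *
 *	# tag all VF 0 traffic with VLAN 100, priority 4
 *	ip link set dev eth2 vf 0 vlan 100 qos 4
 *
 * The resulting pvid encodes both fields; with I40E_VLAN_PRIORITY_SHIFT
 * assumed to be 13, that is 100 | (4 << 13) = 0x8064. Setting "vlan 0"
 * removes the port VLAN again.
 */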
#define I40E_BW_CREDIT_DIVISOR 50     /* 50Mbps per BW credit */
#define I40E_MAX_BW_INACTIVE_ACCUM 4  /* device can accumulate 4 credits max */
/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: minimum Tx rate, not supported (must be zero)
 * @max_tx_rate: maximum Tx rate in Mbps
 *
 * configure VF Tx rate
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
		       int max_tx_rate)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int speed = 0;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
		ret = -EINVAL;
		goto error;
	}

	if (min_tx_rate) {
		dev_err(&pf->pdev->dev, "Invalid min Tx rate (%d) specified for VF %d; min Tx rate is not supported.\n",
			min_tx_rate, vf_id);
		return -EINVAL;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id);
		ret = -EINVAL;
		goto error;
	}

	switch (pf->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		speed = 40000;
		break;
	case I40E_LINK_SPEED_10GB:
		speed = 10000;
		break;
	case I40E_LINK_SPEED_1GB:
		speed = 1000;
		break;
	default:
		break;
	}

	if (max_tx_rate > speed) {
		dev_err(&pf->pdev->dev, "Invalid max Tx rate %d specified for VF %d.\n",
			max_tx_rate, vf->vf_id);
		ret = -EINVAL;
		goto error;
	}

	if ((max_tx_rate < 50) && (max_tx_rate > 0)) {
		dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
		max_tx_rate = 50;
	}

	/* Tx rate credits are in values of 50Mbps, 0 is disabled */
	ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
					  max_tx_rate / I40E_BW_CREDIT_DIVISOR,
					  I40E_MAX_BW_INACTIVE_ACCUM, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to set max Tx rate, error code %d.\n",
			ret);
		ret = -EIO;
		goto error;
	}
	vf->tx_rate = max_tx_rate;
error:
	return ret;
}
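/* Usage sketch (illustrative, not driver code): wired up as the PF netdev's
 * .ndo_set_vf_rate callback, e.g. with a recent iproute2:
 *
 *	# cap VF 0 at 1000 Mbps on PF interface eth2
 *	ip link set dev eth2 vf 0 max_tx_rate 1000
 *
 * The limit is programmed in 50 Mbps credits: 1000 / I40E_BW_CREDIT_DIVISOR
 * = 20 credits, while requests between 1 and 49 Mbps are rounded up to the
 * 50 Mbps minimum and 0 disables the limiter entirely.
 */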
/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	/* first vsi is always the LAN vsi */
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	ivi->vf = vf_id;

	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);

	ivi->max_tx_rate = vf->tx_rate;
	ivi->min_tx_rate = 0;
	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
		   I40E_VLAN_PRIORITY_SHIFT;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->spoofchk = vf->spoofchk;
	ret = 0;

error_param:
	return ret;
}
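/* Usage sketch (illustrative, not driver code): this .ndo_get_vf_config
 * callback fills the ifla_vf_info record that iproute2 prints, e.g.:
 *
 *	$ ip link show dev eth2
 *	...
 *	vf 0 MAC 02:01:02:03:04:05, vlan 100, qos 4, spoof checking on,
 *	link-state auto
 *
 * The values shown come straight from the fields populated above; the exact
 * output format depends on the iproute2 version.
 */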
/**
 * i40e_ndo_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link: required link state
 *
 * Set the link state of a specified VF, regardless of physical link state
 **/
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_virtchnl_pf_event pfe;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int abs_vf_id;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_out;
	}

	vf = &pf->vf[vf_id];
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;

	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		pfe.event_data.link_event.link_status =
			pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			pf->hw.phy.link_info.link_speed;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		pfe.event_data.link_event.link_status = true;
		pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		pfe.event_data.link_event.link_status = false;
		pfe.event_data.link_event.link_speed = 0;
		break;
	default:
		ret = -EINVAL;
		goto error_out;
	}
	/* Notify the VF of its new link state */
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);

error_out:
	return ret;
}

/**
 * i40e_ndo_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @enable: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 **/
int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vsi_context ctxt;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	vf = &(pf->vf[vf_id]);

	if (enable == vf->spoofchk)
		goto out;

	vf->spoofchk = enable;
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	if (enable)
		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
			ret);
		ret = -EIO;
	}
out:
	return ret;
}
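/* Usage sketch (illustrative, not driver code): the two handlers above map
 * to the .ndo_set_vf_link_state and .ndo_set_vf_spoofchk callbacks, e.g.:
 *
 *	# force VF 0's link down regardless of physical link
 *	ip link set dev eth2 vf 0 state disable
 *
 *	# turn MAC/VLAN anti-spoof checking off for VF 0
 *	ip link set dev eth2 vf 0 spoofchk off
 *
 * "state auto" returns the VF to tracking the PF's physical link.
 */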