// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/net/intel/libie/rx.h>

#include "iavf.h"
#include "iavf_prototype.h"

/**
 * iavf_send_pf_msg
 * @adapter: adapter structure
 * @op: virtual channel opcode
 * @msg: pointer to message buffer
 * @len: message length
 *
 * Send message to PF and print status if failure.
 *
 * Return: 0 on success (including the no-op case when PF communications
 * are flagged as failed), otherwise a negative errno translated from the
 * admin queue status.
 **/
static int iavf_send_pf_msg(struct iavf_adapter *adapter,
			    enum virtchnl_ops op, u8 *msg, u16 len)
{
	struct iavf_hw *hw = &adapter->hw;
	enum iavf_status status;

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
		return 0; /* nothing to see here, move along */

	status = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
	if (status)
		dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n",
			op, iavf_stat_str(hw, status),
			iavf_aq_str(hw, hw->aq.asq_last_status));
	return iavf_status_to_errno(status);
}

/**
 * iavf_send_api_ver
 * @adapter: adapter structure
 *
 * Send API version admin queue message to the PF. The reply is not checked
 * in this function. Returns 0 if the message was successfully
 * sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
 **/
int iavf_send_api_ver(struct iavf_adapter *adapter)
{
	struct virtchnl_version_info vvi;

	vvi.major = VIRTCHNL_VERSION_MAJOR;
	vvi.minor = VIRTCHNL_VERSION_MINOR;

	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
				sizeof(vvi));
}

/**
 * iavf_poll_virtchnl_msg
 * @hw: HW configuration structure
 * @event: event to populate on success
 * @op_to_poll: requested virtchnl op to poll for
 *
 * Initialize poll for virtchnl msg matching the requested_op. Returns 0
 * if a message of the correct opcode is in the queue or an error code
 * if no message matching the op code is waiting and other failures.
 */
static int
iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event,
		       enum virtchnl_ops op_to_poll)
{
	enum virtchnl_ops received_op;
	enum iavf_status status;
	u32 v_retval;

	while (1) {
		/* When the AQ is empty, iavf_clean_arq_element will return
		 * nonzero and this loop will terminate.
		 */
		status = iavf_clean_arq_element(hw, event, NULL);
		if (status != IAVF_SUCCESS)
			return iavf_status_to_errno(status);
		/* messages for other opcodes are silently discarded here */
		received_op =
			(enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high);
		if (op_to_poll == received_op)
			break;
	}

	/* cookie_low carries the PF's virtchnl status for the matched op */
	v_retval = le32_to_cpu(event->desc.cookie_low);
	return virtchnl_status_to_errno((enum virtchnl_status_code)v_retval);
}

/**
 * iavf_verify_api_ver
 * @adapter: adapter structure
 *
 * Compare API versions with the PF. Must be called after admin queue is
 * initialized. Returns 0 if API versions match, -EIO if they do not,
 * IAVF_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
 * from the firmware are propagated.
 **/
int iavf_verify_api_ver(struct iavf_adapter *adapter)
{
	struct iavf_arq_event_info event;
	int err;

	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(IAVF_MAX_AQ_BUF_SIZE, GFP_KERNEL);
	if (!event.msg_buf)
		return -ENOMEM;

	err = iavf_poll_virtchnl_msg(&adapter->hw, &event, VIRTCHNL_OP_VERSION);
	if (!err) {
		struct virtchnl_version_info *pf_vvi =
			(struct virtchnl_version_info *)event.msg_buf;
		adapter->pf_version = *pf_vvi;

		/* fail only if the PF advertises a NEWER version than ours;
		 * older PF versions are accepted here
		 */
		if (pf_vvi->major > VIRTCHNL_VERSION_MAJOR ||
		    (pf_vvi->major == VIRTCHNL_VERSION_MAJOR &&
		     pf_vvi->minor > VIRTCHNL_VERSION_MINOR))
			err = -EIO;
	}

	kfree(event.msg_buf);

	return err;
}

/**
 * iavf_send_vf_config_msg
 * @adapter: adapter structure
 *
 * Send VF configuration request admin queue message to the PF. The reply
 * is not checked in this function. Returns 0 if the message was
 * successfully sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
 **/
int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
{
	u32 caps;

	/* full capability set this VF is willing to negotiate; the PF
	 * replies with the subset it actually grants
	 */
	caps = VIRTCHNL_VF_OFFLOAD_L2 |
	       VIRTCHNL_VF_OFFLOAD_RSS_PF |
	       VIRTCHNL_VF_OFFLOAD_RSS_AQ |
	       VIRTCHNL_VF_OFFLOAD_RSS_REG |
	       VIRTCHNL_VF_OFFLOAD_VLAN |
	       VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
	       VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
	       VIRTCHNL_VF_OFFLOAD_ENCAP |
	       VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
	       VIRTCHNL_VF_OFFLOAD_CRC |
	       VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
	       VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
	       VIRTCHNL_VF_OFFLOAD_ADQ |
	       VIRTCHNL_VF_OFFLOAD_USO |
	       VIRTCHNL_VF_OFFLOAD_FDIR_PF |
	       VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
	       VIRTCHNL_VF_CAP_ADV_LINK_SPEED;

	adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG;
	/* pre-1.1 PFs do not take a capability bitmap with this request */
	if (PF_IS_V11(adapter))
		return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
					(u8 *)&caps, sizeof(caps));
	else
		return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
					NULL, 0);
}

/**
 * iavf_send_vf_offload_vlan_v2_msg
 * @adapter: adapter structure
 *
 * Request VLAN V2 offload capabilities from the PF. The pending AQ flag is
 * cleared first so the request is not retried when VLAN V2 was not
 * negotiated.
 *
 * Return: 0 if the message was successfully sent, -EOPNOTSUPP if VLAN V2
 * is not allowed, or a negative errno on send failure.
 */
int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter)
{
	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;

	if (!VLAN_V2_ALLOWED(adapter))
		return -EOPNOTSUPP;

	adapter->current_op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS;

	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
				NULL, 0);
}

/**
 * iavf_validate_num_queues
 * @adapter: adapter structure
 *
 * Validate that the number of queues the PF has sent in
 * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle.
 **/
static void iavf_validate_num_queues(struct iavf_adapter *adapter)
{
	if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) {
		struct virtchnl_vsi_resource *vsi_res;
		int i;

		dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n",
			 adapter->vf_res->num_queue_pairs,
			 IAVF_MAX_REQ_QUEUES);
		dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n",
			 IAVF_MAX_REQ_QUEUES);
		/* clamp the global count and every per-VSI count */
		adapter->vf_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
		for (i = 0; i < adapter->vf_res->num_vsis; i++) {
			vsi_res = &adapter->vf_res->vsi_res[i];
			vsi_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
		}
	}
}

/**
 * iavf_get_vf_config
 * @adapter: private adapter structure
 *
 * Get VF configuration from PF and populate hw structure. Must be called after
 * admin queue is initialized. Busy waits until response is received from PF,
 * with maximum timeout. Response from PF is returned in the buffer for further
 * processing by the caller.
 **/
int iavf_get_vf_config(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_arq_event_info event;
	u16 len;
	int err;

	len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE;
	event.buf_len = len;
	event.msg_buf = kzalloc(len, GFP_KERNEL);
	if (!event.msg_buf)
		return -ENOMEM;

	err = iavf_poll_virtchnl_msg(hw, &event, VIRTCHNL_OP_GET_VF_RESOURCES);
	/* NOTE(review): the response is copied into vf_res even when polling
	 * reports an error — presumably intentional so partial data is
	 * visible; confirm against callers before changing.
	 */
	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));

	/* some PFs send more queues than we should have so validate that
	 * we aren't getting too many queues
	 */
	if (!err)
		iavf_validate_num_queues(adapter);
	iavf_vf_parse_hw_config(hw, adapter->vf_res);

	kfree(event.msg_buf);

	return err;
}

/**
 * iavf_get_vf_vlan_v2_caps
 * @adapter: adapter structure
 *
 * Poll the admin queue for the PF's VLAN V2 capability response and copy it
 * into adapter->vlan_v2_caps on success.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or a negative errno
 * from polling the admin queue.
 */
int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
{
	struct iavf_arq_event_info event;
	int err;
	u16 len;

	len = sizeof(struct virtchnl_vlan_caps);
	event.buf_len = len;
	event.msg_buf = kzalloc(len, GFP_KERNEL);
	if (!event.msg_buf)
		return -ENOMEM;

	err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
				     VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS);
	if (!err)
		memcpy(&adapter->vlan_v2_caps, event.msg_buf,
		       min(event.msg_len, len));

	kfree(event.msg_buf);

	return err;
}

/**
 * iavf_configure_queues
 * @adapter: adapter structure
 *
 * Request that the PF set up our (previously allocated) queues.
 **/
void iavf_configure_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_vsi_queue_config_info *vqci;
	int pairs = adapter->num_active_queues;
	struct virtchnl_queue_pair_info *vqpi;
	u32 i, max_frame;
	size_t len;

	max_frame = LIBIE_MAX_RX_FRM_LEN(adapter->rx_rings->pp->p.offset);
	max_frame = min_not_zero(adapter->vf_res->max_mtu, max_frame);

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	len = virtchnl_struct_size(vqci, qpair, pairs);
	vqci = kzalloc(len, GFP_KERNEL);
	if (!vqci)
		return;

	vqci->vsi_id = adapter->vsi_res->vsi_id;
	vqci->num_queue_pairs = pairs;
	vqpi = vqci->qpair;
	/* Size check is not needed here - HW max is 16 queue pairs, and we
	 * can fit info for 31 of them into the AQ buffer before it overflows.
	 */
	for (i = 0; i < pairs; i++) {
		vqpi->txq.vsi_id = vqci->vsi_id;
		vqpi->txq.queue_id = i;
		vqpi->txq.ring_len = adapter->tx_rings[i].count;
		vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
		vqpi->rxq.vsi_id = vqci->vsi_id;
		vqpi->rxq.queue_id = i;
		vqpi->rxq.ring_len = adapter->rx_rings[i].count;
		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
		vqpi->rxq.max_pkt_size = max_frame;
		vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
		/* only request FCS passthrough when the PF allows it */
		if (CRC_OFFLOAD_ALLOWED(adapter))
			vqpi->rxq.crc_disable = !!(adapter->netdev->features &
						   NETIF_F_RXFCS);
		vqpi++;
	}

	adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
			 (u8 *)vqci, len);
	kfree(vqci);
}

/**
 * iavf_enable_queues
 * @adapter: adapter structure
 *
 * Request that the PF enable all of our queues.
 **/
void iavf_enable_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_queue_select vqs;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	/* bitmap with one bit set per active queue */
	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
	vqs.rx_queues = vqs.tx_queues;
	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
			 (u8 *)&vqs, sizeof(vqs));
}

/**
 * iavf_disable_queues
 * @adapter: adapter structure
 *
 * Request that the PF disable all of our queues.
 **/
void iavf_disable_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_queue_select vqs;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
	vqs.rx_queues = vqs.tx_queues;
	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_QUEUES;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
			 (u8 *)&vqs, sizeof(vqs));
}

/**
 * iavf_map_queues
 * @adapter: adapter structure
 *
 * Request that the PF map queues to interrupt vectors. Misc causes, including
 * admin queue, are always mapped to vector 0.
 **/
void iavf_map_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_irq_map_info *vimi;
	struct virtchnl_vector_map *vecmap;
	struct iavf_q_vector *q_vector;
	int v_idx, q_vectors;
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	len = virtchnl_struct_size(vimi, vecmap, adapter->num_msix_vectors);
	vimi = kzalloc(len, GFP_KERNEL);
	if (!vimi)
		return;

	vimi->num_vectors = adapter->num_msix_vectors;
	/* Queue vectors first */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = &adapter->q_vectors[v_idx];
		vecmap = &vimi->vecmap[v_idx];

		vecmap->vsi_id = adapter->vsi_res->vsi_id;
		vecmap->vector_id = v_idx + NONQ_VECS;
		vecmap->txq_map = q_vector->ring_mask;
		vecmap->rxq_map = q_vector->ring_mask;
		vecmap->rxitr_idx = IAVF_RX_ITR;
		vecmap->txitr_idx = IAVF_TX_ITR;
	}
	/* Misc vector last - this is only for AdminQ messages */
	vecmap = &vimi->vecmap[v_idx];
	vecmap->vsi_id = adapter->vsi_res->vsi_id;
	vecmap->vector_id = 0;
	vecmap->txq_map = 0;
	vecmap->rxq_map = 0;

	adapter->aq_required &= ~IAVF_FLAG_AQ_MAP_VECTORS;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
			 (u8 *)vimi, len);
	kfree(vimi);
}

/**
 * iavf_set_mac_addr_type - Set the correct request type from the filter type
 * @virtchnl_ether_addr: pointer to requested list element
 * @filter: pointer to requested filter
 **/
static void
iavf_set_mac_addr_type(struct virtchnl_ether_addr *virtchnl_ether_addr,
		       const struct iavf_mac_filter *filter)
{
	virtchnl_ether_addr->type = filter->is_primary ?
		VIRTCHNL_ETHER_ADDR_PRIMARY :
		VIRTCHNL_ETHER_ADDR_EXTRA;
}

/**
 * iavf_add_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF add one or more addresses to our filters.
 **/
void iavf_add_ether_addrs(struct iavf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct iavf_mac_filter *f;
	int i = 0, count = 0;
	bool more = false;
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;

	len = virtchnl_struct_size(veal, list, count);
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		/* message won't fit in one AQ buffer; send what fits now and
		 * leave the aq_required flag set so we get called again
		 */
		dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
		while (len > IAVF_MAX_AQ_BUF_SIZE)
			len = virtchnl_struct_size(veal, list, --count);
		more = true;
	}

	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			iavf_set_mac_addr_type(&veal->list[i], f);
			i++;
			f->add = false;
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len);
	kfree(veal);
}

/**
 * iavf_del_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more addresses from our filters.
 **/
void iavf_del_ether_addrs(struct iavf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct iavf_mac_filter *f, *ftmp;
	int i = 0, count = 0;
	bool more = false;
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;

	len = virtchnl_struct_size(veal, list, count);
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		/* send a partial batch now; 'more' keeps aq_required set so
		 * the remainder goes out on a later call
		 */
		dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
		while (len > IAVF_MAX_AQ_BUF_SIZE)
			len = virtchnl_struct_size(veal, list, --count);
		more = true;
	}
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		if (f->remove) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			iavf_set_mac_addr_type(&veal->list[i], f);
			i++;
			/* filter is gone from our list as soon as the delete
			 * request is queued
			 */
			list_del(&f->list);
			kfree(f);
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len);
	kfree(veal);
}

/**
 * iavf_mac_add_ok
 * @adapter: adapter structure
 *
 * Submit list of filters based on PF response.
 **/
static void iavf_mac_add_ok(struct iavf_adapter *adapter)
{
	struct iavf_mac_filter *f, *ftmp;

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		f->is_new_mac = false;
		if (!f->add && !f->add_handled)
			f->add_handled = true;
	}
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_mac_add_reject
 * @adapter: adapter structure
 *
 * Remove filters from list based on PF response.
 **/
static void iavf_mac_add_reject(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct iavf_mac_filter *f, *ftmp;

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		/* never schedule removal of the netdev's current address;
		 * clearing ->remove keeps that filter on the list
		 */
		if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
			f->remove = false;

		if (!f->add && !f->add_handled)
			f->add_handled = true;

		/* drop filters the PF never accepted */
		if (f->is_new_mac) {
			list_del(&f->list);
			kfree(f);
		}
	}
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_vlan_add_reject
 * @adapter: adapter structure
 *
 * Remove VLAN filters from list based on PF response.
 **/
static void iavf_vlan_add_reject(struct iavf_adapter *adapter)
{
	struct iavf_vlan_filter *f, *ftmp;

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
		if (f->state == IAVF_VLAN_IS_NEW) {
			list_del(&f->list);
			kfree(f);
			adapter->num_vlan_filters--;
		}
	}
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_add_vlans
 * @adapter: adapter structure
 *
 * Request that the PF add one or more VLAN filters to our VSI.
 **/
void iavf_add_vlans(struct iavf_adapter *adapter)
{
	int len, i = 0, count = 0;
	struct iavf_vlan_filter *f;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->state == IAVF_VLAN_ADD)
			count++;
	}
	if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	if (VLAN_ALLOWED(adapter)) {
		/* legacy (v1) VLAN offload path */
		struct virtchnl_vlan_filter_list *vvfl;

		adapter->current_op = VIRTCHNL_OP_ADD_VLAN;

		len = virtchnl_struct_size(vvfl, vlan_id, count);
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
			while (len > IAVF_MAX_AQ_BUF_SIZE)
				len = virtchnl_struct_size(vvfl, vlan_id,
							   --count);
			more = true;
		}
		vvfl = kzalloc(len, GFP_ATOMIC);
		if (!vvfl) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl->vsi_id = adapter->vsi_res->vsi_id;
		vvfl->num_elements = count;
		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
			if (f->state == IAVF_VLAN_ADD) {
				vvfl->vlan_id[i] = f->vlan.vid;
				i++;
				f->state = IAVF_VLAN_IS_NEW;
				if (i == count)
					break;
			}
		}
		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
		kfree(vvfl);
	} else {
		/* VLAN V2 path: respect the PF's max filter count */
		u16 max_vlans = adapter->vlan_v2_caps.filtering.max_filters;
		u16 current_vlans = iavf_get_num_vlans_added(adapter);
		struct virtchnl_vlan_filter_list_v2 *vvfl_v2;

		adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2;

		if ((count + current_vlans) > max_vlans &&
		    current_vlans < max_vlans) {
			count = max_vlans - iavf_get_num_vlans_added(adapter);
			more = true;
		}

		len = virtchnl_struct_size(vvfl_v2, filters, count);
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
			while (len > IAVF_MAX_AQ_BUF_SIZE)
				len = virtchnl_struct_size(vvfl_v2, filters,
							   --count);
			more = true;
		}

		vvfl_v2 = kzalloc(len, GFP_ATOMIC);
		if (!vvfl_v2) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
		vvfl_v2->num_elements = count;
		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
			if (f->state == IAVF_VLAN_ADD) {
				struct virtchnl_vlan_supported_caps *filtering_support =
					&adapter->vlan_v2_caps.filtering.filtering_support;
				struct virtchnl_vlan *vlan;

				if (i == count)
					break;

				/* give priority over outer if it's enabled */
				if (filtering_support->outer)
					vlan = &vvfl_v2->filters[i].outer;
				else
					vlan = &vvfl_v2->filters[i].inner;

				vlan->tci = f->vlan.vid;
				vlan->tpid = f->vlan.tpid;

				i++;
				f->state = IAVF_VLAN_IS_NEW;
			}
		}

		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN_V2,
				 (u8 *)vvfl_v2, len);
		kfree(vvfl_v2);
	}
}

/**
 * iavf_del_vlans
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more VLAN filters from our VSI.
 **/
void iavf_del_vlans(struct iavf_adapter *adapter)
{
	struct iavf_vlan_filter *f, *ftmp;
	int len, i = 0, count = 0;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
		/* since VLAN capabilities are not allowed, we dont want to send
		 * a VLAN delete request because it will most likely fail and
		 * create unnecessary errors/noise, so just free the VLAN
		 * filters marked for removal to enable bailing out before
		 * sending a virtchnl message
		 */
		if (f->state == IAVF_VLAN_REMOVE &&
		    !VLAN_FILTERING_ALLOWED(adapter)) {
			list_del(&f->list);
			kfree(f);
			adapter->num_vlan_filters--;
		} else if (f->state == IAVF_VLAN_DISABLE &&
			   !VLAN_FILTERING_ALLOWED(adapter)) {
			f->state = IAVF_VLAN_INACTIVE;
		} else if (f->state == IAVF_VLAN_REMOVE ||
			   f->state == IAVF_VLAN_DISABLE) {
			count++;
		}
	}
	if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	if (VLAN_ALLOWED(adapter)) {
		/* legacy (v1) VLAN offload path */
		struct virtchnl_vlan_filter_list *vvfl;

		adapter->current_op = VIRTCHNL_OP_DEL_VLAN;

		len = virtchnl_struct_size(vvfl, vlan_id, count);
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
			while (len > IAVF_MAX_AQ_BUF_SIZE)
				len = virtchnl_struct_size(vvfl, vlan_id,
							   --count);
			more = true;
		}
		vvfl = kzalloc(len, GFP_ATOMIC);
		if (!vvfl) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl->vsi_id = adapter->vsi_res->vsi_id;
		vvfl->num_elements = count;
		list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
			if (f->state == IAVF_VLAN_DISABLE) {
				/* disabled filters stay on the list */
				vvfl->vlan_id[i] = f->vlan.vid;
				f->state = IAVF_VLAN_INACTIVE;
				i++;
				if (i == count)
					break;
			} else if (f->state == IAVF_VLAN_REMOVE) {
				vvfl->vlan_id[i] = f->vlan.vid;
				list_del(&f->list);
				kfree(f);
				adapter->num_vlan_filters--;
				i++;
				if (i == count)
					break;
			}
		}

		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
		kfree(vvfl);
	} else {
		struct virtchnl_vlan_filter_list_v2 *vvfl_v2;

		adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2;

		len = virtchnl_struct_size(vvfl_v2, filters, count);
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
			while (len > IAVF_MAX_AQ_BUF_SIZE)
				len = virtchnl_struct_size(vvfl_v2, filters,
							   --count);
			more = true;
		}

		vvfl_v2 = kzalloc(len, GFP_ATOMIC);
		if (!vvfl_v2) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
		vvfl_v2->num_elements = count;
		list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
			if (f->state == IAVF_VLAN_DISABLE ||
			    f->state == IAVF_VLAN_REMOVE) {
				struct virtchnl_vlan_supported_caps *filtering_support =
					&adapter->vlan_v2_caps.filtering.filtering_support;
				struct virtchnl_vlan *vlan;

				/* give priority over outer if it's enabled */
				if (filtering_support->outer)
					vlan = &vvfl_v2->filters[i].outer;
				else
					vlan = &vvfl_v2->filters[i].inner;

				vlan->tci = f->vlan.vid;
				vlan->tpid = f->vlan.tpid;

				if (f->state == IAVF_VLAN_DISABLE) {
					f->state = IAVF_VLAN_INACTIVE;
				} else {
					list_del(&f->list);
					kfree(f);
					adapter->num_vlan_filters--;
				}
				i++;
				if (i == count)
					break;
			}
		}

		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN_V2,
				 (u8 *)vvfl_v2, len);
		kfree(vvfl_v2);
	}
}

/**
 * iavf_set_promiscuous
 * @adapter: adapter structure
 *
 * Request that the PF enable promiscuous mode for our VSI.
 **/
void iavf_set_promiscuous(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct virtchnl_promisc_info vpi;
	unsigned int flags;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
			adapter->current_op);
		return;
	}

	/* prevent changes to promiscuous flags */
	spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);

	/* sanity check to prevent duplicate AQ calls */
	if (!iavf_promiscuous_mode_changed(adapter)) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
		dev_dbg(&adapter->pdev->dev, "No change in promiscuous mode\n");
		/* allow changes to promiscuous flags */
		spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
		return;
	}

	/* there are 2 bits, but only 3 states */
	if (!(netdev->flags & IFF_PROMISC) &&
	    netdev->flags & IFF_ALLMULTI) {
		/* State 1 - only multicast promiscuous mode enabled
		 * - !IFF_PROMISC && IFF_ALLMULTI
		 */
		flags = FLAG_VF_MULTICAST_PROMISC;
		adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
		adapter->current_netdev_promisc_flags &= ~IFF_PROMISC;
		dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
	} else if (!(netdev->flags & IFF_PROMISC) &&
		   !(netdev->flags & IFF_ALLMULTI)) {
		/* State 2 - unicast/multicast promiscuous mode disabled
		 * - !IFF_PROMISC && !IFF_ALLMULTI
		 */
		flags = 0;
		adapter->current_netdev_promisc_flags &=
			~(IFF_PROMISC | IFF_ALLMULTI);
		dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
	} else {
		/* State 3 - unicast/multicast promiscuous mode enabled
		 * - IFF_PROMISC && IFF_ALLMULTI
		 * - IFF_PROMISC && !IFF_ALLMULTI
		 */
		flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
		adapter->current_netdev_promisc_flags |= IFF_PROMISC;
		if (netdev->flags & IFF_ALLMULTI)
			adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
		else
			adapter->current_netdev_promisc_flags &= ~IFF_ALLMULTI;

		dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
	}

	adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;

	/* allow changes to promiscuous flags */
	spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);

	adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
	vpi.vsi_id = adapter->vsi_res->vsi_id;
	vpi.flags = flags;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
			 (u8 *)&vpi, sizeof(vpi));
}

/**
 * iavf_request_stats
 * @adapter: adapter structure
 *
 * Request VSI statistics from PF.
 **/
void iavf_request_stats(struct iavf_adapter *adapter)
{
	struct virtchnl_queue_select vqs;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* no error message, this isn't crucial */
		return;
	}

	adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS;
	adapter->current_op = VIRTCHNL_OP_GET_STATS;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	/* queue maps are ignored for this message - only the vsi is used */
	if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs,
			     sizeof(vqs)))
		/* if the request failed, don't lock out others */
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}

/**
 * iavf_get_hena
 * @adapter: adapter structure
 *
 * Request hash enable capabilities from PF
 **/
void iavf_get_hena(struct iavf_adapter *adapter)
{
	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0);
}

/**
 * iavf_set_hena
 * @adapter: adapter structure
 *
 * Request the PF to set our RSS hash capabilities
 **/
void iavf_set_hena(struct iavf_adapter *adapter)
{
	struct virtchnl_rss_hena vrh;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
			adapter->current_op);
		return;
	}
	vrh.hena = adapter->hena;
	adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&vrh,
			 sizeof(vrh));
}

/**
 * iavf_set_rss_key
 * @adapter: adapter structure
 *
 * Request the PF to set our RSS hash key
 **/
void iavf_set_rss_key(struct iavf_adapter *adapter)
{
	struct virtchnl_rss_key *vrk;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
			adapter->current_op);
		return;
	}
	len = virtchnl_struct_size(vrk, key, adapter->rss_key_size);
	vrk = kzalloc(len, GFP_KERNEL);
	if (!vrk)
		return;
	vrk->vsi_id = adapter->vsi.id;
	vrk->key_len = adapter->rss_key_size;
	memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);

	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_KEY;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len);
	kfree(vrk);
}

/**
 * iavf_set_rss_lut
 * @adapter: adapter structure
 *
 * Request the PF to set our RSS lookup table
 **/
void iavf_set_rss_lut(struct iavf_adapter *adapter)
{
	struct virtchnl_rss_lut *vrl;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
			adapter->current_op);
		return;
	}
	len = virtchnl_struct_size(vrl, lut, adapter->rss_lut_size);
	vrl = kzalloc(len, GFP_KERNEL);
	if (!vrl)
		return;
	vrl->vsi_id = adapter->vsi.id;
	vrl->lut_entries = adapter->rss_lut_size;
	memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_LUT;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)vrl, len);
	kfree(vrl);
}
1138 1139 /** 1140 * iavf_set_rss_hfunc 1141 * @adapter: adapter structure 1142 * 1143 * Request the PF to set our RSS Hash function 1144 **/ 1145 void iavf_set_rss_hfunc(struct iavf_adapter *adapter) 1146 { 1147 struct virtchnl_rss_hfunc *vrh; 1148 int len = sizeof(*vrh); 1149 1150 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1151 /* bail because we already have a command pending */ 1152 dev_err(&adapter->pdev->dev, "Cannot set RSS Hash function, command %d pending\n", 1153 adapter->current_op); 1154 return; 1155 } 1156 vrh = kzalloc(len, GFP_KERNEL); 1157 if (!vrh) 1158 return; 1159 vrh->vsi_id = adapter->vsi.id; 1160 vrh->rss_algorithm = adapter->hfunc; 1161 adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_HFUNC; 1162 adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_HFUNC; 1163 iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_HFUNC, (u8 *)vrh, len); 1164 kfree(vrh); 1165 } 1166 1167 /** 1168 * iavf_enable_vlan_stripping 1169 * @adapter: adapter structure 1170 * 1171 * Request VLAN header stripping to be enabled 1172 **/ 1173 void iavf_enable_vlan_stripping(struct iavf_adapter *adapter) 1174 { 1175 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1176 /* bail because we already have a command pending */ 1177 dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n", 1178 adapter->current_op); 1179 return; 1180 } 1181 adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING; 1182 adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; 1183 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, NULL, 0); 1184 } 1185 1186 /** 1187 * iavf_disable_vlan_stripping 1188 * @adapter: adapter structure 1189 * 1190 * Request VLAN header stripping to be disabled 1191 **/ 1192 void iavf_disable_vlan_stripping(struct iavf_adapter *adapter) 1193 { 1194 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1195 /* bail because we already have a command pending */ 1196 dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n", 1197 
adapter->current_op); 1198 return; 1199 } 1200 adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING; 1201 adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; 1202 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0); 1203 } 1204 1205 /** 1206 * iavf_tpid_to_vc_ethertype - transform from VLAN TPID to virtchnl ethertype 1207 * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.) 1208 */ 1209 static u32 iavf_tpid_to_vc_ethertype(u16 tpid) 1210 { 1211 switch (tpid) { 1212 case ETH_P_8021Q: 1213 return VIRTCHNL_VLAN_ETHERTYPE_8100; 1214 case ETH_P_8021AD: 1215 return VIRTCHNL_VLAN_ETHERTYPE_88A8; 1216 } 1217 1218 return 0; 1219 } 1220 1221 /** 1222 * iavf_set_vc_offload_ethertype - set virtchnl ethertype for offload message 1223 * @adapter: adapter structure 1224 * @msg: message structure used for updating offloads over virtchnl to update 1225 * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.) 1226 * @offload_op: opcode used to determine which support structure to check 1227 */ 1228 static int 1229 iavf_set_vc_offload_ethertype(struct iavf_adapter *adapter, 1230 struct virtchnl_vlan_setting *msg, u16 tpid, 1231 enum virtchnl_ops offload_op) 1232 { 1233 struct virtchnl_vlan_supported_caps *offload_support; 1234 u16 vc_ethertype = iavf_tpid_to_vc_ethertype(tpid); 1235 1236 /* reference the correct offload support structure */ 1237 switch (offload_op) { 1238 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: 1239 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: 1240 offload_support = 1241 &adapter->vlan_v2_caps.offloads.stripping_support; 1242 break; 1243 case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: 1244 case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: 1245 offload_support = 1246 &adapter->vlan_v2_caps.offloads.insertion_support; 1247 break; 1248 default: 1249 dev_err(&adapter->pdev->dev, "Invalid opcode %d for setting virtchnl ethertype to enable/disable VLAN offloads\n", 1250 offload_op); 1251 return -EINVAL; 1252 } 1253 1254 /* make sure ethertype is supported */ 1255 
if (offload_support->outer & vc_ethertype && 1256 offload_support->outer & VIRTCHNL_VLAN_TOGGLE) { 1257 msg->outer_ethertype_setting = vc_ethertype; 1258 } else if (offload_support->inner & vc_ethertype && 1259 offload_support->inner & VIRTCHNL_VLAN_TOGGLE) { 1260 msg->inner_ethertype_setting = vc_ethertype; 1261 } else { 1262 dev_dbg(&adapter->pdev->dev, "opcode %d unsupported for VLAN TPID 0x%04x\n", 1263 offload_op, tpid); 1264 return -EINVAL; 1265 } 1266 1267 return 0; 1268 } 1269 1270 /** 1271 * iavf_clear_offload_v2_aq_required - clear AQ required bit for offload request 1272 * @adapter: adapter structure 1273 * @tpid: VLAN TPID 1274 * @offload_op: opcode used to determine which AQ required bit to clear 1275 */ 1276 static void 1277 iavf_clear_offload_v2_aq_required(struct iavf_adapter *adapter, u16 tpid, 1278 enum virtchnl_ops offload_op) 1279 { 1280 switch (offload_op) { 1281 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: 1282 if (tpid == ETH_P_8021Q) 1283 adapter->aq_required &= 1284 ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING; 1285 else if (tpid == ETH_P_8021AD) 1286 adapter->aq_required &= 1287 ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING; 1288 break; 1289 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: 1290 if (tpid == ETH_P_8021Q) 1291 adapter->aq_required &= 1292 ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING; 1293 else if (tpid == ETH_P_8021AD) 1294 adapter->aq_required &= 1295 ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING; 1296 break; 1297 case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: 1298 if (tpid == ETH_P_8021Q) 1299 adapter->aq_required &= 1300 ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION; 1301 else if (tpid == ETH_P_8021AD) 1302 adapter->aq_required &= 1303 ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION; 1304 break; 1305 case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: 1306 if (tpid == ETH_P_8021Q) 1307 adapter->aq_required &= 1308 ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION; 1309 else if (tpid == ETH_P_8021AD) 1310 adapter->aq_required &= 1311 ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION; 
1312 break; 1313 default: 1314 dev_err(&adapter->pdev->dev, "Unsupported opcode %d specified for clearing aq_required bits for VIRTCHNL_VF_OFFLOAD_VLAN_V2 offload request\n", 1315 offload_op); 1316 } 1317 } 1318 1319 /** 1320 * iavf_send_vlan_offload_v2 - send offload enable/disable over virtchnl 1321 * @adapter: adapter structure 1322 * @tpid: VLAN TPID used for the command (i.e. 0x8100 or 0x88a8) 1323 * @offload_op: offload_op used to make the request over virtchnl 1324 */ 1325 static void 1326 iavf_send_vlan_offload_v2(struct iavf_adapter *adapter, u16 tpid, 1327 enum virtchnl_ops offload_op) 1328 { 1329 struct virtchnl_vlan_setting *msg; 1330 int len = sizeof(*msg); 1331 1332 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1333 /* bail because we already have a command pending */ 1334 dev_err(&adapter->pdev->dev, "Cannot send %d, command %d pending\n", 1335 offload_op, adapter->current_op); 1336 return; 1337 } 1338 1339 adapter->current_op = offload_op; 1340 1341 msg = kzalloc(len, GFP_KERNEL); 1342 if (!msg) 1343 return; 1344 1345 msg->vport_id = adapter->vsi_res->vsi_id; 1346 1347 /* always clear to prevent unsupported and endless requests */ 1348 iavf_clear_offload_v2_aq_required(adapter, tpid, offload_op); 1349 1350 /* only send valid offload requests */ 1351 if (!iavf_set_vc_offload_ethertype(adapter, msg, tpid, offload_op)) 1352 iavf_send_pf_msg(adapter, offload_op, (u8 *)msg, len); 1353 else 1354 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1355 1356 kfree(msg); 1357 } 1358 1359 /** 1360 * iavf_enable_vlan_stripping_v2 - enable VLAN stripping 1361 * @adapter: adapter structure 1362 * @tpid: VLAN TPID used to enable VLAN stripping 1363 */ 1364 void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid) 1365 { 1366 iavf_send_vlan_offload_v2(adapter, tpid, 1367 VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2); 1368 } 1369 1370 /** 1371 * iavf_disable_vlan_stripping_v2 - disable VLAN stripping 1372 * @adapter: adapter structure 1373 * @tpid: VLAN TPID 
used to disable VLAN stripping 1374 */ 1375 void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid) 1376 { 1377 iavf_send_vlan_offload_v2(adapter, tpid, 1378 VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2); 1379 } 1380 1381 /** 1382 * iavf_enable_vlan_insertion_v2 - enable VLAN insertion 1383 * @adapter: adapter structure 1384 * @tpid: VLAN TPID used to enable VLAN insertion 1385 */ 1386 void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid) 1387 { 1388 iavf_send_vlan_offload_v2(adapter, tpid, 1389 VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2); 1390 } 1391 1392 /** 1393 * iavf_disable_vlan_insertion_v2 - disable VLAN insertion 1394 * @adapter: adapter structure 1395 * @tpid: VLAN TPID used to disable VLAN insertion 1396 */ 1397 void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid) 1398 { 1399 iavf_send_vlan_offload_v2(adapter, tpid, 1400 VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2); 1401 } 1402 1403 /** 1404 * iavf_print_link_message - print link up or down 1405 * @adapter: adapter structure 1406 * 1407 * Log a message telling the world of our wonderous link status 1408 */ 1409 static void iavf_print_link_message(struct iavf_adapter *adapter) 1410 { 1411 struct net_device *netdev = adapter->netdev; 1412 int link_speed_mbps; 1413 char *speed; 1414 1415 if (!adapter->link_up) { 1416 netdev_info(netdev, "NIC Link is Down\n"); 1417 return; 1418 } 1419 1420 if (ADV_LINK_SUPPORT(adapter)) { 1421 link_speed_mbps = adapter->link_speed_mbps; 1422 goto print_link_msg; 1423 } 1424 1425 switch (adapter->link_speed) { 1426 case VIRTCHNL_LINK_SPEED_40GB: 1427 link_speed_mbps = SPEED_40000; 1428 break; 1429 case VIRTCHNL_LINK_SPEED_25GB: 1430 link_speed_mbps = SPEED_25000; 1431 break; 1432 case VIRTCHNL_LINK_SPEED_20GB: 1433 link_speed_mbps = SPEED_20000; 1434 break; 1435 case VIRTCHNL_LINK_SPEED_10GB: 1436 link_speed_mbps = SPEED_10000; 1437 break; 1438 case VIRTCHNL_LINK_SPEED_5GB: 1439 link_speed_mbps = SPEED_5000; 1440 break; 1441 case 
VIRTCHNL_LINK_SPEED_2_5GB: 1442 link_speed_mbps = SPEED_2500; 1443 break; 1444 case VIRTCHNL_LINK_SPEED_1GB: 1445 link_speed_mbps = SPEED_1000; 1446 break; 1447 case VIRTCHNL_LINK_SPEED_100MB: 1448 link_speed_mbps = SPEED_100; 1449 break; 1450 default: 1451 link_speed_mbps = SPEED_UNKNOWN; 1452 break; 1453 } 1454 1455 print_link_msg: 1456 if (link_speed_mbps > SPEED_1000) { 1457 if (link_speed_mbps == SPEED_2500) { 1458 speed = kasprintf(GFP_KERNEL, "%s", "2.5 Gbps"); 1459 } else { 1460 /* convert to Gbps inline */ 1461 speed = kasprintf(GFP_KERNEL, "%d Gbps", 1462 link_speed_mbps / 1000); 1463 } 1464 } else if (link_speed_mbps == SPEED_UNKNOWN) { 1465 speed = kasprintf(GFP_KERNEL, "%s", "Unknown Mbps"); 1466 } else { 1467 speed = kasprintf(GFP_KERNEL, "%d Mbps", link_speed_mbps); 1468 } 1469 1470 netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed); 1471 kfree(speed); 1472 } 1473 1474 /** 1475 * iavf_get_vpe_link_status 1476 * @adapter: adapter structure 1477 * @vpe: virtchnl_pf_event structure 1478 * 1479 * Helper function for determining the link status 1480 **/ 1481 static bool 1482 iavf_get_vpe_link_status(struct iavf_adapter *adapter, 1483 struct virtchnl_pf_event *vpe) 1484 { 1485 if (ADV_LINK_SUPPORT(adapter)) 1486 return vpe->event_data.link_event_adv.link_status; 1487 else 1488 return vpe->event_data.link_event.link_status; 1489 } 1490 1491 /** 1492 * iavf_set_adapter_link_speed_from_vpe 1493 * @adapter: adapter structure for which we are setting the link speed 1494 * @vpe: virtchnl_pf_event structure that contains the link speed we are setting 1495 * 1496 * Helper function for setting iavf_adapter link speed 1497 **/ 1498 static void 1499 iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter, 1500 struct virtchnl_pf_event *vpe) 1501 { 1502 if (ADV_LINK_SUPPORT(adapter)) 1503 adapter->link_speed_mbps = 1504 vpe->event_data.link_event_adv.link_speed; 1505 else 1506 adapter->link_speed = vpe->event_data.link_event.link_speed; 
1507 } 1508 1509 /** 1510 * iavf_enable_channels 1511 * @adapter: adapter structure 1512 * 1513 * Request that the PF enable channels as specified by 1514 * the user via tc tool. 1515 **/ 1516 void iavf_enable_channels(struct iavf_adapter *adapter) 1517 { 1518 struct virtchnl_tc_info *vti = NULL; 1519 size_t len; 1520 int i; 1521 1522 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1523 /* bail because we already have a command pending */ 1524 dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n", 1525 adapter->current_op); 1526 return; 1527 } 1528 1529 len = virtchnl_struct_size(vti, list, adapter->num_tc); 1530 vti = kzalloc(len, GFP_KERNEL); 1531 if (!vti) 1532 return; 1533 vti->num_tc = adapter->num_tc; 1534 for (i = 0; i < vti->num_tc; i++) { 1535 vti->list[i].count = adapter->ch_config.ch_info[i].count; 1536 vti->list[i].offset = adapter->ch_config.ch_info[i].offset; 1537 vti->list[i].pad = 0; 1538 vti->list[i].max_tx_rate = 1539 adapter->ch_config.ch_info[i].max_tx_rate; 1540 } 1541 1542 adapter->ch_config.state = __IAVF_TC_RUNNING; 1543 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; 1544 adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS; 1545 adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CHANNELS; 1546 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, (u8 *)vti, len); 1547 kfree(vti); 1548 } 1549 1550 /** 1551 * iavf_disable_channels 1552 * @adapter: adapter structure 1553 * 1554 * Request that the PF disable channels that are configured 1555 **/ 1556 void iavf_disable_channels(struct iavf_adapter *adapter) 1557 { 1558 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1559 /* bail because we already have a command pending */ 1560 dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n", 1561 adapter->current_op); 1562 return; 1563 } 1564 1565 adapter->ch_config.state = __IAVF_TC_INVALID; 1566 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; 1567 adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS; 1568 
adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CHANNELS; 1569 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, NULL, 0); 1570 } 1571 1572 /** 1573 * iavf_print_cloud_filter 1574 * @adapter: adapter structure 1575 * @f: cloud filter to print 1576 * 1577 * Print the cloud filter 1578 **/ 1579 static void iavf_print_cloud_filter(struct iavf_adapter *adapter, 1580 struct virtchnl_filter *f) 1581 { 1582 switch (f->flow_type) { 1583 case VIRTCHNL_TCP_V4_FLOW: 1584 dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n", 1585 &f->data.tcp_spec.dst_mac, 1586 &f->data.tcp_spec.src_mac, 1587 ntohs(f->data.tcp_spec.vlan_id), 1588 &f->data.tcp_spec.dst_ip[0], 1589 &f->data.tcp_spec.src_ip[0], 1590 ntohs(f->data.tcp_spec.dst_port), 1591 ntohs(f->data.tcp_spec.src_port)); 1592 break; 1593 case VIRTCHNL_TCP_V6_FLOW: 1594 dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n", 1595 &f->data.tcp_spec.dst_mac, 1596 &f->data.tcp_spec.src_mac, 1597 ntohs(f->data.tcp_spec.vlan_id), 1598 &f->data.tcp_spec.dst_ip, 1599 &f->data.tcp_spec.src_ip, 1600 ntohs(f->data.tcp_spec.dst_port), 1601 ntohs(f->data.tcp_spec.src_port)); 1602 break; 1603 } 1604 } 1605 1606 /** 1607 * iavf_add_cloud_filter 1608 * @adapter: adapter structure 1609 * 1610 * Request that the PF add cloud filters as specified 1611 * by the user via tc tool. 
1612 **/ 1613 void iavf_add_cloud_filter(struct iavf_adapter *adapter) 1614 { 1615 struct iavf_cloud_filter *cf; 1616 struct virtchnl_filter *f; 1617 int len = 0, count = 0; 1618 1619 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1620 /* bail because we already have a command pending */ 1621 dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n", 1622 adapter->current_op); 1623 return; 1624 } 1625 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1626 if (cf->add) { 1627 count++; 1628 break; 1629 } 1630 } 1631 if (!count) { 1632 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 1633 return; 1634 } 1635 adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER; 1636 1637 len = sizeof(struct virtchnl_filter); 1638 f = kzalloc(len, GFP_KERNEL); 1639 if (!f) 1640 return; 1641 1642 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1643 if (cf->add) { 1644 memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); 1645 cf->add = false; 1646 cf->state = __IAVF_CF_ADD_PENDING; 1647 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_CLOUD_FILTER, 1648 (u8 *)f, len); 1649 } 1650 } 1651 kfree(f); 1652 } 1653 1654 /** 1655 * iavf_del_cloud_filter 1656 * @adapter: adapter structure 1657 * 1658 * Request that the PF delete cloud filters as specified 1659 * by the user via tc tool. 
1660 **/ 1661 void iavf_del_cloud_filter(struct iavf_adapter *adapter) 1662 { 1663 struct iavf_cloud_filter *cf, *cftmp; 1664 struct virtchnl_filter *f; 1665 int len = 0, count = 0; 1666 1667 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1668 /* bail because we already have a command pending */ 1669 dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n", 1670 adapter->current_op); 1671 return; 1672 } 1673 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1674 if (cf->del) { 1675 count++; 1676 break; 1677 } 1678 } 1679 if (!count) { 1680 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 1681 return; 1682 } 1683 adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER; 1684 1685 len = sizeof(struct virtchnl_filter); 1686 f = kzalloc(len, GFP_KERNEL); 1687 if (!f) 1688 return; 1689 1690 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 1691 if (cf->del) { 1692 memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); 1693 cf->del = false; 1694 cf->state = __IAVF_CF_DEL_PENDING; 1695 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_CLOUD_FILTER, 1696 (u8 *)f, len); 1697 } 1698 } 1699 kfree(f); 1700 } 1701 1702 /** 1703 * iavf_add_fdir_filter 1704 * @adapter: the VF adapter structure 1705 * 1706 * Request that the PF add Flow Director filters as specified 1707 * by the user via ethtool. 
1708 **/ 1709 void iavf_add_fdir_filter(struct iavf_adapter *adapter) 1710 { 1711 struct iavf_fdir_fltr *fdir; 1712 struct virtchnl_fdir_add *f; 1713 bool process_fltr = false; 1714 int len; 1715 1716 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1717 /* bail because we already have a command pending */ 1718 dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n", 1719 adapter->current_op); 1720 return; 1721 } 1722 1723 len = sizeof(struct virtchnl_fdir_add); 1724 f = kzalloc(len, GFP_KERNEL); 1725 if (!f) 1726 return; 1727 1728 spin_lock_bh(&adapter->fdir_fltr_lock); 1729 list_for_each_entry(fdir, &adapter->fdir_list_head, list) { 1730 if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) { 1731 process_fltr = true; 1732 fdir->state = IAVF_FDIR_FLTR_ADD_PENDING; 1733 memcpy(f, &fdir->vc_add_msg, len); 1734 break; 1735 } 1736 } 1737 spin_unlock_bh(&adapter->fdir_fltr_lock); 1738 1739 if (!process_fltr) { 1740 /* prevent iavf_add_fdir_filter() from being called when there 1741 * are no filters to add 1742 */ 1743 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER; 1744 kfree(f); 1745 return; 1746 } 1747 adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER; 1748 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len); 1749 kfree(f); 1750 } 1751 1752 /** 1753 * iavf_del_fdir_filter 1754 * @adapter: the VF adapter structure 1755 * 1756 * Request that the PF delete Flow Director filters as specified 1757 * by the user via ethtool. 
1758 **/ 1759 void iavf_del_fdir_filter(struct iavf_adapter *adapter) 1760 { 1761 struct virtchnl_fdir_del f = {}; 1762 struct iavf_fdir_fltr *fdir; 1763 bool process_fltr = false; 1764 int len; 1765 1766 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1767 /* bail because we already have a command pending */ 1768 dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n", 1769 adapter->current_op); 1770 return; 1771 } 1772 1773 len = sizeof(struct virtchnl_fdir_del); 1774 1775 spin_lock_bh(&adapter->fdir_fltr_lock); 1776 list_for_each_entry(fdir, &adapter->fdir_list_head, list) { 1777 if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) { 1778 process_fltr = true; 1779 f.vsi_id = fdir->vc_add_msg.vsi_id; 1780 f.flow_id = fdir->flow_id; 1781 fdir->state = IAVF_FDIR_FLTR_DEL_PENDING; 1782 break; 1783 } else if (fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST) { 1784 process_fltr = true; 1785 f.vsi_id = fdir->vc_add_msg.vsi_id; 1786 f.flow_id = fdir->flow_id; 1787 fdir->state = IAVF_FDIR_FLTR_DIS_PENDING; 1788 break; 1789 } 1790 } 1791 spin_unlock_bh(&adapter->fdir_fltr_lock); 1792 1793 if (!process_fltr) { 1794 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER; 1795 return; 1796 } 1797 1798 adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER; 1799 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len); 1800 } 1801 1802 /** 1803 * iavf_add_adv_rss_cfg 1804 * @adapter: the VF adapter structure 1805 * 1806 * Request that the PF add RSS configuration as specified 1807 * by the user via ethtool. 
1808 **/ 1809 void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter) 1810 { 1811 struct virtchnl_rss_cfg *rss_cfg; 1812 struct iavf_adv_rss *rss; 1813 bool process_rss = false; 1814 int len; 1815 1816 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1817 /* bail because we already have a command pending */ 1818 dev_err(&adapter->pdev->dev, "Cannot add RSS configuration, command %d pending\n", 1819 adapter->current_op); 1820 return; 1821 } 1822 1823 len = sizeof(struct virtchnl_rss_cfg); 1824 rss_cfg = kzalloc(len, GFP_KERNEL); 1825 if (!rss_cfg) 1826 return; 1827 1828 spin_lock_bh(&adapter->adv_rss_lock); 1829 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { 1830 if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) { 1831 process_rss = true; 1832 rss->state = IAVF_ADV_RSS_ADD_PENDING; 1833 memcpy(rss_cfg, &rss->cfg_msg, len); 1834 iavf_print_adv_rss_cfg(adapter, rss, 1835 "Input set change for", 1836 "is pending"); 1837 break; 1838 } 1839 } 1840 spin_unlock_bh(&adapter->adv_rss_lock); 1841 1842 if (process_rss) { 1843 adapter->current_op = VIRTCHNL_OP_ADD_RSS_CFG; 1844 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_RSS_CFG, 1845 (u8 *)rss_cfg, len); 1846 } else { 1847 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_ADV_RSS_CFG; 1848 } 1849 1850 kfree(rss_cfg); 1851 } 1852 1853 /** 1854 * iavf_del_adv_rss_cfg 1855 * @adapter: the VF adapter structure 1856 * 1857 * Request that the PF delete RSS configuration as specified 1858 * by the user via ethtool. 
1859 **/ 1860 void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter) 1861 { 1862 struct virtchnl_rss_cfg *rss_cfg; 1863 struct iavf_adv_rss *rss; 1864 bool process_rss = false; 1865 int len; 1866 1867 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1868 /* bail because we already have a command pending */ 1869 dev_err(&adapter->pdev->dev, "Cannot remove RSS configuration, command %d pending\n", 1870 adapter->current_op); 1871 return; 1872 } 1873 1874 len = sizeof(struct virtchnl_rss_cfg); 1875 rss_cfg = kzalloc(len, GFP_KERNEL); 1876 if (!rss_cfg) 1877 return; 1878 1879 spin_lock_bh(&adapter->adv_rss_lock); 1880 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { 1881 if (rss->state == IAVF_ADV_RSS_DEL_REQUEST) { 1882 process_rss = true; 1883 rss->state = IAVF_ADV_RSS_DEL_PENDING; 1884 memcpy(rss_cfg, &rss->cfg_msg, len); 1885 break; 1886 } 1887 } 1888 spin_unlock_bh(&adapter->adv_rss_lock); 1889 1890 if (process_rss) { 1891 adapter->current_op = VIRTCHNL_OP_DEL_RSS_CFG; 1892 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_RSS_CFG, 1893 (u8 *)rss_cfg, len); 1894 } else { 1895 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; 1896 } 1897 1898 kfree(rss_cfg); 1899 } 1900 1901 /** 1902 * iavf_request_reset 1903 * @adapter: adapter structure 1904 * 1905 * Request that the PF reset this VF. No response is expected. 1906 **/ 1907 int iavf_request_reset(struct iavf_adapter *adapter) 1908 { 1909 int err; 1910 /* Don't check CURRENT_OP - this is always higher priority */ 1911 err = iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0); 1912 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1913 return err; 1914 } 1915 1916 /** 1917 * iavf_netdev_features_vlan_strip_set - update vlan strip status 1918 * @netdev: ptr to netdev being adjusted 1919 * @enable: enable or disable vlan strip 1920 * 1921 * Helper function to change vlan strip status in netdev->features. 
1922 */ 1923 static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev, 1924 const bool enable) 1925 { 1926 if (enable) 1927 netdev->features |= NETIF_F_HW_VLAN_CTAG_RX; 1928 else 1929 netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 1930 } 1931 1932 /** 1933 * iavf_activate_fdir_filters - Reactivate all FDIR filters after a reset 1934 * @adapter: private adapter structure 1935 * 1936 * Called after a reset to re-add all FDIR filters and delete some of them 1937 * if they were pending to be deleted. 1938 */ 1939 static void iavf_activate_fdir_filters(struct iavf_adapter *adapter) 1940 { 1941 struct iavf_fdir_fltr *f, *ftmp; 1942 bool add_filters = false; 1943 1944 spin_lock_bh(&adapter->fdir_fltr_lock); 1945 list_for_each_entry_safe(f, ftmp, &adapter->fdir_list_head, list) { 1946 if (f->state == IAVF_FDIR_FLTR_ADD_REQUEST || 1947 f->state == IAVF_FDIR_FLTR_ADD_PENDING || 1948 f->state == IAVF_FDIR_FLTR_ACTIVE) { 1949 /* All filters and requests have been removed in PF, 1950 * restore them 1951 */ 1952 f->state = IAVF_FDIR_FLTR_ADD_REQUEST; 1953 add_filters = true; 1954 } else if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST || 1955 f->state == IAVF_FDIR_FLTR_DIS_PENDING) { 1956 /* Link down state, leave filters as inactive */ 1957 f->state = IAVF_FDIR_FLTR_INACTIVE; 1958 } else if (f->state == IAVF_FDIR_FLTR_DEL_REQUEST || 1959 f->state == IAVF_FDIR_FLTR_DEL_PENDING) { 1960 /* Delete filters that were pending to be deleted, the 1961 * list on PF is already cleared after a reset 1962 */ 1963 list_del(&f->list); 1964 kfree(f); 1965 adapter->fdir_active_fltr--; 1966 } 1967 } 1968 spin_unlock_bh(&adapter->fdir_fltr_lock); 1969 1970 if (add_filters) 1971 adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; 1972 } 1973 1974 /** 1975 * iavf_virtchnl_completion 1976 * @adapter: adapter structure 1977 * @v_opcode: opcode sent by PF 1978 * @v_retval: retval sent by PF 1979 * @msg: message sent by PF 1980 * @msglen: message length 1981 * 1982 * Asynchronous completion 
function for admin queue messages. Rather than busy 1983 * wait, we fire off our requests and assume that no errors will be returned. 1984 * This function handles the reply messages. 1985 **/ 1986 void iavf_virtchnl_completion(struct iavf_adapter *adapter, 1987 enum virtchnl_ops v_opcode, 1988 enum iavf_status v_retval, u8 *msg, u16 msglen) 1989 { 1990 struct net_device *netdev = adapter->netdev; 1991 1992 if (v_opcode == VIRTCHNL_OP_EVENT) { 1993 struct virtchnl_pf_event *vpe = 1994 (struct virtchnl_pf_event *)msg; 1995 bool link_up = iavf_get_vpe_link_status(adapter, vpe); 1996 1997 switch (vpe->event) { 1998 case VIRTCHNL_EVENT_LINK_CHANGE: 1999 iavf_set_adapter_link_speed_from_vpe(adapter, vpe); 2000 2001 /* we've already got the right link status, bail */ 2002 if (adapter->link_up == link_up) 2003 break; 2004 2005 if (link_up) { 2006 /* If we get link up message and start queues 2007 * before our queues are configured it will 2008 * trigger a TX hang. In that case, just ignore 2009 * the link status message,we'll get another one 2010 * after we enable queues and actually prepared 2011 * to send traffic. 2012 */ 2013 if (adapter->state != __IAVF_RUNNING) 2014 break; 2015 2016 /* For ADq enabled VF, we reconfigure VSIs and 2017 * re-allocate queues. Hence wait till all 2018 * queues are enabled. 
2019 */ 2020 if (adapter->flags & 2021 IAVF_FLAG_QUEUES_DISABLED) 2022 break; 2023 } 2024 2025 adapter->link_up = link_up; 2026 if (link_up) { 2027 netif_tx_start_all_queues(netdev); 2028 netif_carrier_on(netdev); 2029 } else { 2030 netif_tx_stop_all_queues(netdev); 2031 netif_carrier_off(netdev); 2032 } 2033 iavf_print_link_message(adapter); 2034 break; 2035 case VIRTCHNL_EVENT_RESET_IMPENDING: 2036 dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n"); 2037 if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) { 2038 dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); 2039 iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING); 2040 } 2041 break; 2042 default: 2043 dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n", 2044 vpe->event); 2045 break; 2046 } 2047 return; 2048 } 2049 if (v_retval) { 2050 switch (v_opcode) { 2051 case VIRTCHNL_OP_ADD_VLAN: 2052 dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", 2053 iavf_stat_str(&adapter->hw, v_retval)); 2054 break; 2055 case VIRTCHNL_OP_ADD_ETH_ADDR: 2056 dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n", 2057 iavf_stat_str(&adapter->hw, v_retval)); 2058 iavf_mac_add_reject(adapter); 2059 /* restore administratively set MAC address */ 2060 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 2061 wake_up(&adapter->vc_waitqueue); 2062 break; 2063 case VIRTCHNL_OP_DEL_VLAN: 2064 dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n", 2065 iavf_stat_str(&adapter->hw, v_retval)); 2066 break; 2067 case VIRTCHNL_OP_DEL_ETH_ADDR: 2068 dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n", 2069 iavf_stat_str(&adapter->hw, v_retval)); 2070 break; 2071 case VIRTCHNL_OP_ENABLE_CHANNELS: 2072 dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n", 2073 iavf_stat_str(&adapter->hw, v_retval)); 2074 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 2075 adapter->ch_config.state = __IAVF_TC_INVALID; 2076 
netdev_reset_tc(netdev); 2077 netif_tx_start_all_queues(netdev); 2078 break; 2079 case VIRTCHNL_OP_DISABLE_CHANNELS: 2080 dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n", 2081 iavf_stat_str(&adapter->hw, v_retval)); 2082 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 2083 adapter->ch_config.state = __IAVF_TC_RUNNING; 2084 netif_tx_start_all_queues(netdev); 2085 break; 2086 case VIRTCHNL_OP_ADD_CLOUD_FILTER: { 2087 struct iavf_cloud_filter *cf, *cftmp; 2088 2089 list_for_each_entry_safe(cf, cftmp, 2090 &adapter->cloud_filter_list, 2091 list) { 2092 if (cf->state == __IAVF_CF_ADD_PENDING) { 2093 cf->state = __IAVF_CF_INVALID; 2094 dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n", 2095 iavf_stat_str(&adapter->hw, 2096 v_retval)); 2097 iavf_print_cloud_filter(adapter, 2098 &cf->f); 2099 list_del(&cf->list); 2100 kfree(cf); 2101 adapter->num_cloud_filters--; 2102 } 2103 } 2104 } 2105 break; 2106 case VIRTCHNL_OP_DEL_CLOUD_FILTER: { 2107 struct iavf_cloud_filter *cf; 2108 2109 list_for_each_entry(cf, &adapter->cloud_filter_list, 2110 list) { 2111 if (cf->state == __IAVF_CF_DEL_PENDING) { 2112 cf->state = __IAVF_CF_ACTIVE; 2113 dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n", 2114 iavf_stat_str(&adapter->hw, 2115 v_retval)); 2116 iavf_print_cloud_filter(adapter, 2117 &cf->f); 2118 } 2119 } 2120 } 2121 break; 2122 case VIRTCHNL_OP_ADD_FDIR_FILTER: { 2123 struct iavf_fdir_fltr *fdir, *fdir_tmp; 2124 2125 spin_lock_bh(&adapter->fdir_fltr_lock); 2126 list_for_each_entry_safe(fdir, fdir_tmp, 2127 &adapter->fdir_list_head, 2128 list) { 2129 if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) { 2130 dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n", 2131 iavf_stat_str(&adapter->hw, 2132 v_retval)); 2133 iavf_print_fdir_fltr(adapter, fdir); 2134 if (msglen) 2135 dev_err(&adapter->pdev->dev, 2136 "%s\n", msg); 2137 list_del(&fdir->list); 2138 kfree(fdir); 2139 
adapter->fdir_active_fltr--; 2140 } 2141 } 2142 spin_unlock_bh(&adapter->fdir_fltr_lock); 2143 } 2144 break; 2145 case VIRTCHNL_OP_DEL_FDIR_FILTER: { 2146 struct iavf_fdir_fltr *fdir; 2147 2148 spin_lock_bh(&adapter->fdir_fltr_lock); 2149 list_for_each_entry(fdir, &adapter->fdir_list_head, 2150 list) { 2151 if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING || 2152 fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) { 2153 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 2154 dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n", 2155 iavf_stat_str(&adapter->hw, 2156 v_retval)); 2157 iavf_print_fdir_fltr(adapter, fdir); 2158 } 2159 } 2160 spin_unlock_bh(&adapter->fdir_fltr_lock); 2161 } 2162 break; 2163 case VIRTCHNL_OP_ADD_RSS_CFG: { 2164 struct iavf_adv_rss *rss, *rss_tmp; 2165 2166 spin_lock_bh(&adapter->adv_rss_lock); 2167 list_for_each_entry_safe(rss, rss_tmp, 2168 &adapter->adv_rss_list_head, 2169 list) { 2170 if (rss->state == IAVF_ADV_RSS_ADD_PENDING) { 2171 iavf_print_adv_rss_cfg(adapter, rss, 2172 "Failed to change the input set for", 2173 NULL); 2174 list_del(&rss->list); 2175 kfree(rss); 2176 } 2177 } 2178 spin_unlock_bh(&adapter->adv_rss_lock); 2179 } 2180 break; 2181 case VIRTCHNL_OP_DEL_RSS_CFG: { 2182 struct iavf_adv_rss *rss; 2183 2184 spin_lock_bh(&adapter->adv_rss_lock); 2185 list_for_each_entry(rss, &adapter->adv_rss_list_head, 2186 list) { 2187 if (rss->state == IAVF_ADV_RSS_DEL_PENDING) { 2188 rss->state = IAVF_ADV_RSS_ACTIVE; 2189 dev_err(&adapter->pdev->dev, "Failed to delete RSS configuration, error %s\n", 2190 iavf_stat_str(&adapter->hw, 2191 v_retval)); 2192 } 2193 } 2194 spin_unlock_bh(&adapter->adv_rss_lock); 2195 } 2196 break; 2197 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 2198 dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); 2199 /* Vlan stripping could not be enabled by ethtool. 2200 * Disable it in netdev->features. 
2201 */ 2202 iavf_netdev_features_vlan_strip_set(netdev, false); 2203 break; 2204 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 2205 dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); 2206 /* Vlan stripping could not be disabled by ethtool. 2207 * Enable it in netdev->features. 2208 */ 2209 iavf_netdev_features_vlan_strip_set(netdev, true); 2210 break; 2211 case VIRTCHNL_OP_ADD_VLAN_V2: 2212 iavf_vlan_add_reject(adapter); 2213 dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", 2214 iavf_stat_str(&adapter->hw, v_retval)); 2215 break; 2216 case VIRTCHNL_OP_CONFIG_RSS_HFUNC: 2217 dev_warn(&adapter->pdev->dev, "Failed to configure hash function, error %s\n", 2218 iavf_stat_str(&adapter->hw, v_retval)); 2219 2220 if (adapter->hfunc == 2221 VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) 2222 adapter->hfunc = 2223 VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC; 2224 else 2225 adapter->hfunc = 2226 VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC; 2227 2228 break; 2229 default: 2230 dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", 2231 v_retval, iavf_stat_str(&adapter->hw, v_retval), 2232 v_opcode); 2233 } 2234 } 2235 switch (v_opcode) { 2236 case VIRTCHNL_OP_ADD_ETH_ADDR: 2237 if (!v_retval) 2238 iavf_mac_add_ok(adapter); 2239 if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr)) 2240 if (!ether_addr_equal(netdev->dev_addr, 2241 adapter->hw.mac.addr)) { 2242 netif_addr_lock_bh(netdev); 2243 eth_hw_addr_set(netdev, adapter->hw.mac.addr); 2244 netif_addr_unlock_bh(netdev); 2245 } 2246 wake_up(&adapter->vc_waitqueue); 2247 break; 2248 case VIRTCHNL_OP_GET_STATS: { 2249 struct iavf_eth_stats *stats = 2250 (struct iavf_eth_stats *)msg; 2251 netdev->stats.rx_packets = stats->rx_unicast + 2252 stats->rx_multicast + 2253 stats->rx_broadcast; 2254 netdev->stats.tx_packets = stats->tx_unicast + 2255 stats->tx_multicast + 2256 stats->tx_broadcast; 2257 netdev->stats.rx_bytes = stats->rx_bytes; 2258 
netdev->stats.tx_bytes = stats->tx_bytes; 2259 netdev->stats.tx_errors = stats->tx_errors; 2260 netdev->stats.rx_dropped = stats->rx_discards; 2261 netdev->stats.tx_dropped = stats->tx_discards; 2262 adapter->current_stats = *stats; 2263 } 2264 break; 2265 case VIRTCHNL_OP_GET_VF_RESOURCES: { 2266 u16 len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE; 2267 2268 memcpy(adapter->vf_res, msg, min(msglen, len)); 2269 iavf_validate_num_queues(adapter); 2270 iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res); 2271 if (is_zero_ether_addr(adapter->hw.mac.addr)) { 2272 /* restore current mac address */ 2273 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 2274 } else { 2275 netif_addr_lock_bh(netdev); 2276 /* refresh current mac address if changed */ 2277 ether_addr_copy(netdev->perm_addr, 2278 adapter->hw.mac.addr); 2279 netif_addr_unlock_bh(netdev); 2280 } 2281 spin_lock_bh(&adapter->mac_vlan_list_lock); 2282 iavf_add_filter(adapter, adapter->hw.mac.addr); 2283 2284 if (VLAN_ALLOWED(adapter)) { 2285 if (!list_empty(&adapter->vlan_filter_list)) { 2286 struct iavf_vlan_filter *vlf; 2287 2288 /* re-add all VLAN filters over virtchnl */ 2289 list_for_each_entry(vlf, 2290 &adapter->vlan_filter_list, 2291 list) 2292 vlf->state = IAVF_VLAN_ADD; 2293 2294 adapter->aq_required |= 2295 IAVF_FLAG_AQ_ADD_VLAN_FILTER; 2296 } 2297 } 2298 2299 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2300 2301 iavf_activate_fdir_filters(adapter); 2302 2303 iavf_parse_vf_resource_msg(adapter); 2304 2305 /* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the 2306 * response to VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to finish 2307 * configuration 2308 */ 2309 if (VLAN_V2_ALLOWED(adapter)) 2310 break; 2311 /* fallthrough and finish config if VIRTCHNL_VF_OFFLOAD_VLAN_V2 2312 * wasn't successfully negotiated with the PF 2313 */ 2314 } 2315 fallthrough; 2316 case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: { 2317 struct iavf_mac_filter *f; 2318 bool was_mac_changed; 2319 u64 aq_required = 0; 2320 2321 if 
(v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS) 2322 memcpy(&adapter->vlan_v2_caps, msg, 2323 min_t(u16, msglen, 2324 sizeof(adapter->vlan_v2_caps))); 2325 2326 iavf_process_config(adapter); 2327 adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES; 2328 iavf_schedule_finish_config(adapter); 2329 2330 iavf_set_queue_vlan_tag_loc(adapter); 2331 2332 was_mac_changed = !ether_addr_equal(netdev->dev_addr, 2333 adapter->hw.mac.addr); 2334 2335 spin_lock_bh(&adapter->mac_vlan_list_lock); 2336 2337 /* re-add all MAC filters */ 2338 list_for_each_entry(f, &adapter->mac_filter_list, list) { 2339 if (was_mac_changed && 2340 ether_addr_equal(netdev->dev_addr, f->macaddr)) 2341 ether_addr_copy(f->macaddr, 2342 adapter->hw.mac.addr); 2343 2344 f->is_new_mac = true; 2345 f->add = true; 2346 f->add_handled = false; 2347 f->remove = false; 2348 } 2349 2350 /* re-add all VLAN filters */ 2351 if (VLAN_FILTERING_ALLOWED(adapter)) { 2352 struct iavf_vlan_filter *vlf; 2353 2354 if (!list_empty(&adapter->vlan_filter_list)) { 2355 list_for_each_entry(vlf, 2356 &adapter->vlan_filter_list, 2357 list) 2358 vlf->state = IAVF_VLAN_ADD; 2359 2360 aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; 2361 } 2362 } 2363 2364 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2365 2366 netif_addr_lock_bh(netdev); 2367 eth_hw_addr_set(netdev, adapter->hw.mac.addr); 2368 netif_addr_unlock_bh(netdev); 2369 2370 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER | 2371 aq_required; 2372 } 2373 break; 2374 case VIRTCHNL_OP_ENABLE_QUEUES: 2375 /* enable transmits */ 2376 iavf_irq_enable(adapter, true); 2377 wake_up(&adapter->reset_waitqueue); 2378 adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED; 2379 break; 2380 case VIRTCHNL_OP_DISABLE_QUEUES: 2381 iavf_free_all_tx_resources(adapter); 2382 iavf_free_all_rx_resources(adapter); 2383 if (adapter->state == __IAVF_DOWN_PENDING) { 2384 iavf_change_state(adapter, __IAVF_DOWN); 2385 wake_up(&adapter->down_waitqueue); 2386 } 2387 break; 2388 case VIRTCHNL_OP_VERSION: 2389 
case VIRTCHNL_OP_CONFIG_IRQ_MAP: 2390 /* Don't display an error if we get these out of sequence. 2391 * If the firmware needed to get kicked, we'll get these and 2392 * it's no problem. 2393 */ 2394 if (v_opcode != adapter->current_op) 2395 return; 2396 break; 2397 case VIRTCHNL_OP_GET_RSS_HENA_CAPS: { 2398 struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; 2399 2400 if (msglen == sizeof(*vrh)) 2401 adapter->hena = vrh->hena; 2402 else 2403 dev_warn(&adapter->pdev->dev, 2404 "Invalid message %d from PF\n", v_opcode); 2405 } 2406 break; 2407 case VIRTCHNL_OP_REQUEST_QUEUES: { 2408 struct virtchnl_vf_res_request *vfres = 2409 (struct virtchnl_vf_res_request *)msg; 2410 2411 if (vfres->num_queue_pairs != adapter->num_req_queues) { 2412 dev_info(&adapter->pdev->dev, 2413 "Requested %d queues, PF can support %d\n", 2414 adapter->num_req_queues, 2415 vfres->num_queue_pairs); 2416 adapter->num_req_queues = 0; 2417 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 2418 } 2419 } 2420 break; 2421 case VIRTCHNL_OP_ADD_CLOUD_FILTER: { 2422 struct iavf_cloud_filter *cf; 2423 2424 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 2425 if (cf->state == __IAVF_CF_ADD_PENDING) 2426 cf->state = __IAVF_CF_ACTIVE; 2427 } 2428 } 2429 break; 2430 case VIRTCHNL_OP_DEL_CLOUD_FILTER: { 2431 struct iavf_cloud_filter *cf, *cftmp; 2432 2433 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, 2434 list) { 2435 if (cf->state == __IAVF_CF_DEL_PENDING) { 2436 cf->state = __IAVF_CF_INVALID; 2437 list_del(&cf->list); 2438 kfree(cf); 2439 adapter->num_cloud_filters--; 2440 } 2441 } 2442 } 2443 break; 2444 case VIRTCHNL_OP_ADD_FDIR_FILTER: { 2445 struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg; 2446 struct iavf_fdir_fltr *fdir, *fdir_tmp; 2447 2448 spin_lock_bh(&adapter->fdir_fltr_lock); 2449 list_for_each_entry_safe(fdir, fdir_tmp, 2450 &adapter->fdir_list_head, 2451 list) { 2452 if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) { 2453 if 
(add_fltr->status == VIRTCHNL_FDIR_SUCCESS) { 2454 dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n", 2455 fdir->loc); 2456 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 2457 fdir->flow_id = add_fltr->flow_id; 2458 } else { 2459 dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n", 2460 add_fltr->status); 2461 iavf_print_fdir_fltr(adapter, fdir); 2462 list_del(&fdir->list); 2463 kfree(fdir); 2464 adapter->fdir_active_fltr--; 2465 } 2466 } 2467 } 2468 spin_unlock_bh(&adapter->fdir_fltr_lock); 2469 } 2470 break; 2471 case VIRTCHNL_OP_DEL_FDIR_FILTER: { 2472 struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg; 2473 struct iavf_fdir_fltr *fdir, *fdir_tmp; 2474 2475 spin_lock_bh(&adapter->fdir_fltr_lock); 2476 list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head, 2477 list) { 2478 if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { 2479 if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS || 2480 del_fltr->status == 2481 VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) { 2482 dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n", 2483 fdir->loc); 2484 list_del(&fdir->list); 2485 kfree(fdir); 2486 adapter->fdir_active_fltr--; 2487 } else { 2488 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 2489 dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n", 2490 del_fltr->status); 2491 iavf_print_fdir_fltr(adapter, fdir); 2492 } 2493 } else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) { 2494 if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS || 2495 del_fltr->status == 2496 VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) { 2497 fdir->state = IAVF_FDIR_FLTR_INACTIVE; 2498 } else { 2499 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 2500 dev_info(&adapter->pdev->dev, "Failed to disable Flow Director filter with status: %d\n", 2501 del_fltr->status); 2502 iavf_print_fdir_fltr(adapter, fdir); 2503 } 2504 } 2505 } 2506 spin_unlock_bh(&adapter->fdir_fltr_lock); 2507 } 2508 break; 2509 case 
VIRTCHNL_OP_ADD_RSS_CFG: { 2510 struct iavf_adv_rss *rss; 2511 2512 spin_lock_bh(&adapter->adv_rss_lock); 2513 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { 2514 if (rss->state == IAVF_ADV_RSS_ADD_PENDING) { 2515 iavf_print_adv_rss_cfg(adapter, rss, 2516 "Input set change for", 2517 "successful"); 2518 rss->state = IAVF_ADV_RSS_ACTIVE; 2519 } 2520 } 2521 spin_unlock_bh(&adapter->adv_rss_lock); 2522 } 2523 break; 2524 case VIRTCHNL_OP_DEL_RSS_CFG: { 2525 struct iavf_adv_rss *rss, *rss_tmp; 2526 2527 spin_lock_bh(&adapter->adv_rss_lock); 2528 list_for_each_entry_safe(rss, rss_tmp, 2529 &adapter->adv_rss_list_head, list) { 2530 if (rss->state == IAVF_ADV_RSS_DEL_PENDING) { 2531 list_del(&rss->list); 2532 kfree(rss); 2533 } 2534 } 2535 spin_unlock_bh(&adapter->adv_rss_lock); 2536 } 2537 break; 2538 case VIRTCHNL_OP_ADD_VLAN_V2: { 2539 struct iavf_vlan_filter *f; 2540 2541 spin_lock_bh(&adapter->mac_vlan_list_lock); 2542 list_for_each_entry(f, &adapter->vlan_filter_list, list) { 2543 if (f->state == IAVF_VLAN_IS_NEW) 2544 f->state = IAVF_VLAN_ACTIVE; 2545 } 2546 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2547 } 2548 break; 2549 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 2550 /* PF enabled vlan strip on this VF. 2551 * Update netdev->features if needed to be in sync with ethtool. 2552 */ 2553 if (!v_retval) 2554 iavf_netdev_features_vlan_strip_set(netdev, true); 2555 break; 2556 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 2557 /* PF disabled vlan strip on this VF. 2558 * Update netdev->features if needed to be in sync with ethtool. 2559 */ 2560 if (!v_retval) 2561 iavf_netdev_features_vlan_strip_set(netdev, false); 2562 break; 2563 default: 2564 if (adapter->current_op && (v_opcode != adapter->current_op)) 2565 dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", 2566 adapter->current_op, v_opcode); 2567 break; 2568 } /* switch v_opcode */ 2569 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2570 } 2571