// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf.h"
#include "iavf_prototype.h"
#include "iavf_client.h"

/* busy wait delay in msec */
#define IAVF_BUSY_WAIT_DELAY 10
#define IAVF_BUSY_WAIT_COUNT 50

/**
 * iavf_send_pf_msg
 * @adapter: adapter structure
 * @op: virtual channel opcode
 * @msg: pointer to message buffer
 * @len: message length
 *
 * Send message to PF and print status if failure.
 **/
static int iavf_send_pf_msg(struct iavf_adapter *adapter,
			    enum virtchnl_ops op, u8 *msg, u16 len)
{
	struct iavf_hw *hw = &adapter->hw;
	enum iavf_status err;

	/* If PF communication is already known broken, there is no point
	 * sending; report success so callers do not keep retrying.
	 */
	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
		return 0; /* nothing to see here, move along */

	err = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
	if (err)
		/* dev_dbg only: send failures are expected during resets */
		dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
			op, iavf_stat_str(hw, err),
			iavf_aq_str(hw, hw->aq.asq_last_status));
	return err;
}

/**
 * iavf_send_api_ver
 * @adapter: adapter structure
 *
 * Send API version admin queue message to the PF. The reply is not checked
 * in this function. Returns 0 if the message was successfully
 * sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
 **/
int iavf_send_api_ver(struct iavf_adapter *adapter)
{
	struct virtchnl_version_info vvi;

	vvi.major = VIRTCHNL_VERSION_MAJOR;
	vvi.minor = VIRTCHNL_VERSION_MINOR;

	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
				sizeof(vvi));
}

/**
 * iavf_verify_api_ver
 * @adapter: adapter structure
 *
 * Compare API versions with the PF. Must be called after admin queue is
 * initialized. Returns 0 if API versions match, -EIO if they do not,
 * IAVF_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
 * from the firmware are propagated.
65 **/ 66 int iavf_verify_api_ver(struct iavf_adapter *adapter) 67 { 68 struct virtchnl_version_info *pf_vvi; 69 struct iavf_hw *hw = &adapter->hw; 70 struct iavf_arq_event_info event; 71 enum virtchnl_ops op; 72 enum iavf_status err; 73 74 event.buf_len = IAVF_MAX_AQ_BUF_SIZE; 75 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 76 if (!event.msg_buf) { 77 err = -ENOMEM; 78 goto out; 79 } 80 81 while (1) { 82 err = iavf_clean_arq_element(hw, &event, NULL); 83 /* When the AQ is empty, iavf_clean_arq_element will return 84 * nonzero and this loop will terminate. 85 */ 86 if (err) 87 goto out_alloc; 88 op = 89 (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); 90 if (op == VIRTCHNL_OP_VERSION) 91 break; 92 } 93 94 95 err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); 96 if (err) 97 goto out_alloc; 98 99 if (op != VIRTCHNL_OP_VERSION) { 100 dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n", 101 op); 102 err = -EIO; 103 goto out_alloc; 104 } 105 106 pf_vvi = (struct virtchnl_version_info *)event.msg_buf; 107 adapter->pf_version = *pf_vvi; 108 109 if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) || 110 ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) && 111 (pf_vvi->minor > VIRTCHNL_VERSION_MINOR))) 112 err = -EIO; 113 114 out_alloc: 115 kfree(event.msg_buf); 116 out: 117 return err; 118 } 119 120 /** 121 * iavf_send_vf_config_msg 122 * @adapter: adapter structure 123 * 124 * Send VF configuration request admin queue message to the PF. The reply 125 * is not checked in this function. Returns 0 if the message was 126 * successfully sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not. 
127 **/ 128 int iavf_send_vf_config_msg(struct iavf_adapter *adapter) 129 { 130 u32 caps; 131 132 caps = VIRTCHNL_VF_OFFLOAD_L2 | 133 VIRTCHNL_VF_OFFLOAD_RSS_PF | 134 VIRTCHNL_VF_OFFLOAD_RSS_AQ | 135 VIRTCHNL_VF_OFFLOAD_RSS_REG | 136 VIRTCHNL_VF_OFFLOAD_VLAN | 137 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | 138 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 | 139 VIRTCHNL_VF_OFFLOAD_ENCAP | 140 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | 141 VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | 142 VIRTCHNL_VF_OFFLOAD_ADQ | 143 VIRTCHNL_VF_OFFLOAD_USO | 144 VIRTCHNL_VF_OFFLOAD_FDIR_PF | 145 VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF | 146 VIRTCHNL_VF_CAP_ADV_LINK_SPEED; 147 148 adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; 149 adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG; 150 if (PF_IS_V11(adapter)) 151 return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES, 152 (u8 *)&caps, sizeof(caps)); 153 else 154 return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES, 155 NULL, 0); 156 } 157 158 /** 159 * iavf_validate_num_queues 160 * @adapter: adapter structure 161 * 162 * Validate that the number of queues the PF has sent in 163 * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle. 164 **/ 165 static void iavf_validate_num_queues(struct iavf_adapter *adapter) 166 { 167 if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) { 168 struct virtchnl_vsi_resource *vsi_res; 169 int i; 170 171 dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n", 172 adapter->vf_res->num_queue_pairs, 173 IAVF_MAX_REQ_QUEUES); 174 dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n", 175 IAVF_MAX_REQ_QUEUES); 176 adapter->vf_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES; 177 for (i = 0; i < adapter->vf_res->num_vsis; i++) { 178 vsi_res = &adapter->vf_res->vsi_res[i]; 179 vsi_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES; 180 } 181 } 182 } 183 184 /** 185 * iavf_get_vf_config 186 * @adapter: private adapter structure 187 * 188 * Get VF configuration from PF and populate hw structure. 
Must be called after
 * admin queue is initialized. Busy waits until response is received from PF,
 * with maximum timeout. Response from PF is returned in the buffer for further
 * processing by the caller.
 **/
int iavf_get_vf_config(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_arq_event_info event;
	enum virtchnl_ops op;
	int err;	/* int, not enum iavf_status: -ENOMEM may be returned */
	u16 len;

	len = sizeof(struct virtchnl_vf_resource) +
	      IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
	event.buf_len = len;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf) {
		err = -ENOMEM;
		goto out;
	}

	while (1) {
		/* When the AQ is empty, iavf_clean_arq_element will return
		 * nonzero and this loop will terminate.
		 */
		err = iavf_clean_arq_element(hw, &event, NULL);
		if (err)
			goto out_alloc;
		op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		if (op == VIRTCHNL_OP_GET_VF_RESOURCES)
			break;
	}

	/* cookie_low carries the PF's status for the request */
	err = (int)le32_to_cpu(event.desc.cookie_low);
	/* copy no more than was received and no more than we can hold */
	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));

	/* some PFs send more queues than we should have so validate that
	 * we aren't getting too many queues
	 */
	if (!err)
		iavf_validate_num_queues(adapter);
	iavf_vf_parse_hw_config(hw, adapter->vf_res);
out_alloc:
	kfree(event.msg_buf);
out:
	return err;
}

/**
 * iavf_configure_queues
 * @adapter: adapter structure
 *
 * Request that the PF set up our (previously allocated) queues.
 **/
void iavf_configure_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_vsi_queue_config_info *vqci;
	struct virtchnl_queue_pair_info *vqpi;
	int pairs = adapter->num_active_queues;
	int i, max_frame = IAVF_MAX_RXBUFFER;
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	/* one trailing virtchnl_queue_pair_info per active queue pair */
	len = struct_size(vqci, qpair, pairs);
	vqci = kzalloc(len, GFP_KERNEL);
	/* NOTE(review): on allocation failure current_op is left set to
	 * VIRTCHNL_OP_CONFIG_VSI_QUEUES — presumably recovered elsewhere
	 * (watchdog/reset); confirm before relying on it.
	 */
	if (!vqci)
		return;

	/* Limit maximum frame size when jumbo frames is not enabled */
	if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) &&
	    (adapter->netdev->mtu <= ETH_DATA_LEN))
		max_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;

	vqci->vsi_id = adapter->vsi_res->vsi_id;
	vqci->num_queue_pairs = pairs;
	vqpi = vqci->qpair;
	/* Size check is not needed here - HW max is 16 queue pairs, and we
	 * can fit info for 31 of them into the AQ buffer before it overflows.
	 */
	for (i = 0; i < pairs; i++) {
		vqpi->txq.vsi_id = vqci->vsi_id;
		vqpi->txq.queue_id = i;
		vqpi->txq.ring_len = adapter->tx_rings[i].count;
		vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
		vqpi->rxq.vsi_id = vqci->vsi_id;
		vqpi->rxq.queue_id = i;
		vqpi->rxq.ring_len = adapter->rx_rings[i].count;
		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
		vqpi->rxq.max_pkt_size = max_frame;
		/* rx buffer size must be aligned to the HW descriptor unit */
		vqpi->rxq.databuffer_size =
			ALIGN(adapter->rx_rings[i].rx_buf_len,
			      BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT));
		vqpi++;
	}

	adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
			 (u8 *)vqci, len);
	kfree(vqci);
}

/**
 * iavf_enable_queues
 * @adapter: adapter structure
 *
 * Request that the PF enable all of our queues.
 **/
void iavf_enable_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_queue_select vqs;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	/* contiguous mask of all active queues; assumes num_active_queues
	 * is small enough for BIT() arithmetic (HW max is 16 pairs)
	 */
	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
	vqs.rx_queues = vqs.tx_queues;
	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
			 (u8 *)&vqs, sizeof(vqs));
}

/**
 * iavf_disable_queues
 * @adapter: adapter structure
 *
 * Request that the PF disable all of our queues.
 **/
void iavf_disable_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_queue_select vqs;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	/* same contiguous queue mask as iavf_enable_queues */
	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
	vqs.rx_queues = vqs.tx_queues;
	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_QUEUES;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
			 (u8 *)&vqs, sizeof(vqs));
}

/**
 * iavf_map_queues
 * @adapter: adapter structure
 *
 * Request that the PF map queues to interrupt vectors. Misc causes, including
 * admin queue, are always mapped to vector 0.
 **/
void iavf_map_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_irq_map_info *vimi;
	struct virtchnl_vector_map *vecmap;
	struct iavf_q_vector *q_vector;
	int v_idx, q_vectors;
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;

	/* queue vectors = total MSI-X vectors minus the non-queue ones */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	len = struct_size(vimi, vecmap, adapter->num_msix_vectors);
	vimi = kzalloc(len, GFP_KERNEL);
	if (!vimi)
		return;

	vimi->num_vectors = adapter->num_msix_vectors;
	/* Queue vectors first */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = &adapter->q_vectors[v_idx];
		vecmap = &vimi->vecmap[v_idx];

		vecmap->vsi_id = adapter->vsi_res->vsi_id;
		/* hardware vector ids start after the non-queue vectors */
		vecmap->vector_id = v_idx + NONQ_VECS;
		vecmap->txq_map = q_vector->ring_mask;
		vecmap->rxq_map = q_vector->ring_mask;
		vecmap->rxitr_idx = IAVF_RX_ITR;
		vecmap->txitr_idx = IAVF_TX_ITR;
	}
	/* Misc vector last - this is only for AdminQ messages */
	vecmap = &vimi->vecmap[v_idx];
	vecmap->vsi_id = adapter->vsi_res->vsi_id;
	vecmap->vector_id = 0;
	vecmap->txq_map = 0;
	vecmap->rxq_map = 0;

	adapter->aq_required &= ~IAVF_FLAG_AQ_MAP_VECTORS;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
			 (u8 *)vimi, len);
	kfree(vimi);
}

/**
 * iavf_add_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF add one or more addresses to our filters.
 **/
void iavf_add_ether_addrs(struct iavf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct iavf_mac_filter *f;
	int i = 0, count = 0;
	bool more = false;	/* true if this AQ buffer can't hold them all */
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;

	len = struct_size(veal, list, count);
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		/* send as many as fit; leave aq_required set (via more)
		 * so the remainder goes in a follow-up request
		 */
		dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
		count = (IAVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_ether_addr_list)) /
			sizeof(struct virtchnl_ether_addr);
		len = struct_size(veal, list, count);
		more = true;
	}

	/* GFP_ATOMIC: we are inside spin_lock_bh and may not sleep */
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			i++;
			f->add = false;
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len);
	kfree(veal);
}

/**
 * iavf_del_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more addresses from our filters.
 **/
void iavf_del_ether_addrs(struct iavf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct iavf_mac_filter *f, *ftmp;
	int i = 0, count = 0;
	bool more = false;	/* true if this AQ buffer can't hold them all */
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;

	len = struct_size(veal, list, count);
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
		count = (IAVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_ether_addr_list)) /
			sizeof(struct virtchnl_ether_addr);
		len = struct_size(veal, list, count);
		more = true;
	}
	/* GFP_ATOMIC: we are inside spin_lock_bh and may not sleep */
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	/* filters are freed as they are copied into the request */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		if (f->remove) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			i++;
			list_del(&f->list);
			kfree(f);
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len);
	kfree(veal);
}

/**
 * iavf_mac_add_ok
 * @adapter: adapter structure
 *
 * Submit list of filters based on PF response.
 **/
static void iavf_mac_add_ok(struct iavf_adapter *adapter)
{
	struct iavf_mac_filter *f, *ftmp;

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	/* PF accepted the adds: the filters are no longer "new" */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		f->is_new_mac = false;
	}
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_mac_add_reject
 * @adapter: adapter structure
 *
 * Remove filters from list based on PF response.
 **/
static void iavf_mac_add_reject(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct iavf_mac_filter *f, *ftmp;

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		/* never let the netdev's own address be scheduled for
		 * removal just because the PF rejected an add
		 */
		if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
			f->remove = false;

		/* drop filters the PF refused to program */
		if (f->is_new_mac) {
			list_del(&f->list);
			kfree(f);
		}
	}
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_add_vlans
 * @adapter: adapter structure
 *
 * Request that the PF add one or more VLAN filters to our VSI.
 **/
void iavf_add_vlans(struct iavf_adapter *adapter)
{
	struct virtchnl_vlan_filter_list *vvfl;
	int len, i = 0, count = 0;
	struct iavf_vlan_filter *f;
	bool more = false;	/* true if this AQ buffer can't hold them all */

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count || !VLAN_ALLOWED(adapter)) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_VLAN;

	/* NOTE(review): manual size arithmetic rather than struct_size();
	 * presumably intentional because of the virtchnl struct's layout —
	 * confirm against include/linux/avf/virtchnl.h before changing.
	 */
	len = sizeof(struct virtchnl_vlan_filter_list) +
	      (count * sizeof(u16));
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		/* send as many as fit; the rest go in a later request */
		dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
		count = (IAVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_vlan_filter_list)) /
			sizeof(u16);
		len = sizeof(struct virtchnl_vlan_filter_list) +
		      (count * sizeof(u16));
		more = true;
	}
	/* GFP_ATOMIC: we are inside spin_lock_bh and may not sleep */
	vvfl = kzalloc(len, GFP_ATOMIC);
	if (!vvfl) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	vvfl->vsi_id = adapter->vsi_res->vsi_id;
	vvfl->num_elements = count;
	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->add) {
			vvfl->vlan_id[i] = f->vlan;
			i++;
			f->add = false;
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
	kfree(vvfl);
}

/**
 * iavf_del_vlans
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more VLAN filters from our VSI.
659 **/ 660 void iavf_del_vlans(struct iavf_adapter *adapter) 661 { 662 struct virtchnl_vlan_filter_list *vvfl; 663 struct iavf_vlan_filter *f, *ftmp; 664 int len, i = 0, count = 0; 665 bool more = false; 666 667 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 668 /* bail because we already have a command pending */ 669 dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n", 670 adapter->current_op); 671 return; 672 } 673 674 spin_lock_bh(&adapter->mac_vlan_list_lock); 675 676 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { 677 /* since VLAN capabilities are not allowed, we dont want to send 678 * a VLAN delete request because it will most likely fail and 679 * create unnecessary errors/noise, so just free the VLAN 680 * filters marked for removal to enable bailing out before 681 * sending a virtchnl message 682 */ 683 if (f->remove && !VLAN_ALLOWED(adapter)) { 684 list_del(&f->list); 685 kfree(f); 686 } else if (f->remove) { 687 count++; 688 } 689 } 690 if (!count) { 691 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; 692 spin_unlock_bh(&adapter->mac_vlan_list_lock); 693 return; 694 } 695 adapter->current_op = VIRTCHNL_OP_DEL_VLAN; 696 697 len = sizeof(struct virtchnl_vlan_filter_list) + 698 (count * sizeof(u16)); 699 if (len > IAVF_MAX_AQ_BUF_SIZE) { 700 dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); 701 count = (IAVF_MAX_AQ_BUF_SIZE - 702 sizeof(struct virtchnl_vlan_filter_list)) / 703 sizeof(u16); 704 len = sizeof(struct virtchnl_vlan_filter_list) + 705 (count * sizeof(u16)); 706 more = true; 707 } 708 vvfl = kzalloc(len, GFP_ATOMIC); 709 if (!vvfl) { 710 spin_unlock_bh(&adapter->mac_vlan_list_lock); 711 return; 712 } 713 714 vvfl->vsi_id = adapter->vsi_res->vsi_id; 715 vvfl->num_elements = count; 716 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { 717 if (f->remove) { 718 vvfl->vlan_id[i] = f->vlan; 719 i++; 720 list_del(&f->list); 721 kfree(f); 722 if (i == 
count) 723 break; 724 } 725 } 726 if (!more) 727 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; 728 729 spin_unlock_bh(&adapter->mac_vlan_list_lock); 730 731 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); 732 kfree(vvfl); 733 } 734 735 /** 736 * iavf_set_promiscuous 737 * @adapter: adapter structure 738 * @flags: bitmask to control unicast/multicast promiscuous. 739 * 740 * Request that the PF enable promiscuous mode for our VSI. 741 **/ 742 void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags) 743 { 744 struct virtchnl_promisc_info vpi; 745 int promisc_all; 746 747 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 748 /* bail because we already have a command pending */ 749 dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n", 750 adapter->current_op); 751 return; 752 } 753 754 promisc_all = FLAG_VF_UNICAST_PROMISC | 755 FLAG_VF_MULTICAST_PROMISC; 756 if ((flags & promisc_all) == promisc_all) { 757 adapter->flags |= IAVF_FLAG_PROMISC_ON; 758 adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC; 759 dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n"); 760 } 761 762 if (flags & FLAG_VF_MULTICAST_PROMISC) { 763 adapter->flags |= IAVF_FLAG_ALLMULTI_ON; 764 adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI; 765 dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n", 766 adapter->netdev->name); 767 } 768 769 if (!flags) { 770 if (adapter->flags & IAVF_FLAG_PROMISC_ON) { 771 adapter->flags &= ~IAVF_FLAG_PROMISC_ON; 772 adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC; 773 dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); 774 } 775 776 if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) { 777 adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON; 778 adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI; 779 dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n", 780 adapter->netdev->name); 781 } 782 } 783 784 adapter->current_op = 
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; 785 vpi.vsi_id = adapter->vsi_res->vsi_id; 786 vpi.flags = flags; 787 iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, 788 (u8 *)&vpi, sizeof(vpi)); 789 } 790 791 /** 792 * iavf_request_stats 793 * @adapter: adapter structure 794 * 795 * Request VSI statistics from PF. 796 **/ 797 void iavf_request_stats(struct iavf_adapter *adapter) 798 { 799 struct virtchnl_queue_select vqs; 800 801 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 802 /* no error message, this isn't crucial */ 803 return; 804 } 805 806 adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS; 807 adapter->current_op = VIRTCHNL_OP_GET_STATS; 808 vqs.vsi_id = adapter->vsi_res->vsi_id; 809 /* queue maps are ignored for this message - only the vsi is used */ 810 if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs, 811 sizeof(vqs))) 812 /* if the request failed, don't lock out others */ 813 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 814 } 815 816 /** 817 * iavf_get_hena 818 * @adapter: adapter structure 819 * 820 * Request hash enable capabilities from PF 821 **/ 822 void iavf_get_hena(struct iavf_adapter *adapter) 823 { 824 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 825 /* bail because we already have a command pending */ 826 dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n", 827 adapter->current_op); 828 return; 829 } 830 adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS; 831 adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA; 832 iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0); 833 } 834 835 /** 836 * iavf_set_hena 837 * @adapter: adapter structure 838 * 839 * Request the PF to set our RSS hash capabilities 840 **/ 841 void iavf_set_hena(struct iavf_adapter *adapter) 842 { 843 struct virtchnl_rss_hena vrh; 844 845 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 846 /* bail because we already have a command pending */ 847 dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, 
command %d pending\n", 848 adapter->current_op); 849 return; 850 } 851 vrh.hena = adapter->hena; 852 adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA; 853 adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA; 854 iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&vrh, 855 sizeof(vrh)); 856 } 857 858 /** 859 * iavf_set_rss_key 860 * @adapter: adapter structure 861 * 862 * Request the PF to set our RSS hash key 863 **/ 864 void iavf_set_rss_key(struct iavf_adapter *adapter) 865 { 866 struct virtchnl_rss_key *vrk; 867 int len; 868 869 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 870 /* bail because we already have a command pending */ 871 dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n", 872 adapter->current_op); 873 return; 874 } 875 len = sizeof(struct virtchnl_rss_key) + 876 (adapter->rss_key_size * sizeof(u8)) - 1; 877 vrk = kzalloc(len, GFP_KERNEL); 878 if (!vrk) 879 return; 880 vrk->vsi_id = adapter->vsi.id; 881 vrk->key_len = adapter->rss_key_size; 882 memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size); 883 884 adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY; 885 adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_KEY; 886 iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len); 887 kfree(vrk); 888 } 889 890 /** 891 * iavf_set_rss_lut 892 * @adapter: adapter structure 893 * 894 * Request the PF to set our RSS lookup table 895 **/ 896 void iavf_set_rss_lut(struct iavf_adapter *adapter) 897 { 898 struct virtchnl_rss_lut *vrl; 899 int len; 900 901 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 902 /* bail because we already have a command pending */ 903 dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n", 904 adapter->current_op); 905 return; 906 } 907 len = sizeof(struct virtchnl_rss_lut) + 908 (adapter->rss_lut_size * sizeof(u8)) - 1; 909 vrl = kzalloc(len, GFP_KERNEL); 910 if (!vrl) 911 return; 912 vrl->vsi_id = adapter->vsi.id; 913 vrl->lut_entries = adapter->rss_lut_size; 914 memcpy(vrl->lut, 
adapter->rss_lut, adapter->rss_lut_size); 915 adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT; 916 adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_LUT; 917 iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)vrl, len); 918 kfree(vrl); 919 } 920 921 /** 922 * iavf_enable_vlan_stripping 923 * @adapter: adapter structure 924 * 925 * Request VLAN header stripping to be enabled 926 **/ 927 void iavf_enable_vlan_stripping(struct iavf_adapter *adapter) 928 { 929 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 930 /* bail because we already have a command pending */ 931 dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n", 932 adapter->current_op); 933 return; 934 } 935 adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING; 936 adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; 937 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, NULL, 0); 938 } 939 940 /** 941 * iavf_disable_vlan_stripping 942 * @adapter: adapter structure 943 * 944 * Request VLAN header stripping to be disabled 945 **/ 946 void iavf_disable_vlan_stripping(struct iavf_adapter *adapter) 947 { 948 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 949 /* bail because we already have a command pending */ 950 dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n", 951 adapter->current_op); 952 return; 953 } 954 adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING; 955 adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; 956 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0); 957 } 958 959 #define IAVF_MAX_SPEED_STRLEN 13 960 961 /** 962 * iavf_print_link_message - print link up or down 963 * @adapter: adapter structure 964 * 965 * Log a message telling the world of our wonderous link status 966 */ 967 static void iavf_print_link_message(struct iavf_adapter *adapter) 968 { 969 struct net_device *netdev = adapter->netdev; 970 int link_speed_mbps; 971 char *speed; 972 973 if (!adapter->link_up) { 974 
		netdev_info(netdev, "NIC Link is Down\n");
		return;
	}

	speed = kzalloc(IAVF_MAX_SPEED_STRLEN, GFP_KERNEL);
	if (!speed)
		return;

	/* With ADV_LINK_SUPPORT the PF reports the speed directly in Mbps,
	 * so no translation from the legacy virtchnl speed enum is needed.
	 */
	if (ADV_LINK_SUPPORT(adapter)) {
		link_speed_mbps = adapter->link_speed_mbps;
		goto print_link_msg;
	}

	/* Legacy PFs report one of the fixed virtchnl speed enum values;
	 * map it to the ethtool SPEED_* constant in Mbps.
	 */
	switch (adapter->link_speed) {
	case VIRTCHNL_LINK_SPEED_40GB:
		link_speed_mbps = SPEED_40000;
		break;
	case VIRTCHNL_LINK_SPEED_25GB:
		link_speed_mbps = SPEED_25000;
		break;
	case VIRTCHNL_LINK_SPEED_20GB:
		link_speed_mbps = SPEED_20000;
		break;
	case VIRTCHNL_LINK_SPEED_10GB:
		link_speed_mbps = SPEED_10000;
		break;
	case VIRTCHNL_LINK_SPEED_5GB:
		link_speed_mbps = SPEED_5000;
		break;
	case VIRTCHNL_LINK_SPEED_2_5GB:
		link_speed_mbps = SPEED_2500;
		break;
	case VIRTCHNL_LINK_SPEED_1GB:
		link_speed_mbps = SPEED_1000;
		break;
	case VIRTCHNL_LINK_SPEED_100MB:
		link_speed_mbps = SPEED_100;
		break;
	default:
		link_speed_mbps = SPEED_UNKNOWN;
		break;
	}

print_link_msg:
	if (link_speed_mbps > SPEED_1000) {
		if (link_speed_mbps == SPEED_2500)
			snprintf(speed, IAVF_MAX_SPEED_STRLEN, "2.5 Gbps");
		else
			/* convert to Gbps inline */
			snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s",
				 link_speed_mbps / 1000, "Gbps");
	} else if (link_speed_mbps == SPEED_UNKNOWN) {
		snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps");
	} else {
		snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s",
			 link_speed_mbps, "Mbps");
	}

	netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed);
	kfree(speed);
}

/**
 * iavf_get_vpe_link_status
 * @adapter: adapter structure
 * @vpe: virtchnl_pf_event structure
 *
 * Helper function for determining the link status from a PF event;
 * reads the advanced or legacy event field depending on capability.
 **/
static bool
iavf_get_vpe_link_status(struct iavf_adapter *adapter,
			 struct virtchnl_pf_event *vpe)
{
	if (ADV_LINK_SUPPORT(adapter))
		return vpe->event_data.link_event_adv.link_status;
	else
		return vpe->event_data.link_event.link_status;
}

/**
 * iavf_set_adapter_link_speed_from_vpe
 * @adapter: adapter structure for which we are setting the link speed
 * @vpe: virtchnl_pf_event structure that contains the link speed we are setting
 *
 * Helper function for setting iavf_adapter link speed. With advanced link
 * support the speed is stored in Mbps; otherwise the virtchnl enum is kept.
 **/
static void
iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter,
				     struct virtchnl_pf_event *vpe)
{
	if (ADV_LINK_SUPPORT(adapter))
		adapter->link_speed_mbps =
			vpe->event_data.link_event_adv.link_speed;
	else
		adapter->link_speed = vpe->event_data.link_event.link_speed;
}

/**
 * iavf_enable_channels
 * @adapter: adapter structure
 *
 * Request that the PF enable channels as specified by
 * the user via tc tool.
 **/
void iavf_enable_channels(struct iavf_adapter *adapter)
{
	struct virtchnl_tc_info *vti = NULL;
	size_t len;
	int i;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
			adapter->current_op);
		return;
	}

	/* struct virtchnl_tc_info already contains one list element,
	 * hence num_tc - 1 additional elements.
	 */
	len = struct_size(vti, list, adapter->num_tc - 1);
	vti = kzalloc(len, GFP_KERNEL);
	if (!vti)
		return;
	vti->num_tc = adapter->num_tc;
	for (i = 0; i < vti->num_tc; i++) {
		vti->list[i].count = adapter->ch_config.ch_info[i].count;
		vti->list[i].offset = adapter->ch_config.ch_info[i].offset;
		vti->list[i].pad = 0;
		vti->list[i].max_tx_rate =
				adapter->ch_config.ch_info[i].max_tx_rate;
	}

	adapter->ch_config.state = __IAVF_TC_RUNNING;
	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
	adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS;
	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CHANNELS;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, (u8 *)vti, len);
	kfree(vti);
}

/**
 * iavf_disable_channels
 * @adapter: adapter structure
 *
 * Request that the PF disable channels that are configured
 **/
void iavf_disable_channels(struct iavf_adapter *adapter)
{
	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
			adapter->current_op);
		return;
	}

	adapter->ch_config.state = __IAVF_TC_INVALID;
	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
	adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS;
	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CHANNELS;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, NULL, 0);
}

/**
 * iavf_print_cloud_filter
 * @adapter: adapter structure
 * @f: cloud filter to print
 *
 * Print the cloud filter (TCP over IPv4 or IPv6 flows only).
 **/
static void iavf_print_cloud_filter(struct iavf_adapter *adapter,
				    struct virtchnl_filter *f)
{
	switch (f->flow_type) {
	case VIRTCHNL_TCP_V4_FLOW:
		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n",
			 &f->data.tcp_spec.dst_mac,
			 &f->data.tcp_spec.src_mac,
			 ntohs(f->data.tcp_spec.vlan_id),
			 &f->data.tcp_spec.dst_ip[0],
			 &f->data.tcp_spec.src_ip[0],
			 ntohs(f->data.tcp_spec.dst_port),
			 ntohs(f->data.tcp_spec.src_port));
		break;
	case VIRTCHNL_TCP_V6_FLOW:
		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n",
			 &f->data.tcp_spec.dst_mac,
			 &f->data.tcp_spec.src_mac,
			 ntohs(f->data.tcp_spec.vlan_id),
			 &f->data.tcp_spec.dst_ip,
			 &f->data.tcp_spec.src_ip,
			 ntohs(f->data.tcp_spec.dst_port),
			 ntohs(f->data.tcp_spec.src_port));
		break;
	}
}

/**
 * iavf_add_cloud_filter
 * @adapter: adapter structure
 *
 * Request that the PF add cloud filters as specified
 * by the user via tc tool.
 **/
void iavf_add_cloud_filter(struct iavf_adapter *adapter)
{
	struct iavf_cloud_filter *cf;
	struct virtchnl_filter *f;
	int len = 0, count = 0;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n",
			adapter->current_op);
		return;
	}
	/* if nothing is marked for add, clear the request flag and bail */
	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
		if (cf->add) {
			count++;
			break;
		}
	}
	if (!count) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER;

	len = sizeof(struct virtchnl_filter);
	f = kzalloc(len, GFP_KERNEL);
	if (!f)
		return;

	/* one ADD message is sent to the PF per pending filter */
	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
		if (cf->add) {
			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
			cf->add = false;
			cf->state = __IAVF_CF_ADD_PENDING;
			iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_CLOUD_FILTER,
					 (u8 *)f, len);
		}
	}
	kfree(f);
}

/**
 * iavf_del_cloud_filter
 * @adapter: adapter structure
 *
 * Request that the PF delete cloud filters as specified
 * by the user via tc tool.
1222 **/ 1223 void iavf_del_cloud_filter(struct iavf_adapter *adapter) 1224 { 1225 struct iavf_cloud_filter *cf, *cftmp; 1226 struct virtchnl_filter *f; 1227 int len = 0, count = 0; 1228 1229 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1230 /* bail because we already have a command pending */ 1231 dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n", 1232 adapter->current_op); 1233 return; 1234 } 1235 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1236 if (cf->del) { 1237 count++; 1238 break; 1239 } 1240 } 1241 if (!count) { 1242 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 1243 return; 1244 } 1245 adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER; 1246 1247 len = sizeof(struct virtchnl_filter); 1248 f = kzalloc(len, GFP_KERNEL); 1249 if (!f) 1250 return; 1251 1252 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 1253 if (cf->del) { 1254 memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); 1255 cf->del = false; 1256 cf->state = __IAVF_CF_DEL_PENDING; 1257 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_CLOUD_FILTER, 1258 (u8 *)f, len); 1259 } 1260 } 1261 kfree(f); 1262 } 1263 1264 /** 1265 * iavf_add_fdir_filter 1266 * @adapter: the VF adapter structure 1267 * 1268 * Request that the PF add Flow Director filters as specified 1269 * by the user via ethtool. 
 **/
void iavf_add_fdir_filter(struct iavf_adapter *adapter)
{
	struct iavf_fdir_fltr *fdir;
	struct virtchnl_fdir_add *f;
	bool process_fltr = false;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n",
			adapter->current_op);
		return;
	}

	len = sizeof(struct virtchnl_fdir_add);
	f = kzalloc(len, GFP_KERNEL);
	if (!f)
		return;

	/* snapshot the first requested filter's virtchnl message under the
	 * lock so it can be sent to the PF after the lock is dropped
	 */
	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
		if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
			process_fltr = true;
			fdir->state = IAVF_FDIR_FLTR_ADD_PENDING;
			memcpy(f, &fdir->vc_add_msg, len);
			break;
		}
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	if (!process_fltr) {
		/* prevent iavf_add_fdir_filter() from being called when there
		 * are no filters to add
		 */
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER;
		kfree(f);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len);
	kfree(f);
}

/**
 * iavf_del_fdir_filter
 * @adapter: the VF adapter structure
 *
 * Request that the PF delete Flow Director filters as specified
 * by the user via ethtool.
 **/
void iavf_del_fdir_filter(struct iavf_adapter *adapter)
{
	struct iavf_fdir_fltr *fdir;
	struct virtchnl_fdir_del f;
	bool process_fltr = false;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n",
			adapter->current_op);
		return;
	}

	len = sizeof(struct virtchnl_fdir_del);

	/* build the DEL message on the stack under the lock; only vsi_id and
	 * flow_id are needed to identify the filter to the PF
	 */
	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
		if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
			process_fltr = true;
			memset(&f, 0, len);
			f.vsi_id = fdir->vc_add_msg.vsi_id;
			f.flow_id = fdir->flow_id;
			fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
			break;
		}
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	if (!process_fltr) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER;
		return;
	}

	adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len);
}

/**
 * iavf_add_adv_rss_cfg
 * @adapter: the VF adapter structure
 *
 * Request that the PF add RSS configuration as specified
 * by the user via ethtool.
1365 **/ 1366 void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter) 1367 { 1368 struct virtchnl_rss_cfg *rss_cfg; 1369 struct iavf_adv_rss *rss; 1370 bool process_rss = false; 1371 int len; 1372 1373 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1374 /* bail because we already have a command pending */ 1375 dev_err(&adapter->pdev->dev, "Cannot add RSS configuration, command %d pending\n", 1376 adapter->current_op); 1377 return; 1378 } 1379 1380 len = sizeof(struct virtchnl_rss_cfg); 1381 rss_cfg = kzalloc(len, GFP_KERNEL); 1382 if (!rss_cfg) 1383 return; 1384 1385 spin_lock_bh(&adapter->adv_rss_lock); 1386 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { 1387 if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) { 1388 process_rss = true; 1389 rss->state = IAVF_ADV_RSS_ADD_PENDING; 1390 memcpy(rss_cfg, &rss->cfg_msg, len); 1391 iavf_print_adv_rss_cfg(adapter, rss, 1392 "Input set change for", 1393 "is pending"); 1394 break; 1395 } 1396 } 1397 spin_unlock_bh(&adapter->adv_rss_lock); 1398 1399 if (process_rss) { 1400 adapter->current_op = VIRTCHNL_OP_ADD_RSS_CFG; 1401 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_RSS_CFG, 1402 (u8 *)rss_cfg, len); 1403 } else { 1404 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_ADV_RSS_CFG; 1405 } 1406 1407 kfree(rss_cfg); 1408 } 1409 1410 /** 1411 * iavf_del_adv_rss_cfg 1412 * @adapter: the VF adapter structure 1413 * 1414 * Request that the PF delete RSS configuration as specified 1415 * by the user via ethtool. 
 **/
void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter)
{
	struct virtchnl_rss_cfg *rss_cfg;
	struct iavf_adv_rss *rss;
	bool process_rss = false;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove RSS configuration, command %d pending\n",
			adapter->current_op);
		return;
	}

	len = sizeof(struct virtchnl_rss_cfg);
	rss_cfg = kzalloc(len, GFP_KERNEL);
	if (!rss_cfg)
		return;

	/* snapshot the first DEL-requested configuration under the lock */
	spin_lock_bh(&adapter->adv_rss_lock);
	list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
		if (rss->state == IAVF_ADV_RSS_DEL_REQUEST) {
			process_rss = true;
			rss->state = IAVF_ADV_RSS_DEL_PENDING;
			memcpy(rss_cfg, &rss->cfg_msg, len);
			break;
		}
	}
	spin_unlock_bh(&adapter->adv_rss_lock);

	if (process_rss) {
		adapter->current_op = VIRTCHNL_OP_DEL_RSS_CFG;
		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_RSS_CFG,
				 (u8 *)rss_cfg, len);
	} else {
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
	}

	kfree(rss_cfg);
}

/**
 * iavf_request_reset
 * @adapter: adapter structure
 *
 * Request that the PF reset this VF. No response is expected.
 **/
void iavf_request_reset(struct iavf_adapter *adapter)
{
	/* Don't check CURRENT_OP - this is always higher priority */
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}

/**
 * iavf_virtchnl_completion
 * @adapter: adapter structure
 * @v_opcode: opcode sent by PF
 * @v_retval: retval sent by PF
 * @msg: message sent by PF
 * @msglen: message length
 *
 * Asynchronous completion function for admin queue messages. Rather than busy
 * wait, we fire off our requests and assume that no errors will be returned.
1481 * This function handles the reply messages. 1482 **/ 1483 void iavf_virtchnl_completion(struct iavf_adapter *adapter, 1484 enum virtchnl_ops v_opcode, 1485 enum iavf_status v_retval, u8 *msg, u16 msglen) 1486 { 1487 struct net_device *netdev = adapter->netdev; 1488 1489 if (v_opcode == VIRTCHNL_OP_EVENT) { 1490 struct virtchnl_pf_event *vpe = 1491 (struct virtchnl_pf_event *)msg; 1492 bool link_up = iavf_get_vpe_link_status(adapter, vpe); 1493 1494 switch (vpe->event) { 1495 case VIRTCHNL_EVENT_LINK_CHANGE: 1496 iavf_set_adapter_link_speed_from_vpe(adapter, vpe); 1497 1498 /* we've already got the right link status, bail */ 1499 if (adapter->link_up == link_up) 1500 break; 1501 1502 if (link_up) { 1503 /* If we get link up message and start queues 1504 * before our queues are configured it will 1505 * trigger a TX hang. In that case, just ignore 1506 * the link status message,we'll get another one 1507 * after we enable queues and actually prepared 1508 * to send traffic. 1509 */ 1510 if (adapter->state != __IAVF_RUNNING) 1511 break; 1512 1513 /* For ADq enabled VF, we reconfigure VSIs and 1514 * re-allocate queues. Hence wait till all 1515 * queues are enabled. 
1516 */ 1517 if (adapter->flags & 1518 IAVF_FLAG_QUEUES_DISABLED) 1519 break; 1520 } 1521 1522 adapter->link_up = link_up; 1523 if (link_up) { 1524 netif_tx_start_all_queues(netdev); 1525 netif_carrier_on(netdev); 1526 } else { 1527 netif_tx_stop_all_queues(netdev); 1528 netif_carrier_off(netdev); 1529 } 1530 iavf_print_link_message(adapter); 1531 break; 1532 case VIRTCHNL_EVENT_RESET_IMPENDING: 1533 dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n"); 1534 if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) { 1535 adapter->flags |= IAVF_FLAG_RESET_PENDING; 1536 dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); 1537 queue_work(iavf_wq, &adapter->reset_task); 1538 } 1539 break; 1540 default: 1541 dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n", 1542 vpe->event); 1543 break; 1544 } 1545 return; 1546 } 1547 if (v_retval) { 1548 switch (v_opcode) { 1549 case VIRTCHNL_OP_ADD_VLAN: 1550 dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", 1551 iavf_stat_str(&adapter->hw, v_retval)); 1552 break; 1553 case VIRTCHNL_OP_ADD_ETH_ADDR: 1554 dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n", 1555 iavf_stat_str(&adapter->hw, v_retval)); 1556 iavf_mac_add_reject(adapter); 1557 /* restore administratively set MAC address */ 1558 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 1559 break; 1560 case VIRTCHNL_OP_DEL_VLAN: 1561 dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n", 1562 iavf_stat_str(&adapter->hw, v_retval)); 1563 break; 1564 case VIRTCHNL_OP_DEL_ETH_ADDR: 1565 dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n", 1566 iavf_stat_str(&adapter->hw, v_retval)); 1567 break; 1568 case VIRTCHNL_OP_ENABLE_CHANNELS: 1569 dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n", 1570 iavf_stat_str(&adapter->hw, v_retval)); 1571 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 1572 adapter->ch_config.state = __IAVF_TC_INVALID; 1573 
netdev_reset_tc(netdev); 1574 netif_tx_start_all_queues(netdev); 1575 break; 1576 case VIRTCHNL_OP_DISABLE_CHANNELS: 1577 dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n", 1578 iavf_stat_str(&adapter->hw, v_retval)); 1579 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 1580 adapter->ch_config.state = __IAVF_TC_RUNNING; 1581 netif_tx_start_all_queues(netdev); 1582 break; 1583 case VIRTCHNL_OP_ADD_CLOUD_FILTER: { 1584 struct iavf_cloud_filter *cf, *cftmp; 1585 1586 list_for_each_entry_safe(cf, cftmp, 1587 &adapter->cloud_filter_list, 1588 list) { 1589 if (cf->state == __IAVF_CF_ADD_PENDING) { 1590 cf->state = __IAVF_CF_INVALID; 1591 dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n", 1592 iavf_stat_str(&adapter->hw, 1593 v_retval)); 1594 iavf_print_cloud_filter(adapter, 1595 &cf->f); 1596 list_del(&cf->list); 1597 kfree(cf); 1598 adapter->num_cloud_filters--; 1599 } 1600 } 1601 } 1602 break; 1603 case VIRTCHNL_OP_DEL_CLOUD_FILTER: { 1604 struct iavf_cloud_filter *cf; 1605 1606 list_for_each_entry(cf, &adapter->cloud_filter_list, 1607 list) { 1608 if (cf->state == __IAVF_CF_DEL_PENDING) { 1609 cf->state = __IAVF_CF_ACTIVE; 1610 dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n", 1611 iavf_stat_str(&adapter->hw, 1612 v_retval)); 1613 iavf_print_cloud_filter(adapter, 1614 &cf->f); 1615 } 1616 } 1617 } 1618 break; 1619 case VIRTCHNL_OP_ADD_FDIR_FILTER: { 1620 struct iavf_fdir_fltr *fdir, *fdir_tmp; 1621 1622 spin_lock_bh(&adapter->fdir_fltr_lock); 1623 list_for_each_entry_safe(fdir, fdir_tmp, 1624 &adapter->fdir_list_head, 1625 list) { 1626 if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) { 1627 dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n", 1628 iavf_stat_str(&adapter->hw, 1629 v_retval)); 1630 iavf_print_fdir_fltr(adapter, fdir); 1631 if (msglen) 1632 dev_err(&adapter->pdev->dev, 1633 "%s\n", msg); 1634 list_del(&fdir->list); 1635 kfree(fdir); 1636 
adapter->fdir_active_fltr--; 1637 } 1638 } 1639 spin_unlock_bh(&adapter->fdir_fltr_lock); 1640 } 1641 break; 1642 case VIRTCHNL_OP_DEL_FDIR_FILTER: { 1643 struct iavf_fdir_fltr *fdir; 1644 1645 spin_lock_bh(&adapter->fdir_fltr_lock); 1646 list_for_each_entry(fdir, &adapter->fdir_list_head, 1647 list) { 1648 if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { 1649 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 1650 dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n", 1651 iavf_stat_str(&adapter->hw, 1652 v_retval)); 1653 iavf_print_fdir_fltr(adapter, fdir); 1654 } 1655 } 1656 spin_unlock_bh(&adapter->fdir_fltr_lock); 1657 } 1658 break; 1659 case VIRTCHNL_OP_ADD_RSS_CFG: { 1660 struct iavf_adv_rss *rss, *rss_tmp; 1661 1662 spin_lock_bh(&adapter->adv_rss_lock); 1663 list_for_each_entry_safe(rss, rss_tmp, 1664 &adapter->adv_rss_list_head, 1665 list) { 1666 if (rss->state == IAVF_ADV_RSS_ADD_PENDING) { 1667 iavf_print_adv_rss_cfg(adapter, rss, 1668 "Failed to change the input set for", 1669 NULL); 1670 list_del(&rss->list); 1671 kfree(rss); 1672 } 1673 } 1674 spin_unlock_bh(&adapter->adv_rss_lock); 1675 } 1676 break; 1677 case VIRTCHNL_OP_DEL_RSS_CFG: { 1678 struct iavf_adv_rss *rss; 1679 1680 spin_lock_bh(&adapter->adv_rss_lock); 1681 list_for_each_entry(rss, &adapter->adv_rss_list_head, 1682 list) { 1683 if (rss->state == IAVF_ADV_RSS_DEL_PENDING) { 1684 rss->state = IAVF_ADV_RSS_ACTIVE; 1685 dev_err(&adapter->pdev->dev, "Failed to delete RSS configuration, error %s\n", 1686 iavf_stat_str(&adapter->hw, 1687 v_retval)); 1688 } 1689 } 1690 spin_unlock_bh(&adapter->adv_rss_lock); 1691 } 1692 break; 1693 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 1694 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 1695 dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); 1696 break; 1697 default: 1698 dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", 1699 v_retval, iavf_stat_str(&adapter->hw, v_retval), 
1700 v_opcode); 1701 } 1702 } 1703 switch (v_opcode) { 1704 case VIRTCHNL_OP_ADD_ETH_ADDR: 1705 if (!v_retval) 1706 iavf_mac_add_ok(adapter); 1707 if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr)) 1708 eth_hw_addr_set(netdev, adapter->hw.mac.addr); 1709 break; 1710 case VIRTCHNL_OP_GET_STATS: { 1711 struct iavf_eth_stats *stats = 1712 (struct iavf_eth_stats *)msg; 1713 netdev->stats.rx_packets = stats->rx_unicast + 1714 stats->rx_multicast + 1715 stats->rx_broadcast; 1716 netdev->stats.tx_packets = stats->tx_unicast + 1717 stats->tx_multicast + 1718 stats->tx_broadcast; 1719 netdev->stats.rx_bytes = stats->rx_bytes; 1720 netdev->stats.tx_bytes = stats->tx_bytes; 1721 netdev->stats.tx_errors = stats->tx_errors; 1722 netdev->stats.rx_dropped = stats->rx_discards; 1723 netdev->stats.tx_dropped = stats->tx_discards; 1724 adapter->current_stats = *stats; 1725 } 1726 break; 1727 case VIRTCHNL_OP_GET_VF_RESOURCES: { 1728 u16 len = sizeof(struct virtchnl_vf_resource) + 1729 IAVF_MAX_VF_VSI * 1730 sizeof(struct virtchnl_vsi_resource); 1731 memcpy(adapter->vf_res, msg, min(msglen, len)); 1732 iavf_validate_num_queues(adapter); 1733 iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res); 1734 if (is_zero_ether_addr(adapter->hw.mac.addr)) { 1735 /* restore current mac address */ 1736 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 1737 } else { 1738 /* refresh current mac address if changed */ 1739 eth_hw_addr_set(netdev, adapter->hw.mac.addr); 1740 ether_addr_copy(netdev->perm_addr, 1741 adapter->hw.mac.addr); 1742 } 1743 spin_lock_bh(&adapter->mac_vlan_list_lock); 1744 iavf_add_filter(adapter, adapter->hw.mac.addr); 1745 1746 if (VLAN_ALLOWED(adapter)) { 1747 if (!list_empty(&adapter->vlan_filter_list)) { 1748 struct iavf_vlan_filter *vlf; 1749 1750 /* re-add all VLAN filters over virtchnl */ 1751 list_for_each_entry(vlf, 1752 &adapter->vlan_filter_list, 1753 list) 1754 vlf->add = true; 1755 1756 adapter->aq_required |= 1757 
IAVF_FLAG_AQ_ADD_VLAN_FILTER; 1758 } 1759 } 1760 1761 spin_unlock_bh(&adapter->mac_vlan_list_lock); 1762 iavf_process_config(adapter); 1763 1764 /* unlock crit_lock before acquiring rtnl_lock as other 1765 * processes holding rtnl_lock could be waiting for the same 1766 * crit_lock 1767 */ 1768 mutex_unlock(&adapter->crit_lock); 1769 rtnl_lock(); 1770 netdev_update_features(adapter->netdev); 1771 rtnl_unlock(); 1772 if (iavf_lock_timeout(&adapter->crit_lock, 10000)) 1773 dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", 1774 __FUNCTION__); 1775 1776 } 1777 break; 1778 case VIRTCHNL_OP_ENABLE_QUEUES: 1779 /* enable transmits */ 1780 iavf_irq_enable(adapter, true); 1781 adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED; 1782 break; 1783 case VIRTCHNL_OP_DISABLE_QUEUES: 1784 iavf_free_all_tx_resources(adapter); 1785 iavf_free_all_rx_resources(adapter); 1786 if (adapter->state == __IAVF_DOWN_PENDING) { 1787 iavf_change_state(adapter, __IAVF_DOWN); 1788 wake_up(&adapter->down_waitqueue); 1789 } 1790 break; 1791 case VIRTCHNL_OP_VERSION: 1792 case VIRTCHNL_OP_CONFIG_IRQ_MAP: 1793 /* Don't display an error if we get these out of sequence. 1794 * If the firmware needed to get kicked, we'll get these and 1795 * it's no problem. 1796 */ 1797 if (v_opcode != adapter->current_op) 1798 return; 1799 break; 1800 case VIRTCHNL_OP_IWARP: 1801 /* Gobble zero-length replies from the PF. They indicate that 1802 * a previous message was received OK, and the client doesn't 1803 * care about that. 
1804 */ 1805 if (msglen && CLIENT_ENABLED(adapter)) 1806 iavf_notify_client_message(&adapter->vsi, msg, msglen); 1807 break; 1808 1809 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: 1810 adapter->client_pending &= 1811 ~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP)); 1812 break; 1813 case VIRTCHNL_OP_GET_RSS_HENA_CAPS: { 1814 struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; 1815 1816 if (msglen == sizeof(*vrh)) 1817 adapter->hena = vrh->hena; 1818 else 1819 dev_warn(&adapter->pdev->dev, 1820 "Invalid message %d from PF\n", v_opcode); 1821 } 1822 break; 1823 case VIRTCHNL_OP_REQUEST_QUEUES: { 1824 struct virtchnl_vf_res_request *vfres = 1825 (struct virtchnl_vf_res_request *)msg; 1826 1827 if (vfres->num_queue_pairs != adapter->num_req_queues) { 1828 dev_info(&adapter->pdev->dev, 1829 "Requested %d queues, PF can support %d\n", 1830 adapter->num_req_queues, 1831 vfres->num_queue_pairs); 1832 adapter->num_req_queues = 0; 1833 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 1834 } 1835 } 1836 break; 1837 case VIRTCHNL_OP_ADD_CLOUD_FILTER: { 1838 struct iavf_cloud_filter *cf; 1839 1840 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1841 if (cf->state == __IAVF_CF_ADD_PENDING) 1842 cf->state = __IAVF_CF_ACTIVE; 1843 } 1844 } 1845 break; 1846 case VIRTCHNL_OP_DEL_CLOUD_FILTER: { 1847 struct iavf_cloud_filter *cf, *cftmp; 1848 1849 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, 1850 list) { 1851 if (cf->state == __IAVF_CF_DEL_PENDING) { 1852 cf->state = __IAVF_CF_INVALID; 1853 list_del(&cf->list); 1854 kfree(cf); 1855 adapter->num_cloud_filters--; 1856 } 1857 } 1858 } 1859 break; 1860 case VIRTCHNL_OP_ADD_FDIR_FILTER: { 1861 struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg; 1862 struct iavf_fdir_fltr *fdir, *fdir_tmp; 1863 1864 spin_lock_bh(&adapter->fdir_fltr_lock); 1865 list_for_each_entry_safe(fdir, fdir_tmp, 1866 &adapter->fdir_list_head, 1867 list) { 1868 if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) { 1869 if 
(add_fltr->status == VIRTCHNL_FDIR_SUCCESS) { 1870 dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n", 1871 fdir->loc); 1872 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 1873 fdir->flow_id = add_fltr->flow_id; 1874 } else { 1875 dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n", 1876 add_fltr->status); 1877 iavf_print_fdir_fltr(adapter, fdir); 1878 list_del(&fdir->list); 1879 kfree(fdir); 1880 adapter->fdir_active_fltr--; 1881 } 1882 } 1883 } 1884 spin_unlock_bh(&adapter->fdir_fltr_lock); 1885 } 1886 break; 1887 case VIRTCHNL_OP_DEL_FDIR_FILTER: { 1888 struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg; 1889 struct iavf_fdir_fltr *fdir, *fdir_tmp; 1890 1891 spin_lock_bh(&adapter->fdir_fltr_lock); 1892 list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head, 1893 list) { 1894 if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { 1895 if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) { 1896 dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n", 1897 fdir->loc); 1898 list_del(&fdir->list); 1899 kfree(fdir); 1900 adapter->fdir_active_fltr--; 1901 } else { 1902 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 1903 dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n", 1904 del_fltr->status); 1905 iavf_print_fdir_fltr(adapter, fdir); 1906 } 1907 } 1908 } 1909 spin_unlock_bh(&adapter->fdir_fltr_lock); 1910 } 1911 break; 1912 case VIRTCHNL_OP_ADD_RSS_CFG: { 1913 struct iavf_adv_rss *rss; 1914 1915 spin_lock_bh(&adapter->adv_rss_lock); 1916 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { 1917 if (rss->state == IAVF_ADV_RSS_ADD_PENDING) { 1918 iavf_print_adv_rss_cfg(adapter, rss, 1919 "Input set change for", 1920 "successful"); 1921 rss->state = IAVF_ADV_RSS_ACTIVE; 1922 } 1923 } 1924 spin_unlock_bh(&adapter->adv_rss_lock); 1925 } 1926 break; 1927 case VIRTCHNL_OP_DEL_RSS_CFG: { 1928 struct iavf_adv_rss *rss, *rss_tmp; 1929 1930 
spin_lock_bh(&adapter->adv_rss_lock); 1931 list_for_each_entry_safe(rss, rss_tmp, 1932 &adapter->adv_rss_list_head, list) { 1933 if (rss->state == IAVF_ADV_RSS_DEL_PENDING) { 1934 list_del(&rss->list); 1935 kfree(rss); 1936 } 1937 } 1938 spin_unlock_bh(&adapter->adv_rss_lock); 1939 } 1940 break; 1941 default: 1942 if (adapter->current_op && (v_opcode != adapter->current_op)) 1943 dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", 1944 adapter->current_op, v_opcode); 1945 break; 1946 } /* switch v_opcode */ 1947 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1948 } 1949