// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/net/intel/libie/rx.h>

#include "iavf.h"
#include "iavf_ptp.h"
#include "iavf_prototype.h"

/**
 * iavf_send_pf_msg
 * @adapter: adapter structure
 * @op: virtual channel opcode
 * @msg: pointer to message buffer
 * @len: message length
 *
 * Send message to PF and print status if failure.
 *
 * Return: 0 on success (including the case where PF communications are
 * known to be down and the message is silently dropped), negative errno
 * translated from the admin-queue status otherwise.
 **/
static int iavf_send_pf_msg(struct iavf_adapter *adapter,
			    enum virtchnl_ops op, u8 *msg, u16 len)
{
	struct iavf_hw *hw = &adapter->hw;
	enum iavf_status status;

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
		return 0; /* nothing to see here, move along */

	status = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
	if (status)
		dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n",
			op, iavf_stat_str(hw, status),
			iavf_aq_str(hw, hw->aq.asq_last_status));
	return iavf_status_to_errno(status);
}

/**
 * iavf_send_api_ver
 * @adapter: adapter structure
 *
 * Send API version admin queue message to the PF. The reply is not checked
 * in this function. Returns 0 if the message was successfully
 * sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
 **/
int iavf_send_api_ver(struct iavf_adapter *adapter)
{
	struct virtchnl_version_info vvi;

	vvi.major = VIRTCHNL_VERSION_MAJOR;
	vvi.minor = VIRTCHNL_VERSION_MINOR;

	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
				sizeof(vvi));
}

/**
 * iavf_poll_virtchnl_msg
 * @hw: HW configuration structure
 * @event: event to populate on success
 * @op_to_poll: requested virtchnl op to poll for
 *
 * Initialize poll for virtchnl msg matching the requested_op. Returns 0
 * if a message of the correct opcode is in the queue or an error code
 * if no message matching the op code is waiting and other failures.
 */
static int
iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event,
		       enum virtchnl_ops op_to_poll)
{
	enum virtchnl_ops received_op;
	enum iavf_status status;
	u32 v_retval;

	while (1) {
		/* When the AQ is empty, iavf_clean_arq_element will return
		 * nonzero and this loop will terminate.
		 */
		status = iavf_clean_arq_element(hw, event, NULL);
		if (status != IAVF_SUCCESS)
			return iavf_status_to_errno(status);
		received_op =
			(enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high);

		/* A PF event can arrive while we wait for the requested
		 * opcode. A reset-impending event is acted on immediately:
		 * schedule the reset and give up on the poll, since the
		 * queue is about to go away. Any other event is skipped.
		 */
		if (received_op == VIRTCHNL_OP_EVENT) {
			struct iavf_adapter *adapter = hw->back;
			struct virtchnl_pf_event *vpe =
				(struct virtchnl_pf_event *)event->msg_buf;

			if (vpe->event != VIRTCHNL_EVENT_RESET_IMPENDING)
				continue;

			dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
			if (!(adapter->flags & IAVF_FLAG_RESET_PENDING))
				iavf_schedule_reset(adapter,
						    IAVF_FLAG_RESET_PENDING);

			return -EIO;
		}

		/* Messages for other opcodes are silently dropped; only the
		 * requested opcode terminates the loop.
		 */
		if (op_to_poll == received_op)
			break;
	}

	/* cookie_low carries the PF's virtchnl status for the matched op */
	v_retval = le32_to_cpu(event->desc.cookie_low);
	return virtchnl_status_to_errno((enum virtchnl_status_code)v_retval);
}

/**
 * iavf_verify_api_ver
 * @adapter: adapter structure
 *
 * Compare API versions with the PF. Must be called after admin queue is
 * initialized. Returns 0 if API versions match, -EIO if they do not,
 * IAVF_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
 * from the firmware are propagated.
 **/
int iavf_verify_api_ver(struct iavf_adapter *adapter)
{
	struct iavf_arq_event_info event;
	int err;

	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(IAVF_MAX_AQ_BUF_SIZE, GFP_KERNEL);
	if (!event.msg_buf)
		return -ENOMEM;

	err = iavf_poll_virtchnl_msg(&adapter->hw, &event, VIRTCHNL_OP_VERSION);
	if (!err) {
		struct virtchnl_version_info *pf_vvi =
			(struct virtchnl_version_info *)event.msg_buf;

		/* cache the PF's version for later feature negotiation */
		adapter->pf_version = *pf_vvi;

		/* fail only if the PF speaks a newer API than we do */
		if (pf_vvi->major > VIRTCHNL_VERSION_MAJOR ||
		    (pf_vvi->major == VIRTCHNL_VERSION_MAJOR &&
		     pf_vvi->minor > VIRTCHNL_VERSION_MINOR))
			err = -EIO;
	}

	kfree(event.msg_buf);

	return err;
}

/**
 * iavf_send_vf_config_msg
 * @adapter: adapter structure
 *
 * Send VF configuration request admin queue message to the PF. The reply
 * is not checked in this function. Returns 0 if the message was
 * successfully sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
 **/
int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
{
	u32 caps;

	/* Advertise every offload this driver can handle; the PF replies
	 * with the subset it actually grants.
	 */
	caps = VIRTCHNL_VF_OFFLOAD_L2 |
	       VIRTCHNL_VF_OFFLOAD_RSS_PF |
	       VIRTCHNL_VF_OFFLOAD_RSS_AQ |
	       VIRTCHNL_VF_OFFLOAD_RSS_REG |
	       VIRTCHNL_VF_OFFLOAD_VLAN |
	       VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
	       VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
	       VIRTCHNL_VF_OFFLOAD_ENCAP |
	       VIRTCHNL_VF_OFFLOAD_TC_U32 |
	       VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
	       VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
	       VIRTCHNL_VF_OFFLOAD_CRC |
	       VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
	       VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
	       VIRTCHNL_VF_CAP_PTP |
	       VIRTCHNL_VF_OFFLOAD_ADQ |
	       VIRTCHNL_VF_OFFLOAD_USO |
	       VIRTCHNL_VF_OFFLOAD_FDIR_PF |
	       VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
	       VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
	       VIRTCHNL_VF_OFFLOAD_QOS;

	adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG;
	/* a v1.1+ PF takes the capability bitmap; older PFs take no payload */
	if (PF_IS_V11(adapter))
		return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
					(u8 *)&caps, sizeof(caps));
	else
		return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
					NULL, 0);
}

/* Request VLAN V2 offload capabilities; the PF's reply is handled later. */
int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter)
{
	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;

	if (!VLAN_V2_ALLOWED(adapter))
		return -EOPNOTSUPP;

	adapter->current_op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS;

	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
				NULL, 0);
}

/* Ask the PF which Rx descriptor IDs (RXDIDs) this VF may use. */
int iavf_send_vf_supported_rxdids_msg(struct iavf_adapter *adapter)
{
	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS;

	if (!IAVF_RXDID_ALLOWED(adapter))
		return -EOPNOTSUPP;

	adapter->current_op = VIRTCHNL_OP_GET_SUPPORTED_RXDIDS;

	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
				NULL, 0);
}

/**
 * iavf_send_vf_ptp_caps_msg - Send request for PTP capabilities
 * @adapter: private
adapter structure
 *
 * Send the VIRTCHNL_OP_1588_PTP_GET_CAPS command to the PF to request the PTP
 * capabilities available to this device. This includes the following
 * potential access:
 *
 * * READ_PHC - access to read the PTP hardware clock time
 * * RX_TSTAMP - access to request Rx timestamps on all received packets
 *
 * The PF will reply with the same opcode a filled out copy of the
 * virtchnl_ptp_caps structure which defines the specifics of which features
 * are accessible to this device.
 *
 * Return: 0 if success, error code otherwise.
 */
int iavf_send_vf_ptp_caps_msg(struct iavf_adapter *adapter)
{
	/* request both capabilities; the PF clears bits it will not grant */
	struct virtchnl_ptp_caps hw_caps = {
		.caps = VIRTCHNL_1588_PTP_CAP_READ_PHC |
			VIRTCHNL_1588_PTP_CAP_RX_TSTAMP
	};

	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_PTP_CAPS;

	if (!IAVF_PTP_ALLOWED(adapter))
		return -EOPNOTSUPP;

	adapter->current_op = VIRTCHNL_OP_1588_PTP_GET_CAPS;

	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_1588_PTP_GET_CAPS,
				(u8 *)&hw_caps, sizeof(hw_caps));
}

/**
 * iavf_validate_num_queues
 * @adapter: adapter structure
 *
 * Validate that the number of queues the PF has sent in
 * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle.
254 **/ 255 static void iavf_validate_num_queues(struct iavf_adapter *adapter) 256 { 257 if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) { 258 struct virtchnl_vsi_resource *vsi_res; 259 int i; 260 261 dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n", 262 adapter->vf_res->num_queue_pairs, 263 IAVF_MAX_REQ_QUEUES); 264 dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n", 265 IAVF_MAX_REQ_QUEUES); 266 adapter->vf_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES; 267 for (i = 0; i < adapter->vf_res->num_vsis; i++) { 268 vsi_res = &adapter->vf_res->vsi_res[i]; 269 vsi_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES; 270 } 271 } 272 } 273 274 /** 275 * iavf_get_vf_config 276 * @adapter: private adapter structure 277 * 278 * Get VF configuration from PF and populate hw structure. Must be called after 279 * admin queue is initialized. Busy waits until response is received from PF, 280 * with maximum timeout. Response from PF is returned in the buffer for further 281 * processing by the caller. 
 **/
int iavf_get_vf_config(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_arq_event_info event;
	u16 len;
	int err;

	len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE;
	event.buf_len = len;
	event.msg_buf = kzalloc(len, GFP_KERNEL);
	if (!event.msg_buf)
		return -ENOMEM;

	/* NOTE(review): the memcpy runs even on poll failure; the buffer is
	 * zeroed by kzalloc so vf_res gets zeros in that case — confirm
	 * callers treat err as authoritative.
	 */
	err = iavf_poll_virtchnl_msg(hw, &event, VIRTCHNL_OP_GET_VF_RESOURCES);
	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));

	/* some PFs send more queues than we should have so validate that
	 * we aren't getting too many queues
	 */
	if (!err)
		iavf_validate_num_queues(adapter);
	iavf_vf_parse_hw_config(hw, adapter->vf_res);

	kfree(event.msg_buf);

	return err;
}

/* Receive the VLAN V2 capabilities reply and cache it in the adapter. */
int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
{
	struct iavf_arq_event_info event;
	int err;
	u16 len;

	len = sizeof(struct virtchnl_vlan_caps);
	event.buf_len = len;
	event.msg_buf = kzalloc(len, GFP_KERNEL);
	if (!event.msg_buf)
		return -ENOMEM;

	err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
				     VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS);
	if (!err)
		memcpy(&adapter->vlan_v2_caps, event.msg_buf,
		       min(event.msg_len, len));

	kfree(event.msg_buf);

	return err;
}

/* Receive the supported-RXDID bitmap; the reply fits in a stack u64. */
int iavf_get_vf_supported_rxdids(struct iavf_adapter *adapter)
{
	struct iavf_arq_event_info event;
	u64 rxdids;
	int err;

	event.msg_buf = (u8 *)&rxdids;
	event.buf_len = sizeof(rxdids);

	err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
				     VIRTCHNL_OP_GET_SUPPORTED_RXDIDS);
	if (!err)
		adapter->supp_rxdids = rxdids;

	return err;
}

/* Receive the PTP capabilities the PF granted and cache them. */
int iavf_get_vf_ptp_caps(struct iavf_adapter *adapter)
{
	struct virtchnl_ptp_caps caps = {};
	struct iavf_arq_event_info event;
	int err;

	event.msg_buf = (u8 *)&caps;
	event.buf_len = sizeof(caps);

	err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
				     VIRTCHNL_OP_1588_PTP_GET_CAPS);
	if (!err)
		adapter->ptp.hw_caps = caps;

	return err;
}

/**
 * iavf_configure_queues
 * @adapter: adapter structure
 *
 * Request that the PF set up our (previously allocated) queues.
 **/
void iavf_configure_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_vsi_queue_config_info *vqci;
	int pairs = adapter->num_active_queues;
	struct virtchnl_queue_pair_info *vqpi;
	u32 i, max_frame;
	u8 rx_flags = 0;
	size_t len;

	/* frame size is bounded by the page-pool offset and the PF's MTU */
	max_frame = LIBIE_MAX_RX_FRM_LEN(adapter->rx_rings->pp->p.offset);
	max_frame = min_not_zero(adapter->vf_res->max_mtu, max_frame);

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	len = virtchnl_struct_size(vqci, qpair, pairs);
	vqci = kzalloc(len, GFP_KERNEL);
	if (!vqci)
		return;

	if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP))
		rx_flags |= VIRTCHNL_PTP_RX_TSTAMP;

	vqci->vsi_id = adapter->vsi_res->vsi_id;
	vqci->num_queue_pairs = pairs;
	vqpi = vqci->qpair;
	/* Size check is not needed here - HW max is 16 queue pairs, and we
	 * can fit info for 31 of them into the AQ buffer before it overflows.
	 */
	for (i = 0; i < pairs; i++) {
		vqpi->txq.vsi_id = vqci->vsi_id;
		vqpi->txq.queue_id = i;
		vqpi->txq.ring_len = adapter->tx_rings[i].count;
		vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
		vqpi->rxq.vsi_id = vqci->vsi_id;
		vqpi->rxq.queue_id = i;
		vqpi->rxq.ring_len = adapter->rx_rings[i].count;
		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
		vqpi->rxq.max_pkt_size = max_frame;
		vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
		if (IAVF_RXDID_ALLOWED(adapter))
			vqpi->rxq.rxdid = adapter->rxdid;
		if (CRC_OFFLOAD_ALLOWED(adapter))
			vqpi->rxq.crc_disable = !!(adapter->netdev->features &
						   NETIF_F_RXFCS);
		vqpi->rxq.flags = rx_flags;
		vqpi++;
	}

	adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
			 (u8 *)vqci, len);
	kfree(vqci);
}

/**
 * iavf_enable_queues
 * @adapter: adapter structure
 *
 * Request that the PF enable all of our queues.
 **/
void iavf_enable_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_queue_select vqs;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	/* contiguous mask of all active queues */
	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
	vqs.rx_queues = vqs.tx_queues;
	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
			 (u8 *)&vqs, sizeof(vqs));
}

/**
 * iavf_disable_queues
 * @adapter: adapter structure
 *
 * Request that the PF disable all of our queues.
 **/
void iavf_disable_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_queue_select vqs;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	/* contiguous mask of all active queues */
	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
	vqs.rx_queues = vqs.tx_queues;
	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_QUEUES;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
			 (u8 *)&vqs, sizeof(vqs));
}

/**
 * iavf_map_queues
 * @adapter: adapter structure
 *
 * Request that the PF map queues to interrupt vectors. Misc causes, including
 * admin queue, are always mapped to vector 0.
 **/
void iavf_map_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_irq_map_info *vimi;
	struct virtchnl_vector_map *vecmap;
	struct iavf_q_vector *q_vector;
	int v_idx, q_vectors;
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	len = virtchnl_struct_size(vimi, vecmap, adapter->num_msix_vectors);
	vimi = kzalloc(len, GFP_KERNEL);
	if (!vimi)
		return;

	vimi->num_vectors = adapter->num_msix_vectors;
	/* Queue vectors first */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = &adapter->q_vectors[v_idx];
		vecmap = &vimi->vecmap[v_idx];

		vecmap->vsi_id = adapter->vsi_res->vsi_id;
		/* vector 0 is reserved for misc causes, so offset by NONQ_VECS */
		vecmap->vector_id = v_idx + NONQ_VECS;
		vecmap->txq_map = q_vector->ring_mask;
		vecmap->rxq_map = q_vector->ring_mask;
		vecmap->rxitr_idx = IAVF_RX_ITR;
		vecmap->txitr_idx = IAVF_TX_ITR;
	}
	/* Misc vector last - this is only for AdminQ messages */
	vecmap = &vimi->vecmap[v_idx];
	vecmap->vsi_id = adapter->vsi_res->vsi_id;
	vecmap->vector_id = 0;
	vecmap->txq_map = 0;
	vecmap->rxq_map = 0;

	adapter->aq_required &= ~IAVF_FLAG_AQ_MAP_VECTORS;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
			 (u8 *)vimi, len);
	kfree(vimi);
}

/**
 * iavf_set_mac_addr_type - Set the correct request type from the filter type
 * @virtchnl_ether_addr: pointer to requested list element
 * @filter: pointer to requested filter
 **/
static void
iavf_set_mac_addr_type(struct virtchnl_ether_addr *virtchnl_ether_addr,
		       const struct iavf_mac_filter *filter)
{
	virtchnl_ether_addr->type = filter->is_primary ?
		VIRTCHNL_ETHER_ADDR_PRIMARY :
		VIRTCHNL_ETHER_ADDR_EXTRA;
}

/**
 * iavf_add_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF add one or more addresses to our filters.
 **/
void iavf_add_ether_addrs(struct iavf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct iavf_mac_filter *f;
	int i = 0, count = 0;
	bool more = false;
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;

	len = virtchnl_struct_size(veal, list, count);
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		/* trim to what fits in one AQ buffer; 'more' keeps the
		 * aq_required bit set so the remainder is sent next pass
		 */
		dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
		while (len > IAVF_MAX_AQ_BUF_SIZE)
			len = virtchnl_struct_size(veal, list, --count);
		more = true;
	}

	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			iavf_set_mac_addr_type(&veal->list[i], f);
			i++;
			f->add = false;
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len);
	kfree(veal);
}

/**
 * iavf_del_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more addresses from our filters.
 **/
void iavf_del_ether_addrs(struct iavf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct iavf_mac_filter *f, *ftmp;
	int i = 0, count = 0;
	bool more = false;
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;

	len = virtchnl_struct_size(veal, list, count);
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		/* trim to one AQ buffer; 'more' leaves the aq_required bit
		 * set so the remainder is sent on a later pass
		 */
		dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
		while (len > IAVF_MAX_AQ_BUF_SIZE)
			len = virtchnl_struct_size(veal, list, --count);
		more = true;
	}
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		if (f->remove) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			iavf_set_mac_addr_type(&veal->list[i], f);
			i++;
			/* the SW filter is freed as soon as it is queued */
			list_del(&f->list);
			kfree(f);
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len);
	kfree(veal);
}

/**
 * iavf_mac_add_ok
 * @adapter: adapter structure
 *
 * Submit list of filters based on PF response.
 **/
static void iavf_mac_add_ok(struct iavf_adapter *adapter)
{
	struct iavf_mac_filter *f, *ftmp;

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		f->is_new_mac = false;
		/* mark completed adds as handled for waiters */
		if (!f->add && !f->add_handled)
			f->add_handled = true;
	}
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_mac_add_reject
 * @adapter: adapter structure
 *
 * Remove filters from list based on PF response.
 **/
static void iavf_mac_add_reject(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct iavf_mac_filter *f, *ftmp;

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		/* never schedule removal of the netdev's own address */
		if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
			f->remove = false;

		if (!f->add && !f->add_handled)
			f->add_handled = true;

		/* drop filters the PF refused to add */
		if (f->is_new_mac) {
			list_del(&f->list);
			kfree(f);
		}
	}
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_vlan_add_reject
 * @adapter: adapter structure
 *
 * Remove VLAN filters from list based on PF response.
 **/
static void iavf_vlan_add_reject(struct iavf_adapter *adapter)
{
	struct iavf_vlan_filter *f, *ftmp;

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
		if (f->state == IAVF_VLAN_IS_NEW) {
			list_del(&f->list);
			kfree(f);
			adapter->num_vlan_filters--;
		}
	}
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_add_vlans
 * @adapter: adapter structure
 *
 * Request that the PF add one or more VLAN filters to our VSI.
 **/
void iavf_add_vlans(struct iavf_adapter *adapter)
{
	int len, i = 0, count = 0;
	struct iavf_vlan_filter *f;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->state == IAVF_VLAN_ADD)
			count++;
	}
	if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	/* legacy (v1) VLAN opcode vs the richer VLAN_V2 opcode */
	if (VLAN_ALLOWED(adapter)) {
		struct virtchnl_vlan_filter_list *vvfl;

		adapter->current_op = VIRTCHNL_OP_ADD_VLAN;

		len = virtchnl_struct_size(vvfl, vlan_id, count);
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			/* trim to one AQ buffer; 'more' keeps the bit set so
			 * the remainder is sent later
			 */
			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
			while (len > IAVF_MAX_AQ_BUF_SIZE)
				len = virtchnl_struct_size(vvfl, vlan_id,
							   --count);
			more = true;
		}
		vvfl = kzalloc(len, GFP_ATOMIC);
		if (!vvfl) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl->vsi_id = adapter->vsi_res->vsi_id;
		vvfl->num_elements = count;
		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
			if (f->state == IAVF_VLAN_ADD) {
				vvfl->vlan_id[i] = f->vlan.vid;
				i++;
				/* IS_NEW marks it in-flight until the PF acks */
				f->state = IAVF_VLAN_IS_NEW;
				if (i == count)
					break;
			}
		}
		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
		kfree(vvfl);
	} else {
		u16 max_vlans = adapter->vlan_v2_caps.filtering.max_filters;
		u16 current_vlans = iavf_get_num_vlans_added(adapter);
		struct virtchnl_vlan_filter_list_v2 *vvfl_v2;

		adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2;

		/* also respect the PF's advertised per-VF filter limit */
		if ((count + current_vlans) > max_vlans &&
		    current_vlans < max_vlans) {
			count = max_vlans - iavf_get_num_vlans_added(adapter);
			more = true;
		}

		len = virtchnl_struct_size(vvfl_v2, filters, count);
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
			while (len > IAVF_MAX_AQ_BUF_SIZE)
				len = virtchnl_struct_size(vvfl_v2, filters,
							   --count);
			more = true;
		}

		vvfl_v2 = kzalloc(len, GFP_ATOMIC);
		if (!vvfl_v2) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
		vvfl_v2->num_elements = count;
		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
			if (f->state == IAVF_VLAN_ADD) {
				struct virtchnl_vlan_supported_caps *filtering_support =
					&adapter->vlan_v2_caps.filtering.filtering_support;
				struct virtchnl_vlan *vlan;

				if (i == count)
					break;

				/* give priority over outer if it's enabled */
				if (filtering_support->outer)
					vlan = &vvfl_v2->filters[i].outer;
				else
					vlan = &vvfl_v2->filters[i].inner;

				vlan->tci = f->vlan.vid;
				vlan->tpid = f->vlan.tpid;

				i++;
				f->state = IAVF_VLAN_IS_NEW;
			}
		}

		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN_V2,
				 (u8 *)vvfl_v2, len);
		kfree(vvfl_v2);
	}
}

/**
 * iavf_del_vlans
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more VLAN filters from our VSI.
895 **/ 896 void iavf_del_vlans(struct iavf_adapter *adapter) 897 { 898 struct iavf_vlan_filter *f, *ftmp; 899 int len, i = 0, count = 0; 900 bool more = false; 901 902 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 903 /* bail because we already have a command pending */ 904 dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n", 905 adapter->current_op); 906 return; 907 } 908 909 spin_lock_bh(&adapter->mac_vlan_list_lock); 910 911 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { 912 /* since VLAN capabilities are not allowed, we dont want to send 913 * a VLAN delete request because it will most likely fail and 914 * create unnecessary errors/noise, so just free the VLAN 915 * filters marked for removal to enable bailing out before 916 * sending a virtchnl message 917 */ 918 if (f->state == IAVF_VLAN_REMOVE && 919 !VLAN_FILTERING_ALLOWED(adapter)) { 920 list_del(&f->list); 921 kfree(f); 922 adapter->num_vlan_filters--; 923 } else if (f->state == IAVF_VLAN_DISABLE && 924 !VLAN_FILTERING_ALLOWED(adapter)) { 925 f->state = IAVF_VLAN_INACTIVE; 926 } else if (f->state == IAVF_VLAN_REMOVE || 927 f->state == IAVF_VLAN_DISABLE) { 928 count++; 929 } 930 } 931 if (!count || !VLAN_FILTERING_ALLOWED(adapter)) { 932 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; 933 spin_unlock_bh(&adapter->mac_vlan_list_lock); 934 return; 935 } 936 937 if (VLAN_ALLOWED(adapter)) { 938 struct virtchnl_vlan_filter_list *vvfl; 939 940 adapter->current_op = VIRTCHNL_OP_DEL_VLAN; 941 942 len = virtchnl_struct_size(vvfl, vlan_id, count); 943 if (len > IAVF_MAX_AQ_BUF_SIZE) { 944 dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); 945 while (len > IAVF_MAX_AQ_BUF_SIZE) 946 len = virtchnl_struct_size(vvfl, vlan_id, 947 --count); 948 more = true; 949 } 950 vvfl = kzalloc(len, GFP_ATOMIC); 951 if (!vvfl) { 952 spin_unlock_bh(&adapter->mac_vlan_list_lock); 953 return; 954 } 955 956 vvfl->vsi_id = adapter->vsi_res->vsi_id; 957 
vvfl->num_elements = count; 958 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { 959 if (f->state == IAVF_VLAN_DISABLE) { 960 vvfl->vlan_id[i] = f->vlan.vid; 961 f->state = IAVF_VLAN_INACTIVE; 962 i++; 963 if (i == count) 964 break; 965 } else if (f->state == IAVF_VLAN_REMOVE) { 966 vvfl->vlan_id[i] = f->vlan.vid; 967 list_del(&f->list); 968 kfree(f); 969 adapter->num_vlan_filters--; 970 i++; 971 if (i == count) 972 break; 973 } 974 } 975 976 if (!more) 977 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; 978 979 spin_unlock_bh(&adapter->mac_vlan_list_lock); 980 981 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); 982 kfree(vvfl); 983 } else { 984 struct virtchnl_vlan_filter_list_v2 *vvfl_v2; 985 986 adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2; 987 988 len = virtchnl_struct_size(vvfl_v2, filters, count); 989 if (len > IAVF_MAX_AQ_BUF_SIZE) { 990 dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); 991 while (len > IAVF_MAX_AQ_BUF_SIZE) 992 len = virtchnl_struct_size(vvfl_v2, filters, 993 --count); 994 more = true; 995 } 996 997 vvfl_v2 = kzalloc(len, GFP_ATOMIC); 998 if (!vvfl_v2) { 999 spin_unlock_bh(&adapter->mac_vlan_list_lock); 1000 return; 1001 } 1002 1003 vvfl_v2->vport_id = adapter->vsi_res->vsi_id; 1004 vvfl_v2->num_elements = count; 1005 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { 1006 if (f->state == IAVF_VLAN_DISABLE || 1007 f->state == IAVF_VLAN_REMOVE) { 1008 struct virtchnl_vlan_supported_caps *filtering_support = 1009 &adapter->vlan_v2_caps.filtering.filtering_support; 1010 struct virtchnl_vlan *vlan; 1011 1012 /* give priority over outer if it's enabled */ 1013 if (filtering_support->outer) 1014 vlan = &vvfl_v2->filters[i].outer; 1015 else 1016 vlan = &vvfl_v2->filters[i].inner; 1017 1018 vlan->tci = f->vlan.vid; 1019 vlan->tpid = f->vlan.tpid; 1020 1021 if (f->state == IAVF_VLAN_DISABLE) { 1022 f->state = IAVF_VLAN_INACTIVE; 1023 } else { 1024 
				/* Filter fully deleted on the PF side: unlink
				 * and free our shadow copy.
				 */
				list_del(&f->list);
				kfree(f);
				adapter->num_vlan_filters--;
			}
			i++;
			if (i == count)
				break;
		}
		}

		/* all pending deletions fit in this message; stop re-arming */
		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN_V2,
				 (u8 *)vvfl_v2, len);
		kfree(vvfl_v2);
	}
}

/**
 * iavf_set_promiscuous
 * @adapter: adapter structure
 *
 * Request that the PF enable promiscuous mode for our VSI.
 **/
void iavf_set_promiscuous(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct virtchnl_promisc_info vpi;
	unsigned int flags;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
			adapter->current_op);
		return;
	}

	/* prevent changes to promiscuous flags */
	spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);

	/* sanity check to prevent duplicate AQ calls */
	if (!iavf_promiscuous_mode_changed(adapter)) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
		dev_dbg(&adapter->pdev->dev, "No change in promiscuous mode\n");
		/* allow changes to promiscuous flags */
		spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
		return;
	}

	/* there are 2 bits, but only 3 states */
	if (!(netdev->flags & IFF_PROMISC) &&
	    netdev->flags & IFF_ALLMULTI) {
		/* State 1 - only multicast promiscuous mode enabled
		 * - !IFF_PROMISC && IFF_ALLMULTI
		 */
		flags = FLAG_VF_MULTICAST_PROMISC;
		adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
		adapter->current_netdev_promisc_flags &= ~IFF_PROMISC;
		dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
	} else if (!(netdev->flags & IFF_PROMISC) &&
		   !(netdev->flags & IFF_ALLMULTI)) {
		/* State 2 - unicast/multicast promiscuous mode disabled
		 * - !IFF_PROMISC && !IFF_ALLMULTI
		 */
		flags = 0;
		adapter->current_netdev_promisc_flags &=
			~(IFF_PROMISC | IFF_ALLMULTI);
		dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
	} else {
		/* State 3 - unicast/multicast promiscuous mode enabled
		 * - IFF_PROMISC && IFF_ALLMULTI
		 * - IFF_PROMISC && !IFF_ALLMULTI
		 */
		flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
		adapter->current_netdev_promisc_flags |= IFF_PROMISC;
		if (netdev->flags & IFF_ALLMULTI)
			adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
		else
			adapter->current_netdev_promisc_flags &= ~IFF_ALLMULTI;

		dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
	}

	adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;

	/* allow changes to promiscuous flags */
	spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);

	/* send the request outside the spinlock; current_op serializes it */
	adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
	vpi.vsi_id = adapter->vsi_res->vsi_id;
	vpi.flags = flags;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
			 (u8 *)&vpi, sizeof(vpi));
}

/**
 * iavf_request_stats
 * @adapter: adapter structure
 *
 * Request VSI statistics from PF.
 **/
void iavf_request_stats(struct iavf_adapter *adapter)
{
	struct virtchnl_queue_select vqs;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* no error message, this isn't crucial */
		return;
	}

	adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS;
	adapter->current_op = VIRTCHNL_OP_GET_STATS;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	/* queue maps are ignored for this message - only the vsi is used */
	if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs,
			     sizeof(vqs)))
		/* if the request failed, don't lock out others */
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}

/**
 * iavf_get_rss_hashcfg
 * @adapter: adapter structure
 *
 * Request RSS Hash enable bits from PF
 **/
void iavf_get_rss_hashcfg(struct iavf_adapter *adapter)
{
	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS;
	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_RSS_HASHCFG;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, NULL, 0);
}

/**
 * iavf_set_rss_hashcfg
 * @adapter: adapter structure
 *
 * Request the PF to set our RSS hash capabilities
 **/
void iavf_set_rss_hashcfg(struct iavf_adapter *adapter)
{
	struct virtchnl_rss_hashcfg vrh;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
			adapter->current_op);
		return;
	}
	vrh.hashcfg = adapter->rss_hashcfg;
	adapter->current_op = VIRTCHNL_OP_SET_RSS_HASHCFG;
	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_HASHCFG;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HASHCFG, (u8 *)&vrh,
			 sizeof(vrh));
}

/**
 * iavf_set_rss_key
 * @adapter: adapter structure
 *
 * Request the PF to set our RSS hash key
 **/
void iavf_set_rss_key(struct iavf_adapter *adapter)
{
	struct virtchnl_rss_key *vrk;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
			adapter->current_op);
		return;
	}
	/* message carries a flexible key[] array sized at runtime */
	len = virtchnl_struct_size(vrk, key, adapter->rss_key_size);
	vrk = kzalloc(len, GFP_KERNEL);
	if (!vrk)
		return;
	vrk->vsi_id = adapter->vsi.id;
	vrk->key_len = adapter->rss_key_size;
	memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);

	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_KEY;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len);
	kfree(vrk);
}

/**
 * iavf_set_rss_lut
 * @adapter: adapter structure
 *
 * Request the PF to set our RSS lookup table
 **/
void iavf_set_rss_lut(struct iavf_adapter *adapter)
{
	struct virtchnl_rss_lut *vrl;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
			adapter->current_op);
		return;
	}
	/* message carries a flexible lut[] array sized at runtime */
	len = virtchnl_struct_size(vrl, lut, adapter->rss_lut_size);
	vrl = kzalloc(len, GFP_KERNEL);
	if (!vrl)
		return;
	vrl->vsi_id = adapter->vsi.id;
	vrl->lut_entries = adapter->rss_lut_size;
	memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_LUT;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)vrl, len);
	kfree(vrl);
}

/**
 * iavf_set_rss_hfunc
 * @adapter: adapter structure
 *
 * Request the PF to set our RSS Hash function
 **/
void iavf_set_rss_hfunc(struct iavf_adapter *adapter)
{
	struct virtchnl_rss_hfunc *vrh;
	int len = sizeof(*vrh);

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set RSS Hash function, command %d pending\n",
			adapter->current_op);
		return;
	}
	vrh = kzalloc(len, GFP_KERNEL);
	if (!vrh)
		return;
	vrh->vsi_id = adapter->vsi.id;
	vrh->rss_algorithm = adapter->hfunc;
	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_HFUNC;
	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_HFUNC;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_HFUNC, (u8 *)vrh, len);
	kfree(vrh);
}

/**
 * iavf_enable_vlan_stripping
 * @adapter: adapter structure
 *
 * Request VLAN header stripping to be enabled
 **/
void iavf_enable_vlan_stripping(struct iavf_adapter *adapter)
{
	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, NULL, 0);
}

/**
 * iavf_disable_vlan_stripping
 * @adapter: adapter structure
 *
 * Request VLAN header stripping to be disabled
 **/
void iavf_disable_vlan_stripping(struct iavf_adapter *adapter)
{
	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0);
}

/**
 * iavf_tpid_to_vc_ethertype - transform from VLAN TPID to virtchnl ethertype
 * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.)
 *
 * Returns 0 for any TPID other than the two supported ones.
 */
static u32 iavf_tpid_to_vc_ethertype(u16 tpid)
{
	switch (tpid) {
	case ETH_P_8021Q:
		return VIRTCHNL_VLAN_ETHERTYPE_8100;
	case ETH_P_8021AD:
		return VIRTCHNL_VLAN_ETHERTYPE_88A8;
	}

	return 0;
}

/**
 * iavf_set_vc_offload_ethertype - set virtchnl ethertype for offload message
 * @adapter: adapter structure
 * @msg: message structure used for updating offloads over virtchnl to update
 * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.)
 * @offload_op: opcode used to determine which support structure to check
 *
 * Returns 0 on success, -EINVAL for an unknown opcode or an unsupported
 * ethertype/toggle combination.
 */
static int
iavf_set_vc_offload_ethertype(struct iavf_adapter *adapter,
			      struct virtchnl_vlan_setting *msg, u16 tpid,
			      enum virtchnl_ops offload_op)
{
	struct virtchnl_vlan_supported_caps *offload_support;
	u16 vc_ethertype = iavf_tpid_to_vc_ethertype(tpid);

	/* reference the correct offload support structure */
	switch (offload_op) {
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
		offload_support =
			&adapter->vlan_v2_caps.offloads.stripping_support;
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
		offload_support =
			&adapter->vlan_v2_caps.offloads.insertion_support;
		break;
	default:
		dev_err(&adapter->pdev->dev, "Invalid opcode %d for setting virtchnl ethertype to enable/disable VLAN offloads\n",
			offload_op);
		return -EINVAL;
	}

	/* make sure ethertype is supported; outer takes precedence over
	 * inner when both advertise the ethertype and the toggle capability
	 */
	if (offload_support->outer & vc_ethertype &&
	    offload_support->outer & VIRTCHNL_VLAN_TOGGLE) {
		msg->outer_ethertype_setting = vc_ethertype;
	} else if (offload_support->inner & vc_ethertype &&
		   offload_support->inner & VIRTCHNL_VLAN_TOGGLE) {
		msg->inner_ethertype_setting = vc_ethertype;
	} else {
		dev_dbg(&adapter->pdev->dev, "opcode %d unsupported for VLAN TPID 0x%04x\n",
			offload_op, tpid);
		return -EINVAL;
	}

	return 0;
}

/**
 * iavf_clear_offload_v2_aq_required - clear AQ required bit for offload request
 * @adapter: adapter structure
 * @tpid: VLAN TPID
 * @offload_op: opcode used to determine which AQ required bit to clear
 */
static void
iavf_clear_offload_v2_aq_required(struct iavf_adapter *adapter, u16 tpid,
				  enum virtchnl_ops offload_op)
{
	/* each (op, TPID) pair maps to exactly one aq_required flag */
	switch (offload_op) {
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
		if (tpid == ETH_P_8021Q)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
		else if (tpid == ETH_P_8021AD)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
		if (tpid == ETH_P_8021Q)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
		else if (tpid == ETH_P_8021AD)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
		if (tpid == ETH_P_8021Q)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
		else if (tpid == ETH_P_8021AD)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
		if (tpid == ETH_P_8021Q)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
		else if (tpid == ETH_P_8021AD)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
		break;
	default:
		dev_err(&adapter->pdev->dev, "Unsupported opcode %d specified for clearing aq_required bits for VIRTCHNL_VF_OFFLOAD_VLAN_V2 offload request\n",
			offload_op);
	}
}

/**
 * iavf_send_vlan_offload_v2 - send offload enable/disable over virtchnl
 * @adapter: adapter structure
 * @tpid: VLAN TPID used for the command (i.e.
0x8100 or 0x88a8) 1434 * @offload_op: offload_op used to make the request over virtchnl 1435 */ 1436 static void 1437 iavf_send_vlan_offload_v2(struct iavf_adapter *adapter, u16 tpid, 1438 enum virtchnl_ops offload_op) 1439 { 1440 struct virtchnl_vlan_setting *msg; 1441 int len = sizeof(*msg); 1442 1443 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1444 /* bail because we already have a command pending */ 1445 dev_err(&adapter->pdev->dev, "Cannot send %d, command %d pending\n", 1446 offload_op, adapter->current_op); 1447 return; 1448 } 1449 1450 adapter->current_op = offload_op; 1451 1452 msg = kzalloc(len, GFP_KERNEL); 1453 if (!msg) 1454 return; 1455 1456 msg->vport_id = adapter->vsi_res->vsi_id; 1457 1458 /* always clear to prevent unsupported and endless requests */ 1459 iavf_clear_offload_v2_aq_required(adapter, tpid, offload_op); 1460 1461 /* only send valid offload requests */ 1462 if (!iavf_set_vc_offload_ethertype(adapter, msg, tpid, offload_op)) 1463 iavf_send_pf_msg(adapter, offload_op, (u8 *)msg, len); 1464 else 1465 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1466 1467 kfree(msg); 1468 } 1469 1470 /** 1471 * iavf_enable_vlan_stripping_v2 - enable VLAN stripping 1472 * @adapter: adapter structure 1473 * @tpid: VLAN TPID used to enable VLAN stripping 1474 */ 1475 void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid) 1476 { 1477 iavf_send_vlan_offload_v2(adapter, tpid, 1478 VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2); 1479 } 1480 1481 /** 1482 * iavf_disable_vlan_stripping_v2 - disable VLAN stripping 1483 * @adapter: adapter structure 1484 * @tpid: VLAN TPID used to disable VLAN stripping 1485 */ 1486 void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid) 1487 { 1488 iavf_send_vlan_offload_v2(adapter, tpid, 1489 VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2); 1490 } 1491 1492 /** 1493 * iavf_enable_vlan_insertion_v2 - enable VLAN insertion 1494 * @adapter: adapter structure 1495 * @tpid: VLAN TPID used to enable VLAN 
insertion 1496 */ 1497 void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid) 1498 { 1499 iavf_send_vlan_offload_v2(adapter, tpid, 1500 VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2); 1501 } 1502 1503 /** 1504 * iavf_disable_vlan_insertion_v2 - disable VLAN insertion 1505 * @adapter: adapter structure 1506 * @tpid: VLAN TPID used to disable VLAN insertion 1507 */ 1508 void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid) 1509 { 1510 iavf_send_vlan_offload_v2(adapter, tpid, 1511 VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2); 1512 } 1513 1514 #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) 1515 /** 1516 * iavf_virtchnl_send_ptp_cmd - Send one queued PTP command 1517 * @adapter: adapter private structure 1518 * 1519 * De-queue one PTP command request and send the command message to the PF. 1520 * Clear IAVF_FLAG_AQ_SEND_PTP_CMD if no more messages are left to send. 1521 */ 1522 void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter) 1523 { 1524 struct iavf_ptp_aq_cmd *cmd; 1525 int err; 1526 1527 if (!adapter->ptp.clock) { 1528 /* This shouldn't be possible to hit, since no messages should 1529 * be queued if PTP is not initialized. 
		 */
		pci_err(adapter->pdev, "PTP is not initialized\n");
		adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
		return;
	}

	/* aq_cmd_lock protects the PTP command list */
	mutex_lock(&adapter->ptp.aq_cmd_lock);
	cmd = list_first_entry_or_null(&adapter->ptp.aq_cmds,
				       struct iavf_ptp_aq_cmd, list);
	if (!cmd) {
		/* no further PTP messages to send */
		adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
		goto out_unlock;
	}

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		pci_err(adapter->pdev,
			"Cannot send PTP command %d, command %d pending\n",
			cmd->v_opcode, adapter->current_op);
		goto out_unlock;
	}

	err = iavf_send_pf_msg(adapter, cmd->v_opcode, cmd->msg, cmd->msglen);
	if (!err) {
		/* Command was sent without errors, so we can remove it from
		 * the list and discard it.
		 */
		list_del(&cmd->list);
		kfree(cmd);
	} else {
		/* We failed to send the command, try again next cycle */
		pci_err(adapter->pdev, "Failed to send PTP command %d\n",
			cmd->v_opcode);
	}

	if (list_empty(&adapter->ptp.aq_cmds))
		/* no further PTP messages to send */
		adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;

out_unlock:
	mutex_unlock(&adapter->ptp.aq_cmd_lock);
}
#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */

/**
 * iavf_print_link_message - print link up or down
 * @adapter: adapter structure
 *
 * Log a message telling the world of our wondrous link status
 */
static void iavf_print_link_message(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int link_speed_mbps;
	char *speed;

	if (!adapter->link_up) {
		netdev_info(netdev, "NIC Link is Down\n");
		return;
	}

	if (ADV_LINK_SUPPORT(adapter)) {
		/* PF reports the speed directly in Mbps */
		link_speed_mbps = adapter->link_speed_mbps;
		goto print_link_msg;
	}

	/* legacy PF: map the virtchnl speed enum to Mbps */
	switch (adapter->link_speed) {
	case VIRTCHNL_LINK_SPEED_40GB:
		link_speed_mbps = SPEED_40000;
		break;
	case VIRTCHNL_LINK_SPEED_25GB:
		link_speed_mbps = SPEED_25000;
		break;
	case VIRTCHNL_LINK_SPEED_20GB:
		link_speed_mbps = SPEED_20000;
		break;
	case VIRTCHNL_LINK_SPEED_10GB:
		link_speed_mbps = SPEED_10000;
		break;
	case VIRTCHNL_LINK_SPEED_5GB:
		link_speed_mbps = SPEED_5000;
		break;
	case VIRTCHNL_LINK_SPEED_2_5GB:
		link_speed_mbps = SPEED_2500;
		break;
	case VIRTCHNL_LINK_SPEED_1GB:
		link_speed_mbps = SPEED_1000;
		break;
	case VIRTCHNL_LINK_SPEED_100MB:
		link_speed_mbps = SPEED_100;
		break;
	default:
		link_speed_mbps = SPEED_UNKNOWN;
		break;
	}

print_link_msg:
	if (link_speed_mbps > SPEED_1000) {
		if (link_speed_mbps == SPEED_2500) {
			speed = kasprintf(GFP_KERNEL, "%s", "2.5 Gbps");
		} else {
			/* convert to Gbps inline */
			speed = kasprintf(GFP_KERNEL, "%d Gbps",
					  link_speed_mbps / 1000);
		}
	} else if (link_speed_mbps == SPEED_UNKNOWN) {
		speed = kasprintf(GFP_KERNEL, "%s", "Unknown Mbps");
	} else {
		speed = kasprintf(GFP_KERNEL, "%d Mbps", link_speed_mbps);
	}

	netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed);
	kfree(speed);
}

/**
 * iavf_get_vpe_link_status
 * @adapter: adapter structure
 * @vpe: virtchnl_pf_event structure
 *
 * Helper function for determining the link status
 **/
static bool
iavf_get_vpe_link_status(struct iavf_adapter *adapter,
			 struct virtchnl_pf_event *vpe)
{
	if (ADV_LINK_SUPPORT(adapter))
		return vpe->event_data.link_event_adv.link_status;
	else
		return vpe->event_data.link_event.link_status;
}

/**
 * iavf_set_adapter_link_speed_from_vpe
 * @adapter: adapter structure for which we are setting the link speed
 * @vpe:
virtchnl_pf_event structure that contains the link speed we are setting
 *
 * Helper function for setting iavf_adapter link speed
 **/
static void
iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter,
				     struct virtchnl_pf_event *vpe)
{
	if (ADV_LINK_SUPPORT(adapter))
		adapter->link_speed_mbps =
			vpe->event_data.link_event_adv.link_speed;
	else
		adapter->link_speed = vpe->event_data.link_event.link_speed;
}

/**
 * iavf_get_qos_caps - get qos caps support
 * @adapter: iavf adapter struct instance
 *
 * This function requests PF for Supported QoS Caps.
 */
void iavf_get_qos_caps(struct iavf_adapter *adapter)
{
	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev,
			"Cannot get qos caps, command %d pending\n",
			adapter->current_op);
		return;
	}

	adapter->current_op = VIRTCHNL_OP_GET_QOS_CAPS;
	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_QOS_CAPS;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_QOS_CAPS, NULL, 0);
}

/**
 * iavf_set_quanta_size - set quanta size of queue chunk
 * @adapter: iavf adapter struct instance
 * @quanta_size: quanta size in bytes
 * @queue_index: starting index of queue chunk
 * @num_queues: number of queues in the queue chunk
 *
 * This function requests PF to set quanta size of queue chunk
 * starting at queue_index.
 */
static void
iavf_set_quanta_size(struct iavf_adapter *adapter, u16 quanta_size,
		     u16 queue_index, u16 num_queues)
{
	struct virtchnl_quanta_cfg quanta_cfg;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev,
			"Cannot set queue quanta size, command %d pending\n",
			adapter->current_op);
		return;
	}

	adapter->current_op = VIRTCHNL_OP_CONFIG_QUANTA;
	quanta_cfg.quanta_size = quanta_size;
	/* quanta size only applies to Tx queues */
	quanta_cfg.queue_select.type = VIRTCHNL_QUEUE_TYPE_TX;
	quanta_cfg.queue_select.start_queue_id = queue_index;
	quanta_cfg.queue_select.num_queues = num_queues;
	adapter->aq_required &= ~IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_QUANTA,
			 (u8 *)&quanta_cfg, sizeof(quanta_cfg));
}

/**
 * iavf_cfg_queues_quanta_size - configure quanta size of queues
 * @adapter: adapter structure
 *
 * Request that the PF configure quanta size of allocated queues.
 **/
void iavf_cfg_queues_quanta_size(struct iavf_adapter *adapter)
{
	int quanta_size = IAVF_DEFAULT_QUANTA_SIZE;

	/* Set Queue Quanta Size to default */
	iavf_set_quanta_size(adapter, quanta_size, 0,
			     adapter->num_active_queues);
}

/**
 * iavf_cfg_queues_bw - configure bandwidth of allocated queues
 * @adapter: iavf adapter structure instance
 *
 * This function requests PF to configure queue bandwidth of allocated queues
 */
void iavf_cfg_queues_bw(struct iavf_adapter *adapter)
{
	struct virtchnl_queues_bw_cfg *qs_bw_cfg;
	struct net_shaper *q_shaper;
	int qs_to_update = 0;
	int i, inx = 0;
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev,
			"Cannot set tc queue bw, command %d pending\n",
			adapter->current_op);
		return;
	}

	/* first pass: count queues whose shaper config changed so the
	 * flexible-array message can be sized exactly
	 */
	for (i = 0; i < adapter->num_active_queues; i++) {
		if (adapter->tx_rings[i].q_shaper_update)
			qs_to_update++;
	}
	len = struct_size(qs_bw_cfg, cfg, qs_to_update);
	qs_bw_cfg = kzalloc(len, GFP_KERNEL);
	if (!qs_bw_cfg)
		return;

	qs_bw_cfg->vsi_id = adapter->vsi.id;
	qs_bw_cfg->num_queues = qs_to_update;

	/* second pass: fill one cfg entry per changed queue */
	for (i = 0; i < adapter->num_active_queues; i++) {
		struct iavf_ring *tx_ring = &adapter->tx_rings[i];

		q_shaper = &tx_ring->q_shaper;
		if (tx_ring->q_shaper_update) {
			qs_bw_cfg->cfg[inx].queue_id = i;
			qs_bw_cfg->cfg[inx].shaper.peak = q_shaper->bw_max;
			qs_bw_cfg->cfg[inx].shaper.committed = q_shaper->bw_min;
			qs_bw_cfg->cfg[inx].tc = 0;
			inx++;
		}
	}

	adapter->current_op = VIRTCHNL_OP_CONFIG_QUEUE_BW;
	adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_QUEUE_BW,
			 (u8 *)qs_bw_cfg, len);
	kfree(qs_bw_cfg);
}

/**
 *
iavf_enable_channels
 * @adapter: adapter structure
 *
 * Request that the PF enable channels as specified by
 * the user via tc tool.
 **/
void iavf_enable_channels(struct iavf_adapter *adapter)
{
	struct virtchnl_tc_info *vti = NULL;
	size_t len;
	int i;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
			adapter->current_op);
		return;
	}

	/* message carries one list entry per traffic class */
	len = virtchnl_struct_size(vti, list, adapter->num_tc);
	vti = kzalloc(len, GFP_KERNEL);
	if (!vti)
		return;
	vti->num_tc = adapter->num_tc;
	for (i = 0; i < vti->num_tc; i++) {
		vti->list[i].count = adapter->ch_config.ch_info[i].count;
		vti->list[i].offset = adapter->ch_config.ch_info[i].offset;
		vti->list[i].pad = 0;
		vti->list[i].max_tx_rate =
			adapter->ch_config.ch_info[i].max_tx_rate;
	}

	adapter->ch_config.state = __IAVF_TC_RUNNING;
	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
	adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS;
	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CHANNELS;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, (u8 *)vti, len);
	kfree(vti);
}

/**
 * iavf_disable_channels
 * @adapter: adapter structure
 *
 * Request that the PF disable channels that are configured
 **/
void iavf_disable_channels(struct iavf_adapter *adapter)
{
	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
			adapter->current_op);
		return;
	}

	adapter->ch_config.state = __IAVF_TC_INVALID;
	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
	adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS;
	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CHANNELS;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, NULL, 0);
}

/**
 * iavf_print_cloud_filter
 * @adapter: adapter structure
 * @f: cloud filter to print
 *
 * Print the cloud filter
 **/
static void iavf_print_cloud_filter(struct iavf_adapter *adapter,
				    struct virtchnl_filter *f)
{
	switch (f->flow_type) {
	case VIRTCHNL_TCP_V4_FLOW:
		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n",
			 &f->data.tcp_spec.dst_mac,
			 &f->data.tcp_spec.src_mac,
			 ntohs(f->data.tcp_spec.vlan_id),
			 &f->data.tcp_spec.dst_ip[0],
			 &f->data.tcp_spec.src_ip[0],
			 ntohs(f->data.tcp_spec.dst_port),
			 ntohs(f->data.tcp_spec.src_port));
		break;
	case VIRTCHNL_TCP_V6_FLOW:
		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n",
			 &f->data.tcp_spec.dst_mac,
			 &f->data.tcp_spec.src_mac,
			 ntohs(f->data.tcp_spec.vlan_id),
			 &f->data.tcp_spec.dst_ip,
			 &f->data.tcp_spec.src_ip,
			 ntohs(f->data.tcp_spec.dst_port),
			 ntohs(f->data.tcp_spec.src_port));
		break;
	}
}

/**
 * iavf_add_cloud_filter
 * @adapter: adapter structure
 *
 * Request that the PF add cloud filters as specified
 * by the user via tc tool.
1908 **/ 1909 void iavf_add_cloud_filter(struct iavf_adapter *adapter) 1910 { 1911 struct iavf_cloud_filter *cf; 1912 struct virtchnl_filter *f; 1913 int len = 0, count = 0; 1914 1915 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1916 /* bail because we already have a command pending */ 1917 dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n", 1918 adapter->current_op); 1919 return; 1920 } 1921 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1922 if (cf->add) { 1923 count++; 1924 break; 1925 } 1926 } 1927 if (!count) { 1928 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 1929 return; 1930 } 1931 adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER; 1932 1933 len = sizeof(struct virtchnl_filter); 1934 f = kzalloc(len, GFP_KERNEL); 1935 if (!f) 1936 return; 1937 1938 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1939 if (cf->add) { 1940 memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); 1941 cf->add = false; 1942 cf->state = __IAVF_CF_ADD_PENDING; 1943 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_CLOUD_FILTER, 1944 (u8 *)f, len); 1945 } 1946 } 1947 kfree(f); 1948 } 1949 1950 /** 1951 * iavf_del_cloud_filter 1952 * @adapter: adapter structure 1953 * 1954 * Request that the PF delete cloud filters as specified 1955 * by the user via tc tool. 
1956 **/ 1957 void iavf_del_cloud_filter(struct iavf_adapter *adapter) 1958 { 1959 struct iavf_cloud_filter *cf, *cftmp; 1960 struct virtchnl_filter *f; 1961 int len = 0, count = 0; 1962 1963 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1964 /* bail because we already have a command pending */ 1965 dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n", 1966 adapter->current_op); 1967 return; 1968 } 1969 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1970 if (cf->del) { 1971 count++; 1972 break; 1973 } 1974 } 1975 if (!count) { 1976 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 1977 return; 1978 } 1979 adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER; 1980 1981 len = sizeof(struct virtchnl_filter); 1982 f = kzalloc(len, GFP_KERNEL); 1983 if (!f) 1984 return; 1985 1986 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 1987 if (cf->del) { 1988 memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); 1989 cf->del = false; 1990 cf->state = __IAVF_CF_DEL_PENDING; 1991 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_CLOUD_FILTER, 1992 (u8 *)f, len); 1993 } 1994 } 1995 kfree(f); 1996 } 1997 1998 /** 1999 * iavf_add_fdir_filter 2000 * @adapter: the VF adapter structure 2001 * 2002 * Request that the PF add Flow Director filters as specified 2003 * by the user via ethtool. 
2004 **/ 2005 void iavf_add_fdir_filter(struct iavf_adapter *adapter) 2006 { 2007 struct iavf_fdir_fltr *fdir; 2008 struct virtchnl_fdir_add *f; 2009 bool process_fltr = false; 2010 int len; 2011 2012 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 2013 /* bail because we already have a command pending */ 2014 dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n", 2015 adapter->current_op); 2016 return; 2017 } 2018 2019 len = sizeof(struct virtchnl_fdir_add); 2020 f = kzalloc(len, GFP_KERNEL); 2021 if (!f) 2022 return; 2023 2024 spin_lock_bh(&adapter->fdir_fltr_lock); 2025 list_for_each_entry(fdir, &adapter->fdir_list_head, list) { 2026 if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) { 2027 process_fltr = true; 2028 fdir->state = IAVF_FDIR_FLTR_ADD_PENDING; 2029 memcpy(f, &fdir->vc_add_msg, len); 2030 break; 2031 } 2032 } 2033 spin_unlock_bh(&adapter->fdir_fltr_lock); 2034 2035 if (!process_fltr) { 2036 /* prevent iavf_add_fdir_filter() from being called when there 2037 * are no filters to add 2038 */ 2039 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER; 2040 kfree(f); 2041 return; 2042 } 2043 adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER; 2044 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len); 2045 kfree(f); 2046 } 2047 2048 /** 2049 * iavf_del_fdir_filter 2050 * @adapter: the VF adapter structure 2051 * 2052 * Request that the PF delete Flow Director filters as specified 2053 * by the user via ethtool. 
 **/
void iavf_del_fdir_filter(struct iavf_adapter *adapter)
{
	struct virtchnl_fdir_del f = {};
	struct iavf_fdir_fltr *fdir;
	bool process_fltr = false;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n",
			adapter->current_op);
		return;
	}

	len = sizeof(struct virtchnl_fdir_del);

	/* pick the first filter awaiting delete/disable, under the list lock */
	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
		if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
			process_fltr = true;
			f.vsi_id = fdir->vc_add_msg.vsi_id;
			f.flow_id = fdir->flow_id;
			fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
			break;
		} else if (fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
			process_fltr = true;
			f.vsi_id = fdir->vc_add_msg.vsi_id;
			f.flow_id = fdir->flow_id;
			fdir->state = IAVF_FDIR_FLTR_DIS_PENDING;
			break;
		}
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	if (!process_fltr) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER;
		return;
	}

	adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len);
}

/**
 * iavf_add_adv_rss_cfg
 * @adapter: the VF adapter structure
 *
 * Request that the PF add RSS configuration as specified
 * by the user via ethtool.
 **/
void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter)
{
	struct virtchnl_rss_cfg *rss_cfg;
	struct iavf_adv_rss *rss;
	bool process_rss = false;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add RSS configuration, command %d pending\n",
			adapter->current_op);
		return;
	}

	len = sizeof(struct virtchnl_rss_cfg);
	rss_cfg = kzalloc(len, GFP_KERNEL);
	if (!rss_cfg)
		return;

	/* pick the first config in ADD_REQUEST state, under the list lock */
	spin_lock_bh(&adapter->adv_rss_lock);
	list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
		if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
			process_rss = true;
			rss->state = IAVF_ADV_RSS_ADD_PENDING;
			memcpy(rss_cfg, &rss->cfg_msg, len);
			iavf_print_adv_rss_cfg(adapter, rss,
					       "Input set change for",
					       "is pending");
			break;
		}
	}
	spin_unlock_bh(&adapter->adv_rss_lock);

	if (process_rss) {
		adapter->current_op = VIRTCHNL_OP_ADD_RSS_CFG;
		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_RSS_CFG,
				 (u8 *)rss_cfg, len);
	} else {
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
	}

	kfree(rss_cfg);
}

/**
 * iavf_del_adv_rss_cfg
 * @adapter: the VF adapter structure
 *
 * Request that the PF delete RSS configuration as specified
 * by the user via ethtool.
 **/
void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter)
{
	struct virtchnl_rss_cfg *rss_cfg;
	struct iavf_adv_rss *rss;
	bool process_rss = false;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove RSS configuration, command %d pending\n",
			adapter->current_op);
		return;
	}

	len = sizeof(struct virtchnl_rss_cfg);
	rss_cfg = kzalloc(len, GFP_KERNEL);
	if (!rss_cfg)
		return;

	/* pick the first config in DEL_REQUEST state, under the list lock */
	spin_lock_bh(&adapter->adv_rss_lock);
	list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
		if (rss->state == IAVF_ADV_RSS_DEL_REQUEST) {
			process_rss = true;
			rss->state = IAVF_ADV_RSS_DEL_PENDING;
			memcpy(rss_cfg, &rss->cfg_msg, len);
			break;
		}
	}
	spin_unlock_bh(&adapter->adv_rss_lock);

	if (process_rss) {
		adapter->current_op = VIRTCHNL_OP_DEL_RSS_CFG;
		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_RSS_CFG,
				 (u8 *)rss_cfg, len);
	} else {
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
	}

	kfree(rss_cfg);
}

/**
 * iavf_request_reset
 * @adapter: adapter structure
 *
 * Request that the PF reset this VF. No response is expected.
 **/
int iavf_request_reset(struct iavf_adapter *adapter)
{
	int err;
	/* Don't check CURRENT_OP - this is always higher priority */
	err = iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
	return err;
}

/**
 * iavf_netdev_features_vlan_strip_set - update vlan strip status
 * @netdev: ptr to netdev being adjusted
 * @enable: enable or disable vlan strip
 *
 * Helper function to change vlan strip status in netdev->features.
2218 */ 2219 static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev, 2220 const bool enable) 2221 { 2222 if (enable) 2223 netdev->features |= NETIF_F_HW_VLAN_CTAG_RX; 2224 else 2225 netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 2226 } 2227 2228 /** 2229 * iavf_activate_fdir_filters - Reactivate all FDIR filters after a reset 2230 * @adapter: private adapter structure 2231 * 2232 * Called after a reset to re-add all FDIR filters and delete some of them 2233 * if they were pending to be deleted. 2234 */ 2235 static void iavf_activate_fdir_filters(struct iavf_adapter *adapter) 2236 { 2237 struct iavf_fdir_fltr *f, *ftmp; 2238 bool add_filters = false; 2239 2240 spin_lock_bh(&adapter->fdir_fltr_lock); 2241 list_for_each_entry_safe(f, ftmp, &adapter->fdir_list_head, list) { 2242 if (f->state == IAVF_FDIR_FLTR_ADD_REQUEST || 2243 f->state == IAVF_FDIR_FLTR_ADD_PENDING || 2244 f->state == IAVF_FDIR_FLTR_ACTIVE) { 2245 /* All filters and requests have been removed in PF, 2246 * restore them 2247 */ 2248 f->state = IAVF_FDIR_FLTR_ADD_REQUEST; 2249 add_filters = true; 2250 } else if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST || 2251 f->state == IAVF_FDIR_FLTR_DIS_PENDING) { 2252 /* Link down state, leave filters as inactive */ 2253 f->state = IAVF_FDIR_FLTR_INACTIVE; 2254 } else if (f->state == IAVF_FDIR_FLTR_DEL_REQUEST || 2255 f->state == IAVF_FDIR_FLTR_DEL_PENDING) { 2256 /* Delete filters that were pending to be deleted, the 2257 * list on PF is already cleared after a reset 2258 */ 2259 list_del(&f->list); 2260 iavf_dec_fdir_active_fltr(adapter, f); 2261 kfree(f); 2262 } 2263 } 2264 spin_unlock_bh(&adapter->fdir_fltr_lock); 2265 2266 if (add_filters) 2267 adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; 2268 } 2269 2270 /** 2271 * iavf_virtchnl_ptp_get_time - Respond to VIRTCHNL_OP_1588_PTP_GET_TIME 2272 * @adapter: private adapter structure 2273 * @data: the message from the PF 2274 * @len: length of the message from the PF 2275 * 2276 * Handle the 
VIRTCHNL_OP_1588_PTP_GET_TIME message from the PF. This message
 * is sent by the PF in response to the same op as a request from the VF.
 * Extract the 64bit nanoseconds time from the message and store it in
 * cached_phc_time. Then, notify any thread that is waiting for the update via
 * the wait queue.
 */
static void iavf_virtchnl_ptp_get_time(struct iavf_adapter *adapter,
				       void *data, u16 len)
{
	struct virtchnl_phc_time *reply = data;

	/* Reject malformed replies; log only once to avoid spamming. */
	if (len != sizeof(*reply)) {
		dev_err_once(&adapter->pdev->dev,
			     "Invalid VIRTCHNL_OP_1588_PTP_GET_TIME from PF. Got size %u, expected %zu\n",
			     len, sizeof(*reply));
		return;
	}

	/* Cache the PHC time and wake any waiter blocked on it. */
	adapter->ptp.cached_phc_time = reply->time;
	adapter->ptp.cached_phc_updated = jiffies;
	adapter->ptp.phc_time_ready = true;

	wake_up(&adapter->ptp.phc_time_waitqueue);
}

/**
 * iavf_virtchnl_completion
 * @adapter: adapter structure
 * @v_opcode: opcode sent by PF
 * @v_retval: retval sent by PF
 * @msg: message sent by PF
 * @msglen: message length
 *
 * Asynchronous completion function for admin queue messages. Rather than busy
 * wait, we fire off our requests and assume that no errors will be returned.
 * This function handles the reply messages.
2312 **/ 2313 void iavf_virtchnl_completion(struct iavf_adapter *adapter, 2314 enum virtchnl_ops v_opcode, 2315 enum iavf_status v_retval, u8 *msg, u16 msglen) 2316 { 2317 struct net_device *netdev = adapter->netdev; 2318 2319 if (v_opcode == VIRTCHNL_OP_EVENT) { 2320 struct virtchnl_pf_event *vpe = 2321 (struct virtchnl_pf_event *)msg; 2322 bool link_up = iavf_get_vpe_link_status(adapter, vpe); 2323 2324 switch (vpe->event) { 2325 case VIRTCHNL_EVENT_LINK_CHANGE: 2326 iavf_set_adapter_link_speed_from_vpe(adapter, vpe); 2327 2328 /* we've already got the right link status, bail */ 2329 if (adapter->link_up == link_up) 2330 break; 2331 2332 if (link_up) { 2333 /* If we get link up message and start queues 2334 * before our queues are configured it will 2335 * trigger a TX hang. In that case, just ignore 2336 * the link status message,we'll get another one 2337 * after we enable queues and actually prepared 2338 * to send traffic. 2339 */ 2340 if (adapter->state != __IAVF_RUNNING) 2341 break; 2342 2343 /* For ADq enabled VF, we reconfigure VSIs and 2344 * re-allocate queues. Hence wait till all 2345 * queues are enabled. 
2346 */ 2347 if (adapter->flags & 2348 IAVF_FLAG_QUEUES_DISABLED) 2349 break; 2350 } 2351 2352 adapter->link_up = link_up; 2353 if (link_up) { 2354 netif_tx_start_all_queues(netdev); 2355 netif_carrier_on(netdev); 2356 } else { 2357 netif_tx_stop_all_queues(netdev); 2358 netif_carrier_off(netdev); 2359 } 2360 iavf_print_link_message(adapter); 2361 break; 2362 case VIRTCHNL_EVENT_RESET_IMPENDING: 2363 dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n"); 2364 if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) { 2365 dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); 2366 iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING); 2367 } 2368 break; 2369 default: 2370 dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n", 2371 vpe->event); 2372 break; 2373 } 2374 return; 2375 } 2376 if (v_retval) { 2377 switch (v_opcode) { 2378 case VIRTCHNL_OP_ADD_VLAN: 2379 dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", 2380 iavf_stat_str(&adapter->hw, v_retval)); 2381 break; 2382 case VIRTCHNL_OP_ADD_ETH_ADDR: 2383 dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n", 2384 iavf_stat_str(&adapter->hw, v_retval)); 2385 iavf_mac_add_reject(adapter); 2386 /* restore administratively set MAC address */ 2387 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 2388 wake_up(&adapter->vc_waitqueue); 2389 break; 2390 case VIRTCHNL_OP_DEL_VLAN: 2391 dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n", 2392 iavf_stat_str(&adapter->hw, v_retval)); 2393 break; 2394 case VIRTCHNL_OP_DEL_ETH_ADDR: 2395 dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n", 2396 iavf_stat_str(&adapter->hw, v_retval)); 2397 break; 2398 case VIRTCHNL_OP_ENABLE_CHANNELS: 2399 dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n", 2400 iavf_stat_str(&adapter->hw, v_retval)); 2401 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 2402 adapter->ch_config.state = __IAVF_TC_INVALID; 2403 
netdev_reset_tc(netdev); 2404 netif_tx_start_all_queues(netdev); 2405 break; 2406 case VIRTCHNL_OP_DISABLE_CHANNELS: 2407 dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n", 2408 iavf_stat_str(&adapter->hw, v_retval)); 2409 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 2410 adapter->ch_config.state = __IAVF_TC_RUNNING; 2411 netif_tx_start_all_queues(netdev); 2412 break; 2413 case VIRTCHNL_OP_ADD_CLOUD_FILTER: { 2414 struct iavf_cloud_filter *cf, *cftmp; 2415 2416 list_for_each_entry_safe(cf, cftmp, 2417 &adapter->cloud_filter_list, 2418 list) { 2419 if (cf->state == __IAVF_CF_ADD_PENDING) { 2420 cf->state = __IAVF_CF_INVALID; 2421 dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n", 2422 iavf_stat_str(&adapter->hw, 2423 v_retval)); 2424 iavf_print_cloud_filter(adapter, 2425 &cf->f); 2426 list_del(&cf->list); 2427 kfree(cf); 2428 adapter->num_cloud_filters--; 2429 } 2430 } 2431 } 2432 break; 2433 case VIRTCHNL_OP_DEL_CLOUD_FILTER: { 2434 struct iavf_cloud_filter *cf; 2435 2436 list_for_each_entry(cf, &adapter->cloud_filter_list, 2437 list) { 2438 if (cf->state == __IAVF_CF_DEL_PENDING) { 2439 cf->state = __IAVF_CF_ACTIVE; 2440 dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n", 2441 iavf_stat_str(&adapter->hw, 2442 v_retval)); 2443 iavf_print_cloud_filter(adapter, 2444 &cf->f); 2445 } 2446 } 2447 } 2448 break; 2449 case VIRTCHNL_OP_ADD_FDIR_FILTER: { 2450 struct iavf_fdir_fltr *fdir, *fdir_tmp; 2451 2452 spin_lock_bh(&adapter->fdir_fltr_lock); 2453 list_for_each_entry_safe(fdir, fdir_tmp, 2454 &adapter->fdir_list_head, 2455 list) { 2456 if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) { 2457 dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n", 2458 iavf_stat_str(&adapter->hw, 2459 v_retval)); 2460 iavf_print_fdir_fltr(adapter, fdir); 2461 if (msglen) 2462 dev_err(&adapter->pdev->dev, 2463 "%s\n", msg); 2464 list_del(&fdir->list); 2465 iavf_dec_fdir_active_fltr(adapter, fdir); 
2466 kfree(fdir); 2467 } 2468 } 2469 spin_unlock_bh(&adapter->fdir_fltr_lock); 2470 } 2471 break; 2472 case VIRTCHNL_OP_DEL_FDIR_FILTER: { 2473 struct iavf_fdir_fltr *fdir; 2474 2475 spin_lock_bh(&adapter->fdir_fltr_lock); 2476 list_for_each_entry(fdir, &adapter->fdir_list_head, 2477 list) { 2478 if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING || 2479 fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) { 2480 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 2481 dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n", 2482 iavf_stat_str(&adapter->hw, 2483 v_retval)); 2484 iavf_print_fdir_fltr(adapter, fdir); 2485 } 2486 } 2487 spin_unlock_bh(&adapter->fdir_fltr_lock); 2488 } 2489 break; 2490 case VIRTCHNL_OP_ADD_RSS_CFG: { 2491 struct iavf_adv_rss *rss, *rss_tmp; 2492 2493 spin_lock_bh(&adapter->adv_rss_lock); 2494 list_for_each_entry_safe(rss, rss_tmp, 2495 &adapter->adv_rss_list_head, 2496 list) { 2497 if (rss->state == IAVF_ADV_RSS_ADD_PENDING) { 2498 iavf_print_adv_rss_cfg(adapter, rss, 2499 "Failed to change the input set for", 2500 NULL); 2501 list_del(&rss->list); 2502 kfree(rss); 2503 } 2504 } 2505 spin_unlock_bh(&adapter->adv_rss_lock); 2506 } 2507 break; 2508 case VIRTCHNL_OP_DEL_RSS_CFG: { 2509 struct iavf_adv_rss *rss; 2510 2511 spin_lock_bh(&adapter->adv_rss_lock); 2512 list_for_each_entry(rss, &adapter->adv_rss_list_head, 2513 list) { 2514 if (rss->state == IAVF_ADV_RSS_DEL_PENDING) { 2515 rss->state = IAVF_ADV_RSS_ACTIVE; 2516 dev_err(&adapter->pdev->dev, "Failed to delete RSS configuration, error %s\n", 2517 iavf_stat_str(&adapter->hw, 2518 v_retval)); 2519 } 2520 } 2521 spin_unlock_bh(&adapter->adv_rss_lock); 2522 } 2523 break; 2524 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 2525 dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); 2526 /* Vlan stripping could not be enabled by ethtool. 2527 * Disable it in netdev->features. 
2528 */ 2529 iavf_netdev_features_vlan_strip_set(netdev, false); 2530 break; 2531 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 2532 dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); 2533 /* Vlan stripping could not be disabled by ethtool. 2534 * Enable it in netdev->features. 2535 */ 2536 iavf_netdev_features_vlan_strip_set(netdev, true); 2537 break; 2538 case VIRTCHNL_OP_ADD_VLAN_V2: 2539 iavf_vlan_add_reject(adapter); 2540 dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", 2541 iavf_stat_str(&adapter->hw, v_retval)); 2542 break; 2543 case VIRTCHNL_OP_CONFIG_RSS_HFUNC: 2544 dev_warn(&adapter->pdev->dev, "Failed to configure hash function, error %s\n", 2545 iavf_stat_str(&adapter->hw, v_retval)); 2546 2547 if (adapter->hfunc == 2548 VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) 2549 adapter->hfunc = 2550 VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC; 2551 else 2552 adapter->hfunc = 2553 VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC; 2554 2555 break; 2556 case VIRTCHNL_OP_GET_QOS_CAPS: 2557 dev_warn(&adapter->pdev->dev, "Failed to Get Qos CAPs, error %s\n", 2558 iavf_stat_str(&adapter->hw, v_retval)); 2559 break; 2560 case VIRTCHNL_OP_CONFIG_QUANTA: 2561 dev_warn(&adapter->pdev->dev, "Failed to Config Quanta, error %s\n", 2562 iavf_stat_str(&adapter->hw, v_retval)); 2563 break; 2564 case VIRTCHNL_OP_CONFIG_QUEUE_BW: 2565 dev_warn(&adapter->pdev->dev, "Failed to Config Queue BW, error %s\n", 2566 iavf_stat_str(&adapter->hw, v_retval)); 2567 break; 2568 default: 2569 dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", 2570 v_retval, iavf_stat_str(&adapter->hw, v_retval), 2571 v_opcode); 2572 } 2573 } 2574 switch (v_opcode) { 2575 case VIRTCHNL_OP_ADD_ETH_ADDR: 2576 if (!v_retval) 2577 iavf_mac_add_ok(adapter); 2578 if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr)) 2579 if (!ether_addr_equal(netdev->dev_addr, 2580 adapter->hw.mac.addr)) { 2581 netif_addr_lock_bh(netdev); 2582 
eth_hw_addr_set(netdev, adapter->hw.mac.addr); 2583 netif_addr_unlock_bh(netdev); 2584 } 2585 wake_up(&adapter->vc_waitqueue); 2586 break; 2587 case VIRTCHNL_OP_GET_STATS: { 2588 struct iavf_eth_stats *stats = 2589 (struct iavf_eth_stats *)msg; 2590 netdev->stats.rx_packets = stats->rx_unicast + 2591 stats->rx_multicast + 2592 stats->rx_broadcast; 2593 netdev->stats.tx_packets = stats->tx_unicast + 2594 stats->tx_multicast + 2595 stats->tx_broadcast; 2596 netdev->stats.rx_bytes = stats->rx_bytes; 2597 netdev->stats.tx_bytes = stats->tx_bytes; 2598 netdev->stats.tx_errors = stats->tx_errors; 2599 netdev->stats.rx_dropped = stats->rx_discards; 2600 netdev->stats.tx_dropped = stats->tx_discards; 2601 adapter->current_stats = *stats; 2602 } 2603 break; 2604 case VIRTCHNL_OP_GET_VF_RESOURCES: { 2605 u16 len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE; 2606 2607 memcpy(adapter->vf_res, msg, min(msglen, len)); 2608 iavf_validate_num_queues(adapter); 2609 iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res); 2610 if (is_zero_ether_addr(adapter->hw.mac.addr)) { 2611 /* restore current mac address */ 2612 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 2613 } else { 2614 netif_addr_lock_bh(netdev); 2615 /* refresh current mac address if changed */ 2616 ether_addr_copy(netdev->perm_addr, 2617 adapter->hw.mac.addr); 2618 netif_addr_unlock_bh(netdev); 2619 } 2620 spin_lock_bh(&adapter->mac_vlan_list_lock); 2621 iavf_add_filter(adapter, adapter->hw.mac.addr); 2622 2623 if (VLAN_ALLOWED(adapter)) { 2624 if (!list_empty(&adapter->vlan_filter_list)) { 2625 struct iavf_vlan_filter *vlf; 2626 2627 /* re-add all VLAN filters over virtchnl */ 2628 list_for_each_entry(vlf, 2629 &adapter->vlan_filter_list, 2630 list) 2631 vlf->state = IAVF_VLAN_ADD; 2632 2633 adapter->aq_required |= 2634 IAVF_FLAG_AQ_ADD_VLAN_FILTER; 2635 } 2636 } 2637 2638 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2639 2640 iavf_activate_fdir_filters(adapter); 2641 2642 iavf_parse_vf_resource_msg(adapter); 2643 2644 
/* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the 2645 * response to VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to finish 2646 * configuration 2647 */ 2648 if (VLAN_V2_ALLOWED(adapter)) 2649 break; 2650 /* fallthrough and finish config if VIRTCHNL_VF_OFFLOAD_VLAN_V2 2651 * wasn't successfully negotiated with the PF 2652 */ 2653 } 2654 fallthrough; 2655 case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: { 2656 struct iavf_mac_filter *f; 2657 bool was_mac_changed; 2658 u64 aq_required = 0; 2659 2660 if (v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS) 2661 memcpy(&adapter->vlan_v2_caps, msg, 2662 min_t(u16, msglen, 2663 sizeof(adapter->vlan_v2_caps))); 2664 2665 iavf_process_config(adapter); 2666 adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES; 2667 iavf_schedule_finish_config(adapter); 2668 2669 iavf_set_queue_vlan_tag_loc(adapter); 2670 2671 was_mac_changed = !ether_addr_equal(netdev->dev_addr, 2672 adapter->hw.mac.addr); 2673 2674 spin_lock_bh(&adapter->mac_vlan_list_lock); 2675 2676 /* re-add all MAC filters */ 2677 list_for_each_entry(f, &adapter->mac_filter_list, list) { 2678 if (was_mac_changed && 2679 ether_addr_equal(netdev->dev_addr, f->macaddr)) 2680 ether_addr_copy(f->macaddr, 2681 adapter->hw.mac.addr); 2682 2683 f->is_new_mac = true; 2684 f->add = true; 2685 f->add_handled = false; 2686 f->remove = false; 2687 } 2688 2689 /* re-add all VLAN filters */ 2690 if (VLAN_FILTERING_ALLOWED(adapter)) { 2691 struct iavf_vlan_filter *vlf; 2692 2693 if (!list_empty(&adapter->vlan_filter_list)) { 2694 list_for_each_entry(vlf, 2695 &adapter->vlan_filter_list, 2696 list) 2697 vlf->state = IAVF_VLAN_ADD; 2698 2699 aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; 2700 } 2701 } 2702 2703 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2704 2705 netif_addr_lock_bh(netdev); 2706 eth_hw_addr_set(netdev, adapter->hw.mac.addr); 2707 netif_addr_unlock_bh(netdev); 2708 2709 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER | 2710 aq_required; 2711 } 2712 break; 2713 case 
VIRTCHNL_OP_GET_SUPPORTED_RXDIDS: 2714 if (msglen != sizeof(u64)) 2715 return; 2716 2717 adapter->supp_rxdids = *(u64 *)msg; 2718 2719 break; 2720 case VIRTCHNL_OP_1588_PTP_GET_CAPS: 2721 if (msglen != sizeof(adapter->ptp.hw_caps)) 2722 return; 2723 2724 adapter->ptp.hw_caps = *(struct virtchnl_ptp_caps *)msg; 2725 2726 /* process any state change needed due to new capabilities */ 2727 iavf_ptp_process_caps(adapter); 2728 break; 2729 case VIRTCHNL_OP_1588_PTP_GET_TIME: 2730 iavf_virtchnl_ptp_get_time(adapter, msg, msglen); 2731 break; 2732 case VIRTCHNL_OP_ENABLE_QUEUES: 2733 /* enable transmits */ 2734 iavf_irq_enable(adapter, true); 2735 wake_up(&adapter->reset_waitqueue); 2736 adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED; 2737 break; 2738 case VIRTCHNL_OP_DISABLE_QUEUES: 2739 iavf_free_all_tx_resources(adapter); 2740 iavf_free_all_rx_resources(adapter); 2741 if (adapter->state == __IAVF_DOWN_PENDING) { 2742 iavf_change_state(adapter, __IAVF_DOWN); 2743 wake_up(&adapter->down_waitqueue); 2744 } 2745 break; 2746 case VIRTCHNL_OP_VERSION: 2747 case VIRTCHNL_OP_CONFIG_IRQ_MAP: 2748 /* Don't display an error if we get these out of sequence. 2749 * If the firmware needed to get kicked, we'll get these and 2750 * it's no problem. 
2751 */ 2752 if (v_opcode != adapter->current_op) 2753 return; 2754 break; 2755 case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS: { 2756 struct virtchnl_rss_hashcfg *vrh = 2757 (struct virtchnl_rss_hashcfg *)msg; 2758 2759 if (msglen == sizeof(*vrh)) 2760 adapter->rss_hashcfg = vrh->hashcfg; 2761 else 2762 dev_warn(&adapter->pdev->dev, 2763 "Invalid message %d from PF\n", v_opcode); 2764 } 2765 break; 2766 case VIRTCHNL_OP_REQUEST_QUEUES: { 2767 struct virtchnl_vf_res_request *vfres = 2768 (struct virtchnl_vf_res_request *)msg; 2769 2770 if (vfres->num_queue_pairs != adapter->num_req_queues) { 2771 dev_info(&adapter->pdev->dev, 2772 "Requested %d queues, PF can support %d\n", 2773 adapter->num_req_queues, 2774 vfres->num_queue_pairs); 2775 adapter->num_req_queues = 0; 2776 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 2777 } 2778 } 2779 break; 2780 case VIRTCHNL_OP_ADD_CLOUD_FILTER: { 2781 struct iavf_cloud_filter *cf; 2782 2783 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 2784 if (cf->state == __IAVF_CF_ADD_PENDING) 2785 cf->state = __IAVF_CF_ACTIVE; 2786 } 2787 } 2788 break; 2789 case VIRTCHNL_OP_DEL_CLOUD_FILTER: { 2790 struct iavf_cloud_filter *cf, *cftmp; 2791 2792 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, 2793 list) { 2794 if (cf->state == __IAVF_CF_DEL_PENDING) { 2795 cf->state = __IAVF_CF_INVALID; 2796 list_del(&cf->list); 2797 kfree(cf); 2798 adapter->num_cloud_filters--; 2799 } 2800 } 2801 } 2802 break; 2803 case VIRTCHNL_OP_ADD_FDIR_FILTER: { 2804 struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg; 2805 struct iavf_fdir_fltr *fdir, *fdir_tmp; 2806 2807 spin_lock_bh(&adapter->fdir_fltr_lock); 2808 list_for_each_entry_safe(fdir, fdir_tmp, 2809 &adapter->fdir_list_head, 2810 list) { 2811 if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) { 2812 if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) { 2813 if (!iavf_is_raw_fdir(fdir)) 2814 dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n", 
2815 fdir->loc); 2816 else 2817 dev_info(&adapter->pdev->dev, "Flow Director filter (raw) for TC handle %x is added\n", 2818 TC_U32_USERHTID(fdir->cls_u32_handle)); 2819 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 2820 fdir->flow_id = add_fltr->flow_id; 2821 } else { 2822 dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n", 2823 add_fltr->status); 2824 iavf_print_fdir_fltr(adapter, fdir); 2825 list_del(&fdir->list); 2826 iavf_dec_fdir_active_fltr(adapter, fdir); 2827 kfree(fdir); 2828 } 2829 } 2830 } 2831 spin_unlock_bh(&adapter->fdir_fltr_lock); 2832 } 2833 break; 2834 case VIRTCHNL_OP_DEL_FDIR_FILTER: { 2835 struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg; 2836 struct iavf_fdir_fltr *fdir, *fdir_tmp; 2837 2838 spin_lock_bh(&adapter->fdir_fltr_lock); 2839 list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head, 2840 list) { 2841 if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { 2842 if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS || 2843 del_fltr->status == 2844 VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) { 2845 if (!iavf_is_raw_fdir(fdir)) 2846 dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n", 2847 fdir->loc); 2848 else 2849 dev_info(&adapter->pdev->dev, "Flow Director filter (raw) for TC handle %x is deleted\n", 2850 TC_U32_USERHTID(fdir->cls_u32_handle)); 2851 list_del(&fdir->list); 2852 iavf_dec_fdir_active_fltr(adapter, fdir); 2853 kfree(fdir); 2854 } else { 2855 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 2856 dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n", 2857 del_fltr->status); 2858 iavf_print_fdir_fltr(adapter, fdir); 2859 } 2860 } else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) { 2861 if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS || 2862 del_fltr->status == 2863 VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) { 2864 fdir->state = IAVF_FDIR_FLTR_INACTIVE; 2865 } else { 2866 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 2867 dev_info(&adapter->pdev->dev, 
"Failed to disable Flow Director filter with status: %d\n", 2868 del_fltr->status); 2869 iavf_print_fdir_fltr(adapter, fdir); 2870 } 2871 } 2872 } 2873 spin_unlock_bh(&adapter->fdir_fltr_lock); 2874 } 2875 break; 2876 case VIRTCHNL_OP_ADD_RSS_CFG: { 2877 struct iavf_adv_rss *rss; 2878 2879 spin_lock_bh(&adapter->adv_rss_lock); 2880 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { 2881 if (rss->state == IAVF_ADV_RSS_ADD_PENDING) { 2882 iavf_print_adv_rss_cfg(adapter, rss, 2883 "Input set change for", 2884 "successful"); 2885 rss->state = IAVF_ADV_RSS_ACTIVE; 2886 } 2887 } 2888 spin_unlock_bh(&adapter->adv_rss_lock); 2889 } 2890 break; 2891 case VIRTCHNL_OP_DEL_RSS_CFG: { 2892 struct iavf_adv_rss *rss, *rss_tmp; 2893 2894 spin_lock_bh(&adapter->adv_rss_lock); 2895 list_for_each_entry_safe(rss, rss_tmp, 2896 &adapter->adv_rss_list_head, list) { 2897 if (rss->state == IAVF_ADV_RSS_DEL_PENDING) { 2898 list_del(&rss->list); 2899 kfree(rss); 2900 } 2901 } 2902 spin_unlock_bh(&adapter->adv_rss_lock); 2903 } 2904 break; 2905 case VIRTCHNL_OP_ADD_VLAN_V2: { 2906 struct iavf_vlan_filter *f; 2907 2908 spin_lock_bh(&adapter->mac_vlan_list_lock); 2909 list_for_each_entry(f, &adapter->vlan_filter_list, list) { 2910 if (f->state == IAVF_VLAN_IS_NEW) 2911 f->state = IAVF_VLAN_ACTIVE; 2912 } 2913 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2914 } 2915 break; 2916 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 2917 /* PF enabled vlan strip on this VF. 2918 * Update netdev->features if needed to be in sync with ethtool. 2919 */ 2920 if (!v_retval) 2921 iavf_netdev_features_vlan_strip_set(netdev, true); 2922 break; 2923 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 2924 /* PF disabled vlan strip on this VF. 2925 * Update netdev->features if needed to be in sync with ethtool. 
2926 */ 2927 if (!v_retval) 2928 iavf_netdev_features_vlan_strip_set(netdev, false); 2929 break; 2930 case VIRTCHNL_OP_GET_QOS_CAPS: { 2931 u16 len = struct_size(adapter->qos_caps, cap, 2932 IAVF_MAX_QOS_TC_NUM); 2933 2934 memcpy(adapter->qos_caps, msg, min(msglen, len)); 2935 2936 adapter->aq_required |= IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE; 2937 } 2938 break; 2939 case VIRTCHNL_OP_CONFIG_QUANTA: 2940 break; 2941 case VIRTCHNL_OP_CONFIG_QUEUE_BW: { 2942 int i; 2943 /* shaper configuration is successful for all queues */ 2944 for (i = 0; i < adapter->num_active_queues; i++) 2945 adapter->tx_rings[i].q_shaper_update = false; 2946 } 2947 break; 2948 default: 2949 if (adapter->current_op && (v_opcode != adapter->current_op)) 2950 dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", 2951 adapter->current_op, v_opcode); 2952 break; 2953 } /* switch v_opcode */ 2954 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2955 } 2956