1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (C) 2023 Intel Corporation */ 3 4 #include "idpf.h" 5 #include "idpf_virtchnl.h" 6 7 static const struct net_device_ops idpf_netdev_ops; 8 9 /** 10 * idpf_init_vector_stack - Fill the MSIX vector stack with vector index 11 * @adapter: private data struct 12 * 13 * Return 0 on success, error on failure 14 */ 15 static int idpf_init_vector_stack(struct idpf_adapter *adapter) 16 { 17 struct idpf_vector_lifo *stack; 18 u16 min_vec; 19 u32 i; 20 21 mutex_lock(&adapter->vector_lock); 22 min_vec = adapter->num_msix_entries - adapter->num_avail_msix; 23 stack = &adapter->vector_stack; 24 stack->size = adapter->num_msix_entries; 25 /* set the base and top to point at start of the 'free pool' to 26 * distribute the unused vectors on-demand basis 27 */ 28 stack->base = min_vec; 29 stack->top = min_vec; 30 31 stack->vec_idx = kcalloc(stack->size, sizeof(u16), GFP_KERNEL); 32 if (!stack->vec_idx) { 33 mutex_unlock(&adapter->vector_lock); 34 35 return -ENOMEM; 36 } 37 38 for (i = 0; i < stack->size; i++) 39 stack->vec_idx[i] = i; 40 41 mutex_unlock(&adapter->vector_lock); 42 43 return 0; 44 } 45 46 /** 47 * idpf_deinit_vector_stack - zero out the MSIX vector stack 48 * @adapter: private data struct 49 */ 50 static void idpf_deinit_vector_stack(struct idpf_adapter *adapter) 51 { 52 struct idpf_vector_lifo *stack; 53 54 mutex_lock(&adapter->vector_lock); 55 stack = &adapter->vector_stack; 56 kfree(stack->vec_idx); 57 stack->vec_idx = NULL; 58 mutex_unlock(&adapter->vector_lock); 59 } 60 61 /** 62 * idpf_mb_intr_rel_irq - Free the IRQ association with the OS 63 * @adapter: adapter structure 64 * 65 * This will also disable interrupt mode and queue up mailbox task. Mailbox 66 * task will reschedule itself if not in interrupt mode. 67 */ 68 static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter) 69 { 70 clear_bit(IDPF_MB_INTR_MODE, adapter->flags); 71 kfree(free_irq(adapter->msix_entries[0].vector, adapter)); 72 queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); 73 } 74 75 /** 76 * idpf_intr_rel - Release interrupt capabilities and free memory 77 * @adapter: adapter to disable interrupts on 78 */ 79 void idpf_intr_rel(struct idpf_adapter *adapter) 80 { 81 if (!adapter->msix_entries) 82 return; 83 84 idpf_mb_intr_rel_irq(adapter); 85 pci_free_irq_vectors(adapter->pdev); 86 idpf_send_dealloc_vectors_msg(adapter); 87 idpf_deinit_vector_stack(adapter); 88 kfree(adapter->msix_entries); 89 adapter->msix_entries = NULL; 90 } 91 92 /** 93 * idpf_mb_intr_clean - Interrupt handler for the mailbox 94 * @irq: interrupt number 95 * @data: pointer to the adapter structure 96 */ 97 static irqreturn_t idpf_mb_intr_clean(int __always_unused irq, void *data) 98 { 99 struct idpf_adapter *adapter = (struct idpf_adapter *)data; 100 101 queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); 102 103 return IRQ_HANDLED; 104 } 105 106 /** 107 * idpf_mb_irq_enable - Enable MSIX interrupt for the mailbox 108 * @adapter: adapter to get the hardware address for register write 109 */ 110 static void idpf_mb_irq_enable(struct idpf_adapter *adapter) 111 { 112 struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg; 113 u32 val; 114 115 val = intr->dyn_ctl_intena_m | intr->dyn_ctl_itridx_m; 116 writel(val, intr->dyn_ctl); 117 writel(intr->icr_ena_ctlq_m, intr->icr_ena); 118 } 119 120 /** 121 * idpf_mb_intr_req_irq - Request irq for the mailbox interrupt 122 * @adapter: adapter structure to pass to the mailbox irq handler 123 */ 124 static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter) 125 { 126 int irq_num, mb_vidx = 0, err; 127 char *name; 128 129 irq_num = adapter->msix_entries[mb_vidx].vector; 130 name = kasprintf(GFP_KERNEL, "%s-%s-%d", 131 dev_driver_string(&adapter->pdev->dev), 132 "Mailbox", mb_vidx); 133 err = request_irq(irq_num, adapter->irq_mb_handler, 0, name, adapter); 134 if (err) { 135 dev_err(&adapter->pdev->dev, 136 "IRQ request for mailbox failed, error: %d\n", err); 137 138 return err; 139 } 140 141 set_bit(IDPF_MB_INTR_MODE, adapter->flags); 142 143 return 0; 144 } 145 146 /** 147 * idpf_mb_intr_init - Initialize the mailbox interrupt 148 * @adapter: adapter structure to store the mailbox vector 149 */ 150 static int idpf_mb_intr_init(struct idpf_adapter *adapter) 151 { 152 adapter->dev_ops.reg_ops.mb_intr_reg_init(adapter); 153 adapter->irq_mb_handler = idpf_mb_intr_clean; 154 155 return idpf_mb_intr_req_irq(adapter); 156 } 157 158 /** 159 * idpf_vector_lifo_push - push MSIX vector index onto stack 160 * @adapter: private data struct 161 * @vec_idx: vector index to store 162 */ 163 static int idpf_vector_lifo_push(struct idpf_adapter *adapter, u16 vec_idx) 164 { 165 struct idpf_vector_lifo *stack = &adapter->vector_stack; 166 167 lockdep_assert_held(&adapter->vector_lock); 168 169 if (stack->top == stack->base) { 170 dev_err(&adapter->pdev->dev, "Exceeded the vector stack limit: %d\n", 171 stack->top); 172 return -EINVAL; 173 } 174 175 stack->vec_idx[--stack->top] = vec_idx; 176 177 return 0; 178 } 179 180 /** 181 * idpf_vector_lifo_pop - pop MSIX vector index from stack 182 * @adapter: private data struct 183 */ 184 static int idpf_vector_lifo_pop(struct idpf_adapter *adapter) 185 { 186 struct idpf_vector_lifo *stack = &adapter->vector_stack; 187 188 lockdep_assert_held(&adapter->vector_lock); 189 190 if (stack->top == stack->size) { 191 dev_err(&adapter->pdev->dev, "No interrupt vectors are available to distribute!\n"); 192 193 return -EINVAL; 194 } 195 196 return stack->vec_idx[stack->top++]; 197 } 198 199 /** 200 * idpf_vector_stash - Store the vector indexes onto the stack 201 * @adapter: private data struct 202 * @q_vector_idxs: vector index array 203 * @vec_info: info related to the number of vectors 204 * 205 * This function is a no-op if there are no vectors indexes to be stashed 206 */ 207 static void idpf_vector_stash(struct idpf_adapter *adapter, u16 *q_vector_idxs, 208 struct idpf_vector_info *vec_info) 209 { 210 int i, base = 0; 211 u16 vec_idx; 212 213 lockdep_assert_held(&adapter->vector_lock); 214 215 if (!vec_info->num_curr_vecs) 216 return; 217 218 /* For default vports, no need to stash vector allocated from the 219 * default pool onto the stack 220 */ 221 if (vec_info->default_vport) 222 base = IDPF_MIN_Q_VEC; 223 224 for (i = vec_info->num_curr_vecs - 1; i >= base ; i--) { 225 vec_idx = q_vector_idxs[i]; 226 idpf_vector_lifo_push(adapter, vec_idx); 227 adapter->num_avail_msix++; 228 } 229 } 230 231 /** 232 * idpf_req_rel_vector_indexes - Request or release MSIX vector indexes 233 * @adapter: driver specific private structure 234 * @q_vector_idxs: vector index array 235 * @vec_info: info related to the number of vectors 236 * 237 * This is the core function to distribute the MSIX vectors acquired from the 238 * OS. It expects the caller to pass the number of vectors required and 239 * also previously allocated. First, it stashes previously allocated vector 240 * indexes on to the stack and then figures out if it can allocate requested 241 * vectors. It can wait on acquiring the mutex lock. If the caller passes 0 as 242 * requested vectors, then this function just stashes the already allocated 243 * vectors and returns 0. 244 * 245 * Returns actual number of vectors allocated on success, error value on failure 246 * If 0 is returned, implies the stack has no vectors to allocate which is also 247 * a failure case for the caller 248 */ 249 int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter, 250 u16 *q_vector_idxs, 251 struct idpf_vector_info *vec_info) 252 { 253 u16 num_req_vecs, num_alloc_vecs = 0, max_vecs; 254 struct idpf_vector_lifo *stack; 255 int i, j, vecid; 256 257 mutex_lock(&adapter->vector_lock); 258 stack = &adapter->vector_stack; 259 num_req_vecs = vec_info->num_req_vecs; 260 261 /* Stash interrupt vector indexes onto the stack if required */ 262 idpf_vector_stash(adapter, q_vector_idxs, vec_info); 263 264 if (!num_req_vecs) 265 goto rel_lock; 266 267 if (vec_info->default_vport) { 268 /* As IDPF_MIN_Q_VEC per default vport is put aside in the 269 * default pool of the stack, use them for default vports 270 */ 271 j = vec_info->index * IDPF_MIN_Q_VEC + IDPF_MBX_Q_VEC; 272 for (i = 0; i < IDPF_MIN_Q_VEC; i++) { 273 q_vector_idxs[num_alloc_vecs++] = stack->vec_idx[j++]; 274 num_req_vecs--; 275 } 276 } 277 278 /* Find if stack has enough vector to allocate */ 279 max_vecs = min(adapter->num_avail_msix, num_req_vecs); 280 281 for (j = 0; j < max_vecs; j++) { 282 vecid = idpf_vector_lifo_pop(adapter); 283 q_vector_idxs[num_alloc_vecs++] = vecid; 284 } 285 adapter->num_avail_msix -= max_vecs; 286 287 rel_lock: 288 mutex_unlock(&adapter->vector_lock); 289 290 return num_alloc_vecs; 291 } 292 293 /** 294 * idpf_intr_req - Request interrupt capabilities 295 * @adapter: adapter to enable interrupts on 296 * 297 * Returns 0 on success, negative on failure 298 */ 299 int idpf_intr_req(struct idpf_adapter *adapter) 300 { 301 u16 default_vports = idpf_get_default_vports(adapter); 302 int num_q_vecs, total_vecs, num_vec_ids; 303 int min_vectors, v_actual, err; 304 unsigned int vector; 305 u16 *vecids; 306 307 total_vecs = idpf_get_reserved_vecs(adapter); 308 num_q_vecs = total_vecs - IDPF_MBX_Q_VEC; 309 310 err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs); 311 if (err) { 312 dev_err(&adapter->pdev->dev, 313 "Failed to allocate %d vectors: %d\n", num_q_vecs, err); 314 315 return -EAGAIN; 316 } 317 318 min_vectors = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports; 319 v_actual = pci_alloc_irq_vectors(adapter->pdev, min_vectors, 320 total_vecs, PCI_IRQ_MSIX); 321 if (v_actual < min_vectors) { 322 dev_err(&adapter->pdev->dev, "Failed to allocate MSIX vectors: %d\n", 323 v_actual); 324 err = -EAGAIN; 325 goto send_dealloc_vecs; 326 } 327 328 adapter->msix_entries = kcalloc(v_actual, sizeof(struct msix_entry), 329 GFP_KERNEL); 330 331 if (!adapter->msix_entries) { 332 err = -ENOMEM; 333 goto free_irq; 334 } 335 336 adapter->mb_vector.v_idx = le16_to_cpu(adapter->caps.mailbox_vector_id); 337 338 vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL); 339 if (!vecids) { 340 err = -ENOMEM; 341 goto free_msix; 342 } 343 344 num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs, 345 &adapter->req_vec_chunks->vchunks); 346 if (num_vec_ids < v_actual) { 347 err = -EINVAL; 348 goto free_vecids; 349 } 350 351 for (vector = 0; vector < v_actual; vector++) { 352 adapter->msix_entries[vector].entry = vecids[vector]; 353 adapter->msix_entries[vector].vector = 354 pci_irq_vector(adapter->pdev, vector); 355 } 356 357 adapter->num_req_msix = total_vecs; 358 adapter->num_msix_entries = v_actual; 359 /* 'num_avail_msix' is used to distribute excess vectors to the vports 360 * after considering the minimum vectors required per each default 361 * vport 362 */ 363 adapter->num_avail_msix = v_actual - min_vectors; 364 365 /* Fill MSIX vector lifo stack with vector indexes */ 366 err = idpf_init_vector_stack(adapter); 367 if (err) 368 goto free_vecids; 369 370 err = idpf_mb_intr_init(adapter); 371 if (err) 372 goto deinit_vec_stack; 373 idpf_mb_irq_enable(adapter); 374 kfree(vecids); 375 376 return 0; 377 378 deinit_vec_stack: 379 idpf_deinit_vector_stack(adapter); 380 free_vecids: 381 kfree(vecids); 382 free_msix: 383 kfree(adapter->msix_entries); 384 adapter->msix_entries = NULL; 385 free_irq: 386 pci_free_irq_vectors(adapter->pdev); 387 send_dealloc_vecs: 388 idpf_send_dealloc_vectors_msg(adapter); 389 390 return err; 391 } 392 393 /** 394 * idpf_find_mac_filter - Search filter list for specific mac filter 395 * @vconfig: Vport config structure 396 * @macaddr: The MAC address 397 * 398 * Returns ptr to the filter object or NULL. Must be called while holding the 399 * mac_filter_list_lock. 400 **/ 401 static struct idpf_mac_filter *idpf_find_mac_filter(struct idpf_vport_config *vconfig, 402 const u8 *macaddr) 403 { 404 struct idpf_mac_filter *f; 405 406 if (!macaddr) 407 return NULL; 408 409 list_for_each_entry(f, &vconfig->user_config.mac_filter_list, list) { 410 if (ether_addr_equal(macaddr, f->macaddr)) 411 return f; 412 } 413 414 return NULL; 415 } 416 417 /** 418 * __idpf_del_mac_filter - Delete a MAC filter from the filter list 419 * @vport_config: Vport config structure 420 * @macaddr: The MAC address 421 * 422 * Returns 0 on success, error value on failure 423 **/ 424 static int __idpf_del_mac_filter(struct idpf_vport_config *vport_config, 425 const u8 *macaddr) 426 { 427 struct idpf_mac_filter *f; 428 429 spin_lock_bh(&vport_config->mac_filter_list_lock); 430 f = idpf_find_mac_filter(vport_config, macaddr); 431 if (f) { 432 list_del(&f->list); 433 kfree(f); 434 } 435 spin_unlock_bh(&vport_config->mac_filter_list_lock); 436 437 return 0; 438 } 439 440 /** 441 * idpf_del_mac_filter - Delete a MAC filter from the filter list 442 * @vport: Main vport structure 443 * @np: Netdev private structure 444 * @macaddr: The MAC address 445 * @async: Don't wait for return message 446 * 447 * Removes filter from list and if interface is up, tells hardware about the 448 * removed filter. 449 **/ 450 static int idpf_del_mac_filter(struct idpf_vport *vport, 451 struct idpf_netdev_priv *np, 452 const u8 *macaddr, bool async) 453 { 454 struct idpf_vport_config *vport_config; 455 struct idpf_mac_filter *f; 456 457 vport_config = np->adapter->vport_config[np->vport_idx]; 458 459 spin_lock_bh(&vport_config->mac_filter_list_lock); 460 f = idpf_find_mac_filter(vport_config, macaddr); 461 if (f) { 462 f->remove = true; 463 } else { 464 spin_unlock_bh(&vport_config->mac_filter_list_lock); 465 466 return -EINVAL; 467 } 468 spin_unlock_bh(&vport_config->mac_filter_list_lock); 469 470 if (np->state == __IDPF_VPORT_UP) { 471 int err; 472 473 err = idpf_add_del_mac_filters(vport, np, false, async); 474 if (err) 475 return err; 476 } 477 478 return __idpf_del_mac_filter(vport_config, macaddr); 479 } 480 481 /** 482 * __idpf_add_mac_filter - Add mac filter helper function 483 * @vport_config: Vport config structure 484 * @macaddr: Address to add 485 * 486 * Takes mac_filter_list_lock spinlock to add new filter to list. 487 */ 488 static int __idpf_add_mac_filter(struct idpf_vport_config *vport_config, 489 const u8 *macaddr) 490 { 491 struct idpf_mac_filter *f; 492 493 spin_lock_bh(&vport_config->mac_filter_list_lock); 494 495 f = idpf_find_mac_filter(vport_config, macaddr); 496 if (f) { 497 f->remove = false; 498 spin_unlock_bh(&vport_config->mac_filter_list_lock); 499 500 return 0; 501 } 502 503 f = kzalloc(sizeof(*f), GFP_ATOMIC); 504 if (!f) { 505 spin_unlock_bh(&vport_config->mac_filter_list_lock); 506 507 return -ENOMEM; 508 } 509 510 ether_addr_copy(f->macaddr, macaddr); 511 list_add_tail(&f->list, &vport_config->user_config.mac_filter_list); 512 f->add = true; 513 514 spin_unlock_bh(&vport_config->mac_filter_list_lock); 515 516 return 0; 517 } 518 519 /** 520 * idpf_add_mac_filter - Add a mac filter to the filter list 521 * @vport: Main vport structure 522 * @np: Netdev private structure 523 * @macaddr: The MAC address 524 * @async: Don't wait for return message 525 * 526 * Returns 0 on success or error on failure. If interface is up, we'll also 527 * send the virtchnl message to tell hardware about the filter. 528 **/ 529 static int idpf_add_mac_filter(struct idpf_vport *vport, 530 struct idpf_netdev_priv *np, 531 const u8 *macaddr, bool async) 532 { 533 struct idpf_vport_config *vport_config; 534 int err; 535 536 vport_config = np->adapter->vport_config[np->vport_idx]; 537 err = __idpf_add_mac_filter(vport_config, macaddr); 538 if (err) 539 return err; 540 541 if (np->state == __IDPF_VPORT_UP) 542 err = idpf_add_del_mac_filters(vport, np, true, async); 543 544 return err; 545 } 546 547 /** 548 * idpf_del_all_mac_filters - Delete all MAC filters in list 549 * @vport: main vport struct 550 * 551 * Takes mac_filter_list_lock spinlock. Deletes all filters 552 */ 553 static void idpf_del_all_mac_filters(struct idpf_vport *vport) 554 { 555 struct idpf_vport_config *vport_config; 556 struct idpf_mac_filter *f, *ftmp; 557 558 vport_config = vport->adapter->vport_config[vport->idx]; 559 spin_lock_bh(&vport_config->mac_filter_list_lock); 560 561 list_for_each_entry_safe(f, ftmp, &vport_config->user_config.mac_filter_list, 562 list) { 563 list_del(&f->list); 564 kfree(f); 565 } 566 567 spin_unlock_bh(&vport_config->mac_filter_list_lock); 568 } 569 570 /** 571 * idpf_restore_mac_filters - Re-add all MAC filters in list 572 * @vport: main vport struct 573 * 574 * Takes mac_filter_list_lock spinlock. Sets add field to true for filters to 575 * resync filters back to HW. 576 */ 577 static void idpf_restore_mac_filters(struct idpf_vport *vport) 578 { 579 struct idpf_vport_config *vport_config; 580 struct idpf_mac_filter *f; 581 582 vport_config = vport->adapter->vport_config[vport->idx]; 583 spin_lock_bh(&vport_config->mac_filter_list_lock); 584 585 list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list) 586 f->add = true; 587 588 spin_unlock_bh(&vport_config->mac_filter_list_lock); 589 590 idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev), 591 true, false); 592 } 593 594 /** 595 * idpf_remove_mac_filters - Remove all MAC filters in list 596 * @vport: main vport struct 597 * 598 * Takes mac_filter_list_lock spinlock. Sets remove field to true for filters 599 * to remove filters in HW. 600 */ 601 static void idpf_remove_mac_filters(struct idpf_vport *vport) 602 { 603 struct idpf_vport_config *vport_config; 604 struct idpf_mac_filter *f; 605 606 vport_config = vport->adapter->vport_config[vport->idx]; 607 spin_lock_bh(&vport_config->mac_filter_list_lock); 608 609 list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list) 610 f->remove = true; 611 612 spin_unlock_bh(&vport_config->mac_filter_list_lock); 613 614 idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev), 615 false, false); 616 } 617 618 /** 619 * idpf_deinit_mac_addr - deinitialize mac address for vport 620 * @vport: main vport structure 621 */ 622 static void idpf_deinit_mac_addr(struct idpf_vport *vport) 623 { 624 struct idpf_vport_config *vport_config; 625 struct idpf_mac_filter *f; 626 627 vport_config = vport->adapter->vport_config[vport->idx]; 628 629 spin_lock_bh(&vport_config->mac_filter_list_lock); 630 631 f = idpf_find_mac_filter(vport_config, vport->default_mac_addr); 632 if (f) { 633 list_del(&f->list); 634 kfree(f); 635 } 636 637 spin_unlock_bh(&vport_config->mac_filter_list_lock); 638 } 639 640 /** 641 * idpf_init_mac_addr - initialize mac address for vport 642 * @vport: main vport structure 643 * @netdev: pointer to netdev struct associated with this vport 644 */ 645 static int idpf_init_mac_addr(struct idpf_vport *vport, 646 struct net_device *netdev) 647 { 648 struct idpf_netdev_priv *np = netdev_priv(netdev); 649 struct idpf_adapter *adapter = vport->adapter; 650 int err; 651 652 if (is_valid_ether_addr(vport->default_mac_addr)) { 653 eth_hw_addr_set(netdev, vport->default_mac_addr); 654 ether_addr_copy(netdev->perm_addr, vport->default_mac_addr); 655 656 return idpf_add_mac_filter(vport, np, vport->default_mac_addr, 657 false); 658 } 659 660 if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, 661 VIRTCHNL2_CAP_MACFILTER)) { 662 dev_err(&adapter->pdev->dev, 663 "MAC address is not provided and capability is not set\n"); 664 665 return -EINVAL; 666 } 667 668 eth_hw_addr_random(netdev); 669 err = idpf_add_mac_filter(vport, np, netdev->dev_addr, false); 670 if (err) 671 return err; 672 673 dev_info(&adapter->pdev->dev, "Invalid MAC address %pM, using random %pM\n", 674 vport->default_mac_addr, netdev->dev_addr); 675 ether_addr_copy(vport->default_mac_addr, netdev->dev_addr); 676 677 return 0; 678 } 679 680 /** 681 * idpf_cfg_netdev - Allocate, configure and register a netdev 682 * @vport: main vport structure 683 * 684 * Returns 0 on success, negative value on failure. 685 */ 686 static int idpf_cfg_netdev(struct idpf_vport *vport) 687 { 688 struct idpf_adapter *adapter = vport->adapter; 689 struct idpf_vport_config *vport_config; 690 netdev_features_t other_offloads = 0; 691 netdev_features_t csum_offloads = 0; 692 netdev_features_t tso_offloads = 0; 693 netdev_features_t dflt_features; 694 struct idpf_netdev_priv *np; 695 struct net_device *netdev; 696 u16 idx = vport->idx; 697 int err; 698 699 vport_config = adapter->vport_config[idx]; 700 701 /* It's possible we already have a netdev allocated and registered for 702 * this vport 703 */ 704 if (test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags)) { 705 netdev = adapter->netdevs[idx]; 706 np = netdev_priv(netdev); 707 np->vport = vport; 708 np->vport_idx = vport->idx; 709 np->vport_id = vport->vport_id; 710 vport->netdev = netdev; 711 712 return idpf_init_mac_addr(vport, netdev); 713 } 714 715 netdev = alloc_etherdev_mqs(sizeof(struct idpf_netdev_priv), 716 vport_config->max_q.max_txq, 717 vport_config->max_q.max_rxq); 718 if (!netdev) 719 return -ENOMEM; 720 721 vport->netdev = netdev; 722 np = netdev_priv(netdev); 723 np->vport = vport; 724 np->adapter = adapter; 725 np->vport_idx = vport->idx; 726 np->vport_id = vport->vport_id; 727 728 spin_lock_init(&np->stats_lock); 729 730 err = idpf_init_mac_addr(vport, netdev); 731 if (err) { 732 free_netdev(vport->netdev); 733 vport->netdev = NULL; 734 735 return err; 736 } 737 738 /* assign netdev_ops */ 739 netdev->netdev_ops = &idpf_netdev_ops; 740 741 /* setup watchdog timeout value to be 5 second */ 742 netdev->watchdog_timeo = 5 * HZ; 743 744 netdev->dev_port = idx; 745 746 /* configure default MTU size */ 747 netdev->min_mtu = ETH_MIN_MTU; 748 netdev->max_mtu = vport->max_mtu; 749 750 dflt_features = NETIF_F_SG | 751 NETIF_F_HIGHDMA; 752 753 if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) 754 dflt_features |= NETIF_F_RXHASH; 755 if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V4)) 756 csum_offloads |= NETIF_F_IP_CSUM; 757 if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V6)) 758 csum_offloads |= NETIF_F_IPV6_CSUM; 759 if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM)) 760 csum_offloads |= NETIF_F_RXCSUM; 761 if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_SCTP_CSUM)) 762 csum_offloads |= NETIF_F_SCTP_CRC; 763 764 if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP)) 765 tso_offloads |= NETIF_F_TSO; 766 if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP)) 767 tso_offloads |= NETIF_F_TSO6; 768 if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS, 769 VIRTCHNL2_CAP_SEG_IPV4_UDP | 770 VIRTCHNL2_CAP_SEG_IPV6_UDP)) 771 tso_offloads |= NETIF_F_GSO_UDP_L4; 772 if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC)) 773 other_offloads |= NETIF_F_GRO_HW; 774 if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK)) 775 other_offloads |= NETIF_F_LOOPBACK; 776 777 netdev->features |= dflt_features | csum_offloads | tso_offloads; 778 netdev->hw_features |= netdev->features | other_offloads; 779 netdev->vlan_features |= netdev->features | other_offloads; 780 netdev->hw_enc_features |= dflt_features | other_offloads; 781 idpf_set_ethtool_ops(netdev); 782 netif_set_affinity_auto(netdev); 783 SET_NETDEV_DEV(netdev, &adapter->pdev->dev); 784 785 /* carrier off on init to avoid Tx hangs */ 786 netif_carrier_off(netdev); 787 788 /* make sure transmit queues start off as stopped */ 789 netif_tx_stop_all_queues(netdev); 790 791 /* The vport can be arbitrarily released so we need to also track 792 * netdevs in the adapter struct 793 */ 794 adapter->netdevs[idx] = netdev; 795 796 return 0; 797 } 798 799 /** 800 * idpf_get_free_slot - get the next non-NULL location index in array 801 * @adapter: adapter in which to look for a free vport slot 802 */ 803 static int idpf_get_free_slot(struct idpf_adapter *adapter) 804 { 805 unsigned int i; 806 807 for (i = 0; i < adapter->max_vports; i++) { 808 if (!adapter->vports[i]) 809 return i; 810 } 811 812 return IDPF_NO_FREE_SLOT; 813 } 814 815 /** 816 * idpf_remove_features - Turn off feature configs 817 * @vport: virtual port structure 818 */ 819 static void idpf_remove_features(struct idpf_vport *vport) 820 { 821 struct idpf_adapter *adapter = vport->adapter; 822 823 if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER)) 824 idpf_remove_mac_filters(vport); 825 } 826 827 /** 828 * idpf_vport_stop - Disable a vport 829 * @vport: vport to disable 830 */ 831 static void idpf_vport_stop(struct idpf_vport *vport) 832 { 833 struct idpf_netdev_priv *np = netdev_priv(vport->netdev); 834 835 if (np->state <= __IDPF_VPORT_DOWN) 836 return; 837 838 netif_carrier_off(vport->netdev); 839 netif_tx_disable(vport->netdev); 840 841 idpf_send_disable_vport_msg(vport); 842 idpf_send_disable_queues_msg(vport); 843 idpf_send_map_unmap_queue_vector_msg(vport, false); 844 /* Normally we ask for queues in create_vport, but if the number of 845 * initially requested queues have changed, for example via ethtool 846 * set channels, we do delete queues and then add the queues back 847 * instead of deleting and reallocating the vport. 848 */ 849 if (test_and_clear_bit(IDPF_VPORT_DEL_QUEUES, vport->flags)) 850 idpf_send_delete_queues_msg(vport); 851 852 idpf_remove_features(vport); 853 854 vport->link_up = false; 855 idpf_vport_intr_deinit(vport); 856 idpf_vport_queues_rel(vport); 857 idpf_vport_intr_rel(vport); 858 np->state = __IDPF_VPORT_DOWN; 859 } 860 861 /** 862 * idpf_stop - Disables a network interface 863 * @netdev: network interface device structure 864 * 865 * The stop entry point is called when an interface is de-activated by the OS, 866 * and the netdevice enters the DOWN state. The hardware is still under the 867 * driver's control, but the netdev interface is disabled. 868 * 869 * Returns success only - not allowed to fail 870 */ 871 static int idpf_stop(struct net_device *netdev) 872 { 873 struct idpf_netdev_priv *np = netdev_priv(netdev); 874 struct idpf_vport *vport; 875 876 if (test_bit(IDPF_REMOVE_IN_PROG, np->adapter->flags)) 877 return 0; 878 879 idpf_vport_ctrl_lock(netdev); 880 vport = idpf_netdev_to_vport(netdev); 881 882 idpf_vport_stop(vport); 883 884 idpf_vport_ctrl_unlock(netdev); 885 886 return 0; 887 } 888 889 /** 890 * idpf_decfg_netdev - Unregister the netdev 891 * @vport: vport for which netdev to be unregistered 892 */ 893 static void idpf_decfg_netdev(struct idpf_vport *vport) 894 { 895 struct idpf_adapter *adapter = vport->adapter; 896 u16 idx = vport->idx; 897 898 kfree(vport->rx_ptype_lkup); 899 vport->rx_ptype_lkup = NULL; 900 901 if (test_and_clear_bit(IDPF_VPORT_REG_NETDEV, 902 adapter->vport_config[idx]->flags)) { 903 unregister_netdev(vport->netdev); 904 free_netdev(vport->netdev); 905 } 906 vport->netdev = NULL; 907 908 adapter->netdevs[idx] = NULL; 909 } 910 911 /** 912 * idpf_vport_rel - Delete a vport and free its resources 913 * @vport: the vport being removed 914 */ 915 static void idpf_vport_rel(struct idpf_vport *vport) 916 { 917 struct idpf_adapter *adapter = vport->adapter; 918 struct idpf_vport_config *vport_config; 919 struct idpf_vector_info vec_info; 920 struct idpf_rss_data *rss_data; 921 struct idpf_vport_max_q max_q; 922 u16 idx = vport->idx; 923 924 vport_config = adapter->vport_config[vport->idx]; 925 idpf_deinit_rss(vport); 926 rss_data = &vport_config->user_config.rss_data; 927 kfree(rss_data->rss_key); 928 rss_data->rss_key = NULL; 929 930 idpf_send_destroy_vport_msg(vport); 931 932 /* Release all max queues allocated to the adapter's pool */ 933 max_q.max_rxq = vport_config->max_q.max_rxq; 934 max_q.max_txq = vport_config->max_q.max_txq; 935 max_q.max_bufq = vport_config->max_q.max_bufq; 936 max_q.max_complq = vport_config->max_q.max_complq; 937 idpf_vport_dealloc_max_qs(adapter, &max_q); 938 939 /* Release all the allocated vectors on the stack */ 940 vec_info.num_req_vecs = 0; 941 vec_info.num_curr_vecs = vport->num_q_vectors; 942 vec_info.default_vport = vport->default_vport; 943 944 idpf_req_rel_vector_indexes(adapter, vport->q_vector_idxs, &vec_info); 945 946 kfree(vport->q_vector_idxs); 947 vport->q_vector_idxs = NULL; 948 949 kfree(adapter->vport_params_recvd[idx]); 950 adapter->vport_params_recvd[idx] = NULL; 951 kfree(adapter->vport_params_reqd[idx]); 952 adapter->vport_params_reqd[idx] = NULL; 953 if (adapter->vport_config[idx]) { 954 kfree(adapter->vport_config[idx]->req_qs_chunks); 955 adapter->vport_config[idx]->req_qs_chunks = NULL; 956 } 957 kfree(vport); 958 adapter->num_alloc_vports--; 959 } 960 961 /** 962 * idpf_vport_dealloc - cleanup and release a given vport 963 * @vport: pointer to idpf vport structure 964 * 965 * returns nothing 966 */ 967 static void idpf_vport_dealloc(struct idpf_vport *vport) 968 { 969 struct idpf_adapter *adapter = vport->adapter; 970 unsigned int i = vport->idx; 971 972 idpf_deinit_mac_addr(vport); 973 idpf_vport_stop(vport); 974 975 if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags)) 976 idpf_decfg_netdev(vport); 977 if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) 978 idpf_del_all_mac_filters(vport); 979 980 if (adapter->netdevs[i]) { 981 struct idpf_netdev_priv *np = netdev_priv(adapter->netdevs[i]); 982 983 np->vport = NULL; 984 } 985 986 idpf_vport_rel(vport); 987 988 adapter->vports[i] = NULL; 989 adapter->next_vport = idpf_get_free_slot(adapter); 990 } 991 992 /** 993 * idpf_is_hsplit_supported - check whether the header split is supported 994 * @vport: virtual port to check the capability for 995 * 996 * Return: true if it's supported by the HW/FW, false if not. 997 */ 998 static bool idpf_is_hsplit_supported(const struct idpf_vport *vport) 999 { 1000 return idpf_is_queue_model_split(vport->rxq_model) && 1001 idpf_is_cap_ena_all(vport->adapter, IDPF_HSPLIT_CAPS, 1002 IDPF_CAP_HSPLIT); 1003 } 1004 1005 /** 1006 * idpf_vport_get_hsplit - get the current header split feature state 1007 * @vport: virtual port to query the state for 1008 * 1009 * Return: ``ETHTOOL_TCP_DATA_SPLIT_UNKNOWN`` if not supported, 1010 * ``ETHTOOL_TCP_DATA_SPLIT_DISABLED`` if disabled, 1011 * ``ETHTOOL_TCP_DATA_SPLIT_ENABLED`` if active. 1012 */ 1013 u8 idpf_vport_get_hsplit(const struct idpf_vport *vport) 1014 { 1015 const struct idpf_vport_user_config_data *config; 1016 1017 if (!idpf_is_hsplit_supported(vport)) 1018 return ETHTOOL_TCP_DATA_SPLIT_UNKNOWN; 1019 1020 config = &vport->adapter->vport_config[vport->idx]->user_config; 1021 1022 return test_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags) ? 1023 ETHTOOL_TCP_DATA_SPLIT_ENABLED : 1024 ETHTOOL_TCP_DATA_SPLIT_DISABLED; 1025 } 1026 1027 /** 1028 * idpf_vport_set_hsplit - enable or disable header split on a given vport 1029 * @vport: virtual port to configure 1030 * @val: Ethtool flag controlling the header split state 1031 * 1032 * Return: true on success, false if not supported by the HW. 1033 */ 1034 bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val) 1035 { 1036 struct idpf_vport_user_config_data *config; 1037 1038 if (!idpf_is_hsplit_supported(vport)) 1039 return val == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN; 1040 1041 config = &vport->adapter->vport_config[vport->idx]->user_config; 1042 1043 switch (val) { 1044 case ETHTOOL_TCP_DATA_SPLIT_UNKNOWN: 1045 /* Default is to enable */ 1046 case ETHTOOL_TCP_DATA_SPLIT_ENABLED: 1047 __set_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags); 1048 return true; 1049 case ETHTOOL_TCP_DATA_SPLIT_DISABLED: 1050 __clear_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags); 1051 return true; 1052 default: 1053 return false; 1054 } 1055 } 1056 1057 /** 1058 * idpf_vport_alloc - Allocates the next available struct vport in the adapter 1059 * @adapter: board private structure 1060 * @max_q: vport max queue info 1061 * 1062 * returns a pointer to a vport on success, NULL on failure. 1063 */ 1064 static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter, 1065 struct idpf_vport_max_q *max_q) 1066 { 1067 struct idpf_rss_data *rss_data; 1068 u16 idx = adapter->next_vport; 1069 struct idpf_vport *vport; 1070 u16 num_max_q; 1071 1072 if (idx == IDPF_NO_FREE_SLOT) 1073 return NULL; 1074 1075 vport = kzalloc(sizeof(*vport), GFP_KERNEL); 1076 if (!vport) 1077 return vport; 1078 1079 if (!adapter->vport_config[idx]) { 1080 struct idpf_vport_config *vport_config; 1081 1082 vport_config = kzalloc(sizeof(*vport_config), GFP_KERNEL); 1083 if (!vport_config) { 1084 kfree(vport); 1085 1086 return NULL; 1087 } 1088 1089 adapter->vport_config[idx] = vport_config; 1090 } 1091 1092 vport->idx = idx; 1093 vport->adapter = adapter; 1094 vport->compln_clean_budget = IDPF_TX_COMPLQ_CLEAN_BUDGET; 1095 vport->default_vport = adapter->num_alloc_vports < 1096 idpf_get_default_vports(adapter); 1097 1098 num_max_q = max(max_q->max_txq, max_q->max_rxq); 1099 vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL); 1100 if (!vport->q_vector_idxs) 1101 goto free_vport; 1102 1103 idpf_vport_init(vport, max_q); 1104 1105 /* This alloc is done separate from the LUT because it's not strictly 1106 * dependent on how many queues we have. If we change number of queues 1107 * and soft reset we'll need a new LUT but the key can remain the same 1108 * for as long as the vport exists. 1109 */ 1110 rss_data = &adapter->vport_config[idx]->user_config.rss_data; 1111 rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL); 1112 if (!rss_data->rss_key) 1113 goto free_vector_idxs; 1114 1115 /* Initialize default rss key */ 1116 netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size); 1117 1118 /* fill vport slot in the adapter struct */ 1119 adapter->vports[idx] = vport; 1120 adapter->vport_ids[idx] = idpf_get_vport_id(vport); 1121 1122 adapter->num_alloc_vports++; 1123 /* prepare adapter->next_vport for next use */ 1124 adapter->next_vport = idpf_get_free_slot(adapter); 1125 1126 return vport; 1127 1128 free_vector_idxs: 1129 kfree(vport->q_vector_idxs); 1130 free_vport: 1131 kfree(vport); 1132 1133 return NULL; 1134 } 1135 1136 /** 1137 * idpf_get_stats64 - get statistics for network device structure 1138 * @netdev: network interface device structure 1139 * @stats: main device statistics structure 1140 */ 1141 static void idpf_get_stats64(struct net_device *netdev, 1142 struct rtnl_link_stats64 *stats) 1143 { 1144 struct idpf_netdev_priv *np = netdev_priv(netdev); 1145 1146 spin_lock_bh(&np->stats_lock); 1147 *stats = np->netstats; 1148 spin_unlock_bh(&np->stats_lock); 1149 } 1150 1151 /** 1152 * idpf_statistics_task - Delayed task to get statistics over mailbox 1153 * @work: work_struct handle to our data 1154 */ 1155 void idpf_statistics_task(struct work_struct *work) 1156 { 1157 struct idpf_adapter *adapter; 1158 int i; 1159 1160 adapter = container_of(work, struct idpf_adapter, stats_task.work); 1161 1162 for (i = 0; i < adapter->max_vports; i++) { 1163 struct idpf_vport *vport = adapter->vports[i]; 1164 1165 if (vport && !test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags)) 1166 idpf_send_get_stats_msg(vport); 1167 } 1168 1169 queue_delayed_work(adapter->stats_wq, &adapter->stats_task, 1170 msecs_to_jiffies(10000)); 1171 } 1172 1173 /** 1174 * idpf_mbx_task - Delayed task to handle mailbox responses 1175 * @work: work_struct handle 1176 */ 1177 void idpf_mbx_task(struct work_struct *work) 1178 { 1179 struct idpf_adapter *adapter; 1180 1181 adapter = container_of(work, struct idpf_adapter, mbx_task.work); 1182 1183 if (test_bit(IDPF_MB_INTR_MODE, adapter->flags)) 1184 idpf_mb_irq_enable(adapter); 1185 else 1186 queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 1187 msecs_to_jiffies(300)); 1188 1189 idpf_recv_mb_msg(adapter); 1190 } 1191 1192 /** 1193 * idpf_service_task - Delayed task for handling mailbox responses 1194 * @work: work_struct handle to our data 1195 * 1196 */ 1197 void idpf_service_task(struct work_struct *work) 1198 { 1199 struct idpf_adapter *adapter; 1200 1201 adapter = container_of(work, struct idpf_adapter, serv_task.work); 1202 1203 if (idpf_is_reset_detected(adapter) && 1204 !idpf_is_reset_in_prog(adapter) && 1205 !test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) { 1206 dev_info(&adapter->pdev->dev, "HW reset detected\n"); 1207 set_bit(IDPF_HR_FUNC_RESET, adapter->flags); 1208 queue_delayed_work(adapter->vc_event_wq, 1209 &adapter->vc_event_task, 1210 msecs_to_jiffies(10)); 1211 } 1212 1213 queue_delayed_work(adapter->serv_wq, &adapter->serv_task, 1214 msecs_to_jiffies(300)); 1215 } 1216 1217 /** 1218 * idpf_restore_features - Restore feature configs 1219 * @vport: virtual port structure 1220 */ 1221 static void idpf_restore_features(struct idpf_vport *vport) 1222 { 1223 struct idpf_adapter *adapter = vport->adapter; 1224 1225 if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER)) 1226 idpf_restore_mac_filters(vport); 1227 } 1228 1229 /** 1230 * idpf_set_real_num_queues - set number of queues for netdev 1231 * @vport: virtual port structure 1232 * 1233 * Returns 0 on success, negative on failure. 1234 */ 1235 static int idpf_set_real_num_queues(struct idpf_vport *vport) 1236 { 1237 int err; 1238 1239 err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq); 1240 if (err) 1241 return err; 1242 1243 return netif_set_real_num_tx_queues(vport->netdev, vport->num_txq); 1244 } 1245 1246 /** 1247 * idpf_up_complete - Complete interface up sequence 1248 * @vport: virtual port structure 1249 * 1250 * Returns 0 on success, negative on failure. 1251 */ 1252 static int idpf_up_complete(struct idpf_vport *vport) 1253 { 1254 struct idpf_netdev_priv *np = netdev_priv(vport->netdev); 1255 1256 if (vport->link_up && !netif_carrier_ok(vport->netdev)) { 1257 netif_carrier_on(vport->netdev); 1258 netif_tx_start_all_queues(vport->netdev); 1259 } 1260 1261 np->state = __IDPF_VPORT_UP; 1262 1263 return 0; 1264 } 1265 1266 /** 1267 * idpf_rx_init_buf_tail - Write initial buffer ring tail value 1268 * @vport: virtual port struct 1269 */ 1270 static void idpf_rx_init_buf_tail(struct idpf_vport *vport) 1271 { 1272 int i, j; 1273 1274 for (i = 0; i < vport->num_rxq_grp; i++) { 1275 struct idpf_rxq_group *grp = &vport->rxq_grps[i]; 1276 1277 if (idpf_is_queue_model_split(vport->rxq_model)) { 1278 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { 1279 const struct idpf_buf_queue *q = 1280 &grp->splitq.bufq_sets[j].bufq; 1281 1282 writel(q->next_to_alloc, q->tail); 1283 } 1284 } else { 1285 for (j = 0; j < grp->singleq.num_rxq; j++) { 1286 const struct idpf_rx_queue *q = 1287 grp->singleq.rxqs[j]; 1288 1289 writel(q->next_to_alloc, q->tail); 1290 } 1291 } 1292 } 1293 } 1294 1295 /** 1296 * idpf_vport_open - Bring up a vport 1297 * @vport: vport to bring up 1298 */ 1299 static int idpf_vport_open(struct idpf_vport *vport) 1300 { 1301 struct idpf_netdev_priv *np = netdev_priv(vport->netdev); 1302 struct idpf_adapter *adapter = vport->adapter; 1303 struct idpf_vport_config *vport_config; 1304 int err; 1305 1306 if (np->state != __IDPF_VPORT_DOWN) 1307 return -EBUSY; 1308 1309 /* we do not allow interface up just yet */ 1310 netif_carrier_off(vport->netdev); 1311 1312 err = idpf_vport_intr_alloc(vport); 1313 if (err) { 1314 dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n", 1315 vport->vport_id, err); 1316 return err; 1317 } 1318 1319 err = idpf_vport_queues_alloc(vport); 1320 if (err) 1321 goto intr_rel; 1322 1323 err = idpf_vport_queue_ids_init(vport); 1324 if (err) { 1325 dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n", 1326 vport->vport_id, err); 1327 goto queues_rel; 1328 } 1329 1330 err = idpf_vport_intr_init(vport); 1331 if (err) { 1332 dev_err(&adapter->pdev->dev, "Failed to initialize interrupts for vport %u: %d\n", 1333 vport->vport_id, err); 1334 goto queues_rel; 1335 } 1336 1337 err = idpf_rx_bufs_init_all(vport); 1338 if (err) { 1339 dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n", 1340 vport->vport_id, err); 1341 goto queues_rel; 1342 } 1343 1344 err = idpf_queue_reg_init(vport); 1345 if (err) { 1346 dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n", 1347 vport->vport_id, err); 1348 goto queues_rel; 1349 } 1350 1351 idpf_rx_init_buf_tail(vport); 1352 idpf_vport_intr_ena(vport); 1353 1354 err = idpf_send_config_queues_msg(vport); 1355 if (err) { 1356 dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n", 1357 vport->vport_id, err); 1358 goto intr_deinit; 1359 } 1360 1361 err = idpf_send_map_unmap_queue_vector_msg(vport, true); 1362 if (err) { 1363 dev_err(&adapter->pdev->dev, "Failed to map queue vectors for vport %u: %d\n", 1364 vport->vport_id, err); 1365 goto intr_deinit; 1366 } 1367 1368 err = idpf_send_enable_queues_msg(vport); 1369 if (err) { 1370 dev_err(&adapter->pdev->dev, "Failed to enable queues for vport %u: %d\n", 1371 vport->vport_id, err); 1372 goto unmap_queue_vectors; 1373 } 1374 1375 err = idpf_send_enable_vport_msg(vport); 1376 if (err) { 1377 dev_err(&adapter->pdev->dev, "Failed to enable vport %u: %d\n", 1378 vport->vport_id, err); 1379 err = -EAGAIN; 1380 goto disable_queues; 1381 } 1382 1383 idpf_restore_features(vport); 1384 1385 vport_config = adapter->vport_config[vport->idx]; 1386 if (vport_config->user_config.rss_data.rss_lut) 1387 err = idpf_config_rss(vport); 1388 else 1389 err = idpf_init_rss(vport); 1390 if (err) { 1391 dev_err(&adapter->pdev->dev, "Failed to initialize RSS for vport %u: %d\n", 1392 vport->vport_id, err); 1393 goto disable_vport; 1394 } 1395 1396 err = idpf_up_complete(vport); 1397 if (err) { 1398 dev_err(&adapter->pdev->dev, "Failed to complete interface up for vport %u: %d\n", 1399 vport->vport_id, err); 1400 goto deinit_rss; 1401 } 1402 1403 return 0; 1404 1405 deinit_rss: 1406 idpf_deinit_rss(vport); 1407 disable_vport: 1408 idpf_send_disable_vport_msg(vport); 1409 disable_queues: 1410 idpf_send_disable_queues_msg(vport); 1411 unmap_queue_vectors: 1412 idpf_send_map_unmap_queue_vector_msg(vport, false); 1413 intr_deinit: 1414 idpf_vport_intr_deinit(vport); 1415 queues_rel: 1416 idpf_vport_queues_rel(vport); 1417 intr_rel: 1418 idpf_vport_intr_rel(vport); 1419 1420 return err; 1421 } 1422 1423 /** 1424 * idpf_init_task - Delayed initialization task 1425 * @work: work_struct handle to our data 1426 * 1427 * Init task finishes up pending work started in probe. Due to the asynchronous 1428 * nature in which the device communicates with hardware, we may have to wait 1429 * several milliseconds to get a response. Instead of busy polling in probe, 1430 * pulling it out into a delayed work task prevents us from bogging down the 1431 * whole system waiting for a response from hardware. 1432 */ 1433 void idpf_init_task(struct work_struct *work) 1434 { 1435 struct idpf_vport_config *vport_config; 1436 struct idpf_vport_max_q max_q; 1437 struct idpf_adapter *adapter; 1438 struct idpf_netdev_priv *np; 1439 struct idpf_vport *vport; 1440 u16 num_default_vports; 1441 struct pci_dev *pdev; 1442 bool default_vport; 1443 int index, err; 1444 1445 adapter = container_of(work, struct idpf_adapter, init_task.work); 1446 1447 num_default_vports = idpf_get_default_vports(adapter); 1448 if (adapter->num_alloc_vports < num_default_vports) 1449 default_vport = true; 1450 else 1451 default_vport = false; 1452 1453 err = idpf_vport_alloc_max_qs(adapter, &max_q); 1454 if (err) 1455 goto unwind_vports; 1456 1457 err = idpf_send_create_vport_msg(adapter, &max_q); 1458 if (err) { 1459 idpf_vport_dealloc_max_qs(adapter, &max_q); 1460 goto unwind_vports; 1461 } 1462 1463 pdev = adapter->pdev; 1464 vport = idpf_vport_alloc(adapter, &max_q); 1465 if (!vport) { 1466 err = -EFAULT; 1467 dev_err(&pdev->dev, "failed to allocate vport: %d\n", 1468 err); 1469 idpf_vport_dealloc_max_qs(adapter, &max_q); 1470 goto unwind_vports; 1471 } 1472 1473 index = vport->idx; 1474 vport_config = adapter->vport_config[index]; 1475 1476 init_waitqueue_head(&vport->sw_marker_wq); 1477 1478 spin_lock_init(&vport_config->mac_filter_list_lock); 1479 1480 INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list); 1481 1482 err = idpf_check_supported_desc_ids(vport); 1483 if (err) { 1484 dev_err(&pdev->dev, "failed to get required descriptor ids\n"); 1485 goto cfg_netdev_err; 1486 } 1487 1488 if (idpf_cfg_netdev(vport)) 1489 goto cfg_netdev_err; 1490 1491 err = idpf_send_get_rx_ptype_msg(vport); 1492 if (err) 1493 goto handle_err; 1494 1495 /* Once state is put into DOWN, driver is ready for dev_open */ 1496 np = netdev_priv(vport->netdev); 1497 np->state = __IDPF_VPORT_DOWN; 1498 if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags)) 1499 idpf_vport_open(vport); 1500 1501 /* Spawn and return 'idpf_init_task' work queue until all the 1502 * default vports are created 1503 */ 1504 if (adapter->num_alloc_vports < num_default_vports) { 1505 queue_delayed_work(adapter->init_wq, &adapter->init_task, 1506 msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); 1507 1508 return; 1509 } 1510 1511 for (index = 0; index < adapter->max_vports; index++) { 1512 struct net_device *netdev = adapter->netdevs[index]; 1513 struct idpf_vport_config *vport_config; 1514 1515 vport_config = adapter->vport_config[index]; 1516 1517 if (!netdev || 1518 test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags)) 1519 continue; 1520 1521 err = register_netdev(netdev); 1522 if (err) { 1523 dev_err(&pdev->dev, "failed to register netdev for vport %d: %pe\n", 1524 index, ERR_PTR(err)); 1525 continue; 1526 } 1527 set_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags); 1528 } 1529 1530 /* As all the required vports are created, clear the reset flag 1531 * unconditionally here in case we were in reset and the link was down. 1532 */ 1533 clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); 1534 /* Start the statistics task now */ 1535 queue_delayed_work(adapter->stats_wq, &adapter->stats_task, 1536 msecs_to_jiffies(10 * (pdev->devfn & 0x07))); 1537 1538 return; 1539 1540 handle_err: 1541 idpf_decfg_netdev(vport); 1542 cfg_netdev_err: 1543 idpf_vport_rel(vport); 1544 adapter->vports[index] = NULL; 1545 unwind_vports: 1546 if (default_vport) { 1547 for (index = 0; index < adapter->max_vports; index++) { 1548 if (adapter->vports[index]) 1549 idpf_vport_dealloc(adapter->vports[index]); 1550 } 1551 } 1552 clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); 1553 } 1554 1555 /** 1556 * idpf_sriov_ena - Enable or change number of VFs 1557 * @adapter: private data struct 1558 * @num_vfs: number of VFs to allocate 1559 */ 1560 static int idpf_sriov_ena(struct idpf_adapter *adapter, int num_vfs) 1561 { 1562 struct device *dev = &adapter->pdev->dev; 1563 int err; 1564 1565 err = idpf_send_set_sriov_vfs_msg(adapter, num_vfs); 1566 if (err) { 1567 dev_err(dev, "Failed to allocate VFs: %d\n", err); 1568 1569 return err; 1570 } 1571 1572 err = pci_enable_sriov(adapter->pdev, num_vfs); 1573 if (err) { 1574 idpf_send_set_sriov_vfs_msg(adapter, 0); 1575 dev_err(dev, "Failed to enable SR-IOV: %d\n", err); 1576 1577 return err; 1578 } 1579 1580 adapter->num_vfs = num_vfs; 1581 1582 return num_vfs; 1583 } 1584 1585 /** 1586 * idpf_sriov_configure - Configure the requested VFs 1587 * @pdev: pointer to a pci_dev structure 1588 * @num_vfs: number of vfs to allocate 1589 * 1590 * Enable or change the number of VFs. Called when the user updates the number 1591 * of VFs in sysfs. 1592 **/ 1593 int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs) 1594 { 1595 struct idpf_adapter *adapter = pci_get_drvdata(pdev); 1596 1597 if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_SRIOV)) { 1598 dev_info(&pdev->dev, "SR-IOV is not supported on this device\n"); 1599 1600 return -EOPNOTSUPP; 1601 } 1602 1603 if (num_vfs) 1604 return idpf_sriov_ena(adapter, num_vfs); 1605 1606 if (pci_vfs_assigned(pdev)) { 1607 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs\n"); 1608 1609 return -EBUSY; 1610 } 1611 1612 pci_disable_sriov(adapter->pdev); 1613 idpf_send_set_sriov_vfs_msg(adapter, 0); 1614 adapter->num_vfs = 0; 1615 1616 return 0; 1617 } 1618 1619 /** 1620 * idpf_deinit_task - Device deinit routine 1621 * @adapter: Driver specific private structure 1622 * 1623 * Extended remove logic which will be used for 1624 * hard reset as well 1625 */ 1626 void idpf_deinit_task(struct idpf_adapter *adapter) 1627 { 1628 unsigned int i; 1629 1630 /* Wait until the init_task is done else this thread might release 1631 * the resources first and the other thread might end up in a bad state 1632 */ 1633 cancel_delayed_work_sync(&adapter->init_task); 1634 1635 if (!adapter->vports) 1636 return; 1637 1638 cancel_delayed_work_sync(&adapter->stats_task); 1639 1640 for (i = 0; i < adapter->max_vports; i++) { 1641 if (adapter->vports[i]) 1642 idpf_vport_dealloc(adapter->vports[i]); 1643 } 1644 } 1645 1646 /** 1647 * idpf_check_reset_complete - check that reset is complete 1648 * @hw: pointer to hw struct 1649 * @reset_reg: struct with reset registers 1650 * 1651 * Returns 0 if device is ready to use, or -EBUSY if it's in reset. 1652 **/ 1653 static int idpf_check_reset_complete(struct idpf_hw *hw, 1654 struct idpf_reset_reg *reset_reg) 1655 { 1656 struct idpf_adapter *adapter = hw->back; 1657 int i; 1658 1659 for (i = 0; i < 2000; i++) { 1660 u32 reg_val = readl(reset_reg->rstat); 1661 1662 /* 0xFFFFFFFF might be read if other side hasn't cleared the 1663 * register for us yet and 0xFFFFFFFF is not a valid value for 1664 * the register, so treat that as invalid. 1665 */ 1666 if (reg_val != 0xFFFFFFFF && (reg_val & reset_reg->rstat_m)) 1667 return 0; 1668 1669 usleep_range(5000, 10000); 1670 } 1671 1672 dev_warn(&adapter->pdev->dev, "Device reset timeout!\n"); 1673 /* Clear the reset flag unconditionally here since the reset 1674 * technically isn't in progress anymore from the driver's perspective 1675 */ 1676 clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); 1677 1678 return -EBUSY; 1679 } 1680 1681 /** 1682 * idpf_set_vport_state - Set the vport state to be after the reset 1683 * @adapter: Driver specific private structure 1684 */ 1685 static void idpf_set_vport_state(struct idpf_adapter *adapter) 1686 { 1687 u16 i; 1688 1689 for (i = 0; i < adapter->max_vports; i++) { 1690 struct idpf_netdev_priv *np; 1691 1692 if (!adapter->netdevs[i]) 1693 continue; 1694 1695 np = netdev_priv(adapter->netdevs[i]); 1696 if (np->state == __IDPF_VPORT_UP) 1697 set_bit(IDPF_VPORT_UP_REQUESTED, 1698 adapter->vport_config[i]->flags); 1699 } 1700 } 1701 1702 /** 1703 * idpf_init_hard_reset - Initiate a hardware reset 1704 * @adapter: Driver specific private structure 1705 * 1706 * Deallocate the vports and all the resources associated with them and 1707 * reallocate. Also reinitialize the mailbox. Return 0 on success, 1708 * negative on failure. 1709 */ 1710 static int idpf_init_hard_reset(struct idpf_adapter *adapter) 1711 { 1712 struct idpf_reg_ops *reg_ops = &adapter->dev_ops.reg_ops; 1713 struct device *dev = &adapter->pdev->dev; 1714 struct net_device *netdev; 1715 int err; 1716 u16 i; 1717 1718 mutex_lock(&adapter->vport_ctrl_lock); 1719 1720 dev_info(dev, "Device HW Reset initiated\n"); 1721 1722 /* Avoid TX hangs on reset */ 1723 for (i = 0; i < adapter->max_vports; i++) { 1724 netdev = adapter->netdevs[i]; 1725 if (!netdev) 1726 continue; 1727 1728 netif_carrier_off(netdev); 1729 netif_tx_disable(netdev); 1730 } 1731 1732 /* Prepare for reset */ 1733 if (test_and_clear_bit(IDPF_HR_DRV_LOAD, adapter->flags)) { 1734 reg_ops->trigger_reset(adapter, IDPF_HR_DRV_LOAD); 1735 } else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) { 1736 bool is_reset = idpf_is_reset_detected(adapter); 1737 1738 idpf_set_vport_state(adapter); 1739 idpf_vc_core_deinit(adapter); 1740 if (!is_reset) 1741 reg_ops->trigger_reset(adapter, IDPF_HR_FUNC_RESET); 1742 idpf_deinit_dflt_mbx(adapter); 1743 } else { 1744 dev_err(dev, "Unhandled hard reset cause\n"); 1745 err = -EBADRQC; 1746 goto unlock_mutex; 1747 } 1748 1749 /* Wait for reset to complete */ 1750 err = idpf_check_reset_complete(&adapter->hw, &adapter->reset_reg); 1751 if (err) { 1752 dev_err(dev, "The driver was unable to contact the device's firmware. Check that the FW is running. Driver state= 0x%x\n", 1753 adapter->state); 1754 goto unlock_mutex; 1755 } 1756 1757 /* Reset is complete and so start building the driver resources again */ 1758 err = idpf_init_dflt_mbx(adapter); 1759 if (err) { 1760 dev_err(dev, "Failed to initialize default mailbox: %d\n", err); 1761 goto unlock_mutex; 1762 } 1763 1764 queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); 1765 1766 /* Initialize the state machine, also allocate memory and request 1767 * resources 1768 */ 1769 err = idpf_vc_core_init(adapter); 1770 if (err) { 1771 cancel_delayed_work_sync(&adapter->mbx_task); 1772 idpf_deinit_dflt_mbx(adapter); 1773 goto unlock_mutex; 1774 } 1775 1776 /* Wait till all the vports are initialized to release the reset lock, 1777 * else user space callbacks may access uninitialized vports 1778 */ 1779 while (test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags)) 1780 msleep(100); 1781 1782 unlock_mutex: 1783 mutex_unlock(&adapter->vport_ctrl_lock); 1784 1785 return err; 1786 } 1787 1788 /** 1789 * idpf_vc_event_task - Handle virtchannel event logic 1790 * @work: work queue struct 1791 */ 1792 void idpf_vc_event_task(struct work_struct *work) 1793 { 1794 struct idpf_adapter *adapter; 1795 1796 adapter = container_of(work, struct idpf_adapter, vc_event_task.work); 1797 1798 if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) 1799 return; 1800 1801 if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags) || 1802 test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) { 1803 set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); 1804 idpf_init_hard_reset(adapter); 1805 } 1806 } 1807 1808 /** 1809 * idpf_initiate_soft_reset - Initiate a software reset 1810 * @vport: virtual port data struct 1811 * @reset_cause: reason for the soft reset 1812 * 1813 * Soft reset only reallocs vport queue resources. Returns 0 on success, 1814 * negative on failure. 1815 */ 1816 int idpf_initiate_soft_reset(struct idpf_vport *vport, 1817 enum idpf_vport_reset_cause reset_cause) 1818 { 1819 struct idpf_netdev_priv *np = netdev_priv(vport->netdev); 1820 enum idpf_vport_state current_state = np->state; 1821 struct idpf_adapter *adapter = vport->adapter; 1822 struct idpf_vport *new_vport; 1823 int err; 1824 1825 /* If the system is low on memory, we can end up in bad state if we 1826 * free all the memory for queue resources and try to allocate them 1827 * again. Instead, we can pre-allocate the new resources before doing 1828 * anything and bailing if the alloc fails. 1829 * 1830 * Make a clone of the existing vport to mimic its current 1831 * configuration, then modify the new structure with any requested 1832 * changes. Once the allocation of the new resources is done, stop the 1833 * existing vport and copy the configuration to the main vport. If an 1834 * error occurred, the existing vport will be untouched. 1835 * 1836 */ 1837 new_vport = kzalloc(sizeof(*vport), GFP_KERNEL); 1838 if (!new_vport) 1839 return -ENOMEM; 1840 1841 /* This purposely avoids copying the end of the struct because it 1842 * contains wait_queues and mutexes and other stuff we don't want to 1843 * mess with. Nothing below should use those variables from new_vport 1844 * and should instead always refer to them in vport if they need to. 1845 */ 1846 memcpy(new_vport, vport, offsetof(struct idpf_vport, link_up)); 1847 1848 /* Adjust resource parameters prior to reallocating resources */ 1849 switch (reset_cause) { 1850 case IDPF_SR_Q_CHANGE: 1851 err = idpf_vport_adjust_qs(new_vport); 1852 if (err) 1853 goto free_vport; 1854 break; 1855 case IDPF_SR_Q_DESC_CHANGE: 1856 /* Update queue parameters before allocating resources */ 1857 idpf_vport_calc_num_q_desc(new_vport); 1858 break; 1859 case IDPF_SR_MTU_CHANGE: 1860 case IDPF_SR_RSC_CHANGE: 1861 break; 1862 default: 1863 dev_err(&adapter->pdev->dev, "Unhandled soft reset cause\n"); 1864 err = -EINVAL; 1865 goto free_vport; 1866 } 1867 1868 if (current_state <= __IDPF_VPORT_DOWN) { 1869 idpf_send_delete_queues_msg(vport); 1870 } else { 1871 set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags); 1872 idpf_vport_stop(vport); 1873 } 1874 1875 idpf_deinit_rss(vport); 1876 /* We're passing in vport here because we need its wait_queue 1877 * to send a message and it should be getting all the vport 1878 * config data out of the adapter but we need to be careful not 1879 * to add code to add_queues to change the vport config within 1880 * vport itself as it will be wiped with a memcpy later. 1881 */ 1882 err = idpf_send_add_queues_msg(vport, new_vport->num_txq, 1883 new_vport->num_complq, 1884 new_vport->num_rxq, 1885 new_vport->num_bufq); 1886 if (err) 1887 goto err_reset; 1888 1889 /* Same comment as above regarding avoiding copying the wait_queues and 1890 * mutexes applies here. We do not want to mess with those if possible. 1891 */ 1892 memcpy(vport, new_vport, offsetof(struct idpf_vport, link_up)); 1893 1894 if (reset_cause == IDPF_SR_Q_CHANGE) 1895 idpf_vport_alloc_vec_indexes(vport); 1896 1897 err = idpf_set_real_num_queues(vport); 1898 if (err) 1899 goto err_open; 1900 1901 if (current_state == __IDPF_VPORT_UP) 1902 err = idpf_vport_open(vport); 1903 1904 kfree(new_vport); 1905 1906 return err; 1907 1908 err_reset: 1909 idpf_send_add_queues_msg(vport, vport->num_txq, vport->num_complq, 1910 vport->num_rxq, vport->num_bufq); 1911 1912 err_open: 1913 if (current_state == __IDPF_VPORT_UP) 1914 idpf_vport_open(vport); 1915 1916 free_vport: 1917 kfree(new_vport); 1918 1919 return err; 1920 } 1921 1922 /** 1923 * idpf_addr_sync - Callback for dev_(mc|uc)_sync to add address 1924 * @netdev: the netdevice 1925 * @addr: address to add 1926 * 1927 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call 1928 * __dev_(uc|mc)_sync from .set_rx_mode. Kernel takes addr_list_lock spinlock 1929 * meaning we cannot sleep in this context. Due to this, we have to add the 1930 * filter and send the virtchnl message asynchronously without waiting for the 1931 * response from the other side. We won't know whether or not the operation 1932 * actually succeeded until we get the message back. Returns 0 on success, 1933 * negative on failure. 1934 */ 1935 static int idpf_addr_sync(struct net_device *netdev, const u8 *addr) 1936 { 1937 struct idpf_netdev_priv *np = netdev_priv(netdev); 1938 1939 return idpf_add_mac_filter(np->vport, np, addr, true); 1940 } 1941 1942 /** 1943 * idpf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address 1944 * @netdev: the netdevice 1945 * @addr: address to add 1946 * 1947 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call 1948 * __dev_(uc|mc)_sync from .set_rx_mode. Kernel takes addr_list_lock spinlock 1949 * meaning we cannot sleep in this context. Due to this we have to delete the 1950 * filter and send the virtchnl message asynchronously without waiting for the 1951 * return from the other side. We won't know whether or not the operation 1952 * actually succeeded until we get the message back. Returns 0 on success, 1953 * negative on failure. 1954 */ 1955 static int idpf_addr_unsync(struct net_device *netdev, const u8 *addr) 1956 { 1957 struct idpf_netdev_priv *np = netdev_priv(netdev); 1958 1959 /* Under some circumstances, we might receive a request to delete 1960 * our own device address from our uc list. Because we store the 1961 * device address in the VSI's MAC filter list, we need to ignore 1962 * such requests and not delete our device address from this list. 1963 */ 1964 if (ether_addr_equal(addr, netdev->dev_addr)) 1965 return 0; 1966 1967 idpf_del_mac_filter(np->vport, np, addr, true); 1968 1969 return 0; 1970 } 1971 1972 /** 1973 * idpf_set_rx_mode - NDO callback to set the netdev filters 1974 * @netdev: network interface device structure 1975 * 1976 * Stack takes addr_list_lock spinlock before calling our .set_rx_mode. We 1977 * cannot sleep in this context. 1978 */ 1979 static void idpf_set_rx_mode(struct net_device *netdev) 1980 { 1981 struct idpf_netdev_priv *np = netdev_priv(netdev); 1982 struct idpf_vport_user_config_data *config_data; 1983 struct idpf_adapter *adapter; 1984 bool changed = false; 1985 struct device *dev; 1986 int err; 1987 1988 adapter = np->adapter; 1989 dev = &adapter->pdev->dev; 1990 1991 if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER)) { 1992 __dev_uc_sync(netdev, idpf_addr_sync, idpf_addr_unsync); 1993 __dev_mc_sync(netdev, idpf_addr_sync, idpf_addr_unsync); 1994 } 1995 1996 if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PROMISC)) 1997 return; 1998 1999 config_data = &adapter->vport_config[np->vport_idx]->user_config; 2000 /* IFF_PROMISC enables both unicast and multicast promiscuous, 2001 * while IFF_ALLMULTI only enables multicast such that: 2002 * 2003 * promisc + allmulti = unicast | multicast 2004 * promisc + !allmulti = unicast | multicast 2005 * !promisc + allmulti = multicast 2006 */ 2007 if ((netdev->flags & IFF_PROMISC) && 2008 !test_and_set_bit(__IDPF_PROMISC_UC, config_data->user_flags)) { 2009 changed = true; 2010 dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n"); 2011 if (!test_and_set_bit(__IDPF_PROMISC_MC, adapter->flags)) 2012 dev_info(dev, "Entering multicast promiscuous mode\n"); 2013 } 2014 2015 if (!(netdev->flags & IFF_PROMISC) && 2016 test_and_clear_bit(__IDPF_PROMISC_UC, config_data->user_flags)) { 2017 changed = true; 2018 dev_info(dev, "Leaving promiscuous mode\n"); 2019 } 2020 2021 if (netdev->flags & IFF_ALLMULTI && 2022 !test_and_set_bit(__IDPF_PROMISC_MC, config_data->user_flags)) { 2023 changed = true; 2024 dev_info(dev, "Entering multicast promiscuous mode\n"); 2025 } 2026 2027 if (!(netdev->flags & (IFF_ALLMULTI | IFF_PROMISC)) && 2028 test_and_clear_bit(__IDPF_PROMISC_MC, config_data->user_flags)) { 2029 changed = true; 2030 dev_info(dev, "Leaving multicast promiscuous mode\n"); 2031 } 2032 2033 if (!changed) 2034 return; 2035 2036 err = idpf_set_promiscuous(adapter, config_data, np->vport_id); 2037 if (err) 2038 dev_err(dev, "Failed to set promiscuous mode: %d\n", err); 2039 } 2040 2041 /** 2042 * idpf_vport_manage_rss_lut - disable/enable RSS 2043 * @vport: the vport being changed 2044 * 2045 * In the event of disable request for RSS, this function will zero out RSS 2046 * LUT, while in the event of enable request for RSS, it will reconfigure RSS 2047 * LUT with the default LUT configuration. 2048 */ 2049 static int idpf_vport_manage_rss_lut(struct idpf_vport *vport) 2050 { 2051 bool ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH); 2052 struct idpf_rss_data *rss_data; 2053 u16 idx = vport->idx; 2054 int lut_size; 2055 2056 rss_data = &vport->adapter->vport_config[idx]->user_config.rss_data; 2057 lut_size = rss_data->rss_lut_size * sizeof(u32); 2058 2059 if (ena) { 2060 /* This will contain the default or user configured LUT */ 2061 memcpy(rss_data->rss_lut, rss_data->cached_lut, lut_size); 2062 } else { 2063 /* Save a copy of the current LUT to be restored later if 2064 * requested. 2065 */ 2066 memcpy(rss_data->cached_lut, rss_data->rss_lut, lut_size); 2067 2068 /* Zero out the current LUT to disable */ 2069 memset(rss_data->rss_lut, 0, lut_size); 2070 } 2071 2072 return idpf_config_rss(vport); 2073 } 2074 2075 /** 2076 * idpf_set_features - set the netdev feature flags 2077 * @netdev: ptr to the netdev being adjusted 2078 * @features: the feature set that the stack is suggesting 2079 */ 2080 static int idpf_set_features(struct net_device *netdev, 2081 netdev_features_t features) 2082 { 2083 netdev_features_t changed = netdev->features ^ features; 2084 struct idpf_adapter *adapter; 2085 struct idpf_vport *vport; 2086 int err = 0; 2087 2088 idpf_vport_ctrl_lock(netdev); 2089 vport = idpf_netdev_to_vport(netdev); 2090 2091 adapter = vport->adapter; 2092 2093 if (idpf_is_reset_in_prog(adapter)) { 2094 dev_err(&adapter->pdev->dev, "Device is resetting, changing netdev features temporarily unavailable.\n"); 2095 err = -EBUSY; 2096 goto unlock_mutex; 2097 } 2098 2099 if (changed & NETIF_F_RXHASH) { 2100 netdev->features ^= NETIF_F_RXHASH; 2101 err = idpf_vport_manage_rss_lut(vport); 2102 if (err) 2103 goto unlock_mutex; 2104 } 2105 2106 if (changed & NETIF_F_GRO_HW) { 2107 netdev->features ^= NETIF_F_GRO_HW; 2108 err = idpf_initiate_soft_reset(vport, IDPF_SR_RSC_CHANGE); 2109 if (err) 2110 goto unlock_mutex; 2111 } 2112 2113 if (changed & NETIF_F_LOOPBACK) { 2114 netdev->features ^= NETIF_F_LOOPBACK; 2115 err = idpf_send_ena_dis_loopback_msg(vport); 2116 } 2117 2118 unlock_mutex: 2119 idpf_vport_ctrl_unlock(netdev); 2120 2121 return err; 2122 } 2123 2124 /** 2125 * idpf_open - Called when a network interface becomes active 2126 * @netdev: network interface device structure 2127 * 2128 * The open entry point is called when a network interface is made 2129 * active by the system (IFF_UP). At this point all resources needed 2130 * for transmit and receive operations are allocated, the interrupt 2131 * handler is registered with the OS, the netdev watchdog is enabled, 2132 * and the stack is notified that the interface is ready. 2133 * 2134 * Returns 0 on success, negative value on failure 2135 */ 2136 static int idpf_open(struct net_device *netdev) 2137 { 2138 struct idpf_vport *vport; 2139 int err; 2140 2141 idpf_vport_ctrl_lock(netdev); 2142 vport = idpf_netdev_to_vport(netdev); 2143 2144 err = idpf_set_real_num_queues(vport); 2145 if (err) 2146 goto unlock; 2147 2148 err = idpf_vport_open(vport); 2149 2150 unlock: 2151 idpf_vport_ctrl_unlock(netdev); 2152 2153 return err; 2154 } 2155 2156 /** 2157 * idpf_change_mtu - NDO callback to change the MTU 2158 * @netdev: network interface device structure 2159 * @new_mtu: new value for maximum frame size 2160 * 2161 * Returns 0 on success, negative on failure 2162 */ 2163 static int idpf_change_mtu(struct net_device *netdev, int new_mtu) 2164 { 2165 struct idpf_vport *vport; 2166 int err; 2167 2168 idpf_vport_ctrl_lock(netdev); 2169 vport = idpf_netdev_to_vport(netdev); 2170 2171 WRITE_ONCE(netdev->mtu, new_mtu); 2172 2173 err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE); 2174 2175 idpf_vport_ctrl_unlock(netdev); 2176 2177 return err; 2178 } 2179 2180 /** 2181 * idpf_features_check - Validate packet conforms to limits 2182 * @skb: skb buffer 2183 * @netdev: This port's netdev 2184 * @features: Offload features that the stack believes apply 2185 */ 2186 static netdev_features_t idpf_features_check(struct sk_buff *skb, 2187 struct net_device *netdev, 2188 netdev_features_t features) 2189 { 2190 struct idpf_vport *vport = idpf_netdev_to_vport(netdev); 2191 struct idpf_adapter *adapter = vport->adapter; 2192 size_t len; 2193 2194 /* No point in doing any of this if neither checksum nor GSO are 2195 * being requested for this frame. We can rule out both by just 2196 * checking for CHECKSUM_PARTIAL 2197 */ 2198 if (skb->ip_summed != CHECKSUM_PARTIAL) 2199 return features; 2200 2201 /* We cannot support GSO if the MSS is going to be less than 2202 * 88 bytes. If it is then we need to drop support for GSO. 2203 */ 2204 if (skb_is_gso(skb) && 2205 (skb_shinfo(skb)->gso_size < IDPF_TX_TSO_MIN_MSS)) 2206 features &= ~NETIF_F_GSO_MASK; 2207 2208 /* Ensure MACLEN is <= 126 bytes (63 words) and not an odd size */ 2209 len = skb_network_offset(skb); 2210 if (unlikely(len & ~(126))) 2211 goto unsupported; 2212 2213 len = skb_network_header_len(skb); 2214 if (unlikely(len > idpf_get_max_tx_hdr_size(adapter))) 2215 goto unsupported; 2216 2217 if (!skb->encapsulation) 2218 return features; 2219 2220 /* L4TUNLEN can support 127 words */ 2221 len = skb_inner_network_header(skb) - skb_transport_header(skb); 2222 if (unlikely(len & ~(127 * 2))) 2223 goto unsupported; 2224 2225 /* IPLEN can support at most 127 dwords */ 2226 len = skb_inner_network_header_len(skb); 2227 if (unlikely(len > idpf_get_max_tx_hdr_size(adapter))) 2228 goto unsupported; 2229 2230 /* No need to validate L4LEN as TCP is the only protocol with a 2231 * a flexible value and we support all possible values supported 2232 * by TCP, which is at most 15 dwords 2233 */ 2234 2235 return features; 2236 2237 unsupported: 2238 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 2239 } 2240 2241 /** 2242 * idpf_set_mac - NDO callback to set port mac address 2243 * @netdev: network interface device structure 2244 * @p: pointer to an address structure 2245 * 2246 * Returns 0 on success, negative on failure 2247 **/ 2248 static int idpf_set_mac(struct net_device *netdev, void *p) 2249 { 2250 struct idpf_netdev_priv *np = netdev_priv(netdev); 2251 struct idpf_vport_config *vport_config; 2252 struct sockaddr *addr = p; 2253 struct idpf_vport *vport; 2254 int err = 0; 2255 2256 idpf_vport_ctrl_lock(netdev); 2257 vport = idpf_netdev_to_vport(netdev); 2258 2259 if (!idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, 2260 VIRTCHNL2_CAP_MACFILTER)) { 2261 dev_info(&vport->adapter->pdev->dev, "Setting MAC address is not supported\n"); 2262 err = -EOPNOTSUPP; 2263 goto unlock_mutex; 2264 } 2265 2266 if (!is_valid_ether_addr(addr->sa_data)) { 2267 dev_info(&vport->adapter->pdev->dev, "Invalid MAC address: %pM\n", 2268 addr->sa_data); 2269 err = -EADDRNOTAVAIL; 2270 goto unlock_mutex; 2271 } 2272 2273 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) 2274 goto unlock_mutex; 2275 2276 vport_config = vport->adapter->vport_config[vport->idx]; 2277 err = idpf_add_mac_filter(vport, np, addr->sa_data, false); 2278 if (err) { 2279 __idpf_del_mac_filter(vport_config, addr->sa_data); 2280 goto unlock_mutex; 2281 } 2282 2283 if (is_valid_ether_addr(vport->default_mac_addr)) 2284 idpf_del_mac_filter(vport, np, vport->default_mac_addr, false); 2285 2286 ether_addr_copy(vport->default_mac_addr, addr->sa_data); 2287 eth_hw_addr_set(netdev, addr->sa_data); 2288 2289 unlock_mutex: 2290 idpf_vport_ctrl_unlock(netdev); 2291 2292 return err; 2293 } 2294 2295 /** 2296 * idpf_alloc_dma_mem - Allocate dma memory 2297 * @hw: pointer to hw struct 2298 * @mem: pointer to dma_mem struct 2299 * @size: size of the memory to allocate 2300 */ 2301 void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size) 2302 { 2303 struct idpf_adapter *adapter = hw->back; 2304 size_t sz = ALIGN(size, 4096); 2305 2306 mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz, 2307 &mem->pa, GFP_KERNEL); 2308 mem->size = sz; 2309 2310 return mem->va; 2311 } 2312 2313 /** 2314 * idpf_free_dma_mem - Free the allocated dma memory 2315 * @hw: pointer to hw struct 2316 * @mem: pointer to dma_mem struct 2317 */ 2318 void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem) 2319 { 2320 struct idpf_adapter *adapter = hw->back; 2321 2322 dma_free_coherent(&adapter->pdev->dev, mem->size, 2323 mem->va, mem->pa); 2324 mem->size = 0; 2325 mem->va = NULL; 2326 mem->pa = 0; 2327 } 2328 2329 static const struct net_device_ops idpf_netdev_ops = { 2330 .ndo_open = idpf_open, 2331 .ndo_stop = idpf_stop, 2332 .ndo_start_xmit = idpf_tx_start, 2333 .ndo_features_check = idpf_features_check, 2334 .ndo_set_rx_mode = idpf_set_rx_mode, 2335 .ndo_validate_addr = eth_validate_addr, 2336 .ndo_set_mac_address = idpf_set_mac, 2337 .ndo_change_mtu = idpf_change_mtu, 2338 .ndo_get_stats64 = idpf_get_stats64, 2339 .ndo_set_features = idpf_set_features, 2340 .ndo_tx_timeout = idpf_tx_timeout, 2341 }; 2342