1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (C) 2023 Intel Corporation */ 3 4 #include "idpf.h" 5 6 static const struct net_device_ops idpf_netdev_ops_splitq; 7 static const struct net_device_ops idpf_netdev_ops_singleq; 8 9 const char * const idpf_vport_vc_state_str[] = { 10 IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_STRING) 11 }; 12 13 /** 14 * idpf_init_vector_stack - Fill the MSIX vector stack with vector index 15 * @adapter: private data struct 16 * 17 * Return 0 on success, error on failure 18 */ 19 static int idpf_init_vector_stack(struct idpf_adapter *adapter) 20 { 21 struct idpf_vector_lifo *stack; 22 u16 min_vec; 23 u32 i; 24 25 mutex_lock(&adapter->vector_lock); 26 min_vec = adapter->num_msix_entries - adapter->num_avail_msix; 27 stack = &adapter->vector_stack; 28 stack->size = adapter->num_msix_entries; 29 /* set the base and top to point at start of the 'free pool' to 30 * distribute the unused vectors on-demand basis 31 */ 32 stack->base = min_vec; 33 stack->top = min_vec; 34 35 stack->vec_idx = kcalloc(stack->size, sizeof(u16), GFP_KERNEL); 36 if (!stack->vec_idx) { 37 mutex_unlock(&adapter->vector_lock); 38 39 return -ENOMEM; 40 } 41 42 for (i = 0; i < stack->size; i++) 43 stack->vec_idx[i] = i; 44 45 mutex_unlock(&adapter->vector_lock); 46 47 return 0; 48 } 49 50 /** 51 * idpf_deinit_vector_stack - zero out the MSIX vector stack 52 * @adapter: private data struct 53 */ 54 static void idpf_deinit_vector_stack(struct idpf_adapter *adapter) 55 { 56 struct idpf_vector_lifo *stack; 57 58 mutex_lock(&adapter->vector_lock); 59 stack = &adapter->vector_stack; 60 kfree(stack->vec_idx); 61 stack->vec_idx = NULL; 62 mutex_unlock(&adapter->vector_lock); 63 } 64 65 /** 66 * idpf_mb_intr_rel_irq - Free the IRQ association with the OS 67 * @adapter: adapter structure 68 * 69 * This will also disable interrupt mode and queue up mailbox task. Mailbox 70 * task will reschedule itself if not in interrupt mode. 
71 */ 72 static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter) 73 { 74 clear_bit(IDPF_MB_INTR_MODE, adapter->flags); 75 free_irq(adapter->msix_entries[0].vector, adapter); 76 queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); 77 } 78 79 /** 80 * idpf_intr_rel - Release interrupt capabilities and free memory 81 * @adapter: adapter to disable interrupts on 82 */ 83 void idpf_intr_rel(struct idpf_adapter *adapter) 84 { 85 int err; 86 87 if (!adapter->msix_entries) 88 return; 89 90 idpf_mb_intr_rel_irq(adapter); 91 pci_free_irq_vectors(adapter->pdev); 92 93 err = idpf_send_dealloc_vectors_msg(adapter); 94 if (err) 95 dev_err(&adapter->pdev->dev, 96 "Failed to deallocate vectors: %d\n", err); 97 98 idpf_deinit_vector_stack(adapter); 99 kfree(adapter->msix_entries); 100 adapter->msix_entries = NULL; 101 } 102 103 /** 104 * idpf_mb_intr_clean - Interrupt handler for the mailbox 105 * @irq: interrupt number 106 * @data: pointer to the adapter structure 107 */ 108 static irqreturn_t idpf_mb_intr_clean(int __always_unused irq, void *data) 109 { 110 struct idpf_adapter *adapter = (struct idpf_adapter *)data; 111 112 queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); 113 114 return IRQ_HANDLED; 115 } 116 117 /** 118 * idpf_mb_irq_enable - Enable MSIX interrupt for the mailbox 119 * @adapter: adapter to get the hardware address for register write 120 */ 121 static void idpf_mb_irq_enable(struct idpf_adapter *adapter) 122 { 123 struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg; 124 u32 val; 125 126 val = intr->dyn_ctl_intena_m | intr->dyn_ctl_itridx_m; 127 writel(val, intr->dyn_ctl); 128 writel(intr->icr_ena_ctlq_m, intr->icr_ena); 129 } 130 131 /** 132 * idpf_mb_intr_req_irq - Request irq for the mailbox interrupt 133 * @adapter: adapter structure to pass to the mailbox irq handler 134 */ 135 static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter) 136 { 137 struct idpf_q_vector *mb_vector = &adapter->mb_vector; 138 int irq_num, mb_vidx = 0, err; 139 140 irq_num = adapter->msix_entries[mb_vidx].vector; 141 mb_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d", 142 dev_driver_string(&adapter->pdev->dev), 143 "Mailbox", mb_vidx); 144 err = request_irq(irq_num, adapter->irq_mb_handler, 0, 145 mb_vector->name, adapter); 146 if (err) { 147 dev_err(&adapter->pdev->dev, 148 "IRQ request for mailbox failed, error: %d\n", err); 149 150 return err; 151 } 152 153 set_bit(IDPF_MB_INTR_MODE, adapter->flags); 154 155 return 0; 156 } 157 158 /** 159 * idpf_set_mb_vec_id - Set vector index for mailbox 160 * @adapter: adapter structure to access the vector chunks 161 * 162 * The first vector id in the requested vector chunks from the CP is for 163 * the mailbox 164 */ 165 static void idpf_set_mb_vec_id(struct idpf_adapter *adapter) 166 { 167 if (adapter->req_vec_chunks) 168 adapter->mb_vector.v_idx = 169 le16_to_cpu(adapter->caps.mailbox_vector_id); 170 else 171 adapter->mb_vector.v_idx = 0; 172 } 173 174 /** 175 * idpf_mb_intr_init - Initialize the mailbox interrupt 176 * @adapter: adapter structure to store the mailbox vector 177 */ 178 static int idpf_mb_intr_init(struct idpf_adapter *adapter) 179 { 180 adapter->dev_ops.reg_ops.mb_intr_reg_init(adapter); 181 adapter->irq_mb_handler = idpf_mb_intr_clean; 182 183 return idpf_mb_intr_req_irq(adapter); 184 } 185 186 /** 187 * idpf_vector_lifo_push - push MSIX vector index onto stack 188 * @adapter: private data struct 189 * @vec_idx: vector index to store 190 */ 191 static int idpf_vector_lifo_push(struct idpf_adapter *adapter, u16 
vec_idx) 192 { 193 struct idpf_vector_lifo *stack = &adapter->vector_stack; 194 195 lockdep_assert_held(&adapter->vector_lock); 196 197 if (stack->top == stack->base) { 198 dev_err(&adapter->pdev->dev, "Exceeded the vector stack limit: %d\n", 199 stack->top); 200 return -EINVAL; 201 } 202 203 stack->vec_idx[--stack->top] = vec_idx; 204 205 return 0; 206 } 207 208 /** 209 * idpf_vector_lifo_pop - pop MSIX vector index from stack 210 * @adapter: private data struct 211 */ 212 static int idpf_vector_lifo_pop(struct idpf_adapter *adapter) 213 { 214 struct idpf_vector_lifo *stack = &adapter->vector_stack; 215 216 lockdep_assert_held(&adapter->vector_lock); 217 218 if (stack->top == stack->size) { 219 dev_err(&adapter->pdev->dev, "No interrupt vectors are available to distribute!\n"); 220 221 return -EINVAL; 222 } 223 224 return stack->vec_idx[stack->top++]; 225 } 226 227 /** 228 * idpf_vector_stash - Store the vector indexes onto the stack 229 * @adapter: private data struct 230 * @q_vector_idxs: vector index array 231 * @vec_info: info related to the number of vectors 232 * 233 * This function is a no-op if there are no vectors indexes to be stashed 234 */ 235 static void idpf_vector_stash(struct idpf_adapter *adapter, u16 *q_vector_idxs, 236 struct idpf_vector_info *vec_info) 237 { 238 int i, base = 0; 239 u16 vec_idx; 240 241 lockdep_assert_held(&adapter->vector_lock); 242 243 if (!vec_info->num_curr_vecs) 244 return; 245 246 /* For default vports, no need to stash vector allocated from the 247 * default pool onto the stack 248 */ 249 if (vec_info->default_vport) 250 base = IDPF_MIN_Q_VEC; 251 252 for (i = vec_info->num_curr_vecs - 1; i >= base ; i--) { 253 vec_idx = q_vector_idxs[i]; 254 idpf_vector_lifo_push(adapter, vec_idx); 255 adapter->num_avail_msix++; 256 } 257 } 258 259 /** 260 * idpf_req_rel_vector_indexes - Request or release MSIX vector indexes 261 * @adapter: driver specific private structure 262 * @q_vector_idxs: vector index array 263 * @vec_info: info related to the number of vectors 264 * 265 * This is the core function to distribute the MSIX vectors acquired from the 266 * OS. It expects the caller to pass the number of vectors required and 267 * also previously allocated. First, it stashes previously allocated vector 268 * indexes on to the stack and then figures out if it can allocate requested 269 * vectors. It can wait on acquiring the mutex lock. If the caller passes 0 as 270 * requested vectors, then this function just stashes the already allocated 271 * vectors and returns 0. 
272 * 273 * Returns actual number of vectors allocated on success, error value on failure 274 * If 0 is returned, implies the stack has no vectors to allocate which is also 275 * a failure case for the caller 276 */ 277 int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter, 278 u16 *q_vector_idxs, 279 struct idpf_vector_info *vec_info) 280 { 281 u16 num_req_vecs, num_alloc_vecs = 0, max_vecs; 282 struct idpf_vector_lifo *stack; 283 int i, j, vecid; 284 285 mutex_lock(&adapter->vector_lock); 286 stack = &adapter->vector_stack; 287 num_req_vecs = vec_info->num_req_vecs; 288 289 /* Stash interrupt vector indexes onto the stack if required */ 290 idpf_vector_stash(adapter, q_vector_idxs, vec_info); 291 292 if (!num_req_vecs) 293 goto rel_lock; 294 295 if (vec_info->default_vport) { 296 /* As IDPF_MIN_Q_VEC per default vport is put aside in the 297 * default pool of the stack, use them for default vports 298 */ 299 j = vec_info->index * IDPF_MIN_Q_VEC + IDPF_MBX_Q_VEC; 300 for (i = 0; i < IDPF_MIN_Q_VEC; i++) { 301 q_vector_idxs[num_alloc_vecs++] = stack->vec_idx[j++]; 302 num_req_vecs--; 303 } 304 } 305 306 /* Find if stack has enough vector to allocate */ 307 max_vecs = min(adapter->num_avail_msix, num_req_vecs); 308 309 for (j = 0; j < max_vecs; j++) { 310 vecid = idpf_vector_lifo_pop(adapter); 311 q_vector_idxs[num_alloc_vecs++] = vecid; 312 } 313 adapter->num_avail_msix -= max_vecs; 314 315 rel_lock: 316 mutex_unlock(&adapter->vector_lock); 317 318 return num_alloc_vecs; 319 } 320 321 /** 322 * idpf_intr_req - Request interrupt capabilities 323 * @adapter: adapter to enable interrupts on 324 * 325 * Returns 0 on success, negative on failure 326 */ 327 int idpf_intr_req(struct idpf_adapter *adapter) 328 { 329 u16 default_vports = idpf_get_default_vports(adapter); 330 int num_q_vecs, total_vecs, num_vec_ids; 331 int min_vectors, v_actual, err; 332 unsigned int vector; 333 u16 *vecids; 334 335 total_vecs = idpf_get_reserved_vecs(adapter); 336 num_q_vecs = total_vecs - IDPF_MBX_Q_VEC; 337 338 err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs); 339 if (err) { 340 dev_err(&adapter->pdev->dev, 341 "Failed to allocate %d vectors: %d\n", num_q_vecs, err); 342 343 return -EAGAIN; 344 } 345 346 min_vectors = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports; 347 v_actual = pci_alloc_irq_vectors(adapter->pdev, min_vectors, 348 total_vecs, PCI_IRQ_MSIX); 349 if (v_actual < min_vectors) { 350 dev_err(&adapter->pdev->dev, "Failed to allocate MSIX vectors: %d\n", 351 v_actual); 352 err = -EAGAIN; 353 goto send_dealloc_vecs; 354 } 355 356 adapter->msix_entries = kcalloc(v_actual, sizeof(struct msix_entry), 357 GFP_KERNEL); 358 359 if (!adapter->msix_entries) { 360 err = -ENOMEM; 361 goto free_irq; 362 } 363 364 idpf_set_mb_vec_id(adapter); 365 366 vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL); 367 if (!vecids) { 368 err = -ENOMEM; 369 goto free_msix; 370 } 371 372 if (adapter->req_vec_chunks) { 373 struct virtchnl2_vector_chunks *vchunks; 374 struct virtchnl2_alloc_vectors *ac; 375 376 ac = adapter->req_vec_chunks; 377 vchunks = &ac->vchunks; 378 379 num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs, 380 vchunks); 381 if (num_vec_ids < v_actual) { 382 err = -EINVAL; 383 goto free_vecids; 384 } 385 } else { 386 int i; 387 388 for (i = 0; i < v_actual; i++) 389 vecids[i] = i; 390 } 391 392 for (vector = 0; vector < v_actual; vector++) { 393 adapter->msix_entries[vector].entry = vecids[vector]; 394 adapter->msix_entries[vector].vector = 395 pci_irq_vector(adapter->pdev, vector); 396 
} 397 398 adapter->num_req_msix = total_vecs; 399 adapter->num_msix_entries = v_actual; 400 /* 'num_avail_msix' is used to distribute excess vectors to the vports 401 * after considering the minimum vectors required per each default 402 * vport 403 */ 404 adapter->num_avail_msix = v_actual - min_vectors; 405 406 /* Fill MSIX vector lifo stack with vector indexes */ 407 err = idpf_init_vector_stack(adapter); 408 if (err) 409 goto free_vecids; 410 411 err = idpf_mb_intr_init(adapter); 412 if (err) 413 goto deinit_vec_stack; 414 idpf_mb_irq_enable(adapter); 415 kfree(vecids); 416 417 return 0; 418 419 deinit_vec_stack: 420 idpf_deinit_vector_stack(adapter); 421 free_vecids: 422 kfree(vecids); 423 free_msix: 424 kfree(adapter->msix_entries); 425 adapter->msix_entries = NULL; 426 free_irq: 427 pci_free_irq_vectors(adapter->pdev); 428 send_dealloc_vecs: 429 idpf_send_dealloc_vectors_msg(adapter); 430 431 return err; 432 } 433 434 /** 435 * idpf_find_mac_filter - Search filter list for specific mac filter 436 * @vconfig: Vport config structure 437 * @macaddr: The MAC address 438 * 439 * Returns ptr to the filter object or NULL. Must be called while holding the 440 * mac_filter_list_lock. 441 **/ 442 static struct idpf_mac_filter *idpf_find_mac_filter(struct idpf_vport_config *vconfig, 443 const u8 *macaddr) 444 { 445 struct idpf_mac_filter *f; 446 447 if (!macaddr) 448 return NULL; 449 450 list_for_each_entry(f, &vconfig->user_config.mac_filter_list, list) { 451 if (ether_addr_equal(macaddr, f->macaddr)) 452 return f; 453 } 454 455 return NULL; 456 } 457 458 /** 459 * __idpf_del_mac_filter - Delete a MAC filter from the filter list 460 * @vport_config: Vport config structure 461 * @macaddr: The MAC address 462 * 463 * Returns 0 on success, error value on failure 464 **/ 465 static int __idpf_del_mac_filter(struct idpf_vport_config *vport_config, 466 const u8 *macaddr) 467 { 468 struct idpf_mac_filter *f; 469 470 spin_lock_bh(&vport_config->mac_filter_list_lock); 471 f = idpf_find_mac_filter(vport_config, macaddr); 472 if (f) { 473 list_del(&f->list); 474 kfree(f); 475 } 476 spin_unlock_bh(&vport_config->mac_filter_list_lock); 477 478 return 0; 479 } 480 481 /** 482 * idpf_del_mac_filter - Delete a MAC filter from the filter list 483 * @vport: Main vport structure 484 * @np: Netdev private structure 485 * @macaddr: The MAC address 486 * @async: Don't wait for return message 487 * 488 * Removes filter from list and if interface is up, tells hardware about the 489 * removed filter. 490 **/ 491 static int idpf_del_mac_filter(struct idpf_vport *vport, 492 struct idpf_netdev_priv *np, 493 const u8 *macaddr, bool async) 494 { 495 struct idpf_vport_config *vport_config; 496 struct idpf_mac_filter *f; 497 498 vport_config = np->adapter->vport_config[np->vport_idx]; 499 500 spin_lock_bh(&vport_config->mac_filter_list_lock); 501 f = idpf_find_mac_filter(vport_config, macaddr); 502 if (f) { 503 f->remove = true; 504 } else { 505 spin_unlock_bh(&vport_config->mac_filter_list_lock); 506 507 return -EINVAL; 508 } 509 spin_unlock_bh(&vport_config->mac_filter_list_lock); 510 511 if (np->state == __IDPF_VPORT_UP) { 512 int err; 513 514 err = idpf_add_del_mac_filters(vport, np, false, async); 515 if (err) 516 return err; 517 } 518 519 return __idpf_del_mac_filter(vport_config, macaddr); 520 } 521 522 /** 523 * __idpf_add_mac_filter - Add mac filter helper function 524 * @vport_config: Vport config structure 525 * @macaddr: Address to add 526 * 527 * Takes mac_filter_list_lock spinlock to add new filter to list. 
528 */ 529 static int __idpf_add_mac_filter(struct idpf_vport_config *vport_config, 530 const u8 *macaddr) 531 { 532 struct idpf_mac_filter *f; 533 534 spin_lock_bh(&vport_config->mac_filter_list_lock); 535 536 f = idpf_find_mac_filter(vport_config, macaddr); 537 if (f) { 538 f->remove = false; 539 spin_unlock_bh(&vport_config->mac_filter_list_lock); 540 541 return 0; 542 } 543 544 f = kzalloc(sizeof(*f), GFP_ATOMIC); 545 if (!f) { 546 spin_unlock_bh(&vport_config->mac_filter_list_lock); 547 548 return -ENOMEM; 549 } 550 551 ether_addr_copy(f->macaddr, macaddr); 552 list_add_tail(&f->list, &vport_config->user_config.mac_filter_list); 553 f->add = true; 554 555 spin_unlock_bh(&vport_config->mac_filter_list_lock); 556 557 return 0; 558 } 559 560 /** 561 * idpf_add_mac_filter - Add a mac filter to the filter list 562 * @vport: Main vport structure 563 * @np: Netdev private structure 564 * @macaddr: The MAC address 565 * @async: Don't wait for return message 566 * 567 * Returns 0 on success or error on failure. If interface is up, we'll also 568 * send the virtchnl message to tell hardware about the filter. 569 **/ 570 static int idpf_add_mac_filter(struct idpf_vport *vport, 571 struct idpf_netdev_priv *np, 572 const u8 *macaddr, bool async) 573 { 574 struct idpf_vport_config *vport_config; 575 int err; 576 577 vport_config = np->adapter->vport_config[np->vport_idx]; 578 err = __idpf_add_mac_filter(vport_config, macaddr); 579 if (err) 580 return err; 581 582 if (np->state == __IDPF_VPORT_UP) 583 err = idpf_add_del_mac_filters(vport, np, true, async); 584 585 return err; 586 } 587 588 /** 589 * idpf_del_all_mac_filters - Delete all MAC filters in list 590 * @vport: main vport struct 591 * 592 * Takes mac_filter_list_lock spinlock. Deletes all filters 593 */ 594 static void idpf_del_all_mac_filters(struct idpf_vport *vport) 595 { 596 struct idpf_vport_config *vport_config; 597 struct idpf_mac_filter *f, *ftmp; 598 599 vport_config = vport->adapter->vport_config[vport->idx]; 600 spin_lock_bh(&vport_config->mac_filter_list_lock); 601 602 list_for_each_entry_safe(f, ftmp, &vport_config->user_config.mac_filter_list, 603 list) { 604 list_del(&f->list); 605 kfree(f); 606 } 607 608 spin_unlock_bh(&vport_config->mac_filter_list_lock); 609 } 610 611 /** 612 * idpf_restore_mac_filters - Re-add all MAC filters in list 613 * @vport: main vport struct 614 * 615 * Takes mac_filter_list_lock spinlock. Sets add field to true for filters to 616 * resync filters back to HW. 617 */ 618 static void idpf_restore_mac_filters(struct idpf_vport *vport) 619 { 620 struct idpf_vport_config *vport_config; 621 struct idpf_mac_filter *f; 622 623 vport_config = vport->adapter->vport_config[vport->idx]; 624 spin_lock_bh(&vport_config->mac_filter_list_lock); 625 626 list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list) 627 f->add = true; 628 629 spin_unlock_bh(&vport_config->mac_filter_list_lock); 630 631 idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev), 632 true, false); 633 } 634 635 /** 636 * idpf_remove_mac_filters - Remove all MAC filters in list 637 * @vport: main vport struct 638 * 639 * Takes mac_filter_list_lock spinlock. Sets remove field to true for filters 640 * to remove filters in HW. 
641 */ 642 static void idpf_remove_mac_filters(struct idpf_vport *vport) 643 { 644 struct idpf_vport_config *vport_config; 645 struct idpf_mac_filter *f; 646 647 vport_config = vport->adapter->vport_config[vport->idx]; 648 spin_lock_bh(&vport_config->mac_filter_list_lock); 649 650 list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list) 651 f->remove = true; 652 653 spin_unlock_bh(&vport_config->mac_filter_list_lock); 654 655 idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev), 656 false, false); 657 } 658 659 /** 660 * idpf_deinit_mac_addr - deinitialize mac address for vport 661 * @vport: main vport structure 662 */ 663 static void idpf_deinit_mac_addr(struct idpf_vport *vport) 664 { 665 struct idpf_vport_config *vport_config; 666 struct idpf_mac_filter *f; 667 668 vport_config = vport->adapter->vport_config[vport->idx]; 669 670 spin_lock_bh(&vport_config->mac_filter_list_lock); 671 672 f = idpf_find_mac_filter(vport_config, vport->default_mac_addr); 673 if (f) { 674 list_del(&f->list); 675 kfree(f); 676 } 677 678 spin_unlock_bh(&vport_config->mac_filter_list_lock); 679 } 680 681 /** 682 * idpf_init_mac_addr - initialize mac address for vport 683 * @vport: main vport structure 684 * @netdev: pointer to netdev struct associated with this vport 685 */ 686 static int idpf_init_mac_addr(struct idpf_vport *vport, 687 struct net_device *netdev) 688 { 689 struct idpf_netdev_priv *np = netdev_priv(netdev); 690 struct idpf_adapter *adapter = vport->adapter; 691 int err; 692 693 if (is_valid_ether_addr(vport->default_mac_addr)) { 694 eth_hw_addr_set(netdev, vport->default_mac_addr); 695 ether_addr_copy(netdev->perm_addr, vport->default_mac_addr); 696 697 return idpf_add_mac_filter(vport, np, vport->default_mac_addr, 698 false); 699 } 700 701 if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, 702 VIRTCHNL2_CAP_MACFILTER)) { 703 dev_err(&adapter->pdev->dev, 704 "MAC address is not provided and capability is not set\n"); 705 706 return -EINVAL; 707 } 708 709 eth_hw_addr_random(netdev); 710 err = idpf_add_mac_filter(vport, np, netdev->dev_addr, false); 711 if (err) 712 return err; 713 714 dev_info(&adapter->pdev->dev, "Invalid MAC address %pM, using random %pM\n", 715 vport->default_mac_addr, netdev->dev_addr); 716 ether_addr_copy(vport->default_mac_addr, netdev->dev_addr); 717 718 return 0; 719 } 720 721 /** 722 * idpf_cfg_netdev - Allocate, configure and register a netdev 723 * @vport: main vport structure 724 * 725 * Returns 0 on success, negative value on failure. 
726 */ 727 static int idpf_cfg_netdev(struct idpf_vport *vport) 728 { 729 struct idpf_adapter *adapter = vport->adapter; 730 struct idpf_vport_config *vport_config; 731 netdev_features_t dflt_features; 732 netdev_features_t offloads = 0; 733 struct idpf_netdev_priv *np; 734 struct net_device *netdev; 735 u16 idx = vport->idx; 736 int err; 737 738 vport_config = adapter->vport_config[idx]; 739 740 /* It's possible we already have a netdev allocated and registered for 741 * this vport 742 */ 743 if (test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags)) { 744 netdev = adapter->netdevs[idx]; 745 np = netdev_priv(netdev); 746 np->vport = vport; 747 np->vport_idx = vport->idx; 748 np->vport_id = vport->vport_id; 749 vport->netdev = netdev; 750 751 return idpf_init_mac_addr(vport, netdev); 752 } 753 754 netdev = alloc_etherdev_mqs(sizeof(struct idpf_netdev_priv), 755 vport_config->max_q.max_txq, 756 vport_config->max_q.max_rxq); 757 if (!netdev) 758 return -ENOMEM; 759 760 vport->netdev = netdev; 761 np = netdev_priv(netdev); 762 np->vport = vport; 763 np->adapter = adapter; 764 np->vport_idx = vport->idx; 765 np->vport_id = vport->vport_id; 766 767 spin_lock_init(&np->stats_lock); 768 769 err = idpf_init_mac_addr(vport, netdev); 770 if (err) { 771 free_netdev(vport->netdev); 772 vport->netdev = NULL; 773 774 return err; 775 } 776 777 /* assign netdev_ops */ 778 if (idpf_is_queue_model_split(vport->txq_model)) 779 netdev->netdev_ops = &idpf_netdev_ops_splitq; 780 else 781 netdev->netdev_ops = &idpf_netdev_ops_singleq; 782 783 /* setup watchdog timeout value to be 5 second */ 784 netdev->watchdog_timeo = 5 * HZ; 785 786 /* configure default MTU size */ 787 netdev->min_mtu = ETH_MIN_MTU; 788 netdev->max_mtu = vport->max_mtu; 789 790 dflt_features = NETIF_F_SG | 791 NETIF_F_HIGHDMA; 792 793 if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) 794 dflt_features |= NETIF_F_RXHASH; 795 if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V4)) 796 dflt_features |= NETIF_F_IP_CSUM; 797 if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V6)) 798 dflt_features |= NETIF_F_IPV6_CSUM; 799 if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM)) 800 dflt_features |= NETIF_F_RXCSUM; 801 if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_SCTP_CSUM)) 802 dflt_features |= NETIF_F_SCTP_CRC; 803 804 if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP)) 805 dflt_features |= NETIF_F_TSO; 806 if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP)) 807 dflt_features |= NETIF_F_TSO6; 808 if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS, 809 VIRTCHNL2_CAP_SEG_IPV4_UDP | 810 VIRTCHNL2_CAP_SEG_IPV6_UDP)) 811 dflt_features |= NETIF_F_GSO_UDP_L4; 812 if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC)) 813 offloads |= NETIF_F_GRO_HW; 814 /* advertise to stack only if offloads for encapsulated packets is 815 * supported 816 */ 817 if (idpf_is_cap_ena(vport->adapter, IDPF_SEG_CAPS, 818 VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL)) { 819 offloads |= NETIF_F_GSO_UDP_TUNNEL | 820 NETIF_F_GSO_GRE | 821 NETIF_F_GSO_GRE_CSUM | 822 NETIF_F_GSO_PARTIAL | 823 NETIF_F_GSO_UDP_TUNNEL_CSUM | 824 NETIF_F_GSO_IPXIP4 | 825 NETIF_F_GSO_IPXIP6 | 826 0; 827 828 if (!idpf_is_cap_ena_all(vport->adapter, IDPF_CSUM_CAPS, 829 IDPF_CAP_TUNNEL_TX_CSUM)) 830 netdev->gso_partial_features |= 831 NETIF_F_GSO_UDP_TUNNEL_CSUM; 832 833 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 834 offloads |= NETIF_F_TSO_MANGLEID; 835 } 836 if (idpf_is_cap_ena(adapter, 
IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK)) 837 offloads |= NETIF_F_LOOPBACK; 838 839 netdev->features |= dflt_features; 840 netdev->hw_features |= dflt_features | offloads; 841 netdev->hw_enc_features |= dflt_features | offloads; 842 idpf_set_ethtool_ops(netdev); 843 SET_NETDEV_DEV(netdev, &adapter->pdev->dev); 844 845 /* carrier off on init to avoid Tx hangs */ 846 netif_carrier_off(netdev); 847 848 /* make sure transmit queues start off as stopped */ 849 netif_tx_stop_all_queues(netdev); 850 851 /* The vport can be arbitrarily released so we need to also track 852 * netdevs in the adapter struct 853 */ 854 adapter->netdevs[idx] = netdev; 855 856 return 0; 857 } 858 859 /** 860 * idpf_get_free_slot - get the next non-NULL location index in array 861 * @adapter: adapter in which to look for a free vport slot 862 */ 863 static int idpf_get_free_slot(struct idpf_adapter *adapter) 864 { 865 unsigned int i; 866 867 for (i = 0; i < adapter->max_vports; i++) { 868 if (!adapter->vports[i]) 869 return i; 870 } 871 872 return IDPF_NO_FREE_SLOT; 873 } 874 875 /** 876 * idpf_remove_features - Turn off feature configs 877 * @vport: virtual port structure 878 */ 879 static void idpf_remove_features(struct idpf_vport *vport) 880 { 881 struct idpf_adapter *adapter = vport->adapter; 882 883 if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER)) 884 idpf_remove_mac_filters(vport); 885 } 886 887 /** 888 * idpf_vport_stop - Disable a vport 889 * @vport: vport to disable 890 */ 891 static void idpf_vport_stop(struct idpf_vport *vport) 892 { 893 struct idpf_netdev_priv *np = netdev_priv(vport->netdev); 894 895 if (np->state <= __IDPF_VPORT_DOWN) 896 return; 897 898 netif_carrier_off(vport->netdev); 899 netif_tx_disable(vport->netdev); 900 901 idpf_send_disable_vport_msg(vport); 902 idpf_send_disable_queues_msg(vport); 903 idpf_send_map_unmap_queue_vector_msg(vport, false); 904 /* Normally we ask for queues in create_vport, but if the number of 905 * initially requested queues have changed, for example via ethtool 906 * set channels, we do delete queues and then add the queues back 907 * instead of deleting and reallocating the vport. 908 */ 909 if (test_and_clear_bit(IDPF_VPORT_DEL_QUEUES, vport->flags)) 910 idpf_send_delete_queues_msg(vport); 911 912 idpf_remove_features(vport); 913 914 vport->link_up = false; 915 idpf_vport_intr_deinit(vport); 916 idpf_vport_intr_rel(vport); 917 idpf_vport_queues_rel(vport); 918 np->state = __IDPF_VPORT_DOWN; 919 } 920 921 /** 922 * idpf_stop - Disables a network interface 923 * @netdev: network interface device structure 924 * 925 * The stop entry point is called when an interface is de-activated by the OS, 926 * and the netdevice enters the DOWN state. The hardware is still under the 927 * driver's control, but the netdev interface is disabled. 
928 * 929 * Returns success only - not allowed to fail 930 */ 931 static int idpf_stop(struct net_device *netdev) 932 { 933 struct idpf_netdev_priv *np = netdev_priv(netdev); 934 struct idpf_vport *vport; 935 936 if (test_bit(IDPF_REMOVE_IN_PROG, np->adapter->flags)) 937 return 0; 938 939 idpf_vport_ctrl_lock(netdev); 940 vport = idpf_netdev_to_vport(netdev); 941 942 idpf_vport_stop(vport); 943 944 idpf_vport_ctrl_unlock(netdev); 945 946 return 0; 947 } 948 949 /** 950 * idpf_decfg_netdev - Unregister the netdev 951 * @vport: vport for which netdev to be unregistered 952 */ 953 static void idpf_decfg_netdev(struct idpf_vport *vport) 954 { 955 struct idpf_adapter *adapter = vport->adapter; 956 957 unregister_netdev(vport->netdev); 958 free_netdev(vport->netdev); 959 vport->netdev = NULL; 960 961 adapter->netdevs[vport->idx] = NULL; 962 } 963 964 /** 965 * idpf_vport_rel - Delete a vport and free its resources 966 * @vport: the vport being removed 967 */ 968 static void idpf_vport_rel(struct idpf_vport *vport) 969 { 970 struct idpf_adapter *adapter = vport->adapter; 971 struct idpf_vport_config *vport_config; 972 struct idpf_vector_info vec_info; 973 struct idpf_rss_data *rss_data; 974 struct idpf_vport_max_q max_q; 975 u16 idx = vport->idx; 976 int i; 977 978 vport_config = adapter->vport_config[vport->idx]; 979 idpf_deinit_rss(vport); 980 rss_data = &vport_config->user_config.rss_data; 981 kfree(rss_data->rss_key); 982 rss_data->rss_key = NULL; 983 984 idpf_send_destroy_vport_msg(vport); 985 986 /* Set all bits as we dont know on which vc_state the vport vhnl_wq 987 * is waiting on and wakeup the virtchnl workqueue even if it is 988 * waiting for the response as we are going down 989 */ 990 for (i = 0; i < IDPF_VC_NBITS; i++) 991 set_bit(i, vport->vc_state); 992 wake_up(&vport->vchnl_wq); 993 994 mutex_destroy(&vport->vc_buf_lock); 995 996 /* Clear all the bits */ 997 for (i = 0; i < IDPF_VC_NBITS; i++) 998 clear_bit(i, vport->vc_state); 999 1000 /* Release all max queues allocated to the adapter's pool */ 1001 max_q.max_rxq = vport_config->max_q.max_rxq; 1002 max_q.max_txq = vport_config->max_q.max_txq; 1003 max_q.max_bufq = vport_config->max_q.max_bufq; 1004 max_q.max_complq = vport_config->max_q.max_complq; 1005 idpf_vport_dealloc_max_qs(adapter, &max_q); 1006 1007 /* Release all the allocated vectors on the stack */ 1008 vec_info.num_req_vecs = 0; 1009 vec_info.num_curr_vecs = vport->num_q_vectors; 1010 vec_info.default_vport = vport->default_vport; 1011 1012 idpf_req_rel_vector_indexes(adapter, vport->q_vector_idxs, &vec_info); 1013 1014 kfree(vport->q_vector_idxs); 1015 vport->q_vector_idxs = NULL; 1016 1017 kfree(adapter->vport_params_recvd[idx]); 1018 adapter->vport_params_recvd[idx] = NULL; 1019 kfree(adapter->vport_params_reqd[idx]); 1020 adapter->vport_params_reqd[idx] = NULL; 1021 if (adapter->vport_config[idx]) { 1022 kfree(adapter->vport_config[idx]->req_qs_chunks); 1023 adapter->vport_config[idx]->req_qs_chunks = NULL; 1024 } 1025 kfree(vport); 1026 adapter->num_alloc_vports--; 1027 } 1028 1029 /** 1030 * idpf_vport_dealloc - cleanup and release a given vport 1031 * @vport: pointer to idpf vport structure 1032 * 1033 * returns nothing 1034 */ 1035 static void idpf_vport_dealloc(struct idpf_vport *vport) 1036 { 1037 struct idpf_adapter *adapter = vport->adapter; 1038 unsigned int i = vport->idx; 1039 1040 idpf_deinit_mac_addr(vport); 1041 idpf_vport_stop(vport); 1042 1043 if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags)) 1044 idpf_decfg_netdev(vport); 1045 if 
(test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) 1046 idpf_del_all_mac_filters(vport); 1047 1048 if (adapter->netdevs[i]) { 1049 struct idpf_netdev_priv *np = netdev_priv(adapter->netdevs[i]); 1050 1051 np->vport = NULL; 1052 } 1053 1054 idpf_vport_rel(vport); 1055 1056 adapter->vports[i] = NULL; 1057 adapter->next_vport = idpf_get_free_slot(adapter); 1058 } 1059 1060 /** 1061 * idpf_is_hsplit_supported - check whether the header split is supported 1062 * @vport: virtual port to check the capability for 1063 * 1064 * Return: true if it's supported by the HW/FW, false if not. 1065 */ 1066 static bool idpf_is_hsplit_supported(const struct idpf_vport *vport) 1067 { 1068 return idpf_is_queue_model_split(vport->rxq_model) && 1069 idpf_is_cap_ena_all(vport->adapter, IDPF_HSPLIT_CAPS, 1070 IDPF_CAP_HSPLIT); 1071 } 1072 1073 /** 1074 * idpf_vport_get_hsplit - get the current header split feature state 1075 * @vport: virtual port to query the state for 1076 * 1077 * Return: ``ETHTOOL_TCP_DATA_SPLIT_UNKNOWN`` if not supported, 1078 * ``ETHTOOL_TCP_DATA_SPLIT_DISABLED`` if disabled, 1079 * ``ETHTOOL_TCP_DATA_SPLIT_ENABLED`` if active. 1080 */ 1081 u8 idpf_vport_get_hsplit(const struct idpf_vport *vport) 1082 { 1083 const struct idpf_vport_user_config_data *config; 1084 1085 if (!idpf_is_hsplit_supported(vport)) 1086 return ETHTOOL_TCP_DATA_SPLIT_UNKNOWN; 1087 1088 config = &vport->adapter->vport_config[vport->idx]->user_config; 1089 1090 return test_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags) ? 1091 ETHTOOL_TCP_DATA_SPLIT_ENABLED : 1092 ETHTOOL_TCP_DATA_SPLIT_DISABLED; 1093 } 1094 1095 /** 1096 * idpf_vport_set_hsplit - enable or disable header split on a given vport 1097 * @vport: virtual port to configure 1098 * @val: Ethtool flag controlling the header split state 1099 * 1100 * Return: true on success, false if not supported by the HW. 1101 */ 1102 bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val) 1103 { 1104 struct idpf_vport_user_config_data *config; 1105 1106 if (!idpf_is_hsplit_supported(vport)) 1107 return val == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN; 1108 1109 config = &vport->adapter->vport_config[vport->idx]->user_config; 1110 1111 switch (val) { 1112 case ETHTOOL_TCP_DATA_SPLIT_UNKNOWN: 1113 /* Default is to enable */ 1114 case ETHTOOL_TCP_DATA_SPLIT_ENABLED: 1115 __set_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags); 1116 return true; 1117 case ETHTOOL_TCP_DATA_SPLIT_DISABLED: 1118 __clear_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags); 1119 return true; 1120 default: 1121 return false; 1122 } 1123 } 1124 1125 /** 1126 * idpf_vport_alloc - Allocates the next available struct vport in the adapter 1127 * @adapter: board private structure 1128 * @max_q: vport max queue info 1129 * 1130 * returns a pointer to a vport on success, NULL on failure. 
1131 */ 1132 static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter, 1133 struct idpf_vport_max_q *max_q) 1134 { 1135 struct idpf_rss_data *rss_data; 1136 u16 idx = adapter->next_vport; 1137 struct idpf_vport *vport; 1138 u16 num_max_q; 1139 1140 if (idx == IDPF_NO_FREE_SLOT) 1141 return NULL; 1142 1143 vport = kzalloc(sizeof(*vport), GFP_KERNEL); 1144 if (!vport) 1145 return vport; 1146 1147 if (!adapter->vport_config[idx]) { 1148 struct idpf_vport_config *vport_config; 1149 1150 vport_config = kzalloc(sizeof(*vport_config), GFP_KERNEL); 1151 if (!vport_config) { 1152 kfree(vport); 1153 1154 return NULL; 1155 } 1156 1157 adapter->vport_config[idx] = vport_config; 1158 } 1159 1160 vport->idx = idx; 1161 vport->adapter = adapter; 1162 vport->compln_clean_budget = IDPF_TX_COMPLQ_CLEAN_BUDGET; 1163 vport->default_vport = adapter->num_alloc_vports < 1164 idpf_get_default_vports(adapter); 1165 1166 num_max_q = max(max_q->max_txq, max_q->max_rxq); 1167 vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL); 1168 if (!vport->q_vector_idxs) { 1169 kfree(vport); 1170 1171 return NULL; 1172 } 1173 idpf_vport_init(vport, max_q); 1174 1175 /* This alloc is done separate from the LUT because it's not strictly 1176 * dependent on how many queues we have. If we change number of queues 1177 * and soft reset we'll need a new LUT but the key can remain the same 1178 * for as long as the vport exists. 1179 */ 1180 rss_data = &adapter->vport_config[idx]->user_config.rss_data; 1181 rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL); 1182 if (!rss_data->rss_key) { 1183 kfree(vport); 1184 1185 return NULL; 1186 } 1187 /* Initialize default rss key */ 1188 netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size); 1189 1190 /* fill vport slot in the adapter struct */ 1191 adapter->vports[idx] = vport; 1192 adapter->vport_ids[idx] = idpf_get_vport_id(vport); 1193 1194 adapter->num_alloc_vports++; 1195 /* prepare adapter->next_vport for next use */ 1196 adapter->next_vport = idpf_get_free_slot(adapter); 1197 1198 return vport; 1199 } 1200 1201 /** 1202 * idpf_get_stats64 - get statistics for network device structure 1203 * @netdev: network interface device structure 1204 * @stats: main device statistics structure 1205 */ 1206 static void idpf_get_stats64(struct net_device *netdev, 1207 struct rtnl_link_stats64 *stats) 1208 { 1209 struct idpf_netdev_priv *np = netdev_priv(netdev); 1210 1211 spin_lock_bh(&np->stats_lock); 1212 *stats = np->netstats; 1213 spin_unlock_bh(&np->stats_lock); 1214 } 1215 1216 /** 1217 * idpf_statistics_task - Delayed task to get statistics over mailbox 1218 * @work: work_struct handle to our data 1219 */ 1220 void idpf_statistics_task(struct work_struct *work) 1221 { 1222 struct idpf_adapter *adapter; 1223 int i; 1224 1225 adapter = container_of(work, struct idpf_adapter, stats_task.work); 1226 1227 for (i = 0; i < adapter->max_vports; i++) { 1228 struct idpf_vport *vport = adapter->vports[i]; 1229 1230 if (vport && !test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags)) 1231 idpf_send_get_stats_msg(vport); 1232 } 1233 1234 queue_delayed_work(adapter->stats_wq, &adapter->stats_task, 1235 msecs_to_jiffies(10000)); 1236 } 1237 1238 /** 1239 * idpf_mbx_task - Delayed task to handle mailbox responses 1240 * @work: work_struct handle 1241 */ 1242 void idpf_mbx_task(struct work_struct *work) 1243 { 1244 struct idpf_adapter *adapter; 1245 1246 adapter = container_of(work, struct idpf_adapter, mbx_task.work); 1247 1248 if 
(test_bit(IDPF_MB_INTR_MODE, adapter->flags)) 1249 idpf_mb_irq_enable(adapter); 1250 else 1251 queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 1252 msecs_to_jiffies(300)); 1253 1254 idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_UNKNOWN, NULL, 0); 1255 } 1256 1257 /** 1258 * idpf_service_task - Delayed task for handling mailbox responses 1259 * @work: work_struct handle to our data 1260 * 1261 */ 1262 void idpf_service_task(struct work_struct *work) 1263 { 1264 struct idpf_adapter *adapter; 1265 1266 adapter = container_of(work, struct idpf_adapter, serv_task.work); 1267 1268 if (idpf_is_reset_detected(adapter) && 1269 !idpf_is_reset_in_prog(adapter) && 1270 !test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) { 1271 dev_info(&adapter->pdev->dev, "HW reset detected\n"); 1272 set_bit(IDPF_HR_FUNC_RESET, adapter->flags); 1273 queue_delayed_work(adapter->vc_event_wq, 1274 &adapter->vc_event_task, 1275 msecs_to_jiffies(10)); 1276 } 1277 1278 queue_delayed_work(adapter->serv_wq, &adapter->serv_task, 1279 msecs_to_jiffies(300)); 1280 } 1281 1282 /** 1283 * idpf_restore_features - Restore feature configs 1284 * @vport: virtual port structure 1285 */ 1286 static void idpf_restore_features(struct idpf_vport *vport) 1287 { 1288 struct idpf_adapter *adapter = vport->adapter; 1289 1290 if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER)) 1291 idpf_restore_mac_filters(vport); 1292 } 1293 1294 /** 1295 * idpf_set_real_num_queues - set number of queues for netdev 1296 * @vport: virtual port structure 1297 * 1298 * Returns 0 on success, negative on failure. 1299 */ 1300 static int idpf_set_real_num_queues(struct idpf_vport *vport) 1301 { 1302 int err; 1303 1304 err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq); 1305 if (err) 1306 return err; 1307 1308 return netif_set_real_num_tx_queues(vport->netdev, vport->num_txq); 1309 } 1310 1311 /** 1312 * idpf_up_complete - Complete interface up sequence 1313 * @vport: virtual port structure 1314 * 1315 * Returns 0 on success, negative on failure. 
1316 */ 1317 static int idpf_up_complete(struct idpf_vport *vport) 1318 { 1319 struct idpf_netdev_priv *np = netdev_priv(vport->netdev); 1320 1321 if (vport->link_up && !netif_carrier_ok(vport->netdev)) { 1322 netif_carrier_on(vport->netdev); 1323 netif_tx_start_all_queues(vport->netdev); 1324 } 1325 1326 np->state = __IDPF_VPORT_UP; 1327 1328 return 0; 1329 } 1330 1331 /** 1332 * idpf_rx_init_buf_tail - Write initial buffer ring tail value 1333 * @vport: virtual port struct 1334 */ 1335 static void idpf_rx_init_buf_tail(struct idpf_vport *vport) 1336 { 1337 int i, j; 1338 1339 for (i = 0; i < vport->num_rxq_grp; i++) { 1340 struct idpf_rxq_group *grp = &vport->rxq_grps[i]; 1341 1342 if (idpf_is_queue_model_split(vport->rxq_model)) { 1343 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { 1344 struct idpf_queue *q = 1345 &grp->splitq.bufq_sets[j].bufq; 1346 1347 writel(q->next_to_alloc, q->tail); 1348 } 1349 } else { 1350 for (j = 0; j < grp->singleq.num_rxq; j++) { 1351 struct idpf_queue *q = 1352 grp->singleq.rxqs[j]; 1353 1354 writel(q->next_to_alloc, q->tail); 1355 } 1356 } 1357 } 1358 } 1359 1360 /** 1361 * idpf_vport_open - Bring up a vport 1362 * @vport: vport to bring up 1363 * @alloc_res: allocate queue resources 1364 */ 1365 static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res) 1366 { 1367 struct idpf_netdev_priv *np = netdev_priv(vport->netdev); 1368 struct idpf_adapter *adapter = vport->adapter; 1369 struct idpf_vport_config *vport_config; 1370 int err; 1371 1372 if (np->state != __IDPF_VPORT_DOWN) 1373 return -EBUSY; 1374 1375 /* we do not allow interface up just yet */ 1376 netif_carrier_off(vport->netdev); 1377 1378 if (alloc_res) { 1379 err = idpf_vport_queues_alloc(vport); 1380 if (err) 1381 return err; 1382 } 1383 1384 err = idpf_vport_intr_alloc(vport); 1385 if (err) { 1386 dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n", 1387 vport->vport_id, err); 1388 goto queues_rel; 1389 } 1390 1391 err = idpf_vport_queue_ids_init(vport); 1392 if (err) { 1393 dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n", 1394 vport->vport_id, err); 1395 goto intr_rel; 1396 } 1397 1398 err = idpf_vport_intr_init(vport); 1399 if (err) { 1400 dev_err(&adapter->pdev->dev, "Failed to initialize interrupts for vport %u: %d\n", 1401 vport->vport_id, err); 1402 goto intr_rel; 1403 } 1404 1405 err = idpf_rx_bufs_init_all(vport); 1406 if (err) { 1407 dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n", 1408 vport->vport_id, err); 1409 goto intr_rel; 1410 } 1411 1412 err = idpf_queue_reg_init(vport); 1413 if (err) { 1414 dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n", 1415 vport->vport_id, err); 1416 goto intr_rel; 1417 } 1418 1419 idpf_rx_init_buf_tail(vport); 1420 1421 err = idpf_send_config_queues_msg(vport); 1422 if (err) { 1423 dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n", 1424 vport->vport_id, err); 1425 goto intr_deinit; 1426 } 1427 1428 err = idpf_send_map_unmap_queue_vector_msg(vport, true); 1429 if (err) { 1430 dev_err(&adapter->pdev->dev, "Failed to map queue vectors for vport %u: %d\n", 1431 vport->vport_id, err); 1432 goto intr_deinit; 1433 } 1434 1435 err = idpf_send_enable_queues_msg(vport); 1436 if (err) { 1437 dev_err(&adapter->pdev->dev, "Failed to enable queues for vport %u: %d\n", 1438 vport->vport_id, err); 1439 goto unmap_queue_vectors; 1440 } 1441 1442 err = idpf_send_enable_vport_msg(vport); 1443 if (err) 
{ 1444 dev_err(&adapter->pdev->dev, "Failed to enable vport %u: %d\n", 1445 vport->vport_id, err); 1446 err = -EAGAIN; 1447 goto disable_queues; 1448 } 1449 1450 idpf_restore_features(vport); 1451 1452 vport_config = adapter->vport_config[vport->idx]; 1453 if (vport_config->user_config.rss_data.rss_lut) 1454 err = idpf_config_rss(vport); 1455 else 1456 err = idpf_init_rss(vport); 1457 if (err) { 1458 dev_err(&adapter->pdev->dev, "Failed to initialize RSS for vport %u: %d\n", 1459 vport->vport_id, err); 1460 goto disable_vport; 1461 } 1462 1463 err = idpf_up_complete(vport); 1464 if (err) { 1465 dev_err(&adapter->pdev->dev, "Failed to complete interface up for vport %u: %d\n", 1466 vport->vport_id, err); 1467 goto deinit_rss; 1468 } 1469 1470 return 0; 1471 1472 deinit_rss: 1473 idpf_deinit_rss(vport); 1474 disable_vport: 1475 idpf_send_disable_vport_msg(vport); 1476 disable_queues: 1477 idpf_send_disable_queues_msg(vport); 1478 unmap_queue_vectors: 1479 idpf_send_map_unmap_queue_vector_msg(vport, false); 1480 intr_deinit: 1481 idpf_vport_intr_deinit(vport); 1482 intr_rel: 1483 idpf_vport_intr_rel(vport); 1484 queues_rel: 1485 idpf_vport_queues_rel(vport); 1486 1487 return err; 1488 } 1489 1490 /** 1491 * idpf_init_task - Delayed initialization task 1492 * @work: work_struct handle to our data 1493 * 1494 * Init task finishes up pending work started in probe. Due to the asynchronous 1495 * nature in which the device communicates with hardware, we may have to wait 1496 * several milliseconds to get a response. Instead of busy polling in probe, 1497 * pulling it out into a delayed work task prevents us from bogging down the 1498 * whole system waiting for a response from hardware. 1499 */ 1500 void idpf_init_task(struct work_struct *work) 1501 { 1502 struct idpf_vport_config *vport_config; 1503 struct idpf_vport_max_q max_q; 1504 struct idpf_adapter *adapter; 1505 struct idpf_netdev_priv *np; 1506 struct idpf_vport *vport; 1507 u16 num_default_vports; 1508 struct pci_dev *pdev; 1509 bool default_vport; 1510 int index, err; 1511 1512 adapter = container_of(work, struct idpf_adapter, init_task.work); 1513 1514 num_default_vports = idpf_get_default_vports(adapter); 1515 if (adapter->num_alloc_vports < num_default_vports) 1516 default_vport = true; 1517 else 1518 default_vport = false; 1519 1520 err = idpf_vport_alloc_max_qs(adapter, &max_q); 1521 if (err) 1522 goto unwind_vports; 1523 1524 err = idpf_send_create_vport_msg(adapter, &max_q); 1525 if (err) { 1526 idpf_vport_dealloc_max_qs(adapter, &max_q); 1527 goto unwind_vports; 1528 } 1529 1530 pdev = adapter->pdev; 1531 vport = idpf_vport_alloc(adapter, &max_q); 1532 if (!vport) { 1533 err = -EFAULT; 1534 dev_err(&pdev->dev, "failed to allocate vport: %d\n", 1535 err); 1536 idpf_vport_dealloc_max_qs(adapter, &max_q); 1537 goto unwind_vports; 1538 } 1539 1540 index = vport->idx; 1541 vport_config = adapter->vport_config[index]; 1542 1543 init_waitqueue_head(&vport->sw_marker_wq); 1544 init_waitqueue_head(&vport->vchnl_wq); 1545 1546 mutex_init(&vport->vc_buf_lock); 1547 spin_lock_init(&vport_config->mac_filter_list_lock); 1548 1549 INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list); 1550 1551 err = idpf_check_supported_desc_ids(vport); 1552 if (err) { 1553 dev_err(&pdev->dev, "failed to get required descriptor ids\n"); 1554 goto cfg_netdev_err; 1555 } 1556 1557 if (idpf_cfg_netdev(vport)) 1558 goto cfg_netdev_err; 1559 1560 err = idpf_send_get_rx_ptype_msg(vport); 1561 if (err) 1562 goto handle_err; 1563 1564 /* Once state is put into 
DOWN, driver is ready for dev_open */ 1565 np = netdev_priv(vport->netdev); 1566 np->state = __IDPF_VPORT_DOWN; 1567 if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags)) 1568 idpf_vport_open(vport, true); 1569 1570 /* Spawn and return 'idpf_init_task' work queue until all the 1571 * default vports are created 1572 */ 1573 if (adapter->num_alloc_vports < num_default_vports) { 1574 queue_delayed_work(adapter->init_wq, &adapter->init_task, 1575 msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); 1576 1577 return; 1578 } 1579 1580 for (index = 0; index < adapter->max_vports; index++) { 1581 if (adapter->netdevs[index] && 1582 !test_bit(IDPF_VPORT_REG_NETDEV, 1583 adapter->vport_config[index]->flags)) { 1584 register_netdev(adapter->netdevs[index]); 1585 set_bit(IDPF_VPORT_REG_NETDEV, 1586 adapter->vport_config[index]->flags); 1587 } 1588 } 1589 1590 /* As all the required vports are created, clear the reset flag 1591 * unconditionally here in case we were in reset and the link was down. 1592 */ 1593 clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); 1594 /* Start the statistics task now */ 1595 queue_delayed_work(adapter->stats_wq, &adapter->stats_task, 1596 msecs_to_jiffies(10 * (pdev->devfn & 0x07))); 1597 1598 return; 1599 1600 handle_err: 1601 idpf_decfg_netdev(vport); 1602 cfg_netdev_err: 1603 idpf_vport_rel(vport); 1604 adapter->vports[index] = NULL; 1605 unwind_vports: 1606 if (default_vport) { 1607 for (index = 0; index < adapter->max_vports; index++) { 1608 if (adapter->vports[index]) 1609 idpf_vport_dealloc(adapter->vports[index]); 1610 } 1611 } 1612 clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); 1613 } 1614 1615 /** 1616 * idpf_sriov_ena - Enable or change number of VFs 1617 * @adapter: private data struct 1618 * @num_vfs: number of VFs to allocate 1619 */ 1620 static int idpf_sriov_ena(struct idpf_adapter *adapter, int num_vfs) 1621 { 1622 struct device *dev = &adapter->pdev->dev; 1623 int err; 1624 1625 err = idpf_send_set_sriov_vfs_msg(adapter, num_vfs); 1626 if (err) { 1627 dev_err(dev, "Failed to allocate VFs: %d\n", err); 1628 1629 return err; 1630 } 1631 1632 err = pci_enable_sriov(adapter->pdev, num_vfs); 1633 if (err) { 1634 idpf_send_set_sriov_vfs_msg(adapter, 0); 1635 dev_err(dev, "Failed to enable SR-IOV: %d\n", err); 1636 1637 return err; 1638 } 1639 1640 adapter->num_vfs = num_vfs; 1641 1642 return num_vfs; 1643 } 1644 1645 /** 1646 * idpf_sriov_configure - Configure the requested VFs 1647 * @pdev: pointer to a pci_dev structure 1648 * @num_vfs: number of vfs to allocate 1649 * 1650 * Enable or change the number of VFs. Called when the user updates the number 1651 * of VFs in sysfs. 
1652 **/ 1653 int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs) 1654 { 1655 struct idpf_adapter *adapter = pci_get_drvdata(pdev); 1656 1657 if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_SRIOV)) { 1658 dev_info(&pdev->dev, "SR-IOV is not supported on this device\n"); 1659 1660 return -EOPNOTSUPP; 1661 } 1662 1663 if (num_vfs) 1664 return idpf_sriov_ena(adapter, num_vfs); 1665 1666 if (pci_vfs_assigned(pdev)) { 1667 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs\n"); 1668 1669 return -EBUSY; 1670 } 1671 1672 pci_disable_sriov(adapter->pdev); 1673 idpf_send_set_sriov_vfs_msg(adapter, 0); 1674 adapter->num_vfs = 0; 1675 1676 return 0; 1677 } 1678 1679 /** 1680 * idpf_deinit_task - Device deinit routine 1681 * @adapter: Driver specific private structure 1682 * 1683 * Extended remove logic which will be used for 1684 * hard reset as well 1685 */ 1686 void idpf_deinit_task(struct idpf_adapter *adapter) 1687 { 1688 unsigned int i; 1689 1690 /* Wait until the init_task is done else this thread might release 1691 * the resources first and the other thread might end up in a bad state 1692 */ 1693 cancel_delayed_work_sync(&adapter->init_task); 1694 1695 if (!adapter->vports) 1696 return; 1697 1698 cancel_delayed_work_sync(&adapter->stats_task); 1699 1700 for (i = 0; i < adapter->max_vports; i++) { 1701 if (adapter->vports[i]) 1702 idpf_vport_dealloc(adapter->vports[i]); 1703 } 1704 } 1705 1706 /** 1707 * idpf_check_reset_complete - check that reset is complete 1708 * @hw: pointer to hw struct 1709 * @reset_reg: struct with reset registers 1710 * 1711 * Returns 0 if device is ready to use, or -EBUSY if it's in reset. 1712 **/ 1713 static int idpf_check_reset_complete(struct idpf_hw *hw, 1714 struct idpf_reset_reg *reset_reg) 1715 { 1716 struct idpf_adapter *adapter = hw->back; 1717 int i; 1718 1719 for (i = 0; i < 2000; i++) { 1720 u32 reg_val = readl(reset_reg->rstat); 1721 1722 /* 0xFFFFFFFF might be read if other side hasn't cleared the 1723 * register for us yet and 0xFFFFFFFF is not a valid value for 1724 * the register, so treat that as invalid. 1725 */ 1726 if (reg_val != 0xFFFFFFFF && (reg_val & reset_reg->rstat_m)) 1727 return 0; 1728 1729 usleep_range(5000, 10000); 1730 } 1731 1732 dev_warn(&adapter->pdev->dev, "Device reset timeout!\n"); 1733 /* Clear the reset flag unconditionally here since the reset 1734 * technically isn't in progress anymore from the driver's perspective 1735 */ 1736 clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); 1737 1738 return -EBUSY; 1739 } 1740 1741 /** 1742 * idpf_set_vport_state - Set the vport state to be after the reset 1743 * @adapter: Driver specific private structure 1744 */ 1745 static void idpf_set_vport_state(struct idpf_adapter *adapter) 1746 { 1747 u16 i; 1748 1749 for (i = 0; i < adapter->max_vports; i++) { 1750 struct idpf_netdev_priv *np; 1751 1752 if (!adapter->netdevs[i]) 1753 continue; 1754 1755 np = netdev_priv(adapter->netdevs[i]); 1756 if (np->state == __IDPF_VPORT_UP) 1757 set_bit(IDPF_VPORT_UP_REQUESTED, 1758 adapter->vport_config[i]->flags); 1759 } 1760 } 1761 1762 /** 1763 * idpf_init_hard_reset - Initiate a hardware reset 1764 * @adapter: Driver specific private structure 1765 * 1766 * Deallocate the vports and all the resources associated with them and 1767 * reallocate. Also reinitialize the mailbox. Return 0 on success, 1768 * negative on failure. 
1769 */ 1770 static int idpf_init_hard_reset(struct idpf_adapter *adapter) 1771 { 1772 struct idpf_reg_ops *reg_ops = &adapter->dev_ops.reg_ops; 1773 struct device *dev = &adapter->pdev->dev; 1774 struct net_device *netdev; 1775 int err; 1776 u16 i; 1777 1778 mutex_lock(&adapter->vport_ctrl_lock); 1779 1780 dev_info(dev, "Device HW Reset initiated\n"); 1781 1782 /* Avoid TX hangs on reset */ 1783 for (i = 0; i < adapter->max_vports; i++) { 1784 netdev = adapter->netdevs[i]; 1785 if (!netdev) 1786 continue; 1787 1788 netif_carrier_off(netdev); 1789 netif_tx_disable(netdev); 1790 } 1791 1792 /* Prepare for reset */ 1793 if (test_and_clear_bit(IDPF_HR_DRV_LOAD, adapter->flags)) { 1794 reg_ops->trigger_reset(adapter, IDPF_HR_DRV_LOAD); 1795 } else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) { 1796 bool is_reset = idpf_is_reset_detected(adapter); 1797 1798 idpf_set_vport_state(adapter); 1799 idpf_vc_core_deinit(adapter); 1800 if (!is_reset) 1801 reg_ops->trigger_reset(adapter, IDPF_HR_FUNC_RESET); 1802 idpf_deinit_dflt_mbx(adapter); 1803 } else { 1804 dev_err(dev, "Unhandled hard reset cause\n"); 1805 err = -EBADRQC; 1806 goto unlock_mutex; 1807 } 1808 1809 /* Wait for reset to complete */ 1810 err = idpf_check_reset_complete(&adapter->hw, &adapter->reset_reg); 1811 if (err) { 1812 dev_err(dev, "The driver was unable to contact the device's firmware. Check that the FW is running. Driver state= 0x%x\n", 1813 adapter->state); 1814 goto unlock_mutex; 1815 } 1816 1817 /* Reset is complete and so start building the driver resources again */ 1818 err = idpf_init_dflt_mbx(adapter); 1819 if (err) { 1820 dev_err(dev, "Failed to initialize default mailbox: %d\n", err); 1821 goto unlock_mutex; 1822 } 1823 1824 /* Initialize the state machine, also allocate memory and request 1825 * resources 1826 */ 1827 err = idpf_vc_core_init(adapter); 1828 if (err) { 1829 idpf_deinit_dflt_mbx(adapter); 1830 goto unlock_mutex; 1831 } 1832 1833 /* Wait till all the vports are initialized to release the reset lock, 1834 * else user space callbacks may access uninitialized vports 1835 */ 1836 while (test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags)) 1837 msleep(100); 1838 1839 unlock_mutex: 1840 mutex_unlock(&adapter->vport_ctrl_lock); 1841 1842 return err; 1843 } 1844 1845 /** 1846 * idpf_vc_event_task - Handle virtchannel event logic 1847 * @work: work queue struct 1848 */ 1849 void idpf_vc_event_task(struct work_struct *work) 1850 { 1851 struct idpf_adapter *adapter; 1852 1853 adapter = container_of(work, struct idpf_adapter, vc_event_task.work); 1854 1855 if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) 1856 return; 1857 1858 if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags) || 1859 test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) { 1860 set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); 1861 idpf_init_hard_reset(adapter); 1862 } 1863 } 1864 1865 /** 1866 * idpf_initiate_soft_reset - Initiate a software reset 1867 * @vport: virtual port data struct 1868 * @reset_cause: reason for the soft reset 1869 * 1870 * Soft reset only reallocs vport queue resources. Returns 0 on success, 1871 * negative on failure. 
1872 */ 1873 int idpf_initiate_soft_reset(struct idpf_vport *vport, 1874 enum idpf_vport_reset_cause reset_cause) 1875 { 1876 struct idpf_netdev_priv *np = netdev_priv(vport->netdev); 1877 enum idpf_vport_state current_state = np->state; 1878 struct idpf_adapter *adapter = vport->adapter; 1879 struct idpf_vport *new_vport; 1880 int err, i; 1881 1882 /* If the system is low on memory, we can end up in bad state if we 1883 * free all the memory for queue resources and try to allocate them 1884 * again. Instead, we can pre-allocate the new resources before doing 1885 * anything and bailing if the alloc fails. 1886 * 1887 * Make a clone of the existing vport to mimic its current 1888 * configuration, then modify the new structure with any requested 1889 * changes. Once the allocation of the new resources is done, stop the 1890 * existing vport and copy the configuration to the main vport. If an 1891 * error occurred, the existing vport will be untouched. 1892 * 1893 */ 1894 new_vport = kzalloc(sizeof(*vport), GFP_KERNEL); 1895 if (!new_vport) 1896 return -ENOMEM; 1897 1898 /* This purposely avoids copying the end of the struct because it 1899 * contains wait_queues and mutexes and other stuff we don't want to 1900 * mess with. Nothing below should use those variables from new_vport 1901 * and should instead always refer to them in vport if they need to. 1902 */ 1903 memcpy(new_vport, vport, offsetof(struct idpf_vport, vc_state)); 1904 1905 /* Adjust resource parameters prior to reallocating resources */ 1906 switch (reset_cause) { 1907 case IDPF_SR_Q_CHANGE: 1908 err = idpf_vport_adjust_qs(new_vport); 1909 if (err) 1910 goto free_vport; 1911 break; 1912 case IDPF_SR_Q_DESC_CHANGE: 1913 /* Update queue parameters before allocating resources */ 1914 idpf_vport_calc_num_q_desc(new_vport); 1915 break; 1916 case IDPF_SR_MTU_CHANGE: 1917 case IDPF_SR_RSC_CHANGE: 1918 break; 1919 default: 1920 dev_err(&adapter->pdev->dev, "Unhandled soft reset cause\n"); 1921 err = -EINVAL; 1922 goto free_vport; 1923 } 1924 1925 err = idpf_vport_queues_alloc(new_vport); 1926 if (err) 1927 goto free_vport; 1928 if (current_state <= __IDPF_VPORT_DOWN) { 1929 idpf_send_delete_queues_msg(vport); 1930 } else { 1931 set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags); 1932 idpf_vport_stop(vport); 1933 } 1934 1935 idpf_deinit_rss(vport); 1936 /* We're passing in vport here because we need its wait_queue 1937 * to send a message and it should be getting all the vport 1938 * config data out of the adapter but we need to be careful not 1939 * to add code to add_queues to change the vport config within 1940 * vport itself as it will be wiped with a memcpy later. 1941 */ 1942 err = idpf_send_add_queues_msg(vport, new_vport->num_txq, 1943 new_vport->num_complq, 1944 new_vport->num_rxq, 1945 new_vport->num_bufq); 1946 if (err) 1947 goto err_reset; 1948 1949 /* Same comment as above regarding avoiding copying the wait_queues and 1950 * mutexes applies here. We do not want to mess with those if possible. 1951 */ 1952 memcpy(vport, new_vport, offsetof(struct idpf_vport, vc_state)); 1953 1954 /* Since idpf_vport_queues_alloc was called with new_port, the queue 1955 * back pointers are currently pointing to the local new_vport. 
Reset 1956 * the backpointers to the original vport here 1957 */ 1958 for (i = 0; i < vport->num_txq_grp; i++) { 1959 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; 1960 int j; 1961 1962 tx_qgrp->vport = vport; 1963 for (j = 0; j < tx_qgrp->num_txq; j++) 1964 tx_qgrp->txqs[j]->vport = vport; 1965 1966 if (idpf_is_queue_model_split(vport->txq_model)) 1967 tx_qgrp->complq->vport = vport; 1968 } 1969 1970 for (i = 0; i < vport->num_rxq_grp; i++) { 1971 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 1972 struct idpf_queue *q; 1973 u16 num_rxq; 1974 int j; 1975 1976 rx_qgrp->vport = vport; 1977 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) 1978 rx_qgrp->splitq.bufq_sets[j].bufq.vport = vport; 1979 1980 if (idpf_is_queue_model_split(vport->rxq_model)) 1981 num_rxq = rx_qgrp->splitq.num_rxq_sets; 1982 else 1983 num_rxq = rx_qgrp->singleq.num_rxq; 1984 1985 for (j = 0; j < num_rxq; j++) { 1986 if (idpf_is_queue_model_split(vport->rxq_model)) 1987 q = &rx_qgrp->splitq.rxq_sets[j]->rxq; 1988 else 1989 q = rx_qgrp->singleq.rxqs[j]; 1990 q->vport = vport; 1991 } 1992 } 1993 1994 if (reset_cause == IDPF_SR_Q_CHANGE) 1995 idpf_vport_alloc_vec_indexes(vport); 1996 1997 err = idpf_set_real_num_queues(vport); 1998 if (err) 1999 goto err_reset; 2000 2001 if (current_state == __IDPF_VPORT_UP) 2002 err = idpf_vport_open(vport, false); 2003 2004 kfree(new_vport); 2005 2006 return err; 2007 2008 err_reset: 2009 idpf_vport_queues_rel(new_vport); 2010 free_vport: 2011 kfree(new_vport); 2012 2013 return err; 2014 } 2015 2016 /** 2017 * idpf_addr_sync - Callback for dev_(mc|uc)_sync to add address 2018 * @netdev: the netdevice 2019 * @addr: address to add 2020 * 2021 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call 2022 * __dev_(uc|mc)_sync from .set_rx_mode. The kernel takes the addr_list_lock spinlock, 2023 * meaning we cannot sleep in this context. Due to this, we have to add the 2024 * filter and send the virtchnl message asynchronously without waiting for the 2025 * response from the other side. We won't know whether or not the operation 2026 * actually succeeded until we get the message back. Returns 0 on success, 2027 * negative on failure. 2028 */ 2029 static int idpf_addr_sync(struct net_device *netdev, const u8 *addr) 2030 { 2031 struct idpf_netdev_priv *np = netdev_priv(netdev); 2032 2033 return idpf_add_mac_filter(np->vport, np, addr, true); 2034 } 2035 2036 /** 2037 * idpf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address 2038 * @netdev: the netdevice 2039 * @addr: address to remove 2040 * 2041 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call 2042 * __dev_(uc|mc)_sync from .set_rx_mode. The kernel takes the addr_list_lock spinlock, 2043 * meaning we cannot sleep in this context. Due to this, we have to delete the 2044 * filter and send the virtchnl message asynchronously without waiting for the 2045 * response from the other side. We won't know whether or not the operation 2046 * actually succeeded until we get the message back. Returns 0 on success, 2047 * negative on failure. 2048 */ 2049 static int idpf_addr_unsync(struct net_device *netdev, const u8 *addr) 2050 { 2051 struct idpf_netdev_priv *np = netdev_priv(netdev); 2052 2053 /* Under some circumstances, we might receive a request to delete 2054 * our own device address from our uc list. Because we store the 2055 * device address in the VSI's MAC filter list, we need to ignore 2056 * such requests and not delete our device address from this list.
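 * That is exactly what the ether_addr_equal() check below guards against.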
2057 */ 2058 if (ether_addr_equal(addr, netdev->dev_addr)) 2059 return 0; 2060 2061 idpf_del_mac_filter(np->vport, np, addr, true); 2062 2063 return 0; 2064 } 2065 2066 /** 2067 * idpf_set_rx_mode - NDO callback to set the netdev filters 2068 * @netdev: network interface device structure 2069 * 2070 * The stack takes the addr_list_lock spinlock before calling our 2071 * .set_rx_mode, so we cannot sleep in this context. 2072 */ 2073 static void idpf_set_rx_mode(struct net_device *netdev) 2074 { 2075 struct idpf_netdev_priv *np = netdev_priv(netdev); 2076 struct idpf_vport_user_config_data *config_data; 2077 struct idpf_adapter *adapter; 2078 bool changed = false; 2079 struct device *dev; 2080 int err; 2081 2082 adapter = np->adapter; 2083 dev = &adapter->pdev->dev; 2084 2085 if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER)) { 2086 __dev_uc_sync(netdev, idpf_addr_sync, idpf_addr_unsync); 2087 __dev_mc_sync(netdev, idpf_addr_sync, idpf_addr_unsync); 2088 } 2089 2090 if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PROMISC)) 2091 return; 2092 2093 config_data = &adapter->vport_config[np->vport_idx]->user_config; 2094 /* IFF_PROMISC enables both unicast and multicast promiscuous, 2095 * while IFF_ALLMULTI only enables multicast such that: 2096 * 2097 * promisc + allmulti = unicast | multicast 2098 * promisc + !allmulti = unicast | multicast 2099 * !promisc + allmulti = multicast 2100 */ 2101 if ((netdev->flags & IFF_PROMISC) && 2102 !test_and_set_bit(__IDPF_PROMISC_UC, config_data->user_flags)) { 2103 changed = true; 2104 dev_info(dev, "Entering promiscuous mode\n"); 2105 if (!test_and_set_bit(__IDPF_PROMISC_MC, config_data->user_flags)) 2106 dev_info(dev, "Entering multicast promiscuous mode\n"); 2107 } 2108 2109 if (!(netdev->flags & IFF_PROMISC) && 2110 test_and_clear_bit(__IDPF_PROMISC_UC, config_data->user_flags)) { 2111 changed = true; 2112 dev_info(dev, "Leaving promiscuous mode\n"); 2113 } 2114 2115 if (netdev->flags & IFF_ALLMULTI && 2116 !test_and_set_bit(__IDPF_PROMISC_MC, config_data->user_flags)) { 2117 changed = true; 2118 dev_info(dev, "Entering multicast promiscuous mode\n"); 2119 } 2120 2121 if (!(netdev->flags & (IFF_ALLMULTI | IFF_PROMISC)) && 2122 test_and_clear_bit(__IDPF_PROMISC_MC, config_data->user_flags)) { 2123 changed = true; 2124 dev_info(dev, "Leaving multicast promiscuous mode\n"); 2125 } 2126 2127 if (!changed) 2128 return; 2129 2130 err = idpf_set_promiscuous(adapter, config_data, np->vport_id); 2131 if (err) 2132 dev_err(dev, "Failed to set promiscuous mode: %d\n", err); 2133 } 2134 2135 /** 2136 * idpf_vport_manage_rss_lut - disable/enable RSS 2137 * @vport: the vport being changed 2138 * 2139 * On a request to disable RSS, this function zeroes out the RSS LUT, while 2140 * on a request to enable RSS, it restores the LUT from the cached default or 2141 * user configured copy. 2142 */ 2143 static int idpf_vport_manage_rss_lut(struct idpf_vport *vport) 2144 { 2145 bool ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH); 2146 struct idpf_rss_data *rss_data; 2147 u16 idx = vport->idx; 2148 int lut_size; 2149 2150 rss_data = &vport->adapter->vport_config[idx]->user_config.rss_data; 2151 lut_size = rss_data->rss_lut_size * sizeof(u32); 2152 2153 if (ena) { 2154 /* This will contain the default or user configured LUT */ 2155 memcpy(rss_data->rss_lut, rss_data->cached_lut, lut_size); 2156 } else { 2157 /* Save a copy of the current LUT to be restored later if 2158 * requested.
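 * The cached copy is what the enable branch above restores once
 * NETIF_F_RXHASH is switched back on (e.g. "ethtool -K <iface> rxhash on").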
2159 */ 2160 memcpy(rss_data->cached_lut, rss_data->rss_lut, lut_size); 2161 2162 /* Zero out the current LUT to disable */ 2163 memset(rss_data->rss_lut, 0, lut_size); 2164 } 2165 2166 return idpf_config_rss(vport); 2167 } 2168 2169 /** 2170 * idpf_set_features - set the netdev feature flags 2171 * @netdev: ptr to the netdev being adjusted 2172 * @features: the feature set that the stack is suggesting 2173 */ 2174 static int idpf_set_features(struct net_device *netdev, 2175 netdev_features_t features) 2176 { 2177 netdev_features_t changed = netdev->features ^ features; 2178 struct idpf_adapter *adapter; 2179 struct idpf_vport *vport; 2180 int err = 0; 2181 2182 idpf_vport_ctrl_lock(netdev); 2183 vport = idpf_netdev_to_vport(netdev); 2184 2185 adapter = vport->adapter; 2186 2187 if (idpf_is_reset_in_prog(adapter)) { 2188 dev_err(&adapter->pdev->dev, "Device is resetting, changing netdev features temporarily unavailable.\n"); 2189 err = -EBUSY; 2190 goto unlock_mutex; 2191 } 2192 2193 if (changed & NETIF_F_RXHASH) { 2194 netdev->features ^= NETIF_F_RXHASH; 2195 err = idpf_vport_manage_rss_lut(vport); 2196 if (err) 2197 goto unlock_mutex; 2198 } 2199 2200 if (changed & NETIF_F_GRO_HW) { 2201 netdev->features ^= NETIF_F_GRO_HW; 2202 err = idpf_initiate_soft_reset(vport, IDPF_SR_RSC_CHANGE); 2203 if (err) 2204 goto unlock_mutex; 2205 } 2206 2207 if (changed & NETIF_F_LOOPBACK) { 2208 netdev->features ^= NETIF_F_LOOPBACK; 2209 err = idpf_send_ena_dis_loopback_msg(vport); 2210 } 2211 2212 unlock_mutex: 2213 idpf_vport_ctrl_unlock(netdev); 2214 2215 return err; 2216 } 2217 2218 /** 2219 * idpf_open - Called when a network interface becomes active 2220 * @netdev: network interface device structure 2221 * 2222 * The open entry point is called when a network interface is made 2223 * active by the system (IFF_UP). At this point all resources needed 2224 * for transmit and receive operations are allocated, the interrupt 2225 * handler is registered with the OS, the netdev watchdog is enabled, 2226 * and the stack is notified that the interface is ready. 
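 *
 * All of that work is done by idpf_vport_open(), called here under the vport
 * control lock so it is serialized against resets and the other ndo
 * callbacks that take the same lock.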
2227 * 2228 * Returns 0 on success, negative value on failure 2229 */ 2230 static int idpf_open(struct net_device *netdev) 2231 { 2232 struct idpf_vport *vport; 2233 int err; 2234 2235 idpf_vport_ctrl_lock(netdev); 2236 vport = idpf_netdev_to_vport(netdev); 2237 2238 err = idpf_vport_open(vport, true); 2239 2240 idpf_vport_ctrl_unlock(netdev); 2241 2242 return err; 2243 } 2244 2245 /** 2246 * idpf_change_mtu - NDO callback to change the MTU 2247 * @netdev: network interface device structure 2248 * @new_mtu: new value for maximum frame size 2249 * 2250 * Returns 0 on success, negative on failure 2251 */ 2252 static int idpf_change_mtu(struct net_device *netdev, int new_mtu) 2253 { 2254 struct idpf_vport *vport; 2255 int err; 2256 2257 idpf_vport_ctrl_lock(netdev); 2258 vport = idpf_netdev_to_vport(netdev); 2259 2260 netdev->mtu = new_mtu; 2261 2262 err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE); 2263 2264 idpf_vport_ctrl_unlock(netdev); 2265 2266 return err; 2267 } 2268 2269 /** 2270 * idpf_features_check - Validate packet conforms to limits 2271 * @skb: skb buffer 2272 * @netdev: This port's netdev 2273 * @features: Offload features that the stack believes apply 2274 */ 2275 static netdev_features_t idpf_features_check(struct sk_buff *skb, 2276 struct net_device *netdev, 2277 netdev_features_t features) 2278 { 2279 struct idpf_vport *vport = idpf_netdev_to_vport(netdev); 2280 struct idpf_adapter *adapter = vport->adapter; 2281 size_t len; 2282 2283 /* No point in doing any of this if neither checksum nor GSO are 2284 * being requested for this frame. We can rule out both by just 2285 * checking for CHECKSUM_PARTIAL 2286 */ 2287 if (skb->ip_summed != CHECKSUM_PARTIAL) 2288 return features; 2289 2290 /* We cannot support GSO if the MSS is going to be less than 2291 * 88 bytes. If it is then we need to drop support for GSO. 
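 * Clearing the GSO feature bits for such frames simply pushes segmentation
 * back to the stack, which will segment the skb in software instead.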
2292 */ 2293 if (skb_is_gso(skb) && 2294 (skb_shinfo(skb)->gso_size < IDPF_TX_TSO_MIN_MSS)) 2295 features &= ~NETIF_F_GSO_MASK; 2296 2297 /* Ensure MACLEN is <= 126 bytes (63 words) and not an odd size */ 2298 len = skb_network_offset(skb); 2299 if (unlikely(len & ~(126))) 2300 goto unsupported; 2301 2302 len = skb_network_header_len(skb); 2303 if (unlikely(len > idpf_get_max_tx_hdr_size(adapter))) 2304 goto unsupported; 2305 2306 if (!skb->encapsulation) 2307 return features; 2308 2309 /* L4TUNLEN can support 127 words */ 2310 len = skb_inner_network_header(skb) - skb_transport_header(skb); 2311 if (unlikely(len & ~(127 * 2))) 2312 goto unsupported; 2313 2314 /* IPLEN can support at most 127 dwords */ 2315 len = skb_inner_network_header_len(skb); 2316 if (unlikely(len > idpf_get_max_tx_hdr_size(adapter))) 2317 goto unsupported; 2318 2319 /* No need to validate L4LEN as TCP is the only protocol with a 2320 * flexible value and we support all possible values supported 2321 * by TCP, which is at most 15 dwords 2322 */ 2323 2324 return features; 2325 2326 unsupported: 2327 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 2328 } 2329 2330 /** 2331 * idpf_set_mac - NDO callback to set port MAC address 2332 * @netdev: network interface device structure 2333 * @p: pointer to an address structure 2334 * 2335 * Returns 0 on success, negative on failure 2336 */ 2337 static int idpf_set_mac(struct net_device *netdev, void *p) 2338 { 2339 struct idpf_netdev_priv *np = netdev_priv(netdev); 2340 struct idpf_vport_config *vport_config; 2341 struct sockaddr *addr = p; 2342 struct idpf_vport *vport; 2343 int err = 0; 2344 2345 idpf_vport_ctrl_lock(netdev); 2346 vport = idpf_netdev_to_vport(netdev); 2347 2348 if (!idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, 2349 VIRTCHNL2_CAP_MACFILTER)) { 2350 dev_info(&vport->adapter->pdev->dev, "Setting MAC address is not supported\n"); 2351 err = -EOPNOTSUPP; 2352 goto unlock_mutex; 2353 } 2354 2355 if (!is_valid_ether_addr(addr->sa_data)) { 2356 dev_info(&vport->adapter->pdev->dev, "Invalid MAC address: %pM\n", 2357 addr->sa_data); 2358 err = -EADDRNOTAVAIL; 2359 goto unlock_mutex; 2360 } 2361 2362 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) 2363 goto unlock_mutex; 2364 2365 vport_config = vport->adapter->vport_config[vport->idx]; 2366 err = idpf_add_mac_filter(vport, np, addr->sa_data, false); 2367 if (err) { 2368 __idpf_del_mac_filter(vport_config, addr->sa_data); 2369 goto unlock_mutex; 2370 } 2371 2372 if (is_valid_ether_addr(vport->default_mac_addr)) 2373 idpf_del_mac_filter(vport, np, vport->default_mac_addr, false); 2374 2375 ether_addr_copy(vport->default_mac_addr, addr->sa_data); 2376 eth_hw_addr_set(netdev, addr->sa_data); 2377 2378 unlock_mutex: 2379 idpf_vport_ctrl_unlock(netdev); 2380 2381 return err; 2382 } 2383 2384 /** 2385 * idpf_alloc_dma_mem - Allocate DMA memory 2386 * @hw: pointer to hw struct 2387 * @mem: pointer to dma_mem struct 2388 * @size: size of the memory to allocate 2389 */ 2390 void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size) 2391 { 2392 struct idpf_adapter *adapter = hw->back; 2393 size_t sz = ALIGN(size, 4096); 2394 2395 mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz, 2396 &mem->pa, GFP_KERNEL); 2397 mem->size = sz; 2398 2399 return mem->va; 2400 } 2401 2402 /** 2403 * idpf_free_dma_mem - Free the allocated DMA memory 2404 * @hw: pointer to hw struct 2405 * @mem: pointer to dma_mem struct 2406 */ 2407 void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
2408 { 2409 struct idpf_adapter *adapter = hw->back; 2410 2411 dma_free_coherent(&adapter->pdev->dev, mem->size, 2412 mem->va, mem->pa); 2413 mem->size = 0; 2414 mem->va = NULL; 2415 mem->pa = 0; 2416 } 2417 2418 static const struct net_device_ops idpf_netdev_ops_splitq = { 2419 .ndo_open = idpf_open, 2420 .ndo_stop = idpf_stop, 2421 .ndo_start_xmit = idpf_tx_splitq_start, 2422 .ndo_features_check = idpf_features_check, 2423 .ndo_set_rx_mode = idpf_set_rx_mode, 2424 .ndo_validate_addr = eth_validate_addr, 2425 .ndo_set_mac_address = idpf_set_mac, 2426 .ndo_change_mtu = idpf_change_mtu, 2427 .ndo_get_stats64 = idpf_get_stats64, 2428 .ndo_set_features = idpf_set_features, 2429 .ndo_tx_timeout = idpf_tx_timeout, 2430 }; 2431 2432 static const struct net_device_ops idpf_netdev_ops_singleq = { 2433 .ndo_open = idpf_open, 2434 .ndo_stop = idpf_stop, 2435 .ndo_start_xmit = idpf_tx_singleq_start, 2436 .ndo_features_check = idpf_features_check, 2437 .ndo_set_rx_mode = idpf_set_rx_mode, 2438 .ndo_validate_addr = eth_validate_addr, 2439 .ndo_set_mac_address = idpf_set_mac, 2440 .ndo_change_mtu = idpf_change_mtu, 2441 .ndo_get_stats64 = idpf_get_stats64, 2442 .ndo_set_features = idpf_set_features, 2443 .ndo_tx_timeout = idpf_tx_timeout, 2444 }; 2445
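/* The two ops tables above are identical except for .ndo_start_xmit: the
 * split queue model transmits via idpf_tx_splitq_start() and the single
 * queue model via idpf_tx_singleq_start(). Which table a netdev ends up
 * with is presumably chosen when the netdev is configured, based on the
 * vport's TX queue model.
 */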