// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include "idpf.h"
#include "idpf_virtchnl.h"
#include "idpf_ptp.h"

static const struct net_device_ops idpf_netdev_ops;

/**
 * idpf_init_vector_stack - Fill the MSIX vector stack with vector indexes
 * @adapter: private data struct
 *
 * Return 0 on success, error on failure
 */
static int idpf_init_vector_stack(struct idpf_adapter *adapter)
{
	struct idpf_vector_lifo *stack;
	u16 min_vec;
	u32 i;

	mutex_lock(&adapter->vector_lock);
	min_vec = adapter->num_msix_entries - adapter->num_avail_msix;
	stack = &adapter->vector_stack;
	stack->size = adapter->num_msix_entries;
	/* set the base and top to point at start of the 'free pool' to
	 * distribute the unused vectors on an on-demand basis
	 */
	stack->base = min_vec;
	stack->top = min_vec;

	stack->vec_idx = kcalloc(stack->size, sizeof(u16), GFP_KERNEL);
	if (!stack->vec_idx) {
		mutex_unlock(&adapter->vector_lock);

		return -ENOMEM;
	}

	for (i = 0; i < stack->size; i++)
		stack->vec_idx[i] = i;

	mutex_unlock(&adapter->vector_lock);

	return 0;
}

/**
 * idpf_deinit_vector_stack - zero out the MSIX vector stack
 * @adapter: private data struct
 */
static void idpf_deinit_vector_stack(struct idpf_adapter *adapter)
{
	struct idpf_vector_lifo *stack;

	mutex_lock(&adapter->vector_lock);
	stack = &adapter->vector_stack;
	kfree(stack->vec_idx);
	stack->vec_idx = NULL;
	mutex_unlock(&adapter->vector_lock);
}

/**
 * idpf_mb_intr_rel_irq - Free the IRQ association with the OS
 * @adapter: adapter structure
 *
 * This will also disable interrupt mode and queue up mailbox task. Mailbox
 * task will reschedule itself if not in interrupt mode.
68 */ 69 static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter) 70 { 71 clear_bit(IDPF_MB_INTR_MODE, adapter->flags); 72 kfree(free_irq(adapter->msix_entries[0].vector, adapter)); 73 queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); 74 } 75 76 /** 77 * idpf_intr_rel - Release interrupt capabilities and free memory 78 * @adapter: adapter to disable interrupts on 79 */ 80 void idpf_intr_rel(struct idpf_adapter *adapter) 81 { 82 if (!adapter->msix_entries) 83 return; 84 85 idpf_mb_intr_rel_irq(adapter); 86 pci_free_irq_vectors(adapter->pdev); 87 idpf_send_dealloc_vectors_msg(adapter); 88 idpf_deinit_vector_stack(adapter); 89 kfree(adapter->msix_entries); 90 adapter->msix_entries = NULL; 91 kfree(adapter->rdma_msix_entries); 92 adapter->rdma_msix_entries = NULL; 93 } 94 95 /** 96 * idpf_mb_intr_clean - Interrupt handler for the mailbox 97 * @irq: interrupt number 98 * @data: pointer to the adapter structure 99 */ 100 static irqreturn_t idpf_mb_intr_clean(int __always_unused irq, void *data) 101 { 102 struct idpf_adapter *adapter = (struct idpf_adapter *)data; 103 104 queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); 105 106 return IRQ_HANDLED; 107 } 108 109 /** 110 * idpf_mb_irq_enable - Enable MSIX interrupt for the mailbox 111 * @adapter: adapter to get the hardware address for register write 112 */ 113 static void idpf_mb_irq_enable(struct idpf_adapter *adapter) 114 { 115 struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg; 116 u32 val; 117 118 val = intr->dyn_ctl_intena_m | intr->dyn_ctl_itridx_m; 119 writel(val, intr->dyn_ctl); 120 writel(intr->icr_ena_ctlq_m, intr->icr_ena); 121 } 122 123 /** 124 * idpf_mb_intr_req_irq - Request irq for the mailbox interrupt 125 * @adapter: adapter structure to pass to the mailbox irq handler 126 */ 127 static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter) 128 { 129 int irq_num, mb_vidx = 0, err; 130 char *name; 131 132 irq_num = adapter->msix_entries[mb_vidx].vector; 133 name = kasprintf(GFP_KERNEL, "%s-%s-%d", 134 dev_driver_string(&adapter->pdev->dev), 135 "Mailbox", mb_vidx); 136 err = request_irq(irq_num, adapter->irq_mb_handler, 0, name, adapter); 137 if (err) { 138 dev_err(&adapter->pdev->dev, 139 "IRQ request for mailbox failed, error: %d\n", err); 140 141 return err; 142 } 143 144 set_bit(IDPF_MB_INTR_MODE, adapter->flags); 145 146 return 0; 147 } 148 149 /** 150 * idpf_mb_intr_init - Initialize the mailbox interrupt 151 * @adapter: adapter structure to store the mailbox vector 152 */ 153 static int idpf_mb_intr_init(struct idpf_adapter *adapter) 154 { 155 adapter->dev_ops.reg_ops.mb_intr_reg_init(adapter); 156 adapter->irq_mb_handler = idpf_mb_intr_clean; 157 158 return idpf_mb_intr_req_irq(adapter); 159 } 160 161 /** 162 * idpf_vector_lifo_push - push MSIX vector index onto stack 163 * @adapter: private data struct 164 * @vec_idx: vector index to store 165 */ 166 static int idpf_vector_lifo_push(struct idpf_adapter *adapter, u16 vec_idx) 167 { 168 struct idpf_vector_lifo *stack = &adapter->vector_stack; 169 170 lockdep_assert_held(&adapter->vector_lock); 171 172 if (stack->top == stack->base) { 173 dev_err(&adapter->pdev->dev, "Exceeded the vector stack limit: %d\n", 174 stack->top); 175 return -EINVAL; 176 } 177 178 stack->vec_idx[--stack->top] = vec_idx; 179 180 return 0; 181 } 182 183 /** 184 * idpf_vector_lifo_pop - pop MSIX vector index from stack 185 * @adapter: private data struct 186 */ 187 static int idpf_vector_lifo_pop(struct idpf_adapter *adapter) 188 { 189 struct idpf_vector_lifo *stack = 
&adapter->vector_stack;

	lockdep_assert_held(&adapter->vector_lock);

	if (stack->top == stack->size) {
		dev_err(&adapter->pdev->dev, "No interrupt vectors are available to distribute!\n");

		return -EINVAL;
	}

	return stack->vec_idx[stack->top++];
}

/**
 * idpf_vector_stash - Store the vector indexes onto the stack
 * @adapter: private data struct
 * @q_vector_idxs: vector index array
 * @vec_info: info related to the number of vectors
 *
 * This function is a no-op if there are no vector indexes to be stashed
 */
static void idpf_vector_stash(struct idpf_adapter *adapter, u16 *q_vector_idxs,
			      struct idpf_vector_info *vec_info)
{
	int i, base = 0;
	u16 vec_idx;

	lockdep_assert_held(&adapter->vector_lock);

	if (!vec_info->num_curr_vecs)
		return;

	/* For default vports, no need to stash vectors allocated from the
	 * default pool onto the stack
	 */
	if (vec_info->default_vport)
		base = IDPF_MIN_Q_VEC;

	for (i = vec_info->num_curr_vecs - 1; i >= base; i--) {
		vec_idx = q_vector_idxs[i];
		idpf_vector_lifo_push(adapter, vec_idx);
		adapter->num_avail_msix++;
	}
}

/**
 * idpf_req_rel_vector_indexes - Request or release MSIX vector indexes
 * @adapter: driver specific private structure
 * @q_vector_idxs: vector index array
 * @vec_info: info related to the number of vectors
 *
 * This is the core function to distribute the MSIX vectors acquired from the
 * OS. It expects the caller to pass the number of vectors required and
 * also previously allocated. First, it stashes previously allocated vector
 * indexes onto the stack and then figures out if it can allocate requested
 * vectors. It can wait on acquiring the mutex lock. If the caller passes 0 as
 * requested vectors, then this function just stashes the already allocated
 * vectors and returns 0.
247 * 248 * Returns actual number of vectors allocated on success, error value on failure 249 * If 0 is returned, implies the stack has no vectors to allocate which is also 250 * a failure case for the caller 251 */ 252 int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter, 253 u16 *q_vector_idxs, 254 struct idpf_vector_info *vec_info) 255 { 256 u16 num_req_vecs, num_alloc_vecs = 0, max_vecs; 257 struct idpf_vector_lifo *stack; 258 int i, j, vecid; 259 260 mutex_lock(&adapter->vector_lock); 261 stack = &adapter->vector_stack; 262 num_req_vecs = vec_info->num_req_vecs; 263 264 /* Stash interrupt vector indexes onto the stack if required */ 265 idpf_vector_stash(adapter, q_vector_idxs, vec_info); 266 267 if (!num_req_vecs) 268 goto rel_lock; 269 270 if (vec_info->default_vport) { 271 /* As IDPF_MIN_Q_VEC per default vport is put aside in the 272 * default pool of the stack, use them for default vports 273 */ 274 j = vec_info->index * IDPF_MIN_Q_VEC + IDPF_MBX_Q_VEC; 275 for (i = 0; i < IDPF_MIN_Q_VEC; i++) { 276 q_vector_idxs[num_alloc_vecs++] = stack->vec_idx[j++]; 277 num_req_vecs--; 278 } 279 } 280 281 /* Find if stack has enough vector to allocate */ 282 max_vecs = min(adapter->num_avail_msix, num_req_vecs); 283 284 for (j = 0; j < max_vecs; j++) { 285 vecid = idpf_vector_lifo_pop(adapter); 286 q_vector_idxs[num_alloc_vecs++] = vecid; 287 } 288 adapter->num_avail_msix -= max_vecs; 289 290 rel_lock: 291 mutex_unlock(&adapter->vector_lock); 292 293 return num_alloc_vecs; 294 } 295 296 /** 297 * idpf_intr_req - Request interrupt capabilities 298 * @adapter: adapter to enable interrupts on 299 * 300 * Returns 0 on success, negative on failure 301 */ 302 int idpf_intr_req(struct idpf_adapter *adapter) 303 { 304 u16 num_lan_vecs, min_lan_vecs, num_rdma_vecs = 0, min_rdma_vecs = 0; 305 u16 default_vports = idpf_get_default_vports(adapter); 306 int num_q_vecs, total_vecs, num_vec_ids; 307 int min_vectors, actual_vecs, err; 308 unsigned int vector; 309 u16 *vecids; 310 int i; 311 312 total_vecs = idpf_get_reserved_vecs(adapter); 313 num_lan_vecs = total_vecs; 314 if (idpf_is_rdma_cap_ena(adapter)) { 315 num_rdma_vecs = idpf_get_reserved_rdma_vecs(adapter); 316 min_rdma_vecs = IDPF_MIN_RDMA_VEC; 317 318 if (!num_rdma_vecs) { 319 /* If idpf_get_reserved_rdma_vecs is 0, vectors are 320 * pulled from the LAN pool. 321 */ 322 num_rdma_vecs = min_rdma_vecs; 323 } else if (num_rdma_vecs < min_rdma_vecs) { 324 dev_err(&adapter->pdev->dev, 325 "Not enough vectors reserved for RDMA (min: %u, current: %u)\n", 326 min_rdma_vecs, num_rdma_vecs); 327 return -EINVAL; 328 } 329 } 330 331 num_q_vecs = total_vecs - IDPF_MBX_Q_VEC; 332 333 err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs); 334 if (err) { 335 dev_err(&adapter->pdev->dev, 336 "Failed to allocate %d vectors: %d\n", num_q_vecs, err); 337 338 return -EAGAIN; 339 } 340 341 min_lan_vecs = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports; 342 min_vectors = min_lan_vecs + min_rdma_vecs; 343 actual_vecs = pci_alloc_irq_vectors(adapter->pdev, min_vectors, 344 total_vecs, PCI_IRQ_MSIX); 345 if (actual_vecs < 0) { 346 dev_err(&adapter->pdev->dev, "Failed to allocate minimum MSIX vectors required: %d\n", 347 min_vectors); 348 err = actual_vecs; 349 goto send_dealloc_vecs; 350 } 351 352 if (idpf_is_rdma_cap_ena(adapter)) { 353 if (actual_vecs < total_vecs) { 354 dev_warn(&adapter->pdev->dev, 355 "Warning: %d vectors requested, only %d available. 
Defaulting to minimum (%d) for RDMA and remaining for LAN.\n", 356 total_vecs, actual_vecs, IDPF_MIN_RDMA_VEC); 357 num_rdma_vecs = IDPF_MIN_RDMA_VEC; 358 } 359 360 adapter->rdma_msix_entries = kcalloc(num_rdma_vecs, 361 sizeof(struct msix_entry), 362 GFP_KERNEL); 363 if (!adapter->rdma_msix_entries) { 364 err = -ENOMEM; 365 goto free_irq; 366 } 367 } 368 369 num_lan_vecs = actual_vecs - num_rdma_vecs; 370 adapter->msix_entries = kcalloc(num_lan_vecs, sizeof(struct msix_entry), 371 GFP_KERNEL); 372 if (!adapter->msix_entries) { 373 err = -ENOMEM; 374 goto free_rdma_msix; 375 } 376 377 adapter->mb_vector.v_idx = le16_to_cpu(adapter->caps.mailbox_vector_id); 378 379 vecids = kcalloc(actual_vecs, sizeof(u16), GFP_KERNEL); 380 if (!vecids) { 381 err = -ENOMEM; 382 goto free_msix; 383 } 384 385 num_vec_ids = idpf_get_vec_ids(adapter, vecids, actual_vecs, 386 &adapter->req_vec_chunks->vchunks); 387 if (num_vec_ids < actual_vecs) { 388 err = -EINVAL; 389 goto free_vecids; 390 } 391 392 for (vector = 0; vector < num_lan_vecs; vector++) { 393 adapter->msix_entries[vector].entry = vecids[vector]; 394 adapter->msix_entries[vector].vector = 395 pci_irq_vector(adapter->pdev, vector); 396 } 397 for (i = 0; i < num_rdma_vecs; vector++, i++) { 398 adapter->rdma_msix_entries[i].entry = vecids[vector]; 399 adapter->rdma_msix_entries[i].vector = 400 pci_irq_vector(adapter->pdev, vector); 401 } 402 403 /* 'num_avail_msix' is used to distribute excess vectors to the vports 404 * after considering the minimum vectors required per each default 405 * vport 406 */ 407 adapter->num_avail_msix = num_lan_vecs - min_lan_vecs; 408 adapter->num_msix_entries = num_lan_vecs; 409 if (idpf_is_rdma_cap_ena(adapter)) 410 adapter->num_rdma_msix_entries = num_rdma_vecs; 411 412 /* Fill MSIX vector lifo stack with vector indexes */ 413 err = idpf_init_vector_stack(adapter); 414 if (err) 415 goto free_vecids; 416 417 err = idpf_mb_intr_init(adapter); 418 if (err) 419 goto deinit_vec_stack; 420 idpf_mb_irq_enable(adapter); 421 kfree(vecids); 422 423 return 0; 424 425 deinit_vec_stack: 426 idpf_deinit_vector_stack(adapter); 427 free_vecids: 428 kfree(vecids); 429 free_msix: 430 kfree(adapter->msix_entries); 431 adapter->msix_entries = NULL; 432 free_rdma_msix: 433 kfree(adapter->rdma_msix_entries); 434 adapter->rdma_msix_entries = NULL; 435 free_irq: 436 pci_free_irq_vectors(adapter->pdev); 437 send_dealloc_vecs: 438 idpf_send_dealloc_vectors_msg(adapter); 439 440 return err; 441 } 442 443 /** 444 * idpf_find_mac_filter - Search filter list for specific mac filter 445 * @vconfig: Vport config structure 446 * @macaddr: The MAC address 447 * 448 * Returns ptr to the filter object or NULL. Must be called while holding the 449 * mac_filter_list_lock. 
450 **/ 451 static struct idpf_mac_filter *idpf_find_mac_filter(struct idpf_vport_config *vconfig, 452 const u8 *macaddr) 453 { 454 struct idpf_mac_filter *f; 455 456 if (!macaddr) 457 return NULL; 458 459 list_for_each_entry(f, &vconfig->user_config.mac_filter_list, list) { 460 if (ether_addr_equal(macaddr, f->macaddr)) 461 return f; 462 } 463 464 return NULL; 465 } 466 467 /** 468 * __idpf_del_mac_filter - Delete a MAC filter from the filter list 469 * @vport_config: Vport config structure 470 * @macaddr: The MAC address 471 * 472 * Returns 0 on success, error value on failure 473 **/ 474 static int __idpf_del_mac_filter(struct idpf_vport_config *vport_config, 475 const u8 *macaddr) 476 { 477 struct idpf_mac_filter *f; 478 479 spin_lock_bh(&vport_config->mac_filter_list_lock); 480 f = idpf_find_mac_filter(vport_config, macaddr); 481 if (f) { 482 list_del(&f->list); 483 kfree(f); 484 } 485 spin_unlock_bh(&vport_config->mac_filter_list_lock); 486 487 return 0; 488 } 489 490 /** 491 * idpf_del_mac_filter - Delete a MAC filter from the filter list 492 * @vport: Main vport structure 493 * @np: Netdev private structure 494 * @macaddr: The MAC address 495 * @async: Don't wait for return message 496 * 497 * Removes filter from list and if interface is up, tells hardware about the 498 * removed filter. 499 **/ 500 static int idpf_del_mac_filter(struct idpf_vport *vport, 501 struct idpf_netdev_priv *np, 502 const u8 *macaddr, bool async) 503 { 504 struct idpf_vport_config *vport_config; 505 struct idpf_mac_filter *f; 506 507 vport_config = np->adapter->vport_config[np->vport_idx]; 508 509 spin_lock_bh(&vport_config->mac_filter_list_lock); 510 f = idpf_find_mac_filter(vport_config, macaddr); 511 if (f) { 512 f->remove = true; 513 } else { 514 spin_unlock_bh(&vport_config->mac_filter_list_lock); 515 516 return -EINVAL; 517 } 518 spin_unlock_bh(&vport_config->mac_filter_list_lock); 519 520 if (np->state == __IDPF_VPORT_UP) { 521 int err; 522 523 err = idpf_add_del_mac_filters(vport, np, false, async); 524 if (err) 525 return err; 526 } 527 528 return __idpf_del_mac_filter(vport_config, macaddr); 529 } 530 531 /** 532 * __idpf_add_mac_filter - Add mac filter helper function 533 * @vport_config: Vport config structure 534 * @macaddr: Address to add 535 * 536 * Takes mac_filter_list_lock spinlock to add new filter to list. 537 */ 538 static int __idpf_add_mac_filter(struct idpf_vport_config *vport_config, 539 const u8 *macaddr) 540 { 541 struct idpf_mac_filter *f; 542 543 spin_lock_bh(&vport_config->mac_filter_list_lock); 544 545 f = idpf_find_mac_filter(vport_config, macaddr); 546 if (f) { 547 f->remove = false; 548 spin_unlock_bh(&vport_config->mac_filter_list_lock); 549 550 return 0; 551 } 552 553 f = kzalloc(sizeof(*f), GFP_ATOMIC); 554 if (!f) { 555 spin_unlock_bh(&vport_config->mac_filter_list_lock); 556 557 return -ENOMEM; 558 } 559 560 ether_addr_copy(f->macaddr, macaddr); 561 list_add_tail(&f->list, &vport_config->user_config.mac_filter_list); 562 f->add = true; 563 564 spin_unlock_bh(&vport_config->mac_filter_list_lock); 565 566 return 0; 567 } 568 569 /** 570 * idpf_add_mac_filter - Add a mac filter to the filter list 571 * @vport: Main vport structure 572 * @np: Netdev private structure 573 * @macaddr: The MAC address 574 * @async: Don't wait for return message 575 * 576 * Returns 0 on success or error on failure. If interface is up, we'll also 577 * send the virtchnl message to tell hardware about the filter. 
578 **/ 579 static int idpf_add_mac_filter(struct idpf_vport *vport, 580 struct idpf_netdev_priv *np, 581 const u8 *macaddr, bool async) 582 { 583 struct idpf_vport_config *vport_config; 584 int err; 585 586 vport_config = np->adapter->vport_config[np->vport_idx]; 587 err = __idpf_add_mac_filter(vport_config, macaddr); 588 if (err) 589 return err; 590 591 if (np->state == __IDPF_VPORT_UP) 592 err = idpf_add_del_mac_filters(vport, np, true, async); 593 594 return err; 595 } 596 597 /** 598 * idpf_del_all_mac_filters - Delete all MAC filters in list 599 * @vport: main vport struct 600 * 601 * Takes mac_filter_list_lock spinlock. Deletes all filters 602 */ 603 static void idpf_del_all_mac_filters(struct idpf_vport *vport) 604 { 605 struct idpf_vport_config *vport_config; 606 struct idpf_mac_filter *f, *ftmp; 607 608 vport_config = vport->adapter->vport_config[vport->idx]; 609 spin_lock_bh(&vport_config->mac_filter_list_lock); 610 611 list_for_each_entry_safe(f, ftmp, &vport_config->user_config.mac_filter_list, 612 list) { 613 list_del(&f->list); 614 kfree(f); 615 } 616 617 spin_unlock_bh(&vport_config->mac_filter_list_lock); 618 } 619 620 /** 621 * idpf_restore_mac_filters - Re-add all MAC filters in list 622 * @vport: main vport struct 623 * 624 * Takes mac_filter_list_lock spinlock. Sets add field to true for filters to 625 * resync filters back to HW. 626 */ 627 static void idpf_restore_mac_filters(struct idpf_vport *vport) 628 { 629 struct idpf_vport_config *vport_config; 630 struct idpf_mac_filter *f; 631 632 vport_config = vport->adapter->vport_config[vport->idx]; 633 spin_lock_bh(&vport_config->mac_filter_list_lock); 634 635 list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list) 636 f->add = true; 637 638 spin_unlock_bh(&vport_config->mac_filter_list_lock); 639 640 idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev), 641 true, false); 642 } 643 644 /** 645 * idpf_remove_mac_filters - Remove all MAC filters in list 646 * @vport: main vport struct 647 * 648 * Takes mac_filter_list_lock spinlock. Sets remove field to true for filters 649 * to remove filters in HW. 
650 */ 651 static void idpf_remove_mac_filters(struct idpf_vport *vport) 652 { 653 struct idpf_vport_config *vport_config; 654 struct idpf_mac_filter *f; 655 656 vport_config = vport->adapter->vport_config[vport->idx]; 657 spin_lock_bh(&vport_config->mac_filter_list_lock); 658 659 list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list) 660 f->remove = true; 661 662 spin_unlock_bh(&vport_config->mac_filter_list_lock); 663 664 idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev), 665 false, false); 666 } 667 668 /** 669 * idpf_deinit_mac_addr - deinitialize mac address for vport 670 * @vport: main vport structure 671 */ 672 static void idpf_deinit_mac_addr(struct idpf_vport *vport) 673 { 674 struct idpf_vport_config *vport_config; 675 struct idpf_mac_filter *f; 676 677 vport_config = vport->adapter->vport_config[vport->idx]; 678 679 spin_lock_bh(&vport_config->mac_filter_list_lock); 680 681 f = idpf_find_mac_filter(vport_config, vport->default_mac_addr); 682 if (f) { 683 list_del(&f->list); 684 kfree(f); 685 } 686 687 spin_unlock_bh(&vport_config->mac_filter_list_lock); 688 } 689 690 /** 691 * idpf_init_mac_addr - initialize mac address for vport 692 * @vport: main vport structure 693 * @netdev: pointer to netdev struct associated with this vport 694 */ 695 static int idpf_init_mac_addr(struct idpf_vport *vport, 696 struct net_device *netdev) 697 { 698 struct idpf_netdev_priv *np = netdev_priv(netdev); 699 struct idpf_adapter *adapter = vport->adapter; 700 int err; 701 702 if (is_valid_ether_addr(vport->default_mac_addr)) { 703 eth_hw_addr_set(netdev, vport->default_mac_addr); 704 ether_addr_copy(netdev->perm_addr, vport->default_mac_addr); 705 706 return idpf_add_mac_filter(vport, np, vport->default_mac_addr, 707 false); 708 } 709 710 if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, 711 VIRTCHNL2_CAP_MACFILTER)) { 712 dev_err(&adapter->pdev->dev, 713 "MAC address is not provided and capability is not set\n"); 714 715 return -EINVAL; 716 } 717 718 eth_hw_addr_random(netdev); 719 err = idpf_add_mac_filter(vport, np, netdev->dev_addr, false); 720 if (err) 721 return err; 722 723 dev_info(&adapter->pdev->dev, "Invalid MAC address %pM, using random %pM\n", 724 vport->default_mac_addr, netdev->dev_addr); 725 ether_addr_copy(vport->default_mac_addr, netdev->dev_addr); 726 727 return 0; 728 } 729 730 /** 731 * idpf_cfg_netdev - Allocate, configure and register a netdev 732 * @vport: main vport structure 733 * 734 * Returns 0 on success, negative value on failure. 
735 */ 736 static int idpf_cfg_netdev(struct idpf_vport *vport) 737 { 738 struct idpf_adapter *adapter = vport->adapter; 739 struct idpf_vport_config *vport_config; 740 netdev_features_t other_offloads = 0; 741 netdev_features_t csum_offloads = 0; 742 netdev_features_t tso_offloads = 0; 743 netdev_features_t dflt_features; 744 struct idpf_netdev_priv *np; 745 struct net_device *netdev; 746 u16 idx = vport->idx; 747 int err; 748 749 vport_config = adapter->vport_config[idx]; 750 751 /* It's possible we already have a netdev allocated and registered for 752 * this vport 753 */ 754 if (test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags)) { 755 netdev = adapter->netdevs[idx]; 756 np = netdev_priv(netdev); 757 np->vport = vport; 758 np->vport_idx = vport->idx; 759 np->vport_id = vport->vport_id; 760 np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter); 761 vport->netdev = netdev; 762 763 return idpf_init_mac_addr(vport, netdev); 764 } 765 766 netdev = alloc_etherdev_mqs(sizeof(struct idpf_netdev_priv), 767 vport_config->max_q.max_txq, 768 vport_config->max_q.max_rxq); 769 if (!netdev) 770 return -ENOMEM; 771 772 vport->netdev = netdev; 773 np = netdev_priv(netdev); 774 np->vport = vport; 775 np->adapter = adapter; 776 np->vport_idx = vport->idx; 777 np->vport_id = vport->vport_id; 778 np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter); 779 np->tx_max_bufs = idpf_get_max_tx_bufs(adapter); 780 781 spin_lock_init(&np->stats_lock); 782 783 err = idpf_init_mac_addr(vport, netdev); 784 if (err) { 785 free_netdev(vport->netdev); 786 vport->netdev = NULL; 787 788 return err; 789 } 790 791 /* assign netdev_ops */ 792 netdev->netdev_ops = &idpf_netdev_ops; 793 794 /* setup watchdog timeout value to be 5 second */ 795 netdev->watchdog_timeo = 5 * HZ; 796 797 netdev->dev_port = idx; 798 799 /* configure default MTU size */ 800 netdev->min_mtu = ETH_MIN_MTU; 801 netdev->max_mtu = vport->max_mtu; 802 803 dflt_features = NETIF_F_SG | 804 NETIF_F_HIGHDMA; 805 806 if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) 807 dflt_features |= NETIF_F_RXHASH; 808 if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, 809 VIRTCHNL2_CAP_FLOW_STEER) && 810 idpf_vport_is_cap_ena(vport, VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER)) 811 dflt_features |= NETIF_F_NTUPLE; 812 if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V4)) 813 csum_offloads |= NETIF_F_IP_CSUM; 814 if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V6)) 815 csum_offloads |= NETIF_F_IPV6_CSUM; 816 if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM)) 817 csum_offloads |= NETIF_F_RXCSUM; 818 if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_SCTP_CSUM)) 819 csum_offloads |= NETIF_F_SCTP_CRC; 820 821 if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP)) 822 tso_offloads |= NETIF_F_TSO; 823 if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP)) 824 tso_offloads |= NETIF_F_TSO6; 825 if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS, 826 VIRTCHNL2_CAP_SEG_IPV4_UDP | 827 VIRTCHNL2_CAP_SEG_IPV6_UDP)) 828 tso_offloads |= NETIF_F_GSO_UDP_L4; 829 if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC)) 830 other_offloads |= NETIF_F_GRO_HW; 831 if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK)) 832 other_offloads |= NETIF_F_LOOPBACK; 833 834 netdev->features |= dflt_features | csum_offloads | tso_offloads; 835 netdev->hw_features |= netdev->features | other_offloads; 836 netdev->vlan_features |= netdev->features | other_offloads; 837 
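	/* Only the default and "other" offload sets are carried over to
	 * hw_enc_features below; the checksum and TSO offload sets are not
	 * advertised for encapsulated traffic.
	 */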
netdev->hw_enc_features |= dflt_features | other_offloads;
	idpf_set_ethtool_ops(netdev);
	netif_set_affinity_auto(netdev);
	SET_NETDEV_DEV(netdev, &adapter->pdev->dev);

	/* carrier off on init to avoid Tx hangs */
	netif_carrier_off(netdev);

	/* make sure transmit queues start off as stopped */
	netif_tx_stop_all_queues(netdev);

	/* The vport can be arbitrarily released so we need to also track
	 * netdevs in the adapter struct
	 */
	adapter->netdevs[idx] = netdev;

	return 0;
}

/**
 * idpf_get_free_slot - get the next free (NULL) slot index in the vports array
 * @adapter: adapter in which to look for a free vport slot
 */
static int idpf_get_free_slot(struct idpf_adapter *adapter)
{
	unsigned int i;

	for (i = 0; i < adapter->max_vports; i++) {
		if (!adapter->vports[i])
			return i;
	}

	return IDPF_NO_FREE_SLOT;
}

/**
 * idpf_remove_features - Turn off feature configs
 * @vport: virtual port structure
 */
static void idpf_remove_features(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;

	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER))
		idpf_remove_mac_filters(vport);
}

/**
 * idpf_vport_stop - Disable a vport
 * @vport: vport to disable
 */
static void idpf_vport_stop(struct idpf_vport *vport)
{
	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);

	if (np->state <= __IDPF_VPORT_DOWN)
		return;

	netif_carrier_off(vport->netdev);
	netif_tx_disable(vport->netdev);

	idpf_send_disable_vport_msg(vport);
	idpf_send_disable_queues_msg(vport);
	idpf_send_map_unmap_queue_vector_msg(vport, false);
	/* Normally we ask for queues in create_vport, but if the number of
	 * initially requested queues has changed, for example via ethtool
	 * set channels, we do delete queues and then add the queues back
	 * instead of deleting and reallocating the vport.
	 */
	if (test_and_clear_bit(IDPF_VPORT_DEL_QUEUES, vport->flags))
		idpf_send_delete_queues_msg(vport);

	idpf_remove_features(vport);

	vport->link_up = false;
	idpf_vport_intr_deinit(vport);
	idpf_vport_queues_rel(vport);
	idpf_vport_intr_rel(vport);
	np->state = __IDPF_VPORT_DOWN;
}

/**
 * idpf_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
925 * 926 * Returns success only - not allowed to fail 927 */ 928 static int idpf_stop(struct net_device *netdev) 929 { 930 struct idpf_netdev_priv *np = netdev_priv(netdev); 931 struct idpf_vport *vport; 932 933 if (test_bit(IDPF_REMOVE_IN_PROG, np->adapter->flags)) 934 return 0; 935 936 idpf_vport_ctrl_lock(netdev); 937 vport = idpf_netdev_to_vport(netdev); 938 939 idpf_vport_stop(vport); 940 941 idpf_vport_ctrl_unlock(netdev); 942 943 return 0; 944 } 945 946 /** 947 * idpf_decfg_netdev - Unregister the netdev 948 * @vport: vport for which netdev to be unregistered 949 */ 950 static void idpf_decfg_netdev(struct idpf_vport *vport) 951 { 952 struct idpf_adapter *adapter = vport->adapter; 953 u16 idx = vport->idx; 954 955 kfree(vport->rx_ptype_lkup); 956 vport->rx_ptype_lkup = NULL; 957 958 if (test_and_clear_bit(IDPF_VPORT_REG_NETDEV, 959 adapter->vport_config[idx]->flags)) { 960 unregister_netdev(vport->netdev); 961 free_netdev(vport->netdev); 962 } 963 vport->netdev = NULL; 964 965 adapter->netdevs[idx] = NULL; 966 } 967 968 /** 969 * idpf_vport_rel - Delete a vport and free its resources 970 * @vport: the vport being removed 971 */ 972 static void idpf_vport_rel(struct idpf_vport *vport) 973 { 974 struct idpf_adapter *adapter = vport->adapter; 975 struct idpf_vport_config *vport_config; 976 struct idpf_vector_info vec_info; 977 struct idpf_rss_data *rss_data; 978 struct idpf_vport_max_q max_q; 979 u16 idx = vport->idx; 980 981 vport_config = adapter->vport_config[vport->idx]; 982 idpf_deinit_rss(vport); 983 rss_data = &vport_config->user_config.rss_data; 984 kfree(rss_data->rss_key); 985 rss_data->rss_key = NULL; 986 987 idpf_send_destroy_vport_msg(vport); 988 989 /* Release all max queues allocated to the adapter's pool */ 990 max_q.max_rxq = vport_config->max_q.max_rxq; 991 max_q.max_txq = vport_config->max_q.max_txq; 992 max_q.max_bufq = vport_config->max_q.max_bufq; 993 max_q.max_complq = vport_config->max_q.max_complq; 994 idpf_vport_dealloc_max_qs(adapter, &max_q); 995 996 /* Release all the allocated vectors on the stack */ 997 vec_info.num_req_vecs = 0; 998 vec_info.num_curr_vecs = vport->num_q_vectors; 999 vec_info.default_vport = vport->default_vport; 1000 1001 idpf_req_rel_vector_indexes(adapter, vport->q_vector_idxs, &vec_info); 1002 1003 kfree(vport->q_vector_idxs); 1004 vport->q_vector_idxs = NULL; 1005 1006 kfree(adapter->vport_params_recvd[idx]); 1007 adapter->vport_params_recvd[idx] = NULL; 1008 kfree(adapter->vport_params_reqd[idx]); 1009 adapter->vport_params_reqd[idx] = NULL; 1010 if (adapter->vport_config[idx]) { 1011 kfree(adapter->vport_config[idx]->req_qs_chunks); 1012 adapter->vport_config[idx]->req_qs_chunks = NULL; 1013 } 1014 kfree(vport); 1015 adapter->num_alloc_vports--; 1016 } 1017 1018 /** 1019 * idpf_vport_dealloc - cleanup and release a given vport 1020 * @vport: pointer to idpf vport structure 1021 * 1022 * returns nothing 1023 */ 1024 static void idpf_vport_dealloc(struct idpf_vport *vport) 1025 { 1026 struct idpf_adapter *adapter = vport->adapter; 1027 unsigned int i = vport->idx; 1028 1029 idpf_idc_deinit_vport_aux_device(vport->vdev_info); 1030 1031 idpf_deinit_mac_addr(vport); 1032 idpf_vport_stop(vport); 1033 1034 if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags)) 1035 idpf_decfg_netdev(vport); 1036 if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) 1037 idpf_del_all_mac_filters(vport); 1038 1039 if (adapter->netdevs[i]) { 1040 struct idpf_netdev_priv *np = netdev_priv(adapter->netdevs[i]); 1041 1042 np->vport = NULL; 1043 } 1044 1045 
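	/* The netdev back-reference is cleared above; now release the vport
	 * itself and recompute the next free slot in the adapter.
	 */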
idpf_vport_rel(vport); 1046 1047 adapter->vports[i] = NULL; 1048 adapter->next_vport = idpf_get_free_slot(adapter); 1049 } 1050 1051 /** 1052 * idpf_is_hsplit_supported - check whether the header split is supported 1053 * @vport: virtual port to check the capability for 1054 * 1055 * Return: true if it's supported by the HW/FW, false if not. 1056 */ 1057 static bool idpf_is_hsplit_supported(const struct idpf_vport *vport) 1058 { 1059 return idpf_is_queue_model_split(vport->rxq_model) && 1060 idpf_is_cap_ena_all(vport->adapter, IDPF_HSPLIT_CAPS, 1061 IDPF_CAP_HSPLIT); 1062 } 1063 1064 /** 1065 * idpf_vport_get_hsplit - get the current header split feature state 1066 * @vport: virtual port to query the state for 1067 * 1068 * Return: ``ETHTOOL_TCP_DATA_SPLIT_UNKNOWN`` if not supported, 1069 * ``ETHTOOL_TCP_DATA_SPLIT_DISABLED`` if disabled, 1070 * ``ETHTOOL_TCP_DATA_SPLIT_ENABLED`` if active. 1071 */ 1072 u8 idpf_vport_get_hsplit(const struct idpf_vport *vport) 1073 { 1074 const struct idpf_vport_user_config_data *config; 1075 1076 if (!idpf_is_hsplit_supported(vport)) 1077 return ETHTOOL_TCP_DATA_SPLIT_UNKNOWN; 1078 1079 config = &vport->adapter->vport_config[vport->idx]->user_config; 1080 1081 return test_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags) ? 1082 ETHTOOL_TCP_DATA_SPLIT_ENABLED : 1083 ETHTOOL_TCP_DATA_SPLIT_DISABLED; 1084 } 1085 1086 /** 1087 * idpf_vport_set_hsplit - enable or disable header split on a given vport 1088 * @vport: virtual port to configure 1089 * @val: Ethtool flag controlling the header split state 1090 * 1091 * Return: true on success, false if not supported by the HW. 1092 */ 1093 bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val) 1094 { 1095 struct idpf_vport_user_config_data *config; 1096 1097 if (!idpf_is_hsplit_supported(vport)) 1098 return val == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN; 1099 1100 config = &vport->adapter->vport_config[vport->idx]->user_config; 1101 1102 switch (val) { 1103 case ETHTOOL_TCP_DATA_SPLIT_UNKNOWN: 1104 /* Default is to enable */ 1105 case ETHTOOL_TCP_DATA_SPLIT_ENABLED: 1106 __set_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags); 1107 return true; 1108 case ETHTOOL_TCP_DATA_SPLIT_DISABLED: 1109 __clear_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags); 1110 return true; 1111 default: 1112 return false; 1113 } 1114 } 1115 1116 /** 1117 * idpf_vport_alloc - Allocates the next available struct vport in the adapter 1118 * @adapter: board private structure 1119 * @max_q: vport max queue info 1120 * 1121 * returns a pointer to a vport on success, NULL on failure. 
1122 */ 1123 static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter, 1124 struct idpf_vport_max_q *max_q) 1125 { 1126 struct idpf_rss_data *rss_data; 1127 u16 idx = adapter->next_vport; 1128 struct idpf_vport *vport; 1129 u16 num_max_q; 1130 1131 if (idx == IDPF_NO_FREE_SLOT) 1132 return NULL; 1133 1134 vport = kzalloc(sizeof(*vport), GFP_KERNEL); 1135 if (!vport) 1136 return vport; 1137 1138 num_max_q = max(max_q->max_txq, max_q->max_rxq); 1139 if (!adapter->vport_config[idx]) { 1140 struct idpf_vport_config *vport_config; 1141 struct idpf_q_coalesce *q_coal; 1142 1143 vport_config = kzalloc(sizeof(*vport_config), GFP_KERNEL); 1144 if (!vport_config) { 1145 kfree(vport); 1146 1147 return NULL; 1148 } 1149 1150 q_coal = kcalloc(num_max_q, sizeof(*q_coal), GFP_KERNEL); 1151 if (!q_coal) { 1152 kfree(vport_config); 1153 kfree(vport); 1154 1155 return NULL; 1156 } 1157 for (int i = 0; i < num_max_q; i++) { 1158 q_coal[i].tx_intr_mode = IDPF_ITR_DYNAMIC; 1159 q_coal[i].tx_coalesce_usecs = IDPF_ITR_TX_DEF; 1160 q_coal[i].rx_intr_mode = IDPF_ITR_DYNAMIC; 1161 q_coal[i].rx_coalesce_usecs = IDPF_ITR_RX_DEF; 1162 } 1163 vport_config->user_config.q_coalesce = q_coal; 1164 1165 adapter->vport_config[idx] = vport_config; 1166 } 1167 1168 vport->idx = idx; 1169 vport->adapter = adapter; 1170 vport->compln_clean_budget = IDPF_TX_COMPLQ_CLEAN_BUDGET; 1171 vport->default_vport = adapter->num_alloc_vports < 1172 idpf_get_default_vports(adapter); 1173 1174 vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL); 1175 if (!vport->q_vector_idxs) 1176 goto free_vport; 1177 1178 idpf_vport_init(vport, max_q); 1179 1180 /* This alloc is done separate from the LUT because it's not strictly 1181 * dependent on how many queues we have. If we change number of queues 1182 * and soft reset we'll need a new LUT but the key can remain the same 1183 * for as long as the vport exists. 
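	 * (The LUT itself is allocated later, when RSS is initialized for the
	 * vport.)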
1184 */ 1185 rss_data = &adapter->vport_config[idx]->user_config.rss_data; 1186 rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL); 1187 if (!rss_data->rss_key) 1188 goto free_vector_idxs; 1189 1190 /* Initialize default rss key */ 1191 netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size); 1192 1193 /* fill vport slot in the adapter struct */ 1194 adapter->vports[idx] = vport; 1195 adapter->vport_ids[idx] = idpf_get_vport_id(vport); 1196 1197 adapter->num_alloc_vports++; 1198 /* prepare adapter->next_vport for next use */ 1199 adapter->next_vport = idpf_get_free_slot(adapter); 1200 1201 return vport; 1202 1203 free_vector_idxs: 1204 kfree(vport->q_vector_idxs); 1205 free_vport: 1206 kfree(vport); 1207 1208 return NULL; 1209 } 1210 1211 /** 1212 * idpf_get_stats64 - get statistics for network device structure 1213 * @netdev: network interface device structure 1214 * @stats: main device statistics structure 1215 */ 1216 static void idpf_get_stats64(struct net_device *netdev, 1217 struct rtnl_link_stats64 *stats) 1218 { 1219 struct idpf_netdev_priv *np = netdev_priv(netdev); 1220 1221 spin_lock_bh(&np->stats_lock); 1222 *stats = np->netstats; 1223 spin_unlock_bh(&np->stats_lock); 1224 } 1225 1226 /** 1227 * idpf_statistics_task - Delayed task to get statistics over mailbox 1228 * @work: work_struct handle to our data 1229 */ 1230 void idpf_statistics_task(struct work_struct *work) 1231 { 1232 struct idpf_adapter *adapter; 1233 int i; 1234 1235 adapter = container_of(work, struct idpf_adapter, stats_task.work); 1236 1237 for (i = 0; i < adapter->max_vports; i++) { 1238 struct idpf_vport *vport = adapter->vports[i]; 1239 1240 if (vport && !test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags)) 1241 idpf_send_get_stats_msg(vport); 1242 } 1243 1244 queue_delayed_work(adapter->stats_wq, &adapter->stats_task, 1245 msecs_to_jiffies(10000)); 1246 } 1247 1248 /** 1249 * idpf_mbx_task - Delayed task to handle mailbox responses 1250 * @work: work_struct handle 1251 */ 1252 void idpf_mbx_task(struct work_struct *work) 1253 { 1254 struct idpf_adapter *adapter; 1255 1256 adapter = container_of(work, struct idpf_adapter, mbx_task.work); 1257 1258 if (test_bit(IDPF_MB_INTR_MODE, adapter->flags)) 1259 idpf_mb_irq_enable(adapter); 1260 else 1261 queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 1262 msecs_to_jiffies(300)); 1263 1264 idpf_recv_mb_msg(adapter); 1265 } 1266 1267 /** 1268 * idpf_service_task - Delayed task for handling mailbox responses 1269 * @work: work_struct handle to our data 1270 * 1271 */ 1272 void idpf_service_task(struct work_struct *work) 1273 { 1274 struct idpf_adapter *adapter; 1275 1276 adapter = container_of(work, struct idpf_adapter, serv_task.work); 1277 1278 if (idpf_is_reset_detected(adapter) && 1279 !idpf_is_reset_in_prog(adapter) && 1280 !test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) { 1281 dev_info(&adapter->pdev->dev, "HW reset detected\n"); 1282 set_bit(IDPF_HR_FUNC_RESET, adapter->flags); 1283 queue_delayed_work(adapter->vc_event_wq, 1284 &adapter->vc_event_task, 1285 msecs_to_jiffies(10)); 1286 } 1287 1288 queue_delayed_work(adapter->serv_wq, &adapter->serv_task, 1289 msecs_to_jiffies(300)); 1290 } 1291 1292 /** 1293 * idpf_restore_features - Restore feature configs 1294 * @vport: virtual port structure 1295 */ 1296 static void idpf_restore_features(struct idpf_vport *vport) 1297 { 1298 struct idpf_adapter *adapter = vport->adapter; 1299 1300 if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER)) 1301 
idpf_restore_mac_filters(vport); 1302 } 1303 1304 /** 1305 * idpf_set_real_num_queues - set number of queues for netdev 1306 * @vport: virtual port structure 1307 * 1308 * Returns 0 on success, negative on failure. 1309 */ 1310 static int idpf_set_real_num_queues(struct idpf_vport *vport) 1311 { 1312 int err; 1313 1314 err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq); 1315 if (err) 1316 return err; 1317 1318 return netif_set_real_num_tx_queues(vport->netdev, vport->num_txq); 1319 } 1320 1321 /** 1322 * idpf_up_complete - Complete interface up sequence 1323 * @vport: virtual port structure 1324 * 1325 * Returns 0 on success, negative on failure. 1326 */ 1327 static int idpf_up_complete(struct idpf_vport *vport) 1328 { 1329 struct idpf_netdev_priv *np = netdev_priv(vport->netdev); 1330 1331 if (vport->link_up && !netif_carrier_ok(vport->netdev)) { 1332 netif_carrier_on(vport->netdev); 1333 netif_tx_start_all_queues(vport->netdev); 1334 } 1335 1336 np->state = __IDPF_VPORT_UP; 1337 1338 return 0; 1339 } 1340 1341 /** 1342 * idpf_rx_init_buf_tail - Write initial buffer ring tail value 1343 * @vport: virtual port struct 1344 */ 1345 static void idpf_rx_init_buf_tail(struct idpf_vport *vport) 1346 { 1347 int i, j; 1348 1349 for (i = 0; i < vport->num_rxq_grp; i++) { 1350 struct idpf_rxq_group *grp = &vport->rxq_grps[i]; 1351 1352 if (idpf_is_queue_model_split(vport->rxq_model)) { 1353 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { 1354 const struct idpf_buf_queue *q = 1355 &grp->splitq.bufq_sets[j].bufq; 1356 1357 writel(q->next_to_alloc, q->tail); 1358 } 1359 } else { 1360 for (j = 0; j < grp->singleq.num_rxq; j++) { 1361 const struct idpf_rx_queue *q = 1362 grp->singleq.rxqs[j]; 1363 1364 writel(q->next_to_alloc, q->tail); 1365 } 1366 } 1367 } 1368 } 1369 1370 /** 1371 * idpf_vport_open - Bring up a vport 1372 * @vport: vport to bring up 1373 */ 1374 static int idpf_vport_open(struct idpf_vport *vport) 1375 { 1376 struct idpf_netdev_priv *np = netdev_priv(vport->netdev); 1377 struct idpf_adapter *adapter = vport->adapter; 1378 struct idpf_vport_config *vport_config; 1379 int err; 1380 1381 if (np->state != __IDPF_VPORT_DOWN) 1382 return -EBUSY; 1383 1384 /* we do not allow interface up just yet */ 1385 netif_carrier_off(vport->netdev); 1386 1387 err = idpf_vport_intr_alloc(vport); 1388 if (err) { 1389 dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n", 1390 vport->vport_id, err); 1391 return err; 1392 } 1393 1394 err = idpf_vport_queues_alloc(vport); 1395 if (err) 1396 goto intr_rel; 1397 1398 err = idpf_vport_queue_ids_init(vport); 1399 if (err) { 1400 dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n", 1401 vport->vport_id, err); 1402 goto queues_rel; 1403 } 1404 1405 err = idpf_vport_intr_init(vport); 1406 if (err) { 1407 dev_err(&adapter->pdev->dev, "Failed to initialize interrupts for vport %u: %d\n", 1408 vport->vport_id, err); 1409 goto queues_rel; 1410 } 1411 1412 err = idpf_rx_bufs_init_all(vport); 1413 if (err) { 1414 dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n", 1415 vport->vport_id, err); 1416 goto queues_rel; 1417 } 1418 1419 err = idpf_queue_reg_init(vport); 1420 if (err) { 1421 dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n", 1422 vport->vport_id, err); 1423 goto queues_rel; 1424 } 1425 1426 idpf_rx_init_buf_tail(vport); 1427 idpf_vport_intr_ena(vport); 1428 1429 err = idpf_send_config_queues_msg(vport); 1430 if (err) { 1431 
dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n", 1432 vport->vport_id, err); 1433 goto intr_deinit; 1434 } 1435 1436 err = idpf_send_map_unmap_queue_vector_msg(vport, true); 1437 if (err) { 1438 dev_err(&adapter->pdev->dev, "Failed to map queue vectors for vport %u: %d\n", 1439 vport->vport_id, err); 1440 goto intr_deinit; 1441 } 1442 1443 err = idpf_send_enable_queues_msg(vport); 1444 if (err) { 1445 dev_err(&adapter->pdev->dev, "Failed to enable queues for vport %u: %d\n", 1446 vport->vport_id, err); 1447 goto unmap_queue_vectors; 1448 } 1449 1450 err = idpf_send_enable_vport_msg(vport); 1451 if (err) { 1452 dev_err(&adapter->pdev->dev, "Failed to enable vport %u: %d\n", 1453 vport->vport_id, err); 1454 err = -EAGAIN; 1455 goto disable_queues; 1456 } 1457 1458 idpf_restore_features(vport); 1459 1460 vport_config = adapter->vport_config[vport->idx]; 1461 if (vport_config->user_config.rss_data.rss_lut) 1462 err = idpf_config_rss(vport); 1463 else 1464 err = idpf_init_rss(vport); 1465 if (err) { 1466 dev_err(&adapter->pdev->dev, "Failed to initialize RSS for vport %u: %d\n", 1467 vport->vport_id, err); 1468 goto disable_vport; 1469 } 1470 1471 err = idpf_up_complete(vport); 1472 if (err) { 1473 dev_err(&adapter->pdev->dev, "Failed to complete interface up for vport %u: %d\n", 1474 vport->vport_id, err); 1475 goto deinit_rss; 1476 } 1477 1478 return 0; 1479 1480 deinit_rss: 1481 idpf_deinit_rss(vport); 1482 disable_vport: 1483 idpf_send_disable_vport_msg(vport); 1484 disable_queues: 1485 idpf_send_disable_queues_msg(vport); 1486 unmap_queue_vectors: 1487 idpf_send_map_unmap_queue_vector_msg(vport, false); 1488 intr_deinit: 1489 idpf_vport_intr_deinit(vport); 1490 queues_rel: 1491 idpf_vport_queues_rel(vport); 1492 intr_rel: 1493 idpf_vport_intr_rel(vport); 1494 1495 return err; 1496 } 1497 1498 /** 1499 * idpf_init_task - Delayed initialization task 1500 * @work: work_struct handle to our data 1501 * 1502 * Init task finishes up pending work started in probe. Due to the asynchronous 1503 * nature in which the device communicates with hardware, we may have to wait 1504 * several milliseconds to get a response. Instead of busy polling in probe, 1505 * pulling it out into a delayed work task prevents us from bogging down the 1506 * whole system waiting for a response from hardware. 
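 * The task requeues itself until all the default vports have been created;
 * once they are, it registers the netdevs and starts the statistics task.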
1507 */ 1508 void idpf_init_task(struct work_struct *work) 1509 { 1510 struct idpf_vport_config *vport_config; 1511 struct idpf_vport_max_q max_q; 1512 struct idpf_adapter *adapter; 1513 struct idpf_netdev_priv *np; 1514 struct idpf_vport *vport; 1515 u16 num_default_vports; 1516 struct pci_dev *pdev; 1517 bool default_vport; 1518 int index, err; 1519 1520 adapter = container_of(work, struct idpf_adapter, init_task.work); 1521 1522 num_default_vports = idpf_get_default_vports(adapter); 1523 if (adapter->num_alloc_vports < num_default_vports) 1524 default_vport = true; 1525 else 1526 default_vport = false; 1527 1528 err = idpf_vport_alloc_max_qs(adapter, &max_q); 1529 if (err) 1530 goto unwind_vports; 1531 1532 err = idpf_send_create_vport_msg(adapter, &max_q); 1533 if (err) { 1534 idpf_vport_dealloc_max_qs(adapter, &max_q); 1535 goto unwind_vports; 1536 } 1537 1538 pdev = adapter->pdev; 1539 vport = idpf_vport_alloc(adapter, &max_q); 1540 if (!vport) { 1541 err = -EFAULT; 1542 dev_err(&pdev->dev, "failed to allocate vport: %d\n", 1543 err); 1544 idpf_vport_dealloc_max_qs(adapter, &max_q); 1545 goto unwind_vports; 1546 } 1547 1548 index = vport->idx; 1549 vport_config = adapter->vport_config[index]; 1550 1551 init_waitqueue_head(&vport->sw_marker_wq); 1552 1553 spin_lock_init(&vport_config->mac_filter_list_lock); 1554 1555 INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list); 1556 INIT_LIST_HEAD(&vport_config->user_config.flow_steer_list); 1557 1558 err = idpf_check_supported_desc_ids(vport); 1559 if (err) { 1560 dev_err(&pdev->dev, "failed to get required descriptor ids\n"); 1561 goto cfg_netdev_err; 1562 } 1563 1564 if (idpf_cfg_netdev(vport)) 1565 goto cfg_netdev_err; 1566 1567 err = idpf_send_get_rx_ptype_msg(vport); 1568 if (err) 1569 goto handle_err; 1570 1571 /* Once state is put into DOWN, driver is ready for dev_open */ 1572 np = netdev_priv(vport->netdev); 1573 np->state = __IDPF_VPORT_DOWN; 1574 if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags)) 1575 idpf_vport_open(vport); 1576 1577 /* Spawn and return 'idpf_init_task' work queue until all the 1578 * default vports are created 1579 */ 1580 if (adapter->num_alloc_vports < num_default_vports) { 1581 queue_delayed_work(adapter->init_wq, &adapter->init_task, 1582 msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); 1583 1584 return; 1585 } 1586 1587 for (index = 0; index < adapter->max_vports; index++) { 1588 struct net_device *netdev = adapter->netdevs[index]; 1589 struct idpf_vport_config *vport_config; 1590 1591 vport_config = adapter->vport_config[index]; 1592 1593 if (!netdev || 1594 test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags)) 1595 continue; 1596 1597 err = register_netdev(netdev); 1598 if (err) { 1599 dev_err(&pdev->dev, "failed to register netdev for vport %d: %pe\n", 1600 index, ERR_PTR(err)); 1601 continue; 1602 } 1603 set_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags); 1604 } 1605 1606 /* As all the required vports are created, clear the reset flag 1607 * unconditionally here in case we were in reset and the link was down. 
1608 */ 1609 clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); 1610 /* Start the statistics task now */ 1611 queue_delayed_work(adapter->stats_wq, &adapter->stats_task, 1612 msecs_to_jiffies(10 * (pdev->devfn & 0x07))); 1613 1614 return; 1615 1616 handle_err: 1617 idpf_decfg_netdev(vport); 1618 cfg_netdev_err: 1619 idpf_vport_rel(vport); 1620 adapter->vports[index] = NULL; 1621 unwind_vports: 1622 if (default_vport) { 1623 for (index = 0; index < adapter->max_vports; index++) { 1624 if (adapter->vports[index]) 1625 idpf_vport_dealloc(adapter->vports[index]); 1626 } 1627 } 1628 clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); 1629 } 1630 1631 /** 1632 * idpf_sriov_ena - Enable or change number of VFs 1633 * @adapter: private data struct 1634 * @num_vfs: number of VFs to allocate 1635 */ 1636 static int idpf_sriov_ena(struct idpf_adapter *adapter, int num_vfs) 1637 { 1638 struct device *dev = &adapter->pdev->dev; 1639 int err; 1640 1641 err = idpf_send_set_sriov_vfs_msg(adapter, num_vfs); 1642 if (err) { 1643 dev_err(dev, "Failed to allocate VFs: %d\n", err); 1644 1645 return err; 1646 } 1647 1648 err = pci_enable_sriov(adapter->pdev, num_vfs); 1649 if (err) { 1650 idpf_send_set_sriov_vfs_msg(adapter, 0); 1651 dev_err(dev, "Failed to enable SR-IOV: %d\n", err); 1652 1653 return err; 1654 } 1655 1656 adapter->num_vfs = num_vfs; 1657 1658 return num_vfs; 1659 } 1660 1661 /** 1662 * idpf_sriov_configure - Configure the requested VFs 1663 * @pdev: pointer to a pci_dev structure 1664 * @num_vfs: number of vfs to allocate 1665 * 1666 * Enable or change the number of VFs. Called when the user updates the number 1667 * of VFs in sysfs. 1668 **/ 1669 int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs) 1670 { 1671 struct idpf_adapter *adapter = pci_get_drvdata(pdev); 1672 1673 if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_SRIOV)) { 1674 dev_info(&pdev->dev, "SR-IOV is not supported on this device\n"); 1675 1676 return -EOPNOTSUPP; 1677 } 1678 1679 if (num_vfs) 1680 return idpf_sriov_ena(adapter, num_vfs); 1681 1682 if (pci_vfs_assigned(pdev)) { 1683 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs\n"); 1684 1685 return -EBUSY; 1686 } 1687 1688 pci_disable_sriov(adapter->pdev); 1689 idpf_send_set_sriov_vfs_msg(adapter, 0); 1690 adapter->num_vfs = 0; 1691 1692 return 0; 1693 } 1694 1695 /** 1696 * idpf_deinit_task - Device deinit routine 1697 * @adapter: Driver specific private structure 1698 * 1699 * Extended remove logic which will be used for 1700 * hard reset as well 1701 */ 1702 void idpf_deinit_task(struct idpf_adapter *adapter) 1703 { 1704 unsigned int i; 1705 1706 /* Wait until the init_task is done else this thread might release 1707 * the resources first and the other thread might end up in a bad state 1708 */ 1709 cancel_delayed_work_sync(&adapter->init_task); 1710 1711 if (!adapter->vports) 1712 return; 1713 1714 cancel_delayed_work_sync(&adapter->stats_task); 1715 1716 for (i = 0; i < adapter->max_vports; i++) { 1717 if (adapter->vports[i]) 1718 idpf_vport_dealloc(adapter->vports[i]); 1719 } 1720 } 1721 1722 /** 1723 * idpf_check_reset_complete - check that reset is complete 1724 * @hw: pointer to hw struct 1725 * @reset_reg: struct with reset registers 1726 * 1727 * Returns 0 if device is ready to use, or -EBUSY if it's in reset. 
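 * The rstat register is polled for up to roughly 20 seconds (2000 iterations
 * with a 5-10 ms sleep each) before giving up.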
1728 **/ 1729 static int idpf_check_reset_complete(struct idpf_hw *hw, 1730 struct idpf_reset_reg *reset_reg) 1731 { 1732 struct idpf_adapter *adapter = hw->back; 1733 int i; 1734 1735 for (i = 0; i < 2000; i++) { 1736 u32 reg_val = readl(reset_reg->rstat); 1737 1738 /* 0xFFFFFFFF might be read if other side hasn't cleared the 1739 * register for us yet and 0xFFFFFFFF is not a valid value for 1740 * the register, so treat that as invalid. 1741 */ 1742 if (reg_val != 0xFFFFFFFF && (reg_val & reset_reg->rstat_m)) 1743 return 0; 1744 1745 usleep_range(5000, 10000); 1746 } 1747 1748 dev_warn(&adapter->pdev->dev, "Device reset timeout!\n"); 1749 /* Clear the reset flag unconditionally here since the reset 1750 * technically isn't in progress anymore from the driver's perspective 1751 */ 1752 clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); 1753 1754 return -EBUSY; 1755 } 1756 1757 /** 1758 * idpf_set_vport_state - Set the vport state to be after the reset 1759 * @adapter: Driver specific private structure 1760 */ 1761 static void idpf_set_vport_state(struct idpf_adapter *adapter) 1762 { 1763 u16 i; 1764 1765 for (i = 0; i < adapter->max_vports; i++) { 1766 struct idpf_netdev_priv *np; 1767 1768 if (!adapter->netdevs[i]) 1769 continue; 1770 1771 np = netdev_priv(adapter->netdevs[i]); 1772 if (np->state == __IDPF_VPORT_UP) 1773 set_bit(IDPF_VPORT_UP_REQUESTED, 1774 adapter->vport_config[i]->flags); 1775 } 1776 } 1777 1778 /** 1779 * idpf_init_hard_reset - Initiate a hardware reset 1780 * @adapter: Driver specific private structure 1781 * 1782 * Deallocate the vports and all the resources associated with them and 1783 * reallocate. Also reinitialize the mailbox. Return 0 on success, 1784 * negative on failure. 1785 */ 1786 static int idpf_init_hard_reset(struct idpf_adapter *adapter) 1787 { 1788 struct idpf_reg_ops *reg_ops = &adapter->dev_ops.reg_ops; 1789 struct device *dev = &adapter->pdev->dev; 1790 struct net_device *netdev; 1791 int err; 1792 u16 i; 1793 1794 mutex_lock(&adapter->vport_ctrl_lock); 1795 1796 dev_info(dev, "Device HW Reset initiated\n"); 1797 1798 /* Avoid TX hangs on reset */ 1799 for (i = 0; i < adapter->max_vports; i++) { 1800 netdev = adapter->netdevs[i]; 1801 if (!netdev) 1802 continue; 1803 1804 netif_carrier_off(netdev); 1805 netif_tx_disable(netdev); 1806 } 1807 1808 /* Prepare for reset */ 1809 if (test_and_clear_bit(IDPF_HR_DRV_LOAD, adapter->flags)) { 1810 reg_ops->trigger_reset(adapter, IDPF_HR_DRV_LOAD); 1811 } else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) { 1812 bool is_reset = idpf_is_reset_detected(adapter); 1813 1814 idpf_idc_issue_reset_event(adapter->cdev_info); 1815 1816 idpf_set_vport_state(adapter); 1817 idpf_vc_core_deinit(adapter); 1818 if (!is_reset) 1819 reg_ops->trigger_reset(adapter, IDPF_HR_FUNC_RESET); 1820 idpf_deinit_dflt_mbx(adapter); 1821 } else { 1822 dev_err(dev, "Unhandled hard reset cause\n"); 1823 err = -EBADRQC; 1824 goto unlock_mutex; 1825 } 1826 1827 /* Wait for reset to complete */ 1828 err = idpf_check_reset_complete(&adapter->hw, &adapter->reset_reg); 1829 if (err) { 1830 dev_err(dev, "The driver was unable to contact the device's firmware. Check that the FW is running. 
Driver state= 0x%x\n", 1831 adapter->state); 1832 goto unlock_mutex; 1833 } 1834 1835 /* Reset is complete and so start building the driver resources again */ 1836 err = idpf_init_dflt_mbx(adapter); 1837 if (err) { 1838 dev_err(dev, "Failed to initialize default mailbox: %d\n", err); 1839 goto unlock_mutex; 1840 } 1841 1842 queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); 1843 1844 /* Initialize the state machine, also allocate memory and request 1845 * resources 1846 */ 1847 err = idpf_vc_core_init(adapter); 1848 if (err) { 1849 cancel_delayed_work_sync(&adapter->mbx_task); 1850 idpf_deinit_dflt_mbx(adapter); 1851 goto unlock_mutex; 1852 } 1853 1854 /* Wait till all the vports are initialized to release the reset lock, 1855 * else user space callbacks may access uninitialized vports 1856 */ 1857 while (test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags)) 1858 msleep(100); 1859 1860 unlock_mutex: 1861 mutex_unlock(&adapter->vport_ctrl_lock); 1862 1863 /* Wait until all vports are created to init RDMA CORE AUX */ 1864 if (!err) 1865 err = idpf_idc_init(adapter); 1866 1867 return err; 1868 } 1869 1870 /** 1871 * idpf_vc_event_task - Handle virtchannel event logic 1872 * @work: work queue struct 1873 */ 1874 void idpf_vc_event_task(struct work_struct *work) 1875 { 1876 struct idpf_adapter *adapter; 1877 1878 adapter = container_of(work, struct idpf_adapter, vc_event_task.work); 1879 1880 if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) 1881 return; 1882 1883 if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags)) 1884 goto func_reset; 1885 1886 if (test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) 1887 goto drv_load; 1888 1889 return; 1890 1891 func_reset: 1892 idpf_vc_xn_shutdown(adapter->vcxn_mngr); 1893 drv_load: 1894 set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); 1895 idpf_init_hard_reset(adapter); 1896 } 1897 1898 /** 1899 * idpf_initiate_soft_reset - Initiate a software reset 1900 * @vport: virtual port data struct 1901 * @reset_cause: reason for the soft reset 1902 * 1903 * Soft reset only reallocs vport queue resources. Returns 0 on success, 1904 * negative on failure. 1905 */ 1906 int idpf_initiate_soft_reset(struct idpf_vport *vport, 1907 enum idpf_vport_reset_cause reset_cause) 1908 { 1909 struct idpf_netdev_priv *np = netdev_priv(vport->netdev); 1910 enum idpf_vport_state current_state = np->state; 1911 struct idpf_adapter *adapter = vport->adapter; 1912 struct idpf_vport *new_vport; 1913 int err; 1914 1915 /* If the system is low on memory, we can end up in bad state if we 1916 * free all the memory for queue resources and try to allocate them 1917 * again. Instead, we can pre-allocate the new resources before doing 1918 * anything and bailing if the alloc fails. 1919 * 1920 * Make a clone of the existing vport to mimic its current 1921 * configuration, then modify the new structure with any requested 1922 * changes. Once the allocation of the new resources is done, stop the 1923 * existing vport and copy the configuration to the main vport. If an 1924 * error occurred, the existing vport will be untouched. 1925 * 1926 */ 1927 new_vport = kzalloc(sizeof(*vport), GFP_KERNEL); 1928 if (!new_vport) 1929 return -ENOMEM; 1930 1931 /* This purposely avoids copying the end of the struct because it 1932 * contains wait_queues and mutexes and other stuff we don't want to 1933 * mess with. Nothing below should use those variables from new_vport 1934 * and should instead always refer to them in vport if they need to. 
	memcpy(new_vport, vport, offsetof(struct idpf_vport, link_up));

	/* Adjust resource parameters prior to reallocating resources */
	switch (reset_cause) {
	case IDPF_SR_Q_CHANGE:
		err = idpf_vport_adjust_qs(new_vport);
		if (err)
			goto free_vport;
		break;
	case IDPF_SR_Q_DESC_CHANGE:
		/* Update queue parameters before allocating resources */
		idpf_vport_calc_num_q_desc(new_vport);
		break;
	case IDPF_SR_MTU_CHANGE:
		idpf_idc_vdev_mtu_event(vport->vdev_info,
					IIDC_RDMA_EVENT_BEFORE_MTU_CHANGE);
		break;
	case IDPF_SR_RSC_CHANGE:
		break;
	default:
		dev_err(&adapter->pdev->dev, "Unhandled soft reset cause\n");
		err = -EINVAL;
		goto free_vport;
	}

	if (current_state <= __IDPF_VPORT_DOWN) {
		idpf_send_delete_queues_msg(vport);
	} else {
		set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags);
		idpf_vport_stop(vport);
	}

	idpf_deinit_rss(vport);
	/* We're passing in vport here because we need its wait_queue to send
	 * a message, and it should be getting all the vport config data out
	 * of the adapter. Be careful not to add code in add_queues that
	 * changes the vport config within vport itself, as it will be wiped
	 * by the memcpy below.
	 */
	err = idpf_send_add_queues_msg(vport, new_vport->num_txq,
				       new_vport->num_complq,
				       new_vport->num_rxq,
				       new_vport->num_bufq);
	if (err)
		goto err_reset;

	/* Same comment as above regarding avoiding copying the wait_queues and
	 * mutexes applies here. We do not want to mess with those if possible.
	 */
	memcpy(vport, new_vport, offsetof(struct idpf_vport, link_up));

	if (reset_cause == IDPF_SR_Q_CHANGE)
		idpf_vport_alloc_vec_indexes(vport);

	err = idpf_set_real_num_queues(vport);
	if (err)
		goto err_open;

	if (current_state == __IDPF_VPORT_UP)
		err = idpf_vport_open(vport);

	goto free_vport;

err_reset:
	idpf_send_add_queues_msg(vport, vport->num_txq, vport->num_complq,
				 vport->num_rxq, vport->num_bufq);

err_open:
	if (current_state == __IDPF_VPORT_UP)
		idpf_vport_open(vport);

free_vport:
	kfree(new_vport);

	if (reset_cause == IDPF_SR_MTU_CHANGE)
		idpf_idc_vdev_mtu_event(vport->vdev_info,
					IIDC_RDMA_EVENT_AFTER_MTU_CHANGE);

	return err;
}

/**
 * idpf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode. Kernel takes addr_list_lock spinlock
 * meaning we cannot sleep in this context. Due to this, we have to add the
 * filter and send the virtchnl message asynchronously without waiting for the
 * response from the other side. We won't know whether or not the operation
 * actually succeeded until we get the message back. Returns 0 on success,
 * negative on failure.
 */
static int idpf_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	return idpf_add_mac_filter(np->vport, np, addr, true);
}

/**
 * idpf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode. Kernel takes addr_list_lock spinlock
 * meaning we cannot sleep in this context. Due to this, we have to delete the
 * filter and send the virtchnl message asynchronously without waiting for the
 * response from the other side. We won't know whether or not the operation
 * actually succeeded until we get the message back. Returns 0 on success,
 * negative on failure.
 */
static int idpf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	idpf_del_mac_filter(np->vport, np, addr, true);

	return 0;
}

/**
 * idpf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 *
 * Stack takes addr_list_lock spinlock before calling our .set_rx_mode. We
 * cannot sleep in this context.
 */
static void idpf_set_rx_mode(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *config_data;
	struct idpf_adapter *adapter;
	bool changed = false;
	struct device *dev;
	int err;

	adapter = np->adapter;
	dev = &adapter->pdev->dev;

	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER)) {
		__dev_uc_sync(netdev, idpf_addr_sync, idpf_addr_unsync);
		__dev_mc_sync(netdev, idpf_addr_sync, idpf_addr_unsync);
	}

	if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PROMISC))
		return;

	config_data = &adapter->vport_config[np->vport_idx]->user_config;
	/* IFF_PROMISC enables both unicast and multicast promiscuous,
	 * while IFF_ALLMULTI only enables multicast such that:
	 *
	 * promisc + allmulti = unicast | multicast
	 * promisc + !allmulti = unicast | multicast
	 * !promisc + allmulti = multicast
	 */
	if ((netdev->flags & IFF_PROMISC) &&
	    !test_and_set_bit(__IDPF_PROMISC_UC, config_data->user_flags)) {
		changed = true;
		dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
		if (!test_and_set_bit(__IDPF_PROMISC_MC, config_data->user_flags))
			dev_info(dev, "Entering multicast promiscuous mode\n");
	}

	if (!(netdev->flags & IFF_PROMISC) &&
	    test_and_clear_bit(__IDPF_PROMISC_UC, config_data->user_flags)) {
		changed = true;
		dev_info(dev, "Leaving promiscuous mode\n");
	}

	if (netdev->flags & IFF_ALLMULTI &&
	    !test_and_set_bit(__IDPF_PROMISC_MC, config_data->user_flags)) {
		changed = true;
		dev_info(dev, "Entering multicast promiscuous mode\n");
	}

	if (!(netdev->flags & (IFF_ALLMULTI | IFF_PROMISC)) &&
	    test_and_clear_bit(__IDPF_PROMISC_MC, config_data->user_flags)) {
		changed = true;
		dev_info(dev, "Leaving multicast promiscuous mode\n");
	}

	if (!changed)
		return;

	err = idpf_set_promiscuous(adapter, config_data, np->vport_id);
	if (err)
		dev_err(dev, "Failed to set promiscuous mode: %d\n", err);
}

/**
 * idpf_vport_manage_rss_lut - disable/enable RSS
 * @vport: the vport being changed
 *
 * In the event of a disable request for RSS, this function zeroes out the
 * RSS LUT, while in the event of an enable request it reconfigures the RSS
 * LUT with the cached (default or user configured) LUT.
 */
static int idpf_vport_manage_rss_lut(struct idpf_vport *vport)
{
	bool ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
	struct idpf_rss_data *rss_data;
	u16 idx = vport->idx;
	int lut_size;

	rss_data = &vport->adapter->vport_config[idx]->user_config.rss_data;
	lut_size = rss_data->rss_lut_size * sizeof(u32);

	if (ena) {
		/* This will contain the default or user configured LUT */
		memcpy(rss_data->rss_lut, rss_data->cached_lut, lut_size);
	} else {
		/* Save a copy of the current LUT to be restored later if
		 * requested.
		 */
		memcpy(rss_data->cached_lut, rss_data->rss_lut, lut_size);

		/* Zero out the current LUT to disable */
		memset(rss_data->rss_lut, 0, lut_size);
	}

	return idpf_config_rss(vport);
}

/**
 * idpf_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 */
static int idpf_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct idpf_adapter *adapter;
	struct idpf_vport *vport;
	int err = 0;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	adapter = vport->adapter;

	if (idpf_is_reset_in_prog(adapter)) {
		dev_err(&adapter->pdev->dev, "Device is resetting, changing netdev features temporarily unavailable.\n");
		err = -EBUSY;
		goto unlock_mutex;
	}

	if (changed & NETIF_F_RXHASH) {
		netdev->features ^= NETIF_F_RXHASH;
		err = idpf_vport_manage_rss_lut(vport);
		if (err)
			goto unlock_mutex;
	}

	if (changed & NETIF_F_GRO_HW) {
		netdev->features ^= NETIF_F_GRO_HW;
		err = idpf_initiate_soft_reset(vport, IDPF_SR_RSC_CHANGE);
		if (err)
			goto unlock_mutex;
	}

	if (changed & NETIF_F_LOOPBACK) {
		netdev->features ^= NETIF_F_LOOPBACK;
		err = idpf_send_ena_dis_loopback_msg(vport);
	}

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}
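
/* Editor's note (illustrative, not part of the original source): in
 * idpf_set_features() above, each feature bit is flipped in netdev->features
 * before the corresponding reconfiguration helper is called, so that helpers
 * such as idpf_vport_manage_rss_lut(), which appear to query the state via
 * idpf_is_feature_ena(), act on the requested setting rather than the
 * previous one.
 */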

/**
 * idpf_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */
static int idpf_open(struct net_device *netdev)
{
	struct idpf_vport *vport;
	int err;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	err = idpf_set_real_num_queues(vport);
	if (err)
		goto unlock;

	err = idpf_vport_open(vport);

unlock:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}

/**
 * idpf_change_mtu - NDO callback to change the MTU
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int idpf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct idpf_vport *vport;
	int err;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	WRITE_ONCE(netdev->mtu, new_mtu);

	err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE);

	idpf_vport_ctrl_unlock(netdev);

	return err;
}

/**
 * idpf_chk_tso_segment - Check skb is not using too many buffers
 * @skb: send buffer
 * @max_bufs: maximum number of buffers
 *
 * For TSO we need to count the TSO header and segment payload separately. As
 * such we need to check cases where we have max_bufs-1 fragments or more as we
 * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1
 * for the segment payload in the first descriptor, and another max_bufs-1 for
 * the fragments.
 *
 * Returns true if the packet needs to be software segmented by the core stack.
 */
static bool idpf_chk_tso_segment(const struct sk_buff *skb,
				 unsigned int max_bufs)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	const skb_frag_t *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than max_bufs - 1 */
	nr_frags = shinfo->nr_frags;
	if (nr_frags < (max_bufs - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of max_bufs-2 fragments totals at least gso_size.
	 */
	nr_frags -= max_bufs - 2;
	frag = &shinfo->frags[0];

	/* Initialize size to the negative value of gso_size minus 1. We use
	 * this as the worst case scenario in which the frag ahead of us only
	 * provides one byte which is why we are limited to max_bufs-2
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
	sum = 1 - shinfo->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
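
	/* Editor's note (worked example, not part of the original source):
	 * with max_bufs = 8 the loop below slides a window of
	 * max_bufs - 2 = 6 fragments through the list, adding the newest
	 * fragment and removing the oldest ("stale") one each iteration. If
	 * the running sum ever drops below zero, a group of 6 consecutive
	 * fragments carried fewer than gso_size bytes and the skb is handed
	 * back to the stack for software GSO.
	 */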

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	for (stale = &shinfo->frags[0];; stale++) {
		int stale_size = skb_frag_size(stale);

		sum += skb_frag_size(frag++);

		/* The stale fragment may present us with a smaller
		 * descriptor than the actual fragment size. To account
		 * for that we need to remove all the data on the front and
		 * figure out what the remainder would be in the last
		 * descriptor associated with the fragment.
		 */
		if (stale_size > IDPF_TX_MAX_DESC_DATA) {
			int align_pad = -(skb_frag_off(stale)) &
					(IDPF_TX_MAX_READ_REQ_SIZE - 1);

			sum -= align_pad;
			stale_size -= align_pad;

			do {
				sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
				stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
			} while (stale_size > IDPF_TX_MAX_DESC_DATA);
		}

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= stale_size;
	}

	return false;
}

/**
 * idpf_features_check - Validate packet conforms to limits
 * @skb: skb buffer
 * @netdev: This port's netdev
 * @features: Offload features that the stack believes apply
 */
static netdev_features_t idpf_features_check(struct sk_buff *skb,
					     struct net_device *netdev,
					     netdev_features_t features)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	u16 max_tx_hdr_size = np->max_tx_hdr_size;
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	if (skb_is_gso(skb)) {
		/* We cannot support GSO if the MSS is going to be less than
		 * 88 bytes. If it is then we need to drop support for GSO.
		 */
		if (skb_shinfo(skb)->gso_size < IDPF_TX_TSO_MIN_MSS)
			features &= ~NETIF_F_GSO_MASK;
		else if (idpf_chk_tso_segment(skb, np->tx_max_bufs))
			features &= ~NETIF_F_GSO_MASK;
	}

	/* Ensure MACLEN is <= 126 bytes (63 words) and not an odd size */
	len = skb_network_offset(skb);
	if (unlikely(len & ~(126)))
		goto unsupported;

	len = skb_network_header_len(skb);
	if (unlikely(len > max_tx_hdr_size))
		goto unsupported;

	if (!skb->encapsulation)
		return features;

	/* L4TUNLEN can support 127 words */
	len = skb_inner_network_header(skb) - skb_transport_header(skb);
	if (unlikely(len & ~(127 * 2)))
		goto unsupported;

	/* IPLEN can support at most 127 dwords */
	len = skb_inner_network_header_len(skb);
	if (unlikely(len > max_tx_hdr_size))
		goto unsupported;

	/* No need to validate L4LEN as TCP is the only protocol with a
	 * flexible value and we support all possible values supported
	 * by TCP, which is at most 15 dwords
	 */

	return features;

unsupported:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
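
/* Editor's note (worked example, not part of the original source): the MACLEN
 * check above rejects any network header offset with bits outside 0x7e, i.e.
 * anything odd or larger than 126 bytes. A standard 14-byte Ethernet header
 * passes (14 & ~126 == 0), while an offset of 15 (odd) or 128 (too large)
 * would fail. The tunnel check similarly limits the outer-L4-to-inner-network
 * gap to an even length of at most 254 bytes (127 * 2).
 */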

/**
 * idpf_set_mac - NDO callback to set port mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int idpf_set_mac(struct net_device *netdev, void *p)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_config *vport_config;
	struct sockaddr *addr = p;
	struct idpf_vport *vport;
	int err = 0;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	if (!idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
			     VIRTCHNL2_CAP_MACFILTER)) {
		dev_info(&vport->adapter->pdev->dev, "Setting MAC address is not supported\n");
		err = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	if (!is_valid_ether_addr(addr->sa_data)) {
		dev_info(&vport->adapter->pdev->dev, "Invalid MAC address: %pM\n",
			 addr->sa_data);
		err = -EADDRNOTAVAIL;
		goto unlock_mutex;
	}

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		goto unlock_mutex;

	vport_config = vport->adapter->vport_config[vport->idx];
	err = idpf_add_mac_filter(vport, np, addr->sa_data, false);
	if (err) {
		__idpf_del_mac_filter(vport_config, addr->sa_data);
		goto unlock_mutex;
	}

	if (is_valid_ether_addr(vport->default_mac_addr))
		idpf_del_mac_filter(vport, np, vport->default_mac_addr, false);

	ether_addr_copy(vport->default_mac_addr, addr->sa_data);
	eth_hw_addr_set(netdev, addr->sa_data);

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}

/**
 * idpf_alloc_dma_mem - Allocate dma memory
 * @hw: pointer to hw struct
 * @mem: pointer to dma_mem struct
 * @size: size of the memory to allocate
 */
void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)
{
	struct idpf_adapter *adapter = hw->back;
	size_t sz = ALIGN(size, 4096);

	/* The control queue resources are freed under a spinlock; contiguous
	 * pages avoid IOMMU remapping and the use of vmap() (and vunmap() in
	 * the dma_free_*() path).
	 */
	mem->va = dma_alloc_attrs(&adapter->pdev->dev, sz, &mem->pa,
				  GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
	mem->size = sz;

	return mem->va;
}

/**
 * idpf_free_dma_mem - Free the allocated dma memory
 * @hw: pointer to hw struct
 * @mem: pointer to dma_mem struct
 */
void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
{
	struct idpf_adapter *adapter = hw->back;

	dma_free_attrs(&adapter->pdev->dev, mem->size,
		       mem->va, mem->pa, DMA_ATTR_FORCE_CONTIGUOUS);
	mem->size = 0;
	mem->va = NULL;
	mem->pa = 0;
}

/**
 * idpf_hwtstamp_set - Set the PTP timestamping configuration
 * @netdev: network interface device structure
 * @config: timestamping configuration requested by the stack
 * @extack: netlink extended ack structure
 *
 * Returns 0 on success, negative on failure.
 */
static int idpf_hwtstamp_set(struct net_device *netdev,
			     struct kernel_hwtstamp_config *config,
			     struct netlink_ext_ack *extack)
{
	struct idpf_vport *vport;
	int err;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	if (!vport->link_up) {
		idpf_vport_ctrl_unlock(netdev);
		return -EPERM;
	}

	if (!idpf_ptp_is_vport_tx_tstamp_ena(vport) &&
	    !idpf_ptp_is_vport_rx_tstamp_ena(vport)) {
		idpf_vport_ctrl_unlock(netdev);
		return -EOPNOTSUPP;
	}

	err = idpf_ptp_set_timestamp_mode(vport, config);

	idpf_vport_ctrl_unlock(netdev);

	return err;
}

/**
 * idpf_hwtstamp_get - Get the current PTP timestamping configuration
 * @netdev: network interface device structure
 * @config: storage for the current timestamping configuration
 *
 * Returns 0 on success, negative on failure.
 */
static int idpf_hwtstamp_get(struct net_device *netdev,
			     struct kernel_hwtstamp_config *config)
{
	struct idpf_vport *vport;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	if (!vport->link_up) {
		idpf_vport_ctrl_unlock(netdev);
		return -EPERM;
	}

	if (!idpf_ptp_is_vport_tx_tstamp_ena(vport) &&
	    !idpf_ptp_is_vport_rx_tstamp_ena(vport)) {
		idpf_vport_ctrl_unlock(netdev);
		return 0;
	}

	*config = vport->tstamp_config;

	idpf_vport_ctrl_unlock(netdev);

	return 0;
}

static const struct net_device_ops idpf_netdev_ops = {
	.ndo_open = idpf_open,
	.ndo_stop = idpf_stop,
	.ndo_start_xmit = idpf_tx_start,
	.ndo_features_check = idpf_features_check,
	.ndo_set_rx_mode = idpf_set_rx_mode,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = idpf_set_mac,
	.ndo_change_mtu = idpf_change_mtu,
	.ndo_get_stats64 = idpf_get_stats64,
	.ndo_set_features = idpf_set_features,
	.ndo_tx_timeout = idpf_tx_timeout,
	.ndo_hwtstamp_get = idpf_hwtstamp_get,
	.ndo_hwtstamp_set = idpf_hwtstamp_set,
};