// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ethtool.h>
#include <linux/printk.h>
#include <linux/dynamic_debug.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/cpumask.h>
#include <linux/crash_dump.h>
#include <linux/vmalloc.h>
#include <net/page_pool/helpers.h>

#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_dev.h"
#include "ionic_lif.h"
#include "ionic_aux.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"

/* queuetype support level */
static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
	[IONIC_QTYPE_ADMINQ]  = 0,	/* 0 = Base version with CQ support */
	[IONIC_QTYPE_NOTIFYQ] = 0,	/* 0 = Base version */
	[IONIC_QTYPE_RXQ]     = 2,	/* 0 = Base version with CQ+SG support
					 * 2 =   ... with CMB rings
					 */
	[IONIC_QTYPE_TXQ]     = 3,	/* 0 = Base version with CQ+SG support
					 * 1 =   ... with Tx SG version 1
					 * 3 =   ... with CMB rings
					 */
};

static void ionic_link_status_check(struct ionic_lif *lif);
static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
static void ionic_lif_set_netdev_info(struct ionic_lif *lif);

static void ionic_txrx_deinit(struct ionic_lif *lif);
static int ionic_txrx_init(struct ionic_lif *lif);
static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);

static void ionic_xdp_rxqs_prog_update(struct ionic_lif *lif);
static void ionic_unregister_rxq_info(struct ionic_queue *q);
static int ionic_register_rxq_info(struct ionic_queue *q, unsigned int napi_id);
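
/* Dynamic interrupt moderation: net_dim hands us a suggested moderation
 * profile for the traffic it has observed, and we translate the profile's
 * usecs into the device's coalesce units.  A zero result is bumped to 1,
 * and the interrupt control register is only rewritten when the value
 * actually changes.
 */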
static void ionic_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct dim_cq_moder cur_moder;
	struct ionic_intr_info *intr;
	struct ionic_qcq *qcq;
	struct ionic_lif *lif;
	struct ionic_queue *q;
	u32 new_coal;

	qcq = container_of(dim, struct ionic_qcq, dim);
	q = &qcq->q;
	if (q->type == IONIC_QTYPE_RXQ)
		cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	else
		cur_moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
	lif = q->lif;
	new_coal = ionic_coal_usec_to_hw(lif->ionic, cur_moder.usec);
	new_coal = new_coal ? new_coal : 1;

	intr = &qcq->intr;
	if (intr->dim_coal_hw != new_coal) {
		intr->dim_coal_hw = new_coal;

		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     intr->index, intr->dim_coal_hw);
	}

	dim->state = DIM_START_MEASURE;
}

static void ionic_lif_deferred_work(struct work_struct *work)
{
	struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
	struct ionic_deferred *def = &lif->deferred;
	struct ionic_deferred_work *w = NULL;

	do {
		spin_lock_bh(&def->lock);
		if (!list_empty(&def->list)) {
			w = list_first_entry(&def->list,
					     struct ionic_deferred_work, list);
			list_del(&w->list);
		}
		spin_unlock_bh(&def->lock);

		if (!w)
			break;

		switch (w->type) {
		case IONIC_DW_TYPE_RX_MODE:
			ionic_lif_rx_mode(lif);
			break;
		case IONIC_DW_TYPE_LINK_STATUS:
			ionic_link_status_check(lif);
			break;
		case IONIC_DW_TYPE_LIF_RESET:
			if (w->fw_status) {
				ionic_lif_handle_fw_up(lif);
			} else {
				ionic_lif_handle_fw_down(lif);

				/* Fire off another watchdog to see
				 * if the FW is already back rather than
				 * waiting another whole cycle
				 */
				mod_timer(&lif->ionic->watchdog_timer, jiffies + 1);
			}
			break;
		default:
			break;
		}
		kfree(w);
		w = NULL;
	} while (true);
}

void ionic_lif_deferred_enqueue(struct ionic_lif *lif,
				struct ionic_deferred_work *work)
{
	spin_lock_bh(&lif->deferred.lock);
	list_add_tail(&work->list, &lif->deferred.list);
	spin_unlock_bh(&lif->deferred.lock);
	queue_work(lif->ionic->wq, &lif->deferred.work);
}

static void ionic_link_status_check(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	u16 link_status;
	bool link_up;

	if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	/* Don't put carrier back up if we're in a broken state */
	if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) {
		clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
		return;
	}

	link_status = le16_to_cpu(lif->info->status.link_status);
	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;

	if (link_up) {
		int err = 0;

		if (netdev->flags & IFF_UP && netif_running(netdev)) {
			mutex_lock(&lif->queue_lock);
			err = ionic_start_queues(lif);
			if (err && err != -EBUSY) {
				netdev_err(netdev,
					   "Failed to start queues: %d\n", err);
				set_bit(IONIC_LIF_F_BROKEN, lif->state);
				netif_carrier_off(lif->netdev);
			}
			mutex_unlock(&lif->queue_lock);
		}

		if (!err && !netif_carrier_ok(netdev)) {
			ionic_port_identify(lif->ionic);
			netdev_info(netdev, "Link up - %d Gbps\n",
				    le32_to_cpu(lif->info->status.link_speed) / 1000);
			netif_carrier_on(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			lif->link_down_count++;
			netdev_info(netdev, "Link down\n");
			netif_carrier_off(netdev);
		}

		if (netdev->flags & IFF_UP && netif_running(netdev)) {
			mutex_lock(&lif->queue_lock);
			ionic_stop_queues(lif);
			mutex_unlock(&lif->queue_lock);
		}
	}

	clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
}
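
/* Request a link status check.  Callers in atomic context pass
 * can_sleep=false so the check runs later from the deferred work list;
 * IONIC_LIF_F_LINK_CHECK_REQUESTED keeps no more than one request
 * outstanding at a time.
 */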
void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
{
	struct ionic_deferred_work *work;

	/* we only need one request outstanding at a time */
	if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	if (!can_sleep) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
			return;
		}

		work->type = IONIC_DW_TYPE_LINK_STATUS;
		ionic_lif_deferred_enqueue(lif, work);
	} else {
		ionic_link_status_check(lif);
	}
}

static irqreturn_t ionic_isr(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_intr_info *intr = &qcq->intr;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	const char *name;

	if (lif->registered)
		name = netdev_name(lif->netdev);
	else
		name = dev_name(dev);

	snprintf(intr->name, sizeof(intr->name),
		 "%.5s-%.16s-%.8s", IONIC_DRV_NAME, name, q->name);

	return devm_request_irq(dev, intr->vector, ionic_isr,
				0, intr->name, &qcq->napi);
}

int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
{
	struct ionic *ionic = lif->ionic;
	int index, err;

	index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
	if (index == ionic->nintrs)
		return -ENOSPC;

	set_bit(index, ionic->intrs);
	ionic_intr_init(&ionic->idev, intr, index);

	err = ionic_bus_get_irq(ionic, intr->index);
	if (err < 0) {
		clear_bit(index, ionic->intrs);
		return err;
	}

	intr->vector = err;

	return 0;
}
EXPORT_SYMBOL_NS(ionic_intr_alloc, "NET_IONIC");

void ionic_intr_free(struct ionic_lif *lif, int index)
{
	if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < lif->ionic->nintrs)
		clear_bit(index, lif->ionic->intrs);
}
EXPORT_SYMBOL_NS(ionic_intr_free, "NET_IONIC");

static void ionic_irq_aff_notify(struct irq_affinity_notify *notify,
				 const cpumask_t *mask)
{
	struct ionic_intr_info *intr = container_of(notify, struct ionic_intr_info, aff_notify);

	cpumask_copy(*intr->affinity_mask, mask);
}

static void ionic_irq_aff_release(struct kref __always_unused *ref)
{
}

static int ionic_qcq_enable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = q->lif;
	struct ionic_dev *idev;
	struct device *dev;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.oper = IONIC_Q_ENABLE,
		},
	};
	int ret;

	idev = &lif->ionic->idev;
	dev = lif->ionic->dev;

	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	if (qcq->flags & IONIC_QCQ_F_INTR)
		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);

	ret = ionic_adminq_post_wait(lif, &ctx);
	if (ret)
		return ret;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		napi_enable(&qcq->napi);
		irq_set_affinity_notifier(qcq->intr.vector,
					  &qcq->intr.aff_notify);
		irq_set_affinity_hint(qcq->intr.vector,
				      *qcq->intr.affinity_mask);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);
	}

	return 0;
}
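
/* Teardown ordering matters here: for an interrupt-driven qcq we mask
 * the interrupt and wait out any in-flight handler with synchronize_irq()
 * before disabling NAPI, and only then ask the FW to disable the queue.
 * If a previous FW command already failed with a timeout or -ENXIO, the
 * adminq post is skipped and that same error is returned.
 */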
static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err)
{
	struct ionic_queue *q;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.oper = IONIC_Q_DISABLE,
		},
	};

	if (!qcq) {
		netdev_err(lif->netdev, "%s: bad qcq\n", __func__);
		return -ENXIO;
	}

	q = &qcq->q;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		struct ionic_dev *idev = &lif->ionic->idev;

		if (lif->doorbell_wa)
			cancel_work_sync(&qcq->doorbell_napi_work);
		cancel_work_sync(&qcq->dim.work);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		synchronize_irq(qcq->intr.vector);
		irq_set_affinity_notifier(qcq->intr.vector, NULL);
		irq_set_affinity_hint(qcq->intr.vector, NULL);
		napi_disable(&qcq->napi);
	}

	/* If there was a previous fw communication error, don't bother with
	 * sending the adminq command and just return the same error value.
	 */
	if (fw_err == -ETIMEDOUT || fw_err == -ENXIO)
		return fw_err;

	ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
	ctx.cmd.q_control.type = q->type;
	ctx.cmd.q_control.index = cpu_to_le32(q->index);
	dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	return ionic_adminq_post_wait(lif, &ctx);
}

static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	if (!qcq)
		return;

	if (!(qcq->flags & IONIC_QCQ_F_INITED))
		return;

	ionic_unregister_rxq_info(&qcq->q);
	if (qcq->flags & IONIC_QCQ_F_INTR) {
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		netif_napi_del(&qcq->napi);
	}

	qcq->flags &= ~IONIC_QCQ_F_INITED;
}

static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
		return;

	irq_set_affinity_hint(qcq->intr.vector, NULL);
	devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
	qcq->intr.vector = 0;
	ionic_intr_free(lif, qcq->intr.index);
	qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}

static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;

	if (!qcq)
		return;

	ionic_debugfs_del_qcq(qcq);

	if (qcq->q_base) {
		dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
		qcq->q_base = NULL;
		qcq->q_base_pa = 0;
	}

	if (qcq->cmb_q_base) {
		iounmap(qcq->cmb_q_base);
		ionic_put_cmb(lif, qcq->cmb_pgid, qcq->cmb_order);
		qcq->cmb_pgid = 0;
		qcq->cmb_order = 0;
		qcq->cmb_q_base = NULL;
		qcq->cmb_q_base_pa = 0;
	}

	if (qcq->cq_base) {
		dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
		qcq->cq_base = NULL;
		qcq->cq_base_pa = 0;
	}

	if (qcq->sg_base) {
		dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
		qcq->sg_base = NULL;
		qcq->sg_base_pa = 0;
	}

	page_pool_destroy(qcq->q.page_pool);
	qcq->q.page_pool = NULL;

	ionic_qcq_intr_free(lif, qcq);
	vfree(qcq->q.info);
	qcq->q.info = NULL;
}
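
/* Free the LIF's queue/completion pairs.  The adminqcq pointer is cleared
 * under adminq_lock so nothing can post a new admin command against a qcq
 * that is about to be freed.
 */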
void ionic_qcqs_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_qcq *adminqcq;
	unsigned long irqflags;

	if (lif->notifyqcq) {
		ionic_qcq_free(lif, lif->notifyqcq);
		devm_kfree(dev, lif->notifyqcq);
		lif->notifyqcq = NULL;
	}

	if (lif->adminqcq) {
		spin_lock_irqsave(&lif->adminq_lock, irqflags);
		adminqcq = READ_ONCE(lif->adminqcq);
		lif->adminqcq = NULL;
		spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
		if (adminqcq) {
			ionic_qcq_free(lif, adminqcq);
			devm_kfree(dev, adminqcq);
		}
	}

	if (lif->rxqcqs) {
		devm_kfree(dev, lif->rxqstats);
		lif->rxqstats = NULL;
		devm_kfree(dev, lif->rxqcqs);
		lif->rxqcqs = NULL;
	}

	if (lif->txqcqs) {
		devm_kfree(dev, lif->txqstats);
		lif->txqstats = NULL;
		devm_kfree(dev, lif->txqcqs);
		lif->txqcqs = NULL;
	}
}

static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
				      struct ionic_qcq *n_qcq)
{
	n_qcq->intr.vector = src_qcq->intr.vector;
	n_qcq->intr.index = src_qcq->intr.index;
}

static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	cpumask_var_t *affinity_mask;
	int err;

	if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
		qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
		return 0;
	}

	err = ionic_intr_alloc(lif, &qcq->intr);
	if (err) {
		netdev_warn(lif->netdev, "no intr for %s: %d\n",
			    qcq->q.name, err);
		goto err_out;
	}

	ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
			       IONIC_INTR_MASK_SET);

	err = ionic_request_irq(lif, qcq);
	if (err) {
		netdev_warn(lif->netdev, "irq request failed %d\n", err);
		goto err_out_free_intr;
	}

	/* try to get the irq on the local numa node first */
	affinity_mask = &lif->ionic->affinity_masks[qcq->intr.index];
	if (cpumask_empty(*affinity_mask)) {
		unsigned int cpu;

		cpu = cpumask_local_spread(qcq->intr.index,
					   dev_to_node(lif->ionic->dev));
		if (cpu != -1)
			cpumask_set_cpu(cpu, *affinity_mask);
	}

	qcq->intr.affinity_mask = affinity_mask;
	qcq->intr.aff_notify.notify = ionic_irq_aff_notify;
	qcq->intr.aff_notify.release = ionic_irq_aff_release;

	netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
	return 0;

err_out_free_intr:
	ionic_intr_free(lif, qcq->intr.index);
err_out:
	return err;
}
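
/* Allocate one queue/completion pair.  A qcq is built from up to four
 * pieces: the descriptor ring (in DMA memory, or in on-chip CMB memory
 * when IONIC_QCQ_F_CMB_RINGS is set), the completion ring, an optional
 * scatter-gather ring, and an optional interrupt.  The NotifyQ is
 * special-cased below because its q and cq must be contiguous.
 */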
static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
			   unsigned int index,
			   const char *name, unsigned int flags,
			   unsigned int num_descs, unsigned int desc_size,
			   unsigned int cq_desc_size,
			   unsigned int sg_desc_size,
			   unsigned int desc_info_size,
			   unsigned int pid, struct bpf_prog *xdp_prog,
			   struct ionic_qcq **qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	struct ionic_qcq *new;
	int err;

	*qcq = NULL;

	new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
	if (!new) {
		netdev_err(lif->netdev, "Cannot allocate queue structure\n");
		err = -ENOMEM;
		goto err_out;
	}

	new->q.dev = dev;
	new->flags = flags;

	new->q.info = vcalloc(num_descs, desc_info_size);
	if (!new->q.info) {
		netdev_err(lif->netdev, "Cannot allocate queue info\n");
		err = -ENOMEM;
		goto err_out_free_qcq;
	}

	if (type == IONIC_QTYPE_RXQ) {
		struct page_pool_params pp_params = {
			.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
			.order = 0,
			.pool_size = num_descs,
			.nid = NUMA_NO_NODE,
			.dev = lif->ionic->dev,
			.napi = &new->napi,
			.dma_dir = DMA_FROM_DEVICE,
			.max_len = PAGE_SIZE,
			.netdev = lif->netdev,
		};

		if (xdp_prog)
			pp_params.dma_dir = DMA_BIDIRECTIONAL;

		new->q.page_pool = page_pool_create(&pp_params);
		if (IS_ERR(new->q.page_pool)) {
			netdev_err(lif->netdev, "Cannot create page_pool\n");
			err = PTR_ERR(new->q.page_pool);
			new->q.page_pool = NULL;
			goto err_out_free_q_info;
		}
	}

	new->q.type = type;
	new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;

	err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
			   desc_size, sg_desc_size, pid);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize queue\n");
		goto err_out_free_page_pool;
	}

	err = ionic_alloc_qcq_interrupt(lif, new);
	if (err)
		goto err_out_free_page_pool;

	err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize completion queue\n");
		goto err_out_free_irq;
	}

	if (flags & IONIC_QCQ_F_NOTIFYQ) {
		int q_size;

		/* q & cq need to be contiguous in NotifyQ, so alloc it all in q
		 * and don't alloc qc.  We leave new->qc_size and new->qc_base
		 * as 0 to be sure we don't try to free it later.
		 */
		q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
		new->q_size = PAGE_SIZE + q_size +
			      ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
		new->q_base = dma_alloc_coherent(dev, new->q_size,
						 &new->q_base_pa, GFP_KERNEL);
		if (!new->q_base) {
			netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_irq;
		}
		new->q.base = PTR_ALIGN(new->q_base, PAGE_SIZE);
		new->q.base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);

		/* Base the NotifyQ cq.base off of the ALIGNed q.base */
		new->cq.base = PTR_ALIGN(new->q.base + q_size, PAGE_SIZE);
		new->cq.base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
		new->cq.bound_q = &new->q;
	} else {
		/* regular DMA q descriptors */
		new->q_size = PAGE_SIZE + (num_descs * desc_size);
		new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
						 GFP_KERNEL);
		if (!new->q_base) {
			netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_irq;
		}
		new->q.base = PTR_ALIGN(new->q_base, PAGE_SIZE);
		new->q.base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);

		if (flags & IONIC_QCQ_F_CMB_RINGS) {
			/* on-chip CMB q descriptors */
			new->cmb_q_size = num_descs * desc_size;
			new->cmb_order = order_base_2(new->cmb_q_size / PAGE_SIZE);

			err = ionic_get_cmb(lif, &new->cmb_pgid, &new->cmb_q_base_pa,
					    new->cmb_order, 0, NULL);
			if (err) {
				netdev_err(lif->netdev,
					   "Cannot allocate queue order %d from cmb: err %d\n",
					   new->cmb_order, err);
				goto err_out_free_q;
			}

			new->cmb_q_base = ioremap_wc(new->cmb_q_base_pa, new->cmb_q_size);
			if (!new->cmb_q_base) {
				netdev_err(lif->netdev, "Cannot map queue from cmb\n");
				ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
				err = -ENOMEM;
				goto err_out_free_q;
			}

			new->cmb_q_base_pa -= idev->phy_cmb_pages;
			new->q.cmb_base = new->cmb_q_base;
			new->q.cmb_base_pa = new->cmb_q_base_pa;
		}

		/* cq DMA descriptors */
		new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
		new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
						  GFP_KERNEL);
		if (!new->cq_base) {
			netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_q;
		}
		new->cq.base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
		new->cq.base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
		new->cq.bound_q = &new->q;
	}
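
	/* Each ring above was over-allocated by a page so that its base
	 * could be page-aligned with PTR_ALIGN/ALIGN; the optional SG
	 * ring below gets the same treatment.
	 */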
	if (flags & IONIC_QCQ_F_SG) {
		new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
		new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
						  GFP_KERNEL);
		if (!new->sg_base) {
			netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq;
		}
		new->q.sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
		new->q.sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
	}

	INIT_WORK(&new->dim.work, ionic_dim_work);
	new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
	if (lif->doorbell_wa)
		INIT_WORK(&new->doorbell_napi_work, ionic_doorbell_napi_work);

	*qcq = new;

	return 0;

err_out_free_cq:
	dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
err_out_free_q:
	if (new->cmb_q_base) {
		iounmap(new->cmb_q_base);
		ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
	}
	dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
err_out_free_irq:
	if (flags & IONIC_QCQ_F_INTR) {
		devm_free_irq(dev, new->intr.vector, &new->napi);
		ionic_intr_free(lif, new->intr.index);
	}
err_out_free_page_pool:
	page_pool_destroy(new->q.page_pool);
err_out_free_q_info:
	vfree(new->q.info);
err_out_free_qcq:
	devm_kfree(dev, new);
err_out:
	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
	return err;
}

static int ionic_qcqs_alloc(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	unsigned int flags;
	int err;

	flags = IONIC_QCQ_F_INTR;
	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
			      IONIC_ADMINQ_LENGTH,
			      sizeof(struct ionic_admin_cmd),
			      sizeof(struct ionic_admin_comp),
			      0,
			      sizeof(struct ionic_admin_desc_info),
			      lif->kern_pid, NULL, &lif->adminqcq);
	if (err)
		return err;
	ionic_debugfs_add_qcq(lif, lif->adminqcq);

	if (lif->ionic->nnqs_per_lif) {
		flags = IONIC_QCQ_F_NOTIFYQ;
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
				      flags, IONIC_NOTIFYQ_LENGTH,
				      sizeof(struct ionic_notifyq_cmd),
				      sizeof(union ionic_notifyq_comp),
				      0,
				      sizeof(struct ionic_admin_desc_info),
				      lif->kern_pid, NULL, &lif->notifyqcq);
		if (err)
			goto err_out;
		ionic_debugfs_add_qcq(lif, lif->notifyqcq);

		/* Let the notifyq ride on the adminq interrupt */
		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
	}

	err = -ENOMEM;
	lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
				   sizeof(*lif->txqcqs), GFP_KERNEL);
	if (!lif->txqcqs)
		goto err_out;
	lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
				   sizeof(*lif->rxqcqs), GFP_KERNEL);
	if (!lif->rxqcqs)
		goto err_out;

	lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif + 1,
				     sizeof(*lif->txqstats), GFP_KERNEL);
	if (!lif->txqstats)
		goto err_out;
	lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif + 1,
				     sizeof(*lif->rxqstats), GFP_KERNEL);
	if (!lif->rxqstats)
		goto err_out;

	return 0;

err_out:
	ionic_qcqs_free(lif);
	return err;
}
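
/* Reset a qcq's ring state before the FW (re)initializes it: the
 * producer/consumer indices go back to 0 and the ring memory is zeroed.
 * done_color starts at 1 to match the color the FW will write in the
 * first pass through the completion ring.
 */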
static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
{
	qcq->q.tail_idx = 0;
	qcq->q.head_idx = 0;
	qcq->cq.tail_idx = 0;
	qcq->cq.done_color = 1;
	memset(qcq->q_base, 0, qcq->q_size);
	if (qcq->cmb_q_base)
		memset_io(qcq->cmb_q_base, 0, qcq->cmb_q_size);
	memset(qcq->cq_base, 0, qcq->cq_size);
	memset(qcq->sg_base, 0, qcq->sg_size);
}

static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.intr_index = cpu_to_le16(qcq->intr.index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
			.features = cpu_to_le64(q->features),
		},
	};
	int err;

	if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
		ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
		ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
	}

	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "txq_init.cq_ring_base 0x%llx\n", ctx.cmd.q_init.cq_ring_base);
	dev_dbg(dev, "txq_init.sg_ring_base 0x%llx\n", ctx.cmd.q_init.sg_ring_base);
	dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);

	q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE;
	q->dbell_jiffies = jiffies;

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
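
/* Rx queue init: each rxq is paired with the txq of the same index via
 * q->partner, and IONIC_QINIT_F_SG is requested only when there is no
 * XDP program attached or the attached program can handle multi-frag
 * buffers.
 */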
static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ),
			.intr_index = cpu_to_le16(cq->bound_intr->index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
			.features = cpu_to_le64(q->features),
		},
	};
	int err;

	q->partner = &lif->txqcqs[q->index]->q;
	q->partner->partner = q;

	if (!lif->xdp_prog ||
	    (lif->xdp_prog->aux && lif->xdp_prog->aux->xdp_has_frags))
		ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_SG);

	if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
		ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
		ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
	}

	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);

	q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
	q->dbell_jiffies = jiffies;

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi);
	else
		netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);
	err = ionic_register_rxq_info(q, qcq->napi.napi_id);
	if (err) {
		netif_napi_del(&qcq->napi);
		return err;
	}

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
{
	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
	unsigned int txq_i, flags;
	struct ionic_qcq *txq;
	u64 features;
	int err;

	if (lif->hwstamp_txq)
		return 0;

	features = IONIC_Q_F_2X_CQ_DESC | IONIC_TXQ_F_HWSTAMP;

	num_desc = IONIC_MIN_TXRX_DESC;
	desc_sz = sizeof(struct ionic_txq_desc);
	comp_sz = 2 * sizeof(struct ionic_txq_comp);

	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	txq_i = lif->ionic->ntxqs_per_lif;
	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;

	err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
			      num_desc, desc_sz, comp_sz, sg_desc_sz,
			      sizeof(struct ionic_tx_desc_info),
			      lif->kern_pid, NULL, &txq);
	if (err)
		goto err_qcq_alloc;

	txq->q.features = features;

	ionic_link_qcq_interrupts(lif->adminqcq, txq);
	ionic_debugfs_add_qcq(lif, txq);

	lif->hwstamp_txq = txq;

	if (netif_running(lif->netdev)) {
		err = ionic_lif_txq_init(lif, txq);
		if (err)
			goto err_qcq_init;

		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
			err = ionic_qcq_enable(txq);
			if (err)
				goto err_qcq_enable;
		}
	}

	return 0;

err_qcq_enable:
	ionic_lif_qcq_deinit(lif, txq);
err_qcq_init:
	lif->hwstamp_txq = NULL;
	ionic_debugfs_del_qcq(txq);
	ionic_qcq_free(lif, txq);
	devm_kfree(lif->ionic->dev, txq);
err_qcq_alloc:
	return err;
}

int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
{
	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
	unsigned int rxq_i, flags;
	struct ionic_qcq *rxq;
	u64 features;
	int err;

	if (lif->hwstamp_rxq)
		return 0;

	features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;

	num_desc = IONIC_MIN_TXRX_DESC;
	desc_sz = sizeof(struct ionic_rxq_desc);
	comp_sz = 2 * sizeof(struct ionic_rxq_comp);
	sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);

	rxq_i = lif->ionic->nrxqs_per_lif;
	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;

	err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
			      num_desc, desc_sz, comp_sz, sg_desc_sz,
			      sizeof(struct ionic_rx_desc_info),
			      lif->kern_pid, NULL, &rxq);
	if (err)
		goto err_qcq_alloc;

	rxq->q.features = features;

	ionic_link_qcq_interrupts(lif->adminqcq, rxq);
	ionic_debugfs_add_qcq(lif, rxq);

	lif->hwstamp_rxq = rxq;

	if (netif_running(lif->netdev)) {
		err = ionic_lif_rxq_init(lif, rxq);
		if (err)
			goto err_qcq_init;

		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
			ionic_rx_fill(&rxq->q, NULL);
			err = ionic_qcq_enable(rxq);
			if (err)
				goto err_qcq_enable;
		}
	}

	return 0;

err_qcq_enable:
	ionic_lif_qcq_deinit(lif, rxq);
err_qcq_init:
	lif->hwstamp_rxq = NULL;
	ionic_debugfs_del_qcq(rxq);
	ionic_qcq_free(lif, rxq);
	devm_kfree(lif->ionic->dev, rxq);
err_qcq_alloc:
	return err;
}

int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif *lif, bool rx_all)
{
	struct ionic_queue_params qparam;

	ionic_init_queue_params(lif, &qparam);

	if (rx_all)
		qparam.rxq_features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;
	else
		qparam.rxq_features = 0;

	/* if we're not running, just set the values and return */
	if (!netif_running(lif->netdev)) {
		lif->rxq_features = qparam.rxq_features;
		return 0;
	}

	return ionic_reconfigure_queues(lif, &qparam);
}

int ionic_lif_set_hwstamp_txmode(struct ionic_lif *lif, u16 txstamp_mode)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_TXSTAMP,
			.txstamp_mode = cpu_to_le16(txstamp_mode),
		},
	};

	return ionic_adminq_post_wait(lif, &ctx);
}

static void ionic_lif_del_hwstamp_rxfilt(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	u32 filter_id;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_rxsteer(lif);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return;
	}

	filter_id = f->filter_id;
	ionic_rx_filter_free(lif, f);

	spin_unlock_bh(&lif->rx_filters.lock);

	netdev_dbg(lif->netdev, "rx_filter del RXSTEER (id %d)\n", filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(filter_id);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		netdev_dbg(lif->netdev, "failed to delete rx_filter RXSTEER (id %d)\n", filter_id);
}
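
/* Steer packets of the given class (e.g. PTP) to the hwstamp rxq by
 * installing an IONIC_RX_FILTER_STEER_PKTCLASS filter that points at it.
 */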
static int ionic_lif_add_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_STEER_PKTCLASS),
			.pkt_class = cpu_to_le64(pkt_class),
		},
	};
	u8 qtype;
	u32 qid;
	int err;

	if (!lif->hwstamp_rxq)
		return -EINVAL;

	qtype = lif->hwstamp_rxq->q.type;
	ctx.cmd.rx_filter_add.qtype = qtype;

	qid = lif->hwstamp_rxq->q.index;
	ctx.cmd.rx_filter_add.qid = cpu_to_le32(qid);

	netdev_dbg(lif->netdev, "rx_filter add RXSTEER\n");
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	spin_lock_bh(&lif->rx_filters.lock);
	err = ionic_rx_filter_save(lif, 0, qid, 0, &ctx, IONIC_FILTER_STATE_SYNCED);
	spin_unlock_bh(&lif->rx_filters.lock);

	return err;
}

int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
{
	ionic_lif_del_hwstamp_rxfilt(lif);

	if (!pkt_class)
		return 0;

	return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class);
}

static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{
	struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
	struct ionic_lif *lif = napi_to_cq(napi)->lif;
	struct ionic_dev *idev = &lif->ionic->idev;
	unsigned long irqflags;
	unsigned int flags = 0;
	int rx_work = 0;
	int tx_work = 0;
	int n_work = 0;
	int a_work = 0;
	int work_done;
	int credits;

	if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
		n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
					  ionic_notifyq_service, NULL, NULL);

	spin_lock_irqsave(&lif->adminq_lock, irqflags);
	if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
		a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
					  ionic_adminq_service, NULL, NULL);

	spin_unlock_irqrestore(&lif->adminq_lock, irqflags);

	if (lif->hwstamp_rxq)
		rx_work = ionic_cq_service(&lif->hwstamp_rxq->cq, budget,
					   ionic_rx_service, NULL, NULL);

	if (lif->hwstamp_txq)
		tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget, !!budget);

	work_done = max(max(n_work, a_work), max(rx_work, tx_work));
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		flags |= IONIC_INTR_CRED_UNMASK;
		intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		credits = n_work + a_work + rx_work + tx_work;
		ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
	}

	if (lif->doorbell_wa) {
		if (!a_work)
			ionic_adminq_poke_doorbell(&lif->adminqcq->q);
		if (lif->hwstamp_rxq && !rx_work)
			ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q);
		if (lif->hwstamp_txq && !tx_work)
			ionic_txq_poke_doorbell(&lif->hwstamp_txq->q);
	}

	return work_done;
}
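
/* Fill the netdev stats from the FW-maintained counters in
 * lif->info->stats; the values are little-endian in the shared info
 * block and are summed into the rtnl_link_stats64 buckets here.
 */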
void ionic_get_stats64(struct net_device *netdev,
		       struct rtnl_link_stats64 *ns)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_lif_stats *ls;

	memset(ns, 0, sizeof(*ns));
	ls = &lif->info->stats;

	ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
			 le64_to_cpu(ls->rx_mcast_packets) +
			 le64_to_cpu(ls->rx_bcast_packets);

	ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
			 le64_to_cpu(ls->tx_mcast_packets) +
			 le64_to_cpu(ls->tx_bcast_packets);

	ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
		       le64_to_cpu(ls->rx_mcast_bytes) +
		       le64_to_cpu(ls->rx_bcast_bytes);

	ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
		       le64_to_cpu(ls->tx_mcast_bytes) +
		       le64_to_cpu(ls->tx_bcast_bytes);

	ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
			 le64_to_cpu(ls->rx_mcast_drop_packets) +
			 le64_to_cpu(ls->rx_bcast_drop_packets);

	ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
			 le64_to_cpu(ls->tx_mcast_drop_packets) +
			 le64_to_cpu(ls->tx_bcast_drop_packets);

	ns->multicast = le64_to_cpu(ls->rx_mcast_packets);

	ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);

	ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
			       le64_to_cpu(ls->rx_queue_disabled) +
			       le64_to_cpu(ls->rx_desc_fetch_error) +
			       le64_to_cpu(ls->rx_desc_data_error);

	ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
				le64_to_cpu(ls->tx_queue_disabled) +
				le64_to_cpu(ls->tx_desc_fetch_error) +
				le64_to_cpu(ls->tx_desc_data_error);

	ns->rx_errors = ns->rx_over_errors +
			ns->rx_missed_errors;

	ns->tx_errors = ns->tx_aborted_errors;
}

static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_list_addr(netdev_priv(netdev), addr, ADD_ADDR);
}

static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
	/* Don't delete our own address from the uc list */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	return ionic_lif_list_addr(netdev_priv(netdev), addr, DEL_ADDR);
}

void ionic_lif_rx_mode(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	unsigned int nfilters;
	unsigned int nd_flags;
	char buf[128];
	u16 rx_mode;
	int i;
#define REMAIN(__x) (sizeof(buf) - (__x))

	mutex_lock(&lif->config_lock);

	/* grab the flags once for local use */
	nd_flags = netdev->flags;

	rx_mode = IONIC_RX_MODE_F_UNICAST;
	rx_mode |= (nd_flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
	rx_mode |= (nd_flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
	rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
	rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;

	/* sync the filters */
	ionic_rx_filter_sync(lif);

	/* check for overflow state
	 *    if so, we track that we overflowed and enable NIC PROMISC
	 *    else if the overflow is set and not needed
	 *       we remove our overflow flag and check the netdev flags
	 *       to see if we can disable NIC PROMISC
	 */
	nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);

	if (((lif->nucast + lif->nmcast) >= nfilters) ||
	    (lif->max_vlans && lif->nvlans >= lif->max_vlans)) {
		rx_mode |= IONIC_RX_MODE_F_PROMISC;
		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
	} else {
		if (!(nd_flags & IFF_PROMISC))
			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
		if (!(nd_flags & IFF_ALLMULTI))
			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
	}

	i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
		      lif->rx_mode, rx_mode);
	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
	if (rx_mode & IONIC_RX_MODE_F_RDMA_SNIFFER)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_RDMA_SNIFFER");
	netdev_dbg(netdev, "lif%d %s\n", lif->index, buf);

	if (lif->rx_mode != rx_mode) {
		struct ionic_admin_ctx ctx = {
			.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
			.cmd.rx_mode_set = {
				.opcode = IONIC_CMD_RX_MODE_SET,
				.lif_index = cpu_to_le16(lif->index),
			},
		};
		int err;

		ctx.cmd.rx_mode_set.rx_mode = cpu_to_le16(rx_mode);
		err = ionic_adminq_post_wait(lif, &ctx);
		if (err)
			netdev_warn(netdev, "set rx_mode 0x%04x failed: %d\n",
				    rx_mode, err);
		else
			lif->rx_mode = rx_mode;
	}

	mutex_unlock(&lif->config_lock);
}

static void ionic_ndo_set_rx_mode(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_deferred_work *work;

	/* Sync the kernel filter list with the driver filter list */
	__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
	__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);

	/* Shove off the rest of the rxmode work to the work task
	 * which will include syncing the filters to the firmware.
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		netdev_err(lif->netdev, "rxmode change dropped\n");
		return;
	}
	work->type = IONIC_DW_TYPE_RX_MODE;
	netdev_dbg(lif->netdev, "deferred: rx_mode\n");
	ionic_lif_deferred_enqueue(lif, work);
}

static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
{
	u64 wanted = 0;

	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
	if (features & NETIF_F_RXHASH)
		wanted |= IONIC_ETH_HW_RX_HASH;
	if (features & NETIF_F_RXCSUM)
		wanted |= IONIC_ETH_HW_RX_CSUM;
	if (features & NETIF_F_SG)
		wanted |= IONIC_ETH_HW_TX_SG;
	if (features & NETIF_F_HW_CSUM)
		wanted |= IONIC_ETH_HW_TX_CSUM;
	if (features & NETIF_F_TSO)
		wanted |= IONIC_ETH_HW_TSO;
	if (features & NETIF_F_TSO6)
		wanted |= IONIC_ETH_HW_TSO_IPV6;
	if (features & NETIF_F_TSO_ECN)
		wanted |= IONIC_ETH_HW_TSO_ECN;
	if (features & NETIF_F_GSO_GRE)
		wanted |= IONIC_ETH_HW_TSO_GRE;
	if (features & NETIF_F_GSO_GRE_CSUM)
		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
	if (features & NETIF_F_GSO_IPXIP4)
		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
	if (features & NETIF_F_GSO_IPXIP6)
		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
	if (features & NETIF_F_GSO_UDP_TUNNEL)
		wanted |= IONIC_ETH_HW_TSO_UDP;
	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;

	return cpu_to_le64(wanted);
}
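
/* Push the requested feature set to the FW.  The device answers with the
 * subset it actually enabled, so lif->hw_features ends up as the AND of
 * what was asked for and what came back in the completion.
 */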
static int ionic_set_nic_features(struct ionic_lif *lif,
				  netdev_features_t features)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_FEATURES,
		},
	};
	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
			 IONIC_ETH_HW_VLAN_RX_STRIP |
			 IONIC_ETH_HW_VLAN_RX_FILTER;
	u64 old_hw_features;
	int err;

	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);

	if (lif->phc)
		ctx.cmd.lif_setattr.features |= cpu_to_le64(IONIC_ETH_HW_TIMESTAMP);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	old_hw_features = lif->hw_features;
	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
				       ctx.comp.lif_setattr.features);

	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);

	if ((vlan_flags & le64_to_cpu(ctx.cmd.lif_setattr.features)) &&
	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		dev_dbg(dev, "feature ETH_HW_TSO\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TIMESTAMP)
		dev_dbg(dev, "feature ETH_HW_TIMESTAMP\n");

	return 0;
}

static int ionic_init_nic_features(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	netdev_features_t features;
	int err;

	/* set up what we expect to support by default */
	features = NETIF_F_HW_VLAN_CTAG_TX |
		   NETIF_F_HW_VLAN_CTAG_RX |
		   NETIF_F_HW_VLAN_CTAG_FILTER |
		   NETIF_F_SG |
		   NETIF_F_HW_CSUM |
		   NETIF_F_RXCSUM |
		   NETIF_F_TSO |
		   NETIF_F_TSO6 |
		   NETIF_F_TSO_ECN |
		   NETIF_F_GSO_GRE |
		   NETIF_F_GSO_GRE_CSUM |
		   NETIF_F_GSO_IPXIP4 |
		   NETIF_F_GSO_IPXIP6 |
		   NETIF_F_GSO_UDP_TUNNEL |
		   NETIF_F_GSO_UDP_TUNNEL_CSUM;

	if (lif->nxqs > 1)
		features |= NETIF_F_RXHASH;

	err = ionic_set_nic_features(lif, features);
	if (err)
		return err;

	/* tell the netdev what we actually can support */
	netdev->features |= NETIF_F_HIGHDMA;

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		netdev->hw_features |= NETIF_F_SG;

	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		netdev->hw_enc_features |= NETIF_F_RXCSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		netdev->hw_enc_features |= NETIF_F_TSO;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		netdev->hw_enc_features |= NETIF_F_TSO6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= netdev->hw_enc_features;
	netdev->features |= netdev->hw_features;
	netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;

	netdev->priv_flags |= IFF_UNICAST_FLT |
			      IFF_LIVE_ADDR_CHANGE;

	netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
			       NETDEV_XDP_ACT_REDIRECT |
			       NETDEV_XDP_ACT_RX_SG |
			       NETDEV_XDP_ACT_NDO_XMIT |
			       NETDEV_XDP_ACT_NDO_XMIT_SG;

	return 0;
}

static int ionic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
		   __func__, (u64)lif->netdev->features, (u64)features);

	err = ionic_set_nic_features(lif, features);

	return err;
}

static int ionic_set_attr_mac(struct ionic_lif *lif, u8 *mac)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};

	ether_addr_copy(ctx.cmd.lif_setattr.mac, mac);
	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_get_attr_mac(struct ionic_lif *lif, u8 *mac_addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_getattr = {
			.opcode = IONIC_CMD_LIF_GETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	ether_addr_copy(mac_addr, ctx.comp.lif_getattr.mac);
	return 0;
}

static int ionic_program_mac(struct ionic_lif *lif, u8 *mac)
{
	u8 get_mac[ETH_ALEN];
	int err;

	err = ionic_set_attr_mac(lif, mac);
	if (err)
		return err;

	err = ionic_get_attr_mac(lif, get_mac);
	if (err)
		return err;

	/* To deal with older firmware that silently ignores the set attr mac:
	 * it doesn't actually change the mac and doesn't return an error, so
	 * we do the get attr to verify whether or not the set actually happened
	 */
	if (!ether_addr_equal(get_mac, mac))
		return 1;

	return 0;
}

static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct sockaddr *addr = sa;
	u8 *mac;
	int err;

	mac = (u8 *)addr->sa_data;
	if (ether_addr_equal(netdev->dev_addr, mac))
		return 0;

	err = ionic_program_mac(lif, mac);
	if (err < 0)
		return err;

	if (err > 0)
		netdev_dbg(netdev, "%s: SET and GET ATTR Mac are not equal - due to old FW running\n",
			   __func__);

	err = eth_prepare_mac_addr_change(netdev, addr);
	if (err)
		return err;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		netdev_info(netdev, "deleting mac addr %pM\n",
			    netdev->dev_addr);
		ionic_lif_addr_del(netdev_priv(netdev), netdev->dev_addr);
	}

	eth_commit_mac_addr_change(netdev, addr);
	netdev_info(netdev, "updating mac addr %pM\n", mac);

	return ionic_lif_addr_add(netdev_priv(netdev), mac);
}

void ionic_stop_queues_reconfig(struct ionic_lif *lif)
{
	/* Stop and clean the queues before reconfiguration */
	netif_device_detach(lif->netdev);
	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
}

static int ionic_start_queues_reconfig(struct ionic_lif *lif)
{
	int err;

	/* Re-init the queues after reconfiguration */

	/* The only way txrx_init can fail here is if communication
	 * with FW is suddenly broken.  There's not much we can do
	 * at this point - error messages have already been printed,
	 * so we can continue on and the user can eventually do a
	 * DOWN and UP to try to reset and clear the issue.
	 */
	err = ionic_txrx_init(lif);
	ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
	netif_device_attach(lif->netdev);

	return err;
}

static bool ionic_xdp_is_valid_mtu(struct ionic_lif *lif, u32 mtu,
				   struct bpf_prog *xdp_prog)
{
	if (!xdp_prog)
		return true;

	if (mtu <= IONIC_XDP_MAX_LINEAR_MTU)
		return true;

	if (xdp_prog->aux && xdp_prog->aux->xdp_has_frags)
		return true;

	return false;
}

static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MTU,
			.mtu = cpu_to_le32(new_mtu),
		},
	};
	struct bpf_prog *xdp_prog;
	int err;

	xdp_prog = READ_ONCE(lif->xdp_prog);
	if (!ionic_xdp_is_valid_mtu(lif, new_mtu, xdp_prog))
		return -EINVAL;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* if we're not running, nothing more to do */
	if (!netif_running(netdev)) {
		WRITE_ONCE(netdev->mtu, new_mtu);
		return 0;
	}

	mutex_lock(&lif->queue_lock);
	ionic_stop_queues_reconfig(lif);
	WRITE_ONCE(netdev->mtu, new_mtu);
	err = ionic_start_queues_reconfig(lif);
	mutex_unlock(&lif->queue_lock);

	return err;
}

static void ionic_tx_timeout_work(struct work_struct *ws)
{
	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
	int err;

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	/* if we were stopped before this scheduled job was launched,
	 * don't bother the queues as they are already stopped.
	 */
	if (!netif_running(lif->netdev))
		return;

	mutex_lock(&lif->queue_lock);
	ionic_stop_queues_reconfig(lif);
	err = ionic_start_queues_reconfig(lif);
	mutex_unlock(&lif->queue_lock);

	if (err)
		dev_err(lif->ionic->dev, "%s: Restarting queues failed\n", __func__);
}

static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	netdev_info(lif->netdev, "Tx Timeout triggered - txq %d\n", txqueue);
	schedule_work(&lif->tx_timeout_work);
}

static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	err = ionic_lif_vlan_add(lif, vid);
	if (err)
		return err;

	ionic_lif_rx_mode(lif);

	return 0;
}

static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				  u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	err = ionic_lif_vlan_del(lif, vid);
	if (err)
		return err;

	ionic_lif_rx_mode(lif);

	return 0;
}

int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
			 const u8 *key, const u32 *indir)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_RSS,
			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
		},
	};
	unsigned int i, tbl_sz;

	if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
		lif->rss_types = types;
		ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
	}

	if (key)
		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);

	if (indir) {
		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = indir[i];
	}

	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
	       IONIC_RSS_HASH_KEY_SIZE);

	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_lif_rss_init(struct ionic_lif *lif)
{
	unsigned int tbl_sz;
	unsigned int i;

	lif->rss_types = IONIC_RSS_TYPE_IPV4 |
			 IONIC_RSS_TYPE_IPV4_TCP |
			 IONIC_RSS_TYPE_IPV4_UDP |
			 IONIC_RSS_TYPE_IPV6 |
			 IONIC_RSS_TYPE_IPV6_TCP |
			 IONIC_RSS_TYPE_IPV6_UDP;

	/* Fill indirection table with 'default' values */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	for (i = 0; i < tbl_sz; i++)
		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);

	return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
}

static void ionic_lif_rss_deinit(struct ionic_lif *lif)
{
	int tbl_sz;

	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	memset(lif->rss_ind_tbl, 0, tbl_sz);
	memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);

	ionic_lif_rss_config(lif, 0x0, NULL, NULL);
}
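
/* Ask the FW to quiesce the LIF; used on the way down after the queues
 * have been disabled.
 */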
1966 netdev_dbg(lif->netdev, "lif quiesce failed %d\n", err); 1967 } 1968 1969 static void ionic_txrx_disable(struct ionic_lif *lif) 1970 { 1971 unsigned int i; 1972 int err = 0; 1973 1974 if (lif->txqcqs) { 1975 for (i = 0; i < lif->nxqs; i++) 1976 err = ionic_qcq_disable(lif, lif->txqcqs[i], err); 1977 } 1978 1979 if (lif->hwstamp_txq) 1980 err = ionic_qcq_disable(lif, lif->hwstamp_txq, err); 1981 1982 if (lif->rxqcqs) { 1983 for (i = 0; i < lif->nxqs; i++) 1984 err = ionic_qcq_disable(lif, lif->rxqcqs[i], err); 1985 } 1986 1987 if (lif->hwstamp_rxq) 1988 err = ionic_qcq_disable(lif, lif->hwstamp_rxq, err); 1989 1990 ionic_lif_quiesce(lif); 1991 } 1992 1993 static void ionic_txrx_deinit(struct ionic_lif *lif) 1994 { 1995 unsigned int i; 1996 1997 if (lif->txqcqs) { 1998 for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) { 1999 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); 2000 ionic_tx_flush(&lif->txqcqs[i]->cq); 2001 ionic_tx_empty(&lif->txqcqs[i]->q); 2002 } 2003 } 2004 2005 if (lif->rxqcqs) { 2006 for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) { 2007 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]); 2008 ionic_rx_empty(&lif->rxqcqs[i]->q); 2009 } 2010 } 2011 lif->rx_mode = 0; 2012 2013 if (lif->hwstamp_txq) { 2014 ionic_lif_qcq_deinit(lif, lif->hwstamp_txq); 2015 ionic_tx_flush(&lif->hwstamp_txq->cq); 2016 ionic_tx_empty(&lif->hwstamp_txq->q); 2017 } 2018 2019 if (lif->hwstamp_rxq) { 2020 ionic_lif_qcq_deinit(lif, lif->hwstamp_rxq); 2021 ionic_rx_empty(&lif->hwstamp_rxq->q); 2022 } 2023 } 2024 2025 void ionic_txrx_free(struct ionic_lif *lif) 2026 { 2027 unsigned int i; 2028 2029 if (lif->txqcqs) { 2030 for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) { 2031 ionic_qcq_free(lif, lif->txqcqs[i]); 2032 devm_kfree(lif->ionic->dev, lif->txqcqs[i]); 2033 lif->txqcqs[i] = NULL; 2034 } 2035 } 2036 2037 if (lif->rxqcqs) { 2038 for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) { 2039 ionic_qcq_free(lif, lif->rxqcqs[i]); 2040 devm_kfree(lif->ionic->dev, lif->rxqcqs[i]); 2041 lif->rxqcqs[i] = NULL; 2042 } 2043 } 2044 2045 if (lif->hwstamp_txq) { 2046 ionic_qcq_free(lif, lif->hwstamp_txq); 2047 devm_kfree(lif->ionic->dev, lif->hwstamp_txq); 2048 lif->hwstamp_txq = NULL; 2049 } 2050 2051 if (lif->hwstamp_rxq) { 2052 ionic_qcq_free(lif, lif->hwstamp_rxq); 2053 devm_kfree(lif->ionic->dev, lif->hwstamp_rxq); 2054 lif->hwstamp_rxq = NULL; 2055 } 2056 } 2057 2058 static int ionic_txrx_alloc(struct ionic_lif *lif) 2059 { 2060 unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz; 2061 unsigned int flags, i; 2062 int err = 0; 2063 2064 num_desc = lif->ntxq_descs; 2065 desc_sz = sizeof(struct ionic_txq_desc); 2066 comp_sz = sizeof(struct ionic_txq_comp); 2067 2068 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 && 2069 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == 2070 sizeof(struct ionic_txq_sg_desc_v1)) 2071 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1); 2072 else 2073 sg_desc_sz = sizeof(struct ionic_txq_sg_desc); 2074 2075 flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; 2076 2077 if (test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state)) 2078 flags |= IONIC_QCQ_F_CMB_RINGS; 2079 2080 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) 2081 flags |= IONIC_QCQ_F_INTR; 2082 2083 for (i = 0; i < lif->nxqs; i++) { 2084 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 2085 num_desc, desc_sz, comp_sz, sg_desc_sz, 2086 sizeof(struct ionic_tx_desc_info), 2087 lif->kern_pid, NULL, &lif->txqcqs[i]); 2088 if (err) 2089 goto err_out; 2090 2091 if (flags & IONIC_QCQ_F_INTR) { 2092 
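/* A Tx qcq gets its own interrupt only in split-interrupt mode
 * (the only path that set IONIC_QCQ_F_INTR above), so program the
 * Tx coalesce timer here, mirroring the Rx coalesce init below.
 */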
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 2093 lif->txqcqs[i]->intr.index, 2094 lif->tx_coalesce_hw); 2095 if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state)) 2096 lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw; 2097 } 2098 2099 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]); 2100 } 2101 2102 flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR; 2103 2104 if (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state)) 2105 flags |= IONIC_QCQ_F_CMB_RINGS; 2106 2107 num_desc = lif->nrxq_descs; 2108 desc_sz = sizeof(struct ionic_rxq_desc); 2109 comp_sz = sizeof(struct ionic_rxq_comp); 2110 sg_desc_sz = sizeof(struct ionic_rxq_sg_desc); 2111 2112 if (lif->rxq_features & IONIC_Q_F_2X_CQ_DESC) 2113 comp_sz *= 2; 2114 2115 for (i = 0; i < lif->nxqs; i++) { 2116 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 2117 num_desc, desc_sz, comp_sz, sg_desc_sz, 2118 sizeof(struct ionic_rx_desc_info), 2119 lif->kern_pid, lif->xdp_prog, 2120 &lif->rxqcqs[i]); 2121 if (err) 2122 goto err_out; 2123 2124 lif->rxqcqs[i]->q.features = lif->rxq_features; 2125 2126 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 2127 lif->rxqcqs[i]->intr.index, 2128 lif->rx_coalesce_hw); 2129 if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state)) 2130 lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw; 2131 2132 if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) 2133 ionic_link_qcq_interrupts(lif->rxqcqs[i], 2134 lif->txqcqs[i]); 2135 2136 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]); 2137 } 2138 2139 return 0; 2140 2141 err_out: 2142 ionic_txrx_free(lif); 2143 2144 return err; 2145 } 2146 2147 static int ionic_txrx_init(struct ionic_lif *lif) 2148 { 2149 unsigned int i; 2150 int err; 2151 2152 for (i = 0; i < lif->nxqs; i++) { 2153 err = ionic_lif_txq_init(lif, lif->txqcqs[i]); 2154 if (err) 2155 goto err_out; 2156 2157 err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]); 2158 if (err) { 2159 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); 2160 goto err_out; 2161 } 2162 } 2163 2164 if (lif->netdev->features & NETIF_F_RXHASH) 2165 ionic_lif_rss_init(lif); 2166 2167 ionic_lif_rx_mode(lif); 2168 2169 return 0; 2170 2171 err_out: 2172 while (i--) { 2173 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); 2174 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]); 2175 } 2176 2177 return err; 2178 } 2179 2180 static int ionic_txrx_enable(struct ionic_lif *lif) 2181 { 2182 int derr = 0; 2183 int i, err; 2184 2185 ionic_xdp_rxqs_prog_update(lif); 2186 2187 for (i = 0; i < lif->nxqs; i++) { 2188 if (!(lif->rxqcqs[i] && lif->txqcqs[i])) { 2189 dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i); 2190 err = -ENXIO; 2191 goto err_out; 2192 } 2193 2194 ionic_rx_fill(&lif->rxqcqs[i]->q, 2195 READ_ONCE(lif->rxqcqs[i]->q.xdp_prog)); 2196 err = ionic_qcq_enable(lif->rxqcqs[i]); 2197 if (err) 2198 goto err_out; 2199 2200 err = ionic_qcq_enable(lif->txqcqs[i]); 2201 if (err) { 2202 derr = ionic_qcq_disable(lif, lif->rxqcqs[i], err); 2203 goto err_out; 2204 } 2205 } 2206 2207 if (lif->hwstamp_rxq) { 2208 ionic_rx_fill(&lif->hwstamp_rxq->q, NULL); 2209 err = ionic_qcq_enable(lif->hwstamp_rxq); 2210 if (err) 2211 goto err_out_hwstamp_rx; 2212 } 2213 2214 if (lif->hwstamp_txq) { 2215 err = ionic_qcq_enable(lif->hwstamp_txq); 2216 if (err) 2217 goto err_out_hwstamp_tx; 2218 } 2219 2220 return 0; 2221 2222 err_out_hwstamp_tx: 2223 if (lif->hwstamp_rxq) 2224 derr = ionic_qcq_disable(lif, lif->hwstamp_rxq, derr); 2225 err_out_hwstamp_rx: 2226 i = lif->nxqs; 2227 err_out: 2228 while (i--) { 2229 derr = ionic_qcq_disable(lif, lif->txqcqs[i], derr); 2230 derr = 
ionic_qcq_disable(lif, lif->rxqcqs[i], derr); 2231 } 2232 2233 ionic_xdp_rxqs_prog_update(lif); 2234 2235 return err; 2236 } 2237 2238 static int ionic_start_queues(struct ionic_lif *lif) 2239 { 2240 int err; 2241 2242 if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) 2243 return -EIO; 2244 2245 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 2246 return -EBUSY; 2247 2248 if (test_and_set_bit(IONIC_LIF_F_UP, lif->state)) 2249 return 0; 2250 2251 err = ionic_txrx_enable(lif); 2252 if (err) { 2253 clear_bit(IONIC_LIF_F_UP, lif->state); 2254 return err; 2255 } 2256 netif_tx_wake_all_queues(lif->netdev); 2257 2258 return 0; 2259 } 2260 2261 static int ionic_open(struct net_device *netdev) 2262 { 2263 struct ionic_lif *lif = netdev_priv(netdev); 2264 int err; 2265 2266 /* If recovering from a broken state, clear the bit and we'll try again */ 2267 if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state)) 2268 netdev_info(netdev, "clearing broken state\n"); 2269 2270 mutex_lock(&lif->queue_lock); 2271 2272 err = ionic_txrx_alloc(lif); 2273 if (err) 2274 goto err_unlock; 2275 2276 err = ionic_txrx_init(lif); 2277 if (err) 2278 goto err_txrx_free; 2279 2280 err = netif_set_real_num_tx_queues(netdev, lif->nxqs); 2281 if (err) 2282 goto err_txrx_deinit; 2283 2284 err = netif_set_real_num_rx_queues(netdev, lif->nxqs); 2285 if (err) 2286 goto err_txrx_deinit; 2287 2288 /* don't start the queues until we have link */ 2289 if (netif_carrier_ok(netdev)) { 2290 err = ionic_start_queues(lif); 2291 if (err) 2292 goto err_txrx_deinit; 2293 } 2294 2295 /* If hardware timestamping is enabled, but the queues were freed by 2296 * ionic_stop, those need to be reallocated and initialized, too. 2297 */ 2298 ionic_lif_hwstamp_recreate_queues(lif); 2299 2300 mutex_unlock(&lif->queue_lock); 2301 2302 return 0; 2303 2304 err_txrx_deinit: 2305 ionic_txrx_deinit(lif); 2306 err_txrx_free: 2307 ionic_txrx_free(lif); 2308 err_unlock: 2309 mutex_unlock(&lif->queue_lock); 2310 return err; 2311 } 2312 2313 static void ionic_stop_queues(struct ionic_lif *lif) 2314 { 2315 if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state)) 2316 return; 2317 2318 netif_tx_disable(lif->netdev); 2319 ionic_txrx_disable(lif); 2320 } 2321 2322 static int ionic_stop(struct net_device *netdev) 2323 { 2324 struct ionic_lif *lif = netdev_priv(netdev); 2325 2326 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 2327 return 0; 2328 2329 mutex_lock(&lif->queue_lock); 2330 ionic_stop_queues(lif); 2331 ionic_txrx_deinit(lif); 2332 ionic_txrx_free(lif); 2333 mutex_unlock(&lif->queue_lock); 2334 2335 return 0; 2336 } 2337 2338 static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2339 { 2340 struct ionic_lif *lif = netdev_priv(netdev); 2341 2342 switch (cmd) { 2343 case SIOCSHWTSTAMP: 2344 return ionic_lif_hwstamp_set(lif, ifr); 2345 case SIOCGHWTSTAMP: 2346 return ionic_lif_hwstamp_get(lif, ifr); 2347 default: 2348 return -EOPNOTSUPP; 2349 } 2350 } 2351 2352 static int ionic_get_vf_config(struct net_device *netdev, 2353 int vf, struct ifla_vf_info *ivf) 2354 { 2355 struct ionic_lif *lif = netdev_priv(netdev); 2356 struct ionic *ionic = lif->ionic; 2357 int ret = 0; 2358 2359 if (!netif_device_present(netdev)) 2360 return -EBUSY; 2361 2362 down_read(&ionic->vf_op_lock); 2363 2364 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2365 ret = -EINVAL; 2366 } else { 2367 struct ionic_vf *vfdata = &ionic->vfs[vf]; 2368 2369 ivf->vf = vf; 2370 ivf->qos = 0; 2371 ivf->vlan = le16_to_cpu(vfdata->vlanid); 2372 ivf->spoofchk = vfdata->spoofchk; 
2373 ivf->linkstate = vfdata->linkstate; 2374 ivf->max_tx_rate = le32_to_cpu(vfdata->maxrate); 2375 ivf->trusted = vfdata->trusted; 2376 ether_addr_copy(ivf->mac, vfdata->macaddr); 2377 } 2378 2379 up_read(&ionic->vf_op_lock); 2380 return ret; 2381 } 2382 2383 static int ionic_get_vf_stats(struct net_device *netdev, int vf, 2384 struct ifla_vf_stats *vf_stats) 2385 { 2386 struct ionic_lif *lif = netdev_priv(netdev); 2387 struct ionic *ionic = lif->ionic; 2388 struct ionic_lif_stats *vs; 2389 int ret = 0; 2390 2391 if (!netif_device_present(netdev)) 2392 return -EBUSY; 2393 2394 down_read(&ionic->vf_op_lock); 2395 2396 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2397 ret = -EINVAL; 2398 } else { 2399 memset(vf_stats, 0, sizeof(*vf_stats)); 2400 vs = &ionic->vfs[vf].stats; 2401 2402 vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets); 2403 vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets); 2404 vf_stats->rx_bytes = le64_to_cpu(vs->rx_ucast_bytes); 2405 vf_stats->tx_bytes = le64_to_cpu(vs->tx_ucast_bytes); 2406 vf_stats->broadcast = le64_to_cpu(vs->rx_bcast_packets); 2407 vf_stats->multicast = le64_to_cpu(vs->rx_mcast_packets); 2408 vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) + 2409 le64_to_cpu(vs->rx_mcast_drop_packets) + 2410 le64_to_cpu(vs->rx_bcast_drop_packets); 2411 vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) + 2412 le64_to_cpu(vs->tx_mcast_drop_packets) + 2413 le64_to_cpu(vs->tx_bcast_drop_packets); 2414 } 2415 2416 up_read(&ionic->vf_op_lock); 2417 return ret; 2418 } 2419 2420 static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) 2421 { 2422 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_MAC }; 2423 struct ionic_lif *lif = netdev_priv(netdev); 2424 struct ionic *ionic = lif->ionic; 2425 int ret; 2426 2427 if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac))) 2428 return -EINVAL; 2429 2430 if (!netif_device_present(netdev)) 2431 return -EBUSY; 2432 2433 down_write(&ionic->vf_op_lock); 2434 2435 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2436 ret = -EINVAL; 2437 } else { 2438 ether_addr_copy(vfc.macaddr, mac); 2439 dev_dbg(ionic->dev, "%s: vf %d macaddr %pM\n", 2440 __func__, vf, vfc.macaddr); 2441 2442 ret = ionic_set_vf_config(ionic, vf, &vfc); 2443 if (!ret) 2444 ether_addr_copy(ionic->vfs[vf].macaddr, mac); 2445 } 2446 2447 up_write(&ionic->vf_op_lock); 2448 return ret; 2449 } 2450 2451 static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, 2452 u8 qos, __be16 proto) 2453 { 2454 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_VLAN }; 2455 struct ionic_lif *lif = netdev_priv(netdev); 2456 struct ionic *ionic = lif->ionic; 2457 int ret; 2458 2459 /* until someday when we support qos */ 2460 if (qos) 2461 return -EINVAL; 2462 2463 if (vlan > 4095) 2464 return -EINVAL; 2465 2466 if (proto != htons(ETH_P_8021Q)) 2467 return -EPROTONOSUPPORT; 2468 2469 if (!netif_device_present(netdev)) 2470 return -EBUSY; 2471 2472 down_write(&ionic->vf_op_lock); 2473 2474 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2475 ret = -EINVAL; 2476 } else { 2477 vfc.vlanid = cpu_to_le16(vlan); 2478 dev_dbg(ionic->dev, "%s: vf %d vlan %d\n", 2479 __func__, vf, le16_to_cpu(vfc.vlanid)); 2480 2481 ret = ionic_set_vf_config(ionic, vf, &vfc); 2482 if (!ret) 2483 ionic->vfs[vf].vlanid = cpu_to_le16(vlan); 2484 } 2485 2486 up_write(&ionic->vf_op_lock); 2487 return ret; 2488 } 2489 2490 static int ionic_set_vf_rate(struct net_device *netdev, int vf, 2491 int tx_min, int tx_max) 2492 { 
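/* Note: in the .ndo_set_vf_rate API, tx_min and tx_max are given
 * in Mbps; the device only supports capping the max rate, which
 * is why any nonzero minimum is rejected below.
 */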
2493 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_RATE }; 2494 struct ionic_lif *lif = netdev_priv(netdev); 2495 struct ionic *ionic = lif->ionic; 2496 int ret; 2497 2498 /* setting the min just seems silly */ 2499 if (tx_min) 2500 return -EINVAL; 2501 2502 if (!netif_device_present(netdev)) 2503 return -EBUSY; 2504 2505 down_write(&ionic->vf_op_lock); 2506 2507 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2508 ret = -EINVAL; 2509 } else { 2510 vfc.maxrate = cpu_to_le32(tx_max); 2511 dev_dbg(ionic->dev, "%s: vf %d maxrate %d\n", 2512 __func__, vf, le32_to_cpu(vfc.maxrate)); 2513 2514 ret = ionic_set_vf_config(ionic, vf, &vfc); 2515 if (!ret) 2516 ionic->vfs[vf].maxrate = cpu_to_le32(tx_max); 2517 } 2518 2519 up_write(&ionic->vf_op_lock); 2520 return ret; 2521 } 2522 2523 static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set) 2524 { 2525 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_SPOOFCHK }; 2526 struct ionic_lif *lif = netdev_priv(netdev); 2527 struct ionic *ionic = lif->ionic; 2528 int ret; 2529 2530 if (!netif_device_present(netdev)) 2531 return -EBUSY; 2532 2533 down_write(&ionic->vf_op_lock); 2534 2535 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2536 ret = -EINVAL; 2537 } else { 2538 vfc.spoofchk = set; 2539 dev_dbg(ionic->dev, "%s: vf %d spoof %d\n", 2540 __func__, vf, vfc.spoofchk); 2541 2542 ret = ionic_set_vf_config(ionic, vf, &vfc); 2543 if (!ret) 2544 ionic->vfs[vf].spoofchk = set; 2545 } 2546 2547 up_write(&ionic->vf_op_lock); 2548 return ret; 2549 } 2550 2551 static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set) 2552 { 2553 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_TRUST }; 2554 struct ionic_lif *lif = netdev_priv(netdev); 2555 struct ionic *ionic = lif->ionic; 2556 int ret; 2557 2558 if (!netif_device_present(netdev)) 2559 return -EBUSY; 2560 2561 down_write(&ionic->vf_op_lock); 2562 2563 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2564 ret = -EINVAL; 2565 } else { 2566 vfc.trust = set; 2567 dev_dbg(ionic->dev, "%s: vf %d trust %d\n", 2568 __func__, vf, vfc.trust); 2569 2570 ret = ionic_set_vf_config(ionic, vf, &vfc); 2571 if (!ret) 2572 ionic->vfs[vf].trusted = set; 2573 } 2574 2575 up_write(&ionic->vf_op_lock); 2576 return ret; 2577 } 2578 2579 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set) 2580 { 2581 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_LINKSTATE }; 2582 struct ionic_lif *lif = netdev_priv(netdev); 2583 struct ionic *ionic = lif->ionic; 2584 u8 vfls; 2585 int ret; 2586 2587 switch (set) { 2588 case IFLA_VF_LINK_STATE_ENABLE: 2589 vfls = IONIC_VF_LINK_STATUS_UP; 2590 break; 2591 case IFLA_VF_LINK_STATE_DISABLE: 2592 vfls = IONIC_VF_LINK_STATUS_DOWN; 2593 break; 2594 case IFLA_VF_LINK_STATE_AUTO: 2595 vfls = IONIC_VF_LINK_STATUS_AUTO; 2596 break; 2597 default: 2598 return -EINVAL; 2599 } 2600 2601 if (!netif_device_present(netdev)) 2602 return -EBUSY; 2603 2604 down_write(&ionic->vf_op_lock); 2605 2606 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2607 ret = -EINVAL; 2608 } else { 2609 vfc.linkstate = vfls; 2610 dev_dbg(ionic->dev, "%s: vf %d linkstate %d\n", 2611 __func__, vf, vfc.linkstate); 2612 2613 ret = ionic_set_vf_config(ionic, vf, &vfc); 2614 if (!ret) 2615 ionic->vfs[vf].linkstate = set; 2616 } 2617 2618 up_write(&ionic->vf_op_lock); 2619 return ret; 2620 } 2621 2622 static void ionic_vf_attr_replay(struct ionic_lif *lif) 2623 { 2624 struct ionic_vf_setattr_cmd vfc = { }; 2625 struct ionic *ionic = 
lif->ionic; 2626 struct ionic_vf *v; 2627 int i; 2628 2629 if (!ionic->vfs) 2630 return; 2631 2632 down_read(&ionic->vf_op_lock); 2633 2634 for (i = 0; i < ionic->num_vfs; i++) { 2635 v = &ionic->vfs[i]; 2636 2637 if (v->stats_pa) { 2638 vfc.attr = IONIC_VF_ATTR_STATSADDR; 2639 vfc.stats_pa = cpu_to_le64(v->stats_pa); 2640 ionic_set_vf_config(ionic, i, &vfc); 2641 vfc.stats_pa = 0; 2642 } 2643 2644 if (!is_zero_ether_addr(v->macaddr)) { 2645 vfc.attr = IONIC_VF_ATTR_MAC; 2646 ether_addr_copy(vfc.macaddr, v->macaddr); 2647 ionic_set_vf_config(ionic, i, &vfc); 2648 eth_zero_addr(vfc.macaddr); 2649 } 2650 2651 if (v->vlanid) { 2652 vfc.attr = IONIC_VF_ATTR_VLAN; 2653 vfc.vlanid = v->vlanid; 2654 ionic_set_vf_config(ionic, i, &vfc); 2655 vfc.vlanid = 0; 2656 } 2657 2658 if (v->maxrate) { 2659 vfc.attr = IONIC_VF_ATTR_RATE; 2660 vfc.maxrate = v->maxrate; 2661 ionic_set_vf_config(ionic, i, &vfc); 2662 vfc.maxrate = 0; 2663 } 2664 2665 if (v->spoofchk) { 2666 vfc.attr = IONIC_VF_ATTR_SPOOFCHK; 2667 vfc.spoofchk = v->spoofchk; 2668 ionic_set_vf_config(ionic, i, &vfc); 2669 vfc.spoofchk = 0; 2670 } 2671 2672 if (v->trusted) { 2673 vfc.attr = IONIC_VF_ATTR_TRUST; 2674 vfc.trust = v->trusted; 2675 ionic_set_vf_config(ionic, i, &vfc); 2676 vfc.trust = 0; 2677 } 2678 2679 if (v->linkstate) { 2680 vfc.attr = IONIC_VF_ATTR_LINKSTATE; 2681 vfc.linkstate = v->linkstate; 2682 ionic_set_vf_config(ionic, i, &vfc); 2683 vfc.linkstate = 0; 2684 } 2685 } 2686 2687 up_read(&ionic->vf_op_lock); 2688 2689 ionic_vf_start(ionic); 2690 } 2691 2692 static void ionic_unregister_rxq_info(struct ionic_queue *q) 2693 { 2694 struct xdp_rxq_info *xi; 2695 2696 if (!q->xdp_rxq_info) 2697 return; 2698 2699 xi = q->xdp_rxq_info; 2700 q->xdp_rxq_info = NULL; 2701 2702 xdp_rxq_info_unreg(xi); 2703 kfree(xi); 2704 } 2705 2706 static int ionic_register_rxq_info(struct ionic_queue *q, unsigned int napi_id) 2707 { 2708 struct xdp_rxq_info *rxq_info; 2709 int err; 2710 2711 rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL); 2712 if (!rxq_info) 2713 return -ENOMEM; 2714 2715 err = xdp_rxq_info_reg(rxq_info, q->lif->netdev, q->index, napi_id); 2716 if (err) { 2717 netdev_err(q->lif->netdev, "q%d xdp_rxq_info_reg failed, err %d\n", 2718 q->index, err); 2719 goto err_out; 2720 } 2721 2722 err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_POOL, q->page_pool); 2723 if (err) { 2724 netdev_err(q->lif->netdev, "q%d xdp_rxq_info_reg_mem_model failed, err %d\n", 2725 q->index, err); 2726 xdp_rxq_info_unreg(rxq_info); 2727 goto err_out; 2728 } 2729 2730 q->xdp_rxq_info = rxq_info; 2731 2732 return 0; 2733 2734 err_out: 2735 kfree(rxq_info); 2736 return err; 2737 } 2738 2739 static void ionic_xdp_rxqs_prog_update(struct ionic_lif *lif) 2740 { 2741 struct bpf_prog *xdp_prog; 2742 unsigned int i; 2743 2744 if (!lif->rxqcqs) 2745 return; 2746 2747 xdp_prog = READ_ONCE(lif->xdp_prog); 2748 for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) { 2749 struct ionic_queue *q = &lif->rxqcqs[i]->q; 2750 2751 WRITE_ONCE(q->xdp_prog, xdp_prog); 2752 } 2753 } 2754 2755 static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf) 2756 { 2757 struct ionic_lif *lif = netdev_priv(netdev); 2758 struct bpf_prog *old_prog; 2759 u32 maxfs; 2760 2761 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) { 2762 #define XDP_ERR_SPLIT "XDP not available with split Tx/Rx interrupts" 2763 NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_SPLIT); 2764 netdev_info(lif->netdev, XDP_ERR_SPLIT); 2765 return -EOPNOTSUPP; 2766 } 2767 2768 if 
(!ionic_xdp_is_valid_mtu(lif, netdev->mtu, bpf->prog)) { 2769 #define XDP_ERR_MTU "MTU is too large for XDP without frags support" 2770 NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_MTU); 2771 netdev_info(lif->netdev, XDP_ERR_MTU); 2772 return -EINVAL; 2773 } 2774 2775 maxfs = __le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN; 2776 if (bpf->prog && !(bpf->prog->aux && bpf->prog->aux->xdp_has_frags)) 2777 maxfs = min_t(u32, maxfs, IONIC_XDP_MAX_LINEAR_MTU); 2778 netdev->max_mtu = maxfs; 2779 2780 if (!netif_running(netdev)) { 2781 old_prog = xchg(&lif->xdp_prog, bpf->prog); 2782 } else if (lif->xdp_prog && bpf->prog) { 2783 old_prog = xchg(&lif->xdp_prog, bpf->prog); 2784 ionic_xdp_rxqs_prog_update(lif); 2785 } else { 2786 struct ionic_queue_params qparams; 2787 2788 ionic_init_queue_params(lif, &qparams); 2789 qparams.xdp_prog = bpf->prog; 2790 mutex_lock(&lif->queue_lock); 2791 ionic_reconfigure_queues(lif, &qparams); 2792 old_prog = xchg(&lif->xdp_prog, bpf->prog); 2793 mutex_unlock(&lif->queue_lock); 2794 } 2795 2796 if (old_prog) 2797 bpf_prog_put(old_prog); 2798 2799 return 0; 2800 } 2801 2802 static int ionic_xdp(struct net_device *netdev, struct netdev_bpf *bpf) 2803 { 2804 switch (bpf->command) { 2805 case XDP_SETUP_PROG: 2806 return ionic_xdp_config(netdev, bpf); 2807 default: 2808 return -EINVAL; 2809 } 2810 } 2811 2812 static const struct net_device_ops ionic_netdev_ops = { 2813 .ndo_open = ionic_open, 2814 .ndo_stop = ionic_stop, 2815 .ndo_eth_ioctl = ionic_eth_ioctl, 2816 .ndo_start_xmit = ionic_start_xmit, 2817 .ndo_bpf = ionic_xdp, 2818 .ndo_xdp_xmit = ionic_xdp_xmit, 2819 .ndo_get_stats64 = ionic_get_stats64, 2820 .ndo_set_rx_mode = ionic_ndo_set_rx_mode, 2821 .ndo_set_features = ionic_set_features, 2822 .ndo_set_mac_address = ionic_set_mac_address, 2823 .ndo_validate_addr = eth_validate_addr, 2824 .ndo_tx_timeout = ionic_tx_timeout, 2825 .ndo_change_mtu = ionic_change_mtu, 2826 .ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid, 2827 .ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid, 2828 .ndo_set_vf_vlan = ionic_set_vf_vlan, 2829 .ndo_set_vf_trust = ionic_set_vf_trust, 2830 .ndo_set_vf_mac = ionic_set_vf_mac, 2831 .ndo_set_vf_rate = ionic_set_vf_rate, 2832 .ndo_set_vf_spoofchk = ionic_set_vf_spoofchk, 2833 .ndo_get_vf_config = ionic_get_vf_config, 2834 .ndo_set_vf_link_state = ionic_set_vf_link_state, 2835 .ndo_get_vf_stats = ionic_get_vf_stats, 2836 }; 2837 2838 static int ionic_cmb_reconfig(struct ionic_lif *lif, 2839 struct ionic_queue_params *qparam) 2840 { 2841 struct ionic_queue_params start_qparams; 2842 int err = 0; 2843 2844 /* When changing CMB queue parameters, we're using limited 2845 * on-device memory and don't have extra memory to use for 2846 * duplicate allocations, so we free it all first then 2847 * re-allocate with the new parameters. 
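 * The netdev stays detached for the whole operation (see
 * ionic_stop_queues_reconfig()), so the stack can't queue new Tx
 * work while the rings are gone; we re-attach at err_out below.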
2848 */ 2849 2850 /* Checkpoint for possible unwind */ 2851 ionic_init_queue_params(lif, &start_qparams); 2852 2853 /* Stop and free the queues */ 2854 ionic_stop_queues_reconfig(lif); 2855 ionic_txrx_free(lif); 2856 2857 /* Set up new qparams */ 2858 ionic_set_queue_params(lif, qparam); 2859 2860 if (netif_running(lif->netdev)) { 2861 /* Alloc and start the new configuration */ 2862 err = ionic_txrx_alloc(lif); 2863 if (err) { 2864 dev_warn(lif->ionic->dev, 2865 "CMB reconfig failed, restoring values: %d\n", err); 2866 2867 /* Back out the changes */ 2868 ionic_set_queue_params(lif, &start_qparams); 2869 err = ionic_txrx_alloc(lif); 2870 if (err) { 2871 dev_err(lif->ionic->dev, 2872 "CMB restore failed: %d\n", err); 2873 goto err_out; 2874 } 2875 } 2876 2877 err = ionic_start_queues_reconfig(lif); 2878 if (err) { 2879 dev_err(lif->ionic->dev, 2880 "CMB reconfig failed: %d\n", err); 2881 goto err_out; 2882 } 2883 } 2884 2885 err_out: 2886 /* This was detached in ionic_stop_queues_reconfig() */ 2887 netif_device_attach(lif->netdev); 2888 2889 return err; 2890 } 2891 2892 static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b) 2893 { 2894 /* only swapping the queues and napi, not flags or other stuff */ 2895 swap(a->napi, b->napi); 2896 2897 if (a->q.type == IONIC_QTYPE_RXQ) { 2898 swap(a->q.page_pool, b->q.page_pool); 2899 a->q.page_pool->p.napi = &a->napi; 2900 if (b->q.page_pool) /* is NULL when increasing queue count */ 2901 b->q.page_pool->p.napi = &b->napi; 2902 } 2903 2904 swap(a->q.features, b->q.features); 2905 swap(a->q.num_descs, b->q.num_descs); 2906 swap(a->q.desc_size, b->q.desc_size); 2907 swap(a->q.base, b->q.base); 2908 swap(a->q.base_pa, b->q.base_pa); 2909 swap(a->q.info, b->q.info); 2910 swap(a->q.xdp_prog, b->q.xdp_prog); 2911 swap(a->q.xdp_rxq_info, b->q.xdp_rxq_info); 2912 swap(a->q.partner, b->q.partner); 2913 swap(a->q_base, b->q_base); 2914 swap(a->q_base_pa, b->q_base_pa); 2915 swap(a->q_size, b->q_size); 2916 2917 swap(a->q.sg_desc_size, b->q.sg_desc_size); 2918 swap(a->q.sg_base, b->q.sg_base); 2919 swap(a->q.sg_base_pa, b->q.sg_base_pa); 2920 swap(a->sg_base, b->sg_base); 2921 swap(a->sg_base_pa, b->sg_base_pa); 2922 swap(a->sg_size, b->sg_size); 2923 2924 swap(a->cq.num_descs, b->cq.num_descs); 2925 swap(a->cq.desc_size, b->cq.desc_size); 2926 swap(a->cq.base, b->cq.base); 2927 swap(a->cq.base_pa, b->cq.base_pa); 2928 swap(a->cq_base, b->cq_base); 2929 swap(a->cq_base_pa, b->cq_base_pa); 2930 swap(a->cq_size, b->cq_size); 2931 2932 ionic_debugfs_del_qcq(a); 2933 ionic_debugfs_add_qcq(a->q.lif, a); 2934 } 2935 2936 int ionic_reconfigure_queues(struct ionic_lif *lif, 2937 struct ionic_queue_params *qparam) 2938 { 2939 unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz; 2940 struct ionic_qcq **tx_qcqs = NULL; 2941 struct ionic_qcq **rx_qcqs = NULL; 2942 unsigned int flags, i; 2943 int err = 0; 2944 2945 /* Are we changing q params while CMB is on */ 2946 if ((test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) && qparam->cmb_tx) || 2947 (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state) && qparam->cmb_rx)) 2948 return ionic_cmb_reconfig(lif, qparam); 2949 2950 /* allocate temporary qcq arrays to hold new queue structs */ 2951 if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) { 2952 tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif, 2953 sizeof(struct ionic_qcq *), GFP_KERNEL); 2954 if (!tx_qcqs) { 2955 err = -ENOMEM; 2956 goto err_out; 2957 } 2958 } 2959 if (qparam->nxqs != lif->nxqs || 2960 qparam->nrxq_descs != 
lif->nrxq_descs || 2961 qparam->rxq_features != lif->rxq_features || 2962 qparam->xdp_prog != lif->xdp_prog) { 2963 rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif, 2964 sizeof(struct ionic_qcq *), GFP_KERNEL); 2965 if (!rx_qcqs) { 2966 err = -ENOMEM; 2967 goto err_out; 2968 } 2969 } 2970 2971 /* allocate new desc_info and rings, but leave the interrupt setup 2972 * until later so as to not mess with the still-running queues 2973 */ 2974 if (tx_qcqs) { 2975 num_desc = qparam->ntxq_descs; 2976 desc_sz = sizeof(struct ionic_txq_desc); 2977 comp_sz = sizeof(struct ionic_txq_comp); 2978 2979 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 && 2980 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == 2981 sizeof(struct ionic_txq_sg_desc_v1)) 2982 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1); 2983 else 2984 sg_desc_sz = sizeof(struct ionic_txq_sg_desc); 2985 2986 for (i = 0; i < qparam->nxqs; i++) { 2987 /* If missing, short placeholder qcq needed for swap */ 2988 if (!lif->txqcqs[i]) { 2989 flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; 2990 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 2991 4, desc_sz, comp_sz, sg_desc_sz, 2992 sizeof(struct ionic_tx_desc_info), 2993 lif->kern_pid, NULL, &lif->txqcqs[i]); 2994 if (err) 2995 goto err_out; 2996 } 2997 2998 flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR; 2999 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 3000 num_desc, desc_sz, comp_sz, sg_desc_sz, 3001 sizeof(struct ionic_tx_desc_info), 3002 lif->kern_pid, NULL, &tx_qcqs[i]); 3003 if (err) 3004 goto err_out; 3005 } 3006 } 3007 3008 if (rx_qcqs) { 3009 num_desc = qparam->nrxq_descs; 3010 desc_sz = sizeof(struct ionic_rxq_desc); 3011 comp_sz = sizeof(struct ionic_rxq_comp); 3012 sg_desc_sz = sizeof(struct ionic_rxq_sg_desc); 3013 3014 if (qparam->rxq_features & IONIC_Q_F_2X_CQ_DESC) 3015 comp_sz *= 2; 3016 3017 for (i = 0; i < qparam->nxqs; i++) { 3018 /* If missing, short placeholder qcq needed for swap */ 3019 if (!lif->rxqcqs[i]) { 3020 flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG; 3021 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 3022 4, desc_sz, comp_sz, sg_desc_sz, 3023 sizeof(struct ionic_rx_desc_info), 3024 lif->kern_pid, NULL, &lif->rxqcqs[i]); 3025 if (err) 3026 goto err_out; 3027 } 3028 3029 flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR; 3030 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 3031 num_desc, desc_sz, comp_sz, sg_desc_sz, 3032 sizeof(struct ionic_rx_desc_info), 3033 lif->kern_pid, qparam->xdp_prog, &rx_qcqs[i]); 3034 if (err) 3035 goto err_out; 3036 3037 rx_qcqs[i]->q.features = qparam->rxq_features; 3038 rx_qcqs[i]->q.xdp_prog = qparam->xdp_prog; 3039 } 3040 } 3041 3042 /* stop and clean the queues */ 3043 ionic_stop_queues_reconfig(lif); 3044 3045 if (qparam->nxqs != lif->nxqs) { 3046 err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs); 3047 if (err) 3048 goto err_out_reinit_unlock; 3049 err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs); 3050 if (err) { 3051 netif_set_real_num_tx_queues(lif->netdev, lif->nxqs); 3052 goto err_out_reinit_unlock; 3053 } 3054 } 3055 3056 /* swap new desc_info and rings, keeping existing interrupt config */ 3057 if (tx_qcqs) { 3058 lif->ntxq_descs = qparam->ntxq_descs; 3059 for (i = 0; i < qparam->nxqs; i++) 3060 ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]); 3061 } 3062 3063 if (rx_qcqs) { 3064 lif->nrxq_descs = qparam->nrxq_descs; 3065 for (i = 0; i < qparam->nxqs; i++) 3066 ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]); 3067 } 3068 3069 /* 
if we need to change the interrupt layout, this is the time */ 3070 if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) || 3071 qparam->nxqs != lif->nxqs) { 3072 if (qparam->intr_split) { 3073 set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); 3074 } else { 3075 clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); 3076 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs; 3077 lif->tx_coalesce_hw = lif->rx_coalesce_hw; 3078 } 3079 3080 /* Clear existing interrupt assignments. We check for NULL here 3081 * because we're checking the whole array for potential qcqs, not 3082 * just those qcqs that have just been set up. 3083 */ 3084 for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) { 3085 if (lif->txqcqs[i]) 3086 ionic_qcq_intr_free(lif, lif->txqcqs[i]); 3087 if (lif->rxqcqs[i]) 3088 ionic_qcq_intr_free(lif, lif->rxqcqs[i]); 3089 } 3090 3091 /* re-assign the interrupts */ 3092 for (i = 0; i < qparam->nxqs; i++) { 3093 lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR; 3094 err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]); 3095 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 3096 lif->rxqcqs[i]->intr.index, 3097 lif->rx_coalesce_hw); 3098 3099 if (qparam->intr_split) { 3100 lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR; 3101 err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]); 3102 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 3103 lif->txqcqs[i]->intr.index, 3104 lif->tx_coalesce_hw); 3105 if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state)) 3106 lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw; 3107 } else { 3108 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3109 ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]); 3110 } 3111 } 3112 } 3113 3114 /* now we can rework the debugfs mappings */ 3115 if (tx_qcqs) { 3116 for (i = 0; i < qparam->nxqs; i++) { 3117 ionic_debugfs_del_qcq(lif->txqcqs[i]); 3118 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]); 3119 } 3120 } 3121 3122 if (rx_qcqs) { 3123 for (i = 0; i < qparam->nxqs; i++) { 3124 ionic_debugfs_del_qcq(lif->rxqcqs[i]); 3125 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]); 3126 } 3127 } 3128 3129 swap(lif->nxqs, qparam->nxqs); 3130 swap(lif->rxq_features, qparam->rxq_features); 3131 3132 err_out_reinit_unlock: 3133 /* re-init the queues, but don't lose an error code */ 3134 if (err) 3135 ionic_start_queues_reconfig(lif); 3136 else 3137 err = ionic_start_queues_reconfig(lif); 3138 3139 err_out: 3140 /* free old allocs without cleaning intr */ 3141 for (i = 0; i < qparam->nxqs; i++) { 3142 if (tx_qcqs && tx_qcqs[i]) { 3143 tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3144 ionic_qcq_free(lif, tx_qcqs[i]); 3145 devm_kfree(lif->ionic->dev, tx_qcqs[i]); 3146 tx_qcqs[i] = NULL; 3147 } 3148 if (rx_qcqs && rx_qcqs[i]) { 3149 rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3150 ionic_qcq_free(lif, rx_qcqs[i]); 3151 devm_kfree(lif->ionic->dev, rx_qcqs[i]); 3152 rx_qcqs[i] = NULL; 3153 } 3154 } 3155 3156 /* free q array */ 3157 if (rx_qcqs) { 3158 devm_kfree(lif->ionic->dev, rx_qcqs); 3159 rx_qcqs = NULL; 3160 } 3161 if (tx_qcqs) { 3162 devm_kfree(lif->ionic->dev, tx_qcqs); 3163 tx_qcqs = NULL; 3164 } 3165 3166 /* clean the unused dma and info allocations when new set is smaller 3167 * than the full array, but leave the qcq shells in place 3168 */ 3169 for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) { 3170 if (lif->txqcqs && lif->txqcqs[i]) { 3171 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3172 ionic_qcq_free(lif, lif->txqcqs[i]); 3173 } 3174 3175 if (lif->rxqcqs && lif->rxqcqs[i]) { 3176 lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3177 ionic_qcq_free(lif, 
lif->rxqcqs[i]); 3178 } 3179 } 3180 3181 if (err) 3182 netdev_info(lif->netdev, "%s: failed %d\n", __func__, err); 3183 3184 return err; 3185 } 3186 3187 static int ionic_affinity_masks_alloc(struct ionic *ionic) 3188 { 3189 cpumask_var_t *affinity_masks; 3190 int nintrs = ionic->nintrs; 3191 int i; 3192 3193 affinity_masks = kcalloc(nintrs, sizeof(cpumask_var_t), GFP_KERNEL); 3194 if (!affinity_masks) 3195 return -ENOMEM; 3196 3197 for (i = 0; i < nintrs; i++) { 3198 if (!zalloc_cpumask_var_node(&affinity_masks[i], GFP_KERNEL, 3199 dev_to_node(ionic->dev))) 3200 goto err_out; 3201 } 3202 3203 ionic->affinity_masks = affinity_masks; 3204 3205 return 0; 3206 3207 err_out: 3208 for (--i; i >= 0; i--) 3209 free_cpumask_var(affinity_masks[i]); 3210 kfree(affinity_masks); 3211 3212 return -ENOMEM; 3213 } 3214 3215 static void ionic_affinity_masks_free(struct ionic *ionic) 3216 { 3217 int i; 3218 3219 for (i = 0; i < ionic->nintrs; i++) 3220 free_cpumask_var(ionic->affinity_masks[i]); 3221 kfree(ionic->affinity_masks); 3222 ionic->affinity_masks = NULL; 3223 } 3224 3225 int ionic_lif_alloc(struct ionic *ionic) 3226 { 3227 struct device *dev = ionic->dev; 3228 union ionic_lif_identity *lid; 3229 struct net_device *netdev; 3230 struct ionic_lif *lif; 3231 int tbl_sz; 3232 int err; 3233 3234 lid = kzalloc(sizeof(*lid), GFP_KERNEL); 3235 if (!lid) 3236 return -ENOMEM; 3237 3238 netdev = alloc_etherdev_mqs(sizeof(*lif), 3239 ionic->ntxqs_per_lif, ionic->ntxqs_per_lif); 3240 if (!netdev) { 3241 dev_err(dev, "Cannot allocate netdev, aborting\n"); 3242 err = -ENOMEM; 3243 goto err_out_free_lid; 3244 } 3245 3246 SET_NETDEV_DEV(netdev, dev); 3247 3248 lif = netdev_priv(netdev); 3249 lif->netdev = netdev; 3250 ionic->lif = lif; 3251 lif->ionic = ionic; 3252 netdev->netdev_ops = &ionic_netdev_ops; 3253 ionic_ethtool_set_ops(netdev); 3254 3255 netdev->watchdog_timeo = 5 * HZ; 3256 netif_carrier_off(netdev); 3257 3258 lif->identity = lid; 3259 lif->lif_type = IONIC_LIF_TYPE_CLASSIC; 3260 err = ionic_lif_identify(ionic, lif->lif_type, lif->identity); 3261 if (err) { 3262 dev_err(ionic->dev, "Cannot identify type %d: %d\n", 3263 lif->lif_type, err); 3264 goto err_out_free_netdev; 3265 } 3266 lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU, 3267 le32_to_cpu(lif->identity->eth.min_frame_size)); 3268 lif->netdev->max_mtu = 3269 le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN; 3270 3271 lif->neqs = ionic->neqs_per_lif; 3272 lif->nxqs = ionic->ntxqs_per_lif; 3273 3274 lif->index = 0; 3275 3276 if (is_kdump_kernel()) { 3277 lif->ntxq_descs = IONIC_MIN_TXRX_DESC; 3278 lif->nrxq_descs = IONIC_MIN_TXRX_DESC; 3279 } else { 3280 lif->ntxq_descs = IONIC_DEF_TXRX_DESC; 3281 lif->nrxq_descs = IONIC_DEF_TXRX_DESC; 3282 } 3283 3284 /* Convert the default coalesce value to actual hw resolution */ 3285 lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT; 3286 lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic, 3287 lif->rx_coalesce_usecs); 3288 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs; 3289 lif->tx_coalesce_hw = lif->rx_coalesce_hw; 3290 set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state); 3291 set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state); 3292 3293 snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index); 3294 3295 mutex_init(&lif->queue_lock); 3296 mutex_init(&lif->config_lock); 3297 mutex_init(&lif->adev_lock); 3298 3299 spin_lock_init(&lif->adminq_lock); 3300 3301 spin_lock_init(&lif->deferred.lock); 3302 INIT_LIST_HEAD(&lif->deferred.list); 3303 INIT_WORK(&lif->deferred.work, 
ionic_lif_deferred_work); 3304 3305 /* allocate lif info */ 3306 lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE); 3307 lif->info = dma_alloc_coherent(dev, lif->info_sz, 3308 &lif->info_pa, GFP_KERNEL); 3309 if (!lif->info) { 3310 dev_err(dev, "Failed to allocate lif info, aborting\n"); 3311 err = -ENOMEM; 3312 goto err_out_free_mutex; 3313 } 3314 3315 ionic_debugfs_add_lif(lif); 3316 3317 err = ionic_affinity_masks_alloc(ionic); 3318 if (err) 3319 goto err_out_free_lif_info; 3320 3321 /* allocate control queues and txrx queue arrays */ 3322 ionic_lif_queue_identify(lif); 3323 err = ionic_qcqs_alloc(lif); 3324 if (err) 3325 goto err_out_free_affinity_masks; 3326 3327 /* allocate rss indirection table */ 3328 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 3329 lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz; 3330 lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz, 3331 &lif->rss_ind_tbl_pa, 3332 GFP_KERNEL); 3333 3334 if (!lif->rss_ind_tbl) { 3335 err = -ENOMEM; 3336 dev_err(dev, "Failed to allocate rss indirection table, aborting\n"); 3337 goto err_out_free_qcqs; 3338 } 3339 netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE); 3340 3341 ionic_lif_alloc_phc(lif); 3342 3343 return 0; 3344 3345 err_out_free_qcqs: 3346 ionic_qcqs_free(lif); 3347 err_out_free_affinity_masks: 3348 ionic_affinity_masks_free(lif->ionic); 3349 err_out_free_lif_info: 3350 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); 3351 lif->info = NULL; 3352 lif->info_pa = 0; 3353 err_out_free_mutex: 3354 mutex_destroy(&lif->adev_lock); 3355 mutex_destroy(&lif->config_lock); 3356 mutex_destroy(&lif->queue_lock); 3357 err_out_free_netdev: 3358 free_netdev(lif->netdev); 3359 lif = NULL; 3360 err_out_free_lid: 3361 kfree(lid); 3362 3363 return err; 3364 } 3365 3366 static void ionic_lif_reset(struct ionic_lif *lif) 3367 { 3368 struct ionic_dev *idev = &lif->ionic->idev; 3369 3370 if (!ionic_is_fw_running(idev)) 3371 return; 3372 3373 mutex_lock(&lif->ionic->dev_cmd_lock); 3374 ionic_dev_cmd_lif_reset(idev, lif->index); 3375 ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 3376 mutex_unlock(&lif->ionic->dev_cmd_lock); 3377 } 3378 3379 static void ionic_lif_handle_fw_down(struct ionic_lif *lif) 3380 { 3381 struct ionic *ionic = lif->ionic; 3382 3383 if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state)) 3384 return; 3385 3386 dev_info(ionic->dev, "FW Down: Stopping LIFs\n"); 3387 3388 netif_device_detach(lif->netdev); 3389 3390 ionic_auxbus_unregister(ionic->lif); 3391 mutex_lock(&lif->queue_lock); 3392 if (test_bit(IONIC_LIF_F_UP, lif->state)) { 3393 dev_info(ionic->dev, "Surprise FW stop, stopping queues\n"); 3394 ionic_stop_queues(lif); 3395 } 3396 3397 if (netif_running(lif->netdev)) { 3398 ionic_txrx_deinit(lif); 3399 ionic_txrx_free(lif); 3400 } 3401 ionic_lif_deinit(lif); 3402 ionic_reset(ionic); 3403 ionic_qcqs_free(lif); 3404 3405 mutex_unlock(&lif->queue_lock); 3406 3407 clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state); 3408 dev_info(ionic->dev, "FW Down: LIFs stopped\n"); 3409 } 3410 3411 int ionic_restart_lif(struct ionic_lif *lif) 3412 { 3413 struct ionic *ionic = lif->ionic; 3414 int err; 3415 3416 mutex_lock(&lif->queue_lock); 3417 3418 if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state)) 3419 dev_info(ionic->dev, "FW Up: clearing broken state\n"); 3420 3421 err = ionic_qcqs_alloc(lif); 3422 if (err) 3423 goto err_unlock; 3424 3425 err = ionic_lif_init(lif); 3426 if (err) 3427 goto err_qcqs_free; 3428 3429 ionic_vf_attr_replay(lif); 3430 3431 if 
(lif->registered) 3432 ionic_lif_set_netdev_info(lif); 3433 3434 ionic_rx_filter_replay(lif); 3435 3436 if (netif_running(lif->netdev)) { 3437 err = ionic_txrx_alloc(lif); 3438 if (err) 3439 goto err_lifs_deinit; 3440 3441 err = ionic_txrx_init(lif); 3442 if (err) 3443 goto err_txrx_free; 3444 } 3445 3446 mutex_unlock(&lif->queue_lock); 3447 3448 clear_bit(IONIC_LIF_F_FW_RESET, lif->state); 3449 ionic_link_status_check_request(lif, CAN_SLEEP); 3450 netif_device_attach(lif->netdev); 3451 ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE); 3452 3453 ionic_auxbus_register(ionic->lif); 3454 3455 return 0; 3456 3457 err_txrx_free: 3458 ionic_txrx_free(lif); 3459 err_lifs_deinit: 3460 ionic_lif_deinit(lif); 3461 err_qcqs_free: 3462 ionic_qcqs_free(lif); 3463 err_unlock: 3464 mutex_unlock(&lif->queue_lock); 3465 3466 return err; 3467 } 3468 3469 static void ionic_lif_handle_fw_up(struct ionic_lif *lif) 3470 { 3471 struct ionic *ionic = lif->ionic; 3472 int err; 3473 3474 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 3475 return; 3476 3477 dev_info(ionic->dev, "FW Up: restarting LIFs\n"); 3478 3479 /* This is a little different from what happens at 3480 * probe time because the LIF already exists so we 3481 * just need to reanimate it. 3482 */ 3483 ionic_init_devinfo(ionic); 3484 ionic_reset(ionic); 3485 err = ionic_identify(ionic); 3486 if (err) 3487 goto err_out; 3488 err = ionic_port_identify(ionic); 3489 if (err) 3490 goto err_out; 3491 err = ionic_port_init(ionic); 3492 if (err) 3493 goto err_out; 3494 3495 err = ionic_restart_lif(lif); 3496 if (err) 3497 goto err_out; 3498 3499 dev_info(ionic->dev, "FW Up: LIFs restarted\n"); 3500 3501 /* restore the hardware timestamping queues */ 3502 ionic_lif_hwstamp_replay(lif); 3503 3504 return; 3505 3506 err_out: 3507 dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err); 3508 } 3509 3510 void ionic_lif_free(struct ionic_lif *lif) 3511 { 3512 struct device *dev = lif->ionic->dev; 3513 3514 ionic_lif_free_phc(lif); 3515 3516 /* free rss indirection table */ 3517 dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl, 3518 lif->rss_ind_tbl_pa); 3519 lif->rss_ind_tbl = NULL; 3520 lif->rss_ind_tbl_pa = 0; 3521 3522 /* free queues */ 3523 ionic_qcqs_free(lif); 3524 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 3525 ionic_lif_reset(lif); 3526 3527 ionic_affinity_masks_free(lif->ionic); 3528 3529 /* free lif info */ 3530 kfree(lif->identity); 3531 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); 3532 lif->info = NULL; 3533 lif->info_pa = 0; 3534 3535 mutex_destroy(&lif->config_lock); 3536 mutex_destroy(&lif->queue_lock); 3537 mutex_destroy(&lif->adev_lock); 3538 3539 /* free netdev & lif */ 3540 ionic_debugfs_del_lif(lif); 3541 free_netdev(lif->netdev); 3542 } 3543 3544 void ionic_lif_deinit(struct ionic_lif *lif) 3545 { 3546 if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state)) 3547 return; 3548 3549 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { 3550 cancel_work_sync(&lif->deferred.work); 3551 cancel_work_sync(&lif->tx_timeout_work); 3552 ionic_rx_filters_deinit(lif); 3553 if (lif->netdev->features & NETIF_F_RXHASH) 3554 ionic_lif_rss_deinit(lif); 3555 } 3556 3557 napi_disable(&lif->adminqcq->napi); 3558 ionic_lif_qcq_deinit(lif, lif->notifyqcq); 3559 ionic_lif_qcq_deinit(lif, lif->adminqcq); 3560 3561 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); 3562 lif->kern_dbpage = NULL; 3563 3564 ionic_lif_reset(lif); 3565 } 3566 3567 static int ionic_lif_adminq_init(struct ionic_lif *lif) 3568 { 3569 struct 
device *dev = lif->ionic->dev; 3570 struct ionic_q_init_comp comp; 3571 struct ionic_dev *idev; 3572 struct ionic_qcq *qcq; 3573 struct ionic_queue *q; 3574 int err; 3575 3576 idev = &lif->ionic->idev; 3577 qcq = lif->adminqcq; 3578 q = &qcq->q; 3579 3580 mutex_lock(&lif->ionic->dev_cmd_lock); 3581 ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index); 3582 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 3583 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); 3584 mutex_unlock(&lif->ionic->dev_cmd_lock); 3585 if (err) { 3586 netdev_err(lif->netdev, "adminq init failed %d\n", err); 3587 return err; 3588 } 3589 3590 q->hw_type = comp.hw_type; 3591 q->hw_index = le32_to_cpu(comp.hw_index); 3592 q->dbval = IONIC_DBELL_QID(q->hw_index); 3593 3594 dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type); 3595 dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index); 3596 3597 q->dbell_deadline = IONIC_ADMIN_DOORBELL_DEADLINE; 3598 q->dbell_jiffies = jiffies; 3599 3600 netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi); 3601 3602 napi_enable(&qcq->napi); 3603 3604 if (qcq->flags & IONIC_QCQ_F_INTR) { 3605 irq_set_affinity_hint(qcq->intr.vector, 3606 *qcq->intr.affinity_mask); 3607 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 3608 IONIC_INTR_MASK_CLEAR); 3609 } 3610 3611 qcq->flags |= IONIC_QCQ_F_INITED; 3612 3613 return 0; 3614 } 3615 3616 static int ionic_lif_notifyq_init(struct ionic_lif *lif) 3617 { 3618 struct ionic_qcq *qcq = lif->notifyqcq; 3619 struct device *dev = lif->ionic->dev; 3620 struct ionic_queue *q = &qcq->q; 3621 int err; 3622 3623 struct ionic_admin_ctx ctx = { 3624 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 3625 .cmd.q_init = { 3626 .opcode = IONIC_CMD_Q_INIT, 3627 .lif_index = cpu_to_le16(lif->index), 3628 .type = q->type, 3629 .ver = lif->qtype_info[q->type].version, 3630 .index = cpu_to_le32(q->index), 3631 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | 3632 IONIC_QINIT_F_ENA), 3633 .intr_index = cpu_to_le16(lif->adminqcq->intr.index), 3634 .pid = cpu_to_le16(q->pid), 3635 .ring_size = ilog2(q->num_descs), 3636 .ring_base = cpu_to_le64(q->base_pa), 3637 } 3638 }; 3639 3640 dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid); 3641 dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index); 3642 dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); 3643 dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); 3644 3645 err = ionic_adminq_post_wait(lif, &ctx); 3646 if (err) 3647 return err; 3648 3649 lif->last_eid = 0; 3650 q->hw_type = ctx.comp.q_init.hw_type; 3651 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); 3652 q->dbval = IONIC_DBELL_QID(q->hw_index); 3653 3654 dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type); 3655 dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index); 3656 3657 /* preset the callback info */ 3658 q->admin_info[0].ctx = lif; 3659 3660 qcq->flags |= IONIC_QCQ_F_INITED; 3661 3662 return 0; 3663 } 3664 3665 static int ionic_station_set(struct ionic_lif *lif) 3666 { 3667 struct net_device *netdev = lif->netdev; 3668 struct ionic_admin_ctx ctx = { 3669 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 3670 .cmd.lif_getattr = { 3671 .opcode = IONIC_CMD_LIF_GETATTR, 3672 .index = cpu_to_le16(lif->index), 3673 .attr = IONIC_LIF_ATTR_MAC, 3674 }, 3675 }; 3676 u8 mac_address[ETH_ALEN]; 3677 struct sockaddr addr; 3678 int err; 3679 3680 err = ionic_adminq_post_wait(lif, &ctx); 3681 if (err) 3682 return err; 3683 netdev_dbg(lif->netdev, "found initial MAC addr %pM\n", 3684 
ctx.comp.lif_getattr.mac);
3685 ether_addr_copy(mac_address, ctx.comp.lif_getattr.mac);
3686
3687 if (is_zero_ether_addr(mac_address)) {
3688 eth_hw_addr_random(netdev);
3689 netdev_dbg(netdev, "Random MAC generated: %pM\n", netdev->dev_addr);
3690 ether_addr_copy(mac_address, netdev->dev_addr);
3691
3692 err = ionic_program_mac(lif, mac_address);
3693 if (err < 0)
3694 return err;
3695
3696 if (err > 0) {
3697 netdev_dbg(netdev, "%s: SET/GET ATTR MAC addresses differ - old FW running\n",
3698 __func__);
3699 return 0;
3700 }
3701 }
3702
3703 if (!is_zero_ether_addr(netdev->dev_addr)) {
3704 /* If the netdev mac is non-zero and doesn't match the default
3705 * device address, it was set by something earlier and we're
3706 * likely here again after a fw-upgrade reset. We need to be
3707 * sure the netdev mac is in our filter list.
3708 */
3709 if (!ether_addr_equal(mac_address, netdev->dev_addr))
3710 ionic_lif_addr_add(lif, netdev->dev_addr);
3711 } else {
3712 /* Update the netdev mac with the device's mac */
3713 ether_addr_copy(addr.sa_data, mac_address);
3714 addr.sa_family = AF_INET;
3715 err = eth_prepare_mac_addr_change(netdev, &addr);
3716 if (err) {
3717 netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
3718 addr.sa_data, err);
3719 return 0;
3720 }
3721
3722 eth_commit_mac_addr_change(netdev, &addr);
3723 }
3724
3725 netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
3726 netdev->dev_addr);
3727 ionic_lif_addr_add(lif, netdev->dev_addr);
3728
3729 return 0;
3730 }
3731
3732 int ionic_lif_init(struct ionic_lif *lif)
3733 {
3734 struct ionic_dev *idev = &lif->ionic->idev;
3735 struct device *dev = lif->ionic->dev;
3736 struct ionic_lif_init_comp comp;
3737 int dbpage_num;
3738 int err;
3739
3740 mutex_lock(&lif->ionic->dev_cmd_lock);
3741 ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
3742 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
3743 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
3744 mutex_unlock(&lif->ionic->dev_cmd_lock);
3745 if (err)
3746 return err;
3747
3748 lif->hw_index = le16_to_cpu(comp.hw_index);
3749
3750 /* now that we have the hw_index we can figure out our doorbell page */
3751 lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
3752 if (!lif->dbid_count) {
3753 dev_err(dev, "No doorbell pages, aborting\n");
3754 return -EINVAL;
3755 }
3756
3757 lif->kern_pid = 0;
3758 dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
3759 lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
3760 if (!lif->kern_dbpage) {
3761 dev_err(dev, "Cannot map dbpage, aborting\n");
3762 return -ENOMEM;
3763 }
3764
3765 err = ionic_lif_adminq_init(lif);
3766 if (err)
3767 goto err_out_adminq_deinit;
3768
3769 if (lif->ionic->nnqs_per_lif) {
3770 err = ionic_lif_notifyq_init(lif);
3771 if (err)
3772 goto err_out_notifyq_deinit;
3773 }
3774
3775 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
3776 err = ionic_set_nic_features(lif, lif->netdev->features);
3777 else
3778 err = ionic_init_nic_features(lif);
3779 if (err)
3780 goto err_out_notifyq_deinit;
3781
3782 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
3783 err = ionic_rx_filters_init(lif);
3784 if (err)
3785 goto err_out_notifyq_deinit;
3786 }
3787
3788 err = ionic_station_set(lif);
3789 if (err)
3790 goto err_out_notifyq_deinit;
3791
3792 lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
3793 lif->doorbell_wa = ionic_doorbell_wa(lif->ionic);
3794
3795 set_bit(IONIC_LIF_F_INITED, lif->state);
3796
3797 INIT_WORK(&lif->tx_timeout_work,
ionic_tx_timeout_work); 3798 3799 return 0; 3800 3801 err_out_notifyq_deinit: 3802 napi_disable(&lif->adminqcq->napi); 3803 ionic_lif_qcq_deinit(lif, lif->notifyqcq); 3804 err_out_adminq_deinit: 3805 ionic_lif_qcq_deinit(lif, lif->adminqcq); 3806 ionic_lif_reset(lif); 3807 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); 3808 lif->kern_dbpage = NULL; 3809 3810 return err; 3811 } 3812 3813 static void ionic_lif_set_netdev_info(struct ionic_lif *lif) 3814 { 3815 struct ionic_admin_ctx ctx = { 3816 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 3817 .cmd.lif_setattr = { 3818 .opcode = IONIC_CMD_LIF_SETATTR, 3819 .index = cpu_to_le16(lif->index), 3820 .attr = IONIC_LIF_ATTR_NAME, 3821 }, 3822 }; 3823 3824 strscpy(ctx.cmd.lif_setattr.name, netdev_name(lif->netdev), 3825 sizeof(ctx.cmd.lif_setattr.name)); 3826 3827 ionic_adminq_post_wait(lif, &ctx); 3828 } 3829 3830 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev) 3831 { 3832 if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit) 3833 return NULL; 3834 3835 return netdev_priv(netdev); 3836 } 3837 3838 static int ionic_lif_notify(struct notifier_block *nb, 3839 unsigned long event, void *info) 3840 { 3841 struct net_device *ndev = netdev_notifier_info_to_dev(info); 3842 struct ionic *ionic = container_of(nb, struct ionic, nb); 3843 struct ionic_lif *lif = ionic_netdev_lif(ndev); 3844 3845 if (!lif || lif->ionic != ionic) 3846 return NOTIFY_DONE; 3847 3848 switch (event) { 3849 case NETDEV_CHANGENAME: 3850 ionic_lif_set_netdev_info(lif); 3851 break; 3852 } 3853 3854 return NOTIFY_DONE; 3855 } 3856 3857 int ionic_lif_register(struct ionic_lif *lif) 3858 { 3859 int err; 3860 3861 ionic_lif_register_phc(lif); 3862 3863 lif->ionic->nb.notifier_call = ionic_lif_notify; 3864 3865 err = register_netdevice_notifier(&lif->ionic->nb); 3866 if (err) 3867 lif->ionic->nb.notifier_call = NULL; 3868 3869 /* only register LIF0 for now */ 3870 err = register_netdev(lif->netdev); 3871 if (err) { 3872 dev_err(lif->ionic->dev, "Cannot register net device: %d, aborting\n", err); 3873 ionic_lif_unregister(lif); 3874 return err; 3875 } 3876 3877 ionic_link_status_check_request(lif, CAN_SLEEP); 3878 lif->registered = true; 3879 ionic_lif_set_netdev_info(lif); 3880 3881 return 0; 3882 } 3883 3884 void ionic_lif_unregister(struct ionic_lif *lif) 3885 { 3886 if (lif->ionic->nb.notifier_call) { 3887 unregister_netdevice_notifier(&lif->ionic->nb); 3888 lif->ionic->nb.notifier_call = NULL; 3889 } 3890 3891 if (lif->netdev->reg_state == NETREG_REGISTERED) 3892 unregister_netdev(lif->netdev); 3893 3894 ionic_lif_unregister_phc(lif); 3895 3896 lif->registered = false; 3897 } 3898 3899 static void ionic_lif_queue_identify(struct ionic_lif *lif) 3900 { 3901 union ionic_q_identity __iomem *q_ident; 3902 struct ionic *ionic = lif->ionic; 3903 struct ionic_dev *idev; 3904 u16 max_frags; 3905 int qtype; 3906 int err; 3907 3908 idev = &lif->ionic->idev; 3909 q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data; 3910 3911 for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) { 3912 struct ionic_qtype_info *qti = &lif->qtype_info[qtype]; 3913 3914 /* filter out the ones we know about */ 3915 switch (qtype) { 3916 case IONIC_QTYPE_ADMINQ: 3917 case IONIC_QTYPE_NOTIFYQ: 3918 case IONIC_QTYPE_RXQ: 3919 case IONIC_QTYPE_TXQ: 3920 break; 3921 default: 3922 continue; 3923 } 3924 3925 memset(qti, 0, sizeof(*qti)); 3926 3927 mutex_lock(&ionic->dev_cmd_lock); 3928 ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype, 3929 
ionic_qtype_versions[qtype]);
3930 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
3931 if (!err) {
3932 qti->version = readb(&q_ident->version);
3933 qti->supported = readb(&q_ident->supported);
3934 qti->features = readq(&q_ident->features);
3935 qti->desc_sz = readw(&q_ident->desc_sz);
3936 qti->comp_sz = readw(&q_ident->comp_sz);
3937 qti->sg_desc_sz = readw(&q_ident->sg_desc_sz);
3938 qti->max_sg_elems = readw(&q_ident->max_sg_elems);
3939 qti->sg_desc_stride = readw(&q_ident->sg_desc_stride);
3940 }
3941 mutex_unlock(&ionic->dev_cmd_lock);
3942
3943 if (err == -EINVAL) {
3944 dev_err(ionic->dev, "qtype %d not supported\n", qtype);
3945 continue;
3946 } else if (err == -EIO) {
3947 dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
3948 return;
3949 } else if (err) {
3950 dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
3951 qtype, err);
3952 return;
3953 }
3954
3955 dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
3956 qtype, qti->version);
3957 dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
3958 qtype, qti->supported);
3959 dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
3960 qtype, qti->features);
3961 dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
3962 qtype, qti->desc_sz);
3963 dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
3964 qtype, qti->comp_sz);
3965 dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
3966 qtype, qti->sg_desc_sz);
3967 dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
3968 qtype, qti->max_sg_elems);
3969 dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
3970 qtype, qti->sg_desc_stride);
3971
3972 if (qtype == IONIC_QTYPE_TXQ)
3973 max_frags = IONIC_TX_MAX_FRAGS;
3974 else if (qtype == IONIC_QTYPE_RXQ)
3975 max_frags = IONIC_RX_MAX_FRAGS;
3976 else
3977 max_frags = 1;
3978
3979 qti->max_sg_elems = min_t(u16, max_frags - 1, MAX_SKB_FRAGS);
3980 dev_dbg(ionic->dev, "qtype %d max_sg_elems %d\n",
3981 qtype, qti->max_sg_elems);
3982 }
3983 }
3984
3985 int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
3986 union ionic_lif_identity *lid)
3987 {
3988 struct ionic_dev *idev = &ionic->idev;
3989 size_t sz;
3990 int err;
3991
3992 sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));
3993
3994 mutex_lock(&ionic->dev_cmd_lock);
3995 ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
3996 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
3997 memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
3998 mutex_unlock(&ionic->dev_cmd_lock);
3999 if (err)
4000 return err;
4001
4002 dev_dbg(ionic->dev, "capabilities 0x%llx\n",
4003 le64_to_cpu(lid->capabilities));
4004
4005 dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
4006 le32_to_cpu(lid->eth.max_ucast_filters));
4007 dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
4008 le32_to_cpu(lid->eth.max_mcast_filters));
4009 dev_dbg(ionic->dev, "eth.features 0x%llx\n",
4010 le64_to_cpu(lid->eth.config.features));
4011 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
4012 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
4013 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
4014 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
4015 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
4016 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
4017 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
4018 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
4019 dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
4020 dev_dbg(ionic->dev,
"eth.config.mac %pM\n", lid->eth.config.mac); 4021 dev_dbg(ionic->dev, "eth.config.mtu %d\n", 4022 le32_to_cpu(lid->eth.config.mtu)); 4023 4024 return 0; 4025 } 4026 4027 int ionic_lif_size(struct ionic *ionic) 4028 { 4029 struct ionic_identity *ident = &ionic->ident; 4030 unsigned int nintrs, dev_nintrs; 4031 union ionic_lif_config *lc; 4032 unsigned int ntxqs_per_lif; 4033 unsigned int nrxqs_per_lif; 4034 unsigned int neqs_per_lif; 4035 unsigned int nnqs_per_lif; 4036 unsigned int nxqs, neqs; 4037 unsigned int min_intrs; 4038 int err; 4039 4040 /* retrieve basic values from FW */ 4041 lc = &ident->lif.eth.config; 4042 dev_nintrs = le32_to_cpu(ident->dev.nintrs); 4043 neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count); 4044 nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]); 4045 ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]); 4046 nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]); 4047 4048 /* limit values to play nice with kdump */ 4049 if (is_kdump_kernel()) { 4050 dev_nintrs = 2; 4051 neqs_per_lif = 0; 4052 nnqs_per_lif = 0; 4053 ntxqs_per_lif = 1; 4054 nrxqs_per_lif = 1; 4055 } 4056 4057 /* reserve last queue id for hardware timestamping */ 4058 if (lc->features & cpu_to_le64(IONIC_ETH_HW_TIMESTAMP)) { 4059 if (ntxqs_per_lif <= 1 || nrxqs_per_lif <= 1) { 4060 lc->features &= cpu_to_le64(~IONIC_ETH_HW_TIMESTAMP); 4061 } else { 4062 ntxqs_per_lif -= 1; 4063 nrxqs_per_lif -= 1; 4064 } 4065 } 4066 4067 nxqs = min(ntxqs_per_lif, nrxqs_per_lif); 4068 nxqs = min(nxqs, num_online_cpus()); 4069 neqs = min(neqs_per_lif, num_online_cpus()); 4070 4071 try_again: 4072 /* interrupt usage: 4073 * 1 for master lif adminq/notifyq 4074 * 1 for each CPU for master lif TxRx queue pairs 4075 * whatever's left is for RDMA queues 4076 */ 4077 nintrs = 1 + nxqs + neqs; 4078 min_intrs = 2; /* adminq + 1 TxRx queue pair */ 4079 4080 if (nintrs > dev_nintrs) 4081 goto try_fewer; 4082 4083 err = ionic_bus_alloc_irq_vectors(ionic, nintrs); 4084 if (err < 0 && err != -ENOSPC) { 4085 dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err); 4086 return err; 4087 } 4088 if (err == -ENOSPC) 4089 goto try_fewer; 4090 4091 if (err != nintrs) { 4092 ionic_bus_free_irq_vectors(ionic); 4093 goto try_fewer; 4094 } 4095 4096 ionic->nnqs_per_lif = nnqs_per_lif; 4097 ionic->neqs_per_lif = neqs; 4098 ionic->ntxqs_per_lif = nxqs; 4099 ionic->nrxqs_per_lif = nxqs; 4100 ionic->nintrs = nintrs; 4101 4102 ionic_debugfs_add_sizes(ionic); 4103 4104 return 0; 4105 4106 try_fewer: 4107 if (nnqs_per_lif > 1) { 4108 nnqs_per_lif >>= 1; 4109 goto try_again; 4110 } 4111 if (neqs > 1) { 4112 neqs >>= 1; 4113 goto try_again; 4114 } 4115 if (nxqs > 1) { 4116 nxqs >>= 1; 4117 goto try_again; 4118 } 4119 dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs); 4120 return -ENOSPC; 4121 } 4122