// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ethtool.h>
#include <linux/printk.h>
#include <linux/dynamic_debug.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/cpumask.h>
#include <linux/crash_dump.h>
#include <linux/vmalloc.h>
#include <net/page_pool/helpers.h>

#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_dev.h"
#include "ionic_lif.h"
#include "ionic_aux.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"

/* queuetype support level */
static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
	[IONIC_QTYPE_ADMINQ]  = 0,	/* 0 = Base version with CQ support */
	[IONIC_QTYPE_NOTIFYQ] = 0,	/* 0 = Base version */
	[IONIC_QTYPE_RXQ]     = 2,	/* 0 = Base version with CQ+SG support
					 * 2 = ... with CMB rings
					 */
	[IONIC_QTYPE_TXQ]     = 3,	/* 0 = Base version with CQ+SG support
					 * 1 = ... with Tx SG version 1
					 * 3 = ... with CMB rings
					 */
};

static void ionic_link_status_check(struct ionic_lif *lif);
static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
static void ionic_lif_set_netdev_info(struct ionic_lif *lif);

static void ionic_txrx_deinit(struct ionic_lif *lif);
static int ionic_txrx_init(struct ionic_lif *lif);
static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);

static void ionic_xdp_rxqs_prog_update(struct ionic_lif *lif);
static void ionic_unregister_rxq_info(struct ionic_queue *q);
static int ionic_register_rxq_info(struct ionic_queue *q, unsigned int napi_id);

static void ionic_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct dim_cq_moder cur_moder;
	struct ionic_intr_info *intr;
	struct ionic_qcq *qcq;
	struct ionic_lif *lif;
	struct ionic_queue *q;
	u32 new_coal;

	qcq = container_of(dim, struct ionic_qcq, dim);
	q = &qcq->q;
	if (q->type == IONIC_QTYPE_RXQ)
		cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	else
		cur_moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
	lif = q->lif;
	new_coal = ionic_coal_usec_to_hw(lif->ionic, cur_moder.usec);
	new_coal = new_coal ? new_coal : 1;

	intr = &qcq->intr;
	if (intr->dim_coal_hw != new_coal) {
		intr->dim_coal_hw = new_coal;

		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     intr->index, intr->dim_coal_hw);
	}

	dim->state = DIM_START_MEASURE;
}

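/* LIF deferred work: rx_mode updates, link checks, and FW reset
 * handling are funneled through one work item so they run in process
 * context, where adminq commands are allowed to sleep.
 */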
static void ionic_lif_deferred_work(struct work_struct *work)
{
	struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
	struct ionic_deferred *def = &lif->deferred;
	struct ionic_deferred_work *w = NULL;

	do {
		spin_lock_bh(&def->lock);
		if (!list_empty(&def->list)) {
			w = list_first_entry(&def->list,
					     struct ionic_deferred_work, list);
			list_del(&w->list);
		}
		spin_unlock_bh(&def->lock);

		if (!w)
			break;

		switch (w->type) {
		case IONIC_DW_TYPE_RX_MODE:
			ionic_lif_rx_mode(lif);
			break;
		case IONIC_DW_TYPE_LINK_STATUS:
			ionic_link_status_check(lif);
			break;
		case IONIC_DW_TYPE_LIF_RESET:
			if (w->fw_status) {
				ionic_lif_handle_fw_up(lif);
			} else {
				ionic_lif_handle_fw_down(lif);

				/* Fire off another watchdog to see
				 * if the FW is already back rather than
				 * waiting another whole cycle
				 */
				mod_timer(&lif->ionic->watchdog_timer, jiffies + 1);
			}
			break;
		default:
			break;
		}
		kfree(w);
		w = NULL;
	} while (true);
}

void ionic_lif_deferred_enqueue(struct ionic_lif *lif,
				struct ionic_deferred_work *work)
{
	spin_lock_bh(&lif->deferred.lock);
	list_add_tail(&work->list, &lif->deferred.list);
	spin_unlock_bh(&lif->deferred.lock);
	queue_work(lif->ionic->wq, &lif->deferred.work);
}

static void ionic_link_status_check(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	u16 link_status;
	bool link_up;

	if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	/* Don't put carrier back up if we're in a broken state */
	if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) {
		clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
		return;
	}

	link_status = le16_to_cpu(lif->info->status.link_status);
	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;

	if (link_up) {
		int err = 0;

		if (netdev->flags & IFF_UP && netif_running(netdev)) {
			mutex_lock(&lif->queue_lock);
			err = ionic_start_queues(lif);
			if (err && err != -EBUSY) {
				netdev_err(netdev,
					   "Failed to start queues: %d\n", err);
				set_bit(IONIC_LIF_F_BROKEN, lif->state);
				netif_carrier_off(lif->netdev);
			}
			mutex_unlock(&lif->queue_lock);
		}

		if (!err && !netif_carrier_ok(netdev)) {
			ionic_port_identify(lif->ionic);
			netdev_info(netdev, "Link up - %d Gbps\n",
				    le32_to_cpu(lif->info->status.link_speed) / 1000);
			netif_carrier_on(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			lif->link_down_count++;
			netdev_info(netdev, "Link down\n");
			netif_carrier_off(netdev);
		}

		if (netdev->flags & IFF_UP && netif_running(netdev)) {
			mutex_lock(&lif->queue_lock);
			ionic_stop_queues(lif);
			mutex_unlock(&lif->queue_lock);
		}
	}

	clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
}

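/* Request a link status check; from atomic context the check is
 * deferred to the workqueue, otherwise it runs inline.
 */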
void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
{
	struct ionic_deferred_work *work;

	/* we only need one request outstanding at a time */
	if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	if (!can_sleep) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
			return;
		}

		work->type = IONIC_DW_TYPE_LINK_STATUS;
		ionic_lif_deferred_enqueue(lif, work);
	} else {
		ionic_link_status_check(lif);
	}
}

static irqreturn_t ionic_isr(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_intr_info *intr = &qcq->intr;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	const char *name;

	if (lif->registered)
		name = netdev_name(lif->netdev);
	else
		name = dev_name(dev);

	snprintf(intr->name, sizeof(intr->name),
		 "%.5s-%.16s-%.8s", IONIC_DRV_NAME, name, q->name);

	return devm_request_irq(dev, intr->vector, ionic_isr,
				0, intr->name, &qcq->napi);
}

int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
{
	struct ionic *ionic = lif->ionic;
	int index, err;

	index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
	if (index == ionic->nintrs)
		return -ENOSPC;

	set_bit(index, ionic->intrs);
	ionic_intr_init(&ionic->idev, intr, index);

	err = ionic_bus_get_irq(ionic, intr->index);
	if (err < 0) {
		clear_bit(index, ionic->intrs);
		return err;
	}

	intr->vector = err;

	return 0;
}
EXPORT_SYMBOL_NS(ionic_intr_alloc, "NET_IONIC");

void ionic_intr_free(struct ionic_lif *lif, int index)
{
	if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < lif->ionic->nintrs)
		clear_bit(index, lif->ionic->intrs);
}
EXPORT_SYMBOL_NS(ionic_intr_free, "NET_IONIC");

static void ionic_irq_aff_notify(struct irq_affinity_notify *notify,
				 const cpumask_t *mask)
{
	struct ionic_intr_info *intr = container_of(notify, struct ionic_intr_info, aff_notify);

	cpumask_copy(*intr->affinity_mask, mask);
}

static void ionic_irq_aff_release(struct kref __always_unused *ref)
{
}

static int ionic_qcq_enable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = q->lif;
	struct ionic_dev *idev;
	struct device *dev;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.oper = IONIC_Q_ENABLE,
		},
	};
	int ret;

	idev = &lif->ionic->idev;
	dev = lif->ionic->dev;

	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	if (qcq->flags & IONIC_QCQ_F_INTR)
		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);

	ret = ionic_adminq_post_wait(lif, &ctx);
	if (ret)
		return ret;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		napi_enable(&qcq->napi);
		irq_set_affinity_notifier(qcq->intr.vector,
					  &qcq->intr.aff_notify);
		irq_set_affinity_hint(qcq->intr.vector,
				      *qcq->intr.affinity_mask);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);
	}

	return 0;
}

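/* Quiesce a queue: flush any pending napi/dim work and mask the
 * interrupt before asking the device to disable the queue.
 */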
static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err)
{
	struct ionic_queue *q;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.oper = IONIC_Q_DISABLE,
		},
	};

	if (!qcq) {
		netdev_err(lif->netdev, "%s: bad qcq\n", __func__);
		return -ENXIO;
	}

	q = &qcq->q;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		struct ionic_dev *idev = &lif->ionic->idev;

		if (lif->doorbell_wa)
			cancel_work_sync(&qcq->doorbell_napi_work);
		cancel_work_sync(&qcq->dim.work);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		synchronize_irq(qcq->intr.vector);
		irq_set_affinity_notifier(qcq->intr.vector, NULL);
		irq_set_affinity_hint(qcq->intr.vector, NULL);
		napi_disable(&qcq->napi);
	}

	/* If there was a previous fw communication error, don't bother with
	 * sending the adminq command and just return the same error value.
	 */
	if (fw_err == -ETIMEDOUT || fw_err == -ENXIO)
		return fw_err;

	ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
	ctx.cmd.q_control.type = q->type;
	ctx.cmd.q_control.index = cpu_to_le32(q->index);
	dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	return ionic_adminq_post_wait(lif, &ctx);
}

static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	if (!qcq)
		return;

	if (!(qcq->flags & IONIC_QCQ_F_INITED))
		return;

	ionic_unregister_rxq_info(&qcq->q);
	if (qcq->flags & IONIC_QCQ_F_INTR) {
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		netif_napi_del(&qcq->napi);
	}

	qcq->flags &= ~IONIC_QCQ_F_INITED;
}

static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
		return;

	irq_set_affinity_hint(qcq->intr.vector, NULL);
	devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
	qcq->intr.vector = 0;
	ionic_intr_free(lif, qcq->intr.index);
	qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}

static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;

	if (!qcq)
		return;

	ionic_debugfs_del_qcq(qcq);

	if (qcq->q_base) {
		dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
		qcq->q_base = NULL;
		qcq->q_base_pa = 0;
	}

	if (qcq->cmb_q_base) {
		iounmap(qcq->cmb_q_base);
		ionic_put_cmb(lif, qcq->cmb_pgid, qcq->cmb_order);
		qcq->cmb_pgid = 0;
		qcq->cmb_order = 0;
		qcq->cmb_q_base = NULL;
		qcq->cmb_q_base_pa = 0;
	}

	if (qcq->cq_base) {
		dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
		qcq->cq_base = NULL;
		qcq->cq_base_pa = 0;
	}

	if (qcq->sg_base) {
		dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
		qcq->sg_base = NULL;
		qcq->sg_base_pa = 0;
	}

	page_pool_destroy(qcq->q.page_pool);
	qcq->q.page_pool = NULL;

	ionic_qcq_intr_free(lif, qcq);
	vfree(qcq->q.info);
	qcq->q.info = NULL;
}

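/* Free everything allocated by ionic_qcqs_alloc(); the adminq is
 * detached under adminq_lock so a racing adminq user sees either a
 * valid qcq pointer or NULL.
 */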
void ionic_qcqs_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_qcq *adminqcq;
	unsigned long irqflags;

	if (lif->notifyqcq) {
		ionic_qcq_free(lif, lif->notifyqcq);
		devm_kfree(dev, lif->notifyqcq);
		lif->notifyqcq = NULL;
	}

	if (lif->adminqcq) {
		spin_lock_irqsave(&lif->adminq_lock, irqflags);
		adminqcq = READ_ONCE(lif->adminqcq);
		lif->adminqcq = NULL;
		spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
		if (adminqcq) {
			ionic_qcq_free(lif, adminqcq);
			devm_kfree(dev, adminqcq);
		}
	}

	if (lif->rxqcqs) {
		devm_kfree(dev, lif->rxqstats);
		lif->rxqstats = NULL;
		devm_kfree(dev, lif->rxqcqs);
		lif->rxqcqs = NULL;
	}

	if (lif->txqcqs) {
		devm_kfree(dev, lif->txqstats);
		lif->txqstats = NULL;
		devm_kfree(dev, lif->txqcqs);
		lif->txqcqs = NULL;
	}
}

static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
				      struct ionic_qcq *n_qcq)
{
	n_qcq->intr.vector = src_qcq->intr.vector;
	n_qcq->intr.index = src_qcq->intr.index;
}

static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	cpumask_var_t *affinity_mask;
	int err;

	if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
		qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
		return 0;
	}

	err = ionic_intr_alloc(lif, &qcq->intr);
	if (err) {
		netdev_warn(lif->netdev, "no intr for %s: %d\n",
			    qcq->q.name, err);
		goto err_out;
	}

	ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
			       IONIC_INTR_MASK_SET);

	err = ionic_request_irq(lif, qcq);
	if (err) {
		netdev_warn(lif->netdev, "irq request failed %d\n", err);
		goto err_out_free_intr;
	}

	/* try to get the irq on the local numa node first */
	affinity_mask = &lif->ionic->affinity_masks[qcq->intr.index];
	if (cpumask_empty(*affinity_mask)) {
		unsigned int cpu;

		cpu = cpumask_local_spread(qcq->intr.index,
					   dev_to_node(lif->ionic->dev));
		if (cpu != -1)
			cpumask_set_cpu(cpu, *affinity_mask);
	}

	qcq->intr.affinity_mask = affinity_mask;
	qcq->intr.aff_notify.notify = ionic_irq_aff_notify;
	qcq->intr.aff_notify.release = ionic_irq_aff_release;

	netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
	return 0;

err_out_free_intr:
	ionic_intr_free(lif, qcq->intr.index);
err_out:
	return err;
}

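/* Allocate a queue-completion queue pair along with its descriptor
 * rings, optional SG ring, interrupt, and (for Rx) a page_pool.
 */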
static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
			   unsigned int index,
			   const char *name, unsigned int flags,
			   unsigned int num_descs, unsigned int desc_size,
			   unsigned int cq_desc_size,
			   unsigned int sg_desc_size,
			   unsigned int desc_info_size,
			   unsigned int pid, struct bpf_prog *xdp_prog,
			   struct ionic_qcq **qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	struct ionic_qcq *new;
	int err;

	*qcq = NULL;

	new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
	if (!new) {
		netdev_err(lif->netdev, "Cannot allocate queue structure\n");
		err = -ENOMEM;
		goto err_out;
	}

	new->q.dev = dev;
	new->flags = flags;

	new->q.info = vcalloc(num_descs, desc_info_size);
	if (!new->q.info) {
		netdev_err(lif->netdev, "Cannot allocate queue info\n");
		err = -ENOMEM;
		goto err_out_free_qcq;
	}

	if (type == IONIC_QTYPE_RXQ) {
		struct page_pool_params pp_params = {
			.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
			.order = 0,
			.pool_size = num_descs,
			.nid = NUMA_NO_NODE,
			.dev = lif->ionic->dev,
			.napi = &new->napi,
			.dma_dir = DMA_FROM_DEVICE,
			.max_len = PAGE_SIZE,
			.netdev = lif->netdev,
		};

		if (xdp_prog)
			pp_params.dma_dir = DMA_BIDIRECTIONAL;

		new->q.page_pool = page_pool_create(&pp_params);
		if (IS_ERR(new->q.page_pool)) {
			netdev_err(lif->netdev, "Cannot create page_pool\n");
			err = PTR_ERR(new->q.page_pool);
			new->q.page_pool = NULL;
			goto err_out_free_q_info;
		}
	}

	new->q.type = type;
	new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;

	err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
			   desc_size, sg_desc_size, pid);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize queue\n");
		goto err_out_free_page_pool;
	}

	err = ionic_alloc_qcq_interrupt(lif, new);
	if (err)
		goto err_out_free_page_pool;

	err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize completion queue\n");
		goto err_out_free_irq;
	}

	if (flags & IONIC_QCQ_F_NOTIFYQ) {
		int q_size;

		/* q & cq need to be contiguous in NotifyQ, so alloc it all in q
		 * and don't alloc qc.  We leave new->qc_size and new->qc_base
		 * as 0 to be sure we don't try to free it later.
		 */
		q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
		new->q_size = PAGE_SIZE + q_size +
			      ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
		new->q_base = dma_alloc_coherent(dev, new->q_size,
						 &new->q_base_pa, GFP_KERNEL);
		if (!new->q_base) {
			netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_irq;
		}
		new->q.base = PTR_ALIGN(new->q_base, PAGE_SIZE);
		new->q.base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);

		/* Base the NotifyQ cq.base off of the ALIGNed q.base */
		new->cq.base = PTR_ALIGN(new->q.base + q_size, PAGE_SIZE);
		new->cq.base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
		new->cq.bound_q = &new->q;
	} else {
		/* regular DMA q descriptors */
		new->q_size = PAGE_SIZE + (num_descs * desc_size);
		new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
						 GFP_KERNEL);
		if (!new->q_base) {
			netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_irq;
		}
		new->q.base = PTR_ALIGN(new->q_base, PAGE_SIZE);
		new->q.base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);

		if (flags & IONIC_QCQ_F_CMB_RINGS) {
			/* on-chip CMB q descriptors */
			new->cmb_q_size = num_descs * desc_size;
			new->cmb_order = order_base_2(new->cmb_q_size / PAGE_SIZE);

			err = ionic_get_cmb(lif, &new->cmb_pgid, &new->cmb_q_base_pa,
					    new->cmb_order, 0, NULL);
			if (err) {
				netdev_err(lif->netdev,
					   "Cannot allocate queue order %d from cmb: err %d\n",
					   new->cmb_order, err);
				goto err_out_free_q;
			}

			new->cmb_q_base = ioremap_wc(new->cmb_q_base_pa, new->cmb_q_size);
			if (!new->cmb_q_base) {
				netdev_err(lif->netdev, "Cannot map queue from cmb\n");
				ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
				err = -ENOMEM;
				goto err_out_free_q;
			}

			new->cmb_q_base_pa -= idev->phy_cmb_pages;
			new->q.cmb_base = new->cmb_q_base;
			new->q.cmb_base_pa = new->cmb_q_base_pa;
		}

		/* cq DMA descriptors */
		new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
		new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
						  GFP_KERNEL);
		if (!new->cq_base) {
			netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_q;
		}
		new->cq.base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
		new->cq.base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
		new->cq.bound_q = &new->q;
	}

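	/* optional scatter-gather descriptor ring, also page-aligned */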
	if (flags & IONIC_QCQ_F_SG) {
		new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
		new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
						  GFP_KERNEL);
		if (!new->sg_base) {
			netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq;
		}
		new->q.sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
		new->q.sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
	}

	INIT_WORK(&new->dim.work, ionic_dim_work);
	new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
	if (lif->doorbell_wa)
		INIT_WORK(&new->doorbell_napi_work, ionic_doorbell_napi_work);

	*qcq = new;

	return 0;

err_out_free_cq:
	dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
err_out_free_q:
	if (new->cmb_q_base) {
		iounmap(new->cmb_q_base);
		ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
	}
	dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
err_out_free_irq:
	if (flags & IONIC_QCQ_F_INTR) {
		devm_free_irq(dev, new->intr.vector, &new->napi);
		ionic_intr_free(lif, new->intr.index);
	}
err_out_free_page_pool:
	page_pool_destroy(new->q.page_pool);
err_out_free_q_info:
	vfree(new->q.info);
err_out_free_qcq:
	devm_kfree(dev, new);
err_out:
	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
	return err;
}

static int ionic_qcqs_alloc(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	unsigned int flags;
	int err;

	flags = IONIC_QCQ_F_INTR;
	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
			      IONIC_ADMINQ_LENGTH,
			      sizeof(struct ionic_admin_cmd),
			      sizeof(struct ionic_admin_comp),
			      0,
			      sizeof(struct ionic_admin_desc_info),
			      lif->kern_pid, NULL, &lif->adminqcq);
	if (err)
		return err;
	ionic_debugfs_add_qcq(lif, lif->adminqcq);

	if (lif->ionic->nnqs_per_lif) {
		flags = IONIC_QCQ_F_NOTIFYQ;
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
				      flags, IONIC_NOTIFYQ_LENGTH,
				      sizeof(struct ionic_notifyq_cmd),
				      sizeof(union ionic_notifyq_comp),
				      0,
				      sizeof(struct ionic_admin_desc_info),
				      lif->kern_pid, NULL, &lif->notifyqcq);
		if (err)
			goto err_out;
		ionic_debugfs_add_qcq(lif, lif->notifyqcq);

		/* Let the notifyq ride on the adminq interrupt */
		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
	}

	err = -ENOMEM;
	lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
				   sizeof(*lif->txqcqs), GFP_KERNEL);
	if (!lif->txqcqs)
		goto err_out;
	lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
				   sizeof(*lif->rxqcqs), GFP_KERNEL);
	if (!lif->rxqcqs)
		goto err_out;

	lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif + 1,
				     sizeof(*lif->txqstats), GFP_KERNEL);
	if (!lif->txqstats)
		goto err_out;
	lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif + 1,
				     sizeof(*lif->rxqstats), GFP_KERNEL);
	if (!lif->rxqstats)
		goto err_out;

	return 0;

err_out:
	ionic_qcqs_free(lif);
	return err;
}

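/* Reset ring indices and zero the descriptor memory so a queue being
 * (re)initialized starts from a clean state.
 */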
static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
{
	qcq->q.tail_idx = 0;
	qcq->q.head_idx = 0;
	qcq->cq.tail_idx = 0;
	qcq->cq.done_color = 1;
	memset(qcq->q_base, 0, qcq->q_size);
	if (qcq->cmb_q_base)
		memset_io(qcq->cmb_q_base, 0, qcq->cmb_q_size);
	memset(qcq->cq_base, 0, qcq->cq_size);
	memset(qcq->sg_base, 0, qcq->sg_size);
}

static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.intr_index = cpu_to_le16(qcq->intr.index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
			.features = cpu_to_le64(q->features),
		},
	};
	int err;

	if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
		ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
		ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
	}

	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "txq_init.cq_ring_base 0x%llx\n", ctx.cmd.q_init.cq_ring_base);
	dev_dbg(dev, "txq_init.sg_ring_base 0x%llx\n", ctx.cmd.q_init.sg_ring_base);
	dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);

	q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE;
	q->dbell_jiffies = jiffies;

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

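/* Rx queues are paired with a Tx partner for XDP_TX; SG mode is only
 * requested when there is no XDP program or the program accepts frags.
 */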
static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ),
			.intr_index = cpu_to_le16(cq->bound_intr->index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
			.features = cpu_to_le64(q->features),
		},
	};
	int err;

	q->partner = &lif->txqcqs[q->index]->q;
	q->partner->partner = q;

	if (!lif->xdp_prog ||
	    (lif->xdp_prog->aux && lif->xdp_prog->aux->xdp_has_frags))
		ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_SG);

	if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
		ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
		ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
	}

	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);

	q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
	q->dbell_jiffies = jiffies;

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi);
	else
		netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);
	err = ionic_register_rxq_info(q, qcq->napi.napi_id);
	if (err) {
		netif_napi_del(&qcq->napi);
		return err;
	}

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
{
	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
	unsigned int txq_i, flags;
	struct ionic_qcq *txq;
	u64 features;
	int err;

	if (lif->hwstamp_txq)
		return 0;

	features = IONIC_Q_F_2X_CQ_DESC | IONIC_TXQ_F_HWSTAMP;

	num_desc = IONIC_MIN_TXRX_DESC;
	desc_sz = sizeof(struct ionic_txq_desc);
	comp_sz = 2 * sizeof(struct ionic_txq_comp);

	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	txq_i = lif->ionic->ntxqs_per_lif;
	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;

	err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
			      num_desc, desc_sz, comp_sz, sg_desc_sz,
			      sizeof(struct ionic_tx_desc_info),
			      lif->kern_pid, NULL, &txq);
	if (err)
		goto err_qcq_alloc;

	txq->q.features = features;

	ionic_link_qcq_interrupts(lif->adminqcq, txq);
	ionic_debugfs_add_qcq(lif, txq);

	lif->hwstamp_txq = txq;

	if (netif_running(lif->netdev)) {
		err = ionic_lif_txq_init(lif, txq);
		if (err)
			goto err_qcq_init;

		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
			err = ionic_qcq_enable(txq);
			if (err)
				goto err_qcq_enable;
		}
	}

	return 0;

err_qcq_enable:
	ionic_lif_qcq_deinit(lif, txq);
err_qcq_init:
	lif->hwstamp_txq = NULL;
	ionic_debugfs_del_qcq(txq);
	ionic_qcq_free(lif, txq);
	devm_kfree(lif->ionic->dev, txq);
err_qcq_alloc:
	return err;
}

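/* Like the hwstamp txq, the hwstamp rxq rides on the adminq interrupt
 * rather than taking a vector of its own.
 */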
int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
{
	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
	unsigned int rxq_i, flags;
	struct ionic_qcq *rxq;
	u64 features;
	int err;

	if (lif->hwstamp_rxq)
		return 0;

	features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;

	num_desc = IONIC_MIN_TXRX_DESC;
	desc_sz = sizeof(struct ionic_rxq_desc);
	comp_sz = 2 * sizeof(struct ionic_rxq_comp);
	sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);

	rxq_i = lif->ionic->nrxqs_per_lif;
	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;

	err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
			      num_desc, desc_sz, comp_sz, sg_desc_sz,
			      sizeof(struct ionic_rx_desc_info),
			      lif->kern_pid, NULL, &rxq);
	if (err)
		goto err_qcq_alloc;

	rxq->q.features = features;

	ionic_link_qcq_interrupts(lif->adminqcq, rxq);
	ionic_debugfs_add_qcq(lif, rxq);

	lif->hwstamp_rxq = rxq;

	if (netif_running(lif->netdev)) {
		err = ionic_lif_rxq_init(lif, rxq);
		if (err)
			goto err_qcq_init;

		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
			ionic_rx_fill(&rxq->q, NULL);
			err = ionic_qcq_enable(rxq);
			if (err)
				goto err_qcq_enable;
		}
	}

	return 0;

err_qcq_enable:
	ionic_lif_qcq_deinit(lif, rxq);
err_qcq_init:
	lif->hwstamp_rxq = NULL;
	ionic_debugfs_del_qcq(rxq);
	ionic_qcq_free(lif, rxq);
	devm_kfree(lif->ionic->dev, rxq);
err_qcq_alloc:
	return err;
}

int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif *lif, bool rx_all)
{
	struct ionic_queue_params qparam;

	ionic_init_queue_params(lif, &qparam);

	if (rx_all)
		qparam.rxq_features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;
	else
		qparam.rxq_features = 0;

	/* if we're not running, just set the values and return */
	if (!netif_running(lif->netdev)) {
		lif->rxq_features = qparam.rxq_features;
		return 0;
	}

	return ionic_reconfigure_queues(lif, &qparam);
}

int ionic_lif_set_hwstamp_txmode(struct ionic_lif *lif, u16 txstamp_mode)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_TXSTAMP,
			.txstamp_mode = cpu_to_le16(txstamp_mode),
		},
	};

	return ionic_adminq_post_wait(lif, &ctx);
}

static void ionic_lif_del_hwstamp_rxfilt(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	u32 filter_id;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_rxsteer(lif);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return;
	}

	filter_id = f->filter_id;
	ionic_rx_filter_free(lif, f);

	spin_unlock_bh(&lif->rx_filters.lock);

	netdev_dbg(lif->netdev, "rx_filter del RXSTEER (id %d)\n", filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(filter_id);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		netdev_dbg(lif->netdev, "failed to delete rx_filter RXSTEER (id %d)\n", filter_id);
}

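/* Add a steering filter that directs the given packet class to the
 * hwstamp rxq.
 */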
static int ionic_lif_add_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_STEER_PKTCLASS),
			.pkt_class = cpu_to_le64(pkt_class),
		},
	};
	u8 qtype;
	u32 qid;
	int err;

	if (!lif->hwstamp_rxq)
		return -EINVAL;

	qtype = lif->hwstamp_rxq->q.type;
	ctx.cmd.rx_filter_add.qtype = qtype;

	qid = lif->hwstamp_rxq->q.index;
	ctx.cmd.rx_filter_add.qid = cpu_to_le32(qid);

	netdev_dbg(lif->netdev, "rx_filter add RXSTEER\n");
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	spin_lock_bh(&lif->rx_filters.lock);
	err = ionic_rx_filter_save(lif, 0, qid, 0, &ctx, IONIC_FILTER_STATE_SYNCED);
	spin_unlock_bh(&lif->rx_filters.lock);

	return err;
}

int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
{
	ionic_lif_del_hwstamp_rxfilt(lif);

	if (!pkt_class)
		return 0;

	return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class);
}

static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{
	struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
	struct ionic_lif *lif = napi_to_cq(napi)->lif;
	struct ionic_dev *idev = &lif->ionic->idev;
	unsigned long irqflags;
	unsigned int flags = 0;
	int rx_work = 0;
	int tx_work = 0;
	int n_work = 0;
	int a_work = 0;
	int work_done;
	int credits;

	if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
		n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
					  ionic_notifyq_service, NULL, NULL);

	spin_lock_irqsave(&lif->adminq_lock, irqflags);
	if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
		a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
					  ionic_adminq_service, NULL, NULL);
	spin_unlock_irqrestore(&lif->adminq_lock, irqflags);

	if (lif->hwstamp_rxq)
		rx_work = ionic_cq_service(&lif->hwstamp_rxq->cq, budget,
					   ionic_rx_service, NULL, NULL);

	if (lif->hwstamp_txq)
		tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget, !!budget);

	work_done = max(max(n_work, a_work), max(rx_work, tx_work));
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		flags |= IONIC_INTR_CRED_UNMASK;
		intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		credits = n_work + a_work + rx_work + tx_work;
		ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
	}

	if (lif->doorbell_wa) {
		if (!a_work)
			ionic_adminq_poke_doorbell(&lif->adminqcq->q);
		if (lif->hwstamp_rxq && !rx_work)
			ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q);
		if (lif->hwstamp_txq && !tx_work)
			ionic_txq_poke_doorbell(&lif->hwstamp_txq->q);
	}

	return work_done;
}

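/* ndo_get_stats64: report the firmware-maintained LIF counters */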
void ionic_get_stats64(struct net_device *netdev,
		       struct rtnl_link_stats64 *ns)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_lif_stats *ls;

	memset(ns, 0, sizeof(*ns));
	ls = &lif->info->stats;

	ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
			 le64_to_cpu(ls->rx_mcast_packets) +
			 le64_to_cpu(ls->rx_bcast_packets);

	ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
			 le64_to_cpu(ls->tx_mcast_packets) +
			 le64_to_cpu(ls->tx_bcast_packets);

	ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
		       le64_to_cpu(ls->rx_mcast_bytes) +
		       le64_to_cpu(ls->rx_bcast_bytes);

	ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
		       le64_to_cpu(ls->tx_mcast_bytes) +
		       le64_to_cpu(ls->tx_bcast_bytes);

	ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
			 le64_to_cpu(ls->rx_mcast_drop_packets) +
			 le64_to_cpu(ls->rx_bcast_drop_packets);

	ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
			 le64_to_cpu(ls->tx_mcast_drop_packets) +
			 le64_to_cpu(ls->tx_bcast_drop_packets);

	ns->multicast = le64_to_cpu(ls->rx_mcast_packets);

	ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);

	ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
			       le64_to_cpu(ls->rx_queue_disabled) +
			       le64_to_cpu(ls->rx_desc_fetch_error) +
			       le64_to_cpu(ls->rx_desc_data_error);

	ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
				le64_to_cpu(ls->tx_queue_disabled) +
				le64_to_cpu(ls->tx_desc_fetch_error) +
				le64_to_cpu(ls->tx_desc_data_error);

	ns->rx_errors = ns->rx_over_errors +
			ns->rx_missed_errors;

	ns->tx_errors = ns->tx_aborted_errors;
}

static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_list_addr(netdev_priv(netdev), addr, ADD_ADDR);
}

static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
	/* Don't delete our own address from the uc list */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	return ionic_lif_list_addr(netdev_priv(netdev), addr, DEL_ADDR);
}

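/* Compute and push the LIF rx_mode, falling back to PROMISC and
 * ALLMULTI when the hardware filter tables have overflowed.
 */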
void ionic_lif_rx_mode(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	unsigned int nfilters;
	unsigned int nd_flags;
	char buf[128];
	u16 rx_mode;
	int i;
#define REMAIN(__x) (sizeof(buf) - (__x))

	mutex_lock(&lif->config_lock);

	/* grab the flags once for local use */
	nd_flags = netdev->flags;

	rx_mode = IONIC_RX_MODE_F_UNICAST;
	rx_mode |= (nd_flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
	rx_mode |= (nd_flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
	rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
	rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;

	/* sync the filters */
	ionic_rx_filter_sync(lif);

	/* check for overflow state
	 *    if so, we track that we overflowed and enable NIC PROMISC
	 *    else if the overflow is set and not needed
	 *       we remove our overflow flag and check the netdev flags
	 *       to see if we can disable NIC PROMISC
	 */
	nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);

	if (((lif->nucast + lif->nmcast) >= nfilters) ||
	    (lif->max_vlans && lif->nvlans >= lif->max_vlans)) {
		rx_mode |= IONIC_RX_MODE_F_PROMISC;
		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
	} else {
		if (!(nd_flags & IFF_PROMISC))
			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
		if (!(nd_flags & IFF_ALLMULTI))
			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
	}

	i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
		      lif->rx_mode, rx_mode);
	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
	if (rx_mode & IONIC_RX_MODE_F_RDMA_SNIFFER)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_RDMA_SNIFFER");
	netdev_dbg(netdev, "lif%d %s\n", lif->index, buf);

	if (lif->rx_mode != rx_mode) {
		struct ionic_admin_ctx ctx = {
			.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
			.cmd.rx_mode_set = {
				.opcode = IONIC_CMD_RX_MODE_SET,
				.lif_index = cpu_to_le16(lif->index),
			},
		};
		int err;

		ctx.cmd.rx_mode_set.rx_mode = cpu_to_le16(rx_mode);
		err = ionic_adminq_post_wait(lif, &ctx);
		if (err)
			netdev_warn(netdev, "set rx_mode 0x%04x failed: %d\n",
				    rx_mode, err);
		else
			lif->rx_mode = rx_mode;
	}

	mutex_unlock(&lif->config_lock);
}

static void ionic_ndo_set_rx_mode(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_deferred_work *work;

	/* Sync the kernel filter list with the driver filter list */
	__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
	__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);

	/* Shove off the rest of the rxmode work to the work task
	 * which will include syncing the filters to the firmware.
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		netdev_err(lif->netdev, "rxmode change dropped\n");
		return;
	}
	work->type = IONIC_DW_TYPE_RX_MODE;
	netdev_dbg(lif->netdev, "deferred: rx_mode\n");
	ionic_lif_deferred_enqueue(lif, work);
}

static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
{
	u64 wanted = 0;

	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
	if (features & NETIF_F_RXHASH)
		wanted |= IONIC_ETH_HW_RX_HASH;
	if (features & NETIF_F_RXCSUM)
		wanted |= IONIC_ETH_HW_RX_CSUM;
	if (features & NETIF_F_SG)
		wanted |= IONIC_ETH_HW_TX_SG;
	if (features & NETIF_F_HW_CSUM)
		wanted |= IONIC_ETH_HW_TX_CSUM;
	if (features & NETIF_F_TSO)
		wanted |= IONIC_ETH_HW_TSO;
	if (features & NETIF_F_TSO6)
		wanted |= IONIC_ETH_HW_TSO_IPV6;
	if (features & NETIF_F_TSO_ECN)
		wanted |= IONIC_ETH_HW_TSO_ECN;
	if (features & NETIF_F_GSO_GRE)
		wanted |= IONIC_ETH_HW_TSO_GRE;
	if (features & NETIF_F_GSO_GRE_CSUM)
		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
	if (features & NETIF_F_GSO_IPXIP4)
		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
	if (features & NETIF_F_GSO_IPXIP6)
		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
	if (features & NETIF_F_GSO_UDP_TUNNEL)
		wanted |= IONIC_ETH_HW_TSO_UDP;
	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;

	return cpu_to_le64(wanted);
}

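/* Push the wanted features to the device; the completion reports
 * which features the device actually enabled.
 */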
static int ionic_set_nic_features(struct ionic_lif *lif,
				  netdev_features_t features)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_FEATURES,
		},
	};
	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
			 IONIC_ETH_HW_VLAN_RX_STRIP |
			 IONIC_ETH_HW_VLAN_RX_FILTER;
	u64 old_hw_features;
	int err;

	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);

	if (lif->phc)
		ctx.cmd.lif_setattr.features |= cpu_to_le64(IONIC_ETH_HW_TIMESTAMP);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	old_hw_features = lif->hw_features;
	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
				       ctx.comp.lif_setattr.features);

	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);

	if ((vlan_flags & le64_to_cpu(ctx.cmd.lif_setattr.features)) &&
	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		dev_dbg(dev, "feature ETH_HW_TSO\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TIMESTAMP)
		dev_dbg(dev, "feature ETH_HW_TIMESTAMP\n");

	return 0;
}

static int ionic_init_nic_features(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	netdev_features_t features;
	int err;

	/* set up what we expect to support by default */
	features = NETIF_F_HW_VLAN_CTAG_TX |
		   NETIF_F_HW_VLAN_CTAG_RX |
		   NETIF_F_HW_VLAN_CTAG_FILTER |
		   NETIF_F_SG |
		   NETIF_F_HW_CSUM |
		   NETIF_F_RXCSUM |
		   NETIF_F_TSO |
		   NETIF_F_TSO6 |
		   NETIF_F_TSO_ECN |
		   NETIF_F_GSO_GRE |
		   NETIF_F_GSO_GRE_CSUM |
		   NETIF_F_GSO_IPXIP4 |
		   NETIF_F_GSO_IPXIP6 |
		   NETIF_F_GSO_UDP_TUNNEL |
		   NETIF_F_GSO_UDP_TUNNEL_CSUM;

	if (lif->nxqs > 1)
		features |= NETIF_F_RXHASH;

	err = ionic_set_nic_features(lif, features);
	if (err)
		return err;

	/* tell the netdev what we actually can support */
	netdev->features |= NETIF_F_HIGHDMA;

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		netdev->hw_features |= NETIF_F_SG;

	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		netdev->hw_enc_features |= NETIF_F_RXCSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		netdev->hw_enc_features |= NETIF_F_TSO;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		netdev->hw_enc_features |= NETIF_F_TSO6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= netdev->hw_enc_features;
	netdev->features |= netdev->hw_features;
	netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;

	netdev->priv_flags |= IFF_UNICAST_FLT |
			      IFF_LIVE_ADDR_CHANGE;

	netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
			       NETDEV_XDP_ACT_REDIRECT |
			       NETDEV_XDP_ACT_RX_SG |
			       NETDEV_XDP_ACT_NDO_XMIT |
			       NETDEV_XDP_ACT_NDO_XMIT_SG;

	return 0;
}

static int ionic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
		   __func__, (u64)lif->netdev->features, (u64)features);

	err = ionic_set_nic_features(lif, features);

	return err;
}

static int ionic_set_attr_mac(struct ionic_lif *lif, u8 *mac)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};

	ether_addr_copy(ctx.cmd.lif_setattr.mac, mac);
	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_get_attr_mac(struct ionic_lif *lif, u8 *mac_addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_getattr = {
			.opcode = IONIC_CMD_LIF_GETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	ether_addr_copy(mac_addr, ctx.comp.lif_getattr.mac);
	return 0;
}

static int ionic_program_mac(struct ionic_lif *lif, u8 *mac)
{
	u8 get_mac[ETH_ALEN];
	int err;

	err = ionic_set_attr_mac(lif, mac);
	if (err)
		return err;

	err = ionic_get_attr_mac(lif, get_mac);
	if (err)
		return err;

	/* To deal with older firmware that silently ignores the set attr mac:
	 * doesn't actually change the mac and doesn't return an error, so we
	 * do the get attr to verify whether or not the set actually happened
	 */
	if (!ether_addr_equal(get_mac, mac))
		return 1;

	return 0;
}

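/* ndo_set_mac_address: program the new MAC into the device first,
 * then swap the netdev address and filter entries over to it.
 */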
static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct sockaddr *addr = sa;
	u8 *mac;
	int err;

	mac = (u8 *)addr->sa_data;
	if (ether_addr_equal(netdev->dev_addr, mac))
		return 0;

	err = ionic_program_mac(lif, mac);
	if (err < 0)
		return err;

	if (err > 0)
		netdev_dbg(netdev, "%s: SET and GET ATTR Mac are not equal - due to old FW running\n",
			   __func__);

	err = eth_prepare_mac_addr_change(netdev, addr);
	if (err)
		return err;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		netdev_info(netdev, "deleting mac addr %pM\n",
			    netdev->dev_addr);
		ionic_lif_addr_del(netdev_priv(netdev), netdev->dev_addr);
	}

	eth_commit_mac_addr_change(netdev, addr);
	netdev_info(netdev, "updating mac addr %pM\n", mac);

	return ionic_lif_addr_add(netdev_priv(netdev), mac);
}

void ionic_stop_queues_reconfig(struct ionic_lif *lif)
{
	/* Stop and clean the queues before reconfiguration */
	netif_device_detach(lif->netdev);
	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
}

static int ionic_start_queues_reconfig(struct ionic_lif *lif)
{
	int err;

	/* Re-init the queues after reconfiguration */

	/* The only way txrx_init can fail here is if communication
	 * with FW is suddenly broken.  There's not much we can do
	 * at this point - error messages have already been printed,
	 * so we can continue on and the user can eventually do a
	 * DOWN and UP to try to reset and clear the issue.
	 */
	err = ionic_txrx_init(lif);
	ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
	netif_device_attach(lif->netdev);

	return err;
}

static bool ionic_xdp_is_valid_mtu(struct ionic_lif *lif, u32 mtu,
				   struct bpf_prog *xdp_prog)
{
	if (!xdp_prog)
		return true;

	if (mtu <= IONIC_XDP_MAX_LINEAR_MTU)
		return true;

	if (xdp_prog->aux && xdp_prog->aux->xdp_has_frags)
		return true;

	return false;
}

static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MTU,
			.mtu = cpu_to_le32(new_mtu),
		},
	};
	struct bpf_prog *xdp_prog;
	int err;

	xdp_prog = READ_ONCE(lif->xdp_prog);
	if (!ionic_xdp_is_valid_mtu(lif, new_mtu, xdp_prog))
		return -EINVAL;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* if we're not running, nothing more to do */
	if (!netif_running(netdev)) {
		WRITE_ONCE(netdev->mtu, new_mtu);
		return 0;
	}

	mutex_lock(&lif->queue_lock);
	ionic_stop_queues_reconfig(lif);
	WRITE_ONCE(netdev->mtu, new_mtu);
	err = ionic_start_queues_reconfig(lif);
	mutex_unlock(&lif->queue_lock);

	return err;
}

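/* Recover from a Tx timeout by bouncing the queues through a full
 * stop/start reconfig under the queue lock.
 */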
static void ionic_tx_timeout_work(struct work_struct *ws)
{
	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
	int err;

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	/* if we were stopped before this scheduled job was launched,
	 * don't bother the queues as they are already stopped.
	 */
	if (!netif_running(lif->netdev))
		return;

	mutex_lock(&lif->queue_lock);
	ionic_stop_queues_reconfig(lif);
	err = ionic_start_queues_reconfig(lif);
	mutex_unlock(&lif->queue_lock);

	if (err)
		dev_err(lif->ionic->dev, "%s: Restarting queues failed\n", __func__);
}

static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	netdev_info(lif->netdev, "Tx Timeout triggered - txq %d\n", txqueue);
	schedule_work(&lif->tx_timeout_work);
}

static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	err = ionic_lif_vlan_add(lif, vid);
	if (err)
		return err;

	ionic_lif_rx_mode(lif);

	return 0;
}

static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				  u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	err = ionic_lif_vlan_del(lif, vid);
	if (err)
		return err;

	ionic_lif_rx_mode(lif);

	return 0;
}

int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
			 const u8 *key, const u32 *indir)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_RSS,
			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
		},
	};
	unsigned int i, tbl_sz;

	if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
		lif->rss_types = types;
		ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
	}

	if (key)
		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);

	if (indir) {
		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = indir[i];
	}

	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
	       IONIC_RSS_HASH_KEY_SIZE);

	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_lif_rss_init(struct ionic_lif *lif)
{
	unsigned int tbl_sz;
	unsigned int i;

	lif->rss_types = IONIC_RSS_TYPE_IPV4 |
			 IONIC_RSS_TYPE_IPV4_TCP |
			 IONIC_RSS_TYPE_IPV4_UDP |
			 IONIC_RSS_TYPE_IPV6 |
			 IONIC_RSS_TYPE_IPV6_TCP |
			 IONIC_RSS_TYPE_IPV6_UDP;

	/* Fill indirection table with 'default' values */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	for (i = 0; i < tbl_sz; i++)
		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);

	return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
}

static void ionic_lif_rss_deinit(struct ionic_lif *lif)
{
	int tbl_sz;

	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	memset(lif->rss_ind_tbl, 0, tbl_sz);
	memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);

	ionic_lif_rss_config(lif, 0x0, NULL, NULL);
}

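/* Tell the FW to stop processing this LIF; failure is only logged
 * since this runs on the teardown path.
 */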
1966 netdev_dbg(lif->netdev, "lif quiesce failed %d\n", err); 1967 } 1968 1969 static void ionic_txrx_disable(struct ionic_lif *lif) 1970 { 1971 unsigned int i; 1972 int err = 0; 1973 1974 if (lif->txqcqs) { 1975 for (i = 0; i < lif->nxqs; i++) 1976 err = ionic_qcq_disable(lif, lif->txqcqs[i], err); 1977 } 1978 1979 if (lif->hwstamp_txq) 1980 err = ionic_qcq_disable(lif, lif->hwstamp_txq, err); 1981 1982 if (lif->rxqcqs) { 1983 for (i = 0; i < lif->nxqs; i++) 1984 err = ionic_qcq_disable(lif, lif->rxqcqs[i], err); 1985 } 1986 1987 if (lif->hwstamp_rxq) 1988 err = ionic_qcq_disable(lif, lif->hwstamp_rxq, err); 1989 1990 ionic_lif_quiesce(lif); 1991 } 1992 1993 static void ionic_txrx_deinit(struct ionic_lif *lif) 1994 { 1995 unsigned int i; 1996 1997 if (lif->txqcqs) { 1998 for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) { 1999 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); 2000 ionic_tx_flush(&lif->txqcqs[i]->cq); 2001 ionic_tx_empty(&lif->txqcqs[i]->q); 2002 } 2003 } 2004 2005 if (lif->rxqcqs) { 2006 for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) { 2007 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]); 2008 ionic_rx_empty(&lif->rxqcqs[i]->q); 2009 } 2010 } 2011 lif->rx_mode = 0; 2012 2013 if (lif->hwstamp_txq) { 2014 ionic_lif_qcq_deinit(lif, lif->hwstamp_txq); 2015 ionic_tx_flush(&lif->hwstamp_txq->cq); 2016 ionic_tx_empty(&lif->hwstamp_txq->q); 2017 } 2018 2019 if (lif->hwstamp_rxq) { 2020 ionic_lif_qcq_deinit(lif, lif->hwstamp_rxq); 2021 ionic_rx_empty(&lif->hwstamp_rxq->q); 2022 } 2023 } 2024 2025 void ionic_txrx_free(struct ionic_lif *lif) 2026 { 2027 unsigned int i; 2028 2029 if (lif->txqcqs) { 2030 for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) { 2031 ionic_qcq_free(lif, lif->txqcqs[i]); 2032 devm_kfree(lif->ionic->dev, lif->txqcqs[i]); 2033 lif->txqcqs[i] = NULL; 2034 } 2035 } 2036 2037 if (lif->rxqcqs) { 2038 for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) { 2039 ionic_qcq_free(lif, lif->rxqcqs[i]); 2040 devm_kfree(lif->ionic->dev, lif->rxqcqs[i]); 2041 lif->rxqcqs[i] = NULL; 2042 } 2043 } 2044 2045 if (lif->hwstamp_txq) { 2046 ionic_qcq_free(lif, lif->hwstamp_txq); 2047 devm_kfree(lif->ionic->dev, lif->hwstamp_txq); 2048 lif->hwstamp_txq = NULL; 2049 } 2050 2051 if (lif->hwstamp_rxq) { 2052 ionic_qcq_free(lif, lif->hwstamp_rxq); 2053 devm_kfree(lif->ionic->dev, lif->hwstamp_rxq); 2054 lif->hwstamp_rxq = NULL; 2055 } 2056 } 2057 2058 static int ionic_txrx_alloc(struct ionic_lif *lif) 2059 { 2060 unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz; 2061 unsigned int flags, i; 2062 int err = 0; 2063 2064 num_desc = lif->ntxq_descs; 2065 desc_sz = sizeof(struct ionic_txq_desc); 2066 comp_sz = sizeof(struct ionic_txq_comp); 2067 2068 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 && 2069 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == 2070 sizeof(struct ionic_txq_sg_desc_v1)) 2071 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1); 2072 else 2073 sg_desc_sz = sizeof(struct ionic_txq_sg_desc); 2074 2075 flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; 2076 2077 if (test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state)) 2078 flags |= IONIC_QCQ_F_CMB_RINGS; 2079 2080 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) 2081 flags |= IONIC_QCQ_F_INTR; 2082 2083 for (i = 0; i < lif->nxqs; i++) { 2084 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 2085 num_desc, desc_sz, comp_sz, sg_desc_sz, 2086 sizeof(struct ionic_tx_desc_info), 2087 lif->kern_pid, NULL, &lif->txqcqs[i]); 2088 if (err) 2089 goto err_out; 2090 2091 if (flags & IONIC_QCQ_F_INTR) { 2092 
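/* seed the new Tx interrupt with the current coalesce setting */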
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 2093 lif->txqcqs[i]->intr.index, 2094 lif->tx_coalesce_hw); 2095 if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state)) 2096 lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw; 2097 } 2098 2099 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]); 2100 } 2101 2102 flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR; 2103 2104 if (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state)) 2105 flags |= IONIC_QCQ_F_CMB_RINGS; 2106 2107 num_desc = lif->nrxq_descs; 2108 desc_sz = sizeof(struct ionic_rxq_desc); 2109 comp_sz = sizeof(struct ionic_rxq_comp); 2110 sg_desc_sz = sizeof(struct ionic_rxq_sg_desc); 2111 2112 if (lif->rxq_features & IONIC_Q_F_2X_CQ_DESC) 2113 comp_sz *= 2; 2114 2115 for (i = 0; i < lif->nxqs; i++) { 2116 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 2117 num_desc, desc_sz, comp_sz, sg_desc_sz, 2118 sizeof(struct ionic_rx_desc_info), 2119 lif->kern_pid, lif->xdp_prog, 2120 &lif->rxqcqs[i]); 2121 if (err) 2122 goto err_out; 2123 2124 lif->rxqcqs[i]->q.features = lif->rxq_features; 2125 2126 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 2127 lif->rxqcqs[i]->intr.index, 2128 lif->rx_coalesce_hw); 2129 if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state)) 2130 lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw; 2131 2132 if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) 2133 ionic_link_qcq_interrupts(lif->rxqcqs[i], 2134 lif->txqcqs[i]); 2135 2136 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]); 2137 } 2138 2139 return 0; 2140 2141 err_out: 2142 ionic_txrx_free(lif); 2143 2144 return err; 2145 } 2146 2147 static int ionic_txrx_init(struct ionic_lif *lif) 2148 { 2149 unsigned int i; 2150 int err; 2151 2152 for (i = 0; i < lif->nxqs; i++) { 2153 err = ionic_lif_txq_init(lif, lif->txqcqs[i]); 2154 if (err) 2155 goto err_out; 2156 2157 err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]); 2158 if (err) { 2159 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); 2160 goto err_out; 2161 } 2162 } 2163 2164 if (lif->netdev->features & NETIF_F_RXHASH) 2165 ionic_lif_rss_init(lif); 2166 2167 ionic_lif_rx_mode(lif); 2168 2169 return 0; 2170 2171 err_out: 2172 while (i--) { 2173 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); 2174 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]); 2175 } 2176 2177 return err; 2178 } 2179 2180 static int ionic_txrx_enable(struct ionic_lif *lif) 2181 { 2182 int derr = 0; 2183 int i, err; 2184 2185 ionic_xdp_rxqs_prog_update(lif); 2186 2187 for (i = 0; i < lif->nxqs; i++) { 2188 if (!(lif->rxqcqs[i] && lif->txqcqs[i])) { 2189 dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i); 2190 err = -ENXIO; 2191 goto err_out; 2192 } 2193 2194 ionic_rx_fill(&lif->rxqcqs[i]->q, 2195 READ_ONCE(lif->rxqcqs[i]->q.xdp_prog)); 2196 err = ionic_qcq_enable(lif->rxqcqs[i]); 2197 if (err) 2198 goto err_out; 2199 2200 err = ionic_qcq_enable(lif->txqcqs[i]); 2201 if (err) { 2202 derr = ionic_qcq_disable(lif, lif->rxqcqs[i], err); 2203 goto err_out; 2204 } 2205 } 2206 2207 if (lif->hwstamp_rxq) { 2208 ionic_rx_fill(&lif->hwstamp_rxq->q, NULL); 2209 err = ionic_qcq_enable(lif->hwstamp_rxq); 2210 if (err) 2211 goto err_out_hwstamp_rx; 2212 } 2213 2214 if (lif->hwstamp_txq) { 2215 err = ionic_qcq_enable(lif->hwstamp_txq); 2216 if (err) 2217 goto err_out_hwstamp_tx; 2218 } 2219 2220 return 0; 2221 2222 err_out_hwstamp_tx: 2223 if (lif->hwstamp_rxq) 2224 derr = ionic_qcq_disable(lif, lif->hwstamp_rxq, derr); 2225 err_out_hwstamp_rx: 2226 i = lif->nxqs; 2227 err_out: 2228 while (i--) { 2229 derr = ionic_qcq_disable(lif, lif->txqcqs[i], derr); 2230 derr = 
ionic_qcq_disable(lif, lif->rxqcqs[i], derr); 2231 } 2232 2233 ionic_xdp_rxqs_prog_update(lif); 2234 2235 return err; 2236 } 2237 2238 static int ionic_start_queues(struct ionic_lif *lif) 2239 { 2240 int err; 2241 2242 if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) 2243 return -EIO; 2244 2245 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 2246 return -EBUSY; 2247 2248 if (test_and_set_bit(IONIC_LIF_F_UP, lif->state)) 2249 return 0; 2250 2251 err = ionic_txrx_enable(lif); 2252 if (err) { 2253 clear_bit(IONIC_LIF_F_UP, lif->state); 2254 return err; 2255 } 2256 netif_tx_wake_all_queues(lif->netdev); 2257 2258 return 0; 2259 } 2260 2261 static int ionic_open(struct net_device *netdev) 2262 { 2263 struct ionic_lif *lif = netdev_priv(netdev); 2264 int err; 2265 2266 /* If recovering from a broken state, clear the bit and we'll try again */ 2267 if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state)) 2268 netdev_info(netdev, "clearing broken state\n"); 2269 2270 mutex_lock(&lif->queue_lock); 2271 2272 err = ionic_txrx_alloc(lif); 2273 if (err) 2274 goto err_unlock; 2275 2276 err = ionic_txrx_init(lif); 2277 if (err) 2278 goto err_txrx_free; 2279 2280 err = netif_set_real_num_tx_queues(netdev, lif->nxqs); 2281 if (err) 2282 goto err_txrx_deinit; 2283 2284 err = netif_set_real_num_rx_queues(netdev, lif->nxqs); 2285 if (err) 2286 goto err_txrx_deinit; 2287 2288 /* don't start the queues until we have link */ 2289 if (netif_carrier_ok(netdev)) { 2290 err = ionic_start_queues(lif); 2291 if (err) 2292 goto err_txrx_deinit; 2293 } 2294 2295 /* If hardware timestamping is enabled, but the queues were freed by 2296 * ionic_stop, those need to be reallocated and initialized, too. 2297 */ 2298 ionic_lif_hwstamp_recreate_queues(lif); 2299 2300 mutex_unlock(&lif->queue_lock); 2301 2302 return 0; 2303 2304 err_txrx_deinit: 2305 ionic_txrx_deinit(lif); 2306 err_txrx_free: 2307 ionic_txrx_free(lif); 2308 err_unlock: 2309 mutex_unlock(&lif->queue_lock); 2310 return err; 2311 } 2312 2313 static void ionic_stop_queues(struct ionic_lif *lif) 2314 { 2315 if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state)) 2316 return; 2317 2318 netif_tx_disable(lif->netdev); 2319 ionic_txrx_disable(lif); 2320 } 2321 2322 static int ionic_stop(struct net_device *netdev) 2323 { 2324 struct ionic_lif *lif = netdev_priv(netdev); 2325 2326 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 2327 return 0; 2328 2329 mutex_lock(&lif->queue_lock); 2330 ionic_stop_queues(lif); 2331 ionic_txrx_deinit(lif); 2332 ionic_txrx_free(lif); 2333 mutex_unlock(&lif->queue_lock); 2334 2335 return 0; 2336 } 2337 2338 static int ionic_get_vf_config(struct net_device *netdev, 2339 int vf, struct ifla_vf_info *ivf) 2340 { 2341 struct ionic_lif *lif = netdev_priv(netdev); 2342 struct ionic *ionic = lif->ionic; 2343 int ret = 0; 2344 2345 if (!netif_device_present(netdev)) 2346 return -EBUSY; 2347 2348 down_read(&ionic->vf_op_lock); 2349 2350 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2351 ret = -EINVAL; 2352 } else { 2353 struct ionic_vf *vfdata = &ionic->vfs[vf]; 2354 2355 ivf->vf = vf; 2356 ivf->qos = 0; 2357 ivf->vlan = le16_to_cpu(vfdata->vlanid); 2358 ivf->spoofchk = vfdata->spoofchk; 2359 ivf->linkstate = vfdata->linkstate; 2360 ivf->max_tx_rate = le32_to_cpu(vfdata->maxrate); 2361 ivf->trusted = vfdata->trusted; 2362 ether_addr_copy(ivf->mac, vfdata->macaddr); 2363 } 2364 2365 up_read(&ionic->vf_op_lock); 2366 return ret; 2367 } 2368 2369 static int ionic_get_vf_stats(struct net_device *netdev, int vf, 2370 struct ifla_vf_stats *vf_stats) 2371 { 
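/* Report totals from the PF's cached copy of the VF stats; the device * presumably keeps this copy updated at the stats_pa address registered * with IONIC_VF_ATTR_STATSADDR (see ionic_vf_attr_replay()). */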
2372 struct ionic_lif *lif = netdev_priv(netdev); 2373 struct ionic *ionic = lif->ionic; 2374 struct ionic_lif_stats *vs; 2375 int ret = 0; 2376 2377 if (!netif_device_present(netdev)) 2378 return -EBUSY; 2379 2380 down_read(&ionic->vf_op_lock); 2381 2382 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2383 ret = -EINVAL; 2384 } else { 2385 memset(vf_stats, 0, sizeof(*vf_stats)); 2386 vs = &ionic->vfs[vf].stats; 2387 2388 vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets); 2389 vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets); 2390 vf_stats->rx_bytes = le64_to_cpu(vs->rx_ucast_bytes); 2391 vf_stats->tx_bytes = le64_to_cpu(vs->tx_ucast_bytes); 2392 vf_stats->broadcast = le64_to_cpu(vs->rx_bcast_packets); 2393 vf_stats->multicast = le64_to_cpu(vs->rx_mcast_packets); 2394 vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) + 2395 le64_to_cpu(vs->rx_mcast_drop_packets) + 2396 le64_to_cpu(vs->rx_bcast_drop_packets); 2397 vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) + 2398 le64_to_cpu(vs->tx_mcast_drop_packets) + 2399 le64_to_cpu(vs->tx_bcast_drop_packets); 2400 } 2401 2402 up_read(&ionic->vf_op_lock); 2403 return ret; 2404 } 2405 2406 static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) 2407 { 2408 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_MAC }; 2409 struct ionic_lif *lif = netdev_priv(netdev); 2410 struct ionic *ionic = lif->ionic; 2411 int ret; 2412 2413 if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac))) 2414 return -EINVAL; 2415 2416 if (!netif_device_present(netdev)) 2417 return -EBUSY; 2418 2419 down_write(&ionic->vf_op_lock); 2420 2421 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2422 ret = -EINVAL; 2423 } else { 2424 ether_addr_copy(vfc.macaddr, mac); 2425 dev_dbg(ionic->dev, "%s: vf %d macaddr %pM\n", 2426 __func__, vf, vfc.macaddr); 2427 2428 ret = ionic_set_vf_config(ionic, vf, &vfc); 2429 if (!ret) 2430 ether_addr_copy(ionic->vfs[vf].macaddr, mac); 2431 } 2432 2433 up_write(&ionic->vf_op_lock); 2434 return ret; 2435 } 2436 2437 static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, 2438 u8 qos, __be16 proto) 2439 { 2440 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_VLAN }; 2441 struct ionic_lif *lif = netdev_priv(netdev); 2442 struct ionic *ionic = lif->ionic; 2443 int ret; 2444 2445 /* until someday when we support qos */ 2446 if (qos) 2447 return -EINVAL; 2448 2449 if (vlan > 4095) 2450 return -EINVAL; 2451 2452 if (proto != htons(ETH_P_8021Q)) 2453 return -EPROTONOSUPPORT; 2454 2455 if (!netif_device_present(netdev)) 2456 return -EBUSY; 2457 2458 down_write(&ionic->vf_op_lock); 2459 2460 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2461 ret = -EINVAL; 2462 } else { 2463 vfc.vlanid = cpu_to_le16(vlan); 2464 dev_dbg(ionic->dev, "%s: vf %d vlan %d\n", 2465 __func__, vf, le16_to_cpu(vfc.vlanid)); 2466 2467 ret = ionic_set_vf_config(ionic, vf, &vfc); 2468 if (!ret) 2469 ionic->vfs[vf].vlanid = cpu_to_le16(vlan); 2470 } 2471 2472 up_write(&ionic->vf_op_lock); 2473 return ret; 2474 } 2475 2476 static int ionic_set_vf_rate(struct net_device *netdev, int vf, 2477 int tx_min, int tx_max) 2478 { 2479 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_RATE }; 2480 struct ionic_lif *lif = netdev_priv(netdev); 2481 struct ionic *ionic = lif->ionic; 2482 int ret; 2483 2484 /* setting the min just seems silly */ 2485 if (tx_min) 2486 return -EINVAL; 2487 2488 if (!netif_device_present(netdev)) 2489 return -EBUSY; 2490 2491 down_write(&ionic->vf_op_lock); 
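/* recheck the VF index under the lock, since the VF count can change */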
2492 2493 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2494 ret = -EINVAL; 2495 } else { 2496 vfc.maxrate = cpu_to_le32(tx_max); 2497 dev_dbg(ionic->dev, "%s: vf %d maxrate %d\n", 2498 __func__, vf, le32_to_cpu(vfc.maxrate)); 2499 2500 ret = ionic_set_vf_config(ionic, vf, &vfc); 2501 if (!ret) 2502 ionic->vfs[vf].maxrate = cpu_to_le32(tx_max); 2503 } 2504 2505 up_write(&ionic->vf_op_lock); 2506 return ret; 2507 } 2508 2509 static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set) 2510 { 2511 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_SPOOFCHK }; 2512 struct ionic_lif *lif = netdev_priv(netdev); 2513 struct ionic *ionic = lif->ionic; 2514 int ret; 2515 2516 if (!netif_device_present(netdev)) 2517 return -EBUSY; 2518 2519 down_write(&ionic->vf_op_lock); 2520 2521 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2522 ret = -EINVAL; 2523 } else { 2524 vfc.spoofchk = set; 2525 dev_dbg(ionic->dev, "%s: vf %d spoof %d\n", 2526 __func__, vf, vfc.spoofchk); 2527 2528 ret = ionic_set_vf_config(ionic, vf, &vfc); 2529 if (!ret) 2530 ionic->vfs[vf].spoofchk = set; 2531 } 2532 2533 up_write(&ionic->vf_op_lock); 2534 return ret; 2535 } 2536 2537 static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set) 2538 { 2539 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_TRUST }; 2540 struct ionic_lif *lif = netdev_priv(netdev); 2541 struct ionic *ionic = lif->ionic; 2542 int ret; 2543 2544 if (!netif_device_present(netdev)) 2545 return -EBUSY; 2546 2547 down_write(&ionic->vf_op_lock); 2548 2549 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2550 ret = -EINVAL; 2551 } else { 2552 vfc.trust = set; 2553 dev_dbg(ionic->dev, "%s: vf %d trust %d\n", 2554 __func__, vf, vfc.trust); 2555 2556 ret = ionic_set_vf_config(ionic, vf, &vfc); 2557 if (!ret) 2558 ionic->vfs[vf].trusted = set; 2559 } 2560 2561 up_write(&ionic->vf_op_lock); 2562 return ret; 2563 } 2564 2565 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set) 2566 { 2567 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_LINKSTATE }; 2568 struct ionic_lif *lif = netdev_priv(netdev); 2569 struct ionic *ionic = lif->ionic; 2570 u8 vfls; 2571 int ret; 2572 2573 switch (set) { 2574 case IFLA_VF_LINK_STATE_ENABLE: 2575 vfls = IONIC_VF_LINK_STATUS_UP; 2576 break; 2577 case IFLA_VF_LINK_STATE_DISABLE: 2578 vfls = IONIC_VF_LINK_STATUS_DOWN; 2579 break; 2580 case IFLA_VF_LINK_STATE_AUTO: 2581 vfls = IONIC_VF_LINK_STATUS_AUTO; 2582 break; 2583 default: 2584 return -EINVAL; 2585 } 2586 2587 if (!netif_device_present(netdev)) 2588 return -EBUSY; 2589 2590 down_write(&ionic->vf_op_lock); 2591 2592 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2593 ret = -EINVAL; 2594 } else { 2595 vfc.linkstate = vfls; 2596 dev_dbg(ionic->dev, "%s: vf %d linkstate %d\n", 2597 __func__, vf, vfc.linkstate); 2598 2599 ret = ionic_set_vf_config(ionic, vf, &vfc); 2600 if (!ret) 2601 ionic->vfs[vf].linkstate = set; 2602 } 2603 2604 up_write(&ionic->vf_op_lock); 2605 return ret; 2606 } 2607 2608 static void ionic_vf_attr_replay(struct ionic_lif *lif) 2609 { 2610 struct ionic_vf_setattr_cmd vfc = { }; 2611 struct ionic *ionic = lif->ionic; 2612 struct ionic_vf *v; 2613 int i; 2614 2615 if (!ionic->vfs) 2616 return; 2617 2618 down_read(&ionic->vf_op_lock); 2619 2620 for (i = 0; i < ionic->num_vfs; i++) { 2621 v = &ionic->vfs[i]; 2622 2623 if (v->stats_pa) { 2624 vfc.attr = IONIC_VF_ATTR_STATSADDR; 2625 vfc.stats_pa = cpu_to_le64(v->stats_pa); 2626 ionic_set_vf_config(ionic, i, &vfc); 2627 
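/* clear each field after it is sent so the shared cmd struct is * clean for the next attribute */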
vfc.stats_pa = 0; 2628 } 2629 2630 if (!is_zero_ether_addr(v->macaddr)) { 2631 vfc.attr = IONIC_VF_ATTR_MAC; 2632 ether_addr_copy(vfc.macaddr, v->macaddr); 2633 ionic_set_vf_config(ionic, i, &vfc); 2634 eth_zero_addr(vfc.macaddr); 2635 } 2636 2637 if (v->vlanid) { 2638 vfc.attr = IONIC_VF_ATTR_VLAN; 2639 vfc.vlanid = v->vlanid; 2640 ionic_set_vf_config(ionic, i, &vfc); 2641 vfc.vlanid = 0; 2642 } 2643 2644 if (v->maxrate) { 2645 vfc.attr = IONIC_VF_ATTR_RATE; 2646 vfc.maxrate = v->maxrate; 2647 ionic_set_vf_config(ionic, i, &vfc); 2648 vfc.maxrate = 0; 2649 } 2650 2651 if (v->spoofchk) { 2652 vfc.attr = IONIC_VF_ATTR_SPOOFCHK; 2653 vfc.spoofchk = v->spoofchk; 2654 ionic_set_vf_config(ionic, i, &vfc); 2655 vfc.spoofchk = 0; 2656 } 2657 2658 if (v->trusted) { 2659 vfc.attr = IONIC_VF_ATTR_TRUST; 2660 vfc.trust = v->trusted; 2661 ionic_set_vf_config(ionic, i, &vfc); 2662 vfc.trust = 0; 2663 } 2664 2665 if (v->linkstate) { 2666 vfc.attr = IONIC_VF_ATTR_LINKSTATE; 2667 vfc.linkstate = v->linkstate; 2668 ionic_set_vf_config(ionic, i, &vfc); 2669 vfc.linkstate = 0; 2670 } 2671 } 2672 2673 up_read(&ionic->vf_op_lock); 2674 2675 ionic_vf_start(ionic); 2676 } 2677 2678 static void ionic_unregister_rxq_info(struct ionic_queue *q) 2679 { 2680 struct xdp_rxq_info *xi; 2681 2682 if (!q->xdp_rxq_info) 2683 return; 2684 2685 xi = q->xdp_rxq_info; 2686 q->xdp_rxq_info = NULL; 2687 2688 xdp_rxq_info_unreg(xi); 2689 kfree(xi); 2690 } 2691 2692 static int ionic_register_rxq_info(struct ionic_queue *q, unsigned int napi_id) 2693 { 2694 struct xdp_rxq_info *rxq_info; 2695 int err; 2696 2697 rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL); 2698 if (!rxq_info) 2699 return -ENOMEM; 2700 2701 err = xdp_rxq_info_reg(rxq_info, q->lif->netdev, q->index, napi_id); 2702 if (err) { 2703 netdev_err(q->lif->netdev, "q%d xdp_rxq_info_reg failed, err %d\n", 2704 q->index, err); 2705 goto err_out; 2706 } 2707 2708 err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_POOL, q->page_pool); 2709 if (err) { 2710 netdev_err(q->lif->netdev, "q%d xdp_rxq_info_reg_mem_model failed, err %d\n", 2711 q->index, err); 2712 xdp_rxq_info_unreg(rxq_info); 2713 goto err_out; 2714 } 2715 2716 q->xdp_rxq_info = rxq_info; 2717 2718 return 0; 2719 2720 err_out: 2721 kfree(rxq_info); 2722 return err; 2723 } 2724 2725 static void ionic_xdp_rxqs_prog_update(struct ionic_lif *lif) 2726 { 2727 struct bpf_prog *xdp_prog; 2728 unsigned int i; 2729 2730 if (!lif->rxqcqs) 2731 return; 2732 2733 xdp_prog = READ_ONCE(lif->xdp_prog); 2734 for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) { 2735 struct ionic_queue *q = &lif->rxqcqs[i]->q; 2736 2737 WRITE_ONCE(q->xdp_prog, xdp_prog); 2738 } 2739 } 2740 2741 static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf) 2742 { 2743 struct ionic_lif *lif = netdev_priv(netdev); 2744 struct bpf_prog *old_prog; 2745 u32 maxfs; 2746 2747 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) { 2748 #define XDP_ERR_SPLIT "XDP not available with split Tx/Rx interrupts" 2749 NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_SPLIT); 2750 netdev_info(lif->netdev, XDP_ERR_SPLIT); 2751 return -EOPNOTSUPP; 2752 } 2753 2754 if (!ionic_xdp_is_valid_mtu(lif, netdev->mtu, bpf->prog)) { 2755 #define XDP_ERR_MTU "MTU is too large for XDP without frags support" 2756 NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_MTU); 2757 netdev_info(lif->netdev, XDP_ERR_MTU); 2758 return -EINVAL; 2759 } 2760 2761 maxfs = __le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN; 2762 if (bpf->prog && !(bpf->prog->aux && 
bpf->prog->aux->xdp_has_frags)) 2763 maxfs = min_t(u32, maxfs, IONIC_XDP_MAX_LINEAR_MTU); 2764 netdev->max_mtu = maxfs; 2765 2766 if (!netif_running(netdev)) { 2767 old_prog = xchg(&lif->xdp_prog, bpf->prog); 2768 } else if (lif->xdp_prog && bpf->prog) { 2769 old_prog = xchg(&lif->xdp_prog, bpf->prog); 2770 ionic_xdp_rxqs_prog_update(lif); 2771 } else { 2772 struct ionic_queue_params qparams; 2773 2774 ionic_init_queue_params(lif, &qparams); 2775 qparams.xdp_prog = bpf->prog; 2776 mutex_lock(&lif->queue_lock); 2777 ionic_reconfigure_queues(lif, &qparams); 2778 old_prog = xchg(&lif->xdp_prog, bpf->prog); 2779 mutex_unlock(&lif->queue_lock); 2780 } 2781 2782 if (old_prog) 2783 bpf_prog_put(old_prog); 2784 2785 return 0; 2786 } 2787 2788 static int ionic_xdp(struct net_device *netdev, struct netdev_bpf *bpf) 2789 { 2790 switch (bpf->command) { 2791 case XDP_SETUP_PROG: 2792 return ionic_xdp_config(netdev, bpf); 2793 default: 2794 return -EINVAL; 2795 } 2796 } 2797 2798 static const struct net_device_ops ionic_netdev_ops = { 2799 .ndo_open = ionic_open, 2800 .ndo_stop = ionic_stop, 2801 .ndo_start_xmit = ionic_start_xmit, 2802 .ndo_bpf = ionic_xdp, 2803 .ndo_xdp_xmit = ionic_xdp_xmit, 2804 .ndo_get_stats64 = ionic_get_stats64, 2805 .ndo_set_rx_mode = ionic_ndo_set_rx_mode, 2806 .ndo_set_features = ionic_set_features, 2807 .ndo_set_mac_address = ionic_set_mac_address, 2808 .ndo_validate_addr = eth_validate_addr, 2809 .ndo_tx_timeout = ionic_tx_timeout, 2810 .ndo_change_mtu = ionic_change_mtu, 2811 .ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid, 2812 .ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid, 2813 .ndo_set_vf_vlan = ionic_set_vf_vlan, 2814 .ndo_set_vf_trust = ionic_set_vf_trust, 2815 .ndo_set_vf_mac = ionic_set_vf_mac, 2816 .ndo_set_vf_rate = ionic_set_vf_rate, 2817 .ndo_set_vf_spoofchk = ionic_set_vf_spoofchk, 2818 .ndo_get_vf_config = ionic_get_vf_config, 2819 .ndo_set_vf_link_state = ionic_set_vf_link_state, 2820 .ndo_get_vf_stats = ionic_get_vf_stats, 2821 .ndo_hwtstamp_get = ionic_hwstamp_get, 2822 .ndo_hwtstamp_set = ionic_hwstamp_set, 2823 }; 2824 2825 static int ionic_cmb_reconfig(struct ionic_lif *lif, 2826 struct ionic_queue_params *qparam) 2827 { 2828 struct ionic_queue_params start_qparams; 2829 int err = 0; 2830 2831 /* When changing CMB queue parameters, we're using limited 2832 * on-device memory and don't have extra memory to use for 2833 * duplicate allocations, so we free it all first then 2834 * re-allocate with the new parameters. 
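* If the re-allocation with the new parameters fails, we fall back * to the previous parameters and try once more.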
2835 */ 2836 2837 /* Checkpoint for possible unwind */ 2838 ionic_init_queue_params(lif, &start_qparams); 2839 2840 /* Stop and free the queues */ 2841 ionic_stop_queues_reconfig(lif); 2842 ionic_txrx_free(lif); 2843 2844 /* Set up new qparams */ 2845 ionic_set_queue_params(lif, qparam); 2846 2847 if (netif_running(lif->netdev)) { 2848 /* Alloc and start the new configuration */ 2849 err = ionic_txrx_alloc(lif); 2850 if (err) { 2851 dev_warn(lif->ionic->dev, 2852 "CMB reconfig failed, restoring values: %d\n", err); 2853 2854 /* Back out the changes */ 2855 ionic_set_queue_params(lif, &start_qparams); 2856 err = ionic_txrx_alloc(lif); 2857 if (err) { 2858 dev_err(lif->ionic->dev, 2859 "CMB restore failed: %d\n", err); 2860 goto err_out; 2861 } 2862 } 2863 2864 err = ionic_start_queues_reconfig(lif); 2865 if (err) { 2866 dev_err(lif->ionic->dev, 2867 "CMB reconfig failed: %d\n", err); 2868 goto err_out; 2869 } 2870 } 2871 2872 err_out: 2873 /* This was detached in ionic_stop_queues_reconfig() */ 2874 netif_device_attach(lif->netdev); 2875 2876 return err; 2877 } 2878 2879 static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b) 2880 { 2881 /* only swapping the queues and napi, not flags or other stuff */ 2882 swap(a->napi, b->napi); 2883 2884 if (a->q.type == IONIC_QTYPE_RXQ) { 2885 swap(a->q.page_pool, b->q.page_pool); 2886 a->q.page_pool->p.napi = &a->napi; 2887 if (b->q.page_pool) /* is NULL when increasing queue count */ 2888 b->q.page_pool->p.napi = &b->napi; 2889 } 2890 2891 swap(a->q.features, b->q.features); 2892 swap(a->q.num_descs, b->q.num_descs); 2893 swap(a->q.desc_size, b->q.desc_size); 2894 swap(a->q.base, b->q.base); 2895 swap(a->q.base_pa, b->q.base_pa); 2896 swap(a->q.info, b->q.info); 2897 swap(a->q.xdp_prog, b->q.xdp_prog); 2898 swap(a->q.xdp_rxq_info, b->q.xdp_rxq_info); 2899 swap(a->q.partner, b->q.partner); 2900 swap(a->q_base, b->q_base); 2901 swap(a->q_base_pa, b->q_base_pa); 2902 swap(a->q_size, b->q_size); 2903 2904 swap(a->q.sg_desc_size, b->q.sg_desc_size); 2905 swap(a->q.sg_base, b->q.sg_base); 2906 swap(a->q.sg_base_pa, b->q.sg_base_pa); 2907 swap(a->sg_base, b->sg_base); 2908 swap(a->sg_base_pa, b->sg_base_pa); 2909 swap(a->sg_size, b->sg_size); 2910 2911 swap(a->cq.num_descs, b->cq.num_descs); 2912 swap(a->cq.desc_size, b->cq.desc_size); 2913 swap(a->cq.base, b->cq.base); 2914 swap(a->cq.base_pa, b->cq.base_pa); 2915 swap(a->cq_base, b->cq_base); 2916 swap(a->cq_base_pa, b->cq_base_pa); 2917 swap(a->cq_size, b->cq_size); 2918 2919 ionic_debugfs_del_qcq(a); 2920 ionic_debugfs_add_qcq(a->q.lif, a); 2921 } 2922 2923 int ionic_reconfigure_queues(struct ionic_lif *lif, 2924 struct ionic_queue_params *qparam) 2925 { 2926 unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz; 2927 struct ionic_qcq **tx_qcqs = NULL; 2928 struct ionic_qcq **rx_qcqs = NULL; 2929 unsigned int flags, i; 2930 int err = 0; 2931 2932 /* Are we changing q params while CMB is on */ 2933 if ((test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) && qparam->cmb_tx) || 2934 (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state) && qparam->cmb_rx)) 2935 return ionic_cmb_reconfig(lif, qparam); 2936 2937 /* allocate temporary qcq arrays to hold new queue structs */ 2938 if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) { 2939 tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif, 2940 sizeof(struct ionic_qcq *), GFP_KERNEL); 2941 if (!tx_qcqs) { 2942 err = -ENOMEM; 2943 goto err_out; 2944 } 2945 } 2946 if (qparam->nxqs != lif->nxqs || 2947 qparam->nrxq_descs != 
lif->nrxq_descs || 2948 qparam->rxq_features != lif->rxq_features || 2949 qparam->xdp_prog != lif->xdp_prog) { 2950 rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif, 2951 sizeof(struct ionic_qcq *), GFP_KERNEL); 2952 if (!rx_qcqs) { 2953 err = -ENOMEM; 2954 goto err_out; 2955 } 2956 } 2957 2958 /* allocate new desc_info and rings, but leave the interrupt setup 2959 * until later so as to not mess with the still-running queues 2960 */ 2961 if (tx_qcqs) { 2962 num_desc = qparam->ntxq_descs; 2963 desc_sz = sizeof(struct ionic_txq_desc); 2964 comp_sz = sizeof(struct ionic_txq_comp); 2965 2966 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 && 2967 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == 2968 sizeof(struct ionic_txq_sg_desc_v1)) 2969 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1); 2970 else 2971 sg_desc_sz = sizeof(struct ionic_txq_sg_desc); 2972 2973 for (i = 0; i < qparam->nxqs; i++) { 2974 /* If missing, short placeholder qcq needed for swap */ 2975 if (!lif->txqcqs[i]) { 2976 flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; 2977 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 2978 4, desc_sz, comp_sz, sg_desc_sz, 2979 sizeof(struct ionic_tx_desc_info), 2980 lif->kern_pid, NULL, &lif->txqcqs[i]); 2981 if (err) 2982 goto err_out; 2983 } 2984 2985 flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR; 2986 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 2987 num_desc, desc_sz, comp_sz, sg_desc_sz, 2988 sizeof(struct ionic_tx_desc_info), 2989 lif->kern_pid, NULL, &tx_qcqs[i]); 2990 if (err) 2991 goto err_out; 2992 } 2993 } 2994 2995 if (rx_qcqs) { 2996 num_desc = qparam->nrxq_descs; 2997 desc_sz = sizeof(struct ionic_rxq_desc); 2998 comp_sz = sizeof(struct ionic_rxq_comp); 2999 sg_desc_sz = sizeof(struct ionic_rxq_sg_desc); 3000 3001 if (qparam->rxq_features & IONIC_Q_F_2X_CQ_DESC) 3002 comp_sz *= 2; 3003 3004 for (i = 0; i < qparam->nxqs; i++) { 3005 /* If missing, short placeholder qcq needed for swap */ 3006 if (!lif->rxqcqs[i]) { 3007 flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG; 3008 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 3009 4, desc_sz, comp_sz, sg_desc_sz, 3010 sizeof(struct ionic_rx_desc_info), 3011 lif->kern_pid, NULL, &lif->rxqcqs[i]); 3012 if (err) 3013 goto err_out; 3014 } 3015 3016 flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR; 3017 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 3018 num_desc, desc_sz, comp_sz, sg_desc_sz, 3019 sizeof(struct ionic_rx_desc_info), 3020 lif->kern_pid, qparam->xdp_prog, &rx_qcqs[i]); 3021 if (err) 3022 goto err_out; 3023 3024 rx_qcqs[i]->q.features = qparam->rxq_features; 3025 rx_qcqs[i]->q.xdp_prog = qparam->xdp_prog; 3026 } 3027 } 3028 3029 /* stop and clean the queues */ 3030 ionic_stop_queues_reconfig(lif); 3031 3032 if (qparam->nxqs != lif->nxqs) { 3033 err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs); 3034 if (err) 3035 goto err_out_reinit_unlock; 3036 err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs); 3037 if (err) { 3038 netif_set_real_num_tx_queues(lif->netdev, lif->nxqs); 3039 goto err_out_reinit_unlock; 3040 } 3041 } 3042 3043 /* swap new desc_info and rings, keeping existing interrupt config */ 3044 if (tx_qcqs) { 3045 lif->ntxq_descs = qparam->ntxq_descs; 3046 for (i = 0; i < qparam->nxqs; i++) 3047 ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]); 3048 } 3049 3050 if (rx_qcqs) { 3051 lif->nrxq_descs = qparam->nrxq_descs; 3052 for (i = 0; i < qparam->nxqs; i++) 3053 ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]); 3054 } 3055 3056 /* 
if we need to change the interrupt layout, this is the time */ 3057 if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) || 3058 qparam->nxqs != lif->nxqs) { 3059 if (qparam->intr_split) { 3060 set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); 3061 } else { 3062 clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); 3063 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs; 3064 lif->tx_coalesce_hw = lif->rx_coalesce_hw; 3065 } 3066 3067 /* Clear existing interrupt assignments. We check for NULL here 3068 * because we're checking the whole array for potential qcqs, not 3069 * just those qcqs that have just been set up. 3070 */ 3071 for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) { 3072 if (lif->txqcqs[i]) 3073 ionic_qcq_intr_free(lif, lif->txqcqs[i]); 3074 if (lif->rxqcqs[i]) 3075 ionic_qcq_intr_free(lif, lif->rxqcqs[i]); 3076 } 3077 3078 /* re-assign the interrupts */ 3079 for (i = 0; i < qparam->nxqs; i++) { 3080 lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR; 3081 err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]); 3082 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 3083 lif->rxqcqs[i]->intr.index, 3084 lif->rx_coalesce_hw); 3085 3086 if (qparam->intr_split) { 3087 lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR; 3088 err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]); 3089 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 3090 lif->txqcqs[i]->intr.index, 3091 lif->tx_coalesce_hw); 3092 if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state)) 3093 lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw; 3094 } else { 3095 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3096 ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]); 3097 } 3098 } 3099 } 3100 3101 /* now we can rework the debugfs mappings */ 3102 if (tx_qcqs) { 3103 for (i = 0; i < qparam->nxqs; i++) { 3104 ionic_debugfs_del_qcq(lif->txqcqs[i]); 3105 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]); 3106 } 3107 } 3108 3109 if (rx_qcqs) { 3110 for (i = 0; i < qparam->nxqs; i++) { 3111 ionic_debugfs_del_qcq(lif->rxqcqs[i]); 3112 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]); 3113 } 3114 } 3115 3116 swap(lif->nxqs, qparam->nxqs); 3117 swap(lif->rxq_features, qparam->rxq_features); 3118 3119 err_out_reinit_unlock: 3120 /* re-init the queues, but don't lose an error code */ 3121 if (err) 3122 ionic_start_queues_reconfig(lif); 3123 else 3124 err = ionic_start_queues_reconfig(lif); 3125 3126 err_out: 3127 /* free old allocs without cleaning intr */ 3128 for (i = 0; i < qparam->nxqs; i++) { 3129 if (tx_qcqs && tx_qcqs[i]) { 3130 tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3131 ionic_qcq_free(lif, tx_qcqs[i]); 3132 devm_kfree(lif->ionic->dev, tx_qcqs[i]); 3133 tx_qcqs[i] = NULL; 3134 } 3135 if (rx_qcqs && rx_qcqs[i]) { 3136 rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3137 ionic_qcq_free(lif, rx_qcqs[i]); 3138 devm_kfree(lif->ionic->dev, rx_qcqs[i]); 3139 rx_qcqs[i] = NULL; 3140 } 3141 } 3142 3143 /* free q array */ 3144 if (rx_qcqs) { 3145 devm_kfree(lif->ionic->dev, rx_qcqs); 3146 rx_qcqs = NULL; 3147 } 3148 if (tx_qcqs) { 3149 devm_kfree(lif->ionic->dev, tx_qcqs); 3150 tx_qcqs = NULL; 3151 } 3152 3153 /* clean the unused dma and info allocations when new set is smaller 3154 * than the full array, but leave the qcq shells in place 3155 */ 3156 for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) { 3157 if (lif->txqcqs && lif->txqcqs[i]) { 3158 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3159 ionic_qcq_free(lif, lif->txqcqs[i]); 3160 } 3161 3162 if (lif->rxqcqs && lif->rxqcqs[i]) { 3163 lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3164 ionic_qcq_free(lif, 
lif->rxqcqs[i]); 3165 } 3166 } 3167 3168 if (err) 3169 netdev_info(lif->netdev, "%s: failed %d\n", __func__, err); 3170 3171 return err; 3172 } 3173 3174 static int ionic_affinity_masks_alloc(struct ionic *ionic) 3175 { 3176 cpumask_var_t *affinity_masks; 3177 int nintrs = ionic->nintrs; 3178 int i; 3179 3180 affinity_masks = kcalloc(nintrs, sizeof(cpumask_var_t), GFP_KERNEL); 3181 if (!affinity_masks) 3182 return -ENOMEM; 3183 3184 for (i = 0; i < nintrs; i++) { 3185 if (!zalloc_cpumask_var_node(&affinity_masks[i], GFP_KERNEL, 3186 dev_to_node(ionic->dev))) 3187 goto err_out; 3188 } 3189 3190 ionic->affinity_masks = affinity_masks; 3191 3192 return 0; 3193 3194 err_out: 3195 for (--i; i >= 0; i--) 3196 free_cpumask_var(affinity_masks[i]); 3197 kfree(affinity_masks); 3198 3199 return -ENOMEM; 3200 } 3201 3202 static void ionic_affinity_masks_free(struct ionic *ionic) 3203 { 3204 int i; 3205 3206 for (i = 0; i < ionic->nintrs; i++) 3207 free_cpumask_var(ionic->affinity_masks[i]); 3208 kfree(ionic->affinity_masks); 3209 ionic->affinity_masks = NULL; 3210 } 3211 3212 int ionic_lif_alloc(struct ionic *ionic) 3213 { 3214 struct device *dev = ionic->dev; 3215 union ionic_lif_identity *lid; 3216 struct net_device *netdev; 3217 struct ionic_lif *lif; 3218 int tbl_sz; 3219 int err; 3220 3221 lid = kzalloc(sizeof(*lid), GFP_KERNEL); 3222 if (!lid) 3223 return -ENOMEM; 3224 3225 netdev = alloc_etherdev_mqs(sizeof(*lif), 3226 ionic->ntxqs_per_lif, ionic->ntxqs_per_lif); 3227 if (!netdev) { 3228 dev_err(dev, "Cannot allocate netdev, aborting\n"); 3229 err = -ENOMEM; 3230 goto err_out_free_lid; 3231 } 3232 3233 SET_NETDEV_DEV(netdev, dev); 3234 3235 lif = netdev_priv(netdev); 3236 lif->netdev = netdev; 3237 ionic->lif = lif; 3238 lif->ionic = ionic; 3239 netdev->netdev_ops = &ionic_netdev_ops; 3240 ionic_ethtool_set_ops(netdev); 3241 3242 netdev->watchdog_timeo = 5 * HZ; 3243 netif_carrier_off(netdev); 3244 3245 lif->identity = lid; 3246 lif->lif_type = IONIC_LIF_TYPE_CLASSIC; 3247 err = ionic_lif_identify(ionic, lif->lif_type, lif->identity); 3248 if (err) { 3249 dev_err(ionic->dev, "Cannot identify type %d: %d\n", 3250 lif->lif_type, err); 3251 goto err_out_free_netdev; 3252 } 3253 lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU, 3254 le32_to_cpu(lif->identity->eth.min_frame_size)); 3255 lif->netdev->max_mtu = 3256 le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN; 3257 3258 lif->neqs = ionic->neqs_per_lif; 3259 lif->nxqs = ionic->ntxqs_per_lif; 3260 3261 lif->index = 0; 3262 3263 if (is_kdump_kernel()) { 3264 lif->ntxq_descs = IONIC_MIN_TXRX_DESC; 3265 lif->nrxq_descs = IONIC_MIN_TXRX_DESC; 3266 } else { 3267 lif->ntxq_descs = IONIC_DEF_TXRX_DESC; 3268 lif->nrxq_descs = IONIC_DEF_TXRX_DESC; 3269 } 3270 3271 /* Convert the default coalesce value to actual hw resolution */ 3272 lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT; 3273 lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic, 3274 lif->rx_coalesce_usecs); 3275 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs; 3276 lif->tx_coalesce_hw = lif->rx_coalesce_hw; 3277 set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state); 3278 set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state); 3279 3280 snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index); 3281 3282 mutex_init(&lif->queue_lock); 3283 mutex_init(&lif->config_lock); 3284 mutex_init(&lif->adev_lock); 3285 3286 spin_lock_init(&lif->adminq_lock); 3287 3288 spin_lock_init(&lif->deferred.lock); 3289 INIT_LIST_HEAD(&lif->deferred.list); 3290 INIT_WORK(&lif->deferred.work, 
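/* the work handler drains the deferred op list from the ionic workqueue */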
ionic_lif_deferred_work); 3291 3292 /* allocate lif info */ 3293 lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE); 3294 lif->info = dma_alloc_coherent(dev, lif->info_sz, 3295 &lif->info_pa, GFP_KERNEL); 3296 if (!lif->info) { 3297 dev_err(dev, "Failed to allocate lif info, aborting\n"); 3298 err = -ENOMEM; 3299 goto err_out_free_mutex; 3300 } 3301 3302 ionic_debugfs_add_lif(lif); 3303 3304 err = ionic_affinity_masks_alloc(ionic); 3305 if (err) 3306 goto err_out_free_lif_info; 3307 3308 /* allocate control queues and txrx queue arrays */ 3309 ionic_lif_queue_identify(lif); 3310 err = ionic_qcqs_alloc(lif); 3311 if (err) 3312 goto err_out_free_affinity_masks; 3313 3314 /* allocate rss indirection table */ 3315 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 3316 lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz; 3317 lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz, 3318 &lif->rss_ind_tbl_pa, 3319 GFP_KERNEL); 3320 3321 if (!lif->rss_ind_tbl) { 3322 err = -ENOMEM; 3323 dev_err(dev, "Failed to allocate rss indirection table, aborting\n"); 3324 goto err_out_free_qcqs; 3325 } 3326 netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE); 3327 3328 ionic_lif_alloc_phc(lif); 3329 3330 return 0; 3331 3332 err_out_free_qcqs: 3333 ionic_qcqs_free(lif); 3334 err_out_free_affinity_masks: 3335 ionic_affinity_masks_free(lif->ionic); 3336 err_out_free_lif_info: 3337 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); 3338 lif->info = NULL; 3339 lif->info_pa = 0; 3340 err_out_free_mutex: 3341 mutex_destroy(&lif->adev_lock); 3342 mutex_destroy(&lif->config_lock); 3343 mutex_destroy(&lif->queue_lock); 3344 err_out_free_netdev: 3345 free_netdev(lif->netdev); 3346 lif = NULL; 3347 err_out_free_lid: 3348 kfree(lid); 3349 3350 return err; 3351 } 3352 3353 static void ionic_lif_reset(struct ionic_lif *lif) 3354 { 3355 struct ionic_dev *idev = &lif->ionic->idev; 3356 3357 if (!ionic_is_fw_running(idev)) 3358 return; 3359 3360 mutex_lock(&lif->ionic->dev_cmd_lock); 3361 ionic_dev_cmd_lif_reset(idev, lif->index); 3362 ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 3363 mutex_unlock(&lif->ionic->dev_cmd_lock); 3364 } 3365 3366 static void ionic_lif_handle_fw_down(struct ionic_lif *lif) 3367 { 3368 struct ionic *ionic = lif->ionic; 3369 3370 if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state)) 3371 return; 3372 3373 dev_info(ionic->dev, "FW Down: Stopping LIFs\n"); 3374 3375 netif_device_detach(lif->netdev); 3376 3377 ionic_auxbus_unregister(ionic->lif); 3378 mutex_lock(&lif->queue_lock); 3379 if (test_bit(IONIC_LIF_F_UP, lif->state)) { 3380 dev_info(ionic->dev, "Surprise FW stop, stopping queues\n"); 3381 ionic_stop_queues(lif); 3382 } 3383 3384 if (netif_running(lif->netdev)) { 3385 ionic_txrx_deinit(lif); 3386 ionic_txrx_free(lif); 3387 } 3388 ionic_lif_deinit(lif); 3389 ionic_reset(ionic); 3390 ionic_qcqs_free(lif); 3391 3392 mutex_unlock(&lif->queue_lock); 3393 3394 clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state); 3395 dev_info(ionic->dev, "FW Down: LIFs stopped\n"); 3396 } 3397 3398 int ionic_restart_lif(struct ionic_lif *lif) 3399 { 3400 struct ionic *ionic = lif->ionic; 3401 int err; 3402 3403 mutex_lock(&lif->queue_lock); 3404 3405 if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state)) 3406 dev_info(ionic->dev, "FW Up: clearing broken state\n"); 3407 3408 err = ionic_qcqs_alloc(lif); 3409 if (err) 3410 goto err_unlock; 3411 3412 err = ionic_lif_init(lif); 3413 if (err) 3414 goto err_qcqs_free; 3415 3416 ionic_vf_attr_replay(lif); 3417 3418 if 
(lif->registered) 3419 ionic_lif_set_netdev_info(lif); 3420 3421 ionic_rx_filter_replay(lif); 3422 3423 if (netif_running(lif->netdev)) { 3424 err = ionic_txrx_alloc(lif); 3425 if (err) 3426 goto err_lifs_deinit; 3427 3428 err = ionic_txrx_init(lif); 3429 if (err) 3430 goto err_txrx_free; 3431 } 3432 3433 mutex_unlock(&lif->queue_lock); 3434 3435 clear_bit(IONIC_LIF_F_FW_RESET, lif->state); 3436 ionic_link_status_check_request(lif, CAN_SLEEP); 3437 netif_device_attach(lif->netdev); 3438 ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE); 3439 3440 ionic_auxbus_register(ionic->lif); 3441 3442 return 0; 3443 3444 err_txrx_free: 3445 ionic_txrx_free(lif); 3446 err_lifs_deinit: 3447 ionic_lif_deinit(lif); 3448 err_qcqs_free: 3449 ionic_qcqs_free(lif); 3450 err_unlock: 3451 mutex_unlock(&lif->queue_lock); 3452 3453 return err; 3454 } 3455 3456 static void ionic_lif_handle_fw_up(struct ionic_lif *lif) 3457 { 3458 struct ionic *ionic = lif->ionic; 3459 int err; 3460 3461 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 3462 return; 3463 3464 dev_info(ionic->dev, "FW Up: restarting LIFs\n"); 3465 3466 /* This is a little different from what happens at 3467 * probe time because the LIF already exists so we 3468 * just need to reanimate it. 3469 */ 3470 ionic_init_devinfo(ionic); 3471 ionic_reset(ionic); 3472 err = ionic_identify(ionic); 3473 if (err) 3474 goto err_out; 3475 err = ionic_port_identify(ionic); 3476 if (err) 3477 goto err_out; 3478 err = ionic_port_init(ionic); 3479 if (err) 3480 goto err_out; 3481 3482 err = ionic_restart_lif(lif); 3483 if (err) 3484 goto err_out; 3485 3486 dev_info(ionic->dev, "FW Up: LIFs restarted\n"); 3487 3488 /* restore the hardware timestamping queues */ 3489 ionic_lif_hwstamp_replay(lif); 3490 3491 return; 3492 3493 err_out: 3494 dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err); 3495 } 3496 3497 void ionic_lif_free(struct ionic_lif *lif) 3498 { 3499 struct device *dev = lif->ionic->dev; 3500 3501 ionic_lif_free_phc(lif); 3502 3503 /* free rss indirection table */ 3504 dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl, 3505 lif->rss_ind_tbl_pa); 3506 lif->rss_ind_tbl = NULL; 3507 lif->rss_ind_tbl_pa = 0; 3508 3509 /* free queues */ 3510 ionic_qcqs_free(lif); 3511 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 3512 ionic_lif_reset(lif); 3513 3514 ionic_affinity_masks_free(lif->ionic); 3515 3516 /* free lif info */ 3517 kfree(lif->identity); 3518 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); 3519 lif->info = NULL; 3520 lif->info_pa = 0; 3521 3522 mutex_destroy(&lif->config_lock); 3523 mutex_destroy(&lif->queue_lock); 3524 mutex_destroy(&lif->adev_lock); 3525 3526 /* free netdev & lif */ 3527 ionic_debugfs_del_lif(lif); 3528 free_netdev(lif->netdev); 3529 } 3530 3531 void ionic_lif_deinit(struct ionic_lif *lif) 3532 { 3533 if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state)) 3534 return; 3535 3536 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { 3537 cancel_work_sync(&lif->deferred.work); 3538 cancel_work_sync(&lif->tx_timeout_work); 3539 ionic_rx_filters_deinit(lif); 3540 if (lif->netdev->features & NETIF_F_RXHASH) 3541 ionic_lif_rss_deinit(lif); 3542 } 3543 3544 napi_disable(&lif->adminqcq->napi); 3545 ionic_lif_qcq_deinit(lif, lif->notifyqcq); 3546 ionic_lif_qcq_deinit(lif, lif->adminqcq); 3547 3548 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); 3549 lif->kern_dbpage = NULL; 3550 3551 ionic_lif_reset(lif); 3552 } 3553 3554 static int ionic_lif_adminq_init(struct ionic_lif *lif) 3555 { 3556 struct 
device *dev = lif->ionic->dev; 3557 struct ionic_q_init_comp comp; 3558 struct ionic_dev *idev; 3559 struct ionic_qcq *qcq; 3560 struct ionic_queue *q; 3561 int err; 3562 3563 idev = &lif->ionic->idev; 3564 qcq = lif->adminqcq; 3565 q = &qcq->q; 3566 3567 mutex_lock(&lif->ionic->dev_cmd_lock); 3568 ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index); 3569 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 3570 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); 3571 mutex_unlock(&lif->ionic->dev_cmd_lock); 3572 if (err) { 3573 netdev_err(lif->netdev, "adminq init failed %d\n", err); 3574 return err; 3575 } 3576 3577 q->hw_type = comp.hw_type; 3578 q->hw_index = le32_to_cpu(comp.hw_index); 3579 q->dbval = IONIC_DBELL_QID(q->hw_index); 3580 3581 dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type); 3582 dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index); 3583 3584 q->dbell_deadline = IONIC_ADMIN_DOORBELL_DEADLINE; 3585 q->dbell_jiffies = jiffies; 3586 3587 netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi); 3588 3589 napi_enable(&qcq->napi); 3590 3591 if (qcq->flags & IONIC_QCQ_F_INTR) { 3592 irq_set_affinity_hint(qcq->intr.vector, 3593 *qcq->intr.affinity_mask); 3594 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 3595 IONIC_INTR_MASK_CLEAR); 3596 } 3597 3598 qcq->flags |= IONIC_QCQ_F_INITED; 3599 3600 return 0; 3601 } 3602 3603 static int ionic_lif_notifyq_init(struct ionic_lif *lif) 3604 { 3605 struct ionic_qcq *qcq = lif->notifyqcq; 3606 struct device *dev = lif->ionic->dev; 3607 struct ionic_queue *q = &qcq->q; 3608 int err; 3609 3610 struct ionic_admin_ctx ctx = { 3611 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 3612 .cmd.q_init = { 3613 .opcode = IONIC_CMD_Q_INIT, 3614 .lif_index = cpu_to_le16(lif->index), 3615 .type = q->type, 3616 .ver = lif->qtype_info[q->type].version, 3617 .index = cpu_to_le32(q->index), 3618 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | 3619 IONIC_QINIT_F_ENA), 3620 .intr_index = cpu_to_le16(lif->adminqcq->intr.index), 3621 .pid = cpu_to_le16(q->pid), 3622 .ring_size = ilog2(q->num_descs), 3623 .ring_base = cpu_to_le64(q->base_pa), 3624 } 3625 }; 3626 3627 dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid); 3628 dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index); 3629 dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); 3630 dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); 3631 3632 err = ionic_adminq_post_wait(lif, &ctx); 3633 if (err) 3634 return err; 3635 3636 lif->last_eid = 0; 3637 q->hw_type = ctx.comp.q_init.hw_type; 3638 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); 3639 q->dbval = IONIC_DBELL_QID(q->hw_index); 3640 3641 dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type); 3642 dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index); 3643 3644 /* preset the callback info */ 3645 q->admin_info[0].ctx = lif; 3646 3647 qcq->flags |= IONIC_QCQ_F_INITED; 3648 3649 return 0; 3650 } 3651 3652 static int ionic_station_set(struct ionic_lif *lif) 3653 { 3654 struct net_device *netdev = lif->netdev; 3655 struct ionic_admin_ctx ctx = { 3656 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 3657 .cmd.lif_getattr = { 3658 .opcode = IONIC_CMD_LIF_GETATTR, 3659 .index = cpu_to_le16(lif->index), 3660 .attr = IONIC_LIF_ATTR_MAC, 3661 }, 3662 }; 3663 u8 mac_address[ETH_ALEN]; 3664 struct sockaddr addr; 3665 int err; 3666 3667 err = ionic_adminq_post_wait(lif, &ctx); 3668 if (err) 3669 return err; 3670 netdev_dbg(lif->netdev, "found initial MAC addr %pM\n", 3671 
ctx.comp.lif_getattr.mac); 3672 ether_addr_copy(mac_address, ctx.comp.lif_getattr.mac); 3673 3674 if (is_zero_ether_addr(mac_address)) { 3675 eth_hw_addr_random(netdev); 3676 netdev_dbg(netdev, "Random MAC generated: %pM\n", netdev->dev_addr); 3677 ether_addr_copy(mac_address, netdev->dev_addr); 3678 3679 err = ionic_program_mac(lif, mac_address); 3680 if (err < 0) 3681 return err; 3682 3683 if (err > 0) { 3684 netdev_dbg(netdev, "%s: SET/GET ATTR MAC addrs differ - old FW running\n", 3685 __func__); 3686 return 0; 3687 } 3688 } 3689 3690 if (!is_zero_ether_addr(netdev->dev_addr)) { 3691 /* If the netdev mac is non-zero and doesn't match the default 3692 * device address, it was set by something earlier and we're 3693 * likely here again after a fw-upgrade reset. We need to be 3694 * sure the netdev mac is in our filter list. 3695 */ 3696 if (!ether_addr_equal(mac_address, netdev->dev_addr)) 3697 ionic_lif_addr_add(lif, netdev->dev_addr); 3698 } else { 3699 /* Update the netdev mac with the device's mac */ 3700 ether_addr_copy(addr.sa_data, mac_address); 3701 addr.sa_family = AF_INET; 3702 err = eth_prepare_mac_addr_change(netdev, &addr); 3703 if (err) { 3704 netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n", 3705 addr.sa_data, err); 3706 return 0; 3707 } 3708 3709 eth_commit_mac_addr_change(netdev, &addr); 3710 } 3711 3712 netdev_dbg(lif->netdev, "adding station MAC addr %pM\n", 3713 netdev->dev_addr); 3714 ionic_lif_addr_add(lif, netdev->dev_addr); 3715 3716 return 0; 3717 } 3718 3719 int ionic_lif_init(struct ionic_lif *lif) 3720 { 3721 struct ionic_dev *idev = &lif->ionic->idev; 3722 struct device *dev = lif->ionic->dev; 3723 struct ionic_lif_init_comp comp; 3724 int dbpage_num; 3725 int err; 3726 3727 mutex_lock(&lif->ionic->dev_cmd_lock); 3728 ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa); 3729 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 3730 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); 3731 mutex_unlock(&lif->ionic->dev_cmd_lock); 3732 if (err) 3733 return err; 3734 3735 lif->hw_index = le16_to_cpu(comp.hw_index); 3736 3737 /* now that we have the hw_index we can figure out our doorbell page */ 3738 lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif); 3739 if (!lif->dbid_count) { 3740 dev_err(dev, "No doorbell pages, aborting\n"); 3741 return -EINVAL; 3742 } 3743 3744 lif->kern_pid = 0; 3745 dbpage_num = ionic_db_page_num(lif, lif->kern_pid); 3746 lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num); 3747 if (!lif->kern_dbpage) { 3748 dev_err(dev, "Cannot map dbpage, aborting\n"); 3749 return -ENOMEM; 3750 } 3751 3752 err = ionic_lif_adminq_init(lif); 3753 if (err) 3754 goto err_out_adminq_deinit; 3755 3756 if (lif->ionic->nnqs_per_lif) { 3757 err = ionic_lif_notifyq_init(lif); 3758 if (err) 3759 goto err_out_notifyq_deinit; 3760 } 3761 3762 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 3763 err = ionic_set_nic_features(lif, lif->netdev->features); 3764 else 3765 err = ionic_init_nic_features(lif); 3766 if (err) 3767 goto err_out_notifyq_deinit; 3768 3769 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { 3770 err = ionic_rx_filters_init(lif); 3771 if (err) 3772 goto err_out_notifyq_deinit; 3773 } 3774 3775 err = ionic_station_set(lif); 3776 if (err) 3777 goto err_out_notifyq_deinit; 3778 3779 lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT; 3780 lif->doorbell_wa = ionic_doorbell_wa(lif->ionic); 3781 3782 set_bit(IONIC_LIF_F_INITED, lif->state); 3783 3784 INIT_WORK(&lif->tx_timeout_work,
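/* the handler restarts the queues under queue_lock */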
ionic_tx_timeout_work); 3785 3786 return 0; 3787 3788 err_out_notifyq_deinit: 3789 napi_disable(&lif->adminqcq->napi); 3790 ionic_lif_qcq_deinit(lif, lif->notifyqcq); 3791 err_out_adminq_deinit: 3792 ionic_lif_qcq_deinit(lif, lif->adminqcq); 3793 ionic_lif_reset(lif); 3794 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); 3795 lif->kern_dbpage = NULL; 3796 3797 return err; 3798 } 3799 3800 static void ionic_lif_set_netdev_info(struct ionic_lif *lif) 3801 { 3802 struct ionic_admin_ctx ctx = { 3803 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 3804 .cmd.lif_setattr = { 3805 .opcode = IONIC_CMD_LIF_SETATTR, 3806 .index = cpu_to_le16(lif->index), 3807 .attr = IONIC_LIF_ATTR_NAME, 3808 }, 3809 }; 3810 3811 strscpy(ctx.cmd.lif_setattr.name, netdev_name(lif->netdev), 3812 sizeof(ctx.cmd.lif_setattr.name)); 3813 3814 ionic_adminq_post_wait(lif, &ctx); 3815 } 3816 3817 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev) 3818 { 3819 if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit) 3820 return NULL; 3821 3822 return netdev_priv(netdev); 3823 } 3824 3825 static int ionic_lif_notify(struct notifier_block *nb, 3826 unsigned long event, void *info) 3827 { 3828 struct net_device *ndev = netdev_notifier_info_to_dev(info); 3829 struct ionic *ionic = container_of(nb, struct ionic, nb); 3830 struct ionic_lif *lif = ionic_netdev_lif(ndev); 3831 3832 if (!lif || lif->ionic != ionic) 3833 return NOTIFY_DONE; 3834 3835 switch (event) { 3836 case NETDEV_CHANGENAME: 3837 ionic_lif_set_netdev_info(lif); 3838 break; 3839 } 3840 3841 return NOTIFY_DONE; 3842 } 3843 3844 int ionic_lif_register(struct ionic_lif *lif) 3845 { 3846 int err; 3847 3848 ionic_lif_register_phc(lif); 3849 3850 lif->ionic->nb.notifier_call = ionic_lif_notify; 3851 3852 err = register_netdevice_notifier(&lif->ionic->nb); 3853 if (err) 3854 lif->ionic->nb.notifier_call = NULL; 3855 3856 /* only register LIF0 for now */ 3857 err = register_netdev(lif->netdev); 3858 if (err) { 3859 dev_err(lif->ionic->dev, "Cannot register net device: %d, aborting\n", err); 3860 ionic_lif_unregister(lif); 3861 return err; 3862 } 3863 3864 ionic_link_status_check_request(lif, CAN_SLEEP); 3865 lif->registered = true; 3866 ionic_lif_set_netdev_info(lif); 3867 3868 return 0; 3869 } 3870 3871 void ionic_lif_unregister(struct ionic_lif *lif) 3872 { 3873 if (lif->ionic->nb.notifier_call) { 3874 unregister_netdevice_notifier(&lif->ionic->nb); 3875 lif->ionic->nb.notifier_call = NULL; 3876 } 3877 3878 if (lif->netdev->reg_state == NETREG_REGISTERED) 3879 unregister_netdev(lif->netdev); 3880 3881 ionic_lif_unregister_phc(lif); 3882 3883 lif->registered = false; 3884 } 3885 3886 static void ionic_lif_queue_identify(struct ionic_lif *lif) 3887 { 3888 union ionic_q_identity __iomem *q_ident; 3889 struct ionic *ionic = lif->ionic; 3890 struct ionic_dev *idev; 3891 u16 max_frags; 3892 int qtype; 3893 int err; 3894 3895 idev = &lif->ionic->idev; 3896 q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data; 3897 3898 for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) { 3899 struct ionic_qtype_info *qti = &lif->qtype_info[qtype]; 3900 3901 /* filter out the ones we know about */ 3902 switch (qtype) { 3903 case IONIC_QTYPE_ADMINQ: 3904 case IONIC_QTYPE_NOTIFYQ: 3905 case IONIC_QTYPE_RXQ: 3906 case IONIC_QTYPE_TXQ: 3907 break; 3908 default: 3909 continue; 3910 } 3911 3912 memset(qti, 0, sizeof(*qti)); 3913 3914 mutex_lock(&ionic->dev_cmd_lock); 3915 ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype, 3916 
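/* highest version of this qtype the driver supports */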
ionic_qtype_versions[qtype]); 3917 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); 3918 if (!err) { 3919 qti->version = readb(&q_ident->version); 3920 qti->supported = readb(&q_ident->supported); 3921 qti->features = readq(&q_ident->features); 3922 qti->desc_sz = readw(&q_ident->desc_sz); 3923 qti->comp_sz = readw(&q_ident->comp_sz); 3924 qti->sg_desc_sz = readw(&q_ident->sg_desc_sz); 3925 qti->max_sg_elems = readw(&q_ident->max_sg_elems); 3926 qti->sg_desc_stride = readw(&q_ident->sg_desc_stride); 3927 } 3928 mutex_unlock(&ionic->dev_cmd_lock); 3929 3930 if (err == -EINVAL) { 3931 dev_err(ionic->dev, "qtype %d not supported\n", qtype); 3932 continue; 3933 } else if (err == -EIO) { 3934 dev_err(ionic->dev, "q_ident failed, not supported on older FW\n"); 3935 return; 3936 } else if (err) { 3937 dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n", 3938 qtype, err); 3939 return; 3940 } 3941 3942 dev_dbg(ionic->dev, " qtype[%d].version = %d\n", 3943 qtype, qti->version); 3944 dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n", 3945 qtype, qti->supported); 3946 dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n", 3947 qtype, qti->features); 3948 dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n", 3949 qtype, qti->desc_sz); 3950 dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n", 3951 qtype, qti->comp_sz); 3952 dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n", 3953 qtype, qti->sg_desc_sz); 3954 dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n", 3955 qtype, qti->max_sg_elems); 3956 dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n", 3957 qtype, qti->sg_desc_stride); 3958 3959 if (qtype == IONIC_QTYPE_TXQ) 3960 max_frags = IONIC_TX_MAX_FRAGS; 3961 else if (qtype == IONIC_QTYPE_RXQ) 3962 max_frags = IONIC_RX_MAX_FRAGS; 3963 else 3964 max_frags = 1; 3965 3966 qti->max_sg_elems = min_t(u16, max_frags - 1, MAX_SKB_FRAGS); 3967 dev_dbg(ionic->dev, "qtype %d max_sg_elems %d\n", 3968 qtype, qti->max_sg_elems); 3969 } 3970 } 3971 3972 int ionic_lif_identify(struct ionic *ionic, u8 lif_type, 3973 union ionic_lif_identity *lid) 3974 { 3975 struct ionic_dev *idev = &ionic->idev; 3976 size_t sz; 3977 int err; 3978 3979 sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data)); 3980 3981 mutex_lock(&ionic->dev_cmd_lock); 3982 ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1); 3983 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); 3984 memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz); 3985 mutex_unlock(&ionic->dev_cmd_lock); 3986 if (err) 3987 return err; 3988 3989 dev_dbg(ionic->dev, "capabilities 0x%llx\n", 3990 le64_to_cpu(lid->capabilities)); 3991 3992 dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n", 3993 le32_to_cpu(lid->eth.max_ucast_filters)); 3994 dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n", 3995 le32_to_cpu(lid->eth.max_mcast_filters)); 3996 dev_dbg(ionic->dev, "eth.features 0x%llx\n", 3997 le64_to_cpu(lid->eth.config.features)); 3998 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n", 3999 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ])); 4000 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n", 4001 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ])); 4002 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n", 4003 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ])); 4004 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n", 4005 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ])); 4006 dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name); 4007 dev_dbg(ionic->dev,
"eth.config.mac %pM\n", lid->eth.config.mac); 4008 dev_dbg(ionic->dev, "eth.config.mtu %d\n", 4009 le32_to_cpu(lid->eth.config.mtu)); 4010 4011 return 0; 4012 } 4013 4014 int ionic_lif_size(struct ionic *ionic) 4015 { 4016 struct ionic_identity *ident = &ionic->ident; 4017 unsigned int nintrs, dev_nintrs; 4018 union ionic_lif_config *lc; 4019 unsigned int ntxqs_per_lif; 4020 unsigned int nrxqs_per_lif; 4021 unsigned int neqs_per_lif; 4022 unsigned int nnqs_per_lif; 4023 unsigned int nxqs, neqs; 4024 unsigned int min_intrs; 4025 int err; 4026 4027 /* retrieve basic values from FW */ 4028 lc = &ident->lif.eth.config; 4029 dev_nintrs = le32_to_cpu(ident->dev.nintrs); 4030 neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count); 4031 nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]); 4032 ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]); 4033 nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]); 4034 4035 /* limit values to play nice with kdump */ 4036 if (is_kdump_kernel()) { 4037 dev_nintrs = 2; 4038 neqs_per_lif = 0; 4039 nnqs_per_lif = 0; 4040 ntxqs_per_lif = 1; 4041 nrxqs_per_lif = 1; 4042 } 4043 4044 /* reserve last queue id for hardware timestamping */ 4045 if (lc->features & cpu_to_le64(IONIC_ETH_HW_TIMESTAMP)) { 4046 if (ntxqs_per_lif <= 1 || nrxqs_per_lif <= 1) { 4047 lc->features &= cpu_to_le64(~IONIC_ETH_HW_TIMESTAMP); 4048 } else { 4049 ntxqs_per_lif -= 1; 4050 nrxqs_per_lif -= 1; 4051 } 4052 } 4053 4054 nxqs = min(ntxqs_per_lif, nrxqs_per_lif); 4055 nxqs = min(nxqs, num_online_cpus()); 4056 neqs = min(neqs_per_lif, num_online_cpus()); 4057 4058 try_again: 4059 /* interrupt usage: 4060 * 1 for master lif adminq/notifyq 4061 * 1 for each CPU for master lif TxRx queue pairs 4062 * whatever's left is for RDMA queues 4063 */ 4064 nintrs = 1 + nxqs + neqs; 4065 min_intrs = 2; /* adminq + 1 TxRx queue pair */ 4066 4067 if (nintrs > dev_nintrs) 4068 goto try_fewer; 4069 4070 err = ionic_bus_alloc_irq_vectors(ionic, nintrs); 4071 if (err < 0 && err != -ENOSPC) { 4072 dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err); 4073 return err; 4074 } 4075 if (err == -ENOSPC) 4076 goto try_fewer; 4077 4078 if (err != nintrs) { 4079 ionic_bus_free_irq_vectors(ionic); 4080 goto try_fewer; 4081 } 4082 4083 ionic->nnqs_per_lif = nnqs_per_lif; 4084 ionic->neqs_per_lif = neqs; 4085 ionic->ntxqs_per_lif = nxqs; 4086 ionic->nrxqs_per_lif = nxqs; 4087 ionic->nintrs = nintrs; 4088 4089 ionic_debugfs_add_sizes(ionic); 4090 4091 return 0; 4092 4093 try_fewer: 4094 if (nnqs_per_lif > 1) { 4095 nnqs_per_lif >>= 1; 4096 goto try_again; 4097 } 4098 if (neqs > 1) { 4099 neqs >>= 1; 4100 goto try_again; 4101 } 4102 if (nxqs > 1) { 4103 nxqs >>= 1; 4104 goto try_again; 4105 } 4106 dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs); 4107 return -ENOSPC; 4108 } 4109