// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ethtool.h>
#include <linux/printk.h>
#include <linux/dynamic_debug.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/cpumask.h>
#include <linux/crash_dump.h>
#include <linux/vmalloc.h>

#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_dev.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"

/* queuetype support level */
static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
	[IONIC_QTYPE_ADMINQ]  = 0,	/* 0 = Base version with CQ support */
	[IONIC_QTYPE_NOTIFYQ] = 0,	/* 0 = Base version */
	[IONIC_QTYPE_RXQ]     = 2,	/* 0 = Base version with CQ+SG support
					 * 2 =   ... with CMB rings
					 */
	[IONIC_QTYPE_TXQ]     = 3,	/* 0 = Base version with CQ+SG support
					 * 1 =   ... with Tx SG version 1
					 * 3 =   ... with CMB rings
					 */
};

static void ionic_link_status_check(struct ionic_lif *lif);
static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
static void ionic_lif_set_netdev_info(struct ionic_lif *lif);

static void ionic_txrx_deinit(struct ionic_lif *lif);
static int ionic_txrx_init(struct ionic_lif *lif);
static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);

static int ionic_xdp_queues_config(struct ionic_lif *lif);
static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q);

static void ionic_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct dim_cq_moder cur_moder;
	struct ionic_intr_info *intr;
	struct ionic_qcq *qcq;
	struct ionic_lif *lif;
	struct ionic_queue *q;
	u32 new_coal;

	qcq = container_of(dim, struct ionic_qcq, dim);
	q = &qcq->q;
	if (q->type == IONIC_QTYPE_RXQ)
		cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	else
		cur_moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
	lif = q->lif;
	new_coal = ionic_coal_usec_to_hw(lif->ionic, cur_moder.usec);
	new_coal = new_coal ? new_coal : 1;

	intr = &qcq->intr;
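	/* Only touch the coalesce register when the value actually
	 * changes, to avoid a register write on every DIM cycle.
	 */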
	if (intr->dim_coal_hw != new_coal) {
		intr->dim_coal_hw = new_coal;

		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     intr->index, intr->dim_coal_hw);
	}

	dim->state = DIM_START_MEASURE;
}

static void ionic_lif_deferred_work(struct work_struct *work)
{
	struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
	struct ionic_deferred *def = &lif->deferred;
	struct ionic_deferred_work *w = NULL;

	do {
		spin_lock_bh(&def->lock);
		if (!list_empty(&def->list)) {
			w = list_first_entry(&def->list,
					     struct ionic_deferred_work, list);
			list_del(&w->list);
		}
		spin_unlock_bh(&def->lock);

		if (!w)
			break;

		switch (w->type) {
		case IONIC_DW_TYPE_RX_MODE:
			ionic_lif_rx_mode(lif);
			break;
		case IONIC_DW_TYPE_LINK_STATUS:
			ionic_link_status_check(lif);
			break;
		case IONIC_DW_TYPE_LIF_RESET:
			if (w->fw_status) {
				ionic_lif_handle_fw_up(lif);
			} else {
				ionic_lif_handle_fw_down(lif);

				/* Fire off another watchdog to see
				 * if the FW is already back rather than
				 * waiting another whole cycle
				 */
				mod_timer(&lif->ionic->watchdog_timer, jiffies + 1);
			}
			break;
		default:
			break;
		}
		kfree(w);
		w = NULL;
	} while (true);
}

void ionic_lif_deferred_enqueue(struct ionic_lif *lif,
				struct ionic_deferred_work *work)
{
	spin_lock_bh(&lif->deferred.lock);
	list_add_tail(&work->list, &lif->deferred.list);
	spin_unlock_bh(&lif->deferred.lock);
	queue_work(lif->ionic->wq, &lif->deferred.work);
}

static void ionic_link_status_check(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	u16 link_status;
	bool link_up;

	if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	/* Don't put carrier back up if we're in a broken state */
	if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) {
		clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
		return;
	}

	link_status = le16_to_cpu(lif->info->status.link_status);
	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;

	if (link_up) {
		int err = 0;

		if (netdev->flags & IFF_UP && netif_running(netdev)) {
			mutex_lock(&lif->queue_lock);
			err = ionic_start_queues(lif);
			if (err && err != -EBUSY) {
				netdev_err(netdev,
					   "Failed to start queues: %d\n", err);
				set_bit(IONIC_LIF_F_BROKEN, lif->state);
				netif_carrier_off(lif->netdev);
			}
			mutex_unlock(&lif->queue_lock);
		}

		if (!err && !netif_carrier_ok(netdev)) {
			ionic_port_identify(lif->ionic);
			netdev_info(netdev, "Link up - %d Gbps\n",
				    le32_to_cpu(lif->info->status.link_speed) / 1000);
			netif_carrier_on(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			lif->link_down_count++;
			netdev_info(netdev, "Link down\n");
			netif_carrier_off(netdev);
		}

		if (netdev->flags & IFF_UP && netif_running(netdev)) {
			mutex_lock(&lif->queue_lock);
			ionic_stop_queues(lif);
			mutex_unlock(&lif->queue_lock);
		}
	}

	clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
}

void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
{
	struct ionic_deferred_work *work;

	/* we only need one request outstanding at a time */
	if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	if (!can_sleep) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
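		/* if we can't defer the check, drop this request but
		 * clear the flag so a later link event can try again
		 */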
		if (!work) {
			clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
			return;
		}

		work->type = IONIC_DW_TYPE_LINK_STATUS;
		ionic_lif_deferred_enqueue(lif, work);
	} else {
		ionic_link_status_check(lif);
	}
}

static irqreturn_t ionic_isr(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_intr_info *intr = &qcq->intr;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	const char *name;

	if (lif->registered)
		name = netdev_name(lif->netdev);
	else
		name = dev_name(dev);

	snprintf(intr->name, sizeof(intr->name),
		 "%.5s-%.16s-%.8s", IONIC_DRV_NAME, name, q->name);

	return devm_request_irq(dev, intr->vector, ionic_isr,
				0, intr->name, &qcq->napi);
}

static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
{
	struct ionic *ionic = lif->ionic;
	int index;

	index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
	if (index == ionic->nintrs) {
		netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
			    __func__, index, ionic->nintrs);
		return -ENOSPC;
	}

	set_bit(index, ionic->intrs);
	ionic_intr_init(&ionic->idev, intr, index);

	return 0;
}

static void ionic_intr_free(struct ionic *ionic, int index)
{
	if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
		clear_bit(index, ionic->intrs);
}

static void ionic_irq_aff_notify(struct irq_affinity_notify *notify,
				 const cpumask_t *mask)
{
	struct ionic_intr_info *intr = container_of(notify, struct ionic_intr_info, aff_notify);

	cpumask_copy(*intr->affinity_mask, mask);
}

static void ionic_irq_aff_release(struct kref __always_unused *ref)
{
}

static int ionic_qcq_enable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = q->lif;
	struct ionic_dev *idev;
	struct device *dev;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.oper = IONIC_Q_ENABLE,
		},
	};
	int ret;

	idev = &lif->ionic->idev;
	dev = lif->ionic->dev;

	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	if (qcq->flags & IONIC_QCQ_F_INTR)
		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);

	ret = ionic_adminq_post_wait(lif, &ctx);
	if (ret)
		return ret;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		napi_enable(&qcq->napi);
		irq_set_affinity_notifier(qcq->intr.vector,
					  &qcq->intr.aff_notify);
		irq_set_affinity_hint(qcq->intr.vector,
				      *qcq->intr.affinity_mask);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);
	}

	return 0;
}

static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err)
{
	struct ionic_queue *q;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.oper = IONIC_Q_DISABLE,
		},
	};

	if (!qcq) {
		netdev_err(lif->netdev, "%s: bad qcq\n", __func__);
		return -ENXIO;
	}

	q = &qcq->q;

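	/* quiesce the interrupt and NAPI context before telling the
	 * device to disable the queue
	 */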
	if (qcq->flags & IONIC_QCQ_F_INTR) {
		struct ionic_dev *idev = &lif->ionic->idev;

		if (lif->doorbell_wa)
			cancel_work_sync(&qcq->doorbell_napi_work);
		cancel_work_sync(&qcq->dim.work);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		synchronize_irq(qcq->intr.vector);
		irq_set_affinity_notifier(qcq->intr.vector, NULL);
		irq_set_affinity_hint(qcq->intr.vector, NULL);
		napi_disable(&qcq->napi);
	}

	/* If there was a previous fw communication error, don't bother with
	 * sending the adminq command and just return the same error value.
	 */
	if (fw_err == -ETIMEDOUT || fw_err == -ENXIO)
		return fw_err;

	ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
	ctx.cmd.q_control.type = q->type;
	ctx.cmd.q_control.index = cpu_to_le32(q->index);
	dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	return ionic_adminq_post_wait(lif, &ctx);
}

static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	if (!qcq)
		return;

	if (!(qcq->flags & IONIC_QCQ_F_INITED))
		return;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		netif_napi_del(&qcq->napi);
	}

	qcq->flags &= ~IONIC_QCQ_F_INITED;
}

static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
		return;

	irq_set_affinity_hint(qcq->intr.vector, NULL);
	devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
	qcq->intr.vector = 0;
	ionic_intr_free(lif->ionic, qcq->intr.index);
	qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}

static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;

	if (!qcq)
		return;

	ionic_debugfs_del_qcq(qcq);

	if (qcq->q_base) {
		dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
		qcq->q_base = NULL;
		qcq->q_base_pa = 0;
	}

	if (qcq->cmb_q_base) {
		iounmap(qcq->cmb_q_base);
		ionic_put_cmb(lif, qcq->cmb_pgid, qcq->cmb_order);
		qcq->cmb_pgid = 0;
		qcq->cmb_order = 0;
		qcq->cmb_q_base = NULL;
		qcq->cmb_q_base_pa = 0;
	}

	if (qcq->cq_base) {
		dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
		qcq->cq_base = NULL;
		qcq->cq_base_pa = 0;
	}

	if (qcq->sg_base) {
		dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
		qcq->sg_base = NULL;
		qcq->sg_base_pa = 0;
	}

	ionic_xdp_unregister_rxq_info(&qcq->q);
	ionic_qcq_intr_free(lif, qcq);

	vfree(qcq->q.info);
	qcq->q.info = NULL;
}

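/* Free the lif's queue pair structures and per-queue stats arrays;
 * the adminq pointer is cleared under adminq_lock before the free so
 * nothing else can pick it up mid-teardown.
 */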
void ionic_qcqs_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_qcq *adminqcq;
	unsigned long irqflags;

	if (lif->notifyqcq) {
		ionic_qcq_free(lif, lif->notifyqcq);
		devm_kfree(dev, lif->notifyqcq);
		lif->notifyqcq = NULL;
	}

	if (lif->adminqcq) {
		spin_lock_irqsave(&lif->adminq_lock, irqflags);
		adminqcq = READ_ONCE(lif->adminqcq);
		lif->adminqcq = NULL;
		spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
		if (adminqcq) {
			ionic_qcq_free(lif, adminqcq);
			devm_kfree(dev, adminqcq);
		}
	}

	if (lif->rxqcqs) {
		devm_kfree(dev, lif->rxqstats);
		lif->rxqstats = NULL;
		devm_kfree(dev, lif->rxqcqs);
		lif->rxqcqs = NULL;
	}

	if (lif->txqcqs) {
		devm_kfree(dev, lif->txqstats);
		lif->txqstats = NULL;
		devm_kfree(dev, lif->txqcqs);
		lif->txqcqs = NULL;
	}
}

static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
				      struct ionic_qcq *n_qcq)
{
	n_qcq->intr.vector = src_qcq->intr.vector;
	n_qcq->intr.index = src_qcq->intr.index;
}

static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	cpumask_var_t *affinity_mask;
	int err;

	if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
		qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
		return 0;
	}

	err = ionic_intr_alloc(lif, &qcq->intr);
	if (err) {
		netdev_warn(lif->netdev, "no intr for %s: %d\n",
			    qcq->q.name, err);
		goto err_out;
	}

	err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
	if (err < 0) {
		netdev_warn(lif->netdev, "no vector for %s: %d\n",
			    qcq->q.name, err);
		goto err_out_free_intr;
	}
	qcq->intr.vector = err;
	ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
			       IONIC_INTR_MASK_SET);

	err = ionic_request_irq(lif, qcq);
	if (err) {
		netdev_warn(lif->netdev, "irq request failed %d\n", err);
		goto err_out_free_intr;
	}

	/* try to get the irq on the local numa node first */
	affinity_mask = &lif->ionic->affinity_masks[qcq->intr.index];
	if (cpumask_empty(*affinity_mask)) {
		unsigned int cpu;

		cpu = cpumask_local_spread(qcq->intr.index,
					   dev_to_node(lif->ionic->dev));
		if (cpu != -1)
			cpumask_set_cpu(cpu, *affinity_mask);
	}

	qcq->intr.affinity_mask = affinity_mask;
	qcq->intr.aff_notify.notify = ionic_irq_aff_notify;
	qcq->intr.aff_notify.release = ionic_irq_aff_release;

	netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
	return 0;

err_out_free_intr:
	ionic_intr_free(lif->ionic, qcq->intr.index);
err_out:
	return err;
}

static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
			   unsigned int index,
			   const char *name, unsigned int flags,
			   unsigned int num_descs, unsigned int desc_size,
			   unsigned int cq_desc_size,
			   unsigned int sg_desc_size,
			   unsigned int desc_info_size,
			   unsigned int pid, struct ionic_qcq **qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	struct ionic_qcq *new;
	int err;

	*qcq = NULL;

	new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
	if (!new) {
		netdev_err(lif->netdev, "Cannot allocate queue structure\n");
		err = -ENOMEM;
		goto err_out;
	}

	new->q.dev = dev;
	new->flags = flags;

	new->q.info = vcalloc(num_descs, desc_info_size);
	if (!new->q.info) {
		netdev_err(lif->netdev, "Cannot allocate queue info\n");
		err = -ENOMEM;
		goto err_out_free_qcq;
	}

	new->q.type = type;
	new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;

	err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
			   desc_size, sg_desc_size, pid);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize queue\n");
		goto err_out_free_q_info;
	}

	err = ionic_alloc_qcq_interrupt(lif, new);
	if (err)
		goto err_out_free_q_info;

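	/* set up the completion side, tied to this qcq's interrupt */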
	err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize completion queue\n");
		goto err_out_free_irq;
	}

	if (flags & IONIC_QCQ_F_NOTIFYQ) {
		int q_size;

		/* q & cq need to be contiguous in NotifyQ, so alloc it all in q
		 * and don't alloc qc.  We leave new->qc_size and new->qc_base
		 * as 0 to be sure we don't try to free it later.
		 */
		q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
		new->q_size = PAGE_SIZE + q_size +
			      ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
		new->q_base = dma_alloc_coherent(dev, new->q_size,
						 &new->q_base_pa, GFP_KERNEL);
		if (!new->q_base) {
			netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_irq;
		}
		new->q.base = PTR_ALIGN(new->q_base, PAGE_SIZE);
		new->q.base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);

		/* Base the NotifyQ cq.base off of the ALIGNed q.base */
		new->cq.base = PTR_ALIGN(new->q.base + q_size, PAGE_SIZE);
		new->cq.base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
		new->cq.bound_q = &new->q;
	} else {
		/* regular DMA q descriptors */
		new->q_size = PAGE_SIZE + (num_descs * desc_size);
		new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
						 GFP_KERNEL);
		if (!new->q_base) {
			netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_irq;
		}
		new->q.base = PTR_ALIGN(new->q_base, PAGE_SIZE);
		new->q.base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);

		if (flags & IONIC_QCQ_F_CMB_RINGS) {
			/* on-chip CMB q descriptors */
			new->cmb_q_size = num_descs * desc_size;
			new->cmb_order = order_base_2(new->cmb_q_size / PAGE_SIZE);

			err = ionic_get_cmb(lif, &new->cmb_pgid, &new->cmb_q_base_pa,
					    new->cmb_order);
			if (err) {
				netdev_err(lif->netdev,
					   "Cannot allocate queue order %d from cmb: err %d\n",
					   new->cmb_order, err);
				goto err_out_free_q;
			}

			new->cmb_q_base = ioremap_wc(new->cmb_q_base_pa, new->cmb_q_size);
			if (!new->cmb_q_base) {
				netdev_err(lif->netdev, "Cannot map queue from cmb\n");
				ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
				err = -ENOMEM;
				goto err_out_free_q;
			}

			new->cmb_q_base_pa -= idev->phy_cmb_pages;
			new->q.cmb_base = new->cmb_q_base;
			new->q.cmb_base_pa = new->cmb_q_base_pa;
		}

		/* cq DMA descriptors */
		new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
		new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
						  GFP_KERNEL);
		if (!new->cq_base) {
			netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_q;
		}
		new->cq.base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
		new->cq.base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
		new->cq.bound_q = &new->q;
	}

	if (flags & IONIC_QCQ_F_SG) {
		new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
		new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
						  GFP_KERNEL);
		if (!new->sg_base) {
			netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq;
		}
		new->q.sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
		new->q.sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
	}

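	/* set up adaptive interrupt moderation; ionic_dim_work will
	 * push any new coalesce value out to the hardware
	 */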
	INIT_WORK(&new->dim.work, ionic_dim_work);
	new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
	if (lif->doorbell_wa)
		INIT_WORK(&new->doorbell_napi_work, ionic_doorbell_napi_work);

	*qcq = new;

	return 0;

err_out_free_cq:
	dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
err_out_free_q:
	if (new->cmb_q_base) {
		iounmap(new->cmb_q_base);
		ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
	}
	dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
err_out_free_irq:
	if (flags & IONIC_QCQ_F_INTR) {
		devm_free_irq(dev, new->intr.vector, &new->napi);
		ionic_intr_free(lif->ionic, new->intr.index);
	}
err_out_free_q_info:
	vfree(new->q.info);
err_out_free_qcq:
	devm_kfree(dev, new);
err_out:
	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
	return err;
}

static int ionic_qcqs_alloc(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	unsigned int flags;
	int err;

	flags = IONIC_QCQ_F_INTR;
	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
			      IONIC_ADMINQ_LENGTH,
			      sizeof(struct ionic_admin_cmd),
			      sizeof(struct ionic_admin_comp),
			      0,
			      sizeof(struct ionic_admin_desc_info),
			      lif->kern_pid, &lif->adminqcq);
	if (err)
		return err;
	ionic_debugfs_add_qcq(lif, lif->adminqcq);

	if (lif->ionic->nnqs_per_lif) {
		flags = IONIC_QCQ_F_NOTIFYQ;
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
				      flags, IONIC_NOTIFYQ_LENGTH,
				      sizeof(struct ionic_notifyq_cmd),
				      sizeof(union ionic_notifyq_comp),
				      0,
				      sizeof(struct ionic_admin_desc_info),
				      lif->kern_pid, &lif->notifyqcq);
		if (err)
			goto err_out;
		ionic_debugfs_add_qcq(lif, lif->notifyqcq);

		/* Let the notifyq ride on the adminq interrupt */
		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
	}

	err = -ENOMEM;
	lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
				   sizeof(*lif->txqcqs), GFP_KERNEL);
	if (!lif->txqcqs)
		goto err_out;
	lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
				   sizeof(*lif->rxqcqs), GFP_KERNEL);
	if (!lif->rxqcqs)
		goto err_out;

	lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif + 1,
				     sizeof(*lif->txqstats), GFP_KERNEL);
	if (!lif->txqstats)
		goto err_out;
	lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif + 1,
				     sizeof(*lif->rxqstats), GFP_KERNEL);
	if (!lif->rxqstats)
		goto err_out;

	return 0;

err_out:
	ionic_qcqs_free(lif);
	return err;
}

static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
{
	qcq->q.tail_idx = 0;
	qcq->q.head_idx = 0;
	qcq->cq.tail_idx = 0;
	qcq->cq.done_color = 1;
	memset(qcq->q_base, 0, qcq->q_size);
	if (qcq->cmb_q_base)
		memset_io(qcq->cmb_q_base, 0, qcq->cmb_q_size);
	memset(qcq->cq_base, 0, qcq->cq_size);
	memset(qcq->sg_base, 0, qcq->sg_size);
}

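/* Post a q_init admin command to create the tx queue in the device,
 * then save the hardware type/index returned in the completion.
 */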
static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.intr_index = cpu_to_le16(qcq->intr.index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
			.features = cpu_to_le64(q->features),
		},
	};
	int err;

	if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
		ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
		ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
	}

	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "txq_init.cq_ring_base 0x%llx\n", ctx.cmd.q_init.cq_ring_base);
	dev_dbg(dev, "txq_init.sg_ring_base 0x%llx\n", ctx.cmd.q_init.sg_ring_base);
	dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);

	q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE;
	q->dbell_jiffies = jiffies;

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

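/* As above, but for an rx queue; this also pairs the rx queue with
 * its tx partner, which the XDP transmit path relies on.
 */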
static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ),
			.intr_index = cpu_to_le16(cq->bound_intr->index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
			.features = cpu_to_le64(q->features),
		},
	};
	int err;

	q->partner = &lif->txqcqs[q->index]->q;
	q->partner->partner = q;

	if (!lif->xdp_prog ||
	    (lif->xdp_prog->aux && lif->xdp_prog->aux->xdp_has_frags))
		ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_SG);

	if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
		ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
		ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
	}

	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);

	q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
	q->dbell_jiffies = jiffies;

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi);
	else
		netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
{
	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
	unsigned int txq_i, flags;
	struct ionic_qcq *txq;
	u64 features;
	int err;

	if (lif->hwstamp_txq)
		return 0;

	features = IONIC_Q_F_2X_CQ_DESC | IONIC_TXQ_F_HWSTAMP;

	num_desc = IONIC_MIN_TXRX_DESC;
	desc_sz = sizeof(struct ionic_txq_desc);
	comp_sz = 2 * sizeof(struct ionic_txq_comp);

	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	txq_i = lif->ionic->ntxqs_per_lif;
	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;

	err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
			      num_desc, desc_sz, comp_sz, sg_desc_sz,
			      sizeof(struct ionic_tx_desc_info),
			      lif->kern_pid, &txq);
	if (err)
		goto err_qcq_alloc;

	txq->q.features = features;

	ionic_link_qcq_interrupts(lif->adminqcq, txq);
	ionic_debugfs_add_qcq(lif, txq);

	lif->hwstamp_txq = txq;

	if (netif_running(lif->netdev)) {
		err = ionic_lif_txq_init(lif, txq);
		if (err)
			goto err_qcq_init;

		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
			err = ionic_qcq_enable(txq);
			if (err)
				goto err_qcq_enable;
		}
	}

	return 0;

err_qcq_enable:
	ionic_lif_qcq_deinit(lif, txq);
err_qcq_init:
	lif->hwstamp_txq = NULL;
	ionic_debugfs_del_qcq(txq);
	ionic_qcq_free(lif, txq);
	devm_kfree(lif->ionic->dev, txq);
err_qcq_alloc:
	return err;
}

int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
{
	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
	unsigned int rxq_i, flags;
	struct ionic_qcq *rxq;
	u64 features;
	int err;

	if (lif->hwstamp_rxq)
		return 0;

	features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;

	num_desc = IONIC_MIN_TXRX_DESC;
	desc_sz = sizeof(struct ionic_rxq_desc);
	comp_sz = 2 * sizeof(struct ionic_rxq_comp);
	sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);

	rxq_i = lif->ionic->nrxqs_per_lif;
	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;

	err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
			      num_desc, desc_sz, comp_sz, sg_desc_sz,
			      sizeof(struct ionic_rx_desc_info),
			      lif->kern_pid, &rxq);
	if (err)
		goto err_qcq_alloc;

	rxq->q.features = features;

	ionic_link_qcq_interrupts(lif->adminqcq, rxq);
	ionic_debugfs_add_qcq(lif, rxq);

	lif->hwstamp_rxq = rxq;

	if (netif_running(lif->netdev)) {
		err = ionic_lif_rxq_init(lif, rxq);
		if (err)
			goto err_qcq_init;

		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
			ionic_rx_fill(&rxq->q);
			err = ionic_qcq_enable(rxq);
			if (err)
				goto err_qcq_enable;
		}
	}

	return 0;

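	/* error unwind: undo the steps above in reverse order */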
err_qcq_enable:
	ionic_lif_qcq_deinit(lif, rxq);
err_qcq_init:
	lif->hwstamp_rxq = NULL;
	ionic_debugfs_del_qcq(rxq);
	ionic_qcq_free(lif, rxq);
	devm_kfree(lif->ionic->dev, rxq);
err_qcq_alloc:
	return err;
}

int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif *lif, bool rx_all)
{
	struct ionic_queue_params qparam;

	ionic_init_queue_params(lif, &qparam);

	if (rx_all)
		qparam.rxq_features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;
	else
		qparam.rxq_features = 0;

	/* if we're not running, just set the values and return */
	if (!netif_running(lif->netdev)) {
		lif->rxq_features = qparam.rxq_features;
		return 0;
	}

	return ionic_reconfigure_queues(lif, &qparam);
}

int ionic_lif_set_hwstamp_txmode(struct ionic_lif *lif, u16 txstamp_mode)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_TXSTAMP,
			.txstamp_mode = cpu_to_le16(txstamp_mode),
		},
	};

	return ionic_adminq_post_wait(lif, &ctx);
}

static void ionic_lif_del_hwstamp_rxfilt(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	u32 filter_id;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_rxsteer(lif);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return;
	}

	filter_id = f->filter_id;
	ionic_rx_filter_free(lif, f);

	spin_unlock_bh(&lif->rx_filters.lock);

	netdev_dbg(lif->netdev, "rx_filter del RXSTEER (id %d)\n", filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(filter_id);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		netdev_dbg(lif->netdev, "failed to delete rx_filter RXSTEER (id %d)\n", filter_id);
}

static int ionic_lif_add_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_STEER_PKTCLASS),
			.pkt_class = cpu_to_le64(pkt_class),
		},
	};
	u8 qtype;
	u32 qid;
	int err;

	if (!lif->hwstamp_rxq)
		return -EINVAL;

	qtype = lif->hwstamp_rxq->q.type;
	ctx.cmd.rx_filter_add.qtype = qtype;

	qid = lif->hwstamp_rxq->q.index;
	ctx.cmd.rx_filter_add.qid = cpu_to_le32(qid);

	netdev_dbg(lif->netdev, "rx_filter add RXSTEER\n");
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	spin_lock_bh(&lif->rx_filters.lock);
	err = ionic_rx_filter_save(lif, 0, qid, 0, &ctx, IONIC_FILTER_STATE_SYNCED);
	spin_unlock_bh(&lif->rx_filters.lock);

	return err;
}

int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
{
	ionic_lif_del_hwstamp_rxfilt(lif);

	if (!pkt_class)
		return 0;

	return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class);
}

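/* NAPI poll for the adminq interrupt; the notifyq and any hwstamp
 * queues share this interrupt, so their CQs are serviced here too.
 */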
static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{
	struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
	struct ionic_lif *lif = napi_to_cq(napi)->lif;
	struct ionic_dev *idev = &lif->ionic->idev;
	unsigned long irqflags;
	unsigned int flags = 0;
	int rx_work = 0;
	int tx_work = 0;
	int n_work = 0;
	int a_work = 0;
	int work_done;
	int credits;

	if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
		n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
					  ionic_notifyq_service, NULL, NULL);

	spin_lock_irqsave(&lif->adminq_lock, irqflags);
	if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
		a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
					  ionic_adminq_service, NULL, NULL);

	spin_unlock_irqrestore(&lif->adminq_lock, irqflags);

	if (lif->hwstamp_rxq)
		rx_work = ionic_cq_service(&lif->hwstamp_rxq->cq, budget,
					   ionic_rx_service, NULL, NULL);

	if (lif->hwstamp_txq)
		tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget, !!budget);

	work_done = max(max(n_work, a_work), max(rx_work, tx_work));
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		flags |= IONIC_INTR_CRED_UNMASK;
		intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		credits = n_work + a_work + rx_work + tx_work;
		ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
	}

	if (lif->doorbell_wa) {
		if (!a_work)
			ionic_adminq_poke_doorbell(&lif->adminqcq->q);
		if (lif->hwstamp_rxq && !rx_work)
			ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q);
		if (lif->hwstamp_txq && !tx_work)
			ionic_txq_poke_doorbell(&lif->hwstamp_txq->q);
	}

	return work_done;
}

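/* fill in the netdev stats from the firmware-maintained lif stats */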
void ionic_get_stats64(struct net_device *netdev,
		       struct rtnl_link_stats64 *ns)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_lif_stats *ls;

	memset(ns, 0, sizeof(*ns));
	ls = &lif->info->stats;

	ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
			 le64_to_cpu(ls->rx_mcast_packets) +
			 le64_to_cpu(ls->rx_bcast_packets);

	ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
			 le64_to_cpu(ls->tx_mcast_packets) +
			 le64_to_cpu(ls->tx_bcast_packets);

	ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
		       le64_to_cpu(ls->rx_mcast_bytes) +
		       le64_to_cpu(ls->rx_bcast_bytes);

	ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
		       le64_to_cpu(ls->tx_mcast_bytes) +
		       le64_to_cpu(ls->tx_bcast_bytes);

	ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
			 le64_to_cpu(ls->rx_mcast_drop_packets) +
			 le64_to_cpu(ls->rx_bcast_drop_packets);

	ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
			 le64_to_cpu(ls->tx_mcast_drop_packets) +
			 le64_to_cpu(ls->tx_bcast_drop_packets);

	ns->multicast = le64_to_cpu(ls->rx_mcast_packets);

	ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);

	ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
			       le64_to_cpu(ls->rx_queue_disabled) +
			       le64_to_cpu(ls->rx_desc_fetch_error) +
			       le64_to_cpu(ls->rx_desc_data_error);

	ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
				le64_to_cpu(ls->tx_queue_disabled) +
				le64_to_cpu(ls->tx_desc_fetch_error) +
				le64_to_cpu(ls->tx_desc_data_error);

	ns->rx_errors = ns->rx_over_errors +
			ns->rx_missed_errors;

	ns->tx_errors = ns->tx_aborted_errors;
}

static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_list_addr(netdev_priv(netdev), addr, ADD_ADDR);
}

static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
	/* Don't delete our own address from the uc list */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	return ionic_lif_list_addr(netdev_priv(netdev), addr, DEL_ADDR);
}

void ionic_lif_rx_mode(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	unsigned int nfilters;
	unsigned int nd_flags;
	char buf[128];
	u16 rx_mode;
	int i;
#define REMAIN(__x) (sizeof(buf) - (__x))

	mutex_lock(&lif->config_lock);

	/* grab the flags once for local use */
	nd_flags = netdev->flags;

	rx_mode = IONIC_RX_MODE_F_UNICAST;
	rx_mode |= (nd_flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
	rx_mode |= (nd_flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
	rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
	rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;

	/* sync the filters */
	ionic_rx_filter_sync(lif);

	/* check for overflow state
	 *    if so, we track that we overflowed and enable NIC PROMISC
	 *    else if the overflow is set and not needed
	 *       we remove our overflow flag and check the netdev flags
	 *       to see if we can disable NIC PROMISC
	 */
	nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);

	if (((lif->nucast + lif->nmcast) >= nfilters) ||
	    (lif->max_vlans && lif->nvlans >= lif->max_vlans)) {
		rx_mode |= IONIC_RX_MODE_F_PROMISC;
		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
	} else {
		if (!(nd_flags & IFF_PROMISC))
			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
		if (!(nd_flags & IFF_ALLMULTI))
			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
	}

	i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
		      lif->rx_mode, rx_mode);
	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
	if (rx_mode & IONIC_RX_MODE_F_RDMA_SNIFFER)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_RDMA_SNIFFER");
	netdev_dbg(netdev, "lif%d %s\n", lif->index, buf);

	if (lif->rx_mode != rx_mode) {
		struct ionic_admin_ctx ctx = {
			.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
			.cmd.rx_mode_set = {
				.opcode = IONIC_CMD_RX_MODE_SET,
				.lif_index = cpu_to_le16(lif->index),
			},
		};
		int err;

		ctx.cmd.rx_mode_set.rx_mode = cpu_to_le16(rx_mode);
		err = ionic_adminq_post_wait(lif, &ctx);
		if (err)
			netdev_warn(netdev, "set rx_mode 0x%04x failed: %d\n",
				    rx_mode, err);
		else
			lif->rx_mode = rx_mode;
	}

	mutex_unlock(&lif->config_lock);
}

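/* ndo_set_rx_mode may be called in atomic context, so sync the address
 * lists here and defer the rest, including the adminq commands, to the
 * deferred work task.
 */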
static void ionic_ndo_set_rx_mode(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_deferred_work *work;

	/* Sync the kernel filter list with the driver filter list */
	__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
	__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);

	/* Shove off the rest of the rxmode work to the work task
	 * which will include syncing the filters to the firmware.
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		netdev_err(lif->netdev, "rxmode change dropped\n");
		return;
	}
	work->type = IONIC_DW_TYPE_RX_MODE;
	netdev_dbg(lif->netdev, "deferred: rx_mode\n");
	ionic_lif_deferred_enqueue(lif, work);
}

static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
{
	u64 wanted = 0;

	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
	if (features & NETIF_F_RXHASH)
		wanted |= IONIC_ETH_HW_RX_HASH;
	if (features & NETIF_F_RXCSUM)
		wanted |= IONIC_ETH_HW_RX_CSUM;
	if (features & NETIF_F_SG)
		wanted |= IONIC_ETH_HW_TX_SG;
	if (features & NETIF_F_HW_CSUM)
		wanted |= IONIC_ETH_HW_TX_CSUM;
	if (features & NETIF_F_TSO)
		wanted |= IONIC_ETH_HW_TSO;
	if (features & NETIF_F_TSO6)
		wanted |= IONIC_ETH_HW_TSO_IPV6;
	if (features & NETIF_F_TSO_ECN)
		wanted |= IONIC_ETH_HW_TSO_ECN;
	if (features & NETIF_F_GSO_GRE)
		wanted |= IONIC_ETH_HW_TSO_GRE;
	if (features & NETIF_F_GSO_GRE_CSUM)
		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
	if (features & NETIF_F_GSO_IPXIP4)
		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
	if (features & NETIF_F_GSO_IPXIP6)
		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
	if (features & NETIF_F_GSO_UDP_TUNNEL)
		wanted |= IONIC_ETH_HW_TSO_UDP;
	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;

	return cpu_to_le64(wanted);
}

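/* Push the wanted feature set to the firmware; lif->hw_features keeps
 * only the bits the device actually accepted.
 */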
static int ionic_set_nic_features(struct ionic_lif *lif,
				  netdev_features_t features)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_FEATURES,
		},
	};
	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
			 IONIC_ETH_HW_VLAN_RX_STRIP |
			 IONIC_ETH_HW_VLAN_RX_FILTER;
	u64 old_hw_features;
	int err;

	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);

	if (lif->phc)
		ctx.cmd.lif_setattr.features |= cpu_to_le64(IONIC_ETH_HW_TIMESTAMP);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	old_hw_features = lif->hw_features;
	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
				       ctx.comp.lif_setattr.features);

	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);

	if ((vlan_flags & le64_to_cpu(ctx.cmd.lif_setattr.features)) &&
	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		dev_dbg(dev, "feature ETH_HW_TSO\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TIMESTAMP)
		dev_dbg(dev, "feature ETH_HW_TIMESTAMP\n");

	return 0;
}

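/* first-time feature setup: request the defaults, then advertise to
 * the stack only what the firmware accepted
 */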
static int ionic_init_nic_features(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	netdev_features_t features;
	int err;

	/* set up what we expect to support by default */
	features = NETIF_F_HW_VLAN_CTAG_TX |
		   NETIF_F_HW_VLAN_CTAG_RX |
		   NETIF_F_HW_VLAN_CTAG_FILTER |
		   NETIF_F_SG |
		   NETIF_F_HW_CSUM |
		   NETIF_F_RXCSUM |
		   NETIF_F_TSO |
		   NETIF_F_TSO6 |
		   NETIF_F_TSO_ECN |
		   NETIF_F_GSO_GRE |
		   NETIF_F_GSO_GRE_CSUM |
		   NETIF_F_GSO_IPXIP4 |
		   NETIF_F_GSO_IPXIP6 |
		   NETIF_F_GSO_UDP_TUNNEL |
		   NETIF_F_GSO_UDP_TUNNEL_CSUM;

	if (lif->nxqs > 1)
		features |= NETIF_F_RXHASH;

	err = ionic_set_nic_features(lif, features);
	if (err)
		return err;

	/* tell the netdev what we actually can support */
	netdev->features |= NETIF_F_HIGHDMA;

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		netdev->hw_features |= NETIF_F_SG;

	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		netdev->hw_enc_features |= NETIF_F_RXCSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		netdev->hw_enc_features |= NETIF_F_TSO;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		netdev->hw_enc_features |= NETIF_F_TSO6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= netdev->hw_enc_features;
	netdev->features |= netdev->hw_features;
	netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;

	netdev->priv_flags |= IFF_UNICAST_FLT |
			      IFF_LIVE_ADDR_CHANGE;

	netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
			       NETDEV_XDP_ACT_REDIRECT |
			       NETDEV_XDP_ACT_RX_SG |
			       NETDEV_XDP_ACT_NDO_XMIT |
			       NETDEV_XDP_ACT_NDO_XMIT_SG;

	return 0;
}

static int ionic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
		   __func__, (u64)lif->netdev->features, (u64)features);

	err = ionic_set_nic_features(lif, features);

	return err;
}

static int ionic_set_attr_mac(struct ionic_lif *lif, u8 *mac)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};

	ether_addr_copy(ctx.cmd.lif_setattr.mac, mac);
	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_get_attr_mac(struct ionic_lif *lif, u8 *mac_addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_getattr = {
			.opcode = IONIC_CMD_LIF_GETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	ether_addr_copy(mac_addr, ctx.comp.lif_getattr.mac);
	return 0;
}

static int ionic_program_mac(struct ionic_lif *lif, u8 *mac)
{
	u8 get_mac[ETH_ALEN];
	int err;

	err = ionic_set_attr_mac(lif, mac);
	if (err)
		return err;

	err = ionic_get_attr_mac(lif, get_mac);
	if (err)
		return err;

	/* To deal with older firmware that silently ignores the set attr mac:
	 * doesn't actually change the mac and doesn't return an error, so we
	 * do the get attr to verify whether or not the set actually happened
	 */
	if (!ether_addr_equal(get_mac, mac))
		return 1;

	return 0;
}

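/* ndo_set_mac_address: program the new MAC into the device, then move
 * the netdev's address filter from the old MAC to the new one
 */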
static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct sockaddr *addr = sa;
	u8 *mac;
	int err;

	mac = (u8 *)addr->sa_data;
	if (ether_addr_equal(netdev->dev_addr, mac))
		return 0;

	err = ionic_program_mac(lif, mac);
	if (err < 0)
		return err;

	if (err > 0)
		netdev_dbg(netdev, "%s: SET and GET ATTR Mac are not equal - due to old FW running\n",
			   __func__);

	err = eth_prepare_mac_addr_change(netdev, addr);
	if (err)
		return err;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		netdev_info(netdev, "deleting mac addr %pM\n",
			    netdev->dev_addr);
		ionic_lif_addr_del(netdev_priv(netdev), netdev->dev_addr);
	}

	eth_commit_mac_addr_change(netdev, addr);
	netdev_info(netdev, "updating mac addr %pM\n", mac);

	return ionic_lif_addr_add(netdev_priv(netdev), mac);
}

void ionic_stop_queues_reconfig(struct ionic_lif *lif)
{
	/* Stop and clean the queues before reconfiguration */
	netif_device_detach(lif->netdev);
	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
}

static int ionic_start_queues_reconfig(struct ionic_lif *lif)
{
	int err;

	/* Re-init the queues after reconfiguration */

	/* The only way txrx_init can fail here is if communication
	 * with FW is suddenly broken.  There's not much we can do
	 * at this point - error messages have already been printed,
	 * so we can continue on and the user can eventually do a
	 * DOWN and UP to try to reset and clear the issue.
	 */
	err = ionic_txrx_init(lif);
	ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
	netif_device_attach(lif->netdev);

	return err;
}

static bool ionic_xdp_is_valid_mtu(struct ionic_lif *lif, u32 mtu,
				   struct bpf_prog *xdp_prog)
{
	if (!xdp_prog)
		return true;

	if (mtu <= IONIC_XDP_MAX_LINEAR_MTU)
		return true;

	if (xdp_prog->aux && xdp_prog->aux->xdp_has_frags)
		return true;

	return false;
}

static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MTU,
			.mtu = cpu_to_le32(new_mtu),
		},
	};
	struct bpf_prog *xdp_prog;
	int err;

	xdp_prog = READ_ONCE(lif->xdp_prog);
	if (!ionic_xdp_is_valid_mtu(lif, new_mtu, xdp_prog))
		return -EINVAL;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* if we're not running, nothing more to do */
	if (!netif_running(netdev)) {
		WRITE_ONCE(netdev->mtu, new_mtu);
		return 0;
	}

	mutex_lock(&lif->queue_lock);
	ionic_stop_queues_reconfig(lif);
	WRITE_ONCE(netdev->mtu, new_mtu);
	err = ionic_start_queues_reconfig(lif);
	mutex_unlock(&lif->queue_lock);

	return err;
}

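/* tx timeout recovery: bounce the queues under queue_lock */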
static void ionic_tx_timeout_work(struct work_struct *ws)
{
	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
	int err;

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	/* if we were stopped before this scheduled job was launched,
	 * don't bother the queues as they are already stopped.
	 */
	if (!netif_running(lif->netdev))
		return;

	mutex_lock(&lif->queue_lock);
	ionic_stop_queues_reconfig(lif);
	err = ionic_start_queues_reconfig(lif);
	mutex_unlock(&lif->queue_lock);

	if (err)
		dev_err(lif->ionic->dev, "%s: Restarting queues failed\n", __func__);
}

static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	netdev_info(lif->netdev, "Tx Timeout triggered - txq %d\n", txqueue);
	schedule_work(&lif->tx_timeout_work);
}

static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	err = ionic_lif_vlan_add(lif, vid);
	if (err)
		return err;

	ionic_lif_rx_mode(lif);

	return 0;
}

static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				  u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	err = ionic_lif_vlan_del(lif, vid);
	if (err)
		return err;

	ionic_lif_rx_mode(lif);

	return 0;
}

int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
			 const u8 *key, const u32 *indir)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_RSS,
			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
		},
	};
	unsigned int i, tbl_sz;

	if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
		lif->rss_types = types;
		ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
	}

	if (key)
		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);

	if (indir) {
		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = indir[i];
	}

	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
	       IONIC_RSS_HASH_KEY_SIZE);

	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_lif_rss_init(struct ionic_lif *lif)
{
	unsigned int tbl_sz;
	unsigned int i;

	lif->rss_types = IONIC_RSS_TYPE_IPV4 |
			 IONIC_RSS_TYPE_IPV4_TCP |
			 IONIC_RSS_TYPE_IPV4_UDP |
			 IONIC_RSS_TYPE_IPV6 |
			 IONIC_RSS_TYPE_IPV6_TCP |
			 IONIC_RSS_TYPE_IPV6_UDP;

	/* Fill indirection table with 'default' values */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	for (i = 0; i < tbl_sz; i++)
		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);

	return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
}

static void ionic_lif_rss_deinit(struct ionic_lif *lif)
{
	int tbl_sz;

	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	memset(lif->rss_ind_tbl, 0, tbl_sz);
	memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);

	ionic_lif_rss_config(lif, 0x0, NULL, NULL);
}

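/* set the lif state to QUIESCE so the firmware stops lif activity;
 * used once the queues have been disabled
 */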
1928 netdev_dbg(lif->netdev, "lif quiesce failed %d\n", err); 1929 } 1930 1931 static void ionic_txrx_disable(struct ionic_lif *lif) 1932 { 1933 unsigned int i; 1934 int err = 0; 1935 1936 if (lif->txqcqs) { 1937 for (i = 0; i < lif->nxqs; i++) 1938 err = ionic_qcq_disable(lif, lif->txqcqs[i], err); 1939 } 1940 1941 if (lif->hwstamp_txq) 1942 err = ionic_qcq_disable(lif, lif->hwstamp_txq, err); 1943 1944 if (lif->rxqcqs) { 1945 for (i = 0; i < lif->nxqs; i++) 1946 err = ionic_qcq_disable(lif, lif->rxqcqs[i], err); 1947 } 1948 1949 if (lif->hwstamp_rxq) 1950 err = ionic_qcq_disable(lif, lif->hwstamp_rxq, err); 1951 1952 ionic_lif_quiesce(lif); 1953 } 1954 1955 static void ionic_txrx_deinit(struct ionic_lif *lif) 1956 { 1957 unsigned int i; 1958 1959 if (lif->txqcqs) { 1960 for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) { 1961 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); 1962 ionic_tx_flush(&lif->txqcqs[i]->cq); 1963 ionic_tx_empty(&lif->txqcqs[i]->q); 1964 } 1965 } 1966 1967 if (lif->rxqcqs) { 1968 for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) { 1969 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]); 1970 ionic_rx_empty(&lif->rxqcqs[i]->q); 1971 } 1972 } 1973 lif->rx_mode = 0; 1974 1975 if (lif->hwstamp_txq) { 1976 ionic_lif_qcq_deinit(lif, lif->hwstamp_txq); 1977 ionic_tx_flush(&lif->hwstamp_txq->cq); 1978 ionic_tx_empty(&lif->hwstamp_txq->q); 1979 } 1980 1981 if (lif->hwstamp_rxq) { 1982 ionic_lif_qcq_deinit(lif, lif->hwstamp_rxq); 1983 ionic_rx_empty(&lif->hwstamp_rxq->q); 1984 } 1985 } 1986 1987 void ionic_txrx_free(struct ionic_lif *lif) 1988 { 1989 unsigned int i; 1990 1991 if (lif->txqcqs) { 1992 for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) { 1993 ionic_qcq_free(lif, lif->txqcqs[i]); 1994 devm_kfree(lif->ionic->dev, lif->txqcqs[i]); 1995 lif->txqcqs[i] = NULL; 1996 } 1997 } 1998 1999 if (lif->rxqcqs) { 2000 for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) { 2001 ionic_qcq_free(lif, lif->rxqcqs[i]); 2002 devm_kfree(lif->ionic->dev, lif->rxqcqs[i]); 2003 lif->rxqcqs[i] = NULL; 2004 } 2005 } 2006 2007 if (lif->hwstamp_txq) { 2008 ionic_qcq_free(lif, lif->hwstamp_txq); 2009 devm_kfree(lif->ionic->dev, lif->hwstamp_txq); 2010 lif->hwstamp_txq = NULL; 2011 } 2012 2013 if (lif->hwstamp_rxq) { 2014 ionic_qcq_free(lif, lif->hwstamp_rxq); 2015 devm_kfree(lif->ionic->dev, lif->hwstamp_rxq); 2016 lif->hwstamp_rxq = NULL; 2017 } 2018 } 2019 2020 static int ionic_txrx_alloc(struct ionic_lif *lif) 2021 { 2022 unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz; 2023 unsigned int flags, i; 2024 int err = 0; 2025 2026 num_desc = lif->ntxq_descs; 2027 desc_sz = sizeof(struct ionic_txq_desc); 2028 comp_sz = sizeof(struct ionic_txq_comp); 2029 2030 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 && 2031 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == 2032 sizeof(struct ionic_txq_sg_desc_v1)) 2033 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1); 2034 else 2035 sg_desc_sz = sizeof(struct ionic_txq_sg_desc); 2036 2037 flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; 2038 2039 if (test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state)) 2040 flags |= IONIC_QCQ_F_CMB_RINGS; 2041 2042 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) 2043 flags |= IONIC_QCQ_F_INTR; 2044 2045 for (i = 0; i < lif->nxqs; i++) { 2046 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 2047 num_desc, desc_sz, comp_sz, sg_desc_sz, 2048 sizeof(struct ionic_tx_desc_info), 2049 lif->kern_pid, &lif->txqcqs[i]); 2050 if (err) 2051 goto err_out; 2052 2053 if (flags & IONIC_QCQ_F_INTR) { 2054 
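			/* Per-Tx-queue interrupt (split interrupt mode):
			 * seed the coalesce timer from the current Tx value;
			 * if Tx DIM is enabled it will re-tune this later.
			 */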
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 2055 lif->txqcqs[i]->intr.index, 2056 lif->tx_coalesce_hw); 2057 if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state)) 2058 lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw; 2059 } 2060 2061 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]); 2062 } 2063 2064 flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR; 2065 2066 if (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state)) 2067 flags |= IONIC_QCQ_F_CMB_RINGS; 2068 2069 num_desc = lif->nrxq_descs; 2070 desc_sz = sizeof(struct ionic_rxq_desc); 2071 comp_sz = sizeof(struct ionic_rxq_comp); 2072 sg_desc_sz = sizeof(struct ionic_rxq_sg_desc); 2073 2074 if (lif->rxq_features & IONIC_Q_F_2X_CQ_DESC) 2075 comp_sz *= 2; 2076 2077 for (i = 0; i < lif->nxqs; i++) { 2078 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 2079 num_desc, desc_sz, comp_sz, sg_desc_sz, 2080 sizeof(struct ionic_rx_desc_info), 2081 lif->kern_pid, &lif->rxqcqs[i]); 2082 if (err) 2083 goto err_out; 2084 2085 lif->rxqcqs[i]->q.features = lif->rxq_features; 2086 2087 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 2088 lif->rxqcqs[i]->intr.index, 2089 lif->rx_coalesce_hw); 2090 if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state)) 2091 lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw; 2092 2093 if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) 2094 ionic_link_qcq_interrupts(lif->rxqcqs[i], 2095 lif->txqcqs[i]); 2096 2097 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]); 2098 } 2099 2100 return 0; 2101 2102 err_out: 2103 ionic_txrx_free(lif); 2104 2105 return err; 2106 } 2107 2108 static int ionic_txrx_init(struct ionic_lif *lif) 2109 { 2110 unsigned int i; 2111 int err; 2112 2113 for (i = 0; i < lif->nxqs; i++) { 2114 err = ionic_lif_txq_init(lif, lif->txqcqs[i]); 2115 if (err) 2116 goto err_out; 2117 2118 err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]); 2119 if (err) { 2120 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); 2121 goto err_out; 2122 } 2123 } 2124 2125 if (lif->netdev->features & NETIF_F_RXHASH) 2126 ionic_lif_rss_init(lif); 2127 2128 ionic_lif_rx_mode(lif); 2129 2130 return 0; 2131 2132 err_out: 2133 while (i--) { 2134 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); 2135 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]); 2136 } 2137 2138 return err; 2139 } 2140 2141 static int ionic_txrx_enable(struct ionic_lif *lif) 2142 { 2143 int derr = 0; 2144 int i, err; 2145 2146 err = ionic_xdp_queues_config(lif); 2147 if (err) 2148 return err; 2149 2150 for (i = 0; i < lif->nxqs; i++) { 2151 if (!(lif->rxqcqs[i] && lif->txqcqs[i])) { 2152 dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i); 2153 err = -ENXIO; 2154 goto err_out; 2155 } 2156 2157 ionic_rx_fill(&lif->rxqcqs[i]->q); 2158 err = ionic_qcq_enable(lif->rxqcqs[i]); 2159 if (err) 2160 goto err_out; 2161 2162 err = ionic_qcq_enable(lif->txqcqs[i]); 2163 if (err) { 2164 derr = ionic_qcq_disable(lif, lif->rxqcqs[i], err); 2165 goto err_out; 2166 } 2167 } 2168 2169 if (lif->hwstamp_rxq) { 2170 ionic_rx_fill(&lif->hwstamp_rxq->q); 2171 err = ionic_qcq_enable(lif->hwstamp_rxq); 2172 if (err) 2173 goto err_out_hwstamp_rx; 2174 } 2175 2176 if (lif->hwstamp_txq) { 2177 err = ionic_qcq_enable(lif->hwstamp_txq); 2178 if (err) 2179 goto err_out_hwstamp_tx; 2180 } 2181 2182 return 0; 2183 2184 err_out_hwstamp_tx: 2185 if (lif->hwstamp_rxq) 2186 derr = ionic_qcq_disable(lif, lif->hwstamp_rxq, derr); 2187 err_out_hwstamp_rx: 2188 i = lif->nxqs; 2189 err_out: 2190 while (i--) { 2191 derr = ionic_qcq_disable(lif, lif->txqcqs[i], derr); 2192 derr = ionic_qcq_disable(lif, 
lif->rxqcqs[i], derr); 2193 } 2194 2195 ionic_xdp_queues_config(lif); 2196 2197 return err; 2198 } 2199 2200 static int ionic_start_queues(struct ionic_lif *lif) 2201 { 2202 int err; 2203 2204 if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) 2205 return -EIO; 2206 2207 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 2208 return -EBUSY; 2209 2210 if (test_and_set_bit(IONIC_LIF_F_UP, lif->state)) 2211 return 0; 2212 2213 err = ionic_txrx_enable(lif); 2214 if (err) { 2215 clear_bit(IONIC_LIF_F_UP, lif->state); 2216 return err; 2217 } 2218 netif_tx_wake_all_queues(lif->netdev); 2219 2220 return 0; 2221 } 2222 2223 static int ionic_open(struct net_device *netdev) 2224 { 2225 struct ionic_lif *lif = netdev_priv(netdev); 2226 int err; 2227 2228 /* If recovering from a broken state, clear the bit and we'll try again */ 2229 if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state)) 2230 netdev_info(netdev, "clearing broken state\n"); 2231 2232 mutex_lock(&lif->queue_lock); 2233 2234 err = ionic_txrx_alloc(lif); 2235 if (err) 2236 goto err_unlock; 2237 2238 err = ionic_txrx_init(lif); 2239 if (err) 2240 goto err_txrx_free; 2241 2242 err = netif_set_real_num_tx_queues(netdev, lif->nxqs); 2243 if (err) 2244 goto err_txrx_deinit; 2245 2246 err = netif_set_real_num_rx_queues(netdev, lif->nxqs); 2247 if (err) 2248 goto err_txrx_deinit; 2249 2250 /* don't start the queues until we have link */ 2251 if (netif_carrier_ok(netdev)) { 2252 err = ionic_start_queues(lif); 2253 if (err) 2254 goto err_txrx_deinit; 2255 } 2256 2257 /* If hardware timestamping is enabled, but the queues were freed by 2258 * ionic_stop, those need to be reallocated and initialized, too. 2259 */ 2260 ionic_lif_hwstamp_recreate_queues(lif); 2261 2262 mutex_unlock(&lif->queue_lock); 2263 2264 return 0; 2265 2266 err_txrx_deinit: 2267 ionic_txrx_deinit(lif); 2268 err_txrx_free: 2269 ionic_txrx_free(lif); 2270 err_unlock: 2271 mutex_unlock(&lif->queue_lock); 2272 return err; 2273 } 2274 2275 static void ionic_stop_queues(struct ionic_lif *lif) 2276 { 2277 if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state)) 2278 return; 2279 2280 netif_tx_disable(lif->netdev); 2281 ionic_txrx_disable(lif); 2282 } 2283 2284 static int ionic_stop(struct net_device *netdev) 2285 { 2286 struct ionic_lif *lif = netdev_priv(netdev); 2287 2288 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 2289 return 0; 2290 2291 mutex_lock(&lif->queue_lock); 2292 ionic_stop_queues(lif); 2293 ionic_txrx_deinit(lif); 2294 ionic_txrx_free(lif); 2295 mutex_unlock(&lif->queue_lock); 2296 2297 return 0; 2298 } 2299 2300 static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2301 { 2302 struct ionic_lif *lif = netdev_priv(netdev); 2303 2304 switch (cmd) { 2305 case SIOCSHWTSTAMP: 2306 return ionic_lif_hwstamp_set(lif, ifr); 2307 case SIOCGHWTSTAMP: 2308 return ionic_lif_hwstamp_get(lif, ifr); 2309 default: 2310 return -EOPNOTSUPP; 2311 } 2312 } 2313 2314 static int ionic_get_vf_config(struct net_device *netdev, 2315 int vf, struct ifla_vf_info *ivf) 2316 { 2317 struct ionic_lif *lif = netdev_priv(netdev); 2318 struct ionic *ionic = lif->ionic; 2319 int ret = 0; 2320 2321 if (!netif_device_present(netdev)) 2322 return -EBUSY; 2323 2324 down_read(&ionic->vf_op_lock); 2325 2326 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2327 ret = -EINVAL; 2328 } else { 2329 struct ionic_vf *vfdata = &ionic->vfs[vf]; 2330 2331 ivf->vf = vf; 2332 ivf->qos = 0; 2333 ivf->vlan = le16_to_cpu(vfdata->vlanid); 2334 ivf->spoofchk = vfdata->spoofchk; 2335 ivf->linkstate = 
vfdata->linkstate; 2336 ivf->max_tx_rate = le32_to_cpu(vfdata->maxrate); 2337 ivf->trusted = vfdata->trusted; 2338 ether_addr_copy(ivf->mac, vfdata->macaddr); 2339 } 2340 2341 up_read(&ionic->vf_op_lock); 2342 return ret; 2343 } 2344 2345 static int ionic_get_vf_stats(struct net_device *netdev, int vf, 2346 struct ifla_vf_stats *vf_stats) 2347 { 2348 struct ionic_lif *lif = netdev_priv(netdev); 2349 struct ionic *ionic = lif->ionic; 2350 struct ionic_lif_stats *vs; 2351 int ret = 0; 2352 2353 if (!netif_device_present(netdev)) 2354 return -EBUSY; 2355 2356 down_read(&ionic->vf_op_lock); 2357 2358 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2359 ret = -EINVAL; 2360 } else { 2361 memset(vf_stats, 0, sizeof(*vf_stats)); 2362 vs = &ionic->vfs[vf].stats; 2363 2364 vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets); 2365 vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets); 2366 vf_stats->rx_bytes = le64_to_cpu(vs->rx_ucast_bytes); 2367 vf_stats->tx_bytes = le64_to_cpu(vs->tx_ucast_bytes); 2368 vf_stats->broadcast = le64_to_cpu(vs->rx_bcast_packets); 2369 vf_stats->multicast = le64_to_cpu(vs->rx_mcast_packets); 2370 vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) + 2371 le64_to_cpu(vs->rx_mcast_drop_packets) + 2372 le64_to_cpu(vs->rx_bcast_drop_packets); 2373 vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) + 2374 le64_to_cpu(vs->tx_mcast_drop_packets) + 2375 le64_to_cpu(vs->tx_bcast_drop_packets); 2376 } 2377 2378 up_read(&ionic->vf_op_lock); 2379 return ret; 2380 } 2381 2382 static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) 2383 { 2384 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_MAC }; 2385 struct ionic_lif *lif = netdev_priv(netdev); 2386 struct ionic *ionic = lif->ionic; 2387 int ret; 2388 2389 if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac))) 2390 return -EINVAL; 2391 2392 if (!netif_device_present(netdev)) 2393 return -EBUSY; 2394 2395 down_write(&ionic->vf_op_lock); 2396 2397 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2398 ret = -EINVAL; 2399 } else { 2400 ether_addr_copy(vfc.macaddr, mac); 2401 dev_dbg(ionic->dev, "%s: vf %d macaddr %pM\n", 2402 __func__, vf, vfc.macaddr); 2403 2404 ret = ionic_set_vf_config(ionic, vf, &vfc); 2405 if (!ret) 2406 ether_addr_copy(ionic->vfs[vf].macaddr, mac); 2407 } 2408 2409 up_write(&ionic->vf_op_lock); 2410 return ret; 2411 } 2412 2413 static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, 2414 u8 qos, __be16 proto) 2415 { 2416 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_VLAN }; 2417 struct ionic_lif *lif = netdev_priv(netdev); 2418 struct ionic *ionic = lif->ionic; 2419 int ret; 2420 2421 /* until someday when we support qos */ 2422 if (qos) 2423 return -EINVAL; 2424 2425 if (vlan > 4095) 2426 return -EINVAL; 2427 2428 if (proto != htons(ETH_P_8021Q)) 2429 return -EPROTONOSUPPORT; 2430 2431 if (!netif_device_present(netdev)) 2432 return -EBUSY; 2433 2434 down_write(&ionic->vf_op_lock); 2435 2436 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2437 ret = -EINVAL; 2438 } else { 2439 vfc.vlanid = cpu_to_le16(vlan); 2440 dev_dbg(ionic->dev, "%s: vf %d vlan %d\n", 2441 __func__, vf, le16_to_cpu(vfc.vlanid)); 2442 2443 ret = ionic_set_vf_config(ionic, vf, &vfc); 2444 if (!ret) 2445 ionic->vfs[vf].vlanid = cpu_to_le16(vlan); 2446 } 2447 2448 up_write(&ionic->vf_op_lock); 2449 return ret; 2450 } 2451 2452 static int ionic_set_vf_rate(struct net_device *netdev, int vf, 2453 int tx_min, int tx_max) 2454 { 2455 struct 
ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_RATE }; 2456 struct ionic_lif *lif = netdev_priv(netdev); 2457 struct ionic *ionic = lif->ionic; 2458 int ret; 2459 2460 /* setting the min just seems silly */ 2461 if (tx_min) 2462 return -EINVAL; 2463 2464 if (!netif_device_present(netdev)) 2465 return -EBUSY; 2466 2467 down_write(&ionic->vf_op_lock); 2468 2469 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2470 ret = -EINVAL; 2471 } else { 2472 vfc.maxrate = cpu_to_le32(tx_max); 2473 dev_dbg(ionic->dev, "%s: vf %d maxrate %d\n", 2474 __func__, vf, le32_to_cpu(vfc.maxrate)); 2475 2476 ret = ionic_set_vf_config(ionic, vf, &vfc); 2477 if (!ret) 2478 ionic->vfs[vf].maxrate = cpu_to_le32(tx_max); 2479 } 2480 2481 up_write(&ionic->vf_op_lock); 2482 return ret; 2483 } 2484 2485 static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set) 2486 { 2487 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_SPOOFCHK }; 2488 struct ionic_lif *lif = netdev_priv(netdev); 2489 struct ionic *ionic = lif->ionic; 2490 int ret; 2491 2492 if (!netif_device_present(netdev)) 2493 return -EBUSY; 2494 2495 down_write(&ionic->vf_op_lock); 2496 2497 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2498 ret = -EINVAL; 2499 } else { 2500 vfc.spoofchk = set; 2501 dev_dbg(ionic->dev, "%s: vf %d spoof %d\n", 2502 __func__, vf, vfc.spoofchk); 2503 2504 ret = ionic_set_vf_config(ionic, vf, &vfc); 2505 if (!ret) 2506 ionic->vfs[vf].spoofchk = set; 2507 } 2508 2509 up_write(&ionic->vf_op_lock); 2510 return ret; 2511 } 2512 2513 static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set) 2514 { 2515 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_TRUST }; 2516 struct ionic_lif *lif = netdev_priv(netdev); 2517 struct ionic *ionic = lif->ionic; 2518 int ret; 2519 2520 if (!netif_device_present(netdev)) 2521 return -EBUSY; 2522 2523 down_write(&ionic->vf_op_lock); 2524 2525 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2526 ret = -EINVAL; 2527 } else { 2528 vfc.trust = set; 2529 dev_dbg(ionic->dev, "%s: vf %d trust %d\n", 2530 __func__, vf, vfc.trust); 2531 2532 ret = ionic_set_vf_config(ionic, vf, &vfc); 2533 if (!ret) 2534 ionic->vfs[vf].trusted = set; 2535 } 2536 2537 up_write(&ionic->vf_op_lock); 2538 return ret; 2539 } 2540 2541 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set) 2542 { 2543 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_LINKSTATE }; 2544 struct ionic_lif *lif = netdev_priv(netdev); 2545 struct ionic *ionic = lif->ionic; 2546 u8 vfls; 2547 int ret; 2548 2549 switch (set) { 2550 case IFLA_VF_LINK_STATE_ENABLE: 2551 vfls = IONIC_VF_LINK_STATUS_UP; 2552 break; 2553 case IFLA_VF_LINK_STATE_DISABLE: 2554 vfls = IONIC_VF_LINK_STATUS_DOWN; 2555 break; 2556 case IFLA_VF_LINK_STATE_AUTO: 2557 vfls = IONIC_VF_LINK_STATUS_AUTO; 2558 break; 2559 default: 2560 return -EINVAL; 2561 } 2562 2563 if (!netif_device_present(netdev)) 2564 return -EBUSY; 2565 2566 down_write(&ionic->vf_op_lock); 2567 2568 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2569 ret = -EINVAL; 2570 } else { 2571 vfc.linkstate = vfls; 2572 dev_dbg(ionic->dev, "%s: vf %d linkstate %d\n", 2573 __func__, vf, vfc.linkstate); 2574 2575 ret = ionic_set_vf_config(ionic, vf, &vfc); 2576 if (!ret) 2577 ionic->vfs[vf].linkstate = set; 2578 } 2579 2580 up_write(&ionic->vf_op_lock); 2581 return ret; 2582 } 2583 2584 static void ionic_vf_attr_replay(struct ionic_lif *lif) 2585 { 2586 struct ionic_vf_setattr_cmd vfc = { }; 2587 struct ionic *ionic = lif->ionic; 
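	/* one setattr command is reused for every attribute below;
	 * each field is cleared again after it has been replayed
	 */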
2588 struct ionic_vf *v; 2589 int i; 2590 2591 if (!ionic->vfs) 2592 return; 2593 2594 down_read(&ionic->vf_op_lock); 2595 2596 for (i = 0; i < ionic->num_vfs; i++) { 2597 v = &ionic->vfs[i]; 2598 2599 if (v->stats_pa) { 2600 vfc.attr = IONIC_VF_ATTR_STATSADDR; 2601 vfc.stats_pa = cpu_to_le64(v->stats_pa); 2602 ionic_set_vf_config(ionic, i, &vfc); 2603 vfc.stats_pa = 0; 2604 } 2605 2606 if (!is_zero_ether_addr(v->macaddr)) { 2607 vfc.attr = IONIC_VF_ATTR_MAC; 2608 ether_addr_copy(vfc.macaddr, v->macaddr); 2609 ionic_set_vf_config(ionic, i, &vfc); 2610 eth_zero_addr(vfc.macaddr); 2611 } 2612 2613 if (v->vlanid) { 2614 vfc.attr = IONIC_VF_ATTR_VLAN; 2615 vfc.vlanid = v->vlanid; 2616 ionic_set_vf_config(ionic, i, &vfc); 2617 vfc.vlanid = 0; 2618 } 2619 2620 if (v->maxrate) { 2621 vfc.attr = IONIC_VF_ATTR_RATE; 2622 vfc.maxrate = v->maxrate; 2623 ionic_set_vf_config(ionic, i, &vfc); 2624 vfc.maxrate = 0; 2625 } 2626 2627 if (v->spoofchk) { 2628 vfc.attr = IONIC_VF_ATTR_SPOOFCHK; 2629 vfc.spoofchk = v->spoofchk; 2630 ionic_set_vf_config(ionic, i, &vfc); 2631 vfc.spoofchk = 0; 2632 } 2633 2634 if (v->trusted) { 2635 vfc.attr = IONIC_VF_ATTR_TRUST; 2636 vfc.trust = v->trusted; 2637 ionic_set_vf_config(ionic, i, &vfc); 2638 vfc.trust = 0; 2639 } 2640 2641 if (v->linkstate) { 2642 vfc.attr = IONIC_VF_ATTR_LINKSTATE; 2643 vfc.linkstate = v->linkstate; 2644 ionic_set_vf_config(ionic, i, &vfc); 2645 vfc.linkstate = 0; 2646 } 2647 } 2648 2649 up_read(&ionic->vf_op_lock); 2650 2651 ionic_vf_start(ionic); 2652 } 2653 2654 static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q) 2655 { 2656 struct xdp_rxq_info *xi; 2657 2658 if (!q->xdp_rxq_info) 2659 return; 2660 2661 xi = q->xdp_rxq_info; 2662 q->xdp_rxq_info = NULL; 2663 2664 xdp_rxq_info_unreg(xi); 2665 kfree(xi); 2666 } 2667 2668 static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_id) 2669 { 2670 struct xdp_rxq_info *rxq_info; 2671 int err; 2672 2673 rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL); 2674 if (!rxq_info) 2675 return -ENOMEM; 2676 2677 err = xdp_rxq_info_reg(rxq_info, q->lif->netdev, q->index, napi_id); 2678 if (err) { 2679 dev_err(q->dev, "Queue %d xdp_rxq_info_reg failed, err %d\n", 2680 q->index, err); 2681 goto err_out; 2682 } 2683 2684 err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_ORDER0, NULL); 2685 if (err) { 2686 dev_err(q->dev, "Queue %d xdp_rxq_info_reg_mem_model failed, err %d\n", 2687 q->index, err); 2688 xdp_rxq_info_unreg(rxq_info); 2689 goto err_out; 2690 } 2691 2692 q->xdp_rxq_info = rxq_info; 2693 2694 return 0; 2695 2696 err_out: 2697 kfree(rxq_info); 2698 return err; 2699 } 2700 2701 static int ionic_xdp_queues_config(struct ionic_lif *lif) 2702 { 2703 unsigned int i; 2704 int err; 2705 2706 if (!lif->rxqcqs) 2707 return 0; 2708 2709 /* There's no need to rework memory if not going to/from NULL program. 2710 * If there is no lif->xdp_prog, there should also be no q.xdp_rxq_info 2711 * This way we don't need to keep an *xdp_prog in every queue struct. 
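	 * (The check below is true when program presence and rxq_info
	 * presence already agree, so there is nothing to rework.)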
2712 */ 2713 if (!lif->xdp_prog == !lif->rxqcqs[0]->q.xdp_rxq_info) 2714 return 0; 2715 2716 for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) { 2717 struct ionic_queue *q = &lif->rxqcqs[i]->q; 2718 2719 if (q->xdp_rxq_info) { 2720 ionic_xdp_unregister_rxq_info(q); 2721 continue; 2722 } 2723 2724 err = ionic_xdp_register_rxq_info(q, lif->rxqcqs[i]->napi.napi_id); 2725 if (err) { 2726 dev_err(lif->ionic->dev, "failed to register RX queue %d info for XDP, err %d\n", 2727 i, err); 2728 goto err_out; 2729 } 2730 } 2731 2732 return 0; 2733 2734 err_out: 2735 for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) 2736 ionic_xdp_unregister_rxq_info(&lif->rxqcqs[i]->q); 2737 2738 return err; 2739 } 2740 2741 static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf) 2742 { 2743 struct ionic_lif *lif = netdev_priv(netdev); 2744 struct bpf_prog *old_prog; 2745 u32 maxfs; 2746 2747 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) { 2748 #define XDP_ERR_SPLIT "XDP not available with split Tx/Rx interrupts" 2749 NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_SPLIT); 2750 netdev_info(lif->netdev, XDP_ERR_SPLIT); 2751 return -EOPNOTSUPP; 2752 } 2753 2754 if (!ionic_xdp_is_valid_mtu(lif, netdev->mtu, bpf->prog)) { 2755 #define XDP_ERR_MTU "MTU is too large for XDP without frags support" 2756 NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_MTU); 2757 netdev_info(lif->netdev, XDP_ERR_MTU); 2758 return -EINVAL; 2759 } 2760 2761 maxfs = __le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN; 2762 if (bpf->prog && !(bpf->prog->aux && bpf->prog->aux->xdp_has_frags)) 2763 maxfs = min_t(u32, maxfs, IONIC_XDP_MAX_LINEAR_MTU); 2764 netdev->max_mtu = maxfs; 2765 2766 if (!netif_running(netdev)) { 2767 old_prog = xchg(&lif->xdp_prog, bpf->prog); 2768 } else { 2769 mutex_lock(&lif->queue_lock); 2770 ionic_stop_queues_reconfig(lif); 2771 old_prog = xchg(&lif->xdp_prog, bpf->prog); 2772 ionic_start_queues_reconfig(lif); 2773 mutex_unlock(&lif->queue_lock); 2774 } 2775 2776 if (old_prog) 2777 bpf_prog_put(old_prog); 2778 2779 return 0; 2780 } 2781 2782 static int ionic_xdp(struct net_device *netdev, struct netdev_bpf *bpf) 2783 { 2784 switch (bpf->command) { 2785 case XDP_SETUP_PROG: 2786 return ionic_xdp_config(netdev, bpf); 2787 default: 2788 return -EINVAL; 2789 } 2790 } 2791 2792 static const struct net_device_ops ionic_netdev_ops = { 2793 .ndo_open = ionic_open, 2794 .ndo_stop = ionic_stop, 2795 .ndo_eth_ioctl = ionic_eth_ioctl, 2796 .ndo_start_xmit = ionic_start_xmit, 2797 .ndo_bpf = ionic_xdp, 2798 .ndo_xdp_xmit = ionic_xdp_xmit, 2799 .ndo_get_stats64 = ionic_get_stats64, 2800 .ndo_set_rx_mode = ionic_ndo_set_rx_mode, 2801 .ndo_set_features = ionic_set_features, 2802 .ndo_set_mac_address = ionic_set_mac_address, 2803 .ndo_validate_addr = eth_validate_addr, 2804 .ndo_tx_timeout = ionic_tx_timeout, 2805 .ndo_change_mtu = ionic_change_mtu, 2806 .ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid, 2807 .ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid, 2808 .ndo_set_vf_vlan = ionic_set_vf_vlan, 2809 .ndo_set_vf_trust = ionic_set_vf_trust, 2810 .ndo_set_vf_mac = ionic_set_vf_mac, 2811 .ndo_set_vf_rate = ionic_set_vf_rate, 2812 .ndo_set_vf_spoofchk = ionic_set_vf_spoofchk, 2813 .ndo_get_vf_config = ionic_get_vf_config, 2814 .ndo_set_vf_link_state = ionic_set_vf_link_state, 2815 .ndo_get_vf_stats = ionic_get_vf_stats, 2816 }; 2817 2818 static int ionic_cmb_reconfig(struct ionic_lif *lif, 2819 struct ionic_queue_params *qparam) 2820 { 2821 struct ionic_queue_params start_qparams; 2822 int 
err = 0; 2823 2824 /* When changing CMB queue parameters, we're using limited 2825 * on-device memory and don't have extra memory to use for 2826 * duplicate allocations, so we free it all first then 2827 * re-allocate with the new parameters. 2828 */ 2829 2830 /* Checkpoint for possible unwind */ 2831 ionic_init_queue_params(lif, &start_qparams); 2832 2833 /* Stop and free the queues */ 2834 ionic_stop_queues_reconfig(lif); 2835 ionic_txrx_free(lif); 2836 2837 /* Set up new qparams */ 2838 ionic_set_queue_params(lif, qparam); 2839 2840 if (netif_running(lif->netdev)) { 2841 /* Alloc and start the new configuration */ 2842 err = ionic_txrx_alloc(lif); 2843 if (err) { 2844 dev_warn(lif->ionic->dev, 2845 "CMB reconfig failed, restoring values: %d\n", err); 2846 2847 /* Back out the changes */ 2848 ionic_set_queue_params(lif, &start_qparams); 2849 err = ionic_txrx_alloc(lif); 2850 if (err) { 2851 dev_err(lif->ionic->dev, 2852 "CMB restore failed: %d\n", err); 2853 goto err_out; 2854 } 2855 } 2856 2857 err = ionic_start_queues_reconfig(lif); 2858 if (err) { 2859 dev_err(lif->ionic->dev, 2860 "CMB reconfig failed: %d\n", err); 2861 goto err_out; 2862 } 2863 } 2864 2865 err_out: 2866 /* This was detached in ionic_stop_queues_reconfig() */ 2867 netif_device_attach(lif->netdev); 2868 2869 return err; 2870 } 2871 2872 static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b) 2873 { 2874 /* only swapping the queues, not the napi, flags, or other stuff */ 2875 swap(a->q.features, b->q.features); 2876 swap(a->q.num_descs, b->q.num_descs); 2877 swap(a->q.desc_size, b->q.desc_size); 2878 swap(a->q.base, b->q.base); 2879 swap(a->q.base_pa, b->q.base_pa); 2880 swap(a->q.info, b->q.info); 2881 swap(a->q.xdp_rxq_info, b->q.xdp_rxq_info); 2882 swap(a->q.partner, b->q.partner); 2883 swap(a->q_base, b->q_base); 2884 swap(a->q_base_pa, b->q_base_pa); 2885 swap(a->q_size, b->q_size); 2886 2887 swap(a->q.sg_desc_size, b->q.sg_desc_size); 2888 swap(a->q.sg_base, b->q.sg_base); 2889 swap(a->q.sg_base_pa, b->q.sg_base_pa); 2890 swap(a->sg_base, b->sg_base); 2891 swap(a->sg_base_pa, b->sg_base_pa); 2892 swap(a->sg_size, b->sg_size); 2893 2894 swap(a->cq.num_descs, b->cq.num_descs); 2895 swap(a->cq.desc_size, b->cq.desc_size); 2896 swap(a->cq.base, b->cq.base); 2897 swap(a->cq.base_pa, b->cq.base_pa); 2898 swap(a->cq_base, b->cq_base); 2899 swap(a->cq_base_pa, b->cq_base_pa); 2900 swap(a->cq_size, b->cq_size); 2901 2902 ionic_debugfs_del_qcq(a); 2903 ionic_debugfs_add_qcq(a->q.lif, a); 2904 } 2905 2906 int ionic_reconfigure_queues(struct ionic_lif *lif, 2907 struct ionic_queue_params *qparam) 2908 { 2909 unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz; 2910 struct ionic_qcq **tx_qcqs = NULL; 2911 struct ionic_qcq **rx_qcqs = NULL; 2912 unsigned int flags, i; 2913 int err = 0; 2914 2915 /* Are we changing q params while CMB is on */ 2916 if ((test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) && qparam->cmb_tx) || 2917 (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state) && qparam->cmb_rx)) 2918 return ionic_cmb_reconfig(lif, qparam); 2919 2920 /* allocate temporary qcq arrays to hold new queue structs */ 2921 if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) { 2922 tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif, 2923 sizeof(struct ionic_qcq *), GFP_KERNEL); 2924 if (!tx_qcqs) { 2925 err = -ENOMEM; 2926 goto err_out; 2927 } 2928 } 2929 if (qparam->nxqs != lif->nxqs || 2930 qparam->nrxq_descs != lif->nrxq_descs || 2931 qparam->rxq_features != lif->rxq_features) { 2932 
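		/* Rx needs new qcq shells if the queue count, ring length,
		 * or per-queue feature set is changing
		 */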
rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif, 2933 sizeof(struct ionic_qcq *), GFP_KERNEL); 2934 if (!rx_qcqs) { 2935 err = -ENOMEM; 2936 goto err_out; 2937 } 2938 } 2939 2940 /* allocate new desc_info and rings, but leave the interrupt setup 2941 * until later so as to not mess with the still-running queues 2942 */ 2943 if (tx_qcqs) { 2944 num_desc = qparam->ntxq_descs; 2945 desc_sz = sizeof(struct ionic_txq_desc); 2946 comp_sz = sizeof(struct ionic_txq_comp); 2947 2948 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 && 2949 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == 2950 sizeof(struct ionic_txq_sg_desc_v1)) 2951 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1); 2952 else 2953 sg_desc_sz = sizeof(struct ionic_txq_sg_desc); 2954 2955 for (i = 0; i < qparam->nxqs; i++) { 2956 /* If missing, short placeholder qcq needed for swap */ 2957 if (!lif->txqcqs[i]) { 2958 flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; 2959 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 2960 4, desc_sz, comp_sz, sg_desc_sz, 2961 sizeof(struct ionic_tx_desc_info), 2962 lif->kern_pid, &lif->txqcqs[i]); 2963 if (err) 2964 goto err_out; 2965 } 2966 2967 flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR; 2968 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 2969 num_desc, desc_sz, comp_sz, sg_desc_sz, 2970 sizeof(struct ionic_tx_desc_info), 2971 lif->kern_pid, &tx_qcqs[i]); 2972 if (err) 2973 goto err_out; 2974 } 2975 } 2976 2977 if (rx_qcqs) { 2978 num_desc = qparam->nrxq_descs; 2979 desc_sz = sizeof(struct ionic_rxq_desc); 2980 comp_sz = sizeof(struct ionic_rxq_comp); 2981 sg_desc_sz = sizeof(struct ionic_rxq_sg_desc); 2982 2983 if (qparam->rxq_features & IONIC_Q_F_2X_CQ_DESC) 2984 comp_sz *= 2; 2985 2986 for (i = 0; i < qparam->nxqs; i++) { 2987 /* If missing, short placeholder qcq needed for swap */ 2988 if (!lif->rxqcqs[i]) { 2989 flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG; 2990 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 2991 4, desc_sz, comp_sz, sg_desc_sz, 2992 sizeof(struct ionic_rx_desc_info), 2993 lif->kern_pid, &lif->rxqcqs[i]); 2994 if (err) 2995 goto err_out; 2996 } 2997 2998 flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR; 2999 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 3000 num_desc, desc_sz, comp_sz, sg_desc_sz, 3001 sizeof(struct ionic_rx_desc_info), 3002 lif->kern_pid, &rx_qcqs[i]); 3003 if (err) 3004 goto err_out; 3005 3006 rx_qcqs[i]->q.features = qparam->rxq_features; 3007 } 3008 } 3009 3010 /* stop and clean the queues */ 3011 ionic_stop_queues_reconfig(lif); 3012 3013 if (qparam->nxqs != lif->nxqs) { 3014 err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs); 3015 if (err) 3016 goto err_out_reinit_unlock; 3017 err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs); 3018 if (err) { 3019 netif_set_real_num_tx_queues(lif->netdev, lif->nxqs); 3020 goto err_out_reinit_unlock; 3021 } 3022 } 3023 3024 /* swap new desc_info and rings, keeping existing interrupt config */ 3025 if (tx_qcqs) { 3026 lif->ntxq_descs = qparam->ntxq_descs; 3027 for (i = 0; i < qparam->nxqs; i++) 3028 ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]); 3029 } 3030 3031 if (rx_qcqs) { 3032 lif->nrxq_descs = qparam->nrxq_descs; 3033 for (i = 0; i < qparam->nxqs; i++) 3034 ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]); 3035 } 3036 3037 /* if we need to change the interrupt layout, this is the time */ 3038 if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) || 3039 qparam->nxqs != lif->nxqs) { 3040 if 
(qparam->intr_split) { 3041 set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); 3042 } else { 3043 clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); 3044 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs; 3045 lif->tx_coalesce_hw = lif->rx_coalesce_hw; 3046 } 3047 3048 /* Clear existing interrupt assignments. We check for NULL here 3049 * because we're checking the whole array for potential qcqs, not 3050 * just those qcqs that have just been set up. 3051 */ 3052 for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) { 3053 if (lif->txqcqs[i]) 3054 ionic_qcq_intr_free(lif, lif->txqcqs[i]); 3055 if (lif->rxqcqs[i]) 3056 ionic_qcq_intr_free(lif, lif->rxqcqs[i]); 3057 } 3058 3059 /* re-assign the interrupts */ 3060 for (i = 0; i < qparam->nxqs; i++) { 3061 lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR; 3062 err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]); 3063 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 3064 lif->rxqcqs[i]->intr.index, 3065 lif->rx_coalesce_hw); 3066 3067 if (qparam->intr_split) { 3068 lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR; 3069 err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]); 3070 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 3071 lif->txqcqs[i]->intr.index, 3072 lif->tx_coalesce_hw); 3073 if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state)) 3074 lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw; 3075 } else { 3076 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3077 ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]); 3078 } 3079 } 3080 } 3081 3082 /* now we can rework the debugfs mappings */ 3083 if (tx_qcqs) { 3084 for (i = 0; i < qparam->nxqs; i++) { 3085 ionic_debugfs_del_qcq(lif->txqcqs[i]); 3086 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]); 3087 } 3088 } 3089 3090 if (rx_qcqs) { 3091 for (i = 0; i < qparam->nxqs; i++) { 3092 ionic_debugfs_del_qcq(lif->rxqcqs[i]); 3093 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]); 3094 } 3095 } 3096 3097 swap(lif->nxqs, qparam->nxqs); 3098 swap(lif->rxq_features, qparam->rxq_features); 3099 3100 err_out_reinit_unlock: 3101 /* re-init the queues, but don't lose an error code */ 3102 if (err) 3103 ionic_start_queues_reconfig(lif); 3104 else 3105 err = ionic_start_queues_reconfig(lif); 3106 3107 err_out: 3108 /* free old allocs without cleaning intr */ 3109 for (i = 0; i < qparam->nxqs; i++) { 3110 if (tx_qcqs && tx_qcqs[i]) { 3111 tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3112 ionic_qcq_free(lif, tx_qcqs[i]); 3113 devm_kfree(lif->ionic->dev, tx_qcqs[i]); 3114 tx_qcqs[i] = NULL; 3115 } 3116 if (rx_qcqs && rx_qcqs[i]) { 3117 rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3118 ionic_qcq_free(lif, rx_qcqs[i]); 3119 devm_kfree(lif->ionic->dev, rx_qcqs[i]); 3120 rx_qcqs[i] = NULL; 3121 } 3122 } 3123 3124 /* free q array */ 3125 if (rx_qcqs) { 3126 devm_kfree(lif->ionic->dev, rx_qcqs); 3127 rx_qcqs = NULL; 3128 } 3129 if (tx_qcqs) { 3130 devm_kfree(lif->ionic->dev, tx_qcqs); 3131 tx_qcqs = NULL; 3132 } 3133 3134 /* clean the unused dma and info allocations when new set is smaller 3135 * than the full array, but leave the qcq shells in place 3136 */ 3137 for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) { 3138 if (lif->txqcqs && lif->txqcqs[i]) { 3139 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3140 ionic_qcq_free(lif, lif->txqcqs[i]); 3141 } 3142 3143 if (lif->rxqcqs && lif->rxqcqs[i]) { 3144 lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3145 ionic_qcq_free(lif, lif->rxqcqs[i]); 3146 } 3147 } 3148 3149 if (err) 3150 netdev_info(lif->netdev, "%s: failed %d\n", __func__, err); 3151 3152 return err; 3153 } 3154 3155 static int 
ionic_affinity_masks_alloc(struct ionic *ionic) 3156 { 3157 cpumask_var_t *affinity_masks; 3158 int nintrs = ionic->nintrs; 3159 int i; 3160 3161 affinity_masks = kcalloc(nintrs, sizeof(cpumask_var_t), GFP_KERNEL); 3162 if (!affinity_masks) 3163 return -ENOMEM; 3164 3165 for (i = 0; i < nintrs; i++) { 3166 if (!zalloc_cpumask_var_node(&affinity_masks[i], GFP_KERNEL, 3167 dev_to_node(ionic->dev))) 3168 goto err_out; 3169 } 3170 3171 ionic->affinity_masks = affinity_masks; 3172 3173 return 0; 3174 3175 err_out: 3176 for (--i; i >= 0; i--) 3177 free_cpumask_var(affinity_masks[i]); 3178 kfree(affinity_masks); 3179 3180 return -ENOMEM; 3181 } 3182 3183 static void ionic_affinity_masks_free(struct ionic *ionic) 3184 { 3185 int i; 3186 3187 for (i = 0; i < ionic->nintrs; i++) 3188 free_cpumask_var(ionic->affinity_masks[i]); 3189 kfree(ionic->affinity_masks); 3190 ionic->affinity_masks = NULL; 3191 } 3192 3193 int ionic_lif_alloc(struct ionic *ionic) 3194 { 3195 struct device *dev = ionic->dev; 3196 union ionic_lif_identity *lid; 3197 struct net_device *netdev; 3198 struct ionic_lif *lif; 3199 int tbl_sz; 3200 int err; 3201 3202 lid = kzalloc(sizeof(*lid), GFP_KERNEL); 3203 if (!lid) 3204 return -ENOMEM; 3205 3206 netdev = alloc_etherdev_mqs(sizeof(*lif), 3207 ionic->ntxqs_per_lif, ionic->ntxqs_per_lif); 3208 if (!netdev) { 3209 dev_err(dev, "Cannot allocate netdev, aborting\n"); 3210 err = -ENOMEM; 3211 goto err_out_free_lid; 3212 } 3213 3214 SET_NETDEV_DEV(netdev, dev); 3215 3216 lif = netdev_priv(netdev); 3217 lif->netdev = netdev; 3218 ionic->lif = lif; 3219 lif->ionic = ionic; 3220 netdev->netdev_ops = &ionic_netdev_ops; 3221 ionic_ethtool_set_ops(netdev); 3222 3223 netdev->watchdog_timeo = 2 * HZ; 3224 netif_carrier_off(netdev); 3225 3226 lif->identity = lid; 3227 lif->lif_type = IONIC_LIF_TYPE_CLASSIC; 3228 err = ionic_lif_identify(ionic, lif->lif_type, lif->identity); 3229 if (err) { 3230 dev_err(ionic->dev, "Cannot identify type %d: %d\n", 3231 lif->lif_type, err); 3232 goto err_out_free_netdev; 3233 } 3234 lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU, 3235 le32_to_cpu(lif->identity->eth.min_frame_size)); 3236 lif->netdev->max_mtu = 3237 le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN; 3238 3239 lif->neqs = ionic->neqs_per_lif; 3240 lif->nxqs = ionic->ntxqs_per_lif; 3241 3242 lif->index = 0; 3243 3244 if (is_kdump_kernel()) { 3245 lif->ntxq_descs = IONIC_MIN_TXRX_DESC; 3246 lif->nrxq_descs = IONIC_MIN_TXRX_DESC; 3247 } else { 3248 lif->ntxq_descs = IONIC_DEF_TXRX_DESC; 3249 lif->nrxq_descs = IONIC_DEF_TXRX_DESC; 3250 } 3251 3252 /* Convert the default coalesce value to actual hw resolution */ 3253 lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT; 3254 lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic, 3255 lif->rx_coalesce_usecs); 3256 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs; 3257 lif->tx_coalesce_hw = lif->rx_coalesce_hw; 3258 set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state); 3259 set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state); 3260 3261 snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index); 3262 3263 mutex_init(&lif->queue_lock); 3264 mutex_init(&lif->config_lock); 3265 3266 spin_lock_init(&lif->adminq_lock); 3267 3268 spin_lock_init(&lif->deferred.lock); 3269 INIT_LIST_HEAD(&lif->deferred.list); 3270 INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work); 3271 3272 /* allocate lif info */ 3273 lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE); 3274 lif->info = dma_alloc_coherent(dev, lif->info_sz, 3275 &lif->info_pa, GFP_KERNEL); 3276 
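	/* page-aligned DMA block; its bus address is handed to the FW
	 * later in ionic_lif_init() so the device can post lif status
	 */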
if (!lif->info) { 3277 dev_err(dev, "Failed to allocate lif info, aborting\n"); 3278 err = -ENOMEM; 3279 goto err_out_free_mutex; 3280 } 3281 3282 ionic_debugfs_add_lif(lif); 3283 3284 err = ionic_affinity_masks_alloc(ionic); 3285 if (err) 3286 goto err_out_free_lif_info; 3287 3288 /* allocate control queues and txrx queue arrays */ 3289 ionic_lif_queue_identify(lif); 3290 err = ionic_qcqs_alloc(lif); 3291 if (err) 3292 goto err_out_free_affinity_masks; 3293 3294 /* allocate rss indirection table */ 3295 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 3296 lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz; 3297 lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz, 3298 &lif->rss_ind_tbl_pa, 3299 GFP_KERNEL); 3300 3301 if (!lif->rss_ind_tbl) { 3302 err = -ENOMEM; 3303 dev_err(dev, "Failed to allocate rss indirection table, aborting\n"); 3304 goto err_out_free_qcqs; 3305 } 3306 netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE); 3307 3308 ionic_lif_alloc_phc(lif); 3309 3310 return 0; 3311 3312 err_out_free_qcqs: 3313 ionic_qcqs_free(lif); 3314 err_out_free_affinity_masks: 3315 ionic_affinity_masks_free(lif->ionic); 3316 err_out_free_lif_info: 3317 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); 3318 lif->info = NULL; 3319 lif->info_pa = 0; 3320 err_out_free_mutex: 3321 mutex_destroy(&lif->config_lock); 3322 mutex_destroy(&lif->queue_lock); 3323 err_out_free_netdev: 3324 free_netdev(lif->netdev); 3325 lif = NULL; 3326 err_out_free_lid: 3327 kfree(lid); 3328 3329 return err; 3330 } 3331 3332 static void ionic_lif_reset(struct ionic_lif *lif) 3333 { 3334 struct ionic_dev *idev = &lif->ionic->idev; 3335 3336 if (!ionic_is_fw_running(idev)) 3337 return; 3338 3339 mutex_lock(&lif->ionic->dev_cmd_lock); 3340 ionic_dev_cmd_lif_reset(idev, lif->index); 3341 ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 3342 mutex_unlock(&lif->ionic->dev_cmd_lock); 3343 } 3344 3345 static void ionic_lif_handle_fw_down(struct ionic_lif *lif) 3346 { 3347 struct ionic *ionic = lif->ionic; 3348 3349 if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state)) 3350 return; 3351 3352 dev_info(ionic->dev, "FW Down: Stopping LIFs\n"); 3353 3354 netif_device_detach(lif->netdev); 3355 3356 mutex_lock(&lif->queue_lock); 3357 if (test_bit(IONIC_LIF_F_UP, lif->state)) { 3358 dev_info(ionic->dev, "Surprise FW stop, stopping queues\n"); 3359 ionic_stop_queues(lif); 3360 } 3361 3362 if (netif_running(lif->netdev)) { 3363 ionic_txrx_deinit(lif); 3364 ionic_txrx_free(lif); 3365 } 3366 ionic_lif_deinit(lif); 3367 ionic_reset(ionic); 3368 ionic_qcqs_free(lif); 3369 3370 mutex_unlock(&lif->queue_lock); 3371 3372 clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state); 3373 dev_info(ionic->dev, "FW Down: LIFs stopped\n"); 3374 } 3375 3376 int ionic_restart_lif(struct ionic_lif *lif) 3377 { 3378 struct ionic *ionic = lif->ionic; 3379 int err; 3380 3381 mutex_lock(&lif->queue_lock); 3382 3383 if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state)) 3384 dev_info(ionic->dev, "FW Up: clearing broken state\n"); 3385 3386 err = ionic_qcqs_alloc(lif); 3387 if (err) 3388 goto err_unlock; 3389 3390 err = ionic_lif_init(lif); 3391 if (err) 3392 goto err_qcqs_free; 3393 3394 ionic_vf_attr_replay(lif); 3395 3396 if (lif->registered) 3397 ionic_lif_set_netdev_info(lif); 3398 3399 ionic_rx_filter_replay(lif); 3400 3401 if (netif_running(lif->netdev)) { 3402 err = ionic_txrx_alloc(lif); 3403 if (err) 3404 goto err_lifs_deinit; 3405 3406 err = ionic_txrx_init(lif); 3407 if (err) 3408 goto err_txrx_free; 3409 
} 3410 3411 mutex_unlock(&lif->queue_lock); 3412 3413 clear_bit(IONIC_LIF_F_FW_RESET, lif->state); 3414 ionic_link_status_check_request(lif, CAN_SLEEP); 3415 netif_device_attach(lif->netdev); 3416 ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE); 3417 3418 return 0; 3419 3420 err_txrx_free: 3421 ionic_txrx_free(lif); 3422 err_lifs_deinit: 3423 ionic_lif_deinit(lif); 3424 err_qcqs_free: 3425 ionic_qcqs_free(lif); 3426 err_unlock: 3427 mutex_unlock(&lif->queue_lock); 3428 3429 return err; 3430 } 3431 3432 static void ionic_lif_handle_fw_up(struct ionic_lif *lif) 3433 { 3434 struct ionic *ionic = lif->ionic; 3435 int err; 3436 3437 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 3438 return; 3439 3440 dev_info(ionic->dev, "FW Up: restarting LIFs\n"); 3441 3442 /* This is a little different from what happens at 3443 * probe time because the LIF already exists so we 3444 * just need to reanimate it. 3445 */ 3446 ionic_init_devinfo(ionic); 3447 ionic_reset(ionic); 3448 err = ionic_identify(ionic); 3449 if (err) 3450 goto err_out; 3451 err = ionic_port_identify(ionic); 3452 if (err) 3453 goto err_out; 3454 err = ionic_port_init(ionic); 3455 if (err) 3456 goto err_out; 3457 3458 err = ionic_restart_lif(lif); 3459 if (err) 3460 goto err_out; 3461 3462 dev_info(ionic->dev, "FW Up: LIFs restarted\n"); 3463 3464 /* restore the hardware timestamping queues */ 3465 ionic_lif_hwstamp_replay(lif); 3466 3467 return; 3468 3469 err_out: 3470 dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err); 3471 } 3472 3473 void ionic_lif_free(struct ionic_lif *lif) 3474 { 3475 struct device *dev = lif->ionic->dev; 3476 3477 ionic_lif_free_phc(lif); 3478 3479 /* free rss indirection table */ 3480 dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl, 3481 lif->rss_ind_tbl_pa); 3482 lif->rss_ind_tbl = NULL; 3483 lif->rss_ind_tbl_pa = 0; 3484 3485 /* free queues */ 3486 ionic_qcqs_free(lif); 3487 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 3488 ionic_lif_reset(lif); 3489 3490 ionic_affinity_masks_free(lif->ionic); 3491 3492 /* free lif info */ 3493 kfree(lif->identity); 3494 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); 3495 lif->info = NULL; 3496 lif->info_pa = 0; 3497 3498 /* unmap doorbell page */ 3499 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); 3500 lif->kern_dbpage = NULL; 3501 3502 mutex_destroy(&lif->config_lock); 3503 mutex_destroy(&lif->queue_lock); 3504 3505 /* free netdev & lif */ 3506 ionic_debugfs_del_lif(lif); 3507 free_netdev(lif->netdev); 3508 } 3509 3510 void ionic_lif_deinit(struct ionic_lif *lif) 3511 { 3512 if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state)) 3513 return; 3514 3515 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { 3516 cancel_work_sync(&lif->deferred.work); 3517 cancel_work_sync(&lif->tx_timeout_work); 3518 ionic_rx_filters_deinit(lif); 3519 if (lif->netdev->features & NETIF_F_RXHASH) 3520 ionic_lif_rss_deinit(lif); 3521 } 3522 3523 napi_disable(&lif->adminqcq->napi); 3524 ionic_lif_qcq_deinit(lif, lif->notifyqcq); 3525 ionic_lif_qcq_deinit(lif, lif->adminqcq); 3526 3527 ionic_lif_reset(lif); 3528 } 3529 3530 static int ionic_lif_adminq_init(struct ionic_lif *lif) 3531 { 3532 struct device *dev = lif->ionic->dev; 3533 struct ionic_q_init_comp comp; 3534 struct ionic_dev *idev; 3535 struct ionic_qcq *qcq; 3536 struct ionic_queue *q; 3537 int err; 3538 3539 idev = &lif->ionic->idev; 3540 qcq = lif->adminqcq; 3541 q = &qcq->q; 3542 3543 mutex_lock(&lif->ionic->dev_cmd_lock); 3544 ionic_dev_cmd_adminq_init(idev, qcq, 
lif->index, qcq->intr.index); 3545 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 3546 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); 3547 mutex_unlock(&lif->ionic->dev_cmd_lock); 3548 if (err) { 3549 netdev_err(lif->netdev, "adminq init failed %d\n", err); 3550 return err; 3551 } 3552 3553 q->hw_type = comp.hw_type; 3554 q->hw_index = le32_to_cpu(comp.hw_index); 3555 q->dbval = IONIC_DBELL_QID(q->hw_index); 3556 3557 dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type); 3558 dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index); 3559 3560 q->dbell_deadline = IONIC_ADMIN_DOORBELL_DEADLINE; 3561 q->dbell_jiffies = jiffies; 3562 3563 netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi); 3564 3565 napi_enable(&qcq->napi); 3566 3567 if (qcq->flags & IONIC_QCQ_F_INTR) { 3568 irq_set_affinity_hint(qcq->intr.vector, 3569 *qcq->intr.affinity_mask); 3570 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 3571 IONIC_INTR_MASK_CLEAR); 3572 } 3573 3574 qcq->flags |= IONIC_QCQ_F_INITED; 3575 3576 return 0; 3577 } 3578 3579 static int ionic_lif_notifyq_init(struct ionic_lif *lif) 3580 { 3581 struct ionic_qcq *qcq = lif->notifyqcq; 3582 struct device *dev = lif->ionic->dev; 3583 struct ionic_queue *q = &qcq->q; 3584 int err; 3585 3586 struct ionic_admin_ctx ctx = { 3587 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 3588 .cmd.q_init = { 3589 .opcode = IONIC_CMD_Q_INIT, 3590 .lif_index = cpu_to_le16(lif->index), 3591 .type = q->type, 3592 .ver = lif->qtype_info[q->type].version, 3593 .index = cpu_to_le32(q->index), 3594 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | 3595 IONIC_QINIT_F_ENA), 3596 .intr_index = cpu_to_le16(lif->adminqcq->intr.index), 3597 .pid = cpu_to_le16(q->pid), 3598 .ring_size = ilog2(q->num_descs), 3599 .ring_base = cpu_to_le64(q->base_pa), 3600 } 3601 }; 3602 3603 dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid); 3604 dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index); 3605 dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); 3606 dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); 3607 3608 err = ionic_adminq_post_wait(lif, &ctx); 3609 if (err) 3610 return err; 3611 3612 lif->last_eid = 0; 3613 q->hw_type = ctx.comp.q_init.hw_type; 3614 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); 3615 q->dbval = IONIC_DBELL_QID(q->hw_index); 3616 3617 dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type); 3618 dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index); 3619 3620 /* preset the callback info */ 3621 q->admin_info[0].ctx = lif; 3622 3623 qcq->flags |= IONIC_QCQ_F_INITED; 3624 3625 return 0; 3626 } 3627 3628 static int ionic_station_set(struct ionic_lif *lif) 3629 { 3630 struct net_device *netdev = lif->netdev; 3631 struct ionic_admin_ctx ctx = { 3632 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 3633 .cmd.lif_getattr = { 3634 .opcode = IONIC_CMD_LIF_GETATTR, 3635 .index = cpu_to_le16(lif->index), 3636 .attr = IONIC_LIF_ATTR_MAC, 3637 }, 3638 }; 3639 u8 mac_address[ETH_ALEN]; 3640 struct sockaddr addr; 3641 int err; 3642 3643 err = ionic_adminq_post_wait(lif, &ctx); 3644 if (err) 3645 return err; 3646 netdev_dbg(lif->netdev, "found initial MAC addr %pM\n", 3647 ctx.comp.lif_getattr.mac); 3648 ether_addr_copy(mac_address, ctx.comp.lif_getattr.mac); 3649 3650 if (is_zero_ether_addr(mac_address)) { 3651 eth_hw_addr_random(netdev); 3652 netdev_dbg(netdev, "Random Mac generated: %pM\n", netdev->dev_addr); 3653 ether_addr_copy(mac_address, netdev->dev_addr); 3654 3655 err = ionic_program_mac(lif, mac_address); 3656 
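		/* <0 is a real failure; >0 means older FW silently ignored
		 * the MAC set (set/get readback mismatch), handled just
		 * below by keeping the address we already have
		 */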
if (err < 0) 3657 return err; 3658 3659 if (err > 0) { 3660 netdev_dbg(netdev, "%s:SET/GET ATTR Mac are not same-due to old FW running\n", 3661 __func__); 3662 return 0; 3663 } 3664 } 3665 3666 if (!is_zero_ether_addr(netdev->dev_addr)) { 3667 /* If the netdev mac is non-zero and doesn't match the default 3668 * device address, it was set by something earlier and we're 3669 * likely here again after a fw-upgrade reset. We need to be 3670 * sure the netdev mac is in our filter list. 3671 */ 3672 if (!ether_addr_equal(mac_address, netdev->dev_addr)) 3673 ionic_lif_addr_add(lif, netdev->dev_addr); 3674 } else { 3675 /* Update the netdev mac with the device's mac */ 3676 ether_addr_copy(addr.sa_data, mac_address); 3677 addr.sa_family = AF_INET; 3678 err = eth_prepare_mac_addr_change(netdev, &addr); 3679 if (err) { 3680 netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n", 3681 addr.sa_data, err); 3682 return 0; 3683 } 3684 3685 eth_commit_mac_addr_change(netdev, &addr); 3686 } 3687 3688 netdev_dbg(lif->netdev, "adding station MAC addr %pM\n", 3689 netdev->dev_addr); 3690 ionic_lif_addr_add(lif, netdev->dev_addr); 3691 3692 return 0; 3693 } 3694 3695 int ionic_lif_init(struct ionic_lif *lif) 3696 { 3697 struct ionic_dev *idev = &lif->ionic->idev; 3698 struct device *dev = lif->ionic->dev; 3699 struct ionic_lif_init_comp comp; 3700 int dbpage_num; 3701 int err; 3702 3703 mutex_lock(&lif->ionic->dev_cmd_lock); 3704 ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa); 3705 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 3706 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); 3707 mutex_unlock(&lif->ionic->dev_cmd_lock); 3708 if (err) 3709 return err; 3710 3711 lif->hw_index = le16_to_cpu(comp.hw_index); 3712 3713 /* now that we have the hw_index we can figure out our doorbell page */ 3714 lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif); 3715 if (!lif->dbid_count) { 3716 dev_err(dev, "No doorbell pages, aborting\n"); 3717 return -EINVAL; 3718 } 3719 3720 lif->kern_pid = 0; 3721 dbpage_num = ionic_db_page_num(lif, lif->kern_pid); 3722 lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num); 3723 if (!lif->kern_dbpage) { 3724 dev_err(dev, "Cannot map dbpage, aborting\n"); 3725 return -ENOMEM; 3726 } 3727 3728 err = ionic_lif_adminq_init(lif); 3729 if (err) 3730 goto err_out_adminq_deinit; 3731 3732 if (lif->ionic->nnqs_per_lif) { 3733 err = ionic_lif_notifyq_init(lif); 3734 if (err) 3735 goto err_out_notifyq_deinit; 3736 } 3737 3738 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 3739 err = ionic_set_nic_features(lif, lif->netdev->features); 3740 else 3741 err = ionic_init_nic_features(lif); 3742 if (err) 3743 goto err_out_notifyq_deinit; 3744 3745 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { 3746 err = ionic_rx_filters_init(lif); 3747 if (err) 3748 goto err_out_notifyq_deinit; 3749 } 3750 3751 err = ionic_station_set(lif); 3752 if (err) 3753 goto err_out_notifyq_deinit; 3754 3755 lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT; 3756 lif->doorbell_wa = ionic_doorbell_wa(lif->ionic); 3757 3758 set_bit(IONIC_LIF_F_INITED, lif->state); 3759 3760 INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work); 3761 3762 return 0; 3763 3764 err_out_notifyq_deinit: 3765 napi_disable(&lif->adminqcq->napi); 3766 ionic_lif_qcq_deinit(lif, lif->notifyqcq); 3767 err_out_adminq_deinit: 3768 ionic_lif_qcq_deinit(lif, lif->adminqcq); 3769 ionic_lif_reset(lif); 3770 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); 3771 lif->kern_dbpage = NULL; 
3772 3773 return err; 3774 } 3775 3776 static void ionic_lif_notify_work(struct work_struct *ws) 3777 { 3778 } 3779 3780 static void ionic_lif_set_netdev_info(struct ionic_lif *lif) 3781 { 3782 struct ionic_admin_ctx ctx = { 3783 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 3784 .cmd.lif_setattr = { 3785 .opcode = IONIC_CMD_LIF_SETATTR, 3786 .index = cpu_to_le16(lif->index), 3787 .attr = IONIC_LIF_ATTR_NAME, 3788 }, 3789 }; 3790 3791 strscpy(ctx.cmd.lif_setattr.name, netdev_name(lif->netdev), 3792 sizeof(ctx.cmd.lif_setattr.name)); 3793 3794 ionic_adminq_post_wait(lif, &ctx); 3795 } 3796 3797 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev) 3798 { 3799 if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit) 3800 return NULL; 3801 3802 return netdev_priv(netdev); 3803 } 3804 3805 static int ionic_lif_notify(struct notifier_block *nb, 3806 unsigned long event, void *info) 3807 { 3808 struct net_device *ndev = netdev_notifier_info_to_dev(info); 3809 struct ionic *ionic = container_of(nb, struct ionic, nb); 3810 struct ionic_lif *lif = ionic_netdev_lif(ndev); 3811 3812 if (!lif || lif->ionic != ionic) 3813 return NOTIFY_DONE; 3814 3815 switch (event) { 3816 case NETDEV_CHANGENAME: 3817 ionic_lif_set_netdev_info(lif); 3818 break; 3819 } 3820 3821 return NOTIFY_DONE; 3822 } 3823 3824 int ionic_lif_register(struct ionic_lif *lif) 3825 { 3826 int err; 3827 3828 ionic_lif_register_phc(lif); 3829 3830 INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work); 3831 3832 lif->ionic->nb.notifier_call = ionic_lif_notify; 3833 3834 err = register_netdevice_notifier(&lif->ionic->nb); 3835 if (err) 3836 lif->ionic->nb.notifier_call = NULL; 3837 3838 /* only register LIF0 for now */ 3839 err = register_netdev(lif->netdev); 3840 if (err) { 3841 dev_err(lif->ionic->dev, "Cannot register net device, aborting\n"); 3842 ionic_lif_unregister_phc(lif); 3843 return err; 3844 } 3845 3846 ionic_link_status_check_request(lif, CAN_SLEEP); 3847 lif->registered = true; 3848 ionic_lif_set_netdev_info(lif); 3849 3850 return 0; 3851 } 3852 3853 void ionic_lif_unregister(struct ionic_lif *lif) 3854 { 3855 if (lif->ionic->nb.notifier_call) { 3856 unregister_netdevice_notifier(&lif->ionic->nb); 3857 cancel_work_sync(&lif->ionic->nb_work); 3858 lif->ionic->nb.notifier_call = NULL; 3859 } 3860 3861 if (lif->netdev->reg_state == NETREG_REGISTERED) 3862 unregister_netdev(lif->netdev); 3863 3864 ionic_lif_unregister_phc(lif); 3865 3866 lif->registered = false; 3867 } 3868 3869 static void ionic_lif_queue_identify(struct ionic_lif *lif) 3870 { 3871 union ionic_q_identity __iomem *q_ident; 3872 struct ionic *ionic = lif->ionic; 3873 struct ionic_dev *idev; 3874 u16 max_frags; 3875 int qtype; 3876 int err; 3877 3878 idev = &lif->ionic->idev; 3879 q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data; 3880 3881 for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) { 3882 struct ionic_qtype_info *qti = &lif->qtype_info[qtype]; 3883 3884 /* filter out the ones we know about */ 3885 switch (qtype) { 3886 case IONIC_QTYPE_ADMINQ: 3887 case IONIC_QTYPE_NOTIFYQ: 3888 case IONIC_QTYPE_RXQ: 3889 case IONIC_QTYPE_TXQ: 3890 break; 3891 default: 3892 continue; 3893 } 3894 3895 memset(qti, 0, sizeof(*qti)); 3896 3897 mutex_lock(&ionic->dev_cmd_lock); 3898 ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype, 3899 ionic_qtype_versions[qtype]); 3900 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); 3901 if (!err) { 3902 qti->version = readb(&q_ident->version); 3903 qti->supported = 
readb(&q_ident->supported); 3904 qti->features = readq(&q_ident->features); 3905 qti->desc_sz = readw(&q_ident->desc_sz); 3906 qti->comp_sz = readw(&q_ident->comp_sz); 3907 qti->sg_desc_sz = readw(&q_ident->sg_desc_sz); 3908 qti->max_sg_elems = readw(&q_ident->max_sg_elems); 3909 qti->sg_desc_stride = readw(&q_ident->sg_desc_stride); 3910 } 3911 mutex_unlock(&ionic->dev_cmd_lock); 3912 3913 if (err == -EINVAL) { 3914 dev_err(ionic->dev, "qtype %d not supported\n", qtype); 3915 continue; 3916 } else if (err == -EIO) { 3917 dev_err(ionic->dev, "q_ident failed, not supported on older FW\n"); 3918 return; 3919 } else if (err) { 3920 dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n", 3921 qtype, err); 3922 return; 3923 } 3924 3925 dev_dbg(ionic->dev, " qtype[%d].version = %d\n", 3926 qtype, qti->version); 3927 dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n", 3928 qtype, qti->supported); 3929 dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n", 3930 qtype, qti->features); 3931 dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n", 3932 qtype, qti->desc_sz); 3933 dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n", 3934 qtype, qti->comp_sz); 3935 dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n", 3936 qtype, qti->sg_desc_sz); 3937 dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n", 3938 qtype, qti->max_sg_elems); 3939 dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n", 3940 qtype, qti->sg_desc_stride); 3941 3942 if (qtype == IONIC_QTYPE_TXQ) 3943 max_frags = IONIC_TX_MAX_FRAGS; 3944 else if (qtype == IONIC_QTYPE_RXQ) 3945 max_frags = IONIC_RX_MAX_FRAGS; 3946 else 3947 max_frags = 1; 3948 3949 qti->max_sg_elems = min_t(u16, max_frags - 1, MAX_SKB_FRAGS); 3950 dev_dbg(ionic->dev, "qtype %d max_sg_elems %d\n", 3951 qtype, qti->max_sg_elems); 3952 } 3953 } 3954 3955 int ionic_lif_identify(struct ionic *ionic, u8 lif_type, 3956 union ionic_lif_identity *lid) 3957 { 3958 struct ionic_dev *idev = &ionic->idev; 3959 size_t sz; 3960 int err; 3961 3962 sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data)); 3963 3964 mutex_lock(&ionic->dev_cmd_lock); 3965 ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1); 3966 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); 3967 memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz); 3968 mutex_unlock(&ionic->dev_cmd_lock); 3969 if (err) 3970 return (err); 3971 3972 dev_dbg(ionic->dev, "capabilities 0x%llx\n", 3973 le64_to_cpu(lid->capabilities)); 3974 3975 dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n", 3976 le32_to_cpu(lid->eth.max_ucast_filters)); 3977 dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n", 3978 le32_to_cpu(lid->eth.max_mcast_filters)); 3979 dev_dbg(ionic->dev, "eth.features 0x%llx\n", 3980 le64_to_cpu(lid->eth.config.features)); 3981 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n", 3982 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ])); 3983 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n", 3984 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ])); 3985 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n", 3986 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ])); 3987 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n", 3988 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ])); 3989 dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name); 3990 dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac); 3991 dev_dbg(ionic->dev, "eth.config.mtu %d\n", 3992 le32_to_cpu(lid->eth.config.mtu)); 3993 3994 return 0; 3995 } 3996 3997 int 
ionic_lif_size(struct ionic *ionic) 3998 { 3999 struct ionic_identity *ident = &ionic->ident; 4000 unsigned int nintrs, dev_nintrs; 4001 union ionic_lif_config *lc; 4002 unsigned int ntxqs_per_lif; 4003 unsigned int nrxqs_per_lif; 4004 unsigned int neqs_per_lif; 4005 unsigned int nnqs_per_lif; 4006 unsigned int nxqs, neqs; 4007 unsigned int min_intrs; 4008 int err; 4009 4010 /* retrieve basic values from FW */ 4011 lc = &ident->lif.eth.config; 4012 dev_nintrs = le32_to_cpu(ident->dev.nintrs); 4013 neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count); 4014 nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]); 4015 ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]); 4016 nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]); 4017 4018 /* limit values to play nice with kdump */ 4019 if (is_kdump_kernel()) { 4020 dev_nintrs = 2; 4021 neqs_per_lif = 0; 4022 nnqs_per_lif = 0; 4023 ntxqs_per_lif = 1; 4024 nrxqs_per_lif = 1; 4025 } 4026 4027 /* reserve last queue id for hardware timestamping */ 4028 if (lc->features & cpu_to_le64(IONIC_ETH_HW_TIMESTAMP)) { 4029 if (ntxqs_per_lif <= 1 || nrxqs_per_lif <= 1) { 4030 lc->features &= cpu_to_le64(~IONIC_ETH_HW_TIMESTAMP); 4031 } else { 4032 ntxqs_per_lif -= 1; 4033 nrxqs_per_lif -= 1; 4034 } 4035 } 4036 4037 nxqs = min(ntxqs_per_lif, nrxqs_per_lif); 4038 nxqs = min(nxqs, num_online_cpus()); 4039 neqs = min(neqs_per_lif, num_online_cpus()); 4040 4041 try_again: 4042 /* interrupt usage: 4043 * 1 for master lif adminq/notifyq 4044 * 1 for each CPU for master lif TxRx queue pairs 4045 * whatever's left is for RDMA queues 4046 */ 4047 nintrs = 1 + nxqs + neqs; 4048 min_intrs = 2; /* adminq + 1 TxRx queue pair */ 4049 4050 if (nintrs > dev_nintrs) 4051 goto try_fewer; 4052 4053 err = ionic_bus_alloc_irq_vectors(ionic, nintrs); 4054 if (err < 0 && err != -ENOSPC) { 4055 dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err); 4056 return err; 4057 } 4058 if (err == -ENOSPC) 4059 goto try_fewer; 4060 4061 if (err != nintrs) { 4062 ionic_bus_free_irq_vectors(ionic); 4063 goto try_fewer; 4064 } 4065 4066 ionic->nnqs_per_lif = nnqs_per_lif; 4067 ionic->neqs_per_lif = neqs; 4068 ionic->ntxqs_per_lif = nxqs; 4069 ionic->nrxqs_per_lif = nxqs; 4070 ionic->nintrs = nintrs; 4071 4072 ionic_debugfs_add_sizes(ionic); 4073 4074 return 0; 4075 4076 try_fewer: 4077 if (nnqs_per_lif > 1) { 4078 nnqs_per_lif >>= 1; 4079 goto try_again; 4080 } 4081 if (neqs > 1) { 4082 neqs >>= 1; 4083 goto try_again; 4084 } 4085 if (nxqs > 1) { 4086 nxqs >>= 1; 4087 goto try_again; 4088 } 4089 dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs); 4090 return -ENOSPC; 4091 } 4092
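/* Worked example of the sizing loop in ionic_lif_size(), with made-up
 * numbers for illustration only (not from any particular device):
 * suppose dev_nintrs = 64, 16 online CPUs, and the FW advertises
 * 32 Tx queues, 32 Rx queues, and 8 RDMA EQs per lif.  Then
 *
 *	nxqs = min(32, 32) = 32, clamped to num_online_cpus() -> 16
 *	neqs = min(8, 16) = 8
 *	nintrs = 1 + 16 + 8 = 25	(adminq/notifyq + TxRx pairs + EQs)
 *
 * 25 <= 64, so the first pass asks the OS for 25 vectors.  If the OS
 * can't supply a request, the try_fewer path halves nnqs, then neqs,
 * then nxqs until the request fits, giving up only below
 * min_intrs = 2 (adminq + one TxRx queue pair).
 */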