// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ethtool.h>
#include <linux/printk.h>
#include <linux/dynamic_debug.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/cpumask.h>
#include <linux/crash_dump.h>
#include <linux/vmalloc.h>

#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_dev.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"

/* queuetype support level */
static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
	[IONIC_QTYPE_ADMINQ]  = 0,	/* 0 = Base version with CQ support */
	[IONIC_QTYPE_NOTIFYQ] = 0,	/* 0 = Base version */
	[IONIC_QTYPE_RXQ]     = 2,	/* 0 = Base version with CQ+SG support
					 * 2 =   ... with CMB rings
					 */
	[IONIC_QTYPE_TXQ]     = 3,	/* 0 = Base version with CQ+SG support
					 * 1 =   ... with Tx SG version 1
					 * 3 =   ... with CMB rings
					 */
};

static void ionic_link_status_check(struct ionic_lif *lif);
static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
static void ionic_lif_set_netdev_info(struct ionic_lif *lif);

static void ionic_txrx_deinit(struct ionic_lif *lif);
static int ionic_txrx_init(struct ionic_lif *lif);
static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);

static int ionic_xdp_queues_config(struct ionic_lif *lif);
static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q);

static void ionic_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct dim_cq_moder cur_moder;
	struct ionic_intr_info *intr;
	struct ionic_qcq *qcq;
	struct ionic_lif *lif;
	struct ionic_queue *q;
	u32 new_coal;

	qcq = container_of(dim, struct ionic_qcq, dim);
	q = &qcq->q;
	if (q->type == IONIC_QTYPE_RXQ)
		cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	else
		cur_moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
	lif = q->lif;
	new_coal = ionic_coal_usec_to_hw(lif->ionic, cur_moder.usec);
	new_coal = new_coal ? new_coal : 1;

	intr = &qcq->intr;
	if (intr->dim_coal_hw != new_coal) {
		intr->dim_coal_hw = new_coal;

		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     intr->index, intr->dim_coal_hw);
	}

	dim->state = DIM_START_MEASURE;
}

static void ionic_lif_deferred_work(struct work_struct *work)
{
	struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
	struct ionic_deferred *def = &lif->deferred;
	struct ionic_deferred_work *w = NULL;

	do {
		spin_lock_bh(&def->lock);
		if (!list_empty(&def->list)) {
			w = list_first_entry(&def->list,
					     struct ionic_deferred_work, list);
			list_del(&w->list);
		}
		spin_unlock_bh(&def->lock);

		if (!w)
			break;

		switch (w->type) {
		case IONIC_DW_TYPE_RX_MODE:
			ionic_lif_rx_mode(lif);
			break;
		case IONIC_DW_TYPE_LINK_STATUS:
			ionic_link_status_check(lif);
			break;
		case IONIC_DW_TYPE_LIF_RESET:
			if (w->fw_status) {
				ionic_lif_handle_fw_up(lif);
			} else {
				ionic_lif_handle_fw_down(lif);

				/* Fire off another watchdog to see
				 * if the FW is already back rather than
				 * waiting another whole cycle
				 */
				mod_timer(&lif->ionic->watchdog_timer, jiffies + 1);
			}
			break;
		default:
			break;
		}
		kfree(w);
		w = NULL;
	} while (true);
}

void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
				struct ionic_deferred_work *work)
{
	spin_lock_bh(&def->lock);
	list_add_tail(&work->list, &def->list);
	spin_unlock_bh(&def->lock);
	schedule_work(&def->work);
}

static void ionic_link_status_check(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	u16 link_status;
	bool link_up;

	if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	/* Don't put carrier back up if we're in a broken state */
	if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) {
		clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
		return;
	}

	link_status = le16_to_cpu(lif->info->status.link_status);
	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;

	if (link_up) {
		int err = 0;

		if (netdev->flags & IFF_UP && netif_running(netdev)) {
			mutex_lock(&lif->queue_lock);
			err = ionic_start_queues(lif);
			if (err && err != -EBUSY) {
				netdev_err(netdev,
					   "Failed to start queues: %d\n", err);
				set_bit(IONIC_LIF_F_BROKEN, lif->state);
				netif_carrier_off(lif->netdev);
			}
			mutex_unlock(&lif->queue_lock);
		}

		if (!err && !netif_carrier_ok(netdev)) {
			ionic_port_identify(lif->ionic);
			netdev_info(netdev, "Link up - %d Gbps\n",
				    le32_to_cpu(lif->info->status.link_speed) / 1000);
			netif_carrier_on(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			lif->link_down_count++;
			netdev_info(netdev, "Link down\n");
			netif_carrier_off(netdev);
		}

		if (netdev->flags & IFF_UP && netif_running(netdev)) {
			mutex_lock(&lif->queue_lock);
			ionic_stop_queues(lif);
			mutex_unlock(&lif->queue_lock);
		}
	}

	clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
}

void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
{
	struct ionic_deferred_work *work;

	/* we only need one request outstanding at a time */
	if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	if (!can_sleep) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
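			/* allocation failed, so drop the request flag
			 * and let the next link event try again
			 */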
			clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
			return;
		}

		work->type = IONIC_DW_TYPE_LINK_STATUS;
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		ionic_link_status_check(lif);
	}
}

static void ionic_napi_deadline(struct timer_list *timer)
{
	struct ionic_qcq *qcq = container_of(timer, struct ionic_qcq, napi_deadline);

	napi_schedule(&qcq->napi);
}

static irqreturn_t ionic_isr(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_intr_info *intr = &qcq->intr;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	const char *name;

	if (lif->registered)
		name = lif->netdev->name;
	else
		name = dev_name(dev);

	snprintf(intr->name, sizeof(intr->name),
		 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);

	return devm_request_irq(dev, intr->vector, ionic_isr,
				0, intr->name, &qcq->napi);
}

static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
{
	struct ionic *ionic = lif->ionic;
	int index;

	index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
	if (index == ionic->nintrs) {
		netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
			    __func__, index, ionic->nintrs);
		return -ENOSPC;
	}

	set_bit(index, ionic->intrs);
	ionic_intr_init(&ionic->idev, intr, index);

	return 0;
}

static void ionic_intr_free(struct ionic *ionic, int index)
{
	if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
		clear_bit(index, ionic->intrs);
}

static int ionic_qcq_enable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = q->lif;
	struct ionic_dev *idev;
	struct device *dev;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.oper = IONIC_Q_ENABLE,
		},
	};
	int ret;

	idev = &lif->ionic->idev;
	dev = lif->ionic->dev;

	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	if (qcq->flags & IONIC_QCQ_F_INTR)
		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);

	ret = ionic_adminq_post_wait(lif, &ctx);
	if (ret)
		return ret;

	if (qcq->napi.poll)
		napi_enable(&qcq->napi);

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		irq_set_affinity_hint(qcq->intr.vector,
				      &qcq->intr.affinity_mask);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);
	}

	return 0;
}

static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err)
{
	struct ionic_queue *q;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.oper = IONIC_Q_DISABLE,
		},
	};

	if (!qcq) {
		netdev_err(lif->netdev, "%s: bad qcq\n", __func__);
		return -ENXIO;
	}

	q = &qcq->q;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		struct ionic_dev *idev = &lif->ionic->idev;

		cancel_work_sync(&qcq->dim.work);
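		/* mask the interrupt and wait out any handler still
		 * running before NAPI and its deadline timer are stopped
		 */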
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		synchronize_irq(qcq->intr.vector);
		irq_set_affinity_hint(qcq->intr.vector, NULL);
		napi_disable(&qcq->napi);
		del_timer_sync(&qcq->napi_deadline);
	}

	/* If there was a previous fw communication error, don't bother with
	 * sending the adminq command and just return the same error value.
	 */
	if (fw_err == -ETIMEDOUT || fw_err == -ENXIO)
		return fw_err;

	ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
	ctx.cmd.q_control.type = q->type;
	ctx.cmd.q_control.index = cpu_to_le32(q->index);
	dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	return ionic_adminq_post_wait(lif, &ctx);
}

static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	if (!qcq)
		return;

	if (!(qcq->flags & IONIC_QCQ_F_INITED))
		return;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		netif_napi_del(&qcq->napi);
	}

	qcq->flags &= ~IONIC_QCQ_F_INITED;
}

static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
		return;

	irq_set_affinity_hint(qcq->intr.vector, NULL);
	devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
	qcq->intr.vector = 0;
	ionic_intr_free(lif->ionic, qcq->intr.index);
	qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}

static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;

	if (!qcq)
		return;

	ionic_debugfs_del_qcq(qcq);

	if (qcq->q_base) {
		dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
		qcq->q_base = NULL;
		qcq->q_base_pa = 0;
	}

	if (qcq->cmb_q_base) {
		iounmap(qcq->cmb_q_base);
		ionic_put_cmb(lif, qcq->cmb_pgid, qcq->cmb_order);
		qcq->cmb_pgid = 0;
		qcq->cmb_order = 0;
		qcq->cmb_q_base = NULL;
		qcq->cmb_q_base_pa = 0;
	}

	if (qcq->cq_base) {
		dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
		qcq->cq_base = NULL;
		qcq->cq_base_pa = 0;
	}

	if (qcq->sg_base) {
		dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
		qcq->sg_base = NULL;
		qcq->sg_base_pa = 0;
	}

	ionic_xdp_unregister_rxq_info(&qcq->q);
	ionic_qcq_intr_free(lif, qcq);

	vfree(qcq->cq.info);
	qcq->cq.info = NULL;
	vfree(qcq->q.info);
	qcq->q.info = NULL;
}

void ionic_qcqs_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_qcq *adminqcq;
	unsigned long irqflags;

	if (lif->notifyqcq) {
		ionic_qcq_free(lif, lif->notifyqcq);
		devm_kfree(dev, lif->notifyqcq);
		lif->notifyqcq = NULL;
	}

	if (lif->adminqcq) {
		spin_lock_irqsave(&lif->adminq_lock, irqflags);
		adminqcq = READ_ONCE(lif->adminqcq);
		lif->adminqcq = NULL;
		spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
		if (adminqcq) {
			ionic_qcq_free(lif, adminqcq);
			devm_kfree(dev, adminqcq);
		}
	}

	if (lif->rxqcqs) {
		devm_kfree(dev, lif->rxqstats);
		lif->rxqstats = NULL;
		devm_kfree(dev, lif->rxqcqs);
		lif->rxqcqs = NULL;
	}

	if (lif->txqcqs) {
		devm_kfree(dev, lif->txqstats);
		lif->txqstats = NULL;
		devm_kfree(dev, lif->txqcqs);
		lif->txqcqs = NULL;
	}
}

static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
				      struct ionic_qcq *n_qcq)
{
	n_qcq->intr.vector = src_qcq->intr.vector;
	n_qcq->intr.index = src_qcq->intr.index;
	n_qcq->napi_qcq = src_qcq->napi_qcq;
}

static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	int err;

	if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
		qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
		return 0;
	}

	err = ionic_intr_alloc(lif, &qcq->intr);
	if (err) {
		netdev_warn(lif->netdev, "no intr for %s: %d\n",
			    qcq->q.name, err);
		goto err_out;
	}

	err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
	if (err < 0) {
		netdev_warn(lif->netdev, "no vector for %s: %d\n",
			    qcq->q.name, err);
		goto err_out_free_intr;
	}
	qcq->intr.vector = err;
	ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
			       IONIC_INTR_MASK_SET);

	err = ionic_request_irq(lif, qcq);
	if (err) {
		netdev_warn(lif->netdev, "irq request failed %d\n", err);
		goto err_out_free_intr;
	}

	/* try to get the irq on the local numa node first */
	qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
					     dev_to_node(lif->ionic->dev));
	if (qcq->intr.cpu != -1)
		cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);

	netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
	return 0;

err_out_free_intr:
	ionic_intr_free(lif->ionic, qcq->intr.index);
err_out:
	return err;
}

static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
			   unsigned int index,
			   const char *name, unsigned int flags,
			   unsigned int num_descs, unsigned int desc_size,
			   unsigned int cq_desc_size,
			   unsigned int sg_desc_size,
			   unsigned int pid, struct ionic_qcq **qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	void *q_base, *cq_base, *sg_base;
	dma_addr_t cq_base_pa = 0;
	dma_addr_t sg_base_pa = 0;
	dma_addr_t q_base_pa = 0;
	struct ionic_qcq *new;
	int err;

	*qcq = NULL;

	new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
	if (!new) {
		netdev_err(lif->netdev, "Cannot allocate queue structure\n");
		err = -ENOMEM;
		goto err_out;
	}

	new->q.dev = dev;
	new->flags = flags;

	new->q.info = vcalloc(num_descs, sizeof(*new->q.info));
	if (!new->q.info) {
		netdev_err(lif->netdev, "Cannot allocate queue info\n");
		err = -ENOMEM;
		goto err_out_free_qcq;
	}

	new->q.type = type;
	new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;

	err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
			   desc_size, sg_desc_size, pid);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize queue\n");
		goto err_out_free_q_info;
	}

	err = ionic_alloc_qcq_interrupt(lif, new);
	if (err)
		goto err_out;

	new->cq.info = vcalloc(num_descs, sizeof(*new->cq.info));
	if (!new->cq.info) {
		netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
		err = -ENOMEM;
		goto err_out_free_irq;
	}

	err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize completion queue\n");
		goto err_out_free_cq_info;
	}

	if (flags & IONIC_QCQ_F_NOTIFYQ) {
		int q_size;

		/* q & cq need to be contiguous in NotifyQ, so alloc it all in q
		 * and don't alloc qc.  We leave new->qc_size and new->qc_base
		 * as 0 to be sure we don't try to free it later.
		 */
		q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
		new->q_size = PAGE_SIZE + q_size +
			      ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
		new->q_base = dma_alloc_coherent(dev, new->q_size,
						 &new->q_base_pa, GFP_KERNEL);
		if (!new->q_base) {
			netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
		q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
		ionic_q_map(&new->q, q_base, q_base_pa);

		cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE);
		cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
		ionic_cq_map(&new->cq, cq_base, cq_base_pa);
		ionic_cq_bind(&new->cq, &new->q);
	} else {
		/* regular DMA q descriptors */
		new->q_size = PAGE_SIZE + (num_descs * desc_size);
		new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
						 GFP_KERNEL);
		if (!new->q_base) {
			netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
		q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
		ionic_q_map(&new->q, q_base, q_base_pa);

		if (flags & IONIC_QCQ_F_CMB_RINGS) {
			/* on-chip CMB q descriptors */
			new->cmb_q_size = num_descs * desc_size;
			new->cmb_order = order_base_2(new->cmb_q_size / PAGE_SIZE);

			err = ionic_get_cmb(lif, &new->cmb_pgid, &new->cmb_q_base_pa,
					    new->cmb_order);
			if (err) {
				netdev_err(lif->netdev,
					   "Cannot allocate queue order %d from cmb: err %d\n",
					   new->cmb_order, err);
				goto err_out_free_q;
			}

			new->cmb_q_base = ioremap_wc(new->cmb_q_base_pa, new->cmb_q_size);
			if (!new->cmb_q_base) {
				netdev_err(lif->netdev, "Cannot map queue from cmb\n");
				ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
				err = -ENOMEM;
				goto err_out_free_q;
			}

			new->cmb_q_base_pa -= idev->phy_cmb_pages;
			ionic_q_cmb_map(&new->q, new->cmb_q_base, new->cmb_q_base_pa);
		}

		/* cq DMA descriptors */
		new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
		new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
						  GFP_KERNEL);
		if (!new->cq_base) {
			netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_q;
		}
		cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
		cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
		ionic_cq_map(&new->cq, cq_base, cq_base_pa);
		ionic_cq_bind(&new->cq, &new->q);
	}

	if (flags & IONIC_QCQ_F_SG) {
		new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
		new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
						  GFP_KERNEL);
		if (!new->sg_base) {
			netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq;
		}
		sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
		sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
	}

	INIT_WORK(&new->dim.work, ionic_dim_work);
	new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;

	*qcq = new;

	return 0;

err_out_free_cq:
	dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
err_out_free_q:
	if (new->cmb_q_base) {
		iounmap(new->cmb_q_base);
		ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
	}
	dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
err_out_free_cq_info:
	vfree(new->cq.info);
err_out_free_irq:
	if (flags & IONIC_QCQ_F_INTR) {
		devm_free_irq(dev, new->intr.vector, &new->napi);
		ionic_intr_free(lif->ionic, new->intr.index);
	}
err_out_free_q_info:
	vfree(new->q.info);
err_out_free_qcq:
	devm_kfree(dev, new);
err_out:
	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
	return err;
}

static int ionic_qcqs_alloc(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	unsigned int flags;
	int err;

	flags = IONIC_QCQ_F_INTR;
	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
			      IONIC_ADMINQ_LENGTH,
			      sizeof(struct ionic_admin_cmd),
			      sizeof(struct ionic_admin_comp),
			      0, lif->kern_pid, &lif->adminqcq);
	if (err)
		return err;
	ionic_debugfs_add_qcq(lif, lif->adminqcq);

	if (lif->ionic->nnqs_per_lif) {
		flags = IONIC_QCQ_F_NOTIFYQ;
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
				      flags, IONIC_NOTIFYQ_LENGTH,
				      sizeof(struct ionic_notifyq_cmd),
				      sizeof(union ionic_notifyq_comp),
				      0, lif->kern_pid, &lif->notifyqcq);
		if (err)
			goto err_out;
		ionic_debugfs_add_qcq(lif, lif->notifyqcq);

		/* Let the notifyq ride on the adminq interrupt */
		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
	}

	err = -ENOMEM;
	lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
				   sizeof(*lif->txqcqs), GFP_KERNEL);
	if (!lif->txqcqs)
		goto err_out;
	lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
				   sizeof(*lif->rxqcqs), GFP_KERNEL);
	if (!lif->rxqcqs)
		goto err_out;

	lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif + 1,
				     sizeof(*lif->txqstats), GFP_KERNEL);
	if (!lif->txqstats)
		goto err_out;
	lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif + 1,
				     sizeof(*lif->rxqstats), GFP_KERNEL);
	if (!lif->rxqstats)
		goto err_out;

	return 0;

err_out:
	ionic_qcqs_free(lif);
	return err;
}

static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
{
	qcq->q.tail_idx = 0;
	qcq->q.head_idx = 0;
	qcq->cq.tail_idx = 0;
	qcq->cq.done_color = 1;
	memset(qcq->q_base, 0, qcq->q_size);
	if (qcq->cmb_q_base)
		memset_io(qcq->cmb_q_base, 0, qcq->cmb_q_size);
	memset(qcq->cq_base, 0, qcq->cq_size);
	memset(qcq->sg_base, 0, qcq->sg_size);
}

static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.intr_index = cpu_to_le16(qcq->intr.index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
			.features = cpu_to_le64(q->features),
		},
	};
	int err;

	if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
		ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
		ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
	}

	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "txq_init.cq_ring_base 0x%llx\n", ctx.cmd.q_init.cq_ring_base);
	dev_dbg(dev, "txq_init.sg_ring_base 0x%llx\n", ctx.cmd.q_init.sg_ring_base);
	dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);

	q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE;
	q->dbell_jiffies = jiffies;

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
		netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi);
		qcq->napi_qcq = qcq;
		timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
	}

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ),
			.intr_index = cpu_to_le16(cq->bound_intr->index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
			.features = cpu_to_le64(q->features),
		},
	};
	int err;

	q->partner = &lif->txqcqs[q->index]->q;
	q->partner->partner = q;

	if (!lif->xdp_prog ||
	    (lif->xdp_prog->aux && lif->xdp_prog->aux->xdp_has_frags))
		ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_SG);

	if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
		ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
		ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
	}

	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);

	q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
	q->dbell_jiffies = jiffies;

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi);
	else
		netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);

	qcq->napi_qcq = qcq;
	timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
{
	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
	unsigned int txq_i, flags;
	struct ionic_qcq *txq;
	u64 features;
	int err;

	if (lif->hwstamp_txq)
		return 0;

	features = IONIC_Q_F_2X_CQ_DESC | IONIC_TXQ_F_HWSTAMP;

	num_desc = IONIC_MIN_TXRX_DESC;
	desc_sz = sizeof(struct ionic_txq_desc);
	comp_sz = 2 * sizeof(struct ionic_txq_comp);

	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	txq_i = lif->ionic->ntxqs_per_lif;
	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;

	err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
			      num_desc, desc_sz, comp_sz, sg_desc_sz,
			      lif->kern_pid, &txq);
	if (err)
		goto err_qcq_alloc;

	txq->q.features = features;

	ionic_link_qcq_interrupts(lif->adminqcq, txq);
	ionic_debugfs_add_qcq(lif, txq);

	lif->hwstamp_txq = txq;

	if (netif_running(lif->netdev)) {
		err = ionic_lif_txq_init(lif, txq);
		if (err)
			goto err_qcq_init;

		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
			err = ionic_qcq_enable(txq);
			if (err)
				goto err_qcq_enable;
		}
	}

	return 0;

err_qcq_enable:
	ionic_lif_qcq_deinit(lif, txq);
err_qcq_init:
	lif->hwstamp_txq = NULL;
	ionic_debugfs_del_qcq(txq);
	ionic_qcq_free(lif, txq);
	devm_kfree(lif->ionic->dev, txq);
err_qcq_alloc:
	return err;
}

int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
{
	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
	unsigned int rxq_i, flags;
	struct ionic_qcq *rxq;
	u64 features;
	int err;

	if (lif->hwstamp_rxq)
		return 0;

	features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;

	num_desc = IONIC_MIN_TXRX_DESC;
	desc_sz = sizeof(struct ionic_rxq_desc);
	comp_sz = 2 * sizeof(struct ionic_rxq_comp);
	sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);

	rxq_i = lif->ionic->nrxqs_per_lif;
	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;

	err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
			      num_desc, desc_sz, comp_sz, sg_desc_sz,
			      lif->kern_pid, &rxq);
	if (err)
		goto err_qcq_alloc;

	rxq->q.features = features;

	ionic_link_qcq_interrupts(lif->adminqcq, rxq);
	ionic_debugfs_add_qcq(lif, rxq);

	lif->hwstamp_rxq = rxq;

	if (netif_running(lif->netdev)) {
		err = ionic_lif_rxq_init(lif, rxq);
		if (err)
			goto err_qcq_init;

		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
			ionic_rx_fill(&rxq->q);
			err = ionic_qcq_enable(rxq);
			if (err)
				goto err_qcq_enable;
		}
	}

	return 0;

err_qcq_enable:
	ionic_lif_qcq_deinit(lif, rxq);
err_qcq_init:
	lif->hwstamp_rxq = NULL;
	ionic_debugfs_del_qcq(rxq);
	ionic_qcq_free(lif, rxq);
	devm_kfree(lif->ionic->dev, rxq);
err_qcq_alloc:
	return err;
}

int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif *lif, bool rx_all)
{
	struct ionic_queue_params qparam;

	ionic_init_queue_params(lif, &qparam);

	if (rx_all)
		qparam.rxq_features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;
	else
		qparam.rxq_features = 0;

	/* if we're not running, just set the values and return */
	if (!netif_running(lif->netdev)) {
		lif->rxq_features = qparam.rxq_features;
		return 0;
	}

	return ionic_reconfigure_queues(lif, &qparam);
}

int ionic_lif_set_hwstamp_txmode(struct ionic_lif *lif, u16 txstamp_mode)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_TXSTAMP,
			.txstamp_mode = cpu_to_le16(txstamp_mode),
		},
	};

	return ionic_adminq_post_wait(lif, &ctx);
}

static void ionic_lif_del_hwstamp_rxfilt(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	u32 filter_id;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_rxsteer(lif);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return;
	}

	filter_id = f->filter_id;
	ionic_rx_filter_free(lif, f);

	spin_unlock_bh(&lif->rx_filters.lock);

	netdev_dbg(lif->netdev, "rx_filter del RXSTEER (id %d)\n", filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(filter_id);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		netdev_dbg(lif->netdev, "failed to delete rx_filter RXSTEER (id %d)\n", filter_id);
}

static int ionic_lif_add_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_STEER_PKTCLASS),
			.pkt_class = cpu_to_le64(pkt_class),
		},
	};
	u8 qtype;
	u32 qid;
	int err;

	if (!lif->hwstamp_rxq)
		return -EINVAL;

	qtype = lif->hwstamp_rxq->q.type;
	ctx.cmd.rx_filter_add.qtype = qtype;

	qid = lif->hwstamp_rxq->q.index;
	ctx.cmd.rx_filter_add.qid = cpu_to_le32(qid);

	netdev_dbg(lif->netdev, "rx_filter add RXSTEER\n");
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	spin_lock_bh(&lif->rx_filters.lock);
	err = ionic_rx_filter_save(lif, 0, qid, 0, &ctx, IONIC_FILTER_STATE_SYNCED);
	spin_unlock_bh(&lif->rx_filters.lock);

	return err;
}

int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
{
	ionic_lif_del_hwstamp_rxfilt(lif);

	if (!pkt_class)
		return 0;

	return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class);
}

static bool ionic_notifyq_service(struct ionic_cq *cq,
				  struct ionic_cq_info *cq_info)
{
	union ionic_notifyq_comp *comp = cq_info->cq_desc;
	struct ionic_deferred_work *work;
	struct net_device *netdev;
	struct ionic_queue *q;
	struct ionic_lif *lif;
	u64 eid;

	q = cq->bound_q;
	lif = q->info[0].cb_arg;
	netdev = lif->netdev;
	eid = le64_to_cpu(comp->event.eid);

	/* Have we run out of new completions to process? */
	if ((s64)(eid - lif->last_eid) <= 0)
		return false;

	lif->last_eid = eid;

	dev_dbg(lif->ionic->dev, "notifyq event:\n");
	dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
			 comp, sizeof(*comp), true);

	switch (le16_to_cpu(comp->event.ecode)) {
	case IONIC_EVENT_LINK_CHANGE:
		ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
		break;
	case IONIC_EVENT_RESET:
		if (lif->ionic->idev.fw_status_ready &&
		    !test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
		    !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
			work = kzalloc(sizeof(*work), GFP_ATOMIC);
			if (!work) {
				netdev_err(lif->netdev, "Reset event dropped\n");
				clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
			} else {
				work->type = IONIC_DW_TYPE_LIF_RESET;
				ionic_lif_deferred_enqueue(&lif->deferred, work);
			}
		}
		break;
	default:
		netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
			    comp->event.ecode, eid);
		break;
	}

	return true;
}

static bool ionic_adminq_service(struct ionic_cq *cq,
				 struct ionic_cq_info *cq_info)
{
	struct ionic_admin_comp *comp = cq_info->cq_desc;

	if (!color_match(comp->color, cq->done_color))
		return false;

	ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));

	return true;
}

static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{
	struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
	struct ionic_lif *lif = napi_to_cq(napi)->lif;
	struct ionic_dev *idev = &lif->ionic->idev;
	unsigned long irqflags;
	unsigned int flags = 0;
	bool resched = false;
	int rx_work = 0;
	int tx_work = 0;
	int n_work = 0;
	int a_work = 0;
	int work_done;
	int credits;

	if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
		n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
					  ionic_notifyq_service, NULL, NULL);

	spin_lock_irqsave(&lif->adminq_lock, irqflags);
	if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
		a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
					  ionic_adminq_service, NULL, NULL);
	spin_unlock_irqrestore(&lif->adminq_lock, irqflags);

	if (lif->hwstamp_rxq)
		rx_work = ionic_cq_service(&lif->hwstamp_rxq->cq, budget,
					   ionic_rx_service, NULL, NULL);

	if (lif->hwstamp_txq)
		tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget);

	work_done = max(max(n_work, a_work), max(rx_work, tx_work));
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		flags |= IONIC_INTR_CRED_UNMASK;
		intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		credits = n_work + a_work + rx_work + tx_work;
		ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
	}

	if (!a_work && ionic_adminq_poke_doorbell(&lif->adminqcq->q))
		resched = true;
	if (lif->hwstamp_rxq && !rx_work && ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q))
		resched = true;
	if (lif->hwstamp_txq && !tx_work && ionic_txq_poke_doorbell(&lif->hwstamp_txq->q))
		resched = true;
	if (resched)
		mod_timer(&lif->adminqcq->napi_deadline,
			  jiffies + IONIC_NAPI_DEADLINE);

	return work_done;
}

void ionic_get_stats64(struct net_device *netdev,
		       struct rtnl_link_stats64 *ns)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_lif_stats *ls;

	memset(ns, 0, sizeof(*ns));
	ls = &lif->info->stats;

	ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
			 le64_to_cpu(ls->rx_mcast_packets) +
			 le64_to_cpu(ls->rx_bcast_packets);

	ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
			 le64_to_cpu(ls->tx_mcast_packets) +
			 le64_to_cpu(ls->tx_bcast_packets);

	ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
		       le64_to_cpu(ls->rx_mcast_bytes) +
		       le64_to_cpu(ls->rx_bcast_bytes);

	ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
		       le64_to_cpu(ls->tx_mcast_bytes) +
		       le64_to_cpu(ls->tx_bcast_bytes);

	ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
			 le64_to_cpu(ls->rx_mcast_drop_packets) +
			 le64_to_cpu(ls->rx_bcast_drop_packets);

	ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
			 le64_to_cpu(ls->tx_mcast_drop_packets) +
			 le64_to_cpu(ls->tx_bcast_drop_packets);

	ns->multicast = le64_to_cpu(ls->rx_mcast_packets);

	ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);

	ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
			       le64_to_cpu(ls->rx_queue_disabled) +
			       le64_to_cpu(ls->rx_desc_fetch_error) +
			       le64_to_cpu(ls->rx_desc_data_error);

	ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
				le64_to_cpu(ls->tx_queue_disabled) +
				le64_to_cpu(ls->tx_desc_fetch_error) +
				le64_to_cpu(ls->tx_desc_data_error);

	ns->rx_errors = ns->rx_over_errors +
			ns->rx_missed_errors;

	ns->tx_errors = ns->tx_aborted_errors;
}

static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_list_addr(netdev_priv(netdev), addr, ADD_ADDR);
}

static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
	/* Don't delete our own address from the uc list */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	return ionic_lif_list_addr(netdev_priv(netdev), addr, DEL_ADDR);
}

void ionic_lif_rx_mode(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	unsigned int nfilters;
	unsigned int nd_flags;
	char buf[128];
	u16 rx_mode;
	int i;
#define REMAIN(__x) (sizeof(buf) - (__x))

	mutex_lock(&lif->config_lock);

	/* grab the flags once for local use */
	nd_flags = netdev->flags;

	rx_mode = IONIC_RX_MODE_F_UNICAST;
	rx_mode |= (nd_flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
	rx_mode |= (nd_flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
	rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
	rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;

	/* sync the filters */
	ionic_rx_filter_sync(lif);

	/* check for overflow state
	 *    if so, we track that we overflowed and enable NIC PROMISC
	 *    else if the overflow is set and not needed
	 *       we remove our overflow flag and check the netdev flags
	 *       to see if we can disable NIC PROMISC
	 */
	nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);

	if (((lif->nucast + lif->nmcast) >= nfilters) ||
	    (lif->max_vlans && lif->nvlans >= lif->max_vlans)) {
		rx_mode |= IONIC_RX_MODE_F_PROMISC;
		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
	} else {
		if (!(nd_flags & IFF_PROMISC))
			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
		if (!(nd_flags & IFF_ALLMULTI))
			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
	}

	i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
		      lif->rx_mode, rx_mode);
	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
	if (rx_mode & IONIC_RX_MODE_F_RDMA_SNIFFER)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_RDMA_SNIFFER");
	netdev_dbg(netdev, "lif%d %s\n", lif->index, buf);

	if (lif->rx_mode != rx_mode) {
		struct ionic_admin_ctx ctx = {
			.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
			.cmd.rx_mode_set = {
				.opcode = IONIC_CMD_RX_MODE_SET,
				.lif_index = cpu_to_le16(lif->index),
			},
		};
		int err;

		ctx.cmd.rx_mode_set.rx_mode = cpu_to_le16(rx_mode);
		err = ionic_adminq_post_wait(lif, &ctx);
		if (err)
			netdev_warn(netdev, "set rx_mode 0x%04x failed: %d\n",
				    rx_mode, err);
		else
			lif->rx_mode = rx_mode;
	}

	mutex_unlock(&lif->config_lock);
}

static void ionic_ndo_set_rx_mode(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_deferred_work *work;

	/* Sync the kernel filter list with the driver filter list */
	__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
	__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);

	/* Shove off the rest of the rxmode work to the work task
	 * which will include syncing the filters to the firmware.
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		netdev_err(lif->netdev, "rxmode change dropped\n");
		return;
	}
	work->type = IONIC_DW_TYPE_RX_MODE;
	netdev_dbg(lif->netdev, "deferred: rx_mode\n");
	ionic_lif_deferred_enqueue(&lif->deferred, work);
}

static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
{
	u64 wanted = 0;

	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
	if (features & NETIF_F_RXHASH)
		wanted |= IONIC_ETH_HW_RX_HASH;
	if (features & NETIF_F_RXCSUM)
		wanted |= IONIC_ETH_HW_RX_CSUM;
	if (features & NETIF_F_SG)
		wanted |= IONIC_ETH_HW_TX_SG;
	if (features & NETIF_F_HW_CSUM)
		wanted |= IONIC_ETH_HW_TX_CSUM;
	if (features & NETIF_F_TSO)
		wanted |= IONIC_ETH_HW_TSO;
	if (features & NETIF_F_TSO6)
		wanted |= IONIC_ETH_HW_TSO_IPV6;
	if (features & NETIF_F_TSO_ECN)
		wanted |= IONIC_ETH_HW_TSO_ECN;
	if (features & NETIF_F_GSO_GRE)
		wanted |= IONIC_ETH_HW_TSO_GRE;
	if (features & NETIF_F_GSO_GRE_CSUM)
		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
	if (features & NETIF_F_GSO_IPXIP4)
		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
	if (features & NETIF_F_GSO_IPXIP6)
		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
	if (features & NETIF_F_GSO_UDP_TUNNEL)
		wanted |= IONIC_ETH_HW_TSO_UDP;
	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;

	return cpu_to_le64(wanted);
}

static int ionic_set_nic_features(struct ionic_lif *lif,
				  netdev_features_t features)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_FEATURES,
		},
	};
	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
			 IONIC_ETH_HW_VLAN_RX_STRIP |
			 IONIC_ETH_HW_VLAN_RX_FILTER;
	u64 old_hw_features;
	int err;

	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);

	if (lif->phc)
		ctx.cmd.lif_setattr.features |= cpu_to_le64(IONIC_ETH_HW_TIMESTAMP);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	old_hw_features = lif->hw_features;
	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
				       ctx.comp.lif_setattr.features);

	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);

	if ((vlan_flags & le64_to_cpu(ctx.cmd.lif_setattr.features)) &&
	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		dev_dbg(dev, "feature ETH_HW_TSO\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TIMESTAMP)
		dev_dbg(dev, "feature ETH_HW_TIMESTAMP\n");

	return 0;
}

static int ionic_init_nic_features(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	netdev_features_t features;
	int err;

	/* set up what we expect to support by default */
	features = NETIF_F_HW_VLAN_CTAG_TX |
		   NETIF_F_HW_VLAN_CTAG_RX |
		   NETIF_F_HW_VLAN_CTAG_FILTER |
		   NETIF_F_SG |
		   NETIF_F_HW_CSUM |
		   NETIF_F_RXCSUM |
		   NETIF_F_TSO |
		   NETIF_F_TSO6 |
		   NETIF_F_TSO_ECN |
		   NETIF_F_GSO_GRE |
		   NETIF_F_GSO_GRE_CSUM |
		   NETIF_F_GSO_IPXIP4 |
		   NETIF_F_GSO_IPXIP6 |
		   NETIF_F_GSO_UDP_TUNNEL |
		   NETIF_F_GSO_UDP_TUNNEL_CSUM;

	if (lif->nxqs > 1)
		features |= NETIF_F_RXHASH;

	err = ionic_set_nic_features(lif, features);
	if (err)
		return err;

	/* tell the netdev what we actually can support */
	netdev->features |= NETIF_F_HIGHDMA;

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		netdev->hw_features |= NETIF_F_SG;

	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		netdev->hw_enc_features |= NETIF_F_RXCSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		netdev->hw_enc_features |= NETIF_F_TSO;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		netdev->hw_enc_features |= NETIF_F_TSO6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= netdev->hw_enc_features;
	netdev->features |= netdev->hw_features;
	netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;

	netdev->priv_flags |= IFF_UNICAST_FLT |
			      IFF_LIVE_ADDR_CHANGE;

	netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
			       NETDEV_XDP_ACT_REDIRECT |
			       NETDEV_XDP_ACT_RX_SG |
			       NETDEV_XDP_ACT_NDO_XMIT |
			       NETDEV_XDP_ACT_NDO_XMIT_SG;

	return 0;
}

static int ionic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
		   __func__, (u64)lif->netdev->features, (u64)features);

	err = ionic_set_nic_features(lif, features);

	return err;
}

static int ionic_set_attr_mac(struct ionic_lif *lif, u8 *mac)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};

	ether_addr_copy(ctx.cmd.lif_setattr.mac, mac);
	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_get_attr_mac(struct ionic_lif *lif, u8 *mac_addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_getattr = {
			.opcode = IONIC_CMD_LIF_GETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	ether_addr_copy(mac_addr, ctx.comp.lif_getattr.mac);
	return 0;
}

static int ionic_program_mac(struct ionic_lif *lif, u8 *mac)
{
	u8 get_mac[ETH_ALEN];
	int err;

	err = ionic_set_attr_mac(lif, mac);
	if (err)
		return err;

	err = ionic_get_attr_mac(lif, get_mac);
	if (err)
		return err;

	/* To deal with older firmware that silently ignores the set attr mac:
	 * doesn't actually change the mac and doesn't return an error, so we
	 * do the get attr to verify whether or not the set actually happened
	 */
	if (!ether_addr_equal(get_mac, mac))
		return 1;

	return 0;
}

static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct sockaddr *addr = sa;
	u8 *mac;
	int err;

	mac = (u8 *)addr->sa_data;
	if (ether_addr_equal(netdev->dev_addr, mac))
		return 0;

	err = ionic_program_mac(lif, mac);
	if (err < 0)
		return err;

	if (err > 0)
		netdev_dbg(netdev, "%s: SET and GET ATTR Mac are not equal - due to old FW running\n",
			   __func__);

	err = eth_prepare_mac_addr_change(netdev, addr);
	if (err)
		return err;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		netdev_info(netdev, "deleting mac addr %pM\n",
			    netdev->dev_addr);
		ionic_lif_addr_del(netdev_priv(netdev), netdev->dev_addr);
	}

	eth_commit_mac_addr_change(netdev, addr);
	netdev_info(netdev, "updating mac addr %pM\n", mac);

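	/* and finally add a filter for the new address */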
	return ionic_lif_addr_add(netdev_priv(netdev), mac);
}

void ionic_stop_queues_reconfig(struct ionic_lif *lif)
{
	/* Stop and clean the queues before reconfiguration */
	netif_device_detach(lif->netdev);
	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
}

static int ionic_start_queues_reconfig(struct ionic_lif *lif)
{
	int err;

	/* Re-init the queues after reconfiguration */

	/* The only way txrx_init can fail here is if communication
	 * with FW is suddenly broken.  There's not much we can do
	 * at this point - error messages have already been printed,
	 * so we can continue on and the user can eventually do a
	 * DOWN and UP to try to reset and clear the issue.
	 */
	err = ionic_txrx_init(lif);
	ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
	netif_device_attach(lif->netdev);

	return err;
}

static bool ionic_xdp_is_valid_mtu(struct ionic_lif *lif, u32 mtu,
				   struct bpf_prog *xdp_prog)
{
	if (!xdp_prog)
		return true;

	if (mtu <= IONIC_XDP_MAX_LINEAR_MTU)
		return true;

	if (xdp_prog->aux && xdp_prog->aux->xdp_has_frags)
		return true;

	return false;
}

static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MTU,
			.mtu = cpu_to_le32(new_mtu),
		},
	};
	struct bpf_prog *xdp_prog;
	int err;

	xdp_prog = READ_ONCE(lif->xdp_prog);
	if (!ionic_xdp_is_valid_mtu(lif, new_mtu, xdp_prog))
		return -EINVAL;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* if we're not running, nothing more to do */
	if (!netif_running(netdev)) {
		netdev->mtu = new_mtu;
		return 0;
	}

	mutex_lock(&lif->queue_lock);
	ionic_stop_queues_reconfig(lif);
	netdev->mtu = new_mtu;
	err = ionic_start_queues_reconfig(lif);
	mutex_unlock(&lif->queue_lock);

	return err;
}

static void ionic_tx_timeout_work(struct work_struct *ws)
{
	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
	int err;

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	/* if we were stopped before this scheduled job was launched,
	 * don't bother the queues as they are already stopped.
	 */
	if (!netif_running(lif->netdev))
		return;

	mutex_lock(&lif->queue_lock);
	ionic_stop_queues_reconfig(lif);
	err = ionic_start_queues_reconfig(lif);
	mutex_unlock(&lif->queue_lock);

	if (err)
		dev_err(lif->ionic->dev, "%s: Restarting queues failed\n", __func__);
}

static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	netdev_info(lif->netdev, "Tx Timeout triggered - txq %d\n", txqueue);
	schedule_work(&lif->tx_timeout_work);
}

static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	err = ionic_lif_vlan_add(lif, vid);
	if (err)
		return err;

	ionic_lif_rx_mode(lif);

	return 0;
}

static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				  u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	err = ionic_lif_vlan_del(lif, vid);
	if (err)
		return err;

	ionic_lif_rx_mode(lif);

	return 0;
}

int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
			 const u8 *key, const u32 *indir)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_RSS,
			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
		},
	};
	unsigned int i, tbl_sz;

	if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
		lif->rss_types = types;
		ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
	}

	if (key)
		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);

	if (indir) {
		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = indir[i];
	}

	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
	       IONIC_RSS_HASH_KEY_SIZE);

	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_lif_rss_init(struct ionic_lif *lif)
{
	unsigned int tbl_sz;
	unsigned int i;

	lif->rss_types = IONIC_RSS_TYPE_IPV4 |
			 IONIC_RSS_TYPE_IPV4_TCP |
			 IONIC_RSS_TYPE_IPV4_UDP |
			 IONIC_RSS_TYPE_IPV6 |
			 IONIC_RSS_TYPE_IPV6_TCP |
			 IONIC_RSS_TYPE_IPV6_UDP;

	/* Fill indirection table with 'default' values */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	for (i = 0; i < tbl_sz; i++)
		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);

	return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
}

static void ionic_lif_rss_deinit(struct ionic_lif *lif)
{
	int tbl_sz;

	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	memset(lif->rss_ind_tbl, 0, tbl_sz);
	memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);

	ionic_lif_rss_config(lif, 0x0, NULL, NULL);
}

static void ionic_lif_quiesce(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_STATE,
			.state = IONIC_LIF_QUIESCE,
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		netdev_dbg(lif->netdev, "lif quiesce failed %d\n", err);
}

static void ionic_txrx_disable(struct ionic_lif *lif)
{
	unsigned int i;
	int err = 0;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs; i++)
			err = ionic_qcq_disable(lif, lif->txqcqs[i], err);
	}

	if (lif->hwstamp_txq)
		err = ionic_qcq_disable(lif, lif->hwstamp_txq, err);

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs; i++)
			err = ionic_qcq_disable(lif, lif->rxqcqs[i], err);
	}

	if (lif->hwstamp_rxq)
		err = ionic_qcq_disable(lif, lif->hwstamp_rxq, err);

	ionic_lif_quiesce(lif);
}

static void ionic_txrx_deinit(struct ionic_lif *lif)
{
	unsigned int i;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) {
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
			ionic_tx_flush(&lif->txqcqs[i]->cq);
			ionic_tx_empty(&lif->txqcqs[i]->q);
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
			ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
			ionic_rx_empty(&lif->rxqcqs[i]->q);
		}
	}
	lif->rx_mode = 0;

	if (lif->hwstamp_txq) {
		ionic_lif_qcq_deinit(lif, lif->hwstamp_txq);
		ionic_tx_flush(&lif->hwstamp_txq->cq);
		ionic_tx_empty(&lif->hwstamp_txq->q);
	}

	if (lif->hwstamp_rxq) {
		ionic_lif_qcq_deinit(lif, lif->hwstamp_rxq);
		ionic_rx_empty(&lif->hwstamp_rxq->q);
	}
}

void ionic_txrx_free(struct ionic_lif *lif)
{
	unsigned int i;

	if (lif->txqcqs) {
		for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) {
			ionic_qcq_free(lif, lif->txqcqs[i]);
			devm_kfree(lif->ionic->dev, lif->txqcqs[i]);
			lif->txqcqs[i] = NULL;
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
			ionic_qcq_free(lif, lif->rxqcqs[i]);
			devm_kfree(lif->ionic->dev, lif->rxqcqs[i]);
			lif->rxqcqs[i] = NULL;
		}
	}

	if (lif->hwstamp_txq) {
		ionic_qcq_free(lif, lif->hwstamp_txq);
		devm_kfree(lif->ionic->dev, lif->hwstamp_txq);
		lif->hwstamp_txq = NULL;
	}

	if (lif->hwstamp_rxq) {
		ionic_qcq_free(lif, lif->hwstamp_rxq);
		devm_kfree(lif->ionic->dev, lif->hwstamp_rxq);
		lif->hwstamp_rxq = NULL;
	}
}

static int ionic_txrx_alloc(struct ionic_lif *lif)
{
	unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
	unsigned int flags, i;
	int err = 0;

	num_desc = lif->ntxq_descs;
	desc_sz = sizeof(struct ionic_txq_desc);
	comp_sz = sizeof(struct ionic_txq_comp);

	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
	    sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;

	if (test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state))
		flags |= IONIC_QCQ_F_CMB_RINGS;

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		flags |= IONIC_QCQ_F_INTR;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
				      num_desc, desc_sz, comp_sz, sg_desc_sz,
				      lif->kern_pid, &lif->txqcqs[i]);
		if (err)
			goto err_out;

		if (flags & IONIC_QCQ_F_INTR) {
			ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
2119 lif->txqcqs[i]->intr.index, 2120 lif->tx_coalesce_hw); 2121 if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state)) 2122 lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw; 2123 } 2124 2125 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]); 2126 } 2127 2128 flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR; 2129 2130 if (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state)) 2131 flags |= IONIC_QCQ_F_CMB_RINGS; 2132 2133 num_desc = lif->nrxq_descs; 2134 desc_sz = sizeof(struct ionic_rxq_desc); 2135 comp_sz = sizeof(struct ionic_rxq_comp); 2136 sg_desc_sz = sizeof(struct ionic_rxq_sg_desc); 2137 2138 if (lif->rxq_features & IONIC_Q_F_2X_CQ_DESC) 2139 comp_sz *= 2; 2140 2141 for (i = 0; i < lif->nxqs; i++) { 2142 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 2143 num_desc, desc_sz, comp_sz, sg_desc_sz, 2144 lif->kern_pid, &lif->rxqcqs[i]); 2145 if (err) 2146 goto err_out; 2147 2148 lif->rxqcqs[i]->q.features = lif->rxq_features; 2149 2150 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 2151 lif->rxqcqs[i]->intr.index, 2152 lif->rx_coalesce_hw); 2153 if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state)) 2154 lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw; 2155 2156 if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) 2157 ionic_link_qcq_interrupts(lif->rxqcqs[i], 2158 lif->txqcqs[i]); 2159 2160 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]); 2161 } 2162 2163 return 0; 2164 2165 err_out: 2166 ionic_txrx_free(lif); 2167 2168 return err; 2169 } 2170 2171 static int ionic_txrx_init(struct ionic_lif *lif) 2172 { 2173 unsigned int i; 2174 int err; 2175 2176 for (i = 0; i < lif->nxqs; i++) { 2177 err = ionic_lif_txq_init(lif, lif->txqcqs[i]); 2178 if (err) 2179 goto err_out; 2180 2181 err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]); 2182 if (err) { 2183 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); 2184 goto err_out; 2185 } 2186 } 2187 2188 if (lif->netdev->features & NETIF_F_RXHASH) 2189 ionic_lif_rss_init(lif); 2190 2191 ionic_lif_rx_mode(lif); 2192 2193 return 0; 2194 2195 err_out: 2196 while (i--) { 2197 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); 2198 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]); 2199 } 2200 2201 return err; 2202 } 2203 2204 static int ionic_txrx_enable(struct ionic_lif *lif) 2205 { 2206 int derr = 0; 2207 int i, err; 2208 2209 err = ionic_xdp_queues_config(lif); 2210 if (err) 2211 return err; 2212 2213 for (i = 0; i < lif->nxqs; i++) { 2214 if (!(lif->rxqcqs[i] && lif->txqcqs[i])) { 2215 dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i); 2216 err = -ENXIO; 2217 goto err_out; 2218 } 2219 2220 ionic_rx_fill(&lif->rxqcqs[i]->q); 2221 err = ionic_qcq_enable(lif->rxqcqs[i]); 2222 if (err) 2223 goto err_out; 2224 2225 err = ionic_qcq_enable(lif->txqcqs[i]); 2226 if (err) { 2227 derr = ionic_qcq_disable(lif, lif->rxqcqs[i], err); 2228 goto err_out; 2229 } 2230 } 2231 2232 if (lif->hwstamp_rxq) { 2233 ionic_rx_fill(&lif->hwstamp_rxq->q); 2234 err = ionic_qcq_enable(lif->hwstamp_rxq); 2235 if (err) 2236 goto err_out_hwstamp_rx; 2237 } 2238 2239 if (lif->hwstamp_txq) { 2240 err = ionic_qcq_enable(lif->hwstamp_txq); 2241 if (err) 2242 goto err_out_hwstamp_tx; 2243 } 2244 2245 return 0; 2246 2247 err_out_hwstamp_tx: 2248 if (lif->hwstamp_rxq) 2249 derr = ionic_qcq_disable(lif, lif->hwstamp_rxq, derr); 2250 err_out_hwstamp_rx: 2251 i = lif->nxqs; 2252 err_out: 2253 while (i--) { 2254 derr = ionic_qcq_disable(lif, lif->txqcqs[i], derr); 2255 derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr); 2256 } 2257 2258 ionic_xdp_queues_config(lif); 2259 2260 return err; 2261 } 2262 
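/* Bring the LIF's datapath up: refuse if the LIF is broken (-EIO) or
 * in the middle of a FW reset (-EBUSY), do nothing if already up,
 * otherwise enable the queues and wake the tx queues. Callers hold
 * the queue_lock.
 */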
2263 static int ionic_start_queues(struct ionic_lif *lif) 2264 { 2265 int err; 2266 2267 if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) 2268 return -EIO; 2269 2270 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 2271 return -EBUSY; 2272 2273 if (test_and_set_bit(IONIC_LIF_F_UP, lif->state)) 2274 return 0; 2275 2276 err = ionic_txrx_enable(lif); 2277 if (err) { 2278 clear_bit(IONIC_LIF_F_UP, lif->state); 2279 return err; 2280 } 2281 netif_tx_wake_all_queues(lif->netdev); 2282 2283 return 0; 2284 } 2285 2286 static int ionic_open(struct net_device *netdev) 2287 { 2288 struct ionic_lif *lif = netdev_priv(netdev); 2289 int err; 2290 2291 /* If recovering from a broken state, clear the bit and we'll try again */ 2292 if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state)) 2293 netdev_info(netdev, "clearing broken state\n"); 2294 2295 mutex_lock(&lif->queue_lock); 2296 2297 err = ionic_txrx_alloc(lif); 2298 if (err) 2299 goto err_unlock; 2300 2301 err = ionic_txrx_init(lif); 2302 if (err) 2303 goto err_txrx_free; 2304 2305 err = netif_set_real_num_tx_queues(netdev, lif->nxqs); 2306 if (err) 2307 goto err_txrx_deinit; 2308 2309 err = netif_set_real_num_rx_queues(netdev, lif->nxqs); 2310 if (err) 2311 goto err_txrx_deinit; 2312 2313 /* don't start the queues until we have link */ 2314 if (netif_carrier_ok(netdev)) { 2315 err = ionic_start_queues(lif); 2316 if (err) 2317 goto err_txrx_deinit; 2318 } 2319 2320 /* If hardware timestamping is enabled, but the queues were freed by 2321 * ionic_stop, those need to be reallocated and initialized, too. 2322 */ 2323 ionic_lif_hwstamp_recreate_queues(lif); 2324 2325 mutex_unlock(&lif->queue_lock); 2326 2327 return 0; 2328 2329 err_txrx_deinit: 2330 ionic_txrx_deinit(lif); 2331 err_txrx_free: 2332 ionic_txrx_free(lif); 2333 err_unlock: 2334 mutex_unlock(&lif->queue_lock); 2335 return err; 2336 } 2337 2338 static void ionic_stop_queues(struct ionic_lif *lif) 2339 { 2340 if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state)) 2341 return; 2342 2343 netif_tx_disable(lif->netdev); 2344 ionic_txrx_disable(lif); 2345 } 2346 2347 static int ionic_stop(struct net_device *netdev) 2348 { 2349 struct ionic_lif *lif = netdev_priv(netdev); 2350 2351 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 2352 return 0; 2353 2354 mutex_lock(&lif->queue_lock); 2355 ionic_stop_queues(lif); 2356 ionic_txrx_deinit(lif); 2357 ionic_txrx_free(lif); 2358 mutex_unlock(&lif->queue_lock); 2359 2360 return 0; 2361 } 2362 2363 static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2364 { 2365 struct ionic_lif *lif = netdev_priv(netdev); 2366 2367 switch (cmd) { 2368 case SIOCSHWTSTAMP: 2369 return ionic_lif_hwstamp_set(lif, ifr); 2370 case SIOCGHWTSTAMP: 2371 return ionic_lif_hwstamp_get(lif, ifr); 2372 default: 2373 return -EOPNOTSUPP; 2374 } 2375 } 2376 2377 static int ionic_get_vf_config(struct net_device *netdev, 2378 int vf, struct ifla_vf_info *ivf) 2379 { 2380 struct ionic_lif *lif = netdev_priv(netdev); 2381 struct ionic *ionic = lif->ionic; 2382 int ret = 0; 2383 2384 if (!netif_device_present(netdev)) 2385 return -EBUSY; 2386 2387 down_read(&ionic->vf_op_lock); 2388 2389 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2390 ret = -EINVAL; 2391 } else { 2392 struct ionic_vf *vfdata = &ionic->vfs[vf]; 2393 2394 ivf->vf = vf; 2395 ivf->qos = 0; 2396 ivf->vlan = le16_to_cpu(vfdata->vlanid); 2397 ivf->spoofchk = vfdata->spoofchk; 2398 ivf->linkstate = vfdata->linkstate; 2399 ivf->max_tx_rate = le32_to_cpu(vfdata->maxrate); 2400 ivf->trusted = vfdata->trusted; 
2401 ether_addr_copy(ivf->mac, vfdata->macaddr); 2402 } 2403 2404 up_read(&ionic->vf_op_lock); 2405 return ret; 2406 } 2407 2408 static int ionic_get_vf_stats(struct net_device *netdev, int vf, 2409 struct ifla_vf_stats *vf_stats) 2410 { 2411 struct ionic_lif *lif = netdev_priv(netdev); 2412 struct ionic *ionic = lif->ionic; 2413 struct ionic_lif_stats *vs; 2414 int ret = 0; 2415 2416 if (!netif_device_present(netdev)) 2417 return -EBUSY; 2418 2419 down_read(&ionic->vf_op_lock); 2420 2421 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2422 ret = -EINVAL; 2423 } else { 2424 memset(vf_stats, 0, sizeof(*vf_stats)); 2425 vs = &ionic->vfs[vf].stats; 2426 2427 vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets); 2428 vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets); 2429 vf_stats->rx_bytes = le64_to_cpu(vs->rx_ucast_bytes); 2430 vf_stats->tx_bytes = le64_to_cpu(vs->tx_ucast_bytes); 2431 vf_stats->broadcast = le64_to_cpu(vs->rx_bcast_packets); 2432 vf_stats->multicast = le64_to_cpu(vs->rx_mcast_packets); 2433 vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) + 2434 le64_to_cpu(vs->rx_mcast_drop_packets) + 2435 le64_to_cpu(vs->rx_bcast_drop_packets); 2436 vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) + 2437 le64_to_cpu(vs->tx_mcast_drop_packets) + 2438 le64_to_cpu(vs->tx_bcast_drop_packets); 2439 } 2440 2441 up_read(&ionic->vf_op_lock); 2442 return ret; 2443 } 2444 2445 static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) 2446 { 2447 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_MAC }; 2448 struct ionic_lif *lif = netdev_priv(netdev); 2449 struct ionic *ionic = lif->ionic; 2450 int ret; 2451 2452 if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac))) 2453 return -EINVAL; 2454 2455 if (!netif_device_present(netdev)) 2456 return -EBUSY; 2457 2458 down_write(&ionic->vf_op_lock); 2459 2460 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2461 ret = -EINVAL; 2462 } else { 2463 ether_addr_copy(vfc.macaddr, mac); 2464 dev_dbg(ionic->dev, "%s: vf %d macaddr %pM\n", 2465 __func__, vf, vfc.macaddr); 2466 2467 ret = ionic_set_vf_config(ionic, vf, &vfc); 2468 if (!ret) 2469 ether_addr_copy(ionic->vfs[vf].macaddr, mac); 2470 } 2471 2472 up_write(&ionic->vf_op_lock); 2473 return ret; 2474 } 2475 2476 static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, 2477 u8 qos, __be16 proto) 2478 { 2479 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_VLAN }; 2480 struct ionic_lif *lif = netdev_priv(netdev); 2481 struct ionic *ionic = lif->ionic; 2482 int ret; 2483 2484 /* until someday when we support qos */ 2485 if (qos) 2486 return -EINVAL; 2487 2488 if (vlan > 4095) 2489 return -EINVAL; 2490 2491 if (proto != htons(ETH_P_8021Q)) 2492 return -EPROTONOSUPPORT; 2493 2494 if (!netif_device_present(netdev)) 2495 return -EBUSY; 2496 2497 down_write(&ionic->vf_op_lock); 2498 2499 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2500 ret = -EINVAL; 2501 } else { 2502 vfc.vlanid = cpu_to_le16(vlan); 2503 dev_dbg(ionic->dev, "%s: vf %d vlan %d\n", 2504 __func__, vf, le16_to_cpu(vfc.vlanid)); 2505 2506 ret = ionic_set_vf_config(ionic, vf, &vfc); 2507 if (!ret) 2508 ionic->vfs[vf].vlanid = cpu_to_le16(vlan); 2509 } 2510 2511 up_write(&ionic->vf_op_lock); 2512 return ret; 2513 } 2514 2515 static int ionic_set_vf_rate(struct net_device *netdev, int vf, 2516 int tx_min, int tx_max) 2517 { 2518 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_RATE }; 2519 struct ionic_lif *lif = netdev_priv(netdev); 2520 struct 
ionic *ionic = lif->ionic; 2521 int ret; 2522 2523 /* setting the min just seems silly */ 2524 if (tx_min) 2525 return -EINVAL; 2526 2527 if (!netif_device_present(netdev)) 2528 return -EBUSY; 2529 2530 down_write(&ionic->vf_op_lock); 2531 2532 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2533 ret = -EINVAL; 2534 } else { 2535 vfc.maxrate = cpu_to_le32(tx_max); 2536 dev_dbg(ionic->dev, "%s: vf %d maxrate %d\n", 2537 __func__, vf, le32_to_cpu(vfc.maxrate)); 2538 2539 ret = ionic_set_vf_config(ionic, vf, &vfc); 2540 if (!ret) 2541 ionic->vfs[vf].maxrate = cpu_to_le32(tx_max); 2542 } 2543 2544 up_write(&ionic->vf_op_lock); 2545 return ret; 2546 } 2547 2548 static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set) 2549 { 2550 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_SPOOFCHK }; 2551 struct ionic_lif *lif = netdev_priv(netdev); 2552 struct ionic *ionic = lif->ionic; 2553 int ret; 2554 2555 if (!netif_device_present(netdev)) 2556 return -EBUSY; 2557 2558 down_write(&ionic->vf_op_lock); 2559 2560 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2561 ret = -EINVAL; 2562 } else { 2563 vfc.spoofchk = set; 2564 dev_dbg(ionic->dev, "%s: vf %d spoof %d\n", 2565 __func__, vf, vfc.spoofchk); 2566 2567 ret = ionic_set_vf_config(ionic, vf, &vfc); 2568 if (!ret) 2569 ionic->vfs[vf].spoofchk = set; 2570 } 2571 2572 up_write(&ionic->vf_op_lock); 2573 return ret; 2574 } 2575 2576 static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set) 2577 { 2578 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_TRUST }; 2579 struct ionic_lif *lif = netdev_priv(netdev); 2580 struct ionic *ionic = lif->ionic; 2581 int ret; 2582 2583 if (!netif_device_present(netdev)) 2584 return -EBUSY; 2585 2586 down_write(&ionic->vf_op_lock); 2587 2588 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2589 ret = -EINVAL; 2590 } else { 2591 vfc.trust = set; 2592 dev_dbg(ionic->dev, "%s: vf %d trust %d\n", 2593 __func__, vf, vfc.trust); 2594 2595 ret = ionic_set_vf_config(ionic, vf, &vfc); 2596 if (!ret) 2597 ionic->vfs[vf].trusted = set; 2598 } 2599 2600 up_write(&ionic->vf_op_lock); 2601 return ret; 2602 } 2603 2604 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set) 2605 { 2606 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_LINKSTATE }; 2607 struct ionic_lif *lif = netdev_priv(netdev); 2608 struct ionic *ionic = lif->ionic; 2609 u8 vfls; 2610 int ret; 2611 2612 switch (set) { 2613 case IFLA_VF_LINK_STATE_ENABLE: 2614 vfls = IONIC_VF_LINK_STATUS_UP; 2615 break; 2616 case IFLA_VF_LINK_STATE_DISABLE: 2617 vfls = IONIC_VF_LINK_STATUS_DOWN; 2618 break; 2619 case IFLA_VF_LINK_STATE_AUTO: 2620 vfls = IONIC_VF_LINK_STATUS_AUTO; 2621 break; 2622 default: 2623 return -EINVAL; 2624 } 2625 2626 if (!netif_device_present(netdev)) 2627 return -EBUSY; 2628 2629 down_write(&ionic->vf_op_lock); 2630 2631 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2632 ret = -EINVAL; 2633 } else { 2634 vfc.linkstate = vfls; 2635 dev_dbg(ionic->dev, "%s: vf %d linkstate %d\n", 2636 __func__, vf, vfc.linkstate); 2637 2638 ret = ionic_set_vf_config(ionic, vf, &vfc); 2639 if (!ret) 2640 ionic->vfs[vf].linkstate = set; 2641 } 2642 2643 up_write(&ionic->vf_op_lock); 2644 return ret; 2645 } 2646 2647 static void ionic_vf_attr_replay(struct ionic_lif *lif) 2648 { 2649 struct ionic_vf_setattr_cmd vfc = { }; 2650 struct ionic *ionic = lif->ionic; 2651 struct ionic_vf *v; 2652 int i; 2653 2654 if (!ionic->vfs) 2655 return; 2656 2657 down_read(&ionic->vf_op_lock); 
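/* replay each attribute that was previously configured, clearing the
 * scratch cmd field again after each use
 */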
2658 2659 for (i = 0; i < ionic->num_vfs; i++) { 2660 v = &ionic->vfs[i]; 2661 2662 if (v->stats_pa) { 2663 vfc.attr = IONIC_VF_ATTR_STATSADDR; 2664 vfc.stats_pa = cpu_to_le64(v->stats_pa); 2665 ionic_set_vf_config(ionic, i, &vfc); 2666 vfc.stats_pa = 0; 2667 } 2668 2669 if (!is_zero_ether_addr(v->macaddr)) { 2670 vfc.attr = IONIC_VF_ATTR_MAC; 2671 ether_addr_copy(vfc.macaddr, v->macaddr); 2672 ionic_set_vf_config(ionic, i, &vfc); 2673 eth_zero_addr(vfc.macaddr); 2674 } 2675 2676 if (v->vlanid) { 2677 vfc.attr = IONIC_VF_ATTR_VLAN; 2678 vfc.vlanid = v->vlanid; 2679 ionic_set_vf_config(ionic, i, &vfc); 2680 vfc.vlanid = 0; 2681 } 2682 2683 if (v->maxrate) { 2684 vfc.attr = IONIC_VF_ATTR_RATE; 2685 vfc.maxrate = v->maxrate; 2686 ionic_set_vf_config(ionic, i, &vfc); 2687 vfc.maxrate = 0; 2688 } 2689 2690 if (v->spoofchk) { 2691 vfc.attr = IONIC_VF_ATTR_SPOOFCHK; 2692 vfc.spoofchk = v->spoofchk; 2693 ionic_set_vf_config(ionic, i, &vfc); 2694 vfc.spoofchk = 0; 2695 } 2696 2697 if (v->trusted) { 2698 vfc.attr = IONIC_VF_ATTR_TRUST; 2699 vfc.trust = v->trusted; 2700 ionic_set_vf_config(ionic, i, &vfc); 2701 vfc.trust = 0; 2702 } 2703 2704 if (v->linkstate) { 2705 vfc.attr = IONIC_VF_ATTR_LINKSTATE; 2706 vfc.linkstate = v->linkstate; 2707 ionic_set_vf_config(ionic, i, &vfc); 2708 vfc.linkstate = 0; 2709 } 2710 } 2711 2712 up_read(&ionic->vf_op_lock); 2713 2714 ionic_vf_start(ionic); 2715 } 2716 2717 static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q) 2718 { 2719 struct xdp_rxq_info *xi; 2720 2721 if (!q->xdp_rxq_info) 2722 return; 2723 2724 xi = q->xdp_rxq_info; 2725 q->xdp_rxq_info = NULL; 2726 2727 xdp_rxq_info_unreg(xi); 2728 kfree(xi); 2729 } 2730 2731 static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_id) 2732 { 2733 struct xdp_rxq_info *rxq_info; 2734 int err; 2735 2736 rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL); 2737 if (!rxq_info) 2738 return -ENOMEM; 2739 2740 err = xdp_rxq_info_reg(rxq_info, q->lif->netdev, q->index, napi_id); 2741 if (err) { 2742 dev_err(q->dev, "Queue %d xdp_rxq_info_reg failed, err %d\n", 2743 q->index, err); 2744 goto err_out; 2745 } 2746 2747 err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_ORDER0, NULL); 2748 if (err) { 2749 dev_err(q->dev, "Queue %d xdp_rxq_info_reg_mem_model failed, err %d\n", 2750 q->index, err); 2751 xdp_rxq_info_unreg(rxq_info); 2752 goto err_out; 2753 } 2754 2755 q->xdp_rxq_info = rxq_info; 2756 2757 return 0; 2758 2759 err_out: 2760 kfree(rxq_info); 2761 return err; 2762 } 2763 2764 static int ionic_xdp_queues_config(struct ionic_lif *lif) 2765 { 2766 unsigned int i; 2767 int err; 2768 2769 if (!lif->rxqcqs) 2770 return 0; 2771 2772 /* There's no need to rework memory if not going to/from NULL program. 2773 * If there is no lif->xdp_prog, there should also be no q.xdp_rxq_info 2774 * This way we don't need to keep an *xdp_prog in every queue struct. 
2775 */ 2776 if (!lif->xdp_prog == !lif->rxqcqs[0]->q.xdp_rxq_info) 2777 return 0; 2778 2779 for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) { 2780 struct ionic_queue *q = &lif->rxqcqs[i]->q; 2781 2782 if (q->xdp_rxq_info) { 2783 ionic_xdp_unregister_rxq_info(q); 2784 continue; 2785 } 2786 2787 err = ionic_xdp_register_rxq_info(q, lif->rxqcqs[i]->napi.napi_id); 2788 if (err) { 2789 dev_err(lif->ionic->dev, "failed to register RX queue %d info for XDP, err %d\n", 2790 i, err); 2791 goto err_out; 2792 } 2793 } 2794 2795 return 0; 2796 2797 err_out: 2798 for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) 2799 ionic_xdp_unregister_rxq_info(&lif->rxqcqs[i]->q); 2800 2801 return err; 2802 } 2803 2804 static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf) 2805 { 2806 struct ionic_lif *lif = netdev_priv(netdev); 2807 struct bpf_prog *old_prog; 2808 u32 maxfs; 2809 2810 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) { 2811 #define XDP_ERR_SPLIT "XDP not available with split Tx/Rx interrupts" 2812 NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_SPLIT); 2813 netdev_info(lif->netdev, XDP_ERR_SPLIT); 2814 return -EOPNOTSUPP; 2815 } 2816 2817 if (!ionic_xdp_is_valid_mtu(lif, netdev->mtu, bpf->prog)) { 2818 #define XDP_ERR_MTU "MTU is too large for XDP without frags support" 2819 NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_MTU); 2820 netdev_info(lif->netdev, XDP_ERR_MTU); 2821 return -EINVAL; 2822 } 2823 2824 maxfs = __le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN; 2825 if (bpf->prog && !(bpf->prog->aux && bpf->prog->aux->xdp_has_frags)) 2826 maxfs = min_t(u32, maxfs, IONIC_XDP_MAX_LINEAR_MTU); 2827 netdev->max_mtu = maxfs; 2828 2829 if (!netif_running(netdev)) { 2830 old_prog = xchg(&lif->xdp_prog, bpf->prog); 2831 } else { 2832 mutex_lock(&lif->queue_lock); 2833 ionic_stop_queues_reconfig(lif); 2834 old_prog = xchg(&lif->xdp_prog, bpf->prog); 2835 ionic_start_queues_reconfig(lif); 2836 mutex_unlock(&lif->queue_lock); 2837 } 2838 2839 if (old_prog) 2840 bpf_prog_put(old_prog); 2841 2842 return 0; 2843 } 2844 2845 static int ionic_xdp(struct net_device *netdev, struct netdev_bpf *bpf) 2846 { 2847 switch (bpf->command) { 2848 case XDP_SETUP_PROG: 2849 return ionic_xdp_config(netdev, bpf); 2850 default: 2851 return -EINVAL; 2852 } 2853 } 2854 2855 static const struct net_device_ops ionic_netdev_ops = { 2856 .ndo_open = ionic_open, 2857 .ndo_stop = ionic_stop, 2858 .ndo_eth_ioctl = ionic_eth_ioctl, 2859 .ndo_start_xmit = ionic_start_xmit, 2860 .ndo_bpf = ionic_xdp, 2861 .ndo_xdp_xmit = ionic_xdp_xmit, 2862 .ndo_get_stats64 = ionic_get_stats64, 2863 .ndo_set_rx_mode = ionic_ndo_set_rx_mode, 2864 .ndo_set_features = ionic_set_features, 2865 .ndo_set_mac_address = ionic_set_mac_address, 2866 .ndo_validate_addr = eth_validate_addr, 2867 .ndo_tx_timeout = ionic_tx_timeout, 2868 .ndo_change_mtu = ionic_change_mtu, 2869 .ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid, 2870 .ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid, 2871 .ndo_set_vf_vlan = ionic_set_vf_vlan, 2872 .ndo_set_vf_trust = ionic_set_vf_trust, 2873 .ndo_set_vf_mac = ionic_set_vf_mac, 2874 .ndo_set_vf_rate = ionic_set_vf_rate, 2875 .ndo_set_vf_spoofchk = ionic_set_vf_spoofchk, 2876 .ndo_get_vf_config = ionic_get_vf_config, 2877 .ndo_set_vf_link_state = ionic_set_vf_link_state, 2878 .ndo_get_vf_stats = ionic_get_vf_stats, 2879 }; 2880 2881 static int ionic_cmb_reconfig(struct ionic_lif *lif, 2882 struct ionic_queue_params *qparam) 2883 { 2884 struct ionic_queue_params start_qparams; 2885 int 
err = 0; 2886 2887 /* When changing CMB queue parameters, we're using limited 2888 * on-device memory and don't have extra memory to use for 2889 * duplicate allocations, so we free it all first then 2890 * re-allocate with the new parameters. 2891 */ 2892 2893 /* Checkpoint for possible unwind */ 2894 ionic_init_queue_params(lif, &start_qparams); 2895 2896 /* Stop and free the queues */ 2897 ionic_stop_queues_reconfig(lif); 2898 ionic_txrx_free(lif); 2899 2900 /* Set up new qparams */ 2901 ionic_set_queue_params(lif, qparam); 2902 2903 if (netif_running(lif->netdev)) { 2904 /* Alloc and start the new configuration */ 2905 err = ionic_txrx_alloc(lif); 2906 if (err) { 2907 dev_warn(lif->ionic->dev, 2908 "CMB reconfig failed, restoring values: %d\n", err); 2909 2910 /* Back out the changes */ 2911 ionic_set_queue_params(lif, &start_qparams); 2912 err = ionic_txrx_alloc(lif); 2913 if (err) { 2914 dev_err(lif->ionic->dev, 2915 "CMB restore failed: %d\n", err); 2916 goto err_out; 2917 } 2918 } 2919 2920 err = ionic_start_queues_reconfig(lif); 2921 if (err) { 2922 dev_err(lif->ionic->dev, 2923 "CMB reconfig failed: %d\n", err); 2924 goto err_out; 2925 } 2926 } 2927 2928 err_out: 2929 /* This was detached in ionic_stop_queues_reconfig() */ 2930 netif_device_attach(lif->netdev); 2931 2932 return err; 2933 } 2934 2935 static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b) 2936 { 2937 /* only swapping the queues, not the napi, flags, or other stuff */ 2938 swap(a->q.features, b->q.features); 2939 swap(a->q.num_descs, b->q.num_descs); 2940 swap(a->q.desc_size, b->q.desc_size); 2941 swap(a->q.base, b->q.base); 2942 swap(a->q.base_pa, b->q.base_pa); 2943 swap(a->q.info, b->q.info); 2944 swap(a->q.xdp_rxq_info, b->q.xdp_rxq_info); 2945 swap(a->q.partner, b->q.partner); 2946 swap(a->q_base, b->q_base); 2947 swap(a->q_base_pa, b->q_base_pa); 2948 swap(a->q_size, b->q_size); 2949 2950 swap(a->q.sg_desc_size, b->q.sg_desc_size); 2951 swap(a->q.sg_base, b->q.sg_base); 2952 swap(a->q.sg_base_pa, b->q.sg_base_pa); 2953 swap(a->sg_base, b->sg_base); 2954 swap(a->sg_base_pa, b->sg_base_pa); 2955 swap(a->sg_size, b->sg_size); 2956 2957 swap(a->cq.num_descs, b->cq.num_descs); 2958 swap(a->cq.desc_size, b->cq.desc_size); 2959 swap(a->cq.base, b->cq.base); 2960 swap(a->cq.base_pa, b->cq.base_pa); 2961 swap(a->cq.info, b->cq.info); 2962 swap(a->cq_base, b->cq_base); 2963 swap(a->cq_base_pa, b->cq_base_pa); 2964 swap(a->cq_size, b->cq_size); 2965 2966 ionic_debugfs_del_qcq(a); 2967 ionic_debugfs_add_qcq(a->q.lif, a); 2968 } 2969 2970 int ionic_reconfigure_queues(struct ionic_lif *lif, 2971 struct ionic_queue_params *qparam) 2972 { 2973 unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz; 2974 struct ionic_qcq **tx_qcqs = NULL; 2975 struct ionic_qcq **rx_qcqs = NULL; 2976 unsigned int flags, i; 2977 int err = 0; 2978 2979 /* Are we changing q params while CMB is on */ 2980 if ((test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) && qparam->cmb_tx) || 2981 (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state) && qparam->cmb_rx)) 2982 return ionic_cmb_reconfig(lif, qparam); 2983 2984 /* allocate temporary qcq arrays to hold new queue structs */ 2985 if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) { 2986 tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif, 2987 sizeof(struct ionic_qcq *), GFP_KERNEL); 2988 if (!tx_qcqs) { 2989 err = -ENOMEM; 2990 goto err_out; 2991 } 2992 } 2993 if (qparam->nxqs != lif->nxqs || 2994 qparam->nrxq_descs != lif->nrxq_descs || 2995 
qparam->rxq_features != lif->rxq_features) { 2996 rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif, 2997 sizeof(struct ionic_qcq *), GFP_KERNEL); 2998 if (!rx_qcqs) { 2999 err = -ENOMEM; 3000 goto err_out; 3001 } 3002 } 3003 3004 /* allocate new desc_info and rings, but leave the interrupt setup 3005 * until later so as to not mess with the still-running queues 3006 */ 3007 if (tx_qcqs) { 3008 num_desc = qparam->ntxq_descs; 3009 desc_sz = sizeof(struct ionic_txq_desc); 3010 comp_sz = sizeof(struct ionic_txq_comp); 3011 3012 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 && 3013 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == 3014 sizeof(struct ionic_txq_sg_desc_v1)) 3015 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1); 3016 else 3017 sg_desc_sz = sizeof(struct ionic_txq_sg_desc); 3018 3019 for (i = 0; i < qparam->nxqs; i++) { 3020 /* If missing, short placeholder qcq needed for swap */ 3021 if (!lif->txqcqs[i]) { 3022 flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; 3023 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 3024 4, desc_sz, comp_sz, sg_desc_sz, 3025 lif->kern_pid, &lif->txqcqs[i]); 3026 if (err) 3027 goto err_out; 3028 } 3029 3030 flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR; 3031 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 3032 num_desc, desc_sz, comp_sz, sg_desc_sz, 3033 lif->kern_pid, &tx_qcqs[i]); 3034 if (err) 3035 goto err_out; 3036 } 3037 } 3038 3039 if (rx_qcqs) { 3040 num_desc = qparam->nrxq_descs; 3041 desc_sz = sizeof(struct ionic_rxq_desc); 3042 comp_sz = sizeof(struct ionic_rxq_comp); 3043 sg_desc_sz = sizeof(struct ionic_rxq_sg_desc); 3044 3045 if (qparam->rxq_features & IONIC_Q_F_2X_CQ_DESC) 3046 comp_sz *= 2; 3047 3048 for (i = 0; i < qparam->nxqs; i++) { 3049 /* If missing, short placeholder qcq needed for swap */ 3050 if (!lif->rxqcqs[i]) { 3051 flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG; 3052 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 3053 4, desc_sz, comp_sz, sg_desc_sz, 3054 lif->kern_pid, &lif->rxqcqs[i]); 3055 if (err) 3056 goto err_out; 3057 } 3058 3059 flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR; 3060 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 3061 num_desc, desc_sz, comp_sz, sg_desc_sz, 3062 lif->kern_pid, &rx_qcqs[i]); 3063 if (err) 3064 goto err_out; 3065 3066 rx_qcqs[i]->q.features = qparam->rxq_features; 3067 } 3068 } 3069 3070 /* stop and clean the queues */ 3071 ionic_stop_queues_reconfig(lif); 3072 3073 if (qparam->nxqs != lif->nxqs) { 3074 err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs); 3075 if (err) 3076 goto err_out_reinit_unlock; 3077 err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs); 3078 if (err) { 3079 netif_set_real_num_tx_queues(lif->netdev, lif->nxqs); 3080 goto err_out_reinit_unlock; 3081 } 3082 } 3083 3084 /* swap new desc_info and rings, keeping existing interrupt config */ 3085 if (tx_qcqs) { 3086 lif->ntxq_descs = qparam->ntxq_descs; 3087 for (i = 0; i < qparam->nxqs; i++) 3088 ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]); 3089 } 3090 3091 if (rx_qcqs) { 3092 lif->nrxq_descs = qparam->nrxq_descs; 3093 for (i = 0; i < qparam->nxqs; i++) 3094 ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]); 3095 } 3096 3097 /* if we need to change the interrupt layout, this is the time */ 3098 if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) || 3099 qparam->nxqs != lif->nxqs) { 3100 if (qparam->intr_split) { 3101 set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); 3102 } else { 3103 clear_bit(IONIC_LIF_F_SPLIT_INTR, 
lif->state); 3104 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs; 3105 lif->tx_coalesce_hw = lif->rx_coalesce_hw; 3106 } 3107 3108 /* Clear existing interrupt assignments. We check for NULL here 3109 * because we're checking the whole array for potential qcqs, not 3110 * just those qcqs that have just been set up. 3111 */ 3112 for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) { 3113 if (lif->txqcqs[i]) 3114 ionic_qcq_intr_free(lif, lif->txqcqs[i]); 3115 if (lif->rxqcqs[i]) 3116 ionic_qcq_intr_free(lif, lif->rxqcqs[i]); 3117 } 3118 3119 /* re-assign the interrupts */ 3120 for (i = 0; i < qparam->nxqs; i++) { 3121 lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR; 3122 err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]); 3123 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 3124 lif->rxqcqs[i]->intr.index, 3125 lif->rx_coalesce_hw); 3126 3127 if (qparam->intr_split) { 3128 lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR; 3129 err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]); 3130 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 3131 lif->txqcqs[i]->intr.index, 3132 lif->tx_coalesce_hw); 3133 if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state)) 3134 lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw; 3135 } else { 3136 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3137 ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]); 3138 } 3139 } 3140 } 3141 3142 /* now we can rework the debugfs mappings */ 3143 if (tx_qcqs) { 3144 for (i = 0; i < qparam->nxqs; i++) { 3145 ionic_debugfs_del_qcq(lif->txqcqs[i]); 3146 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]); 3147 } 3148 } 3149 3150 if (rx_qcqs) { 3151 for (i = 0; i < qparam->nxqs; i++) { 3152 ionic_debugfs_del_qcq(lif->rxqcqs[i]); 3153 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]); 3154 } 3155 } 3156 3157 swap(lif->nxqs, qparam->nxqs); 3158 swap(lif->rxq_features, qparam->rxq_features); 3159 3160 err_out_reinit_unlock: 3161 /* re-init the queues, but don't lose an error code */ 3162 if (err) 3163 ionic_start_queues_reconfig(lif); 3164 else 3165 err = ionic_start_queues_reconfig(lif); 3166 3167 err_out: 3168 /* free old allocs without cleaning intr */ 3169 for (i = 0; i < qparam->nxqs; i++) { 3170 if (tx_qcqs && tx_qcqs[i]) { 3171 tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3172 ionic_qcq_free(lif, tx_qcqs[i]); 3173 devm_kfree(lif->ionic->dev, tx_qcqs[i]); 3174 tx_qcqs[i] = NULL; 3175 } 3176 if (rx_qcqs && rx_qcqs[i]) { 3177 rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3178 ionic_qcq_free(lif, rx_qcqs[i]); 3179 devm_kfree(lif->ionic->dev, rx_qcqs[i]); 3180 rx_qcqs[i] = NULL; 3181 } 3182 } 3183 3184 /* free q array */ 3185 if (rx_qcqs) { 3186 devm_kfree(lif->ionic->dev, rx_qcqs); 3187 rx_qcqs = NULL; 3188 } 3189 if (tx_qcqs) { 3190 devm_kfree(lif->ionic->dev, tx_qcqs); 3191 tx_qcqs = NULL; 3192 } 3193 3194 /* clean the unused dma and info allocations when new set is smaller 3195 * than the full array, but leave the qcq shells in place 3196 */ 3197 for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) { 3198 if (lif->txqcqs && lif->txqcqs[i]) { 3199 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3200 ionic_qcq_free(lif, lif->txqcqs[i]); 3201 } 3202 3203 if (lif->rxqcqs && lif->rxqcqs[i]) { 3204 lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3205 ionic_qcq_free(lif, lif->rxqcqs[i]); 3206 } 3207 } 3208 3209 if (err) 3210 netdev_info(lif->netdev, "%s: failed %d\n", __func__, err); 3211 3212 return err; 3213 } 3214 3215 int ionic_lif_alloc(struct ionic *ionic) 3216 { 3217 struct device *dev = ionic->dev; 3218 union ionic_lif_identity *lid; 3219 struct net_device *netdev; 3220 
struct ionic_lif *lif; 3221 int tbl_sz; 3222 int err; 3223 3224 lid = kzalloc(sizeof(*lid), GFP_KERNEL); 3225 if (!lid) 3226 return -ENOMEM; 3227 3228 netdev = alloc_etherdev_mqs(sizeof(*lif), 3229 ionic->ntxqs_per_lif, ionic->ntxqs_per_lif); 3230 if (!netdev) { 3231 dev_err(dev, "Cannot allocate netdev, aborting\n"); 3232 err = -ENOMEM; 3233 goto err_out_free_lid; 3234 } 3235 3236 SET_NETDEV_DEV(netdev, dev); 3237 3238 lif = netdev_priv(netdev); 3239 lif->netdev = netdev; 3240 ionic->lif = lif; 3241 lif->ionic = ionic; 3242 netdev->netdev_ops = &ionic_netdev_ops; 3243 ionic_ethtool_set_ops(netdev); 3244 3245 netdev->watchdog_timeo = 2 * HZ; 3246 netif_carrier_off(netdev); 3247 3248 lif->identity = lid; 3249 lif->lif_type = IONIC_LIF_TYPE_CLASSIC; 3250 err = ionic_lif_identify(ionic, lif->lif_type, lif->identity); 3251 if (err) { 3252 dev_err(ionic->dev, "Cannot identify type %d: %d\n", 3253 lif->lif_type, err); 3254 goto err_out_free_netdev; 3255 } 3256 lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU, 3257 le32_to_cpu(lif->identity->eth.min_frame_size)); 3258 lif->netdev->max_mtu = 3259 le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN; 3260 3261 lif->neqs = ionic->neqs_per_lif; 3262 lif->nxqs = ionic->ntxqs_per_lif; 3263 3264 lif->index = 0; 3265 3266 if (is_kdump_kernel()) { 3267 lif->ntxq_descs = IONIC_MIN_TXRX_DESC; 3268 lif->nrxq_descs = IONIC_MIN_TXRX_DESC; 3269 } else { 3270 lif->ntxq_descs = IONIC_DEF_TXRX_DESC; 3271 lif->nrxq_descs = IONIC_DEF_TXRX_DESC; 3272 } 3273 3274 /* Convert the default coalesce value to actual hw resolution */ 3275 lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT; 3276 lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic, 3277 lif->rx_coalesce_usecs); 3278 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs; 3279 lif->tx_coalesce_hw = lif->rx_coalesce_hw; 3280 set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state); 3281 set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state); 3282 3283 snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index); 3284 3285 mutex_init(&lif->queue_lock); 3286 mutex_init(&lif->config_lock); 3287 3288 spin_lock_init(&lif->adminq_lock); 3289 3290 spin_lock_init(&lif->deferred.lock); 3291 INIT_LIST_HEAD(&lif->deferred.list); 3292 INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work); 3293 3294 /* allocate lif info */ 3295 lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE); 3296 lif->info = dma_alloc_coherent(dev, lif->info_sz, 3297 &lif->info_pa, GFP_KERNEL); 3298 if (!lif->info) { 3299 dev_err(dev, "Failed to allocate lif info, aborting\n"); 3300 err = -ENOMEM; 3301 goto err_out_free_mutex; 3302 } 3303 3304 ionic_debugfs_add_lif(lif); 3305 3306 /* allocate control queues and txrx queue arrays */ 3307 ionic_lif_queue_identify(lif); 3308 err = ionic_qcqs_alloc(lif); 3309 if (err) 3310 goto err_out_free_lif_info; 3311 3312 /* allocate rss indirection table */ 3313 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 3314 lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz; 3315 lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz, 3316 &lif->rss_ind_tbl_pa, 3317 GFP_KERNEL); 3318 3319 if (!lif->rss_ind_tbl) { 3320 err = -ENOMEM; 3321 dev_err(dev, "Failed to allocate rss indirection table, aborting\n"); 3322 goto err_out_free_qcqs; 3323 } 3324 netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE); 3325 3326 ionic_lif_alloc_phc(lif); 3327 3328 return 0; 3329 3330 err_out_free_qcqs: 3331 ionic_qcqs_free(lif); 3332 err_out_free_lif_info: 3333 dma_free_coherent(dev, lif->info_sz, 
lif->info, lif->info_pa); 3334 lif->info = NULL; 3335 lif->info_pa = 0; 3336 err_out_free_mutex: 3337 mutex_destroy(&lif->config_lock); 3338 mutex_destroy(&lif->queue_lock); 3339 err_out_free_netdev: 3340 free_netdev(lif->netdev); 3341 lif = NULL; 3342 err_out_free_lid: 3343 kfree(lid); 3344 3345 return err; 3346 } 3347 3348 static void ionic_lif_reset(struct ionic_lif *lif) 3349 { 3350 struct ionic_dev *idev = &lif->ionic->idev; 3351 3352 if (!ionic_is_fw_running(idev)) 3353 return; 3354 3355 mutex_lock(&lif->ionic->dev_cmd_lock); 3356 ionic_dev_cmd_lif_reset(idev, lif->index); 3357 ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 3358 mutex_unlock(&lif->ionic->dev_cmd_lock); 3359 } 3360 3361 static void ionic_lif_handle_fw_down(struct ionic_lif *lif) 3362 { 3363 struct ionic *ionic = lif->ionic; 3364 3365 if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state)) 3366 return; 3367 3368 dev_info(ionic->dev, "FW Down: Stopping LIFs\n"); 3369 3370 netif_device_detach(lif->netdev); 3371 3372 mutex_lock(&lif->queue_lock); 3373 if (test_bit(IONIC_LIF_F_UP, lif->state)) { 3374 dev_info(ionic->dev, "Surprise FW stop, stopping queues\n"); 3375 ionic_stop_queues(lif); 3376 } 3377 3378 if (netif_running(lif->netdev)) { 3379 ionic_txrx_deinit(lif); 3380 ionic_txrx_free(lif); 3381 } 3382 ionic_lif_deinit(lif); 3383 ionic_reset(ionic); 3384 ionic_qcqs_free(lif); 3385 3386 mutex_unlock(&lif->queue_lock); 3387 3388 clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state); 3389 dev_info(ionic->dev, "FW Down: LIFs stopped\n"); 3390 } 3391 3392 int ionic_restart_lif(struct ionic_lif *lif) 3393 { 3394 struct ionic *ionic = lif->ionic; 3395 int err; 3396 3397 mutex_lock(&lif->queue_lock); 3398 3399 if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state)) 3400 dev_info(ionic->dev, "FW Up: clearing broken state\n"); 3401 3402 err = ionic_qcqs_alloc(lif); 3403 if (err) 3404 goto err_unlock; 3405 3406 err = ionic_lif_init(lif); 3407 if (err) 3408 goto err_qcqs_free; 3409 3410 ionic_vf_attr_replay(lif); 3411 3412 if (lif->registered) 3413 ionic_lif_set_netdev_info(lif); 3414 3415 ionic_rx_filter_replay(lif); 3416 3417 if (netif_running(lif->netdev)) { 3418 err = ionic_txrx_alloc(lif); 3419 if (err) 3420 goto err_lifs_deinit; 3421 3422 err = ionic_txrx_init(lif); 3423 if (err) 3424 goto err_txrx_free; 3425 } 3426 3427 mutex_unlock(&lif->queue_lock); 3428 3429 clear_bit(IONIC_LIF_F_FW_RESET, lif->state); 3430 ionic_link_status_check_request(lif, CAN_SLEEP); 3431 netif_device_attach(lif->netdev); 3432 3433 return 0; 3434 3435 err_txrx_free: 3436 ionic_txrx_free(lif); 3437 err_lifs_deinit: 3438 ionic_lif_deinit(lif); 3439 err_qcqs_free: 3440 ionic_qcqs_free(lif); 3441 err_unlock: 3442 mutex_unlock(&lif->queue_lock); 3443 3444 return err; 3445 } 3446 3447 static void ionic_lif_handle_fw_up(struct ionic_lif *lif) 3448 { 3449 struct ionic *ionic = lif->ionic; 3450 int err; 3451 3452 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 3453 return; 3454 3455 dev_info(ionic->dev, "FW Up: restarting LIFs\n"); 3456 3457 /* This is a little different from what happens at 3458 * probe time because the LIF already exists so we 3459 * just need to reanimate it. 
3460 */ 3461 ionic_init_devinfo(ionic); 3462 err = ionic_identify(ionic); 3463 if (err) 3464 goto err_out; 3465 err = ionic_port_identify(ionic); 3466 if (err) 3467 goto err_out; 3468 err = ionic_port_init(ionic); 3469 if (err) 3470 goto err_out; 3471 3472 err = ionic_restart_lif(lif); 3473 if (err) 3474 goto err_out; 3475 3476 dev_info(ionic->dev, "FW Up: LIFs restarted\n"); 3477 3478 /* restore the hardware timestamping queues */ 3479 ionic_lif_hwstamp_replay(lif); 3480 3481 return; 3482 3483 err_out: 3484 dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err); 3485 } 3486 3487 void ionic_lif_free(struct ionic_lif *lif) 3488 { 3489 struct device *dev = lif->ionic->dev; 3490 3491 ionic_lif_free_phc(lif); 3492 3493 /* free rss indirection table */ 3494 dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl, 3495 lif->rss_ind_tbl_pa); 3496 lif->rss_ind_tbl = NULL; 3497 lif->rss_ind_tbl_pa = 0; 3498 3499 /* free queues */ 3500 ionic_qcqs_free(lif); 3501 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 3502 ionic_lif_reset(lif); 3503 3504 /* free lif info */ 3505 kfree(lif->identity); 3506 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); 3507 lif->info = NULL; 3508 lif->info_pa = 0; 3509 3510 /* unmap doorbell page */ 3511 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); 3512 lif->kern_dbpage = NULL; 3513 3514 mutex_destroy(&lif->config_lock); 3515 mutex_destroy(&lif->queue_lock); 3516 3517 /* free netdev & lif */ 3518 ionic_debugfs_del_lif(lif); 3519 free_netdev(lif->netdev); 3520 } 3521 3522 void ionic_lif_deinit(struct ionic_lif *lif) 3523 { 3524 if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state)) 3525 return; 3526 3527 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { 3528 cancel_work_sync(&lif->deferred.work); 3529 cancel_work_sync(&lif->tx_timeout_work); 3530 ionic_rx_filters_deinit(lif); 3531 if (lif->netdev->features & NETIF_F_RXHASH) 3532 ionic_lif_rss_deinit(lif); 3533 } 3534 3535 napi_disable(&lif->adminqcq->napi); 3536 ionic_lif_qcq_deinit(lif, lif->notifyqcq); 3537 ionic_lif_qcq_deinit(lif, lif->adminqcq); 3538 3539 ionic_lif_reset(lif); 3540 } 3541 3542 static int ionic_lif_adminq_init(struct ionic_lif *lif) 3543 { 3544 struct device *dev = lif->ionic->dev; 3545 struct ionic_q_init_comp comp; 3546 struct ionic_dev *idev; 3547 struct ionic_qcq *qcq; 3548 struct ionic_queue *q; 3549 int err; 3550 3551 idev = &lif->ionic->idev; 3552 qcq = lif->adminqcq; 3553 q = &qcq->q; 3554 3555 mutex_lock(&lif->ionic->dev_cmd_lock); 3556 ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index); 3557 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 3558 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); 3559 mutex_unlock(&lif->ionic->dev_cmd_lock); 3560 if (err) { 3561 netdev_err(lif->netdev, "adminq init failed %d\n", err); 3562 return err; 3563 } 3564 3565 q->hw_type = comp.hw_type; 3566 q->hw_index = le32_to_cpu(comp.hw_index); 3567 q->dbval = IONIC_DBELL_QID(q->hw_index); 3568 3569 dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type); 3570 dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index); 3571 3572 q->dbell_deadline = IONIC_ADMIN_DOORBELL_DEADLINE; 3573 q->dbell_jiffies = jiffies; 3574 3575 netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi); 3576 3577 qcq->napi_qcq = qcq; 3578 timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0); 3579 3580 napi_enable(&qcq->napi); 3581 3582 if (qcq->flags & IONIC_QCQ_F_INTR) { 3583 irq_set_affinity_hint(qcq->intr.vector, 3584 &qcq->intr.affinity_mask); 3585 
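/* clear the mask to let adminq interrupts through */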
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
3586 IONIC_INTR_MASK_CLEAR);
3587 }
3588
3589 qcq->flags |= IONIC_QCQ_F_INITED;
3590
3591 return 0;
3592 }
3593
3594 static int ionic_lif_notifyq_init(struct ionic_lif *lif)
3595 {
3596 struct ionic_qcq *qcq = lif->notifyqcq;
3597 struct device *dev = lif->ionic->dev;
3598 struct ionic_queue *q = &qcq->q;
3599 int err;
3600
3601 struct ionic_admin_ctx ctx = {
3602 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
3603 .cmd.q_init = {
3604 .opcode = IONIC_CMD_Q_INIT,
3605 .lif_index = cpu_to_le16(lif->index),
3606 .type = q->type,
3607 .ver = lif->qtype_info[q->type].version,
3608 .index = cpu_to_le32(q->index),
3609 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
3610 IONIC_QINIT_F_ENA),
3611 .intr_index = cpu_to_le16(lif->adminqcq->intr.index),
3612 .pid = cpu_to_le16(q->pid),
3613 .ring_size = ilog2(q->num_descs),
3614 .ring_base = cpu_to_le64(q->base_pa),
3615 }
3616 };
3617
3618 dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
3619 dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
3620 dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
3621 dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
3622
3623 err = ionic_adminq_post_wait(lif, &ctx);
3624 if (err)
3625 return err;
3626
3627 lif->last_eid = 0;
3628 q->hw_type = ctx.comp.q_init.hw_type;
3629 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
3630 q->dbval = IONIC_DBELL_QID(q->hw_index);
3631
3632 dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
3633 dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
3634
3635 /* preset the callback info */
3636 q->info[0].cb_arg = lif;
3637
3638 qcq->flags |= IONIC_QCQ_F_INITED;
3639
3640 return 0;
3641 }
3642
3643 static int ionic_station_set(struct ionic_lif *lif)
3644 {
3645 struct net_device *netdev = lif->netdev;
3646 struct ionic_admin_ctx ctx = {
3647 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
3648 .cmd.lif_getattr = {
3649 .opcode = IONIC_CMD_LIF_GETATTR,
3650 .index = cpu_to_le16(lif->index),
3651 .attr = IONIC_LIF_ATTR_MAC,
3652 },
3653 };
3654 u8 mac_address[ETH_ALEN];
3655 struct sockaddr addr;
3656 int err;
3657
3658 err = ionic_adminq_post_wait(lif, &ctx);
3659 if (err)
3660 return err;
3661 netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
3662 ctx.comp.lif_getattr.mac);
3663 ether_addr_copy(mac_address, ctx.comp.lif_getattr.mac);
3664
3665 if (is_zero_ether_addr(mac_address)) {
3666 eth_hw_addr_random(netdev);
3667 netdev_dbg(netdev, "Random MAC generated: %pM\n", netdev->dev_addr);
3668 ether_addr_copy(mac_address, netdev->dev_addr);
3669
3670 err = ionic_program_mac(lif, mac_address);
3671 if (err < 0)
3672 return err;
3673
3674 if (err > 0) {
3675 netdev_dbg(netdev, "%s: SET and GET ATTR Mac are not the same - due to old FW running\n",
3676 __func__);
3677 return 0;
3678 }
3679 }
3680
3681 if (!is_zero_ether_addr(netdev->dev_addr)) {
3682 /* If the netdev mac is non-zero and doesn't match the default
3683 * device address, it was set by something earlier and we're
3684 * likely here again after a fw-upgrade reset. We need to be
3685 * sure the netdev mac is in our filter list.
3686 */ 3687 if (!ether_addr_equal(mac_address, netdev->dev_addr)) 3688 ionic_lif_addr_add(lif, netdev->dev_addr); 3689 } else { 3690 /* Update the netdev mac with the device's mac */ 3691 ether_addr_copy(addr.sa_data, mac_address); 3692 addr.sa_family = AF_INET; 3693 err = eth_prepare_mac_addr_change(netdev, &addr); 3694 if (err) { 3695 netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n", 3696 addr.sa_data, err); 3697 return 0; 3698 } 3699 3700 eth_commit_mac_addr_change(netdev, &addr); 3701 } 3702 3703 netdev_dbg(lif->netdev, "adding station MAC addr %pM\n", 3704 netdev->dev_addr); 3705 ionic_lif_addr_add(lif, netdev->dev_addr); 3706 3707 return 0; 3708 } 3709 3710 int ionic_lif_init(struct ionic_lif *lif) 3711 { 3712 struct ionic_dev *idev = &lif->ionic->idev; 3713 struct device *dev = lif->ionic->dev; 3714 struct ionic_lif_init_comp comp; 3715 int dbpage_num; 3716 int err; 3717 3718 mutex_lock(&lif->ionic->dev_cmd_lock); 3719 ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa); 3720 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 3721 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); 3722 mutex_unlock(&lif->ionic->dev_cmd_lock); 3723 if (err) 3724 return err; 3725 3726 lif->hw_index = le16_to_cpu(comp.hw_index); 3727 3728 /* now that we have the hw_index we can figure out our doorbell page */ 3729 lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif); 3730 if (!lif->dbid_count) { 3731 dev_err(dev, "No doorbell pages, aborting\n"); 3732 return -EINVAL; 3733 } 3734 3735 lif->kern_pid = 0; 3736 dbpage_num = ionic_db_page_num(lif, lif->kern_pid); 3737 lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num); 3738 if (!lif->kern_dbpage) { 3739 dev_err(dev, "Cannot map dbpage, aborting\n"); 3740 return -ENOMEM; 3741 } 3742 3743 err = ionic_lif_adminq_init(lif); 3744 if (err) 3745 goto err_out_adminq_deinit; 3746 3747 if (lif->ionic->nnqs_per_lif) { 3748 err = ionic_lif_notifyq_init(lif); 3749 if (err) 3750 goto err_out_notifyq_deinit; 3751 } 3752 3753 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 3754 err = ionic_set_nic_features(lif, lif->netdev->features); 3755 else 3756 err = ionic_init_nic_features(lif); 3757 if (err) 3758 goto err_out_notifyq_deinit; 3759 3760 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { 3761 err = ionic_rx_filters_init(lif); 3762 if (err) 3763 goto err_out_notifyq_deinit; 3764 } 3765 3766 err = ionic_station_set(lif); 3767 if (err) 3768 goto err_out_notifyq_deinit; 3769 3770 lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT; 3771 3772 set_bit(IONIC_LIF_F_INITED, lif->state); 3773 3774 INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work); 3775 3776 return 0; 3777 3778 err_out_notifyq_deinit: 3779 napi_disable(&lif->adminqcq->napi); 3780 ionic_lif_qcq_deinit(lif, lif->notifyqcq); 3781 err_out_adminq_deinit: 3782 ionic_lif_qcq_deinit(lif, lif->adminqcq); 3783 ionic_lif_reset(lif); 3784 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); 3785 lif->kern_dbpage = NULL; 3786 3787 return err; 3788 } 3789 3790 static void ionic_lif_notify_work(struct work_struct *ws) 3791 { 3792 } 3793 3794 static void ionic_lif_set_netdev_info(struct ionic_lif *lif) 3795 { 3796 struct ionic_admin_ctx ctx = { 3797 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 3798 .cmd.lif_setattr = { 3799 .opcode = IONIC_CMD_LIF_SETATTR, 3800 .index = cpu_to_le16(lif->index), 3801 .attr = IONIC_LIF_ATTR_NAME, 3802 }, 3803 }; 3804 3805 strscpy(ctx.cmd.lif_setattr.name, lif->netdev->name, 3806 sizeof(ctx.cmd.lif_setattr.name)); 3807 3808 
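/* best-effort name update - the return value is not checked */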
ionic_adminq_post_wait(lif, &ctx); 3809 } 3810 3811 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev) 3812 { 3813 if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit) 3814 return NULL; 3815 3816 return netdev_priv(netdev); 3817 } 3818 3819 static int ionic_lif_notify(struct notifier_block *nb, 3820 unsigned long event, void *info) 3821 { 3822 struct net_device *ndev = netdev_notifier_info_to_dev(info); 3823 struct ionic *ionic = container_of(nb, struct ionic, nb); 3824 struct ionic_lif *lif = ionic_netdev_lif(ndev); 3825 3826 if (!lif || lif->ionic != ionic) 3827 return NOTIFY_DONE; 3828 3829 switch (event) { 3830 case NETDEV_CHANGENAME: 3831 ionic_lif_set_netdev_info(lif); 3832 break; 3833 } 3834 3835 return NOTIFY_DONE; 3836 } 3837 3838 int ionic_lif_register(struct ionic_lif *lif) 3839 { 3840 int err; 3841 3842 ionic_lif_register_phc(lif); 3843 3844 INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work); 3845 3846 lif->ionic->nb.notifier_call = ionic_lif_notify; 3847 3848 err = register_netdevice_notifier(&lif->ionic->nb); 3849 if (err) 3850 lif->ionic->nb.notifier_call = NULL; 3851 3852 /* only register LIF0 for now */ 3853 err = register_netdev(lif->netdev); 3854 if (err) { 3855 dev_err(lif->ionic->dev, "Cannot register net device, aborting\n"); 3856 ionic_lif_unregister_phc(lif); 3857 return err; 3858 } 3859 3860 ionic_link_status_check_request(lif, CAN_SLEEP); 3861 lif->registered = true; 3862 ionic_lif_set_netdev_info(lif); 3863 3864 return 0; 3865 } 3866 3867 void ionic_lif_unregister(struct ionic_lif *lif) 3868 { 3869 if (lif->ionic->nb.notifier_call) { 3870 unregister_netdevice_notifier(&lif->ionic->nb); 3871 cancel_work_sync(&lif->ionic->nb_work); 3872 lif->ionic->nb.notifier_call = NULL; 3873 } 3874 3875 if (lif->netdev->reg_state == NETREG_REGISTERED) 3876 unregister_netdev(lif->netdev); 3877 3878 ionic_lif_unregister_phc(lif); 3879 3880 lif->registered = false; 3881 } 3882 3883 static void ionic_lif_queue_identify(struct ionic_lif *lif) 3884 { 3885 union ionic_q_identity __iomem *q_ident; 3886 struct ionic *ionic = lif->ionic; 3887 struct ionic_dev *idev; 3888 int qtype; 3889 int err; 3890 3891 idev = &lif->ionic->idev; 3892 q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data; 3893 3894 for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) { 3895 struct ionic_qtype_info *qti = &lif->qtype_info[qtype]; 3896 3897 /* filter out the ones we know about */ 3898 switch (qtype) { 3899 case IONIC_QTYPE_ADMINQ: 3900 case IONIC_QTYPE_NOTIFYQ: 3901 case IONIC_QTYPE_RXQ: 3902 case IONIC_QTYPE_TXQ: 3903 break; 3904 default: 3905 continue; 3906 } 3907 3908 memset(qti, 0, sizeof(*qti)); 3909 3910 mutex_lock(&ionic->dev_cmd_lock); 3911 ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype, 3912 ionic_qtype_versions[qtype]); 3913 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); 3914 if (!err) { 3915 qti->version = readb(&q_ident->version); 3916 qti->supported = readb(&q_ident->supported); 3917 qti->features = readq(&q_ident->features); 3918 qti->desc_sz = readw(&q_ident->desc_sz); 3919 qti->comp_sz = readw(&q_ident->comp_sz); 3920 qti->sg_desc_sz = readw(&q_ident->sg_desc_sz); 3921 qti->max_sg_elems = readw(&q_ident->max_sg_elems); 3922 qti->sg_desc_stride = readw(&q_ident->sg_desc_stride); 3923 } 3924 mutex_unlock(&ionic->dev_cmd_lock); 3925 3926 if (err == -EINVAL) { 3927 dev_err(ionic->dev, "qtype %d not supported\n", qtype); 3928 continue; 3929 } else if (err == -EIO) { 3930 dev_err(ionic->dev, "q_ident failed, not 
static void ionic_lif_queue_identify(struct ionic_lif *lif)
{
	union ionic_q_identity __iomem *q_ident;
	struct ionic *ionic = lif->ionic;
	struct ionic_dev *idev;
	int qtype;
	int err;

	idev = &lif->ionic->idev;
	q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data;

	for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];

		/* filter out the ones we know about */
		switch (qtype) {
		case IONIC_QTYPE_ADMINQ:
		case IONIC_QTYPE_NOTIFYQ:
		case IONIC_QTYPE_RXQ:
		case IONIC_QTYPE_TXQ:
			break;
		default:
			continue;
		}

		memset(qti, 0, sizeof(*qti));

		mutex_lock(&ionic->dev_cmd_lock);
		ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
					     ionic_qtype_versions[qtype]);
		err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
		if (!err) {
			qti->version = readb(&q_ident->version);
			qti->supported = readb(&q_ident->supported);
			qti->features = readq(&q_ident->features);
			qti->desc_sz = readw(&q_ident->desc_sz);
			qti->comp_sz = readw(&q_ident->comp_sz);
			qti->sg_desc_sz = readw(&q_ident->sg_desc_sz);
			qti->max_sg_elems = readw(&q_ident->max_sg_elems);
			qti->sg_desc_stride = readw(&q_ident->sg_desc_stride);
		}
		mutex_unlock(&ionic->dev_cmd_lock);

		if (err == -EINVAL) {
			dev_err(ionic->dev, "qtype %d not supported\n", qtype);
			continue;
		} else if (err == -EIO) {
			dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
			return;
		} else if (err) {
			dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
				qtype, err);
			return;
		}

		dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
			qtype, qti->version);
		dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
			qtype, qti->supported);
		dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
			qtype, qti->features);
		dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
			qtype, qti->desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
			qtype, qti->comp_sz);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
			qtype, qti->sg_desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
			qtype, qti->max_sg_elems);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
			qtype, qti->sg_desc_stride);

		if (qti->max_sg_elems >= IONIC_MAX_FRAGS) {
			qti->max_sg_elems = IONIC_MAX_FRAGS - 1;
			dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to IONIC_MAX_FRAGS-1 %d\n",
				qtype, qti->max_sg_elems);
		}

		if (qti->max_sg_elems > MAX_SKB_FRAGS) {
			qti->max_sg_elems = MAX_SKB_FRAGS;
			dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to MAX_SKB_FRAGS %d\n",
				qtype, qti->max_sg_elems);
		}
	}
}

int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
		       union ionic_lif_identity *lid)
{
	struct ionic_dev *idev = &ionic->idev;
	size_t sz;
	int err;

	sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));

	mutex_lock(&ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
	memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
	mutex_unlock(&ionic->dev_cmd_lock);
	if (err)
		return err;

	dev_dbg(ionic->dev, "capabilities 0x%llx\n",
		le64_to_cpu(lid->capabilities));

	dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
		le32_to_cpu(lid->eth.max_ucast_filters));
	dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
		le32_to_cpu(lid->eth.max_mcast_filters));
	dev_dbg(ionic->dev, "eth.features 0x%llx\n",
		le64_to_cpu(lid->eth.config.features));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
	dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
	dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
	dev_dbg(ionic->dev, "eth.config.mtu %d\n",
		le32_to_cpu(lid->eth.config.mtu));

	return 0;
}
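
/* Budget the interrupt vectors: one for the adminq/notifyq pair, one per
 * Tx/Rx queue pair, and whatever is left over for RDMA event queues.
 * When the device or the OS can't supply that many, the queue counts are
 * halved and the request retried until it fits or we bottom out at the
 * two-vector minimum (adminq + one queue pair).
 */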
int ionic_lif_size(struct ionic *ionic)
{
	struct ionic_identity *ident = &ionic->ident;
	unsigned int nintrs, dev_nintrs;
	union ionic_lif_config *lc;
	unsigned int ntxqs_per_lif;
	unsigned int nrxqs_per_lif;
	unsigned int neqs_per_lif;
	unsigned int nnqs_per_lif;
	unsigned int nxqs, neqs;
	unsigned int min_intrs;
	int err;

	/* retrieve basic values from FW */
	lc = &ident->lif.eth.config;
	dev_nintrs = le32_to_cpu(ident->dev.nintrs);
	neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);

	/* limit values to play nice with kdump */
	if (is_kdump_kernel()) {
		dev_nintrs = 2;
		neqs_per_lif = 0;
		nnqs_per_lif = 0;
		ntxqs_per_lif = 1;
		nrxqs_per_lif = 1;
	}

	/* reserve last queue id for hardware timestamping */
	if (lc->features & cpu_to_le64(IONIC_ETH_HW_TIMESTAMP)) {
		if (ntxqs_per_lif <= 1 || nrxqs_per_lif <= 1) {
			lc->features &= cpu_to_le64(~IONIC_ETH_HW_TIMESTAMP);
		} else {
			ntxqs_per_lif -= 1;
			nrxqs_per_lif -= 1;
		}
	}

	nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
	nxqs = min(nxqs, num_online_cpus());
	neqs = min(neqs_per_lif, num_online_cpus());

try_again:
	/* interrupt usage:
	 *    1 for master lif adminq/notifyq
	 *    1 for each CPU for master lif TxRx queue pairs
	 *    whatever's left is for RDMA queues
	 */
	nintrs = 1 + nxqs + neqs;
	min_intrs = 2;	/* adminq + 1 TxRx queue pair */

	if (nintrs > dev_nintrs)
		goto try_fewer;

	err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
	if (err < 0 && err != -ENOSPC) {
		dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
		return err;
	}
	if (err == -ENOSPC)
		goto try_fewer;

	if (err != nintrs) {
		ionic_bus_free_irq_vectors(ionic);
		goto try_fewer;
	}

	ionic->nnqs_per_lif = nnqs_per_lif;
	ionic->neqs_per_lif = neqs;
	ionic->ntxqs_per_lif = nxqs;
	ionic->nrxqs_per_lif = nxqs;
	ionic->nintrs = nintrs;

	ionic_debugfs_add_sizes(ionic);

	return 0;

try_fewer:
	if (nnqs_per_lif > 1) {
		nnqs_per_lif >>= 1;
		goto try_again;
	}
	if (neqs > 1) {
		neqs >>= 1;
		goto try_again;
	}
	if (nxqs > 1) {
		nxqs >>= 1;
		goto try_again;
	}
	dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
	return -ENOSPC;
}
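
/* Illustrative sizing walk-through (hypothetical numbers): assume no RDMA
 * EQs (neqs = 0), nnqs_per_lif already 1, dev_nintrs = 8, and 16 online
 * CPUs with 16 Tx/Rx queue pairs.  The first pass wants 1 + 16 = 17
 * vectors, so try_fewer halves nxqs to 8 (1 + 8 = 9, still over budget)
 * and then to 4, where 1 + 4 = 5 <= 8 and the allocation can succeed.
 */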