1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ 3 4 #include <linux/ethtool.h> 5 #include <linux/printk.h> 6 #include <linux/dynamic_debug.h> 7 #include <linux/netdevice.h> 8 #include <linux/etherdevice.h> 9 #include <linux/if_vlan.h> 10 #include <linux/rtnetlink.h> 11 #include <linux/interrupt.h> 12 #include <linux/pci.h> 13 #include <linux/cpumask.h> 14 #include <linux/crash_dump.h> 15 #include <linux/vmalloc.h> 16 17 #include "ionic.h" 18 #include "ionic_bus.h" 19 #include "ionic_dev.h" 20 #include "ionic_lif.h" 21 #include "ionic_txrx.h" 22 #include "ionic_ethtool.h" 23 #include "ionic_debugfs.h" 24 25 /* queuetype support level */ 26 static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = { 27 [IONIC_QTYPE_ADMINQ] = 0, /* 0 = Base version with CQ support */ 28 [IONIC_QTYPE_NOTIFYQ] = 0, /* 0 = Base version */ 29 [IONIC_QTYPE_RXQ] = 2, /* 0 = Base version with CQ+SG support 30 * 2 = ... with CMB rings 31 */ 32 [IONIC_QTYPE_TXQ] = 3, /* 0 = Base version with CQ+SG support 33 * 1 = ... with Tx SG version 1 34 * 3 = ... with CMB rings 35 */ 36 }; 37 38 static void ionic_link_status_check(struct ionic_lif *lif); 39 static void ionic_lif_handle_fw_down(struct ionic_lif *lif); 40 static void ionic_lif_handle_fw_up(struct ionic_lif *lif); 41 static void ionic_lif_set_netdev_info(struct ionic_lif *lif); 42 43 static void ionic_txrx_deinit(struct ionic_lif *lif); 44 static int ionic_txrx_init(struct ionic_lif *lif); 45 static int ionic_start_queues(struct ionic_lif *lif); 46 static void ionic_stop_queues(struct ionic_lif *lif); 47 static void ionic_lif_queue_identify(struct ionic_lif *lif); 48 49 static int ionic_xdp_queues_config(struct ionic_lif *lif); 50 static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q); 51 52 static void ionic_dim_work(struct work_struct *work) 53 { 54 struct dim *dim = container_of(work, struct dim, work); 55 struct ionic_intr_info *intr; 56 struct dim_cq_moder cur_moder; 57 struct ionic_qcq *qcq; 58 struct ionic_lif *lif; 59 u32 new_coal; 60 61 cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix); 62 qcq = container_of(dim, struct ionic_qcq, dim); 63 lif = qcq->q.lif; 64 new_coal = ionic_coal_usec_to_hw(lif->ionic, cur_moder.usec); 65 new_coal = new_coal ? new_coal : 1; 66 67 intr = &qcq->intr; 68 if (intr->dim_coal_hw != new_coal) { 69 intr->dim_coal_hw = new_coal; 70 71 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 72 intr->index, intr->dim_coal_hw); 73 } 74 75 dim->state = DIM_START_MEASURE; 76 } 77 78 static void ionic_lif_deferred_work(struct work_struct *work) 79 { 80 struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work); 81 struct ionic_deferred *def = &lif->deferred; 82 struct ionic_deferred_work *w = NULL; 83 84 do { 85 spin_lock_bh(&def->lock); 86 if (!list_empty(&def->list)) { 87 w = list_first_entry(&def->list, 88 struct ionic_deferred_work, list); 89 list_del(&w->list); 90 } 91 spin_unlock_bh(&def->lock); 92 93 if (!w) 94 break; 95 96 switch (w->type) { 97 case IONIC_DW_TYPE_RX_MODE: 98 ionic_lif_rx_mode(lif); 99 break; 100 case IONIC_DW_TYPE_LINK_STATUS: 101 ionic_link_status_check(lif); 102 break; 103 case IONIC_DW_TYPE_LIF_RESET: 104 if (w->fw_status) { 105 ionic_lif_handle_fw_up(lif); 106 } else { 107 ionic_lif_handle_fw_down(lif); 108 109 /* Fire off another watchdog to see 110 * if the FW is already back rather than 111 * waiting another whole cycle 112 */ 113 mod_timer(&lif->ionic->watchdog_timer, jiffies + 1); 114 } 115 break; 116 default: 117 break; 118 } 119 kfree(w); 120 w = NULL; 121 } while (true); 122 } 123 124 void ionic_lif_deferred_enqueue(struct ionic_deferred *def, 125 struct ionic_deferred_work *work) 126 { 127 spin_lock_bh(&def->lock); 128 list_add_tail(&work->list, &def->list); 129 spin_unlock_bh(&def->lock); 130 schedule_work(&def->work); 131 } 132 133 static void ionic_link_status_check(struct ionic_lif *lif) 134 { 135 struct net_device *netdev = lif->netdev; 136 u16 link_status; 137 bool link_up; 138 139 if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state)) 140 return; 141 142 /* Don't put carrier back up if we're in a broken state */ 143 if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) { 144 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state); 145 return; 146 } 147 148 link_status = le16_to_cpu(lif->info->status.link_status); 149 link_up = link_status == IONIC_PORT_OPER_STATUS_UP; 150 151 if (link_up) { 152 int err = 0; 153 154 if (netdev->flags & IFF_UP && netif_running(netdev)) { 155 mutex_lock(&lif->queue_lock); 156 err = ionic_start_queues(lif); 157 if (err && err != -EBUSY) { 158 netdev_err(netdev, 159 "Failed to start queues: %d\n", err); 160 set_bit(IONIC_LIF_F_BROKEN, lif->state); 161 netif_carrier_off(lif->netdev); 162 } 163 mutex_unlock(&lif->queue_lock); 164 } 165 166 if (!err && !netif_carrier_ok(netdev)) { 167 ionic_port_identify(lif->ionic); 168 netdev_info(netdev, "Link up - %d Gbps\n", 169 le32_to_cpu(lif->info->status.link_speed) / 1000); 170 netif_carrier_on(netdev); 171 } 172 } else { 173 if (netif_carrier_ok(netdev)) { 174 lif->link_down_count++; 175 netdev_info(netdev, "Link down\n"); 176 netif_carrier_off(netdev); 177 } 178 179 if (netdev->flags & IFF_UP && netif_running(netdev)) { 180 mutex_lock(&lif->queue_lock); 181 ionic_stop_queues(lif); 182 mutex_unlock(&lif->queue_lock); 183 } 184 } 185 186 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state); 187 } 188 189 void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep) 190 { 191 struct ionic_deferred_work *work; 192 193 /* we only need one request outstanding at a time */ 194 if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state)) 195 return; 196 197 if (!can_sleep) { 198 work = kzalloc(sizeof(*work), GFP_ATOMIC); 199 if (!work) { 200 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state); 201 return; 202 } 203 204 work->type = IONIC_DW_TYPE_LINK_STATUS; 205 ionic_lif_deferred_enqueue(&lif->deferred, work); 206 } else { 207 ionic_link_status_check(lif); 208 } 209 } 210 211 static void ionic_napi_deadline(struct timer_list *timer) 212 { 213 struct ionic_qcq *qcq = container_of(timer, struct ionic_qcq, napi_deadline); 214 215 napi_schedule(&qcq->napi); 216 } 217 218 static irqreturn_t ionic_isr(int irq, void *data) 219 { 220 struct napi_struct *napi = data; 221 222 napi_schedule_irqoff(napi); 223 224 return IRQ_HANDLED; 225 } 226 227 static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq) 228 { 229 struct ionic_intr_info *intr = &qcq->intr; 230 struct device *dev = lif->ionic->dev; 231 struct ionic_queue *q = &qcq->q; 232 const char *name; 233 234 if (lif->registered) 235 name = lif->netdev->name; 236 else 237 name = dev_name(dev); 238 239 snprintf(intr->name, sizeof(intr->name), 240 "%s-%s-%s", IONIC_DRV_NAME, name, q->name); 241 242 return devm_request_irq(dev, intr->vector, ionic_isr, 243 0, intr->name, &qcq->napi); 244 } 245 246 static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr) 247 { 248 struct ionic *ionic = lif->ionic; 249 int index; 250 251 index = find_first_zero_bit(ionic->intrs, ionic->nintrs); 252 if (index == ionic->nintrs) { 253 netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n", 254 __func__, index, ionic->nintrs); 255 return -ENOSPC; 256 } 257 258 set_bit(index, ionic->intrs); 259 ionic_intr_init(&ionic->idev, intr, index); 260 261 return 0; 262 } 263 264 static void ionic_intr_free(struct ionic *ionic, int index) 265 { 266 if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs) 267 clear_bit(index, ionic->intrs); 268 } 269 270 static int ionic_qcq_enable(struct ionic_qcq *qcq) 271 { 272 struct ionic_queue *q = &qcq->q; 273 struct ionic_lif *lif = q->lif; 274 struct ionic_dev *idev; 275 struct device *dev; 276 277 struct ionic_admin_ctx ctx = { 278 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 279 .cmd.q_control = { 280 .opcode = IONIC_CMD_Q_CONTROL, 281 .lif_index = cpu_to_le16(lif->index), 282 .type = q->type, 283 .index = cpu_to_le32(q->index), 284 .oper = IONIC_Q_ENABLE, 285 }, 286 }; 287 int ret; 288 289 idev = &lif->ionic->idev; 290 dev = lif->ionic->dev; 291 292 dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n", 293 ctx.cmd.q_control.index, ctx.cmd.q_control.type); 294 295 if (qcq->flags & IONIC_QCQ_F_INTR) 296 ionic_intr_clean(idev->intr_ctrl, qcq->intr.index); 297 298 ret = ionic_adminq_post_wait(lif, &ctx); 299 if (ret) 300 return ret; 301 302 if (qcq->napi.poll) 303 napi_enable(&qcq->napi); 304 305 if (qcq->flags & IONIC_QCQ_F_INTR) { 306 irq_set_affinity_hint(qcq->intr.vector, 307 &qcq->intr.affinity_mask); 308 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 309 IONIC_INTR_MASK_CLEAR); 310 } 311 312 return 0; 313 } 314 315 static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err) 316 { 317 struct ionic_queue *q; 318 319 struct ionic_admin_ctx ctx = { 320 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 321 .cmd.q_control = { 322 .opcode = IONIC_CMD_Q_CONTROL, 323 .oper = IONIC_Q_DISABLE, 324 }, 325 }; 326 327 if (!qcq) { 328 netdev_err(lif->netdev, "%s: bad qcq\n", __func__); 329 return -ENXIO; 330 } 331 332 q = &qcq->q; 333 334 if (qcq->flags & IONIC_QCQ_F_INTR) { 335 struct ionic_dev *idev = &lif->ionic->idev; 336 337 cancel_work_sync(&qcq->dim.work); 338 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 339 IONIC_INTR_MASK_SET); 340 synchronize_irq(qcq->intr.vector); 341 irq_set_affinity_hint(qcq->intr.vector, NULL); 342 napi_disable(&qcq->napi); 343 del_timer_sync(&qcq->napi_deadline); 344 } 345 346 /* If there was a previous fw communcation error, don't bother with 347 * sending the adminq command and just return the same error value. 348 */ 349 if (fw_err == -ETIMEDOUT || fw_err == -ENXIO) 350 return fw_err; 351 352 ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index); 353 ctx.cmd.q_control.type = q->type; 354 ctx.cmd.q_control.index = cpu_to_le32(q->index); 355 dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n", 356 ctx.cmd.q_control.index, ctx.cmd.q_control.type); 357 358 return ionic_adminq_post_wait(lif, &ctx); 359 } 360 361 static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq) 362 { 363 struct ionic_dev *idev = &lif->ionic->idev; 364 365 if (!qcq) 366 return; 367 368 if (!(qcq->flags & IONIC_QCQ_F_INITED)) 369 return; 370 371 if (qcq->flags & IONIC_QCQ_F_INTR) { 372 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 373 IONIC_INTR_MASK_SET); 374 netif_napi_del(&qcq->napi); 375 } 376 377 qcq->flags &= ~IONIC_QCQ_F_INITED; 378 } 379 380 static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq) 381 { 382 if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0) 383 return; 384 385 irq_set_affinity_hint(qcq->intr.vector, NULL); 386 devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi); 387 qcq->intr.vector = 0; 388 ionic_intr_free(lif->ionic, qcq->intr.index); 389 qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED; 390 } 391 392 static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq) 393 { 394 struct device *dev = lif->ionic->dev; 395 396 if (!qcq) 397 return; 398 399 ionic_debugfs_del_qcq(qcq); 400 401 if (qcq->q_base) { 402 dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa); 403 qcq->q_base = NULL; 404 qcq->q_base_pa = 0; 405 } 406 407 if (qcq->cmb_q_base) { 408 iounmap(qcq->cmb_q_base); 409 ionic_put_cmb(lif, qcq->cmb_pgid, qcq->cmb_order); 410 qcq->cmb_pgid = 0; 411 qcq->cmb_order = 0; 412 qcq->cmb_q_base = NULL; 413 qcq->cmb_q_base_pa = 0; 414 } 415 416 if (qcq->cq_base) { 417 dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa); 418 qcq->cq_base = NULL; 419 qcq->cq_base_pa = 0; 420 } 421 422 if (qcq->sg_base) { 423 dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa); 424 qcq->sg_base = NULL; 425 qcq->sg_base_pa = 0; 426 } 427 428 ionic_xdp_unregister_rxq_info(&qcq->q); 429 ionic_qcq_intr_free(lif, qcq); 430 431 vfree(qcq->cq.info); 432 qcq->cq.info = NULL; 433 vfree(qcq->q.info); 434 qcq->q.info = NULL; 435 } 436 437 void ionic_qcqs_free(struct ionic_lif *lif) 438 { 439 struct device *dev = lif->ionic->dev; 440 struct ionic_qcq *adminqcq; 441 unsigned long irqflags; 442 443 if (lif->notifyqcq) { 444 ionic_qcq_free(lif, lif->notifyqcq); 445 devm_kfree(dev, lif->notifyqcq); 446 lif->notifyqcq = NULL; 447 } 448 449 if (lif->adminqcq) { 450 spin_lock_irqsave(&lif->adminq_lock, irqflags); 451 adminqcq = READ_ONCE(lif->adminqcq); 452 lif->adminqcq = NULL; 453 spin_unlock_irqrestore(&lif->adminq_lock, irqflags); 454 if (adminqcq) { 455 ionic_qcq_free(lif, adminqcq); 456 devm_kfree(dev, adminqcq); 457 } 458 } 459 460 if (lif->rxqcqs) { 461 devm_kfree(dev, lif->rxqstats); 462 lif->rxqstats = NULL; 463 devm_kfree(dev, lif->rxqcqs); 464 lif->rxqcqs = NULL; 465 } 466 467 if (lif->txqcqs) { 468 devm_kfree(dev, lif->txqstats); 469 lif->txqstats = NULL; 470 devm_kfree(dev, lif->txqcqs); 471 lif->txqcqs = NULL; 472 } 473 } 474 475 static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq, 476 struct ionic_qcq *n_qcq) 477 { 478 n_qcq->intr.vector = src_qcq->intr.vector; 479 n_qcq->intr.index = src_qcq->intr.index; 480 n_qcq->napi_qcq = src_qcq->napi_qcq; 481 } 482 483 static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq) 484 { 485 int err; 486 487 if (!(qcq->flags & IONIC_QCQ_F_INTR)) { 488 qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED; 489 return 0; 490 } 491 492 err = ionic_intr_alloc(lif, &qcq->intr); 493 if (err) { 494 netdev_warn(lif->netdev, "no intr for %s: %d\n", 495 qcq->q.name, err); 496 goto err_out; 497 } 498 499 err = ionic_bus_get_irq(lif->ionic, qcq->intr.index); 500 if (err < 0) { 501 netdev_warn(lif->netdev, "no vector for %s: %d\n", 502 qcq->q.name, err); 503 goto err_out_free_intr; 504 } 505 qcq->intr.vector = err; 506 ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index, 507 IONIC_INTR_MASK_SET); 508 509 err = ionic_request_irq(lif, qcq); 510 if (err) { 511 netdev_warn(lif->netdev, "irq request failed %d\n", err); 512 goto err_out_free_intr; 513 } 514 515 /* try to get the irq on the local numa node first */ 516 qcq->intr.cpu = cpumask_local_spread(qcq->intr.index, 517 dev_to_node(lif->ionic->dev)); 518 if (qcq->intr.cpu != -1) 519 cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask); 520 521 netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index); 522 return 0; 523 524 err_out_free_intr: 525 ionic_intr_free(lif->ionic, qcq->intr.index); 526 err_out: 527 return err; 528 } 529 530 static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, 531 unsigned int index, 532 const char *name, unsigned int flags, 533 unsigned int num_descs, unsigned int desc_size, 534 unsigned int cq_desc_size, 535 unsigned int sg_desc_size, 536 unsigned int pid, struct ionic_qcq **qcq) 537 { 538 struct ionic_dev *idev = &lif->ionic->idev; 539 struct device *dev = lif->ionic->dev; 540 void *q_base, *cq_base, *sg_base; 541 dma_addr_t cq_base_pa = 0; 542 dma_addr_t sg_base_pa = 0; 543 dma_addr_t q_base_pa = 0; 544 struct ionic_qcq *new; 545 int err; 546 547 *qcq = NULL; 548 549 new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL); 550 if (!new) { 551 netdev_err(lif->netdev, "Cannot allocate queue structure\n"); 552 err = -ENOMEM; 553 goto err_out; 554 } 555 556 new->q.dev = dev; 557 new->flags = flags; 558 559 new->q.info = vcalloc(num_descs, sizeof(*new->q.info)); 560 if (!new->q.info) { 561 netdev_err(lif->netdev, "Cannot allocate queue info\n"); 562 err = -ENOMEM; 563 goto err_out_free_qcq; 564 } 565 566 new->q.type = type; 567 new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems; 568 569 err = ionic_q_init(lif, idev, &new->q, index, name, num_descs, 570 desc_size, sg_desc_size, pid); 571 if (err) { 572 netdev_err(lif->netdev, "Cannot initialize queue\n"); 573 goto err_out_free_q_info; 574 } 575 576 err = ionic_alloc_qcq_interrupt(lif, new); 577 if (err) 578 goto err_out; 579 580 new->cq.info = vcalloc(num_descs, sizeof(*new->cq.info)); 581 if (!new->cq.info) { 582 netdev_err(lif->netdev, "Cannot allocate completion queue info\n"); 583 err = -ENOMEM; 584 goto err_out_free_irq; 585 } 586 587 err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size); 588 if (err) { 589 netdev_err(lif->netdev, "Cannot initialize completion queue\n"); 590 goto err_out_free_cq_info; 591 } 592 593 if (flags & IONIC_QCQ_F_NOTIFYQ) { 594 int q_size; 595 596 /* q & cq need to be contiguous in NotifyQ, so alloc it all in q 597 * and don't alloc qc. We leave new->qc_size and new->qc_base 598 * as 0 to be sure we don't try to free it later. 599 */ 600 q_size = ALIGN(num_descs * desc_size, PAGE_SIZE); 601 new->q_size = PAGE_SIZE + q_size + 602 ALIGN(num_descs * cq_desc_size, PAGE_SIZE); 603 new->q_base = dma_alloc_coherent(dev, new->q_size, 604 &new->q_base_pa, GFP_KERNEL); 605 if (!new->q_base) { 606 netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n"); 607 err = -ENOMEM; 608 goto err_out_free_cq_info; 609 } 610 q_base = PTR_ALIGN(new->q_base, PAGE_SIZE); 611 q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE); 612 ionic_q_map(&new->q, q_base, q_base_pa); 613 614 cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE); 615 cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE); 616 ionic_cq_map(&new->cq, cq_base, cq_base_pa); 617 ionic_cq_bind(&new->cq, &new->q); 618 } else { 619 /* regular DMA q descriptors */ 620 new->q_size = PAGE_SIZE + (num_descs * desc_size); 621 new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa, 622 GFP_KERNEL); 623 if (!new->q_base) { 624 netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n"); 625 err = -ENOMEM; 626 goto err_out_free_cq_info; 627 } 628 q_base = PTR_ALIGN(new->q_base, PAGE_SIZE); 629 q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE); 630 ionic_q_map(&new->q, q_base, q_base_pa); 631 632 if (flags & IONIC_QCQ_F_CMB_RINGS) { 633 /* on-chip CMB q descriptors */ 634 new->cmb_q_size = num_descs * desc_size; 635 new->cmb_order = order_base_2(new->cmb_q_size / PAGE_SIZE); 636 637 err = ionic_get_cmb(lif, &new->cmb_pgid, &new->cmb_q_base_pa, 638 new->cmb_order); 639 if (err) { 640 netdev_err(lif->netdev, 641 "Cannot allocate queue order %d from cmb: err %d\n", 642 new->cmb_order, err); 643 goto err_out_free_q; 644 } 645 646 new->cmb_q_base = ioremap_wc(new->cmb_q_base_pa, new->cmb_q_size); 647 if (!new->cmb_q_base) { 648 netdev_err(lif->netdev, "Cannot map queue from cmb\n"); 649 ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order); 650 err = -ENOMEM; 651 goto err_out_free_q; 652 } 653 654 new->cmb_q_base_pa -= idev->phy_cmb_pages; 655 ionic_q_cmb_map(&new->q, new->cmb_q_base, new->cmb_q_base_pa); 656 } 657 658 /* cq DMA descriptors */ 659 new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size); 660 new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa, 661 GFP_KERNEL); 662 if (!new->cq_base) { 663 netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n"); 664 err = -ENOMEM; 665 goto err_out_free_q; 666 } 667 cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE); 668 cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE); 669 ionic_cq_map(&new->cq, cq_base, cq_base_pa); 670 ionic_cq_bind(&new->cq, &new->q); 671 } 672 673 if (flags & IONIC_QCQ_F_SG) { 674 new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size); 675 new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa, 676 GFP_KERNEL); 677 if (!new->sg_base) { 678 netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n"); 679 err = -ENOMEM; 680 goto err_out_free_cq; 681 } 682 sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE); 683 sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE); 684 ionic_q_sg_map(&new->q, sg_base, sg_base_pa); 685 } 686 687 INIT_WORK(&new->dim.work, ionic_dim_work); 688 new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 689 690 *qcq = new; 691 692 return 0; 693 694 err_out_free_cq: 695 dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa); 696 err_out_free_q: 697 if (new->cmb_q_base) { 698 iounmap(new->cmb_q_base); 699 ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order); 700 } 701 dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa); 702 err_out_free_cq_info: 703 vfree(new->cq.info); 704 err_out_free_irq: 705 if (flags & IONIC_QCQ_F_INTR) { 706 devm_free_irq(dev, new->intr.vector, &new->napi); 707 ionic_intr_free(lif->ionic, new->intr.index); 708 } 709 err_out_free_q_info: 710 vfree(new->q.info); 711 err_out_free_qcq: 712 devm_kfree(dev, new); 713 err_out: 714 dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err); 715 return err; 716 } 717 718 static int ionic_qcqs_alloc(struct ionic_lif *lif) 719 { 720 struct device *dev = lif->ionic->dev; 721 unsigned int flags; 722 int err; 723 724 flags = IONIC_QCQ_F_INTR; 725 err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags, 726 IONIC_ADMINQ_LENGTH, 727 sizeof(struct ionic_admin_cmd), 728 sizeof(struct ionic_admin_comp), 729 0, lif->kern_pid, &lif->adminqcq); 730 if (err) 731 return err; 732 ionic_debugfs_add_qcq(lif, lif->adminqcq); 733 734 if (lif->ionic->nnqs_per_lif) { 735 flags = IONIC_QCQ_F_NOTIFYQ; 736 err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq", 737 flags, IONIC_NOTIFYQ_LENGTH, 738 sizeof(struct ionic_notifyq_cmd), 739 sizeof(union ionic_notifyq_comp), 740 0, lif->kern_pid, &lif->notifyqcq); 741 if (err) 742 goto err_out; 743 ionic_debugfs_add_qcq(lif, lif->notifyqcq); 744 745 /* Let the notifyq ride on the adminq interrupt */ 746 ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq); 747 } 748 749 err = -ENOMEM; 750 lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif, 751 sizeof(*lif->txqcqs), GFP_KERNEL); 752 if (!lif->txqcqs) 753 goto err_out; 754 lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif, 755 sizeof(*lif->rxqcqs), GFP_KERNEL); 756 if (!lif->rxqcqs) 757 goto err_out; 758 759 lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif + 1, 760 sizeof(*lif->txqstats), GFP_KERNEL); 761 if (!lif->txqstats) 762 goto err_out; 763 lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif + 1, 764 sizeof(*lif->rxqstats), GFP_KERNEL); 765 if (!lif->rxqstats) 766 goto err_out; 767 768 return 0; 769 770 err_out: 771 ionic_qcqs_free(lif); 772 return err; 773 } 774 775 static void ionic_qcq_sanitize(struct ionic_qcq *qcq) 776 { 777 qcq->q.tail_idx = 0; 778 qcq->q.head_idx = 0; 779 qcq->cq.tail_idx = 0; 780 qcq->cq.done_color = 1; 781 memset(qcq->q_base, 0, qcq->q_size); 782 if (qcq->cmb_q_base) 783 memset_io(qcq->cmb_q_base, 0, qcq->cmb_q_size); 784 memset(qcq->cq_base, 0, qcq->cq_size); 785 memset(qcq->sg_base, 0, qcq->sg_size); 786 } 787 788 static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) 789 { 790 struct device *dev = lif->ionic->dev; 791 struct ionic_queue *q = &qcq->q; 792 struct ionic_cq *cq = &qcq->cq; 793 struct ionic_admin_ctx ctx = { 794 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 795 .cmd.q_init = { 796 .opcode = IONIC_CMD_Q_INIT, 797 .lif_index = cpu_to_le16(lif->index), 798 .type = q->type, 799 .ver = lif->qtype_info[q->type].version, 800 .index = cpu_to_le32(q->index), 801 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | 802 IONIC_QINIT_F_SG), 803 .intr_index = cpu_to_le16(qcq->intr.index), 804 .pid = cpu_to_le16(q->pid), 805 .ring_size = ilog2(q->num_descs), 806 .ring_base = cpu_to_le64(q->base_pa), 807 .cq_ring_base = cpu_to_le64(cq->base_pa), 808 .sg_ring_base = cpu_to_le64(q->sg_base_pa), 809 .features = cpu_to_le64(q->features), 810 }, 811 }; 812 int err; 813 814 if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) { 815 ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB); 816 ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa); 817 } 818 819 dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid); 820 dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index); 821 dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); 822 dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); 823 dev_dbg(dev, "txq_init.cq_ring_base 0x%llx\n", ctx.cmd.q_init.cq_ring_base); 824 dev_dbg(dev, "txq_init.sg_ring_base 0x%llx\n", ctx.cmd.q_init.sg_ring_base); 825 dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags); 826 dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver); 827 dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index); 828 829 ionic_qcq_sanitize(qcq); 830 831 err = ionic_adminq_post_wait(lif, &ctx); 832 if (err) 833 return err; 834 835 q->hw_type = ctx.comp.q_init.hw_type; 836 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); 837 q->dbval = IONIC_DBELL_QID(q->hw_index); 838 839 dev_dbg(dev, "txq->hw_type %d\n", q->hw_type); 840 dev_dbg(dev, "txq->hw_index %d\n", q->hw_index); 841 842 q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE; 843 q->dbell_jiffies = jiffies; 844 845 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) { 846 netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi); 847 qcq->napi_qcq = qcq; 848 timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0); 849 } 850 851 qcq->flags |= IONIC_QCQ_F_INITED; 852 853 return 0; 854 } 855 856 static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) 857 { 858 struct device *dev = lif->ionic->dev; 859 struct ionic_queue *q = &qcq->q; 860 struct ionic_cq *cq = &qcq->cq; 861 struct ionic_admin_ctx ctx = { 862 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 863 .cmd.q_init = { 864 .opcode = IONIC_CMD_Q_INIT, 865 .lif_index = cpu_to_le16(lif->index), 866 .type = q->type, 867 .ver = lif->qtype_info[q->type].version, 868 .index = cpu_to_le32(q->index), 869 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ), 870 .intr_index = cpu_to_le16(cq->bound_intr->index), 871 .pid = cpu_to_le16(q->pid), 872 .ring_size = ilog2(q->num_descs), 873 .ring_base = cpu_to_le64(q->base_pa), 874 .cq_ring_base = cpu_to_le64(cq->base_pa), 875 .sg_ring_base = cpu_to_le64(q->sg_base_pa), 876 .features = cpu_to_le64(q->features), 877 }, 878 }; 879 int err; 880 881 q->partner = &lif->txqcqs[q->index]->q; 882 q->partner->partner = q; 883 884 if (!lif->xdp_prog || 885 (lif->xdp_prog->aux && lif->xdp_prog->aux->xdp_has_frags)) 886 ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_SG); 887 888 if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) { 889 ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB); 890 ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa); 891 } 892 893 dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid); 894 dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index); 895 dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); 896 dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); 897 dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags); 898 dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver); 899 dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index); 900 901 ionic_qcq_sanitize(qcq); 902 903 err = ionic_adminq_post_wait(lif, &ctx); 904 if (err) 905 return err; 906 907 q->hw_type = ctx.comp.q_init.hw_type; 908 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); 909 q->dbval = IONIC_DBELL_QID(q->hw_index); 910 911 dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type); 912 dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index); 913 914 q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE; 915 q->dbell_jiffies = jiffies; 916 917 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) 918 netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi); 919 else 920 netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi); 921 922 qcq->napi_qcq = qcq; 923 timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0); 924 925 qcq->flags |= IONIC_QCQ_F_INITED; 926 927 return 0; 928 } 929 930 int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif) 931 { 932 unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz; 933 unsigned int txq_i, flags; 934 struct ionic_qcq *txq; 935 u64 features; 936 int err; 937 938 if (lif->hwstamp_txq) 939 return 0; 940 941 features = IONIC_Q_F_2X_CQ_DESC | IONIC_TXQ_F_HWSTAMP; 942 943 num_desc = IONIC_MIN_TXRX_DESC; 944 desc_sz = sizeof(struct ionic_txq_desc); 945 comp_sz = 2 * sizeof(struct ionic_txq_comp); 946 947 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 && 948 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == sizeof(struct ionic_txq_sg_desc_v1)) 949 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1); 950 else 951 sg_desc_sz = sizeof(struct ionic_txq_sg_desc); 952 953 txq_i = lif->ionic->ntxqs_per_lif; 954 flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; 955 956 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags, 957 num_desc, desc_sz, comp_sz, sg_desc_sz, 958 lif->kern_pid, &txq); 959 if (err) 960 goto err_qcq_alloc; 961 962 txq->q.features = features; 963 964 ionic_link_qcq_interrupts(lif->adminqcq, txq); 965 ionic_debugfs_add_qcq(lif, txq); 966 967 lif->hwstamp_txq = txq; 968 969 if (netif_running(lif->netdev)) { 970 err = ionic_lif_txq_init(lif, txq); 971 if (err) 972 goto err_qcq_init; 973 974 if (test_bit(IONIC_LIF_F_UP, lif->state)) { 975 err = ionic_qcq_enable(txq); 976 if (err) 977 goto err_qcq_enable; 978 } 979 } 980 981 return 0; 982 983 err_qcq_enable: 984 ionic_lif_qcq_deinit(lif, txq); 985 err_qcq_init: 986 lif->hwstamp_txq = NULL; 987 ionic_debugfs_del_qcq(txq); 988 ionic_qcq_free(lif, txq); 989 devm_kfree(lif->ionic->dev, txq); 990 err_qcq_alloc: 991 return err; 992 } 993 994 int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif) 995 { 996 unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz; 997 unsigned int rxq_i, flags; 998 struct ionic_qcq *rxq; 999 u64 features; 1000 int err; 1001 1002 if (lif->hwstamp_rxq) 1003 return 0; 1004 1005 features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP; 1006 1007 num_desc = IONIC_MIN_TXRX_DESC; 1008 desc_sz = sizeof(struct ionic_rxq_desc); 1009 comp_sz = 2 * sizeof(struct ionic_rxq_comp); 1010 sg_desc_sz = sizeof(struct ionic_rxq_sg_desc); 1011 1012 rxq_i = lif->ionic->nrxqs_per_lif; 1013 flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG; 1014 1015 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags, 1016 num_desc, desc_sz, comp_sz, sg_desc_sz, 1017 lif->kern_pid, &rxq); 1018 if (err) 1019 goto err_qcq_alloc; 1020 1021 rxq->q.features = features; 1022 1023 ionic_link_qcq_interrupts(lif->adminqcq, rxq); 1024 ionic_debugfs_add_qcq(lif, rxq); 1025 1026 lif->hwstamp_rxq = rxq; 1027 1028 if (netif_running(lif->netdev)) { 1029 err = ionic_lif_rxq_init(lif, rxq); 1030 if (err) 1031 goto err_qcq_init; 1032 1033 if (test_bit(IONIC_LIF_F_UP, lif->state)) { 1034 ionic_rx_fill(&rxq->q); 1035 err = ionic_qcq_enable(rxq); 1036 if (err) 1037 goto err_qcq_enable; 1038 } 1039 } 1040 1041 return 0; 1042 1043 err_qcq_enable: 1044 ionic_lif_qcq_deinit(lif, rxq); 1045 err_qcq_init: 1046 lif->hwstamp_rxq = NULL; 1047 ionic_debugfs_del_qcq(rxq); 1048 ionic_qcq_free(lif, rxq); 1049 devm_kfree(lif->ionic->dev, rxq); 1050 err_qcq_alloc: 1051 return err; 1052 } 1053 1054 int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif *lif, bool rx_all) 1055 { 1056 struct ionic_queue_params qparam; 1057 1058 ionic_init_queue_params(lif, &qparam); 1059 1060 if (rx_all) 1061 qparam.rxq_features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP; 1062 else 1063 qparam.rxq_features = 0; 1064 1065 /* if we're not running, just set the values and return */ 1066 if (!netif_running(lif->netdev)) { 1067 lif->rxq_features = qparam.rxq_features; 1068 return 0; 1069 } 1070 1071 return ionic_reconfigure_queues(lif, &qparam); 1072 } 1073 1074 int ionic_lif_set_hwstamp_txmode(struct ionic_lif *lif, u16 txstamp_mode) 1075 { 1076 struct ionic_admin_ctx ctx = { 1077 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1078 .cmd.lif_setattr = { 1079 .opcode = IONIC_CMD_LIF_SETATTR, 1080 .index = cpu_to_le16(lif->index), 1081 .attr = IONIC_LIF_ATTR_TXSTAMP, 1082 .txstamp_mode = cpu_to_le16(txstamp_mode), 1083 }, 1084 }; 1085 1086 return ionic_adminq_post_wait(lif, &ctx); 1087 } 1088 1089 static void ionic_lif_del_hwstamp_rxfilt(struct ionic_lif *lif) 1090 { 1091 struct ionic_admin_ctx ctx = { 1092 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1093 .cmd.rx_filter_del = { 1094 .opcode = IONIC_CMD_RX_FILTER_DEL, 1095 .lif_index = cpu_to_le16(lif->index), 1096 }, 1097 }; 1098 struct ionic_rx_filter *f; 1099 u32 filter_id; 1100 int err; 1101 1102 spin_lock_bh(&lif->rx_filters.lock); 1103 1104 f = ionic_rx_filter_rxsteer(lif); 1105 if (!f) { 1106 spin_unlock_bh(&lif->rx_filters.lock); 1107 return; 1108 } 1109 1110 filter_id = f->filter_id; 1111 ionic_rx_filter_free(lif, f); 1112 1113 spin_unlock_bh(&lif->rx_filters.lock); 1114 1115 netdev_dbg(lif->netdev, "rx_filter del RXSTEER (id %d)\n", filter_id); 1116 1117 ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(filter_id); 1118 1119 err = ionic_adminq_post_wait(lif, &ctx); 1120 if (err && err != -EEXIST) 1121 netdev_dbg(lif->netdev, "failed to delete rx_filter RXSTEER (id %d)\n", filter_id); 1122 } 1123 1124 static int ionic_lif_add_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class) 1125 { 1126 struct ionic_admin_ctx ctx = { 1127 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1128 .cmd.rx_filter_add = { 1129 .opcode = IONIC_CMD_RX_FILTER_ADD, 1130 .lif_index = cpu_to_le16(lif->index), 1131 .match = cpu_to_le16(IONIC_RX_FILTER_STEER_PKTCLASS), 1132 .pkt_class = cpu_to_le64(pkt_class), 1133 }, 1134 }; 1135 u8 qtype; 1136 u32 qid; 1137 int err; 1138 1139 if (!lif->hwstamp_rxq) 1140 return -EINVAL; 1141 1142 qtype = lif->hwstamp_rxq->q.type; 1143 ctx.cmd.rx_filter_add.qtype = qtype; 1144 1145 qid = lif->hwstamp_rxq->q.index; 1146 ctx.cmd.rx_filter_add.qid = cpu_to_le32(qid); 1147 1148 netdev_dbg(lif->netdev, "rx_filter add RXSTEER\n"); 1149 err = ionic_adminq_post_wait(lif, &ctx); 1150 if (err && err != -EEXIST) 1151 return err; 1152 1153 spin_lock_bh(&lif->rx_filters.lock); 1154 err = ionic_rx_filter_save(lif, 0, qid, 0, &ctx, IONIC_FILTER_STATE_SYNCED); 1155 spin_unlock_bh(&lif->rx_filters.lock); 1156 1157 return err; 1158 } 1159 1160 int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class) 1161 { 1162 ionic_lif_del_hwstamp_rxfilt(lif); 1163 1164 if (!pkt_class) 1165 return 0; 1166 1167 return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class); 1168 } 1169 1170 static bool ionic_notifyq_service(struct ionic_cq *cq, 1171 struct ionic_cq_info *cq_info) 1172 { 1173 union ionic_notifyq_comp *comp = cq_info->cq_desc; 1174 struct ionic_deferred_work *work; 1175 struct net_device *netdev; 1176 struct ionic_queue *q; 1177 struct ionic_lif *lif; 1178 u64 eid; 1179 1180 q = cq->bound_q; 1181 lif = q->info[0].cb_arg; 1182 netdev = lif->netdev; 1183 eid = le64_to_cpu(comp->event.eid); 1184 1185 /* Have we run out of new completions to process? */ 1186 if ((s64)(eid - lif->last_eid) <= 0) 1187 return false; 1188 1189 lif->last_eid = eid; 1190 1191 dev_dbg(lif->ionic->dev, "notifyq event:\n"); 1192 dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1, 1193 comp, sizeof(*comp), true); 1194 1195 switch (le16_to_cpu(comp->event.ecode)) { 1196 case IONIC_EVENT_LINK_CHANGE: 1197 ionic_link_status_check_request(lif, CAN_NOT_SLEEP); 1198 break; 1199 case IONIC_EVENT_RESET: 1200 if (lif->ionic->idev.fw_status_ready && 1201 !test_bit(IONIC_LIF_F_FW_RESET, lif->state) && 1202 !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) { 1203 work = kzalloc(sizeof(*work), GFP_ATOMIC); 1204 if (!work) { 1205 netdev_err(lif->netdev, "Reset event dropped\n"); 1206 clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state); 1207 } else { 1208 work->type = IONIC_DW_TYPE_LIF_RESET; 1209 ionic_lif_deferred_enqueue(&lif->deferred, work); 1210 } 1211 } 1212 break; 1213 default: 1214 netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n", 1215 comp->event.ecode, eid); 1216 break; 1217 } 1218 1219 return true; 1220 } 1221 1222 static bool ionic_adminq_service(struct ionic_cq *cq, 1223 struct ionic_cq_info *cq_info) 1224 { 1225 struct ionic_admin_comp *comp = cq_info->cq_desc; 1226 1227 if (!color_match(comp->color, cq->done_color)) 1228 return false; 1229 1230 ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index)); 1231 1232 return true; 1233 } 1234 1235 static int ionic_adminq_napi(struct napi_struct *napi, int budget) 1236 { 1237 struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr; 1238 struct ionic_lif *lif = napi_to_cq(napi)->lif; 1239 struct ionic_dev *idev = &lif->ionic->idev; 1240 unsigned long irqflags; 1241 unsigned int flags = 0; 1242 bool resched = false; 1243 int rx_work = 0; 1244 int tx_work = 0; 1245 int n_work = 0; 1246 int a_work = 0; 1247 int work_done; 1248 int credits; 1249 1250 if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED) 1251 n_work = ionic_cq_service(&lif->notifyqcq->cq, budget, 1252 ionic_notifyq_service, NULL, NULL); 1253 1254 spin_lock_irqsave(&lif->adminq_lock, irqflags); 1255 if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED) 1256 a_work = ionic_cq_service(&lif->adminqcq->cq, budget, 1257 ionic_adminq_service, NULL, NULL); 1258 spin_unlock_irqrestore(&lif->adminq_lock, irqflags); 1259 1260 if (lif->hwstamp_rxq) 1261 rx_work = ionic_cq_service(&lif->hwstamp_rxq->cq, budget, 1262 ionic_rx_service, NULL, NULL); 1263 1264 if (lif->hwstamp_txq) 1265 tx_work = ionic_cq_service(&lif->hwstamp_txq->cq, budget, 1266 ionic_tx_service, NULL, NULL); 1267 1268 work_done = max(max(n_work, a_work), max(rx_work, tx_work)); 1269 if (work_done < budget && napi_complete_done(napi, work_done)) { 1270 flags |= IONIC_INTR_CRED_UNMASK; 1271 intr->rearm_count++; 1272 } 1273 1274 if (work_done || flags) { 1275 flags |= IONIC_INTR_CRED_RESET_COALESCE; 1276 credits = n_work + a_work + rx_work + tx_work; 1277 ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags); 1278 } 1279 1280 if (!a_work && ionic_adminq_poke_doorbell(&lif->adminqcq->q)) 1281 resched = true; 1282 if (lif->hwstamp_rxq && !rx_work && ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q)) 1283 resched = true; 1284 if (lif->hwstamp_txq && !tx_work && ionic_txq_poke_doorbell(&lif->hwstamp_txq->q)) 1285 resched = true; 1286 if (resched) 1287 mod_timer(&lif->adminqcq->napi_deadline, 1288 jiffies + IONIC_NAPI_DEADLINE); 1289 1290 return work_done; 1291 } 1292 1293 void ionic_get_stats64(struct net_device *netdev, 1294 struct rtnl_link_stats64 *ns) 1295 { 1296 struct ionic_lif *lif = netdev_priv(netdev); 1297 struct ionic_lif_stats *ls; 1298 1299 memset(ns, 0, sizeof(*ns)); 1300 ls = &lif->info->stats; 1301 1302 ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) + 1303 le64_to_cpu(ls->rx_mcast_packets) + 1304 le64_to_cpu(ls->rx_bcast_packets); 1305 1306 ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) + 1307 le64_to_cpu(ls->tx_mcast_packets) + 1308 le64_to_cpu(ls->tx_bcast_packets); 1309 1310 ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) + 1311 le64_to_cpu(ls->rx_mcast_bytes) + 1312 le64_to_cpu(ls->rx_bcast_bytes); 1313 1314 ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) + 1315 le64_to_cpu(ls->tx_mcast_bytes) + 1316 le64_to_cpu(ls->tx_bcast_bytes); 1317 1318 ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) + 1319 le64_to_cpu(ls->rx_mcast_drop_packets) + 1320 le64_to_cpu(ls->rx_bcast_drop_packets); 1321 1322 ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) + 1323 le64_to_cpu(ls->tx_mcast_drop_packets) + 1324 le64_to_cpu(ls->tx_bcast_drop_packets); 1325 1326 ns->multicast = le64_to_cpu(ls->rx_mcast_packets); 1327 1328 ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty); 1329 1330 ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) + 1331 le64_to_cpu(ls->rx_queue_disabled) + 1332 le64_to_cpu(ls->rx_desc_fetch_error) + 1333 le64_to_cpu(ls->rx_desc_data_error); 1334 1335 ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) + 1336 le64_to_cpu(ls->tx_queue_disabled) + 1337 le64_to_cpu(ls->tx_desc_fetch_error) + 1338 le64_to_cpu(ls->tx_desc_data_error); 1339 1340 ns->rx_errors = ns->rx_over_errors + 1341 ns->rx_missed_errors; 1342 1343 ns->tx_errors = ns->tx_aborted_errors; 1344 } 1345 1346 static int ionic_addr_add(struct net_device *netdev, const u8 *addr) 1347 { 1348 return ionic_lif_list_addr(netdev_priv(netdev), addr, ADD_ADDR); 1349 } 1350 1351 static int ionic_addr_del(struct net_device *netdev, const u8 *addr) 1352 { 1353 /* Don't delete our own address from the uc list */ 1354 if (ether_addr_equal(addr, netdev->dev_addr)) 1355 return 0; 1356 1357 return ionic_lif_list_addr(netdev_priv(netdev), addr, DEL_ADDR); 1358 } 1359 1360 void ionic_lif_rx_mode(struct ionic_lif *lif) 1361 { 1362 struct net_device *netdev = lif->netdev; 1363 unsigned int nfilters; 1364 unsigned int nd_flags; 1365 char buf[128]; 1366 u16 rx_mode; 1367 int i; 1368 #define REMAIN(__x) (sizeof(buf) - (__x)) 1369 1370 mutex_lock(&lif->config_lock); 1371 1372 /* grab the flags once for local use */ 1373 nd_flags = netdev->flags; 1374 1375 rx_mode = IONIC_RX_MODE_F_UNICAST; 1376 rx_mode |= (nd_flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0; 1377 rx_mode |= (nd_flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0; 1378 rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0; 1379 rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0; 1380 1381 /* sync the filters */ 1382 ionic_rx_filter_sync(lif); 1383 1384 /* check for overflow state 1385 * if so, we track that we overflowed and enable NIC PROMISC 1386 * else if the overflow is set and not needed 1387 * we remove our overflow flag and check the netdev flags 1388 * to see if we can disable NIC PROMISC 1389 */ 1390 nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); 1391 1392 if (((lif->nucast + lif->nmcast) >= nfilters) || 1393 (lif->max_vlans && lif->nvlans >= lif->max_vlans)) { 1394 rx_mode |= IONIC_RX_MODE_F_PROMISC; 1395 rx_mode |= IONIC_RX_MODE_F_ALLMULTI; 1396 } else { 1397 if (!(nd_flags & IFF_PROMISC)) 1398 rx_mode &= ~IONIC_RX_MODE_F_PROMISC; 1399 if (!(nd_flags & IFF_ALLMULTI)) 1400 rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI; 1401 } 1402 1403 i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:", 1404 lif->rx_mode, rx_mode); 1405 if (rx_mode & IONIC_RX_MODE_F_UNICAST) 1406 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST"); 1407 if (rx_mode & IONIC_RX_MODE_F_MULTICAST) 1408 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST"); 1409 if (rx_mode & IONIC_RX_MODE_F_BROADCAST) 1410 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST"); 1411 if (rx_mode & IONIC_RX_MODE_F_PROMISC) 1412 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC"); 1413 if (rx_mode & IONIC_RX_MODE_F_ALLMULTI) 1414 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI"); 1415 if (rx_mode & IONIC_RX_MODE_F_RDMA_SNIFFER) 1416 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_RDMA_SNIFFER"); 1417 netdev_dbg(netdev, "lif%d %s\n", lif->index, buf); 1418 1419 if (lif->rx_mode != rx_mode) { 1420 struct ionic_admin_ctx ctx = { 1421 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1422 .cmd.rx_mode_set = { 1423 .opcode = IONIC_CMD_RX_MODE_SET, 1424 .lif_index = cpu_to_le16(lif->index), 1425 }, 1426 }; 1427 int err; 1428 1429 ctx.cmd.rx_mode_set.rx_mode = cpu_to_le16(rx_mode); 1430 err = ionic_adminq_post_wait(lif, &ctx); 1431 if (err) 1432 netdev_warn(netdev, "set rx_mode 0x%04x failed: %d\n", 1433 rx_mode, err); 1434 else 1435 lif->rx_mode = rx_mode; 1436 } 1437 1438 mutex_unlock(&lif->config_lock); 1439 } 1440 1441 static void ionic_ndo_set_rx_mode(struct net_device *netdev) 1442 { 1443 struct ionic_lif *lif = netdev_priv(netdev); 1444 struct ionic_deferred_work *work; 1445 1446 /* Sync the kernel filter list with the driver filter list */ 1447 __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del); 1448 __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del); 1449 1450 /* Shove off the rest of the rxmode work to the work task 1451 * which will include syncing the filters to the firmware. 1452 */ 1453 work = kzalloc(sizeof(*work), GFP_ATOMIC); 1454 if (!work) { 1455 netdev_err(lif->netdev, "rxmode change dropped\n"); 1456 return; 1457 } 1458 work->type = IONIC_DW_TYPE_RX_MODE; 1459 netdev_dbg(lif->netdev, "deferred: rx_mode\n"); 1460 ionic_lif_deferred_enqueue(&lif->deferred, work); 1461 } 1462 1463 static __le64 ionic_netdev_features_to_nic(netdev_features_t features) 1464 { 1465 u64 wanted = 0; 1466 1467 if (features & NETIF_F_HW_VLAN_CTAG_TX) 1468 wanted |= IONIC_ETH_HW_VLAN_TX_TAG; 1469 if (features & NETIF_F_HW_VLAN_CTAG_RX) 1470 wanted |= IONIC_ETH_HW_VLAN_RX_STRIP; 1471 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) 1472 wanted |= IONIC_ETH_HW_VLAN_RX_FILTER; 1473 if (features & NETIF_F_RXHASH) 1474 wanted |= IONIC_ETH_HW_RX_HASH; 1475 if (features & NETIF_F_RXCSUM) 1476 wanted |= IONIC_ETH_HW_RX_CSUM; 1477 if (features & NETIF_F_SG) 1478 wanted |= IONIC_ETH_HW_TX_SG; 1479 if (features & NETIF_F_HW_CSUM) 1480 wanted |= IONIC_ETH_HW_TX_CSUM; 1481 if (features & NETIF_F_TSO) 1482 wanted |= IONIC_ETH_HW_TSO; 1483 if (features & NETIF_F_TSO6) 1484 wanted |= IONIC_ETH_HW_TSO_IPV6; 1485 if (features & NETIF_F_TSO_ECN) 1486 wanted |= IONIC_ETH_HW_TSO_ECN; 1487 if (features & NETIF_F_GSO_GRE) 1488 wanted |= IONIC_ETH_HW_TSO_GRE; 1489 if (features & NETIF_F_GSO_GRE_CSUM) 1490 wanted |= IONIC_ETH_HW_TSO_GRE_CSUM; 1491 if (features & NETIF_F_GSO_IPXIP4) 1492 wanted |= IONIC_ETH_HW_TSO_IPXIP4; 1493 if (features & NETIF_F_GSO_IPXIP6) 1494 wanted |= IONIC_ETH_HW_TSO_IPXIP6; 1495 if (features & NETIF_F_GSO_UDP_TUNNEL) 1496 wanted |= IONIC_ETH_HW_TSO_UDP; 1497 if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) 1498 wanted |= IONIC_ETH_HW_TSO_UDP_CSUM; 1499 1500 return cpu_to_le64(wanted); 1501 } 1502 1503 static int ionic_set_nic_features(struct ionic_lif *lif, 1504 netdev_features_t features) 1505 { 1506 struct device *dev = lif->ionic->dev; 1507 struct ionic_admin_ctx ctx = { 1508 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1509 .cmd.lif_setattr = { 1510 .opcode = IONIC_CMD_LIF_SETATTR, 1511 .index = cpu_to_le16(lif->index), 1512 .attr = IONIC_LIF_ATTR_FEATURES, 1513 }, 1514 }; 1515 u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG | 1516 IONIC_ETH_HW_VLAN_RX_STRIP | 1517 IONIC_ETH_HW_VLAN_RX_FILTER; 1518 u64 old_hw_features; 1519 int err; 1520 1521 ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features); 1522 1523 if (lif->phc) 1524 ctx.cmd.lif_setattr.features |= cpu_to_le64(IONIC_ETH_HW_TIMESTAMP); 1525 1526 err = ionic_adminq_post_wait(lif, &ctx); 1527 if (err) 1528 return err; 1529 1530 old_hw_features = lif->hw_features; 1531 lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features & 1532 ctx.comp.lif_setattr.features); 1533 1534 if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH) 1535 ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL); 1536 1537 if ((vlan_flags & le64_to_cpu(ctx.cmd.lif_setattr.features)) && 1538 !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features))) 1539 dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n"); 1540 1541 if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG) 1542 dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n"); 1543 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP) 1544 dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n"); 1545 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER) 1546 dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n"); 1547 if (lif->hw_features & IONIC_ETH_HW_RX_HASH) 1548 dev_dbg(dev, "feature ETH_HW_RX_HASH\n"); 1549 if (lif->hw_features & IONIC_ETH_HW_TX_SG) 1550 dev_dbg(dev, "feature ETH_HW_TX_SG\n"); 1551 if (lif->hw_features & IONIC_ETH_HW_TX_CSUM) 1552 dev_dbg(dev, "feature ETH_HW_TX_CSUM\n"); 1553 if (lif->hw_features & IONIC_ETH_HW_RX_CSUM) 1554 dev_dbg(dev, "feature ETH_HW_RX_CSUM\n"); 1555 if (lif->hw_features & IONIC_ETH_HW_TSO) 1556 dev_dbg(dev, "feature ETH_HW_TSO\n"); 1557 if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6) 1558 dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n"); 1559 if (lif->hw_features & IONIC_ETH_HW_TSO_ECN) 1560 dev_dbg(dev, "feature ETH_HW_TSO_ECN\n"); 1561 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE) 1562 dev_dbg(dev, "feature ETH_HW_TSO_GRE\n"); 1563 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM) 1564 dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n"); 1565 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4) 1566 dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n"); 1567 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6) 1568 dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n"); 1569 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP) 1570 dev_dbg(dev, "feature ETH_HW_TSO_UDP\n"); 1571 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM) 1572 dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n"); 1573 if (lif->hw_features & IONIC_ETH_HW_TIMESTAMP) 1574 dev_dbg(dev, "feature ETH_HW_TIMESTAMP\n"); 1575 1576 return 0; 1577 } 1578 1579 static int ionic_init_nic_features(struct ionic_lif *lif) 1580 { 1581 struct net_device *netdev = lif->netdev; 1582 netdev_features_t features; 1583 int err; 1584 1585 /* set up what we expect to support by default */ 1586 features = NETIF_F_HW_VLAN_CTAG_TX | 1587 NETIF_F_HW_VLAN_CTAG_RX | 1588 NETIF_F_HW_VLAN_CTAG_FILTER | 1589 NETIF_F_SG | 1590 NETIF_F_HW_CSUM | 1591 NETIF_F_RXCSUM | 1592 NETIF_F_TSO | 1593 NETIF_F_TSO6 | 1594 NETIF_F_TSO_ECN | 1595 NETIF_F_GSO_GRE | 1596 NETIF_F_GSO_GRE_CSUM | 1597 NETIF_F_GSO_IPXIP4 | 1598 NETIF_F_GSO_IPXIP6 | 1599 NETIF_F_GSO_UDP_TUNNEL | 1600 NETIF_F_GSO_UDP_TUNNEL_CSUM; 1601 1602 if (lif->nxqs > 1) 1603 features |= NETIF_F_RXHASH; 1604 1605 err = ionic_set_nic_features(lif, features); 1606 if (err) 1607 return err; 1608 1609 /* tell the netdev what we actually can support */ 1610 netdev->features |= NETIF_F_HIGHDMA; 1611 1612 if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG) 1613 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; 1614 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP) 1615 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; 1616 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER) 1617 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; 1618 if (lif->hw_features & IONIC_ETH_HW_RX_HASH) 1619 netdev->hw_features |= NETIF_F_RXHASH; 1620 if (lif->hw_features & IONIC_ETH_HW_TX_SG) 1621 netdev->hw_features |= NETIF_F_SG; 1622 1623 if (lif->hw_features & IONIC_ETH_HW_TX_CSUM) 1624 netdev->hw_enc_features |= NETIF_F_HW_CSUM; 1625 if (lif->hw_features & IONIC_ETH_HW_RX_CSUM) 1626 netdev->hw_enc_features |= NETIF_F_RXCSUM; 1627 if (lif->hw_features & IONIC_ETH_HW_TSO) 1628 netdev->hw_enc_features |= NETIF_F_TSO; 1629 if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6) 1630 netdev->hw_enc_features |= NETIF_F_TSO6; 1631 if (lif->hw_features & IONIC_ETH_HW_TSO_ECN) 1632 netdev->hw_enc_features |= NETIF_F_TSO_ECN; 1633 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE) 1634 netdev->hw_enc_features |= NETIF_F_GSO_GRE; 1635 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM) 1636 netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM; 1637 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4) 1638 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4; 1639 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6) 1640 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6; 1641 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP) 1642 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; 1643 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM) 1644 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; 1645 1646 netdev->hw_features |= netdev->hw_enc_features; 1647 netdev->features |= netdev->hw_features; 1648 netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES; 1649 1650 netdev->priv_flags |= IFF_UNICAST_FLT | 1651 IFF_LIVE_ADDR_CHANGE; 1652 1653 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | 1654 NETDEV_XDP_ACT_REDIRECT | 1655 NETDEV_XDP_ACT_RX_SG | 1656 NETDEV_XDP_ACT_NDO_XMIT | 1657 NETDEV_XDP_ACT_NDO_XMIT_SG; 1658 1659 return 0; 1660 } 1661 1662 static int ionic_set_features(struct net_device *netdev, 1663 netdev_features_t features) 1664 { 1665 struct ionic_lif *lif = netdev_priv(netdev); 1666 int err; 1667 1668 netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n", 1669 __func__, (u64)lif->netdev->features, (u64)features); 1670 1671 err = ionic_set_nic_features(lif, features); 1672 1673 return err; 1674 } 1675 1676 static int ionic_set_attr_mac(struct ionic_lif *lif, u8 *mac) 1677 { 1678 struct ionic_admin_ctx ctx = { 1679 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1680 .cmd.lif_setattr = { 1681 .opcode = IONIC_CMD_LIF_SETATTR, 1682 .index = cpu_to_le16(lif->index), 1683 .attr = IONIC_LIF_ATTR_MAC, 1684 }, 1685 }; 1686 1687 ether_addr_copy(ctx.cmd.lif_setattr.mac, mac); 1688 return ionic_adminq_post_wait(lif, &ctx); 1689 } 1690 1691 static int ionic_get_attr_mac(struct ionic_lif *lif, u8 *mac_addr) 1692 { 1693 struct ionic_admin_ctx ctx = { 1694 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1695 .cmd.lif_getattr = { 1696 .opcode = IONIC_CMD_LIF_GETATTR, 1697 .index = cpu_to_le16(lif->index), 1698 .attr = IONIC_LIF_ATTR_MAC, 1699 }, 1700 }; 1701 int err; 1702 1703 err = ionic_adminq_post_wait(lif, &ctx); 1704 if (err) 1705 return err; 1706 1707 ether_addr_copy(mac_addr, ctx.comp.lif_getattr.mac); 1708 return 0; 1709 } 1710 1711 static int ionic_program_mac(struct ionic_lif *lif, u8 *mac) 1712 { 1713 u8 get_mac[ETH_ALEN]; 1714 int err; 1715 1716 err = ionic_set_attr_mac(lif, mac); 1717 if (err) 1718 return err; 1719 1720 err = ionic_get_attr_mac(lif, get_mac); 1721 if (err) 1722 return err; 1723 1724 /* To deal with older firmware that silently ignores the set attr mac: 1725 * doesn't actually change the mac and doesn't return an error, so we 1726 * do the get attr to verify whether or not the set actually happened 1727 */ 1728 if (!ether_addr_equal(get_mac, mac)) 1729 return 1; 1730 1731 return 0; 1732 } 1733 1734 static int ionic_set_mac_address(struct net_device *netdev, void *sa) 1735 { 1736 struct ionic_lif *lif = netdev_priv(netdev); 1737 struct sockaddr *addr = sa; 1738 u8 *mac; 1739 int err; 1740 1741 mac = (u8 *)addr->sa_data; 1742 if (ether_addr_equal(netdev->dev_addr, mac)) 1743 return 0; 1744 1745 err = ionic_program_mac(lif, mac); 1746 if (err < 0) 1747 return err; 1748 1749 if (err > 0) 1750 netdev_dbg(netdev, "%s: SET and GET ATTR Mac are not equal-due to old FW running\n", 1751 __func__); 1752 1753 err = eth_prepare_mac_addr_change(netdev, addr); 1754 if (err) 1755 return err; 1756 1757 if (!is_zero_ether_addr(netdev->dev_addr)) { 1758 netdev_info(netdev, "deleting mac addr %pM\n", 1759 netdev->dev_addr); 1760 ionic_lif_addr_del(netdev_priv(netdev), netdev->dev_addr); 1761 } 1762 1763 eth_commit_mac_addr_change(netdev, addr); 1764 netdev_info(netdev, "updating mac addr %pM\n", mac); 1765 1766 return ionic_lif_addr_add(netdev_priv(netdev), mac); 1767 } 1768 1769 void ionic_stop_queues_reconfig(struct ionic_lif *lif) 1770 { 1771 /* Stop and clean the queues before reconfiguration */ 1772 netif_device_detach(lif->netdev); 1773 ionic_stop_queues(lif); 1774 ionic_txrx_deinit(lif); 1775 } 1776 1777 static int ionic_start_queues_reconfig(struct ionic_lif *lif) 1778 { 1779 int err; 1780 1781 /* Re-init the queues after reconfiguration */ 1782 1783 /* The only way txrx_init can fail here is if communication 1784 * with FW is suddenly broken. There's not much we can do 1785 * at this point - error messages have already been printed, 1786 * so we can continue on and the user can eventually do a 1787 * DOWN and UP to try to reset and clear the issue. 1788 */ 1789 err = ionic_txrx_init(lif); 1790 ionic_link_status_check_request(lif, CAN_NOT_SLEEP); 1791 netif_device_attach(lif->netdev); 1792 1793 return err; 1794 } 1795 1796 static bool ionic_xdp_is_valid_mtu(struct ionic_lif *lif, u32 mtu, 1797 struct bpf_prog *xdp_prog) 1798 { 1799 if (!xdp_prog) 1800 return true; 1801 1802 if (mtu <= IONIC_XDP_MAX_LINEAR_MTU) 1803 return true; 1804 1805 if (xdp_prog->aux && xdp_prog->aux->xdp_has_frags) 1806 return true; 1807 1808 return false; 1809 } 1810 1811 static int ionic_change_mtu(struct net_device *netdev, int new_mtu) 1812 { 1813 struct ionic_lif *lif = netdev_priv(netdev); 1814 struct ionic_admin_ctx ctx = { 1815 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1816 .cmd.lif_setattr = { 1817 .opcode = IONIC_CMD_LIF_SETATTR, 1818 .index = cpu_to_le16(lif->index), 1819 .attr = IONIC_LIF_ATTR_MTU, 1820 .mtu = cpu_to_le32(new_mtu), 1821 }, 1822 }; 1823 struct bpf_prog *xdp_prog; 1824 int err; 1825 1826 xdp_prog = READ_ONCE(lif->xdp_prog); 1827 if (!ionic_xdp_is_valid_mtu(lif, new_mtu, xdp_prog)) 1828 return -EINVAL; 1829 1830 err = ionic_adminq_post_wait(lif, &ctx); 1831 if (err) 1832 return err; 1833 1834 /* if we're not running, nothing more to do */ 1835 if (!netif_running(netdev)) { 1836 netdev->mtu = new_mtu; 1837 return 0; 1838 } 1839 1840 mutex_lock(&lif->queue_lock); 1841 ionic_stop_queues_reconfig(lif); 1842 netdev->mtu = new_mtu; 1843 err = ionic_start_queues_reconfig(lif); 1844 mutex_unlock(&lif->queue_lock); 1845 1846 return err; 1847 } 1848 1849 static void ionic_tx_timeout_work(struct work_struct *ws) 1850 { 1851 struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work); 1852 int err; 1853 1854 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 1855 return; 1856 1857 /* if we were stopped before this scheduled job was launched, 1858 * don't bother the queues as they are already stopped. 1859 */ 1860 if (!netif_running(lif->netdev)) 1861 return; 1862 1863 mutex_lock(&lif->queue_lock); 1864 ionic_stop_queues_reconfig(lif); 1865 err = ionic_start_queues_reconfig(lif); 1866 mutex_unlock(&lif->queue_lock); 1867 1868 if (err) 1869 dev_err(lif->ionic->dev, "%s: Restarting queues failed\n", __func__); 1870 } 1871 1872 static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue) 1873 { 1874 struct ionic_lif *lif = netdev_priv(netdev); 1875 1876 netdev_info(lif->netdev, "Tx Timeout triggered - txq %d\n", txqueue); 1877 schedule_work(&lif->tx_timeout_work); 1878 } 1879 1880 static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, 1881 u16 vid) 1882 { 1883 struct ionic_lif *lif = netdev_priv(netdev); 1884 int err; 1885 1886 err = ionic_lif_vlan_add(lif, vid); 1887 if (err) 1888 return err; 1889 1890 ionic_lif_rx_mode(lif); 1891 1892 return 0; 1893 } 1894 1895 static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, 1896 u16 vid) 1897 { 1898 struct ionic_lif *lif = netdev_priv(netdev); 1899 int err; 1900 1901 err = ionic_lif_vlan_del(lif, vid); 1902 if (err) 1903 return err; 1904 1905 ionic_lif_rx_mode(lif); 1906 1907 return 0; 1908 } 1909 1910 int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types, 1911 const u8 *key, const u32 *indir) 1912 { 1913 struct ionic_admin_ctx ctx = { 1914 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1915 .cmd.lif_setattr = { 1916 .opcode = IONIC_CMD_LIF_SETATTR, 1917 .attr = IONIC_LIF_ATTR_RSS, 1918 .rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa), 1919 }, 1920 }; 1921 unsigned int i, tbl_sz; 1922 1923 if (lif->hw_features & IONIC_ETH_HW_RX_HASH) { 1924 lif->rss_types = types; 1925 ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types); 1926 } 1927 1928 if (key) 1929 memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE); 1930 1931 if (indir) { 1932 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 1933 for (i = 0; i < tbl_sz; i++) 1934 lif->rss_ind_tbl[i] = indir[i]; 1935 } 1936 1937 memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key, 1938 IONIC_RSS_HASH_KEY_SIZE); 1939 1940 return ionic_adminq_post_wait(lif, &ctx); 1941 } 1942 1943 static int ionic_lif_rss_init(struct ionic_lif *lif) 1944 { 1945 unsigned int tbl_sz; 1946 unsigned int i; 1947 1948 lif->rss_types = IONIC_RSS_TYPE_IPV4 | 1949 IONIC_RSS_TYPE_IPV4_TCP | 1950 IONIC_RSS_TYPE_IPV4_UDP | 1951 IONIC_RSS_TYPE_IPV6 | 1952 IONIC_RSS_TYPE_IPV6_TCP | 1953 IONIC_RSS_TYPE_IPV6_UDP; 1954 1955 /* Fill indirection table with 'default' values */ 1956 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 1957 for (i = 0; i < tbl_sz; i++) 1958 lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs); 1959 1960 return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL); 1961 } 1962 1963 static void ionic_lif_rss_deinit(struct ionic_lif *lif) 1964 { 1965 int tbl_sz; 1966 1967 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 1968 memset(lif->rss_ind_tbl, 0, tbl_sz); 1969 memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE); 1970 1971 ionic_lif_rss_config(lif, 0x0, NULL, NULL); 1972 } 1973 1974 static void ionic_lif_quiesce(struct ionic_lif *lif) 1975 { 1976 struct ionic_admin_ctx ctx = { 1977 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1978 .cmd.lif_setattr = { 1979 .opcode = IONIC_CMD_LIF_SETATTR, 1980 .index = cpu_to_le16(lif->index), 1981 .attr = IONIC_LIF_ATTR_STATE, 1982 .state = IONIC_LIF_QUIESCE, 1983 }, 1984 }; 1985 int err; 1986 1987 err = ionic_adminq_post_wait(lif, &ctx); 1988 if (err) 1989 netdev_dbg(lif->netdev, "lif quiesce failed %d\n", err); 1990 } 1991 1992 static void ionic_txrx_disable(struct ionic_lif *lif) 1993 { 1994 unsigned int i; 1995 int err = 0; 1996 1997 if (lif->txqcqs) { 1998 for (i = 0; i < lif->nxqs; i++) 1999 err = ionic_qcq_disable(lif, lif->txqcqs[i], err); 2000 } 2001 2002 if (lif->hwstamp_txq) 2003 err = ionic_qcq_disable(lif, lif->hwstamp_txq, err); 2004 2005 if (lif->rxqcqs) { 2006 for (i = 0; i < lif->nxqs; i++) 2007 err = ionic_qcq_disable(lif, lif->rxqcqs[i], err); 2008 } 2009 2010 if (lif->hwstamp_rxq) 2011 err = ionic_qcq_disable(lif, lif->hwstamp_rxq, err); 2012 2013 ionic_lif_quiesce(lif); 2014 } 2015 2016 static void ionic_txrx_deinit(struct ionic_lif *lif) 2017 { 2018 unsigned int i; 2019 2020 if (lif->txqcqs) { 2021 for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) { 2022 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); 2023 ionic_tx_flush(&lif->txqcqs[i]->cq); 2024 ionic_tx_empty(&lif->txqcqs[i]->q); 2025 } 2026 } 2027 2028 if (lif->rxqcqs) { 2029 for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) { 2030 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]); 2031 ionic_rx_empty(&lif->rxqcqs[i]->q); 2032 } 2033 } 2034 lif->rx_mode = 0; 2035 2036 if (lif->hwstamp_txq) { 2037 ionic_lif_qcq_deinit(lif, lif->hwstamp_txq); 2038 ionic_tx_flush(&lif->hwstamp_txq->cq); 2039 ionic_tx_empty(&lif->hwstamp_txq->q); 2040 } 2041 2042 if (lif->hwstamp_rxq) { 2043 ionic_lif_qcq_deinit(lif, lif->hwstamp_rxq); 2044 ionic_rx_empty(&lif->hwstamp_rxq->q); 2045 } 2046 } 2047 2048 void ionic_txrx_free(struct ionic_lif *lif) 2049 { 2050 unsigned int i; 2051 2052 if (lif->txqcqs) { 2053 for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) { 2054 ionic_qcq_free(lif, lif->txqcqs[i]); 2055 devm_kfree(lif->ionic->dev, lif->txqcqs[i]); 2056 lif->txqcqs[i] = NULL; 2057 } 2058 } 2059 2060 if (lif->rxqcqs) { 2061 for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) { 2062 ionic_qcq_free(lif, lif->rxqcqs[i]); 2063 devm_kfree(lif->ionic->dev, lif->rxqcqs[i]); 2064 lif->rxqcqs[i] = NULL; 2065 } 2066 } 2067 2068 if (lif->hwstamp_txq) { 2069 ionic_qcq_free(lif, lif->hwstamp_txq); 2070 devm_kfree(lif->ionic->dev, lif->hwstamp_txq); 2071 lif->hwstamp_txq = NULL; 2072 } 2073 2074 if (lif->hwstamp_rxq) { 2075 ionic_qcq_free(lif, lif->hwstamp_rxq); 2076 devm_kfree(lif->ionic->dev, lif->hwstamp_rxq); 2077 lif->hwstamp_rxq = NULL; 2078 } 2079 } 2080 2081 static int ionic_txrx_alloc(struct ionic_lif *lif) 2082 { 2083 unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz; 2084 unsigned int flags, i; 2085 int err = 0; 2086 2087 num_desc = lif->ntxq_descs; 2088 desc_sz = sizeof(struct ionic_txq_desc); 2089 comp_sz = sizeof(struct ionic_txq_comp); 2090 2091 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 && 2092 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == 2093 sizeof(struct ionic_txq_sg_desc_v1)) 2094 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1); 2095 else 2096 sg_desc_sz = sizeof(struct ionic_txq_sg_desc); 2097 2098 flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; 2099 2100 if (test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state)) 2101 flags |= IONIC_QCQ_F_CMB_RINGS; 2102 2103 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) 2104 flags |= IONIC_QCQ_F_INTR; 2105 2106 for (i = 0; i < lif->nxqs; i++) { 2107 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 2108 num_desc, desc_sz, comp_sz, sg_desc_sz, 2109 lif->kern_pid, &lif->txqcqs[i]); 2110 if (err) 2111 goto err_out; 2112 2113 if (flags & IONIC_QCQ_F_INTR) { 2114 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 2115 lif->txqcqs[i]->intr.index, 2116 lif->tx_coalesce_hw); 2117 if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state)) 2118 lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw; 2119 } 2120 2121 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]); 2122 } 2123 2124 flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR; 2125 2126 if (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state)) 2127 flags |= IONIC_QCQ_F_CMB_RINGS; 2128 2129 num_desc = lif->nrxq_descs; 2130 desc_sz = sizeof(struct ionic_rxq_desc); 2131 comp_sz = sizeof(struct ionic_rxq_comp); 2132 sg_desc_sz = sizeof(struct ionic_rxq_sg_desc); 2133 2134 if (lif->rxq_features & IONIC_Q_F_2X_CQ_DESC) 2135 comp_sz *= 2; 2136 2137 for (i = 0; i < lif->nxqs; i++) { 2138 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 2139 num_desc, desc_sz, comp_sz, sg_desc_sz, 2140 lif->kern_pid, &lif->rxqcqs[i]); 2141 if (err) 2142 goto err_out; 2143 2144 lif->rxqcqs[i]->q.features = lif->rxq_features; 2145 2146 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 2147 lif->rxqcqs[i]->intr.index, 2148 lif->rx_coalesce_hw); 2149 if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state)) 2150 lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw; 2151 2152 if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) 2153 ionic_link_qcq_interrupts(lif->rxqcqs[i], 2154 lif->txqcqs[i]); 2155 2156 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]); 2157 } 2158 2159 return 0; 2160 2161 err_out: 2162 ionic_txrx_free(lif); 2163 2164 return err; 2165 } 2166 2167 static int ionic_txrx_init(struct ionic_lif *lif) 2168 { 2169 unsigned int i; 2170 int err; 2171 2172 for (i = 0; i < lif->nxqs; i++) { 2173 err = ionic_lif_txq_init(lif, lif->txqcqs[i]); 2174 if (err) 2175 goto err_out; 2176 2177 err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]); 2178 if (err) { 2179 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); 2180 goto err_out; 2181 } 2182 } 2183 2184 if (lif->netdev->features & NETIF_F_RXHASH) 2185 ionic_lif_rss_init(lif); 2186 2187 ionic_lif_rx_mode(lif); 2188 2189 return 0; 2190 2191 err_out: 2192 while (i--) { 2193 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); 2194 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]); 2195 } 2196 2197 return err; 2198 } 2199 2200 static int ionic_txrx_enable(struct ionic_lif *lif) 2201 { 2202 int derr = 0; 2203 int i, err; 2204 2205 err = ionic_xdp_queues_config(lif); 2206 if (err) 2207 return err; 2208 2209 for (i = 0; i < lif->nxqs; i++) { 2210 if (!(lif->rxqcqs[i] && lif->txqcqs[i])) { 2211 dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i); 2212 err = -ENXIO; 2213 goto err_out; 2214 } 2215 2216 ionic_rx_fill(&lif->rxqcqs[i]->q); 2217 err = ionic_qcq_enable(lif->rxqcqs[i]); 2218 if (err) 2219 goto err_out; 2220 2221 err = ionic_qcq_enable(lif->txqcqs[i]); 2222 if (err) { 2223 derr = ionic_qcq_disable(lif, lif->rxqcqs[i], err); 2224 goto err_out; 2225 } 2226 } 2227 2228 if (lif->hwstamp_rxq) { 2229 ionic_rx_fill(&lif->hwstamp_rxq->q); 2230 err = ionic_qcq_enable(lif->hwstamp_rxq); 2231 if (err) 2232 goto err_out_hwstamp_rx; 2233 } 2234 2235 if (lif->hwstamp_txq) { 2236 err = ionic_qcq_enable(lif->hwstamp_txq); 2237 if (err) 2238 goto err_out_hwstamp_tx; 2239 } 2240 2241 return 0; 2242 2243 err_out_hwstamp_tx: 2244 if (lif->hwstamp_rxq) 2245 derr = ionic_qcq_disable(lif, lif->hwstamp_rxq, derr); 2246 err_out_hwstamp_rx: 2247 i = lif->nxqs; 2248 err_out: 2249 while (i--) { 2250 derr = ionic_qcq_disable(lif, lif->txqcqs[i], derr); 2251 derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr); 2252 } 2253 2254 ionic_xdp_queues_config(lif); 2255 2256 return err; 2257 } 2258 2259 static int ionic_start_queues(struct ionic_lif *lif) 2260 { 2261 int err; 2262 2263 if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) 2264 return -EIO; 2265 2266 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 2267 return -EBUSY; 2268 2269 if (test_and_set_bit(IONIC_LIF_F_UP, lif->state)) 2270 return 0; 2271 2272 err = ionic_txrx_enable(lif); 2273 if (err) { 2274 clear_bit(IONIC_LIF_F_UP, lif->state); 2275 return err; 2276 } 2277 netif_tx_wake_all_queues(lif->netdev); 2278 2279 return 0; 2280 } 2281 2282 static int ionic_open(struct net_device *netdev) 2283 { 2284 struct ionic_lif *lif = netdev_priv(netdev); 2285 int err; 2286 2287 /* If recovering from a broken state, clear the bit and we'll try again */ 2288 if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state)) 2289 netdev_info(netdev, "clearing broken state\n"); 2290 2291 mutex_lock(&lif->queue_lock); 2292 2293 err = ionic_txrx_alloc(lif); 2294 if (err) 2295 goto err_unlock; 2296 2297 err = ionic_txrx_init(lif); 2298 if (err) 2299 goto err_txrx_free; 2300 2301 err = netif_set_real_num_tx_queues(netdev, lif->nxqs); 2302 if (err) 2303 goto err_txrx_deinit; 2304 2305 err = netif_set_real_num_rx_queues(netdev, lif->nxqs); 2306 if (err) 2307 goto err_txrx_deinit; 2308 2309 /* don't start the queues until we have link */ 2310 if (netif_carrier_ok(netdev)) { 2311 err = ionic_start_queues(lif); 2312 if (err) 2313 goto err_txrx_deinit; 2314 } 2315 2316 /* If hardware timestamping is enabled, but the queues were freed by 2317 * ionic_stop, those need to be reallocated and initialized, too. 2318 */ 2319 ionic_lif_hwstamp_recreate_queues(lif); 2320 2321 mutex_unlock(&lif->queue_lock); 2322 2323 return 0; 2324 2325 err_txrx_deinit: 2326 ionic_txrx_deinit(lif); 2327 err_txrx_free: 2328 ionic_txrx_free(lif); 2329 err_unlock: 2330 mutex_unlock(&lif->queue_lock); 2331 return err; 2332 } 2333 2334 static void ionic_stop_queues(struct ionic_lif *lif) 2335 { 2336 if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state)) 2337 return; 2338 2339 netif_tx_disable(lif->netdev); 2340 ionic_txrx_disable(lif); 2341 } 2342 2343 static int ionic_stop(struct net_device *netdev) 2344 { 2345 struct ionic_lif *lif = netdev_priv(netdev); 2346 2347 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 2348 return 0; 2349 2350 mutex_lock(&lif->queue_lock); 2351 ionic_stop_queues(lif); 2352 ionic_txrx_deinit(lif); 2353 ionic_txrx_free(lif); 2354 mutex_unlock(&lif->queue_lock); 2355 2356 return 0; 2357 } 2358 2359 static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2360 { 2361 struct ionic_lif *lif = netdev_priv(netdev); 2362 2363 switch (cmd) { 2364 case SIOCSHWTSTAMP: 2365 return ionic_lif_hwstamp_set(lif, ifr); 2366 case SIOCGHWTSTAMP: 2367 return ionic_lif_hwstamp_get(lif, ifr); 2368 default: 2369 return -EOPNOTSUPP; 2370 } 2371 } 2372 2373 static int ionic_get_vf_config(struct net_device *netdev, 2374 int vf, struct ifla_vf_info *ivf) 2375 { 2376 struct ionic_lif *lif = netdev_priv(netdev); 2377 struct ionic *ionic = lif->ionic; 2378 int ret = 0; 2379 2380 if (!netif_device_present(netdev)) 2381 return -EBUSY; 2382 2383 down_read(&ionic->vf_op_lock); 2384 2385 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2386 ret = -EINVAL; 2387 } else { 2388 struct ionic_vf *vfdata = &ionic->vfs[vf]; 2389 2390 ivf->vf = vf; 2391 ivf->qos = 0; 2392 ivf->vlan = le16_to_cpu(vfdata->vlanid); 2393 ivf->spoofchk = vfdata->spoofchk; 2394 ivf->linkstate = vfdata->linkstate; 2395 ivf->max_tx_rate = le32_to_cpu(vfdata->maxrate); 2396 ivf->trusted = vfdata->trusted; 2397 ether_addr_copy(ivf->mac, vfdata->macaddr); 2398 } 2399 2400 up_read(&ionic->vf_op_lock); 2401 return ret; 2402 } 2403 2404 static int ionic_get_vf_stats(struct net_device *netdev, int vf, 2405 struct ifla_vf_stats *vf_stats) 2406 { 2407 struct ionic_lif *lif = netdev_priv(netdev); 2408 struct ionic *ionic = lif->ionic; 2409 struct ionic_lif_stats *vs; 2410 int ret = 0; 2411 2412 if (!netif_device_present(netdev)) 2413 return -EBUSY; 2414 2415 down_read(&ionic->vf_op_lock); 2416 2417 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2418 ret = -EINVAL; 2419 } else { 2420 memset(vf_stats, 0, sizeof(*vf_stats)); 2421 vs = &ionic->vfs[vf].stats; 2422 2423 vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets); 2424 vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets); 2425 vf_stats->rx_bytes = le64_to_cpu(vs->rx_ucast_bytes); 2426 vf_stats->tx_bytes = le64_to_cpu(vs->tx_ucast_bytes); 2427 vf_stats->broadcast = le64_to_cpu(vs->rx_bcast_packets); 2428 vf_stats->multicast = le64_to_cpu(vs->rx_mcast_packets); 2429 vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) + 2430 le64_to_cpu(vs->rx_mcast_drop_packets) + 2431 le64_to_cpu(vs->rx_bcast_drop_packets); 2432 vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) + 2433 le64_to_cpu(vs->tx_mcast_drop_packets) + 2434 le64_to_cpu(vs->tx_bcast_drop_packets); 2435 } 2436 2437 up_read(&ionic->vf_op_lock); 2438 return ret; 2439 } 2440 2441 static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) 2442 { 2443 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_MAC }; 2444 struct ionic_lif *lif = netdev_priv(netdev); 2445 struct ionic *ionic = lif->ionic; 2446 int ret; 2447 2448 if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac))) 2449 return -EINVAL; 2450 2451 if (!netif_device_present(netdev)) 2452 return -EBUSY; 2453 2454 down_write(&ionic->vf_op_lock); 2455 2456 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2457 ret = -EINVAL; 2458 } else { 2459 ether_addr_copy(vfc.macaddr, mac); 2460 dev_dbg(ionic->dev, "%s: vf %d macaddr %pM\n", 2461 __func__, vf, vfc.macaddr); 2462 2463 ret = ionic_set_vf_config(ionic, vf, &vfc); 2464 if (!ret) 2465 ether_addr_copy(ionic->vfs[vf].macaddr, mac); 2466 } 2467 2468 up_write(&ionic->vf_op_lock); 2469 return ret; 2470 } 2471 2472 static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, 2473 u8 qos, __be16 proto) 2474 { 2475 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_VLAN }; 2476 struct ionic_lif *lif = netdev_priv(netdev); 2477 struct ionic *ionic = lif->ionic; 2478 int ret; 2479 2480 /* until someday when we support qos */ 2481 if (qos) 2482 return -EINVAL; 2483 2484 if (vlan > 4095) 2485 return -EINVAL; 2486 2487 if (proto != htons(ETH_P_8021Q)) 2488 return -EPROTONOSUPPORT; 2489 2490 if (!netif_device_present(netdev)) 2491 return -EBUSY; 2492 2493 down_write(&ionic->vf_op_lock); 2494 2495 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2496 ret = -EINVAL; 2497 } else { 2498 vfc.vlanid = cpu_to_le16(vlan); 2499 dev_dbg(ionic->dev, "%s: vf %d vlan %d\n", 2500 __func__, vf, le16_to_cpu(vfc.vlanid)); 2501 2502 ret = ionic_set_vf_config(ionic, vf, &vfc); 2503 if (!ret) 2504 ionic->vfs[vf].vlanid = cpu_to_le16(vlan); 2505 } 2506 2507 up_write(&ionic->vf_op_lock); 2508 return ret; 2509 } 2510 2511 static int ionic_set_vf_rate(struct net_device *netdev, int vf, 2512 int tx_min, int tx_max) 2513 { 2514 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_RATE }; 2515 struct ionic_lif *lif = netdev_priv(netdev); 2516 struct ionic *ionic = lif->ionic; 2517 int ret; 2518 2519 /* setting the min just seems silly */ 2520 if (tx_min) 2521 return -EINVAL; 2522 2523 if (!netif_device_present(netdev)) 2524 return -EBUSY; 2525 2526 down_write(&ionic->vf_op_lock); 2527 2528 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2529 ret = -EINVAL; 2530 } else { 2531 vfc.maxrate = cpu_to_le32(tx_max); 2532 dev_dbg(ionic->dev, "%s: vf %d maxrate %d\n", 2533 __func__, vf, le32_to_cpu(vfc.maxrate)); 2534 2535 ret = ionic_set_vf_config(ionic, vf, &vfc); 2536 if (!ret) 2537 ionic->vfs[vf].maxrate = cpu_to_le32(tx_max); 2538 } 2539 2540 up_write(&ionic->vf_op_lock); 2541 return ret; 2542 } 2543 2544 static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set) 2545 { 2546 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_SPOOFCHK }; 2547 struct ionic_lif *lif = netdev_priv(netdev); 2548 struct ionic *ionic = lif->ionic; 2549 int ret; 2550 2551 if (!netif_device_present(netdev)) 2552 return -EBUSY; 2553 2554 down_write(&ionic->vf_op_lock); 2555 2556 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2557 ret = -EINVAL; 2558 } else { 2559 vfc.spoofchk = set; 2560 dev_dbg(ionic->dev, "%s: vf %d spoof %d\n", 2561 __func__, vf, vfc.spoofchk); 2562 2563 ret = ionic_set_vf_config(ionic, vf, &vfc); 2564 if (!ret) 2565 ionic->vfs[vf].spoofchk = set; 2566 } 2567 2568 up_write(&ionic->vf_op_lock); 2569 return ret; 2570 } 2571 2572 static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set) 2573 { 2574 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_TRUST }; 2575 struct ionic_lif *lif = netdev_priv(netdev); 2576 struct ionic *ionic = lif->ionic; 2577 int ret; 2578 2579 if (!netif_device_present(netdev)) 2580 return -EBUSY; 2581 2582 down_write(&ionic->vf_op_lock); 2583 2584 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2585 ret = -EINVAL; 2586 } else { 2587 vfc.trust = set; 2588 dev_dbg(ionic->dev, "%s: vf %d trust %d\n", 2589 __func__, vf, vfc.trust); 2590 2591 ret = ionic_set_vf_config(ionic, vf, &vfc); 2592 if (!ret) 2593 ionic->vfs[vf].trusted = set; 2594 } 2595 2596 up_write(&ionic->vf_op_lock); 2597 return ret; 2598 } 2599 2600 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set) 2601 { 2602 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_LINKSTATE }; 2603 struct ionic_lif *lif = netdev_priv(netdev); 2604 struct ionic *ionic = lif->ionic; 2605 u8 vfls; 2606 int ret; 2607 2608 switch (set) { 2609 case IFLA_VF_LINK_STATE_ENABLE: 2610 vfls = IONIC_VF_LINK_STATUS_UP; 2611 break; 2612 case IFLA_VF_LINK_STATE_DISABLE: 2613 vfls = IONIC_VF_LINK_STATUS_DOWN; 2614 break; 2615 case IFLA_VF_LINK_STATE_AUTO: 2616 vfls = IONIC_VF_LINK_STATUS_AUTO; 2617 break; 2618 default: 2619 return -EINVAL; 2620 } 2621 2622 if (!netif_device_present(netdev)) 2623 return -EBUSY; 2624 2625 down_write(&ionic->vf_op_lock); 2626 2627 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2628 ret = -EINVAL; 2629 } else { 2630 vfc.linkstate = vfls; 2631 dev_dbg(ionic->dev, "%s: vf %d linkstate %d\n", 2632 __func__, vf, vfc.linkstate); 2633 2634 ret = ionic_set_vf_config(ionic, vf, &vfc); 2635 if (!ret) 2636 ionic->vfs[vf].linkstate = set; 2637 } 2638 2639 up_write(&ionic->vf_op_lock); 2640 return ret; 2641 } 2642 2643 static void ionic_vf_attr_replay(struct ionic_lif *lif) 2644 { 2645 struct ionic_vf_setattr_cmd vfc = { }; 2646 struct ionic *ionic = lif->ionic; 2647 struct ionic_vf *v; 2648 int i; 2649 2650 if (!ionic->vfs) 2651 return; 2652 2653 down_read(&ionic->vf_op_lock); 2654 2655 for (i = 0; i < ionic->num_vfs; i++) { 2656 v = &ionic->vfs[i]; 2657 2658 if (v->stats_pa) { 2659 vfc.attr = IONIC_VF_ATTR_STATSADDR; 2660 vfc.stats_pa = cpu_to_le64(v->stats_pa); 2661 ionic_set_vf_config(ionic, i, &vfc); 2662 vfc.stats_pa = 0; 2663 } 2664 2665 if (!is_zero_ether_addr(v->macaddr)) { 2666 vfc.attr = IONIC_VF_ATTR_MAC; 2667 ether_addr_copy(vfc.macaddr, v->macaddr); 2668 ionic_set_vf_config(ionic, i, &vfc); 2669 eth_zero_addr(vfc.macaddr); 2670 } 2671 2672 if (v->vlanid) { 2673 vfc.attr = IONIC_VF_ATTR_VLAN; 2674 vfc.vlanid = v->vlanid; 2675 ionic_set_vf_config(ionic, i, &vfc); 2676 vfc.vlanid = 0; 2677 } 2678 2679 if (v->maxrate) { 2680 vfc.attr = IONIC_VF_ATTR_RATE; 2681 vfc.maxrate = v->maxrate; 2682 ionic_set_vf_config(ionic, i, &vfc); 2683 vfc.maxrate = 0; 2684 } 2685 2686 if (v->spoofchk) { 2687 vfc.attr = IONIC_VF_ATTR_SPOOFCHK; 2688 vfc.spoofchk = v->spoofchk; 2689 ionic_set_vf_config(ionic, i, &vfc); 2690 vfc.spoofchk = 0; 2691 } 2692 2693 if (v->trusted) { 2694 vfc.attr = IONIC_VF_ATTR_TRUST; 2695 vfc.trust = v->trusted; 2696 ionic_set_vf_config(ionic, i, &vfc); 2697 vfc.trust = 0; 2698 } 2699 2700 if (v->linkstate) { 2701 vfc.attr = IONIC_VF_ATTR_LINKSTATE; 2702 vfc.linkstate = v->linkstate; 2703 ionic_set_vf_config(ionic, i, &vfc); 2704 vfc.linkstate = 0; 2705 } 2706 } 2707 2708 up_read(&ionic->vf_op_lock); 2709 2710 ionic_vf_start(ionic); 2711 } 2712 2713 static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q) 2714 { 2715 struct xdp_rxq_info *xi; 2716 2717 if (!q->xdp_rxq_info) 2718 return; 2719 2720 xi = q->xdp_rxq_info; 2721 q->xdp_rxq_info = NULL; 2722 2723 xdp_rxq_info_unreg(xi); 2724 kfree(xi); 2725 } 2726 2727 static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_id) 2728 { 2729 struct xdp_rxq_info *rxq_info; 2730 int err; 2731 2732 rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL); 2733 if (!rxq_info) 2734 return -ENOMEM; 2735 2736 err = xdp_rxq_info_reg(rxq_info, q->lif->netdev, q->index, napi_id); 2737 if (err) { 2738 dev_err(q->dev, "Queue %d xdp_rxq_info_reg failed, err %d\n", 2739 q->index, err); 2740 goto err_out; 2741 } 2742 2743 err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_ORDER0, NULL); 2744 if (err) { 2745 dev_err(q->dev, "Queue %d xdp_rxq_info_reg_mem_model failed, err %d\n", 2746 q->index, err); 2747 xdp_rxq_info_unreg(rxq_info); 2748 goto err_out; 2749 } 2750 2751 q->xdp_rxq_info = rxq_info; 2752 2753 return 0; 2754 2755 err_out: 2756 kfree(rxq_info); 2757 return err; 2758 } 2759 2760 static int ionic_xdp_queues_config(struct ionic_lif *lif) 2761 { 2762 unsigned int i; 2763 int err; 2764 2765 if (!lif->rxqcqs) 2766 return 0; 2767 2768 /* There's no need to rework memory if not going to/from NULL program. 2769 * If there is no lif->xdp_prog, there should also be no q.xdp_rxq_info 2770 * This way we don't need to keep an *xdp_prog in every queue struct. 2771 */ 2772 if (!lif->xdp_prog == !lif->rxqcqs[0]->q.xdp_rxq_info) 2773 return 0; 2774 2775 for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) { 2776 struct ionic_queue *q = &lif->rxqcqs[i]->q; 2777 2778 if (q->xdp_rxq_info) { 2779 ionic_xdp_unregister_rxq_info(q); 2780 continue; 2781 } 2782 2783 err = ionic_xdp_register_rxq_info(q, lif->rxqcqs[i]->napi.napi_id); 2784 if (err) { 2785 dev_err(lif->ionic->dev, "failed to register RX queue %d info for XDP, err %d\n", 2786 i, err); 2787 goto err_out; 2788 } 2789 } 2790 2791 return 0; 2792 2793 err_out: 2794 for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) 2795 ionic_xdp_unregister_rxq_info(&lif->rxqcqs[i]->q); 2796 2797 return err; 2798 } 2799 2800 static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf) 2801 { 2802 struct ionic_lif *lif = netdev_priv(netdev); 2803 struct bpf_prog *old_prog; 2804 u32 maxfs; 2805 2806 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) { 2807 #define XDP_ERR_SPLIT "XDP not available with split Tx/Rx interrupts" 2808 NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_SPLIT); 2809 netdev_info(lif->netdev, XDP_ERR_SPLIT); 2810 return -EOPNOTSUPP; 2811 } 2812 2813 if (!ionic_xdp_is_valid_mtu(lif, netdev->mtu, bpf->prog)) { 2814 #define XDP_ERR_MTU "MTU is too large for XDP without frags support" 2815 NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_MTU); 2816 netdev_info(lif->netdev, XDP_ERR_MTU); 2817 return -EINVAL; 2818 } 2819 2820 maxfs = __le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN; 2821 if (bpf->prog && !(bpf->prog->aux && bpf->prog->aux->xdp_has_frags)) 2822 maxfs = min_t(u32, maxfs, IONIC_XDP_MAX_LINEAR_MTU); 2823 netdev->max_mtu = maxfs; 2824 2825 if (!netif_running(netdev)) { 2826 old_prog = xchg(&lif->xdp_prog, bpf->prog); 2827 } else { 2828 mutex_lock(&lif->queue_lock); 2829 ionic_stop_queues_reconfig(lif); 2830 old_prog = xchg(&lif->xdp_prog, bpf->prog); 2831 ionic_start_queues_reconfig(lif); 2832 mutex_unlock(&lif->queue_lock); 2833 } 2834 2835 if (old_prog) 2836 bpf_prog_put(old_prog); 2837 2838 return 0; 2839 } 2840 2841 static int ionic_xdp(struct net_device *netdev, struct netdev_bpf *bpf) 2842 { 2843 switch (bpf->command) { 2844 case XDP_SETUP_PROG: 2845 return ionic_xdp_config(netdev, bpf); 2846 default: 2847 return -EINVAL; 2848 } 2849 } 2850 2851 static const struct net_device_ops ionic_netdev_ops = { 2852 .ndo_open = ionic_open, 2853 .ndo_stop = ionic_stop, 2854 .ndo_eth_ioctl = ionic_eth_ioctl, 2855 .ndo_start_xmit = ionic_start_xmit, 2856 .ndo_bpf = ionic_xdp, 2857 .ndo_xdp_xmit = ionic_xdp_xmit, 2858 .ndo_get_stats64 = ionic_get_stats64, 2859 .ndo_set_rx_mode = ionic_ndo_set_rx_mode, 2860 .ndo_set_features = ionic_set_features, 2861 .ndo_set_mac_address = ionic_set_mac_address, 2862 .ndo_validate_addr = eth_validate_addr, 2863 .ndo_tx_timeout = ionic_tx_timeout, 2864 .ndo_change_mtu = ionic_change_mtu, 2865 .ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid, 2866 .ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid, 2867 .ndo_set_vf_vlan = ionic_set_vf_vlan, 2868 .ndo_set_vf_trust = ionic_set_vf_trust, 2869 .ndo_set_vf_mac = ionic_set_vf_mac, 2870 .ndo_set_vf_rate = ionic_set_vf_rate, 2871 .ndo_set_vf_spoofchk = ionic_set_vf_spoofchk, 2872 .ndo_get_vf_config = ionic_get_vf_config, 2873 .ndo_set_vf_link_state = ionic_set_vf_link_state, 2874 .ndo_get_vf_stats = ionic_get_vf_stats, 2875 }; 2876 2877 static int ionic_cmb_reconfig(struct ionic_lif *lif, 2878 struct ionic_queue_params *qparam) 2879 { 2880 struct ionic_queue_params start_qparams; 2881 int err = 0; 2882 2883 /* When changing CMB queue parameters, we're using limited 2884 * on-device memory and don't have extra memory to use for 2885 * duplicate allocations, so we free it all first then 2886 * re-allocate with the new parameters. 2887 */ 2888 2889 /* Checkpoint for possible unwind */ 2890 ionic_init_queue_params(lif, &start_qparams); 2891 2892 /* Stop and free the queues */ 2893 ionic_stop_queues_reconfig(lif); 2894 ionic_txrx_free(lif); 2895 2896 /* Set up new qparams */ 2897 ionic_set_queue_params(lif, qparam); 2898 2899 if (netif_running(lif->netdev)) { 2900 /* Alloc and start the new configuration */ 2901 err = ionic_txrx_alloc(lif); 2902 if (err) { 2903 dev_warn(lif->ionic->dev, 2904 "CMB reconfig failed, restoring values: %d\n", err); 2905 2906 /* Back out the changes */ 2907 ionic_set_queue_params(lif, &start_qparams); 2908 err = ionic_txrx_alloc(lif); 2909 if (err) { 2910 dev_err(lif->ionic->dev, 2911 "CMB restore failed: %d\n", err); 2912 goto err_out; 2913 } 2914 } 2915 2916 err = ionic_start_queues_reconfig(lif); 2917 if (err) { 2918 dev_err(lif->ionic->dev, 2919 "CMB reconfig failed: %d\n", err); 2920 goto err_out; 2921 } 2922 } 2923 2924 err_out: 2925 /* This was detached in ionic_stop_queues_reconfig() */ 2926 netif_device_attach(lif->netdev); 2927 2928 return err; 2929 } 2930 2931 static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b) 2932 { 2933 /* only swapping the queues, not the napi, flags, or other stuff */ 2934 swap(a->q.features, b->q.features); 2935 swap(a->q.num_descs, b->q.num_descs); 2936 swap(a->q.desc_size, b->q.desc_size); 2937 swap(a->q.base, b->q.base); 2938 swap(a->q.base_pa, b->q.base_pa); 2939 swap(a->q.info, b->q.info); 2940 swap(a->q.xdp_rxq_info, b->q.xdp_rxq_info); 2941 swap(a->q.partner, b->q.partner); 2942 swap(a->q_base, b->q_base); 2943 swap(a->q_base_pa, b->q_base_pa); 2944 swap(a->q_size, b->q_size); 2945 2946 swap(a->q.sg_desc_size, b->q.sg_desc_size); 2947 swap(a->q.sg_base, b->q.sg_base); 2948 swap(a->q.sg_base_pa, b->q.sg_base_pa); 2949 swap(a->sg_base, b->sg_base); 2950 swap(a->sg_base_pa, b->sg_base_pa); 2951 swap(a->sg_size, b->sg_size); 2952 2953 swap(a->cq.num_descs, b->cq.num_descs); 2954 swap(a->cq.desc_size, b->cq.desc_size); 2955 swap(a->cq.base, b->cq.base); 2956 swap(a->cq.base_pa, b->cq.base_pa); 2957 swap(a->cq.info, b->cq.info); 2958 swap(a->cq_base, b->cq_base); 2959 swap(a->cq_base_pa, b->cq_base_pa); 2960 swap(a->cq_size, b->cq_size); 2961 2962 ionic_debugfs_del_qcq(a); 2963 ionic_debugfs_add_qcq(a->q.lif, a); 2964 } 2965 2966 int ionic_reconfigure_queues(struct ionic_lif *lif, 2967 struct ionic_queue_params *qparam) 2968 { 2969 unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz; 2970 struct ionic_qcq **tx_qcqs = NULL; 2971 struct ionic_qcq **rx_qcqs = NULL; 2972 unsigned int flags, i; 2973 int err = 0; 2974 2975 /* Are we changing q params while CMB is on */ 2976 if ((test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) && qparam->cmb_tx) || 2977 (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state) && qparam->cmb_rx)) 2978 return ionic_cmb_reconfig(lif, qparam); 2979 2980 /* allocate temporary qcq arrays to hold new queue structs */ 2981 if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) { 2982 tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif, 2983 sizeof(struct ionic_qcq *), GFP_KERNEL); 2984 if (!tx_qcqs) { 2985 err = -ENOMEM; 2986 goto err_out; 2987 } 2988 } 2989 if (qparam->nxqs != lif->nxqs || 2990 qparam->nrxq_descs != lif->nrxq_descs || 2991 qparam->rxq_features != lif->rxq_features) { 2992 rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif, 2993 sizeof(struct ionic_qcq *), GFP_KERNEL); 2994 if (!rx_qcqs) { 2995 err = -ENOMEM; 2996 goto err_out; 2997 } 2998 } 2999 3000 /* allocate new desc_info and rings, but leave the interrupt setup 3001 * until later so as to not mess with the still-running queues 3002 */ 3003 if (tx_qcqs) { 3004 num_desc = qparam->ntxq_descs; 3005 desc_sz = sizeof(struct ionic_txq_desc); 3006 comp_sz = sizeof(struct ionic_txq_comp); 3007 3008 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 && 3009 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == 3010 sizeof(struct ionic_txq_sg_desc_v1)) 3011 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1); 3012 else 3013 sg_desc_sz = sizeof(struct ionic_txq_sg_desc); 3014 3015 for (i = 0; i < qparam->nxqs; i++) { 3016 /* If missing, short placeholder qcq needed for swap */ 3017 if (!lif->txqcqs[i]) { 3018 flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; 3019 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 3020 4, desc_sz, comp_sz, sg_desc_sz, 3021 lif->kern_pid, &lif->txqcqs[i]); 3022 if (err) 3023 goto err_out; 3024 } 3025 3026 flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR; 3027 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 3028 num_desc, desc_sz, comp_sz, sg_desc_sz, 3029 lif->kern_pid, &tx_qcqs[i]); 3030 if (err) 3031 goto err_out; 3032 } 3033 } 3034 3035 if (rx_qcqs) { 3036 num_desc = qparam->nrxq_descs; 3037 desc_sz = sizeof(struct ionic_rxq_desc); 3038 comp_sz = sizeof(struct ionic_rxq_comp); 3039 sg_desc_sz = sizeof(struct ionic_rxq_sg_desc); 3040 3041 if (qparam->rxq_features & IONIC_Q_F_2X_CQ_DESC) 3042 comp_sz *= 2; 3043 3044 for (i = 0; i < qparam->nxqs; i++) { 3045 /* If missing, short placeholder qcq needed for swap */ 3046 if (!lif->rxqcqs[i]) { 3047 flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG; 3048 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 3049 4, desc_sz, comp_sz, sg_desc_sz, 3050 lif->kern_pid, &lif->rxqcqs[i]); 3051 if (err) 3052 goto err_out; 3053 } 3054 3055 flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR; 3056 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 3057 num_desc, desc_sz, comp_sz, sg_desc_sz, 3058 lif->kern_pid, &rx_qcqs[i]); 3059 if (err) 3060 goto err_out; 3061 3062 rx_qcqs[i]->q.features = qparam->rxq_features; 3063 } 3064 } 3065 3066 /* stop and clean the queues */ 3067 ionic_stop_queues_reconfig(lif); 3068 3069 if (qparam->nxqs != lif->nxqs) { 3070 err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs); 3071 if (err) 3072 goto err_out_reinit_unlock; 3073 err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs); 3074 if (err) { 3075 netif_set_real_num_tx_queues(lif->netdev, lif->nxqs); 3076 goto err_out_reinit_unlock; 3077 } 3078 } 3079 3080 /* swap new desc_info and rings, keeping existing interrupt config */ 3081 if (tx_qcqs) { 3082 lif->ntxq_descs = qparam->ntxq_descs; 3083 for (i = 0; i < qparam->nxqs; i++) 3084 ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]); 3085 } 3086 3087 if (rx_qcqs) { 3088 lif->nrxq_descs = qparam->nrxq_descs; 3089 for (i = 0; i < qparam->nxqs; i++) 3090 ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]); 3091 } 3092 3093 /* if we need to change the interrupt layout, this is the time */ 3094 if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) || 3095 qparam->nxqs != lif->nxqs) { 3096 if (qparam->intr_split) { 3097 set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); 3098 } else { 3099 clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); 3100 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs; 3101 lif->tx_coalesce_hw = lif->rx_coalesce_hw; 3102 } 3103 3104 /* Clear existing interrupt assignments. We check for NULL here 3105 * because we're checking the whole array for potential qcqs, not 3106 * just those qcqs that have just been set up. 3107 */ 3108 for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) { 3109 if (lif->txqcqs[i]) 3110 ionic_qcq_intr_free(lif, lif->txqcqs[i]); 3111 if (lif->rxqcqs[i]) 3112 ionic_qcq_intr_free(lif, lif->rxqcqs[i]); 3113 } 3114 3115 /* re-assign the interrupts */ 3116 for (i = 0; i < qparam->nxqs; i++) { 3117 lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR; 3118 err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]); 3119 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 3120 lif->rxqcqs[i]->intr.index, 3121 lif->rx_coalesce_hw); 3122 3123 if (qparam->intr_split) { 3124 lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR; 3125 err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]); 3126 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 3127 lif->txqcqs[i]->intr.index, 3128 lif->tx_coalesce_hw); 3129 if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state)) 3130 lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw; 3131 } else { 3132 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3133 ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]); 3134 } 3135 } 3136 } 3137 3138 /* now we can rework the debugfs mappings */ 3139 if (tx_qcqs) { 3140 for (i = 0; i < qparam->nxqs; i++) { 3141 ionic_debugfs_del_qcq(lif->txqcqs[i]); 3142 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]); 3143 } 3144 } 3145 3146 if (rx_qcqs) { 3147 for (i = 0; i < qparam->nxqs; i++) { 3148 ionic_debugfs_del_qcq(lif->rxqcqs[i]); 3149 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]); 3150 } 3151 } 3152 3153 swap(lif->nxqs, qparam->nxqs); 3154 swap(lif->rxq_features, qparam->rxq_features); 3155 3156 err_out_reinit_unlock: 3157 /* re-init the queues, but don't lose an error code */ 3158 if (err) 3159 ionic_start_queues_reconfig(lif); 3160 else 3161 err = ionic_start_queues_reconfig(lif); 3162 3163 err_out: 3164 /* free old allocs without cleaning intr */ 3165 for (i = 0; i < qparam->nxqs; i++) { 3166 if (tx_qcqs && tx_qcqs[i]) { 3167 tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3168 ionic_qcq_free(lif, tx_qcqs[i]); 3169 devm_kfree(lif->ionic->dev, tx_qcqs[i]); 3170 tx_qcqs[i] = NULL; 3171 } 3172 if (rx_qcqs && rx_qcqs[i]) { 3173 rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3174 ionic_qcq_free(lif, rx_qcqs[i]); 3175 devm_kfree(lif->ionic->dev, rx_qcqs[i]); 3176 rx_qcqs[i] = NULL; 3177 } 3178 } 3179 3180 /* free q array */ 3181 if (rx_qcqs) { 3182 devm_kfree(lif->ionic->dev, rx_qcqs); 3183 rx_qcqs = NULL; 3184 } 3185 if (tx_qcqs) { 3186 devm_kfree(lif->ionic->dev, tx_qcqs); 3187 tx_qcqs = NULL; 3188 } 3189 3190 /* clean the unused dma and info allocations when new set is smaller 3191 * than the full array, but leave the qcq shells in place 3192 */ 3193 for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) { 3194 if (lif->txqcqs && lif->txqcqs[i]) { 3195 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3196 ionic_qcq_free(lif, lif->txqcqs[i]); 3197 } 3198 3199 if (lif->rxqcqs && lif->rxqcqs[i]) { 3200 lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 3201 ionic_qcq_free(lif, lif->rxqcqs[i]); 3202 } 3203 } 3204 3205 if (err) 3206 netdev_info(lif->netdev, "%s: failed %d\n", __func__, err); 3207 3208 return err; 3209 } 3210 3211 int ionic_lif_alloc(struct ionic *ionic) 3212 { 3213 struct device *dev = ionic->dev; 3214 union ionic_lif_identity *lid; 3215 struct net_device *netdev; 3216 struct ionic_lif *lif; 3217 int tbl_sz; 3218 int err; 3219 3220 lid = kzalloc(sizeof(*lid), GFP_KERNEL); 3221 if (!lid) 3222 return -ENOMEM; 3223 3224 netdev = alloc_etherdev_mqs(sizeof(*lif), 3225 ionic->ntxqs_per_lif, ionic->ntxqs_per_lif); 3226 if (!netdev) { 3227 dev_err(dev, "Cannot allocate netdev, aborting\n"); 3228 err = -ENOMEM; 3229 goto err_out_free_lid; 3230 } 3231 3232 SET_NETDEV_DEV(netdev, dev); 3233 3234 lif = netdev_priv(netdev); 3235 lif->netdev = netdev; 3236 ionic->lif = lif; 3237 lif->ionic = ionic; 3238 netdev->netdev_ops = &ionic_netdev_ops; 3239 ionic_ethtool_set_ops(netdev); 3240 3241 netdev->watchdog_timeo = 2 * HZ; 3242 netif_carrier_off(netdev); 3243 3244 lif->identity = lid; 3245 lif->lif_type = IONIC_LIF_TYPE_CLASSIC; 3246 err = ionic_lif_identify(ionic, lif->lif_type, lif->identity); 3247 if (err) { 3248 dev_err(ionic->dev, "Cannot identify type %d: %d\n", 3249 lif->lif_type, err); 3250 goto err_out_free_netdev; 3251 } 3252 lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU, 3253 le32_to_cpu(lif->identity->eth.min_frame_size)); 3254 lif->netdev->max_mtu = 3255 le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN; 3256 3257 lif->neqs = ionic->neqs_per_lif; 3258 lif->nxqs = ionic->ntxqs_per_lif; 3259 3260 lif->index = 0; 3261 3262 if (is_kdump_kernel()) { 3263 lif->ntxq_descs = IONIC_MIN_TXRX_DESC; 3264 lif->nrxq_descs = IONIC_MIN_TXRX_DESC; 3265 } else { 3266 lif->ntxq_descs = IONIC_DEF_TXRX_DESC; 3267 lif->nrxq_descs = IONIC_DEF_TXRX_DESC; 3268 } 3269 3270 /* Convert the default coalesce value to actual hw resolution */ 3271 lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT; 3272 lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic, 3273 lif->rx_coalesce_usecs); 3274 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs; 3275 lif->tx_coalesce_hw = lif->rx_coalesce_hw; 3276 set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state); 3277 set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state); 3278 3279 snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index); 3280 3281 mutex_init(&lif->queue_lock); 3282 mutex_init(&lif->config_lock); 3283 3284 spin_lock_init(&lif->adminq_lock); 3285 3286 spin_lock_init(&lif->deferred.lock); 3287 INIT_LIST_HEAD(&lif->deferred.list); 3288 INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work); 3289 3290 /* allocate lif info */ 3291 lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE); 3292 lif->info = dma_alloc_coherent(dev, lif->info_sz, 3293 &lif->info_pa, GFP_KERNEL); 3294 if (!lif->info) { 3295 dev_err(dev, "Failed to allocate lif info, aborting\n"); 3296 err = -ENOMEM; 3297 goto err_out_free_mutex; 3298 } 3299 3300 ionic_debugfs_add_lif(lif); 3301 3302 /* allocate control queues and txrx queue arrays */ 3303 ionic_lif_queue_identify(lif); 3304 err = ionic_qcqs_alloc(lif); 3305 if (err) 3306 goto err_out_free_lif_info; 3307 3308 /* allocate rss indirection table */ 3309 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 3310 lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz; 3311 lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz, 3312 &lif->rss_ind_tbl_pa, 3313 GFP_KERNEL); 3314 3315 if (!lif->rss_ind_tbl) { 3316 err = -ENOMEM; 3317 dev_err(dev, "Failed to allocate rss indirection table, aborting\n"); 3318 goto err_out_free_qcqs; 3319 } 3320 netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE); 3321 3322 ionic_lif_alloc_phc(lif); 3323 3324 return 0; 3325 3326 err_out_free_qcqs: 3327 ionic_qcqs_free(lif); 3328 err_out_free_lif_info: 3329 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); 3330 lif->info = NULL; 3331 lif->info_pa = 0; 3332 err_out_free_mutex: 3333 mutex_destroy(&lif->config_lock); 3334 mutex_destroy(&lif->queue_lock); 3335 err_out_free_netdev: 3336 free_netdev(lif->netdev); 3337 lif = NULL; 3338 err_out_free_lid: 3339 kfree(lid); 3340 3341 return err; 3342 } 3343 3344 static void ionic_lif_reset(struct ionic_lif *lif) 3345 { 3346 struct ionic_dev *idev = &lif->ionic->idev; 3347 3348 if (!ionic_is_fw_running(idev)) 3349 return; 3350 3351 mutex_lock(&lif->ionic->dev_cmd_lock); 3352 ionic_dev_cmd_lif_reset(idev, lif->index); 3353 ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 3354 mutex_unlock(&lif->ionic->dev_cmd_lock); 3355 } 3356 3357 static void ionic_lif_handle_fw_down(struct ionic_lif *lif) 3358 { 3359 struct ionic *ionic = lif->ionic; 3360 3361 if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state)) 3362 return; 3363 3364 dev_info(ionic->dev, "FW Down: Stopping LIFs\n"); 3365 3366 netif_device_detach(lif->netdev); 3367 3368 mutex_lock(&lif->queue_lock); 3369 if (test_bit(IONIC_LIF_F_UP, lif->state)) { 3370 dev_info(ionic->dev, "Surprise FW stop, stopping queues\n"); 3371 ionic_stop_queues(lif); 3372 } 3373 3374 if (netif_running(lif->netdev)) { 3375 ionic_txrx_deinit(lif); 3376 ionic_txrx_free(lif); 3377 } 3378 ionic_lif_deinit(lif); 3379 ionic_reset(ionic); 3380 ionic_qcqs_free(lif); 3381 3382 mutex_unlock(&lif->queue_lock); 3383 3384 clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state); 3385 dev_info(ionic->dev, "FW Down: LIFs stopped\n"); 3386 } 3387 3388 int ionic_restart_lif(struct ionic_lif *lif) 3389 { 3390 struct ionic *ionic = lif->ionic; 3391 int err; 3392 3393 mutex_lock(&lif->queue_lock); 3394 3395 if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state)) 3396 dev_info(ionic->dev, "FW Up: clearing broken state\n"); 3397 3398 err = ionic_qcqs_alloc(lif); 3399 if (err) 3400 goto err_unlock; 3401 3402 err = ionic_lif_init(lif); 3403 if (err) 3404 goto err_qcqs_free; 3405 3406 ionic_vf_attr_replay(lif); 3407 3408 if (lif->registered) 3409 ionic_lif_set_netdev_info(lif); 3410 3411 ionic_rx_filter_replay(lif); 3412 3413 if (netif_running(lif->netdev)) { 3414 err = ionic_txrx_alloc(lif); 3415 if (err) 3416 goto err_lifs_deinit; 3417 3418 err = ionic_txrx_init(lif); 3419 if (err) 3420 goto err_txrx_free; 3421 } 3422 3423 mutex_unlock(&lif->queue_lock); 3424 3425 clear_bit(IONIC_LIF_F_FW_RESET, lif->state); 3426 ionic_link_status_check_request(lif, CAN_SLEEP); 3427 netif_device_attach(lif->netdev); 3428 3429 return 0; 3430 3431 err_txrx_free: 3432 ionic_txrx_free(lif); 3433 err_lifs_deinit: 3434 ionic_lif_deinit(lif); 3435 err_qcqs_free: 3436 ionic_qcqs_free(lif); 3437 err_unlock: 3438 mutex_unlock(&lif->queue_lock); 3439 3440 return err; 3441 } 3442 3443 static void ionic_lif_handle_fw_up(struct ionic_lif *lif) 3444 { 3445 struct ionic *ionic = lif->ionic; 3446 int err; 3447 3448 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 3449 return; 3450 3451 dev_info(ionic->dev, "FW Up: restarting LIFs\n"); 3452 3453 /* This is a little different from what happens at 3454 * probe time because the LIF already exists so we 3455 * just need to reanimate it. 3456 */ 3457 ionic_init_devinfo(ionic); 3458 err = ionic_identify(ionic); 3459 if (err) 3460 goto err_out; 3461 err = ionic_port_identify(ionic); 3462 if (err) 3463 goto err_out; 3464 err = ionic_port_init(ionic); 3465 if (err) 3466 goto err_out; 3467 3468 err = ionic_restart_lif(lif); 3469 if (err) 3470 goto err_out; 3471 3472 dev_info(ionic->dev, "FW Up: LIFs restarted\n"); 3473 3474 /* restore the hardware timestamping queues */ 3475 ionic_lif_hwstamp_replay(lif); 3476 3477 return; 3478 3479 err_out: 3480 dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err); 3481 } 3482 3483 void ionic_lif_free(struct ionic_lif *lif) 3484 { 3485 struct device *dev = lif->ionic->dev; 3486 3487 ionic_lif_free_phc(lif); 3488 3489 /* free rss indirection table */ 3490 dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl, 3491 lif->rss_ind_tbl_pa); 3492 lif->rss_ind_tbl = NULL; 3493 lif->rss_ind_tbl_pa = 0; 3494 3495 /* free queues */ 3496 ionic_qcqs_free(lif); 3497 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 3498 ionic_lif_reset(lif); 3499 3500 /* free lif info */ 3501 kfree(lif->identity); 3502 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); 3503 lif->info = NULL; 3504 lif->info_pa = 0; 3505 3506 /* unmap doorbell page */ 3507 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); 3508 lif->kern_dbpage = NULL; 3509 3510 mutex_destroy(&lif->config_lock); 3511 mutex_destroy(&lif->queue_lock); 3512 3513 /* free netdev & lif */ 3514 ionic_debugfs_del_lif(lif); 3515 free_netdev(lif->netdev); 3516 } 3517 3518 void ionic_lif_deinit(struct ionic_lif *lif) 3519 { 3520 if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state)) 3521 return; 3522 3523 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { 3524 cancel_work_sync(&lif->deferred.work); 3525 cancel_work_sync(&lif->tx_timeout_work); 3526 ionic_rx_filters_deinit(lif); 3527 if (lif->netdev->features & NETIF_F_RXHASH) 3528 ionic_lif_rss_deinit(lif); 3529 } 3530 3531 napi_disable(&lif->adminqcq->napi); 3532 ionic_lif_qcq_deinit(lif, lif->notifyqcq); 3533 ionic_lif_qcq_deinit(lif, lif->adminqcq); 3534 3535 ionic_lif_reset(lif); 3536 } 3537 3538 static int ionic_lif_adminq_init(struct ionic_lif *lif) 3539 { 3540 struct device *dev = lif->ionic->dev; 3541 struct ionic_q_init_comp comp; 3542 struct ionic_dev *idev; 3543 struct ionic_qcq *qcq; 3544 struct ionic_queue *q; 3545 int err; 3546 3547 idev = &lif->ionic->idev; 3548 qcq = lif->adminqcq; 3549 q = &qcq->q; 3550 3551 mutex_lock(&lif->ionic->dev_cmd_lock); 3552 ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index); 3553 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 3554 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); 3555 mutex_unlock(&lif->ionic->dev_cmd_lock); 3556 if (err) { 3557 netdev_err(lif->netdev, "adminq init failed %d\n", err); 3558 return err; 3559 } 3560 3561 q->hw_type = comp.hw_type; 3562 q->hw_index = le32_to_cpu(comp.hw_index); 3563 q->dbval = IONIC_DBELL_QID(q->hw_index); 3564 3565 dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type); 3566 dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index); 3567 3568 q->dbell_deadline = IONIC_ADMIN_DOORBELL_DEADLINE; 3569 q->dbell_jiffies = jiffies; 3570 3571 netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi); 3572 3573 qcq->napi_qcq = qcq; 3574 timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0); 3575 3576 napi_enable(&qcq->napi); 3577 3578 if (qcq->flags & IONIC_QCQ_F_INTR) { 3579 irq_set_affinity_hint(qcq->intr.vector, 3580 &qcq->intr.affinity_mask); 3581 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 3582 IONIC_INTR_MASK_CLEAR); 3583 } 3584 3585 qcq->flags |= IONIC_QCQ_F_INITED; 3586 3587 return 0; 3588 } 3589 3590 static int ionic_lif_notifyq_init(struct ionic_lif *lif) 3591 { 3592 struct ionic_qcq *qcq = lif->notifyqcq; 3593 struct device *dev = lif->ionic->dev; 3594 struct ionic_queue *q = &qcq->q; 3595 int err; 3596 3597 struct ionic_admin_ctx ctx = { 3598 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 3599 .cmd.q_init = { 3600 .opcode = IONIC_CMD_Q_INIT, 3601 .lif_index = cpu_to_le16(lif->index), 3602 .type = q->type, 3603 .ver = lif->qtype_info[q->type].version, 3604 .index = cpu_to_le32(q->index), 3605 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | 3606 IONIC_QINIT_F_ENA), 3607 .intr_index = cpu_to_le16(lif->adminqcq->intr.index), 3608 .pid = cpu_to_le16(q->pid), 3609 .ring_size = ilog2(q->num_descs), 3610 .ring_base = cpu_to_le64(q->base_pa), 3611 } 3612 }; 3613 3614 dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid); 3615 dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index); 3616 dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); 3617 dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); 3618 3619 err = ionic_adminq_post_wait(lif, &ctx); 3620 if (err) 3621 return err; 3622 3623 lif->last_eid = 0; 3624 q->hw_type = ctx.comp.q_init.hw_type; 3625 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); 3626 q->dbval = IONIC_DBELL_QID(q->hw_index); 3627 3628 dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type); 3629 dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index); 3630 3631 /* preset the callback info */ 3632 q->info[0].cb_arg = lif; 3633 3634 qcq->flags |= IONIC_QCQ_F_INITED; 3635 3636 return 0; 3637 } 3638 3639 static int ionic_station_set(struct ionic_lif *lif) 3640 { 3641 struct net_device *netdev = lif->netdev; 3642 struct ionic_admin_ctx ctx = { 3643 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 3644 .cmd.lif_getattr = { 3645 .opcode = IONIC_CMD_LIF_GETATTR, 3646 .index = cpu_to_le16(lif->index), 3647 .attr = IONIC_LIF_ATTR_MAC, 3648 }, 3649 }; 3650 u8 mac_address[ETH_ALEN]; 3651 struct sockaddr addr; 3652 int err; 3653 3654 err = ionic_adminq_post_wait(lif, &ctx); 3655 if (err) 3656 return err; 3657 netdev_dbg(lif->netdev, "found initial MAC addr %pM\n", 3658 ctx.comp.lif_getattr.mac); 3659 ether_addr_copy(mac_address, ctx.comp.lif_getattr.mac); 3660 3661 if (is_zero_ether_addr(mac_address)) { 3662 eth_hw_addr_random(netdev); 3663 netdev_dbg(netdev, "Random Mac generated: %pM\n", netdev->dev_addr); 3664 ether_addr_copy(mac_address, netdev->dev_addr); 3665 3666 err = ionic_program_mac(lif, mac_address); 3667 if (err < 0) 3668 return err; 3669 3670 if (err > 0) { 3671 netdev_dbg(netdev, "%s:SET/GET ATTR Mac are not same-due to old FW running\n", 3672 __func__); 3673 return 0; 3674 } 3675 } 3676 3677 if (!is_zero_ether_addr(netdev->dev_addr)) { 3678 /* If the netdev mac is non-zero and doesn't match the default 3679 * device address, it was set by something earlier and we're 3680 * likely here again after a fw-upgrade reset. We need to be 3681 * sure the netdev mac is in our filter list. 3682 */ 3683 if (!ether_addr_equal(mac_address, netdev->dev_addr)) 3684 ionic_lif_addr_add(lif, netdev->dev_addr); 3685 } else { 3686 /* Update the netdev mac with the device's mac */ 3687 ether_addr_copy(addr.sa_data, mac_address); 3688 addr.sa_family = AF_INET; 3689 err = eth_prepare_mac_addr_change(netdev, &addr); 3690 if (err) { 3691 netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n", 3692 addr.sa_data, err); 3693 return 0; 3694 } 3695 3696 eth_commit_mac_addr_change(netdev, &addr); 3697 } 3698 3699 netdev_dbg(lif->netdev, "adding station MAC addr %pM\n", 3700 netdev->dev_addr); 3701 ionic_lif_addr_add(lif, netdev->dev_addr); 3702 3703 return 0; 3704 } 3705 3706 int ionic_lif_init(struct ionic_lif *lif) 3707 { 3708 struct ionic_dev *idev = &lif->ionic->idev; 3709 struct device *dev = lif->ionic->dev; 3710 struct ionic_lif_init_comp comp; 3711 int dbpage_num; 3712 int err; 3713 3714 mutex_lock(&lif->ionic->dev_cmd_lock); 3715 ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa); 3716 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 3717 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); 3718 mutex_unlock(&lif->ionic->dev_cmd_lock); 3719 if (err) 3720 return err; 3721 3722 lif->hw_index = le16_to_cpu(comp.hw_index); 3723 3724 /* now that we have the hw_index we can figure out our doorbell page */ 3725 lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif); 3726 if (!lif->dbid_count) { 3727 dev_err(dev, "No doorbell pages, aborting\n"); 3728 return -EINVAL; 3729 } 3730 3731 lif->kern_pid = 0; 3732 dbpage_num = ionic_db_page_num(lif, lif->kern_pid); 3733 lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num); 3734 if (!lif->kern_dbpage) { 3735 dev_err(dev, "Cannot map dbpage, aborting\n"); 3736 return -ENOMEM; 3737 } 3738 3739 err = ionic_lif_adminq_init(lif); 3740 if (err) 3741 goto err_out_adminq_deinit; 3742 3743 if (lif->ionic->nnqs_per_lif) { 3744 err = ionic_lif_notifyq_init(lif); 3745 if (err) 3746 goto err_out_notifyq_deinit; 3747 } 3748 3749 err = ionic_init_nic_features(lif); 3750 if (err) 3751 goto err_out_notifyq_deinit; 3752 3753 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { 3754 err = ionic_rx_filters_init(lif); 3755 if (err) 3756 goto err_out_notifyq_deinit; 3757 } 3758 3759 err = ionic_station_set(lif); 3760 if (err) 3761 goto err_out_notifyq_deinit; 3762 3763 lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT; 3764 3765 set_bit(IONIC_LIF_F_INITED, lif->state); 3766 3767 INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work); 3768 3769 return 0; 3770 3771 err_out_notifyq_deinit: 3772 napi_disable(&lif->adminqcq->napi); 3773 ionic_lif_qcq_deinit(lif, lif->notifyqcq); 3774 err_out_adminq_deinit: 3775 ionic_lif_qcq_deinit(lif, lif->adminqcq); 3776 ionic_lif_reset(lif); 3777 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); 3778 lif->kern_dbpage = NULL; 3779 3780 return err; 3781 } 3782 3783 static void ionic_lif_notify_work(struct work_struct *ws) 3784 { 3785 } 3786 3787 static void ionic_lif_set_netdev_info(struct ionic_lif *lif) 3788 { 3789 struct ionic_admin_ctx ctx = { 3790 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 3791 .cmd.lif_setattr = { 3792 .opcode = IONIC_CMD_LIF_SETATTR, 3793 .index = cpu_to_le16(lif->index), 3794 .attr = IONIC_LIF_ATTR_NAME, 3795 }, 3796 }; 3797 3798 strscpy(ctx.cmd.lif_setattr.name, lif->netdev->name, 3799 sizeof(ctx.cmd.lif_setattr.name)); 3800 3801 ionic_adminq_post_wait(lif, &ctx); 3802 } 3803 3804 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev) 3805 { 3806 if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit) 3807 return NULL; 3808 3809 return netdev_priv(netdev); 3810 } 3811 3812 static int ionic_lif_notify(struct notifier_block *nb, 3813 unsigned long event, void *info) 3814 { 3815 struct net_device *ndev = netdev_notifier_info_to_dev(info); 3816 struct ionic *ionic = container_of(nb, struct ionic, nb); 3817 struct ionic_lif *lif = ionic_netdev_lif(ndev); 3818 3819 if (!lif || lif->ionic != ionic) 3820 return NOTIFY_DONE; 3821 3822 switch (event) { 3823 case NETDEV_CHANGENAME: 3824 ionic_lif_set_netdev_info(lif); 3825 break; 3826 } 3827 3828 return NOTIFY_DONE; 3829 } 3830 3831 int ionic_lif_register(struct ionic_lif *lif) 3832 { 3833 int err; 3834 3835 ionic_lif_register_phc(lif); 3836 3837 INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work); 3838 3839 lif->ionic->nb.notifier_call = ionic_lif_notify; 3840 3841 err = register_netdevice_notifier(&lif->ionic->nb); 3842 if (err) 3843 lif->ionic->nb.notifier_call = NULL; 3844 3845 /* only register LIF0 for now */ 3846 err = register_netdev(lif->netdev); 3847 if (err) { 3848 dev_err(lif->ionic->dev, "Cannot register net device, aborting\n"); 3849 ionic_lif_unregister_phc(lif); 3850 return err; 3851 } 3852 3853 ionic_link_status_check_request(lif, CAN_SLEEP); 3854 lif->registered = true; 3855 ionic_lif_set_netdev_info(lif); 3856 3857 return 0; 3858 } 3859 3860 void ionic_lif_unregister(struct ionic_lif *lif) 3861 { 3862 if (lif->ionic->nb.notifier_call) { 3863 unregister_netdevice_notifier(&lif->ionic->nb); 3864 cancel_work_sync(&lif->ionic->nb_work); 3865 lif->ionic->nb.notifier_call = NULL; 3866 } 3867 3868 if (lif->netdev->reg_state == NETREG_REGISTERED) 3869 unregister_netdev(lif->netdev); 3870 3871 ionic_lif_unregister_phc(lif); 3872 3873 lif->registered = false; 3874 } 3875 3876 static void ionic_lif_queue_identify(struct ionic_lif *lif) 3877 { 3878 union ionic_q_identity __iomem *q_ident; 3879 struct ionic *ionic = lif->ionic; 3880 struct ionic_dev *idev; 3881 int qtype; 3882 int err; 3883 3884 idev = &lif->ionic->idev; 3885 q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data; 3886 3887 for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) { 3888 struct ionic_qtype_info *qti = &lif->qtype_info[qtype]; 3889 3890 /* filter out the ones we know about */ 3891 switch (qtype) { 3892 case IONIC_QTYPE_ADMINQ: 3893 case IONIC_QTYPE_NOTIFYQ: 3894 case IONIC_QTYPE_RXQ: 3895 case IONIC_QTYPE_TXQ: 3896 break; 3897 default: 3898 continue; 3899 } 3900 3901 memset(qti, 0, sizeof(*qti)); 3902 3903 mutex_lock(&ionic->dev_cmd_lock); 3904 ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype, 3905 ionic_qtype_versions[qtype]); 3906 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); 3907 if (!err) { 3908 qti->version = readb(&q_ident->version); 3909 qti->supported = readb(&q_ident->supported); 3910 qti->features = readq(&q_ident->features); 3911 qti->desc_sz = readw(&q_ident->desc_sz); 3912 qti->comp_sz = readw(&q_ident->comp_sz); 3913 qti->sg_desc_sz = readw(&q_ident->sg_desc_sz); 3914 qti->max_sg_elems = readw(&q_ident->max_sg_elems); 3915 qti->sg_desc_stride = readw(&q_ident->sg_desc_stride); 3916 } 3917 mutex_unlock(&ionic->dev_cmd_lock); 3918 3919 if (err == -EINVAL) { 3920 dev_err(ionic->dev, "qtype %d not supported\n", qtype); 3921 continue; 3922 } else if (err == -EIO) { 3923 dev_err(ionic->dev, "q_ident failed, not supported on older FW\n"); 3924 return; 3925 } else if (err) { 3926 dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n", 3927 qtype, err); 3928 return; 3929 } 3930 3931 dev_dbg(ionic->dev, " qtype[%d].version = %d\n", 3932 qtype, qti->version); 3933 dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n", 3934 qtype, qti->supported); 3935 dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n", 3936 qtype, qti->features); 3937 dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n", 3938 qtype, qti->desc_sz); 3939 dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n", 3940 qtype, qti->comp_sz); 3941 dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n", 3942 qtype, qti->sg_desc_sz); 3943 dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n", 3944 qtype, qti->max_sg_elems); 3945 dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n", 3946 qtype, qti->sg_desc_stride); 3947 3948 if (qti->max_sg_elems >= IONIC_MAX_FRAGS) { 3949 qti->max_sg_elems = IONIC_MAX_FRAGS - 1; 3950 dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to IONIC_MAX_FRAGS-1 %d\n", 3951 qtype, qti->max_sg_elems); 3952 } 3953 3954 if (qti->max_sg_elems > MAX_SKB_FRAGS) { 3955 qti->max_sg_elems = MAX_SKB_FRAGS; 3956 dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to MAX_SKB_FRAGS %d\n", 3957 qtype, qti->max_sg_elems); 3958 } 3959 } 3960 } 3961 3962 int ionic_lif_identify(struct ionic *ionic, u8 lif_type, 3963 union ionic_lif_identity *lid) 3964 { 3965 struct ionic_dev *idev = &ionic->idev; 3966 size_t sz; 3967 int err; 3968 3969 sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data)); 3970 3971 mutex_lock(&ionic->dev_cmd_lock); 3972 ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1); 3973 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); 3974 memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz); 3975 mutex_unlock(&ionic->dev_cmd_lock); 3976 if (err) 3977 return (err); 3978 3979 dev_dbg(ionic->dev, "capabilities 0x%llx\n", 3980 le64_to_cpu(lid->capabilities)); 3981 3982 dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n", 3983 le32_to_cpu(lid->eth.max_ucast_filters)); 3984 dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n", 3985 le32_to_cpu(lid->eth.max_mcast_filters)); 3986 dev_dbg(ionic->dev, "eth.features 0x%llx\n", 3987 le64_to_cpu(lid->eth.config.features)); 3988 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n", 3989 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ])); 3990 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n", 3991 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ])); 3992 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n", 3993 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ])); 3994 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n", 3995 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ])); 3996 dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name); 3997 dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac); 3998 dev_dbg(ionic->dev, "eth.config.mtu %d\n", 3999 le32_to_cpu(lid->eth.config.mtu)); 4000 4001 return 0; 4002 } 4003 4004 int ionic_lif_size(struct ionic *ionic) 4005 { 4006 struct ionic_identity *ident = &ionic->ident; 4007 unsigned int nintrs, dev_nintrs; 4008 union ionic_lif_config *lc; 4009 unsigned int ntxqs_per_lif; 4010 unsigned int nrxqs_per_lif; 4011 unsigned int neqs_per_lif; 4012 unsigned int nnqs_per_lif; 4013 unsigned int nxqs, neqs; 4014 unsigned int min_intrs; 4015 int err; 4016 4017 /* retrieve basic values from FW */ 4018 lc = &ident->lif.eth.config; 4019 dev_nintrs = le32_to_cpu(ident->dev.nintrs); 4020 neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count); 4021 nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]); 4022 ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]); 4023 nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]); 4024 4025 /* limit values to play nice with kdump */ 4026 if (is_kdump_kernel()) { 4027 dev_nintrs = 2; 4028 neqs_per_lif = 0; 4029 nnqs_per_lif = 0; 4030 ntxqs_per_lif = 1; 4031 nrxqs_per_lif = 1; 4032 } 4033 4034 /* reserve last queue id for hardware timestamping */ 4035 if (lc->features & cpu_to_le64(IONIC_ETH_HW_TIMESTAMP)) { 4036 if (ntxqs_per_lif <= 1 || nrxqs_per_lif <= 1) { 4037 lc->features &= cpu_to_le64(~IONIC_ETH_HW_TIMESTAMP); 4038 } else { 4039 ntxqs_per_lif -= 1; 4040 nrxqs_per_lif -= 1; 4041 } 4042 } 4043 4044 nxqs = min(ntxqs_per_lif, nrxqs_per_lif); 4045 nxqs = min(nxqs, num_online_cpus()); 4046 neqs = min(neqs_per_lif, num_online_cpus()); 4047 4048 try_again: 4049 /* interrupt usage: 4050 * 1 for master lif adminq/notifyq 4051 * 1 for each CPU for master lif TxRx queue pairs 4052 * whatever's left is for RDMA queues 4053 */ 4054 nintrs = 1 + nxqs + neqs; 4055 min_intrs = 2; /* adminq + 1 TxRx queue pair */ 4056 4057 if (nintrs > dev_nintrs) 4058 goto try_fewer; 4059 4060 err = ionic_bus_alloc_irq_vectors(ionic, nintrs); 4061 if (err < 0 && err != -ENOSPC) { 4062 dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err); 4063 return err; 4064 } 4065 if (err == -ENOSPC) 4066 goto try_fewer; 4067 4068 if (err != nintrs) { 4069 ionic_bus_free_irq_vectors(ionic); 4070 goto try_fewer; 4071 } 4072 4073 ionic->nnqs_per_lif = nnqs_per_lif; 4074 ionic->neqs_per_lif = neqs; 4075 ionic->ntxqs_per_lif = nxqs; 4076 ionic->nrxqs_per_lif = nxqs; 4077 ionic->nintrs = nintrs; 4078 4079 ionic_debugfs_add_sizes(ionic); 4080 4081 return 0; 4082 4083 try_fewer: 4084 if (nnqs_per_lif > 1) { 4085 nnqs_per_lif >>= 1; 4086 goto try_again; 4087 } 4088 if (neqs > 1) { 4089 neqs >>= 1; 4090 goto try_again; 4091 } 4092 if (nxqs > 1) { 4093 nxqs >>= 1; 4094 goto try_again; 4095 } 4096 dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs); 4097 return -ENOSPC; 4098 } 4099