1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ 3 4 #include <linux/netdevice.h> 5 #include <linux/etherdevice.h> 6 #include <linux/rtnetlink.h> 7 #include <linux/interrupt.h> 8 #include <linux/pci.h> 9 #include <linux/cpumask.h> 10 11 #include "ionic.h" 12 #include "ionic_bus.h" 13 #include "ionic_lif.h" 14 #include "ionic_txrx.h" 15 #include "ionic_ethtool.h" 16 #include "ionic_debugfs.h" 17 18 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode); 19 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr); 20 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr); 21 static void ionic_link_status_check(struct ionic_lif *lif); 22 23 static void ionic_lif_deferred_work(struct work_struct *work) 24 { 25 struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work); 26 struct ionic_deferred *def = &lif->deferred; 27 struct ionic_deferred_work *w = NULL; 28 29 spin_lock_bh(&def->lock); 30 if (!list_empty(&def->list)) { 31 w = list_first_entry(&def->list, 32 struct ionic_deferred_work, list); 33 list_del(&w->list); 34 } 35 spin_unlock_bh(&def->lock); 36 37 if (w) { 38 switch (w->type) { 39 case IONIC_DW_TYPE_RX_MODE: 40 ionic_lif_rx_mode(lif, w->rx_mode); 41 break; 42 case IONIC_DW_TYPE_RX_ADDR_ADD: 43 ionic_lif_addr_add(lif, w->addr); 44 break; 45 case IONIC_DW_TYPE_RX_ADDR_DEL: 46 ionic_lif_addr_del(lif, w->addr); 47 break; 48 case IONIC_DW_TYPE_LINK_STATUS: 49 ionic_link_status_check(lif); 50 break; 51 default: 52 break; 53 } 54 kfree(w); 55 schedule_work(&def->work); 56 } 57 } 58 59 static void ionic_lif_deferred_enqueue(struct ionic_deferred *def, 60 struct ionic_deferred_work *work) 61 { 62 spin_lock_bh(&def->lock); 63 list_add_tail(&work->list, &def->list); 64 spin_unlock_bh(&def->lock); 65 schedule_work(&def->work); 66 } 67 68 static void ionic_link_status_check(struct ionic_lif *lif) 69 { 70 struct net_device *netdev = lif->netdev; 71 u16 link_status; 72 
bool link_up; 73 74 link_status = le16_to_cpu(lif->info->status.link_status); 75 link_up = link_status == IONIC_PORT_OPER_STATUS_UP; 76 77 /* filter out the no-change cases */ 78 if (link_up == netif_carrier_ok(netdev)) 79 goto link_out; 80 81 if (link_up) { 82 netdev_info(netdev, "Link up - %d Gbps\n", 83 le32_to_cpu(lif->info->status.link_speed) / 1000); 84 85 if (test_bit(IONIC_LIF_UP, lif->state)) { 86 netif_tx_wake_all_queues(lif->netdev); 87 netif_carrier_on(netdev); 88 } 89 } else { 90 netdev_info(netdev, "Link down\n"); 91 92 /* carrier off first to avoid watchdog timeout */ 93 netif_carrier_off(netdev); 94 if (test_bit(IONIC_LIF_UP, lif->state)) 95 netif_tx_stop_all_queues(netdev); 96 } 97 98 link_out: 99 clear_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state); 100 } 101 102 static void ionic_link_status_check_request(struct ionic_lif *lif) 103 { 104 struct ionic_deferred_work *work; 105 106 /* we only need one request outstanding at a time */ 107 if (test_and_set_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state)) 108 return; 109 110 if (in_interrupt()) { 111 work = kzalloc(sizeof(*work), GFP_ATOMIC); 112 if (!work) 113 return; 114 115 work->type = IONIC_DW_TYPE_LINK_STATUS; 116 ionic_lif_deferred_enqueue(&lif->deferred, work); 117 } else { 118 ionic_link_status_check(lif); 119 } 120 } 121 122 static irqreturn_t ionic_isr(int irq, void *data) 123 { 124 struct napi_struct *napi = data; 125 126 napi_schedule_irqoff(napi); 127 128 return IRQ_HANDLED; 129 } 130 131 static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq) 132 { 133 struct ionic_intr_info *intr = &qcq->intr; 134 struct device *dev = lif->ionic->dev; 135 struct ionic_queue *q = &qcq->q; 136 const char *name; 137 138 if (lif->registered) 139 name = lif->netdev->name; 140 else 141 name = dev_name(dev); 142 143 snprintf(intr->name, sizeof(intr->name), 144 "%s-%s-%s", IONIC_DRV_NAME, name, q->name); 145 146 return devm_request_irq(dev, intr->vector, ionic_isr, 147 0, intr->name, 
&qcq->napi); 148 } 149 150 static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr) 151 { 152 struct ionic *ionic = lif->ionic; 153 int index; 154 155 index = find_first_zero_bit(ionic->intrs, ionic->nintrs); 156 if (index == ionic->nintrs) { 157 netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n", 158 __func__, index, ionic->nintrs); 159 return -ENOSPC; 160 } 161 162 set_bit(index, ionic->intrs); 163 ionic_intr_init(&ionic->idev, intr, index); 164 165 return 0; 166 } 167 168 static void ionic_intr_free(struct ionic_lif *lif, int index) 169 { 170 if (index != INTR_INDEX_NOT_ASSIGNED && index < lif->ionic->nintrs) 171 clear_bit(index, lif->ionic->intrs); 172 } 173 174 static int ionic_qcq_enable(struct ionic_qcq *qcq) 175 { 176 struct ionic_queue *q = &qcq->q; 177 struct ionic_lif *lif = q->lif; 178 struct ionic_dev *idev; 179 struct device *dev; 180 181 struct ionic_admin_ctx ctx = { 182 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 183 .cmd.q_control = { 184 .opcode = IONIC_CMD_Q_CONTROL, 185 .lif_index = cpu_to_le16(lif->index), 186 .type = q->type, 187 .index = cpu_to_le32(q->index), 188 .oper = IONIC_Q_ENABLE, 189 }, 190 }; 191 192 idev = &lif->ionic->idev; 193 dev = lif->ionic->dev; 194 195 dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n", 196 ctx.cmd.q_control.index, ctx.cmd.q_control.type); 197 198 if (qcq->flags & IONIC_QCQ_F_INTR) { 199 irq_set_affinity_hint(qcq->intr.vector, 200 &qcq->intr.affinity_mask); 201 napi_enable(&qcq->napi); 202 ionic_intr_clean(idev->intr_ctrl, qcq->intr.index); 203 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 204 IONIC_INTR_MASK_CLEAR); 205 } 206 207 return ionic_adminq_post_wait(lif, &ctx); 208 } 209 210 static int ionic_qcq_disable(struct ionic_qcq *qcq) 211 { 212 struct ionic_queue *q = &qcq->q; 213 struct ionic_lif *lif = q->lif; 214 struct ionic_dev *idev; 215 struct device *dev; 216 217 struct ionic_admin_ctx ctx = { 218 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 219 
.cmd.q_control = { 220 .opcode = IONIC_CMD_Q_CONTROL, 221 .lif_index = cpu_to_le16(lif->index), 222 .type = q->type, 223 .index = cpu_to_le32(q->index), 224 .oper = IONIC_Q_DISABLE, 225 }, 226 }; 227 228 idev = &lif->ionic->idev; 229 dev = lif->ionic->dev; 230 231 dev_dbg(dev, "q_disable.index %d q_disable.qtype %d\n", 232 ctx.cmd.q_control.index, ctx.cmd.q_control.type); 233 234 if (qcq->flags & IONIC_QCQ_F_INTR) { 235 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 236 IONIC_INTR_MASK_SET); 237 synchronize_irq(qcq->intr.vector); 238 irq_set_affinity_hint(qcq->intr.vector, NULL); 239 napi_disable(&qcq->napi); 240 } 241 242 return ionic_adminq_post_wait(lif, &ctx); 243 } 244 245 static void ionic_lif_quiesce(struct ionic_lif *lif) 246 { 247 struct ionic_admin_ctx ctx = { 248 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 249 .cmd.lif_setattr = { 250 .opcode = IONIC_CMD_LIF_SETATTR, 251 .attr = IONIC_LIF_ATTR_STATE, 252 .index = lif->index, 253 .state = IONIC_LIF_DISABLE 254 }, 255 }; 256 257 ionic_adminq_post_wait(lif, &ctx); 258 } 259 260 static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq) 261 { 262 struct ionic_dev *idev = &lif->ionic->idev; 263 struct device *dev = lif->ionic->dev; 264 265 if (!qcq) 266 return; 267 268 ionic_debugfs_del_qcq(qcq); 269 270 if (!(qcq->flags & IONIC_QCQ_F_INITED)) 271 return; 272 273 if (qcq->flags & IONIC_QCQ_F_INTR) { 274 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 275 IONIC_INTR_MASK_SET); 276 devm_free_irq(dev, qcq->intr.vector, &qcq->napi); 277 netif_napi_del(&qcq->napi); 278 } 279 280 qcq->flags &= ~IONIC_QCQ_F_INITED; 281 } 282 283 static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq) 284 { 285 struct device *dev = lif->ionic->dev; 286 287 if (!qcq) 288 return; 289 290 dma_free_coherent(dev, qcq->total_size, qcq->base, qcq->base_pa); 291 qcq->base = NULL; 292 qcq->base_pa = 0; 293 294 if (qcq->flags & IONIC_QCQ_F_INTR) 295 ionic_intr_free(lif, qcq->intr.index); 296 297 
devm_kfree(dev, qcq->cq.info); 298 qcq->cq.info = NULL; 299 devm_kfree(dev, qcq->q.info); 300 qcq->q.info = NULL; 301 devm_kfree(dev, qcq); 302 } 303 304 static void ionic_qcqs_free(struct ionic_lif *lif) 305 { 306 struct device *dev = lif->ionic->dev; 307 unsigned int i; 308 309 if (lif->notifyqcq) { 310 ionic_qcq_free(lif, lif->notifyqcq); 311 lif->notifyqcq = NULL; 312 } 313 314 if (lif->adminqcq) { 315 ionic_qcq_free(lif, lif->adminqcq); 316 lif->adminqcq = NULL; 317 } 318 319 for (i = 0; i < lif->nxqs; i++) 320 if (lif->rxqcqs[i].stats) 321 devm_kfree(dev, lif->rxqcqs[i].stats); 322 323 devm_kfree(dev, lif->rxqcqs); 324 lif->rxqcqs = NULL; 325 326 for (i = 0; i < lif->nxqs; i++) 327 if (lif->txqcqs[i].stats) 328 devm_kfree(dev, lif->txqcqs[i].stats); 329 330 devm_kfree(dev, lif->txqcqs); 331 lif->txqcqs = NULL; 332 } 333 334 static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq, 335 struct ionic_qcq *n_qcq) 336 { 337 if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) { 338 ionic_intr_free(n_qcq->cq.lif, n_qcq->intr.index); 339 n_qcq->flags &= ~IONIC_QCQ_F_INTR; 340 } 341 342 n_qcq->intr.vector = src_qcq->intr.vector; 343 n_qcq->intr.index = src_qcq->intr.index; 344 } 345 346 static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, 347 unsigned int index, 348 const char *name, unsigned int flags, 349 unsigned int num_descs, unsigned int desc_size, 350 unsigned int cq_desc_size, 351 unsigned int sg_desc_size, 352 unsigned int pid, struct ionic_qcq **qcq) 353 { 354 struct ionic_dev *idev = &lif->ionic->idev; 355 u32 q_size, cq_size, sg_size, total_size; 356 struct device *dev = lif->ionic->dev; 357 void *q_base, *cq_base, *sg_base; 358 dma_addr_t cq_base_pa = 0; 359 dma_addr_t sg_base_pa = 0; 360 dma_addr_t q_base_pa = 0; 361 struct ionic_qcq *new; 362 int err; 363 364 *qcq = NULL; 365 366 q_size = num_descs * desc_size; 367 cq_size = num_descs * cq_desc_size; 368 sg_size = num_descs * sg_desc_size; 369 370 total_size = ALIGN(q_size, PAGE_SIZE) + 
ALIGN(cq_size, PAGE_SIZE); 371 /* Note: aligning q_size/cq_size is not enough due to cq_base 372 * address aligning as q_base could be not aligned to the page. 373 * Adding PAGE_SIZE. 374 */ 375 total_size += PAGE_SIZE; 376 if (flags & IONIC_QCQ_F_SG) { 377 total_size += ALIGN(sg_size, PAGE_SIZE); 378 total_size += PAGE_SIZE; 379 } 380 381 new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL); 382 if (!new) { 383 netdev_err(lif->netdev, "Cannot allocate queue structure\n"); 384 err = -ENOMEM; 385 goto err_out; 386 } 387 388 new->flags = flags; 389 390 new->q.info = devm_kzalloc(dev, sizeof(*new->q.info) * num_descs, 391 GFP_KERNEL); 392 if (!new->q.info) { 393 netdev_err(lif->netdev, "Cannot allocate queue info\n"); 394 err = -ENOMEM; 395 goto err_out; 396 } 397 398 new->q.type = type; 399 400 err = ionic_q_init(lif, idev, &new->q, index, name, num_descs, 401 desc_size, sg_desc_size, pid); 402 if (err) { 403 netdev_err(lif->netdev, "Cannot initialize queue\n"); 404 goto err_out; 405 } 406 407 if (flags & IONIC_QCQ_F_INTR) { 408 err = ionic_intr_alloc(lif, &new->intr); 409 if (err) { 410 netdev_warn(lif->netdev, "no intr for %s: %d\n", 411 name, err); 412 goto err_out; 413 } 414 415 err = ionic_bus_get_irq(lif->ionic, new->intr.index); 416 if (err < 0) { 417 netdev_warn(lif->netdev, "no vector for %s: %d\n", 418 name, err); 419 goto err_out_free_intr; 420 } 421 new->intr.vector = err; 422 ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index, 423 IONIC_INTR_MASK_SET); 424 425 new->intr.cpu = new->intr.index % num_online_cpus(); 426 if (cpu_online(new->intr.cpu)) 427 cpumask_set_cpu(new->intr.cpu, 428 &new->intr.affinity_mask); 429 } else { 430 new->intr.index = INTR_INDEX_NOT_ASSIGNED; 431 } 432 433 new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs, 434 GFP_KERNEL); 435 if (!new->cq.info) { 436 netdev_err(lif->netdev, "Cannot allocate completion queue info\n"); 437 err = -ENOMEM; 438 goto err_out_free_intr; 439 } 440 441 err = ionic_cq_init(lif, 
&new->cq, &new->intr, num_descs, cq_desc_size); 442 if (err) { 443 netdev_err(lif->netdev, "Cannot initialize completion queue\n"); 444 goto err_out_free_intr; 445 } 446 447 new->base = dma_alloc_coherent(dev, total_size, &new->base_pa, 448 GFP_KERNEL); 449 if (!new->base) { 450 netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n"); 451 err = -ENOMEM; 452 goto err_out_free_intr; 453 } 454 455 new->total_size = total_size; 456 457 q_base = new->base; 458 q_base_pa = new->base_pa; 459 460 cq_base = (void *)ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE); 461 cq_base_pa = ALIGN(q_base_pa + q_size, PAGE_SIZE); 462 463 if (flags & IONIC_QCQ_F_SG) { 464 sg_base = (void *)ALIGN((uintptr_t)cq_base + cq_size, 465 PAGE_SIZE); 466 sg_base_pa = ALIGN(cq_base_pa + cq_size, PAGE_SIZE); 467 ionic_q_sg_map(&new->q, sg_base, sg_base_pa); 468 } 469 470 ionic_q_map(&new->q, q_base, q_base_pa); 471 ionic_cq_map(&new->cq, cq_base, cq_base_pa); 472 ionic_cq_bind(&new->cq, &new->q); 473 474 *qcq = new; 475 476 return 0; 477 478 err_out_free_intr: 479 ionic_intr_free(lif, new->intr.index); 480 err_out: 481 dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err); 482 return err; 483 } 484 485 static int ionic_qcqs_alloc(struct ionic_lif *lif) 486 { 487 struct device *dev = lif->ionic->dev; 488 unsigned int q_list_size; 489 unsigned int flags; 490 int err; 491 int i; 492 493 flags = IONIC_QCQ_F_INTR; 494 err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags, 495 IONIC_ADMINQ_LENGTH, 496 sizeof(struct ionic_admin_cmd), 497 sizeof(struct ionic_admin_comp), 498 0, lif->kern_pid, &lif->adminqcq); 499 if (err) 500 return err; 501 502 if (lif->ionic->nnqs_per_lif) { 503 flags = IONIC_QCQ_F_NOTIFYQ; 504 err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq", 505 flags, IONIC_NOTIFYQ_LENGTH, 506 sizeof(struct ionic_notifyq_cmd), 507 sizeof(union ionic_notifyq_comp), 508 0, lif->kern_pid, &lif->notifyqcq); 509 if (err) 510 goto err_out_free_adminqcq; 511 512 /* Let the 
notifyq ride on the adminq interrupt */ 513 ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq); 514 } 515 516 q_list_size = sizeof(*lif->txqcqs) * lif->nxqs; 517 err = -ENOMEM; 518 lif->txqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL); 519 if (!lif->txqcqs) 520 goto err_out_free_notifyqcq; 521 for (i = 0; i < lif->nxqs; i++) { 522 lif->txqcqs[i].stats = devm_kzalloc(dev, 523 sizeof(struct ionic_q_stats), 524 GFP_KERNEL); 525 if (!lif->txqcqs[i].stats) 526 goto err_out_free_tx_stats; 527 } 528 529 lif->rxqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL); 530 if (!lif->rxqcqs) 531 goto err_out_free_tx_stats; 532 for (i = 0; i < lif->nxqs; i++) { 533 lif->rxqcqs[i].stats = devm_kzalloc(dev, 534 sizeof(struct ionic_q_stats), 535 GFP_KERNEL); 536 if (!lif->rxqcqs[i].stats) 537 goto err_out_free_rx_stats; 538 } 539 540 return 0; 541 542 err_out_free_rx_stats: 543 for (i = 0; i < lif->nxqs; i++) 544 if (lif->rxqcqs[i].stats) 545 devm_kfree(dev, lif->rxqcqs[i].stats); 546 devm_kfree(dev, lif->rxqcqs); 547 lif->rxqcqs = NULL; 548 err_out_free_tx_stats: 549 for (i = 0; i < lif->nxqs; i++) 550 if (lif->txqcqs[i].stats) 551 devm_kfree(dev, lif->txqcqs[i].stats); 552 devm_kfree(dev, lif->txqcqs); 553 lif->txqcqs = NULL; 554 err_out_free_notifyqcq: 555 if (lif->notifyqcq) { 556 ionic_qcq_free(lif, lif->notifyqcq); 557 lif->notifyqcq = NULL; 558 } 559 err_out_free_adminqcq: 560 ionic_qcq_free(lif, lif->adminqcq); 561 lif->adminqcq = NULL; 562 563 return err; 564 } 565 566 static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) 567 { 568 struct device *dev = lif->ionic->dev; 569 struct ionic_queue *q = &qcq->q; 570 struct ionic_cq *cq = &qcq->cq; 571 struct ionic_admin_ctx ctx = { 572 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 573 .cmd.q_init = { 574 .opcode = IONIC_CMD_Q_INIT, 575 .lif_index = cpu_to_le16(lif->index), 576 .type = q->type, 577 .index = cpu_to_le32(q->index), 578 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | 579 IONIC_QINIT_F_SG), 580 
.intr_index = cpu_to_le16(lif->rxqcqs[q->index].qcq->intr.index), 581 .pid = cpu_to_le16(q->pid), 582 .ring_size = ilog2(q->num_descs), 583 .ring_base = cpu_to_le64(q->base_pa), 584 .cq_ring_base = cpu_to_le64(cq->base_pa), 585 .sg_ring_base = cpu_to_le64(q->sg_base_pa), 586 }, 587 }; 588 int err; 589 590 dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid); 591 dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index); 592 dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); 593 dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); 594 595 err = ionic_adminq_post_wait(lif, &ctx); 596 if (err) 597 return err; 598 599 q->hw_type = ctx.comp.q_init.hw_type; 600 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); 601 q->dbval = IONIC_DBELL_QID(q->hw_index); 602 603 dev_dbg(dev, "txq->hw_type %d\n", q->hw_type); 604 dev_dbg(dev, "txq->hw_index %d\n", q->hw_index); 605 606 qcq->flags |= IONIC_QCQ_F_INITED; 607 608 ionic_debugfs_add_qcq(lif, qcq); 609 610 return 0; 611 } 612 613 static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) 614 { 615 struct device *dev = lif->ionic->dev; 616 struct ionic_queue *q = &qcq->q; 617 struct ionic_cq *cq = &qcq->cq; 618 struct ionic_admin_ctx ctx = { 619 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 620 .cmd.q_init = { 621 .opcode = IONIC_CMD_Q_INIT, 622 .lif_index = cpu_to_le16(lif->index), 623 .type = q->type, 624 .index = cpu_to_le32(q->index), 625 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ), 626 .intr_index = cpu_to_le16(cq->bound_intr->index), 627 .pid = cpu_to_le16(q->pid), 628 .ring_size = ilog2(q->num_descs), 629 .ring_base = cpu_to_le64(q->base_pa), 630 .cq_ring_base = cpu_to_le64(cq->base_pa), 631 }, 632 }; 633 int err; 634 635 dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid); 636 dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index); 637 dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); 638 dev_dbg(dev, "rxq_init.ring_size %d\n", 
ctx.cmd.q_init.ring_size); 639 640 err = ionic_adminq_post_wait(lif, &ctx); 641 if (err) 642 return err; 643 644 q->hw_type = ctx.comp.q_init.hw_type; 645 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); 646 q->dbval = IONIC_DBELL_QID(q->hw_index); 647 648 dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type); 649 dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index); 650 651 netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi, 652 NAPI_POLL_WEIGHT); 653 654 err = ionic_request_irq(lif, qcq); 655 if (err) { 656 netif_napi_del(&qcq->napi); 657 return err; 658 } 659 660 qcq->flags |= IONIC_QCQ_F_INITED; 661 662 ionic_debugfs_add_qcq(lif, qcq); 663 664 return 0; 665 } 666 667 static bool ionic_notifyq_service(struct ionic_cq *cq, 668 struct ionic_cq_info *cq_info) 669 { 670 union ionic_notifyq_comp *comp = cq_info->cq_desc; 671 struct net_device *netdev; 672 struct ionic_queue *q; 673 struct ionic_lif *lif; 674 u64 eid; 675 676 q = cq->bound_q; 677 lif = q->info[0].cb_arg; 678 netdev = lif->netdev; 679 eid = le64_to_cpu(comp->event.eid); 680 681 /* Have we run out of new completions to process? 
*/ 682 if (eid <= lif->last_eid) 683 return false; 684 685 lif->last_eid = eid; 686 687 dev_dbg(lif->ionic->dev, "notifyq event:\n"); 688 dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1, 689 comp, sizeof(*comp), true); 690 691 switch (le16_to_cpu(comp->event.ecode)) { 692 case IONIC_EVENT_LINK_CHANGE: 693 ionic_link_status_check_request(lif); 694 break; 695 case IONIC_EVENT_RESET: 696 netdev_info(netdev, "Notifyq IONIC_EVENT_RESET eid=%lld\n", 697 eid); 698 netdev_info(netdev, " reset_code=%d state=%d\n", 699 comp->reset.reset_code, 700 comp->reset.state); 701 break; 702 default: 703 netdev_warn(netdev, "Notifyq unknown event ecode=%d eid=%lld\n", 704 comp->event.ecode, eid); 705 break; 706 } 707 708 return true; 709 } 710 711 static int ionic_notifyq_clean(struct ionic_lif *lif, int budget) 712 { 713 struct ionic_dev *idev = &lif->ionic->idev; 714 struct ionic_cq *cq = &lif->notifyqcq->cq; 715 u32 work_done; 716 717 work_done = ionic_cq_service(cq, budget, ionic_notifyq_service, 718 NULL, NULL); 719 if (work_done) 720 ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index, 721 work_done, IONIC_INTR_CRED_RESET_COALESCE); 722 723 return work_done; 724 } 725 726 static bool ionic_adminq_service(struct ionic_cq *cq, 727 struct ionic_cq_info *cq_info) 728 { 729 struct ionic_admin_comp *comp = cq_info->cq_desc; 730 731 if (!color_match(comp->color, cq->done_color)) 732 return false; 733 734 ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index)); 735 736 return true; 737 } 738 739 static int ionic_adminq_napi(struct napi_struct *napi, int budget) 740 { 741 struct ionic_lif *lif = napi_to_cq(napi)->lif; 742 int n_work = 0; 743 int a_work = 0; 744 745 if (likely(lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)) 746 n_work = ionic_notifyq_clean(lif, budget); 747 a_work = ionic_napi(napi, budget, ionic_adminq_service, NULL, NULL); 748 749 return max(n_work, a_work); 750 } 751 752 static void ionic_get_stats64(struct net_device *netdev, 753 
struct rtnl_link_stats64 *ns) 754 { 755 struct ionic_lif *lif = netdev_priv(netdev); 756 struct ionic_lif_stats *ls; 757 758 memset(ns, 0, sizeof(*ns)); 759 ls = &lif->info->stats; 760 761 ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) + 762 le64_to_cpu(ls->rx_mcast_packets) + 763 le64_to_cpu(ls->rx_bcast_packets); 764 765 ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) + 766 le64_to_cpu(ls->tx_mcast_packets) + 767 le64_to_cpu(ls->tx_bcast_packets); 768 769 ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) + 770 le64_to_cpu(ls->rx_mcast_bytes) + 771 le64_to_cpu(ls->rx_bcast_bytes); 772 773 ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) + 774 le64_to_cpu(ls->tx_mcast_bytes) + 775 le64_to_cpu(ls->tx_bcast_bytes); 776 777 ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) + 778 le64_to_cpu(ls->rx_mcast_drop_packets) + 779 le64_to_cpu(ls->rx_bcast_drop_packets); 780 781 ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) + 782 le64_to_cpu(ls->tx_mcast_drop_packets) + 783 le64_to_cpu(ls->tx_bcast_drop_packets); 784 785 ns->multicast = le64_to_cpu(ls->rx_mcast_packets); 786 787 ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty); 788 789 ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) + 790 le64_to_cpu(ls->rx_queue_disabled) + 791 le64_to_cpu(ls->rx_desc_fetch_error) + 792 le64_to_cpu(ls->rx_desc_data_error); 793 794 ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) + 795 le64_to_cpu(ls->tx_queue_disabled) + 796 le64_to_cpu(ls->tx_desc_fetch_error) + 797 le64_to_cpu(ls->tx_desc_data_error); 798 799 ns->rx_errors = ns->rx_over_errors + 800 ns->rx_missed_errors; 801 802 ns->tx_errors = ns->tx_aborted_errors; 803 } 804 805 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) 806 { 807 struct ionic_admin_ctx ctx = { 808 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 809 .cmd.rx_filter_add = { 810 .opcode = IONIC_CMD_RX_FILTER_ADD, 811 .lif_index = cpu_to_le16(lif->index), 812 .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC), 813 }, 
814 }; 815 struct ionic_rx_filter *f; 816 int err; 817 818 /* don't bother if we already have it */ 819 spin_lock_bh(&lif->rx_filters.lock); 820 f = ionic_rx_filter_by_addr(lif, addr); 821 spin_unlock_bh(&lif->rx_filters.lock); 822 if (f) 823 return 0; 824 825 netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr, 826 ctx.comp.rx_filter_add.filter_id); 827 828 memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); 829 err = ionic_adminq_post_wait(lif, &ctx); 830 if (err) 831 return err; 832 833 return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx); 834 } 835 836 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) 837 { 838 struct ionic_admin_ctx ctx = { 839 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 840 .cmd.rx_filter_del = { 841 .opcode = IONIC_CMD_RX_FILTER_DEL, 842 .lif_index = cpu_to_le16(lif->index), 843 }, 844 }; 845 struct ionic_rx_filter *f; 846 int err; 847 848 spin_lock_bh(&lif->rx_filters.lock); 849 f = ionic_rx_filter_by_addr(lif, addr); 850 if (!f) { 851 spin_unlock_bh(&lif->rx_filters.lock); 852 return -ENOENT; 853 } 854 855 ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); 856 ionic_rx_filter_free(lif, f); 857 spin_unlock_bh(&lif->rx_filters.lock); 858 859 err = ionic_adminq_post_wait(lif, &ctx); 860 if (err) 861 return err; 862 863 netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr, 864 ctx.cmd.rx_filter_del.filter_id); 865 866 return 0; 867 } 868 869 static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add) 870 { 871 struct ionic *ionic = lif->ionic; 872 struct ionic_deferred_work *work; 873 unsigned int nmfilters; 874 unsigned int nufilters; 875 876 if (add) { 877 /* Do we have space for this filter? We test the counters 878 * here before checking the need for deferral so that we 879 * can return an overflow error to the stack. 
880 */ 881 nmfilters = le32_to_cpu(ionic->ident.lif.eth.max_mcast_filters); 882 nufilters = le32_to_cpu(ionic->ident.lif.eth.max_ucast_filters); 883 884 if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters)) 885 lif->nmcast++; 886 else if (!is_multicast_ether_addr(addr) && 887 lif->nucast < nufilters) 888 lif->nucast++; 889 else 890 return -ENOSPC; 891 } else { 892 if (is_multicast_ether_addr(addr) && lif->nmcast) 893 lif->nmcast--; 894 else if (!is_multicast_ether_addr(addr) && lif->nucast) 895 lif->nucast--; 896 } 897 898 if (in_interrupt()) { 899 work = kzalloc(sizeof(*work), GFP_ATOMIC); 900 if (!work) { 901 netdev_err(lif->netdev, "%s OOM\n", __func__); 902 return -ENOMEM; 903 } 904 work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD : 905 IONIC_DW_TYPE_RX_ADDR_DEL; 906 memcpy(work->addr, addr, ETH_ALEN); 907 netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n", 908 add ? "add" : "del", addr); 909 ionic_lif_deferred_enqueue(&lif->deferred, work); 910 } else { 911 netdev_dbg(lif->netdev, "rx_filter %s %pM\n", 912 add ? 
"add" : "del", addr); 913 if (add) 914 return ionic_lif_addr_add(lif, addr); 915 else 916 return ionic_lif_addr_del(lif, addr); 917 } 918 919 return 0; 920 } 921 922 static int ionic_addr_add(struct net_device *netdev, const u8 *addr) 923 { 924 return ionic_lif_addr(netdev_priv(netdev), addr, true); 925 } 926 927 static int ionic_addr_del(struct net_device *netdev, const u8 *addr) 928 { 929 return ionic_lif_addr(netdev_priv(netdev), addr, false); 930 } 931 932 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode) 933 { 934 struct ionic_admin_ctx ctx = { 935 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 936 .cmd.rx_mode_set = { 937 .opcode = IONIC_CMD_RX_MODE_SET, 938 .lif_index = cpu_to_le16(lif->index), 939 .rx_mode = cpu_to_le16(rx_mode), 940 }, 941 }; 942 char buf[128]; 943 int err; 944 int i; 945 #define REMAIN(__x) (sizeof(buf) - (__x)) 946 947 i = snprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:", 948 lif->rx_mode, rx_mode); 949 if (rx_mode & IONIC_RX_MODE_F_UNICAST) 950 i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST"); 951 if (rx_mode & IONIC_RX_MODE_F_MULTICAST) 952 i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST"); 953 if (rx_mode & IONIC_RX_MODE_F_BROADCAST) 954 i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST"); 955 if (rx_mode & IONIC_RX_MODE_F_PROMISC) 956 i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC"); 957 if (rx_mode & IONIC_RX_MODE_F_ALLMULTI) 958 i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI"); 959 netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf); 960 961 err = ionic_adminq_post_wait(lif, &ctx); 962 if (err) 963 netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n", 964 rx_mode, err); 965 else 966 lif->rx_mode = rx_mode; 967 } 968 969 static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode) 970 { 971 struct ionic_deferred_work *work; 972 973 if (in_interrupt()) { 974 work = kzalloc(sizeof(*work), GFP_ATOMIC); 975 if (!work) { 976 
netdev_err(lif->netdev, "%s OOM\n", __func__); 977 return; 978 } 979 work->type = IONIC_DW_TYPE_RX_MODE; 980 work->rx_mode = rx_mode; 981 netdev_dbg(lif->netdev, "deferred: rx_mode\n"); 982 ionic_lif_deferred_enqueue(&lif->deferred, work); 983 } else { 984 ionic_lif_rx_mode(lif, rx_mode); 985 } 986 } 987 988 static void ionic_set_rx_mode(struct net_device *netdev) 989 { 990 struct ionic_lif *lif = netdev_priv(netdev); 991 struct ionic_identity *ident; 992 unsigned int nfilters; 993 unsigned int rx_mode; 994 995 ident = &lif->ionic->ident; 996 997 rx_mode = IONIC_RX_MODE_F_UNICAST; 998 rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0; 999 rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0; 1000 rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0; 1001 rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0; 1002 1003 /* sync unicast addresses 1004 * next check to see if we're in an overflow state 1005 * if so, we track that we overflowed and enable NIC PROMISC 1006 * else if the overflow is set and not needed 1007 * we remove our overflow flag and check the netdev flags 1008 * to see if we can disable NIC PROMISC 1009 */ 1010 __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del); 1011 nfilters = le32_to_cpu(ident->lif.eth.max_ucast_filters); 1012 if (netdev_uc_count(netdev) + 1 > nfilters) { 1013 rx_mode |= IONIC_RX_MODE_F_PROMISC; 1014 lif->uc_overflow = true; 1015 } else if (lif->uc_overflow) { 1016 lif->uc_overflow = false; 1017 if (!(netdev->flags & IFF_PROMISC)) 1018 rx_mode &= ~IONIC_RX_MODE_F_PROMISC; 1019 } 1020 1021 /* same for multicast */ 1022 __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del); 1023 nfilters = le32_to_cpu(ident->lif.eth.max_mcast_filters); 1024 if (netdev_mc_count(netdev) > nfilters) { 1025 rx_mode |= IONIC_RX_MODE_F_ALLMULTI; 1026 lif->mc_overflow = true; 1027 } else if (lif->mc_overflow) { 1028 lif->mc_overflow = false; 1029 if (!(netdev->flags & 
									  IFF_ALLMULTI))
			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
	}

	/* Only poke the device when the requested mode actually differs */
	if (lif->rx_mode != rx_mode)
		_ionic_lif_rx_mode(lif, rx_mode);
}

/* Translate kernel netdev feature flags into the device's
 * IONIC_ETH_HW_* feature bits, returned little-endian, ready to be
 * dropped into a lif_setattr admin command.
 */
static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
{
	u64 wanted = 0;

	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
	if (features & NETIF_F_RXHASH)
		wanted |= IONIC_ETH_HW_RX_HASH;
	if (features & NETIF_F_RXCSUM)
		wanted |= IONIC_ETH_HW_RX_CSUM;
	if (features & NETIF_F_SG)
		wanted |= IONIC_ETH_HW_TX_SG;
	if (features & NETIF_F_HW_CSUM)
		wanted |= IONIC_ETH_HW_TX_CSUM;
	if (features & NETIF_F_TSO)
		wanted |= IONIC_ETH_HW_TSO;
	if (features & NETIF_F_TSO6)
		wanted |= IONIC_ETH_HW_TSO_IPV6;
	if (features & NETIF_F_TSO_ECN)
		wanted |= IONIC_ETH_HW_TSO_ECN;
	if (features & NETIF_F_GSO_GRE)
		wanted |= IONIC_ETH_HW_TSO_GRE;
	if (features & NETIF_F_GSO_GRE_CSUM)
		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
	if (features & NETIF_F_GSO_IPXIP4)
		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
	if (features & NETIF_F_GSO_IPXIP6)
		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
	if (features & NETIF_F_GSO_UDP_TUNNEL)
		wanted |= IONIC_ETH_HW_TSO_UDP;
	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;

	return cpu_to_le64(wanted);
}

/* Push the wanted feature set to the device via LIF_SETATTR and record
 * in lif->hw_features the subset both we and the firmware agreed on.
 * Returns 0 or a negative adminq error.
 */
static int ionic_set_nic_features(struct ionic_lif *lif,
				  netdev_features_t features)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_FEATURES,
		},
	};
	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
			 IONIC_ETH_HW_VLAN_RX_STRIP |
			 IONIC_ETH_HW_VLAN_RX_FILTER;
	int err;

	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* both fields are __le64; AND first, then convert the intersection */
	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
				       ctx.comp.lif_setattr.features);

	/* warn once if we asked for vlan offloads and the device refused */
	if ((vlan_flags & features) &&
	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		dev_dbg(dev, "feature ETH_HW_TSO\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");

	return 0;
}

/* Negotiate our default feature set with the device, then advertise to
 * the stack only what the hardware actually accepted.
 */
static int ionic_init_nic_features(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	netdev_features_t features;
	int err;

	/* set up what we expect to support by default */
	features = NETIF_F_HW_VLAN_CTAG_TX |
		   NETIF_F_HW_VLAN_CTAG_RX |
		   NETIF_F_HW_VLAN_CTAG_FILTER |
		   NETIF_F_RXHASH |
		   NETIF_F_SG |
		   NETIF_F_HW_CSUM |
		   NETIF_F_RXCSUM |
		   NETIF_F_TSO |
		   NETIF_F_TSO6 |
		   NETIF_F_TSO_ECN;

	err = ionic_set_nic_features(lif, features);
	if (err)
		return err;

	/* tell the netdev what we actually can support */
	netdev->features |= NETIF_F_HIGHDMA;

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		netdev->hw_features |= NETIF_F_SG;

	/* checksum/TSO bits go into hw_enc_features, folded into
	 * hw_features further below
	 */
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		netdev->hw_enc_features |= NETIF_F_RXCSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		netdev->hw_enc_features |= NETIF_F_TSO;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		netdev->hw_enc_features |= NETIF_F_TSO6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		netdev->hw_enc_features |=
					   NETIF_F_GSO_GRE;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= netdev->hw_enc_features;
	netdev->features |= netdev->hw_features;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	return 0;
}

/* .ndo_set_features: forward the stack's requested feature set to the
 * device for (re)negotiation.
 */
static int ionic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
		   __func__, (u64)lif->netdev->features, (u64)features);

	err = ionic_set_nic_features(lif, features);

	return err;
}

/* .ndo_set_mac_address: delete the old unicast filter (if any), commit
 * the new address, and install a filter for it.  Returns 0 or a
 * negative errno from the address-change / filter-add paths.
 */
static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
	struct sockaddr *addr = sa;
	u8 *mac;
	int err;

	mac = (u8 *)addr->sa_data;
	if (ether_addr_equal(netdev->dev_addr, mac))
		return 0;

	err = eth_prepare_mac_addr_change(netdev, addr);
	if (err)
		return err;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		netdev_info(netdev, "deleting mac addr %pM\n",
			    netdev->dev_addr);
		ionic_addr_del(netdev, netdev->dev_addr);
	}

	eth_commit_mac_addr_change(netdev, addr);
	netdev_info(netdev, "updating mac addr %pM\n", mac);

	return ionic_addr_add(netdev, mac);
}

/* .ndo_change_mtu: tell the device the new MTU, then tear down and
 * rebuild the queues so rx buffers get resized.
 */
static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MTU,
			.mtu = cpu_to_le32(new_mtu),
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	netdev->mtu = new_mtu;
	err = ionic_reset_queues(lif);

	return err;
}

/* Deferred Tx-timeout recovery: bounce the queues under rtnl. */
static void ionic_tx_timeout_work(struct work_struct *ws)
{
	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);

	netdev_info(lif->netdev, "Tx Timeout recovery\n");

	rtnl_lock();
	ionic_reset_queues(lif);
	rtnl_unlock();
}

/* .ndo_tx_timeout: can't sleep here, so punt recovery to a work item. */
static void ionic_tx_timeout(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	schedule_work(&lif->tx_timeout_work);
}

/* .ndo_vlan_rx_add_vid: install a VLAN match filter on the device and
 * remember it in the driver's rx_filters list.
 */
static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
			.vlan.vlan = cpu_to_le16(vid),
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* NOTE(review): filter_id is a little-endian field; this debug
	 * print lacks a le32_to_cpu and would show a byte-swapped id on
	 * big-endian hosts -- confirm against the comp struct definition.
	 */
	netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid,
		   ctx.comp.rx_filter_add.filter_id);

	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}

/* .ndo_vlan_rx_kill_vid: look up the saved filter for this vid, free
 * the local bookkeeping, and ask the device to delete it.
 */
static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				  u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index =
cpu_to_le16(lif->index), 1325 }, 1326 }; 1327 struct ionic_rx_filter *f; 1328 1329 spin_lock_bh(&lif->rx_filters.lock); 1330 1331 f = ionic_rx_filter_by_vlan(lif, vid); 1332 if (!f) { 1333 spin_unlock_bh(&lif->rx_filters.lock); 1334 return -ENOENT; 1335 } 1336 1337 netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid, 1338 le32_to_cpu(ctx.cmd.rx_filter_del.filter_id)); 1339 1340 ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); 1341 ionic_rx_filter_free(lif, f); 1342 spin_unlock_bh(&lif->rx_filters.lock); 1343 1344 return ionic_adminq_post_wait(lif, &ctx); 1345 } 1346 1347 int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types, 1348 const u8 *key, const u32 *indir) 1349 { 1350 struct ionic_admin_ctx ctx = { 1351 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1352 .cmd.lif_setattr = { 1353 .opcode = IONIC_CMD_LIF_SETATTR, 1354 .attr = IONIC_LIF_ATTR_RSS, 1355 .rss.types = cpu_to_le16(types), 1356 .rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa), 1357 }, 1358 }; 1359 unsigned int i, tbl_sz; 1360 1361 lif->rss_types = types; 1362 1363 if (key) 1364 memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE); 1365 1366 if (indir) { 1367 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 1368 for (i = 0; i < tbl_sz; i++) 1369 lif->rss_ind_tbl[i] = indir[i]; 1370 } 1371 1372 memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key, 1373 IONIC_RSS_HASH_KEY_SIZE); 1374 1375 return ionic_adminq_post_wait(lif, &ctx); 1376 } 1377 1378 static int ionic_lif_rss_init(struct ionic_lif *lif) 1379 { 1380 u8 rss_key[IONIC_RSS_HASH_KEY_SIZE]; 1381 unsigned int tbl_sz; 1382 unsigned int i; 1383 1384 netdev_rss_key_fill(rss_key, IONIC_RSS_HASH_KEY_SIZE); 1385 1386 lif->rss_types = IONIC_RSS_TYPE_IPV4 | 1387 IONIC_RSS_TYPE_IPV4_TCP | 1388 IONIC_RSS_TYPE_IPV4_UDP | 1389 IONIC_RSS_TYPE_IPV6 | 1390 IONIC_RSS_TYPE_IPV6_TCP | 1391 IONIC_RSS_TYPE_IPV6_UDP; 1392 1393 /* Fill indirection table with 'default' values */ 1394 tbl_sz = 
		 le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	for (i = 0; i < tbl_sz; i++)
		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);

	return ionic_lif_rss_config(lif, lif->rss_types, rss_key, NULL);
}

/* Disable RSS by clearing all hash types on the device. */
static int ionic_lif_rss_deinit(struct ionic_lif *lif)
{
	return ionic_lif_rss_config(lif, 0x0, NULL, NULL);
}

/* Stop the hardware from processing all tx/rx queue pairs. */
static void ionic_txrx_disable(struct ionic_lif *lif)
{
	unsigned int i;

	for (i = 0; i < lif->nxqs; i++) {
		ionic_qcq_disable(lif->txqcqs[i].qcq);
		ionic_qcq_disable(lif->rxqcqs[i].qcq);
	}
}

/* Deinit each queue pair and drain any completions/buffers still
 * pending; assumes the queues were already disabled.
 */
static void ionic_txrx_deinit(struct ionic_lif *lif)
{
	unsigned int i;

	for (i = 0; i < lif->nxqs; i++) {
		ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
		ionic_tx_flush(&lif->txqcqs[i].qcq->cq);

		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
		ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
		ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
	}
}

/* Release the memory behind every tx/rx queue pair. */
static void ionic_txrx_free(struct ionic_lif *lif)
{
	unsigned int i;

	for (i = 0; i < lif->nxqs; i++) {
		ionic_qcq_free(lif, lif->txqcqs[i].qcq);
		lif->txqcqs[i].qcq = NULL;

		ionic_qcq_free(lif, lif->rxqcqs[i].qcq);
		lif->rxqcqs[i].qcq = NULL;
	}
}

/* Allocate all tx then all rx queue/completion pairs.  Rx queues own
 * the interrupt; each tx queue is linked to its partner rx interrupt.
 * On any failure everything allocated so far is freed.
 */
static int ionic_txrx_alloc(struct ionic_lif *lif)
{
	unsigned int flags;
	unsigned int i;
	int err = 0;

	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
				      lif->ntxq_descs,
				      sizeof(struct ionic_txq_desc),
				      sizeof(struct ionic_txq_comp),
				      sizeof(struct ionic_txq_sg_desc),
				      lif->kern_pid, &lif->txqcqs[i].qcq);
		if (err)
			goto err_out;

		lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
	}

	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_INTR;
	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
				      lif->nrxq_descs,
				      sizeof(struct ionic_rxq_desc),
				      sizeof(struct ionic_rxq_comp),
				      0, lif->kern_pid, &lif->rxqcqs[i].qcq);
		if (err)
			goto err_out;

		lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;

		/* program the default coalesce, then pair tx with rx irq */
		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     lif->rxqcqs[i].qcq->intr.index,
				     lif->rx_coalesce_hw);
		ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
					  lif->txqcqs[i].qcq);
	}

	return 0;

err_out:
	ionic_txrx_free(lif);

	return err;
}

/* Init each queue pair on the device, set up RSS if enabled, and sync
 * the rx filter/mode state.  On failure, deinit whatever succeeded.
 */
static int ionic_txrx_init(struct ionic_lif *lif)
{
	unsigned int i;
	int err;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_lif_txq_init(lif, lif->txqcqs[i].qcq);
		if (err)
			goto err_out;

		err = ionic_lif_rxq_init(lif, lif->rxqcqs[i].qcq);
		if (err) {
			/* undo the tx init of this same index first */
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
			goto err_out;
		}
	}

	if (lif->netdev->features & NETIF_F_RXHASH)
		ionic_lif_rss_init(lif);

	ionic_set_rx_mode(lif->netdev);

	return 0;

err_out:
	while (i--) {
		ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
	}

	return err;
}

/* Enable each queue pair, pre-filling rx buffers before enabling rx.
 * On failure, disable everything enabled so far.
 */
static int ionic_txrx_enable(struct ionic_lif *lif)
{
	int i, err;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_enable(lif->txqcqs[i].qcq);
		if (err)
			goto err_out;

		ionic_rx_fill(&lif->rxqcqs[i].qcq->q);
		err = ionic_qcq_enable(lif->rxqcqs[i].qcq);
		if (err) {
			ionic_qcq_disable(lif->txqcqs[i].qcq);
			goto err_out;
		}
	}

	return 0;

err_out:
	while (i--) {
		ionic_qcq_disable(lif->rxqcqs[i].qcq);
		ionic_qcq_disable(lif->txqcqs[i].qcq);
	}

	return err;
}

/* .ndo_open: allocate, init and enable the data queues, then let the
 * stack start transmitting once link is confirmed.
 */
int ionic_open(struct net_device *netdev)
{
	struct ionic_lif *lif =
				 netdev_priv(netdev);
	int err;

	netif_carrier_off(netdev);

	err = ionic_txrx_alloc(lif);
	if (err)
		return err;

	err = ionic_txrx_init(lif);
	if (err)
		goto err_txrx_free;

	err = ionic_txrx_enable(lif);
	if (err)
		goto err_txrx_deinit;

	netif_set_real_num_tx_queues(netdev, lif->nxqs);
	netif_set_real_num_rx_queues(netdev, lif->nxqs);

	set_bit(IONIC_LIF_UP, lif->state);

	/* only wake tx if link is already up */
	ionic_link_status_check_request(lif);
	if (netif_carrier_ok(netdev))
		netif_tx_wake_all_queues(netdev);

	return 0;

err_txrx_deinit:
	ionic_txrx_deinit(lif);
err_txrx_free:
	ionic_txrx_free(lif);
	return err;
}

/* .ndo_stop: quiesce and tear down the data path.  Safe to call when
 * already down; returns 0 in that case.
 */
int ionic_stop(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err = 0;

	if (!test_bit(IONIC_LIF_UP, lif->state)) {
		dev_dbg(lif->ionic->dev, "%s: %s state=DOWN\n",
			__func__, lif->name);
		return 0;
	}
	dev_dbg(lif->ionic->dev, "%s: %s state=UP\n", __func__, lif->name);
	clear_bit(IONIC_LIF_UP, lif->state);

	/* carrier off before disabling queues to avoid watchdog timeout */
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);
	netif_tx_disable(netdev);

	ionic_txrx_disable(lif);
	ionic_lif_quiesce(lif);
	ionic_txrx_deinit(lif);
	ionic_txrx_free(lif);

	return err;
}

/* netdev callbacks for the ionic lif */
static const struct net_device_ops ionic_netdev_ops = {
	.ndo_open               = ionic_open,
	.ndo_stop               = ionic_stop,
	.ndo_start_xmit		= ionic_start_xmit,
	.ndo_get_stats64	= ionic_get_stats64,
	.ndo_set_rx_mode	= ionic_set_rx_mode,
	.ndo_set_features	= ionic_set_features,
	.ndo_set_mac_address	= ionic_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout         = ionic_tx_timeout,
	.ndo_change_mtu         = ionic_change_mtu,
	.ndo_vlan_rx_add_vid    = ionic_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid, 1627 }; 1628 1629 int ionic_reset_queues(struct ionic_lif *lif) 1630 { 1631 bool running; 1632 int err = 0; 1633 1634 /* Put off the next watchdog timeout */ 1635 netif_trans_update(lif->netdev); 1636 1637 err = ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET); 1638 if (err) 1639 return err; 1640 1641 running = netif_running(lif->netdev); 1642 if (running) 1643 err = ionic_stop(lif->netdev); 1644 if (!err && running) 1645 ionic_open(lif->netdev); 1646 1647 clear_bit(IONIC_LIF_QUEUE_RESET, lif->state); 1648 1649 return err; 1650 } 1651 1652 static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index) 1653 { 1654 struct device *dev = ionic->dev; 1655 struct net_device *netdev; 1656 struct ionic_lif *lif; 1657 int tbl_sz; 1658 int err; 1659 1660 netdev = alloc_etherdev_mqs(sizeof(*lif), 1661 ionic->ntxqs_per_lif, ionic->ntxqs_per_lif); 1662 if (!netdev) { 1663 dev_err(dev, "Cannot allocate netdev, aborting\n"); 1664 return ERR_PTR(-ENOMEM); 1665 } 1666 1667 SET_NETDEV_DEV(netdev, dev); 1668 1669 lif = netdev_priv(netdev); 1670 lif->netdev = netdev; 1671 ionic->master_lif = lif; 1672 netdev->netdev_ops = &ionic_netdev_ops; 1673 ionic_ethtool_set_ops(netdev); 1674 1675 netdev->watchdog_timeo = 2 * HZ; 1676 netdev->min_mtu = IONIC_MIN_MTU; 1677 netdev->max_mtu = IONIC_MAX_MTU; 1678 1679 lif->neqs = ionic->neqs_per_lif; 1680 lif->nxqs = ionic->ntxqs_per_lif; 1681 1682 lif->ionic = ionic; 1683 lif->index = index; 1684 lif->ntxq_descs = IONIC_DEF_TXRX_DESC; 1685 lif->nrxq_descs = IONIC_DEF_TXRX_DESC; 1686 1687 /* Convert the default coalesce value to actual hw resolution */ 1688 lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT; 1689 lif->rx_coalesce_hw = ionic_coal_hw_to_usec(lif->ionic, 1690 lif->rx_coalesce_usecs); 1691 1692 snprintf(lif->name, sizeof(lif->name), "lif%u", index); 1693 1694 spin_lock_init(&lif->adminq_lock); 1695 1696 spin_lock_init(&lif->deferred.lock); 1697 
INIT_LIST_HEAD(&lif->deferred.list); 1698 INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work); 1699 1700 /* allocate lif info */ 1701 lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE); 1702 lif->info = dma_alloc_coherent(dev, lif->info_sz, 1703 &lif->info_pa, GFP_KERNEL); 1704 if (!lif->info) { 1705 dev_err(dev, "Failed to allocate lif info, aborting\n"); 1706 err = -ENOMEM; 1707 goto err_out_free_netdev; 1708 } 1709 1710 /* allocate queues */ 1711 err = ionic_qcqs_alloc(lif); 1712 if (err) 1713 goto err_out_free_lif_info; 1714 1715 /* allocate rss indirection table */ 1716 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 1717 lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz; 1718 lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz, 1719 &lif->rss_ind_tbl_pa, 1720 GFP_KERNEL); 1721 1722 if (!lif->rss_ind_tbl) { 1723 err = -ENOMEM; 1724 dev_err(dev, "Failed to allocate rss indirection table, aborting\n"); 1725 goto err_out_free_qcqs; 1726 } 1727 1728 list_add_tail(&lif->list, &ionic->lifs); 1729 1730 return lif; 1731 1732 err_out_free_qcqs: 1733 ionic_qcqs_free(lif); 1734 err_out_free_lif_info: 1735 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); 1736 lif->info = NULL; 1737 lif->info_pa = 0; 1738 err_out_free_netdev: 1739 free_netdev(lif->netdev); 1740 lif = NULL; 1741 1742 return ERR_PTR(err); 1743 } 1744 1745 int ionic_lifs_alloc(struct ionic *ionic) 1746 { 1747 struct ionic_lif *lif; 1748 1749 INIT_LIST_HEAD(&ionic->lifs); 1750 1751 /* only build the first lif, others are for later features */ 1752 set_bit(0, ionic->lifbits); 1753 lif = ionic_lif_alloc(ionic, 0); 1754 1755 return PTR_ERR_OR_ZERO(lif); 1756 } 1757 1758 static void ionic_lif_reset(struct ionic_lif *lif) 1759 { 1760 struct ionic_dev *idev = &lif->ionic->idev; 1761 1762 mutex_lock(&lif->ionic->dev_cmd_lock); 1763 ionic_dev_cmd_lif_reset(idev, lif->index); 1764 ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 1765 
	mutex_unlock(&lif->ionic->dev_cmd_lock);
}

/* Free everything ionic_lif_alloc() built: RSS table, queues, lif
 * info, kernel doorbell page mapping, and finally the netdev itself.
 * Also resets the lif on the device between freeing queues and info.
 */
static void ionic_lif_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;

	/* free rss indirection table */
	dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
			  lif->rss_ind_tbl_pa);
	lif->rss_ind_tbl = NULL;
	lif->rss_ind_tbl_pa = 0;

	/* free queues */
	ionic_qcqs_free(lif);
	ionic_lif_reset(lif);

	/* free lif info */
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;

	/* unmap doorbell page */
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;
	kfree(lif->dbid_inuse);
	lif->dbid_inuse = NULL;

	/* free netdev & lif */
	ionic_debugfs_del_lif(lif);
	list_del(&lif->list);
	free_netdev(lif->netdev);
}

/* Free every lif on the device. */
void ionic_lifs_free(struct ionic *ionic)
{
	struct list_head *cur, *tmp;
	struct ionic_lif *lif;

	list_for_each_safe(cur, tmp, &ionic->lifs) {
		lif = list_entry(cur, struct ionic_lif, list);

		ionic_lif_free(lif);
	}
}

/* Undo ionic_lif_init(): filters, RSS, adminq/notifyq, device reset.
 * No-op if the lif was never initialized.
 */
static void ionic_lif_deinit(struct ionic_lif *lif)
{
	if (!test_bit(IONIC_LIF_INITED, lif->state))
		return;

	clear_bit(IONIC_LIF_INITED, lif->state);

	ionic_rx_filters_deinit(lif);
	ionic_lif_rss_deinit(lif);

	napi_disable(&lif->adminqcq->napi);
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
	ionic_lif_qcq_deinit(lif, lif->adminqcq);

	ionic_lif_reset(lif);
}

/* Deinit every lif on the device. */
void ionic_lifs_deinit(struct ionic *ionic)
{
	struct list_head *cur, *tmp;
	struct ionic_lif *lif;

	list_for_each_safe(cur, tmp, &ionic->lifs) {
		lif = list_entry(cur, struct ionic_lif, list);
		ionic_lif_deinit(lif);
	}
}

/* Bring up the admin queue via dev_cmd (the adminq can't bootstrap
 * itself), then hook up its napi and irq.  Returns 0 or negative errno.
 */
static int ionic_lif_adminq_init(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_q_init_comp comp;
	struct ionic_dev *idev;
	struct ionic_qcq *qcq;
	struct ionic_queue *q;
	int err;

	idev = &lif->ionic->idev;
	qcq = lif->adminqcq;
	q = &qcq->q;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err) {
		netdev_err(lif->netdev, "adminq init failed %d\n", err);
		return err;
	}

	/* record the queue identity the device assigned */
	q->hw_type = comp.hw_type;
	q->hw_index = le32_to_cpu(comp.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);

	netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
		       NAPI_POLL_WEIGHT);

	err = ionic_request_irq(lif, qcq);
	if (err) {
		netdev_warn(lif->netdev, "adminq irq request failed %d\n", err);
		netif_napi_del(&qcq->napi);
		return err;
	}

	napi_enable(&qcq->napi);

	if (qcq->flags & IONIC_QCQ_F_INTR)
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);

	qcq->flags |= IONIC_QCQ_F_INITED;

	ionic_debugfs_add_qcq(lif, qcq);

	return 0;
}

/* Bring up the notify queue over the (now working) adminq.  The
 * notifyq shares the adminq's interrupt.
 */
static int ionic_lif_notifyq_init(struct ionic_lif *lif)
{
	struct ionic_qcq *qcq = lif->notifyqcq;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	int err;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_ENA),
			.intr_index =
				      cpu_to_le16(lif->adminqcq->intr.index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
		}
	};

	dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* record the queue identity the device assigned */
	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);

	/* preset the callback info */
	q->info[0].cb_arg = lif;

	qcq->flags |= IONIC_QCQ_F_INITED;

	ionic_debugfs_add_qcq(lif, qcq);

	return 0;
}

/* Get the station MAC from the device and install it as the netdev
 * address, replacing any previous filter.  Returns 0 or negative errno.
 */
static int ionic_station_set(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_getattr = {
			.opcode = IONIC_CMD_LIF_GETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};
	struct sockaddr addr;
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
	/* NOTE(review): AF_INET here is just a placeholder family for the
	 * eth_*_mac_addr_change helpers -- confirm those ignore sa_family
	 */
	addr.sa_family = AF_INET;
	err = eth_prepare_mac_addr_change(netdev, &addr);
	if (err)
		return err;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n",
			   netdev->dev_addr);
		ionic_lif_addr(lif, netdev->dev_addr, false);
	}

	eth_commit_mac_addr_change(netdev, &addr);
	netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
		   netdev->dev_addr);
	ionic_lif_addr(lif, netdev->dev_addr, true);

	return 0;
}

/* Full lif bring-up: device-side lif init, doorbell setup, adminq,
 * notifyq, features, rx filters, and station address.  On failure the
 * partially-initialized pieces are unwound in reverse order.
 */
static int ionic_lif_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	struct ionic_lif_init_comp comp;
	int dbpage_num;
	int err;

	ionic_debugfs_add_lif(lif);

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err)
		return err;

	lif->hw_index = le16_to_cpu(comp.hw_index);

	/* now that we have the hw_index we can figure out our doorbell page */
	lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
	if (!lif->dbid_count) {
		dev_err(dev, "No doorbell pages, aborting\n");
		return -EINVAL;
	}

	lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
	if (!lif->dbid_inuse) {
		dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
		return -ENOMEM;
	}

	/* first doorbell id reserved for kernel (dbid aka pid == zero) */
	set_bit(0, lif->dbid_inuse);
	lif->kern_pid = 0;

	dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
	lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
	if (!lif->kern_dbpage) {
		dev_err(dev, "Cannot map dbpage, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dbid;
	}

	err = ionic_lif_adminq_init(lif);
	if (err)
		goto err_out_adminq_deinit;

	if (lif->ionic->nnqs_per_lif) {
		err = ionic_lif_notifyq_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_init_nic_features(lif);
	if (err)
		goto err_out_notifyq_deinit;

	err = ionic_rx_filters_init(lif);
	if (err)
		goto
err_out_notifyq_deinit; 2041 2042 err = ionic_station_set(lif); 2043 if (err) 2044 goto err_out_notifyq_deinit; 2045 2046 lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT; 2047 2048 set_bit(IONIC_LIF_INITED, lif->state); 2049 2050 INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work); 2051 2052 return 0; 2053 2054 err_out_notifyq_deinit: 2055 ionic_lif_qcq_deinit(lif, lif->notifyqcq); 2056 err_out_adminq_deinit: 2057 ionic_lif_qcq_deinit(lif, lif->adminqcq); 2058 ionic_lif_reset(lif); 2059 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); 2060 lif->kern_dbpage = NULL; 2061 err_out_free_dbid: 2062 kfree(lif->dbid_inuse); 2063 lif->dbid_inuse = NULL; 2064 2065 return err; 2066 } 2067 2068 int ionic_lifs_init(struct ionic *ionic) 2069 { 2070 struct list_head *cur, *tmp; 2071 struct ionic_lif *lif; 2072 int err; 2073 2074 list_for_each_safe(cur, tmp, &ionic->lifs) { 2075 lif = list_entry(cur, struct ionic_lif, list); 2076 err = ionic_lif_init(lif); 2077 if (err) 2078 return err; 2079 } 2080 2081 return 0; 2082 } 2083 2084 static void ionic_lif_notify_work(struct work_struct *ws) 2085 { 2086 } 2087 2088 static void ionic_lif_set_netdev_info(struct ionic_lif *lif) 2089 { 2090 struct ionic_admin_ctx ctx = { 2091 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 2092 .cmd.lif_setattr = { 2093 .opcode = IONIC_CMD_LIF_SETATTR, 2094 .index = cpu_to_le16(lif->index), 2095 .attr = IONIC_LIF_ATTR_NAME, 2096 }, 2097 }; 2098 2099 strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name, 2100 sizeof(ctx.cmd.lif_setattr.name)); 2101 2102 ionic_adminq_post_wait(lif, &ctx); 2103 } 2104 2105 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev) 2106 { 2107 if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit) 2108 return NULL; 2109 2110 return netdev_priv(netdev); 2111 } 2112 2113 static int ionic_lif_notify(struct notifier_block *nb, 2114 unsigned long event, void *info) 2115 { 2116 struct net_device *ndev = netdev_notifier_info_to_dev(info); 2117 struct 
ionic *ionic = container_of(nb, struct ionic, nb); 2118 struct ionic_lif *lif = ionic_netdev_lif(ndev); 2119 2120 if (!lif || lif->ionic != ionic) 2121 return NOTIFY_DONE; 2122 2123 switch (event) { 2124 case NETDEV_CHANGENAME: 2125 ionic_lif_set_netdev_info(lif); 2126 break; 2127 } 2128 2129 return NOTIFY_DONE; 2130 } 2131 2132 int ionic_lifs_register(struct ionic *ionic) 2133 { 2134 int err; 2135 2136 INIT_WORK(&ionic->nb_work, ionic_lif_notify_work); 2137 2138 ionic->nb.notifier_call = ionic_lif_notify; 2139 2140 err = register_netdevice_notifier(&ionic->nb); 2141 if (err) 2142 ionic->nb.notifier_call = NULL; 2143 2144 /* only register LIF0 for now */ 2145 err = register_netdev(ionic->master_lif->netdev); 2146 if (err) { 2147 dev_err(ionic->dev, "Cannot register net device, aborting\n"); 2148 return err; 2149 } 2150 2151 ionic_link_status_check_request(ionic->master_lif); 2152 ionic->master_lif->registered = true; 2153 2154 return 0; 2155 } 2156 2157 void ionic_lifs_unregister(struct ionic *ionic) 2158 { 2159 if (ionic->nb.notifier_call) { 2160 unregister_netdevice_notifier(&ionic->nb); 2161 cancel_work_sync(&ionic->nb_work); 2162 ionic->nb.notifier_call = NULL; 2163 } 2164 2165 /* There is only one lif ever registered in the 2166 * current model, so don't bother searching the 2167 * ionic->lif for candidates to unregister 2168 */ 2169 cancel_work_sync(&ionic->master_lif->deferred.work); 2170 cancel_work_sync(&ionic->master_lif->tx_timeout_work); 2171 if (ionic->master_lif->netdev->reg_state == NETREG_REGISTERED) 2172 unregister_netdev(ionic->master_lif->netdev); 2173 } 2174 2175 int ionic_lif_identify(struct ionic *ionic, u8 lif_type, 2176 union ionic_lif_identity *lid) 2177 { 2178 struct ionic_dev *idev = &ionic->idev; 2179 size_t sz; 2180 int err; 2181 2182 sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data)); 2183 2184 mutex_lock(&ionic->dev_cmd_lock); 2185 ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1); 2186 err = 
	      ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
	memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
	mutex_unlock(&ionic->dev_cmd_lock);
	if (err)
		return (err);

	dev_dbg(ionic->dev, "capabilities 0x%llx\n",
		le64_to_cpu(lid->capabilities));

	dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
		le32_to_cpu(lid->eth.max_ucast_filters));
	dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
		le32_to_cpu(lid->eth.max_mcast_filters));
	dev_dbg(ionic->dev, "eth.features 0x%llx\n",
		le64_to_cpu(lid->eth.config.features));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
	dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
	dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
	dev_dbg(ionic->dev, "eth.config.mtu %d\n",
		le32_to_cpu(lid->eth.config.mtu));

	return 0;
}

/* Decide how many queues and interrupt vectors this device gets,
 * capped by what the identity advertises, the online CPU count, and
 * what the OS will actually grant.  If the full request can't be met,
 * halve notifyqs, then event queues, then queue pairs, and retry.
 * Returns 0 or -ENOSPC when even the minimum can't be allocated.
 */
int ionic_lifs_size(struct ionic *ionic)
{
	struct ionic_identity *ident = &ionic->ident;
	unsigned int nintrs, dev_nintrs;
	union ionic_lif_config *lc;
	unsigned int ntxqs_per_lif;
	unsigned int nrxqs_per_lif;
	unsigned int neqs_per_lif;
	unsigned int nnqs_per_lif;
	unsigned int nxqs, neqs;
	unsigned int min_intrs;
	int err;

	lc = &ident->lif.eth.config;
	dev_nintrs = le32_to_cpu(ident->dev.nintrs);
	neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);

	nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
	nxqs = min(nxqs, num_online_cpus());
	neqs = min(neqs_per_lif, num_online_cpus());

try_again:
	/* interrupt usage:
	 * 1 for master lif adminq/notifyq
	 * 1 for each CPU for master lif TxRx queue pairs
	 * whatever's left is for RDMA queues
	 */
	nintrs = 1 + nxqs + neqs;
	min_intrs = 2; /* adminq + 1 TxRx queue pair */

	if (nintrs > dev_nintrs)
		goto try_fewer;

	err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
	if (err < 0 && err != -ENOSPC) {
		dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
		return err;
	}
	if (err == -ENOSPC)
		goto try_fewer;

	/* got fewer vectors than asked for; give them back and shrink */
	if (err != nintrs) {
		ionic_bus_free_irq_vectors(ionic);
		goto try_fewer;
	}

	ionic->nnqs_per_lif = nnqs_per_lif;
	ionic->neqs_per_lif = neqs;
	ionic->ntxqs_per_lif = nxqs;
	ionic->nrxqs_per_lif = nxqs;
	ionic->nintrs = nintrs;

	ionic_debugfs_add_sizes(ionic);

	return 0;

try_fewer:
	if (nnqs_per_lif > 1) {
		nnqs_per_lif >>= 1;
		goto try_again;
	}
	if (neqs > 1) {
		neqs >>= 1;
		goto try_again;
	}
	if (nxqs > 1) {
		nxqs >>= 1;
		goto try_again;
	}
	dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
	return -ENOSPC;
}