1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ 3 4 #include <linux/ethtool.h> 5 #include <linux/printk.h> 6 #include <linux/dynamic_debug.h> 7 #include <linux/netdevice.h> 8 #include <linux/etherdevice.h> 9 #include <linux/if_vlan.h> 10 #include <linux/rtnetlink.h> 11 #include <linux/interrupt.h> 12 #include <linux/pci.h> 13 #include <linux/cpumask.h> 14 15 #include "ionic.h" 16 #include "ionic_bus.h" 17 #include "ionic_lif.h" 18 #include "ionic_txrx.h" 19 #include "ionic_ethtool.h" 20 #include "ionic_debugfs.h" 21 22 /* queuetype support level */ 23 static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = { 24 [IONIC_QTYPE_ADMINQ] = 0, /* 0 = Base version with CQ support */ 25 [IONIC_QTYPE_NOTIFYQ] = 0, /* 0 = Base version */ 26 [IONIC_QTYPE_RXQ] = 0, /* 0 = Base version with CQ+SG support */ 27 [IONIC_QTYPE_TXQ] = 1, /* 0 = Base version with CQ+SG support 28 * 1 = ... with Tx SG version 1 29 */ 30 }; 31 32 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode); 33 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr); 34 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr); 35 static void ionic_link_status_check(struct ionic_lif *lif); 36 static void ionic_lif_handle_fw_down(struct ionic_lif *lif); 37 static void ionic_lif_handle_fw_up(struct ionic_lif *lif); 38 static void ionic_lif_set_netdev_info(struct ionic_lif *lif); 39 40 static void ionic_txrx_deinit(struct ionic_lif *lif); 41 static int ionic_txrx_init(struct ionic_lif *lif); 42 static int ionic_start_queues(struct ionic_lif *lif); 43 static void ionic_stop_queues(struct ionic_lif *lif); 44 static void ionic_lif_queue_identify(struct ionic_lif *lif); 45 46 static void ionic_dim_work(struct work_struct *work) 47 { 48 struct dim *dim = container_of(work, struct dim, work); 49 struct dim_cq_moder cur_moder; 50 struct ionic_qcq *qcq; 51 u32 new_coal; 52 53 cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix); 54 qcq = container_of(dim, struct ionic_qcq, dim); 55 new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec); 56 qcq->intr.dim_coal_hw = new_coal ? new_coal : 1; 57 dim->state = DIM_START_MEASURE; 58 } 59 60 static void ionic_lif_deferred_work(struct work_struct *work) 61 { 62 struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work); 63 struct ionic_deferred *def = &lif->deferred; 64 struct ionic_deferred_work *w = NULL; 65 66 do { 67 spin_lock_bh(&def->lock); 68 if (!list_empty(&def->list)) { 69 w = list_first_entry(&def->list, 70 struct ionic_deferred_work, list); 71 list_del(&w->list); 72 } 73 spin_unlock_bh(&def->lock); 74 75 if (!w) 76 break; 77 78 switch (w->type) { 79 case IONIC_DW_TYPE_RX_MODE: 80 ionic_lif_rx_mode(lif, w->rx_mode); 81 break; 82 case IONIC_DW_TYPE_RX_ADDR_ADD: 83 ionic_lif_addr_add(lif, w->addr); 84 break; 85 case IONIC_DW_TYPE_RX_ADDR_DEL: 86 ionic_lif_addr_del(lif, w->addr); 87 break; 88 case IONIC_DW_TYPE_LINK_STATUS: 89 ionic_link_status_check(lif); 90 break; 91 case IONIC_DW_TYPE_LIF_RESET: 92 if (w->fw_status) 93 ionic_lif_handle_fw_up(lif); 94 else 95 ionic_lif_handle_fw_down(lif); 96 break; 97 default: 98 break; 99 } 100 kfree(w); 101 w = NULL; 102 } while (true); 103 } 104 105 void ionic_lif_deferred_enqueue(struct ionic_deferred *def, 106 struct ionic_deferred_work *work) 107 { 108 spin_lock_bh(&def->lock); 109 list_add_tail(&work->list, &def->list); 110 spin_unlock_bh(&def->lock); 111 schedule_work(&def->work); 112 } 113 114 static void ionic_link_status_check(struct ionic_lif *lif) 115 { 116 struct net_device *netdev = lif->netdev; 117 u16 link_status; 118 bool link_up; 119 120 if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state)) 121 return; 122 123 /* Don't put carrier back up if we're in a broken state */ 124 if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) { 125 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state); 126 return; 127 } 128 129 link_status = le16_to_cpu(lif->info->status.link_status); 130 link_up = link_status == IONIC_PORT_OPER_STATUS_UP; 131 132 if (link_up) { 133 int err = 0; 134 135 if (netdev->flags & IFF_UP && netif_running(netdev)) { 136 mutex_lock(&lif->queue_lock); 137 err = ionic_start_queues(lif); 138 if (err) { 139 netdev_err(lif->netdev, 140 "Failed to start queues: %d\n", err); 141 set_bit(IONIC_LIF_F_BROKEN, lif->state); 142 netif_carrier_off(lif->netdev); 143 } 144 mutex_unlock(&lif->queue_lock); 145 } 146 147 if (!err && !netif_carrier_ok(netdev)) { 148 ionic_port_identify(lif->ionic); 149 netdev_info(netdev, "Link up - %d Gbps\n", 150 le32_to_cpu(lif->info->status.link_speed) / 1000); 151 netif_carrier_on(netdev); 152 } 153 } else { 154 if (netif_carrier_ok(netdev)) { 155 netdev_info(netdev, "Link down\n"); 156 netif_carrier_off(netdev); 157 } 158 159 if (netdev->flags & IFF_UP && netif_running(netdev)) { 160 mutex_lock(&lif->queue_lock); 161 ionic_stop_queues(lif); 162 mutex_unlock(&lif->queue_lock); 163 } 164 } 165 166 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state); 167 } 168 169 void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep) 170 { 171 struct ionic_deferred_work *work; 172 173 /* we only need one request outstanding at a time */ 174 if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state)) 175 return; 176 177 if (!can_sleep) { 178 work = kzalloc(sizeof(*work), GFP_ATOMIC); 179 if (!work) { 180 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state); 181 return; 182 } 183 184 work->type = IONIC_DW_TYPE_LINK_STATUS; 185 ionic_lif_deferred_enqueue(&lif->deferred, work); 186 } else { 187 ionic_link_status_check(lif); 188 } 189 } 190 191 static irqreturn_t ionic_isr(int irq, void *data) 192 { 193 struct napi_struct *napi = data; 194 195 napi_schedule_irqoff(napi); 196 197 return IRQ_HANDLED; 198 } 199 200 static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq) 201 { 202 struct ionic_intr_info *intr = &qcq->intr; 203 struct device *dev = lif->ionic->dev; 204 struct ionic_queue *q = &qcq->q; 205 const char *name; 206 207 if (lif->registered) 208 name = lif->netdev->name; 209 else 210 name = dev_name(dev); 211 212 snprintf(intr->name, sizeof(intr->name), 213 "%s-%s-%s", IONIC_DRV_NAME, name, q->name); 214 215 return devm_request_irq(dev, intr->vector, ionic_isr, 216 0, intr->name, &qcq->napi); 217 } 218 219 static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr) 220 { 221 struct ionic *ionic = lif->ionic; 222 int index; 223 224 index = find_first_zero_bit(ionic->intrs, ionic->nintrs); 225 if (index == ionic->nintrs) { 226 netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n", 227 __func__, index, ionic->nintrs); 228 return -ENOSPC; 229 } 230 231 set_bit(index, ionic->intrs); 232 ionic_intr_init(&ionic->idev, intr, index); 233 234 return 0; 235 } 236 237 static void ionic_intr_free(struct ionic *ionic, int index) 238 { 239 if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs) 240 clear_bit(index, ionic->intrs); 241 } 242 243 static int ionic_qcq_enable(struct ionic_qcq *qcq) 244 { 245 struct ionic_queue *q = &qcq->q; 246 struct ionic_lif *lif = q->lif; 247 struct ionic_dev *idev; 248 struct device *dev; 249 250 struct ionic_admin_ctx ctx = { 251 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 252 .cmd.q_control = { 253 .opcode = IONIC_CMD_Q_CONTROL, 254 .lif_index = cpu_to_le16(lif->index), 255 .type = q->type, 256 .index = cpu_to_le32(q->index), 257 .oper = IONIC_Q_ENABLE, 258 }, 259 }; 260 261 idev = &lif->ionic->idev; 262 dev = lif->ionic->dev; 263 264 dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n", 265 ctx.cmd.q_control.index, ctx.cmd.q_control.type); 266 267 if (qcq->flags & IONIC_QCQ_F_INTR) { 268 irq_set_affinity_hint(qcq->intr.vector, 269 &qcq->intr.affinity_mask); 270 napi_enable(&qcq->napi); 271 ionic_intr_clean(idev->intr_ctrl, qcq->intr.index); 272 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 273 IONIC_INTR_MASK_CLEAR); 274 } 275 276 return ionic_adminq_post_wait(lif, &ctx); 277 } 278 279 static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw) 280 { 281 struct ionic_queue *q; 282 struct ionic_lif *lif; 283 int err = 0; 284 285 struct ionic_admin_ctx ctx = { 286 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 287 .cmd.q_control = { 288 .opcode = IONIC_CMD_Q_CONTROL, 289 .oper = IONIC_Q_DISABLE, 290 }, 291 }; 292 293 if (!qcq) 294 return -ENXIO; 295 296 q = &qcq->q; 297 lif = q->lif; 298 299 if (qcq->flags & IONIC_QCQ_F_INTR) { 300 struct ionic_dev *idev = &lif->ionic->idev; 301 302 cancel_work_sync(&qcq->dim.work); 303 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 304 IONIC_INTR_MASK_SET); 305 synchronize_irq(qcq->intr.vector); 306 irq_set_affinity_hint(qcq->intr.vector, NULL); 307 napi_disable(&qcq->napi); 308 } 309 310 if (send_to_hw) { 311 ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index); 312 ctx.cmd.q_control.type = q->type; 313 ctx.cmd.q_control.index = cpu_to_le32(q->index); 314 dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n", 315 ctx.cmd.q_control.index, ctx.cmd.q_control.type); 316 317 err = ionic_adminq_post_wait(lif, &ctx); 318 } 319 320 return err; 321 } 322 323 static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq) 324 { 325 struct ionic_dev *idev = &lif->ionic->idev; 326 327 if (!qcq) 328 return; 329 330 if (!(qcq->flags & IONIC_QCQ_F_INITED)) 331 return; 332 333 if (qcq->flags & IONIC_QCQ_F_INTR) { 334 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 335 IONIC_INTR_MASK_SET); 336 netif_napi_del(&qcq->napi); 337 } 338 339 qcq->flags &= ~IONIC_QCQ_F_INITED; 340 } 341 342 static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq) 343 { 344 if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0) 345 return; 346 347 irq_set_affinity_hint(qcq->intr.vector, NULL); 348 devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi); 349 qcq->intr.vector = 0; 350 ionic_intr_free(lif->ionic, qcq->intr.index); 351 qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED; 352 } 353 354 static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq) 355 { 356 struct device *dev = lif->ionic->dev; 357 358 if (!qcq) 359 return; 360 361 ionic_debugfs_del_qcq(qcq); 362 363 if (qcq->q_base) { 364 dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa); 365 qcq->q_base = NULL; 366 qcq->q_base_pa = 0; 367 } 368 369 if (qcq->cq_base) { 370 dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa); 371 qcq->cq_base = NULL; 372 qcq->cq_base_pa = 0; 373 } 374 375 if (qcq->sg_base) { 376 dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa); 377 qcq->sg_base = NULL; 378 qcq->sg_base_pa = 0; 379 } 380 381 ionic_qcq_intr_free(lif, qcq); 382 383 if (qcq->cq.info) { 384 devm_kfree(dev, qcq->cq.info); 385 qcq->cq.info = NULL; 386 } 387 if (qcq->q.info) { 388 devm_kfree(dev, qcq->q.info); 389 qcq->q.info = NULL; 390 } 391 } 392 393 static void ionic_qcqs_free(struct ionic_lif *lif) 394 { 395 struct device *dev = lif->ionic->dev; 396 struct ionic_qcq *adminqcq; 397 unsigned long irqflags; 398 399 if (lif->notifyqcq) { 400 ionic_qcq_free(lif, lif->notifyqcq); 401 devm_kfree(dev, lif->notifyqcq); 402 lif->notifyqcq = NULL; 403 } 404 405 if (lif->adminqcq) { 406 spin_lock_irqsave(&lif->adminq_lock, irqflags); 407 adminqcq = READ_ONCE(lif->adminqcq); 408 lif->adminqcq = NULL; 409 spin_unlock_irqrestore(&lif->adminq_lock, irqflags); 410 if (adminqcq) { 411 ionic_qcq_free(lif, adminqcq); 412 devm_kfree(dev, adminqcq); 413 } 414 } 415 416 if (lif->rxqcqs) { 417 devm_kfree(dev, lif->rxqstats); 418 lif->rxqstats = NULL; 419 devm_kfree(dev, lif->rxqcqs); 420 lif->rxqcqs = NULL; 421 } 422 423 if (lif->txqcqs) { 424 devm_kfree(dev, lif->txqstats); 425 lif->txqstats = NULL; 426 devm_kfree(dev, lif->txqcqs); 427 lif->txqcqs = NULL; 428 } 429 } 430 431 static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq, 432 struct ionic_qcq *n_qcq) 433 { 434 if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) { 435 ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index); 436 n_qcq->flags &= ~IONIC_QCQ_F_INTR; 437 } 438 439 n_qcq->intr.vector = src_qcq->intr.vector; 440 n_qcq->intr.index = src_qcq->intr.index; 441 } 442 443 static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq) 444 { 445 int err; 446 447 if (!(qcq->flags & IONIC_QCQ_F_INTR)) { 448 qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED; 449 return 0; 450 } 451 452 err = ionic_intr_alloc(lif, &qcq->intr); 453 if (err) { 454 netdev_warn(lif->netdev, "no intr for %s: %d\n", 455 qcq->q.name, err); 456 goto err_out; 457 } 458 459 err = ionic_bus_get_irq(lif->ionic, qcq->intr.index); 460 if (err < 0) { 461 netdev_warn(lif->netdev, "no vector for %s: %d\n", 462 qcq->q.name, err); 463 goto err_out_free_intr; 464 } 465 qcq->intr.vector = err; 466 ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index, 467 IONIC_INTR_MASK_SET); 468 469 err = ionic_request_irq(lif, qcq); 470 if (err) { 471 netdev_warn(lif->netdev, "irq request failed %d\n", err); 472 goto err_out_free_intr; 473 } 474 475 /* try to get the irq on the local numa node first */ 476 qcq->intr.cpu = cpumask_local_spread(qcq->intr.index, 477 dev_to_node(lif->ionic->dev)); 478 if (qcq->intr.cpu != -1) 479 cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask); 480 481 netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index); 482 return 0; 483 484 err_out_free_intr: 485 ionic_intr_free(lif->ionic, qcq->intr.index); 486 err_out: 487 return err; 488 } 489 490 static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, 491 unsigned int index, 492 const char *name, unsigned int flags, 493 unsigned int num_descs, unsigned int desc_size, 494 unsigned int cq_desc_size, 495 unsigned int sg_desc_size, 496 unsigned int pid, struct ionic_qcq **qcq) 497 { 498 struct ionic_dev *idev = &lif->ionic->idev; 499 struct device *dev = lif->ionic->dev; 500 void *q_base, *cq_base, *sg_base; 501 dma_addr_t cq_base_pa = 0; 502 dma_addr_t sg_base_pa = 0; 503 dma_addr_t q_base_pa = 0; 504 struct ionic_qcq *new; 505 int err; 506 507 *qcq = NULL; 508 509 new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL); 510 if (!new) { 511 netdev_err(lif->netdev, "Cannot allocate queue structure\n"); 512 err = -ENOMEM; 513 goto err_out; 514 } 515 516 new->q.dev = dev; 517 new->flags = flags; 518 519 new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info), 520 GFP_KERNEL); 521 if (!new->q.info) { 522 netdev_err(lif->netdev, "Cannot allocate queue info\n"); 523 err = -ENOMEM; 524 goto err_out_free_qcq; 525 } 526 527 new->q.type = type; 528 new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems; 529 530 err = ionic_q_init(lif, idev, &new->q, index, name, num_descs, 531 desc_size, sg_desc_size, pid); 532 if (err) { 533 netdev_err(lif->netdev, "Cannot initialize queue\n"); 534 goto err_out_free_q_info; 535 } 536 537 err = ionic_alloc_qcq_interrupt(lif, new); 538 if (err) 539 goto err_out; 540 541 new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info), 542 GFP_KERNEL); 543 if (!new->cq.info) { 544 netdev_err(lif->netdev, "Cannot allocate completion queue info\n"); 545 err = -ENOMEM; 546 goto err_out_free_irq; 547 } 548 549 err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size); 550 if (err) { 551 netdev_err(lif->netdev, "Cannot initialize completion queue\n"); 552 goto err_out_free_cq_info; 553 } 554 555 if (flags & IONIC_QCQ_F_NOTIFYQ) { 556 int q_size, cq_size; 557 558 /* q & cq need to be contiguous in case of notifyq */ 559 q_size = ALIGN(num_descs * desc_size, PAGE_SIZE); 560 cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE); 561 562 new->q_size = PAGE_SIZE + q_size + cq_size; 563 new->q_base = dma_alloc_coherent(dev, new->q_size, 564 &new->q_base_pa, GFP_KERNEL); 565 if (!new->q_base) { 566 netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n"); 567 err = -ENOMEM; 568 goto err_out_free_cq_info; 569 } 570 q_base = PTR_ALIGN(new->q_base, PAGE_SIZE); 571 q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE); 572 ionic_q_map(&new->q, q_base, q_base_pa); 573 574 cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE); 575 cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE); 576 ionic_cq_map(&new->cq, cq_base, cq_base_pa); 577 ionic_cq_bind(&new->cq, &new->q); 578 } else { 579 new->q_size = PAGE_SIZE + (num_descs * desc_size); 580 new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa, 581 GFP_KERNEL); 582 if (!new->q_base) { 583 netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n"); 584 err = -ENOMEM; 585 goto err_out_free_cq_info; 586 } 587 q_base = PTR_ALIGN(new->q_base, PAGE_SIZE); 588 q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE); 589 ionic_q_map(&new->q, q_base, q_base_pa); 590 591 new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size); 592 new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa, 593 GFP_KERNEL); 594 if (!new->cq_base) { 595 netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n"); 596 err = -ENOMEM; 597 goto err_out_free_q; 598 } 599 cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE); 600 cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE); 601 ionic_cq_map(&new->cq, cq_base, cq_base_pa); 602 ionic_cq_bind(&new->cq, &new->q); 603 } 604 605 if (flags & IONIC_QCQ_F_SG) { 606 new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size); 607 new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa, 608 GFP_KERNEL); 609 if (!new->sg_base) { 610 netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n"); 611 err = -ENOMEM; 612 goto err_out_free_cq; 613 } 614 sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE); 615 sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE); 616 ionic_q_sg_map(&new->q, sg_base, sg_base_pa); 617 } 618 619 INIT_WORK(&new->dim.work, ionic_dim_work); 620 new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 621 622 *qcq = new; 623 624 return 0; 625 626 err_out_free_cq: 627 dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa); 628 err_out_free_q: 629 dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa); 630 err_out_free_cq_info: 631 devm_kfree(dev, new->cq.info); 632 err_out_free_irq: 633 if (flags & IONIC_QCQ_F_INTR) { 634 devm_free_irq(dev, new->intr.vector, &new->napi); 635 ionic_intr_free(lif->ionic, new->intr.index); 636 } 637 err_out_free_q_info: 638 devm_kfree(dev, new->q.info); 639 err_out_free_qcq: 640 devm_kfree(dev, new); 641 err_out: 642 dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err); 643 return err; 644 } 645 646 static int ionic_qcqs_alloc(struct ionic_lif *lif) 647 { 648 struct device *dev = lif->ionic->dev; 649 unsigned int flags; 650 int err; 651 652 flags = IONIC_QCQ_F_INTR; 653 err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags, 654 IONIC_ADMINQ_LENGTH, 655 sizeof(struct ionic_admin_cmd), 656 sizeof(struct ionic_admin_comp), 657 0, lif->kern_pid, &lif->adminqcq); 658 if (err) 659 return err; 660 ionic_debugfs_add_qcq(lif, lif->adminqcq); 661 662 if (lif->ionic->nnqs_per_lif) { 663 flags = IONIC_QCQ_F_NOTIFYQ; 664 err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq", 665 flags, IONIC_NOTIFYQ_LENGTH, 666 sizeof(struct ionic_notifyq_cmd), 667 sizeof(union ionic_notifyq_comp), 668 0, lif->kern_pid, &lif->notifyqcq); 669 if (err) 670 goto err_out; 671 ionic_debugfs_add_qcq(lif, lif->notifyqcq); 672 673 /* Let the notifyq ride on the adminq interrupt */ 674 ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq); 675 } 676 677 err = -ENOMEM; 678 lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif, 679 sizeof(struct ionic_qcq *), GFP_KERNEL); 680 if (!lif->txqcqs) 681 goto err_out; 682 lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif, 683 sizeof(struct ionic_qcq *), GFP_KERNEL); 684 if (!lif->rxqcqs) 685 goto err_out; 686 687 lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif, 688 sizeof(struct ionic_tx_stats), GFP_KERNEL); 689 if (!lif->txqstats) 690 goto err_out; 691 lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif, 692 sizeof(struct ionic_rx_stats), GFP_KERNEL); 693 if (!lif->rxqstats) 694 goto err_out; 695 696 return 0; 697 698 err_out: 699 ionic_qcqs_free(lif); 700 return err; 701 } 702 703 static void ionic_qcq_sanitize(struct ionic_qcq *qcq) 704 { 705 qcq->q.tail_idx = 0; 706 qcq->q.head_idx = 0; 707 qcq->cq.tail_idx = 0; 708 qcq->cq.done_color = 1; 709 memset(qcq->q_base, 0, qcq->q_size); 710 memset(qcq->cq_base, 0, qcq->cq_size); 711 memset(qcq->sg_base, 0, qcq->sg_size); 712 } 713 714 static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) 715 { 716 struct device *dev = lif->ionic->dev; 717 struct ionic_queue *q = &qcq->q; 718 struct ionic_cq *cq = &qcq->cq; 719 struct ionic_admin_ctx ctx = { 720 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 721 .cmd.q_init = { 722 .opcode = IONIC_CMD_Q_INIT, 723 .lif_index = cpu_to_le16(lif->index), 724 .type = q->type, 725 .ver = lif->qtype_info[q->type].version, 726 .index = cpu_to_le32(q->index), 727 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | 728 IONIC_QINIT_F_SG), 729 .pid = cpu_to_le16(q->pid), 730 .ring_size = ilog2(q->num_descs), 731 .ring_base = cpu_to_le64(q->base_pa), 732 .cq_ring_base = cpu_to_le64(cq->base_pa), 733 .sg_ring_base = cpu_to_le64(q->sg_base_pa), 734 }, 735 }; 736 unsigned int intr_index; 737 int err; 738 739 intr_index = qcq->intr.index; 740 741 ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index); 742 743 dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid); 744 dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index); 745 dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); 746 dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); 747 dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags); 748 dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver); 749 dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index); 750 751 ionic_qcq_sanitize(qcq); 752 753 err = ionic_adminq_post_wait(lif, &ctx); 754 if (err) 755 return err; 756 757 q->hw_type = ctx.comp.q_init.hw_type; 758 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); 759 q->dbval = IONIC_DBELL_QID(q->hw_index); 760 761 dev_dbg(dev, "txq->hw_type %d\n", q->hw_type); 762 dev_dbg(dev, "txq->hw_index %d\n", q->hw_index); 763 764 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) 765 netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi, 766 NAPI_POLL_WEIGHT); 767 768 qcq->flags |= IONIC_QCQ_F_INITED; 769 770 return 0; 771 } 772 773 static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) 774 { 775 struct device *dev = lif->ionic->dev; 776 struct ionic_queue *q = &qcq->q; 777 struct ionic_cq *cq = &qcq->cq; 778 struct ionic_admin_ctx ctx = { 779 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 780 .cmd.q_init = { 781 .opcode = IONIC_CMD_Q_INIT, 782 .lif_index = cpu_to_le16(lif->index), 783 .type = q->type, 784 .ver = lif->qtype_info[q->type].version, 785 .index = cpu_to_le32(q->index), 786 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | 787 IONIC_QINIT_F_SG), 788 .intr_index = cpu_to_le16(cq->bound_intr->index), 789 .pid = cpu_to_le16(q->pid), 790 .ring_size = ilog2(q->num_descs), 791 .ring_base = cpu_to_le64(q->base_pa), 792 .cq_ring_base = cpu_to_le64(cq->base_pa), 793 .sg_ring_base = cpu_to_le64(q->sg_base_pa), 794 }, 795 }; 796 int err; 797 798 dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid); 799 dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index); 800 dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); 801 dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); 802 dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags); 803 dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver); 804 dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index); 805 806 ionic_qcq_sanitize(qcq); 807 808 err = ionic_adminq_post_wait(lif, &ctx); 809 if (err) 810 return err; 811 812 q->hw_type = ctx.comp.q_init.hw_type; 813 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); 814 q->dbval = IONIC_DBELL_QID(q->hw_index); 815 816 dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type); 817 dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index); 818 819 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) 820 netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi, 821 NAPI_POLL_WEIGHT); 822 else 823 netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi, 824 NAPI_POLL_WEIGHT); 825 826 qcq->flags |= IONIC_QCQ_F_INITED; 827 828 return 0; 829 } 830 831 static bool ionic_notifyq_service(struct ionic_cq *cq, 832 struct ionic_cq_info *cq_info) 833 { 834 union ionic_notifyq_comp *comp = cq_info->cq_desc; 835 struct ionic_deferred_work *work; 836 struct net_device *netdev; 837 struct ionic_queue *q; 838 struct ionic_lif *lif; 839 u64 eid; 840 841 q = cq->bound_q; 842 lif = q->info[0].cb_arg; 843 netdev = lif->netdev; 844 eid = le64_to_cpu(comp->event.eid); 845 846 /* Have we run out of new completions to process? */ 847 if ((s64)(eid - lif->last_eid) <= 0) 848 return false; 849 850 lif->last_eid = eid; 851 852 dev_dbg(lif->ionic->dev, "notifyq event:\n"); 853 dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1, 854 comp, sizeof(*comp), true); 855 856 switch (le16_to_cpu(comp->event.ecode)) { 857 case IONIC_EVENT_LINK_CHANGE: 858 ionic_link_status_check_request(lif, CAN_NOT_SLEEP); 859 break; 860 case IONIC_EVENT_RESET: 861 work = kzalloc(sizeof(*work), GFP_ATOMIC); 862 if (!work) { 863 netdev_err(lif->netdev, "Reset event dropped\n"); 864 } else { 865 work->type = IONIC_DW_TYPE_LIF_RESET; 866 ionic_lif_deferred_enqueue(&lif->deferred, work); 867 } 868 break; 869 default: 870 netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n", 871 comp->event.ecode, eid); 872 break; 873 } 874 875 return true; 876 } 877 878 static bool ionic_adminq_service(struct ionic_cq *cq, 879 struct ionic_cq_info *cq_info) 880 { 881 struct ionic_admin_comp *comp = cq_info->cq_desc; 882 883 if (!color_match(comp->color, cq->done_color)) 884 return false; 885 886 ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index)); 887 888 return true; 889 } 890 891 static int ionic_adminq_napi(struct napi_struct *napi, int budget) 892 { 893 struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr; 894 struct ionic_lif *lif = napi_to_cq(napi)->lif; 895 struct ionic_dev *idev = &lif->ionic->idev; 896 unsigned long irqflags; 897 unsigned int flags = 0; 898 int n_work = 0; 899 int a_work = 0; 900 int work_done; 901 902 if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED) 903 n_work = ionic_cq_service(&lif->notifyqcq->cq, budget, 904 ionic_notifyq_service, NULL, NULL); 905 906 spin_lock_irqsave(&lif->adminq_lock, irqflags); 907 if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED) 908 a_work = ionic_cq_service(&lif->adminqcq->cq, budget, 909 ionic_adminq_service, NULL, NULL); 910 spin_unlock_irqrestore(&lif->adminq_lock, irqflags); 911 912 work_done = max(n_work, a_work); 913 if (work_done < budget && napi_complete_done(napi, work_done)) { 914 flags |= IONIC_INTR_CRED_UNMASK; 915 intr->rearm_count++; 916 } 917 918 if (work_done || flags) { 919 flags |= IONIC_INTR_CRED_RESET_COALESCE; 920 ionic_intr_credits(idev->intr_ctrl, 921 intr->index, 922 n_work + a_work, flags); 923 } 924 925 return work_done; 926 } 927 928 void ionic_get_stats64(struct net_device *netdev, 929 struct rtnl_link_stats64 *ns) 930 { 931 struct ionic_lif *lif = netdev_priv(netdev); 932 struct ionic_lif_stats *ls; 933 934 memset(ns, 0, sizeof(*ns)); 935 ls = &lif->info->stats; 936 937 ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) + 938 le64_to_cpu(ls->rx_mcast_packets) + 939 le64_to_cpu(ls->rx_bcast_packets); 940 941 ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) + 942 le64_to_cpu(ls->tx_mcast_packets) + 943 le64_to_cpu(ls->tx_bcast_packets); 944 945 ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) + 946 le64_to_cpu(ls->rx_mcast_bytes) + 947 le64_to_cpu(ls->rx_bcast_bytes); 948 949 ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) + 950 le64_to_cpu(ls->tx_mcast_bytes) + 951 le64_to_cpu(ls->tx_bcast_bytes); 952 953 ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) + 954 le64_to_cpu(ls->rx_mcast_drop_packets) + 955 le64_to_cpu(ls->rx_bcast_drop_packets); 956 957 ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) + 958 le64_to_cpu(ls->tx_mcast_drop_packets) + 959 le64_to_cpu(ls->tx_bcast_drop_packets); 960 961 ns->multicast = le64_to_cpu(ls->rx_mcast_packets); 962 963 ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty); 964 965 ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) + 966 le64_to_cpu(ls->rx_queue_disabled) + 967 le64_to_cpu(ls->rx_desc_fetch_error) + 968 le64_to_cpu(ls->rx_desc_data_error); 969 970 ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) + 971 le64_to_cpu(ls->tx_queue_disabled) + 972 le64_to_cpu(ls->tx_desc_fetch_error) + 973 le64_to_cpu(ls->tx_desc_data_error); 974 975 ns->rx_errors = ns->rx_over_errors + 976 ns->rx_missed_errors; 977 978 ns->tx_errors = ns->tx_aborted_errors; 979 } 980 981 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) 982 { 983 struct ionic_admin_ctx ctx = { 984 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 985 .cmd.rx_filter_add = { 986 .opcode = IONIC_CMD_RX_FILTER_ADD, 987 .lif_index = cpu_to_le16(lif->index), 988 .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC), 989 }, 990 }; 991 struct ionic_rx_filter *f; 992 int err; 993 994 /* don't bother if we already have it */ 995 spin_lock_bh(&lif->rx_filters.lock); 996 f = ionic_rx_filter_by_addr(lif, addr); 997 spin_unlock_bh(&lif->rx_filters.lock); 998 if (f) 999 return 0; 1000 1001 netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr); 1002 1003 memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); 1004 err = ionic_adminq_post_wait(lif, &ctx); 1005 if (err && err != -EEXIST) 1006 return err; 1007 1008 return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx); 1009 } 1010 1011 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) 1012 { 1013 struct ionic_admin_ctx ctx = { 1014 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1015 .cmd.rx_filter_del = { 1016 .opcode = IONIC_CMD_RX_FILTER_DEL, 1017 .lif_index = cpu_to_le16(lif->index), 1018 }, 1019 }; 1020 struct ionic_rx_filter *f; 1021 int err; 1022 1023 spin_lock_bh(&lif->rx_filters.lock); 1024 f = ionic_rx_filter_by_addr(lif, addr); 1025 if (!f) { 1026 spin_unlock_bh(&lif->rx_filters.lock); 1027 return -ENOENT; 1028 } 1029 1030 netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", 1031 addr, f->filter_id); 1032 1033 ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); 1034 ionic_rx_filter_free(lif, f); 1035 spin_unlock_bh(&lif->rx_filters.lock); 1036 1037 err = ionic_adminq_post_wait(lif, &ctx); 1038 if (err && err != -EEXIST) 1039 return err; 1040 1041 return 0; 1042 } 1043 1044 static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add, 1045 bool can_sleep) 1046 { 1047 struct ionic_deferred_work *work; 1048 unsigned int nmfilters; 1049 unsigned int nufilters; 1050 1051 if (add) { 1052 /* Do we have space for this filter? We test the counters 1053 * here before checking the need for deferral so that we 1054 * can return an overflow error to the stack. 1055 */ 1056 nmfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters); 1057 nufilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); 1058 1059 if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters)) 1060 lif->nmcast++; 1061 else if (!is_multicast_ether_addr(addr) && 1062 lif->nucast < nufilters) 1063 lif->nucast++; 1064 else 1065 return -ENOSPC; 1066 } else { 1067 if (is_multicast_ether_addr(addr) && lif->nmcast) 1068 lif->nmcast--; 1069 else if (!is_multicast_ether_addr(addr) && lif->nucast) 1070 lif->nucast--; 1071 } 1072 1073 if (!can_sleep) { 1074 work = kzalloc(sizeof(*work), GFP_ATOMIC); 1075 if (!work) 1076 return -ENOMEM; 1077 work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD : 1078 IONIC_DW_TYPE_RX_ADDR_DEL; 1079 memcpy(work->addr, addr, ETH_ALEN); 1080 netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n", 1081 add ? "add" : "del", addr); 1082 ionic_lif_deferred_enqueue(&lif->deferred, work); 1083 } else { 1084 netdev_dbg(lif->netdev, "rx_filter %s %pM\n", 1085 add ? "add" : "del", addr); 1086 if (add) 1087 return ionic_lif_addr_add(lif, addr); 1088 else 1089 return ionic_lif_addr_del(lif, addr); 1090 } 1091 1092 return 0; 1093 } 1094 1095 static int ionic_addr_add(struct net_device *netdev, const u8 *addr) 1096 { 1097 return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_SLEEP); 1098 } 1099 1100 static int ionic_ndo_addr_add(struct net_device *netdev, const u8 *addr) 1101 { 1102 return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_NOT_SLEEP); 1103 } 1104 1105 static int ionic_addr_del(struct net_device *netdev, const u8 *addr) 1106 { 1107 return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_SLEEP); 1108 } 1109 1110 static int ionic_ndo_addr_del(struct net_device *netdev, const u8 *addr) 1111 { 1112 return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_NOT_SLEEP); 1113 } 1114 1115 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode) 1116 { 1117 struct ionic_admin_ctx ctx = { 1118 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1119 .cmd.rx_mode_set = { 1120 .opcode = IONIC_CMD_RX_MODE_SET, 1121 .lif_index = cpu_to_le16(lif->index), 1122 .rx_mode = cpu_to_le16(rx_mode), 1123 }, 1124 }; 1125 char buf[128]; 1126 int err; 1127 int i; 1128 #define REMAIN(__x) (sizeof(buf) - (__x)) 1129 1130 i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:", 1131 lif->rx_mode, rx_mode); 1132 if (rx_mode & IONIC_RX_MODE_F_UNICAST) 1133 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST"); 1134 if (rx_mode & IONIC_RX_MODE_F_MULTICAST) 1135 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST"); 1136 if (rx_mode & IONIC_RX_MODE_F_BROADCAST) 1137 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST"); 1138 if (rx_mode & IONIC_RX_MODE_F_PROMISC) 1139 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC"); 1140 if (rx_mode & IONIC_RX_MODE_F_ALLMULTI) 1141 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI"); 1142 netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf); 1143 1144 err = ionic_adminq_post_wait(lif, &ctx); 1145 if (err) 1146 netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n", 1147 rx_mode, err); 1148 else 1149 lif->rx_mode = rx_mode; 1150 } 1151 1152 static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep) 1153 { 1154 struct ionic_lif *lif = netdev_priv(netdev); 1155 struct ionic_deferred_work *work; 1156 unsigned int nfilters; 1157 unsigned int rx_mode; 1158 1159 rx_mode = IONIC_RX_MODE_F_UNICAST; 1160 rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0; 1161 rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0; 1162 rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0; 1163 rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0; 1164 1165 /* sync unicast addresses 1166 * next check to see if we're in an overflow state 1167 * if so, we track that we overflowed and enable NIC PROMISC 1168 * else if the overflow is set and not needed 1169 * we remove our overflow flag and check the netdev flags 1170 * to see if we can disable NIC PROMISC 1171 */ 1172 if (can_sleep) 1173 __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del); 1174 else 1175 __dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del); 1176 nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); 1177 if (netdev_uc_count(netdev) + 1 > nfilters) { 1178 rx_mode |= IONIC_RX_MODE_F_PROMISC; 1179 lif->uc_overflow = true; 1180 } else if (lif->uc_overflow) { 1181 lif->uc_overflow = false; 1182 if (!(netdev->flags & IFF_PROMISC)) 1183 rx_mode &= ~IONIC_RX_MODE_F_PROMISC; 1184 } 1185 1186 /* same for multicast */ 1187 if (can_sleep) 1188 __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del); 1189 else 1190 __dev_mc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del); 1191 nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters); 1192 if (netdev_mc_count(netdev) > nfilters) { 1193 rx_mode |= IONIC_RX_MODE_F_ALLMULTI; 1194 lif->mc_overflow = true; 1195 } else if (lif->mc_overflow) { 1196 lif->mc_overflow = false; 1197 if (!(netdev->flags & IFF_ALLMULTI)) 1198 rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI; 1199 } 1200 1201 if (lif->rx_mode != rx_mode) { 1202 if (!can_sleep) { 1203 work = kzalloc(sizeof(*work), GFP_ATOMIC); 1204 if (!work) { 1205 netdev_err(lif->netdev, "rxmode change dropped\n"); 1206 return; 1207 } 1208 work->type = IONIC_DW_TYPE_RX_MODE; 1209 work->rx_mode = rx_mode; 1210 netdev_dbg(lif->netdev, "deferred: rx_mode\n"); 1211 ionic_lif_deferred_enqueue(&lif->deferred, work); 1212 } else { 1213 ionic_lif_rx_mode(lif, rx_mode); 1214 } 1215 } 1216 } 1217 1218 static void ionic_ndo_set_rx_mode(struct net_device *netdev) 1219 { 1220 ionic_set_rx_mode(netdev, CAN_NOT_SLEEP); 1221 } 1222 1223 static __le64 ionic_netdev_features_to_nic(netdev_features_t features) 1224 { 1225 u64 wanted = 0; 1226 1227 if (features & NETIF_F_HW_VLAN_CTAG_TX) 1228 wanted |= IONIC_ETH_HW_VLAN_TX_TAG; 1229 if (features & NETIF_F_HW_VLAN_CTAG_RX) 1230 wanted |= IONIC_ETH_HW_VLAN_RX_STRIP; 1231 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) 1232 wanted |= IONIC_ETH_HW_VLAN_RX_FILTER; 1233 if (features & NETIF_F_RXHASH) 1234 wanted |= IONIC_ETH_HW_RX_HASH; 1235 if (features & NETIF_F_RXCSUM) 1236 wanted |= IONIC_ETH_HW_RX_CSUM; 1237 if (features & NETIF_F_SG) 1238 wanted |= IONIC_ETH_HW_TX_SG; 1239 if (features & NETIF_F_HW_CSUM) 1240 wanted |= IONIC_ETH_HW_TX_CSUM; 1241 if (features & NETIF_F_TSO) 1242 wanted |= IONIC_ETH_HW_TSO; 1243 if (features & NETIF_F_TSO6) 1244 wanted |= IONIC_ETH_HW_TSO_IPV6; 1245 if (features & NETIF_F_TSO_ECN) 1246 wanted |= IONIC_ETH_HW_TSO_ECN; 1247 if (features & NETIF_F_GSO_GRE) 1248 wanted |= IONIC_ETH_HW_TSO_GRE; 1249 if (features & NETIF_F_GSO_GRE_CSUM) 1250 wanted |= IONIC_ETH_HW_TSO_GRE_CSUM; 1251 if (features & NETIF_F_GSO_IPXIP4) 1252 wanted |= IONIC_ETH_HW_TSO_IPXIP4; 1253 if (features & NETIF_F_GSO_IPXIP6) 1254 wanted |= IONIC_ETH_HW_TSO_IPXIP6; 1255 if (features & NETIF_F_GSO_UDP_TUNNEL) 1256 wanted |= IONIC_ETH_HW_TSO_UDP; 1257 if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) 1258 wanted |= IONIC_ETH_HW_TSO_UDP_CSUM; 1259 1260 return cpu_to_le64(wanted); 1261 } 1262 1263 static int ionic_set_nic_features(struct ionic_lif *lif, 1264 netdev_features_t features) 1265 { 1266 struct device *dev = lif->ionic->dev; 1267 struct ionic_admin_ctx ctx = { 1268 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1269 .cmd.lif_setattr = { 1270 .opcode = IONIC_CMD_LIF_SETATTR, 1271 .index = cpu_to_le16(lif->index), 1272 .attr = IONIC_LIF_ATTR_FEATURES, 1273 }, 1274 }; 1275 u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG | 1276 IONIC_ETH_HW_VLAN_RX_STRIP | 1277 IONIC_ETH_HW_VLAN_RX_FILTER; 1278 u64 old_hw_features; 1279 int err; 1280 1281 ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features); 1282 err = ionic_adminq_post_wait(lif, &ctx); 1283 if (err) 1284 return err; 1285 1286 old_hw_features = lif->hw_features; 1287 lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features & 1288 ctx.comp.lif_setattr.features); 1289 1290 if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH) 1291 ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL); 1292 1293 if ((vlan_flags & features) && 1294 !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features))) 1295 dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n"); 1296 1297 if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG) 1298 dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n"); 1299 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP) 1300 dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n"); 1301 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER) 1302 dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n"); 1303 if (lif->hw_features & IONIC_ETH_HW_RX_HASH) 1304 dev_dbg(dev, "feature ETH_HW_RX_HASH\n"); 1305 if (lif->hw_features & IONIC_ETH_HW_TX_SG) 1306 dev_dbg(dev, "feature ETH_HW_TX_SG\n"); 1307 if (lif->hw_features & IONIC_ETH_HW_TX_CSUM) 1308 dev_dbg(dev, "feature ETH_HW_TX_CSUM\n"); 1309 if (lif->hw_features & IONIC_ETH_HW_RX_CSUM) 1310 dev_dbg(dev, "feature ETH_HW_RX_CSUM\n"); 1311 if (lif->hw_features & IONIC_ETH_HW_TSO) 1312 dev_dbg(dev, "feature ETH_HW_TSO\n"); 1313 if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6) 1314 dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n"); 1315 if (lif->hw_features & IONIC_ETH_HW_TSO_ECN) 1316 dev_dbg(dev, "feature ETH_HW_TSO_ECN\n"); 1317 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE) 1318 dev_dbg(dev, "feature ETH_HW_TSO_GRE\n"); 1319 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM) 1320 dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n"); 1321 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4) 1322 dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n"); 1323 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6) 1324 dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n"); 1325 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP) 1326 dev_dbg(dev, "feature ETH_HW_TSO_UDP\n"); 1327 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM) 1328 dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n"); 1329 1330 return 0; 1331 } 1332 1333 static int ionic_init_nic_features(struct ionic_lif *lif) 1334 { 1335 struct net_device *netdev = lif->netdev; 1336 netdev_features_t features; 1337 int err; 1338 1339 /* set up what we expect to support by default */ 1340 features = NETIF_F_HW_VLAN_CTAG_TX | 1341 NETIF_F_HW_VLAN_CTAG_RX | 1342 NETIF_F_HW_VLAN_CTAG_FILTER | 1343 NETIF_F_RXHASH | 1344 NETIF_F_SG | 1345 NETIF_F_HW_CSUM | 1346 NETIF_F_RXCSUM | 1347 NETIF_F_TSO | 1348 NETIF_F_TSO6 | 1349 NETIF_F_TSO_ECN; 1350 1351 err = ionic_set_nic_features(lif, features); 1352 if (err) 1353 return err; 1354 1355 /* tell the netdev what we actually can support */ 1356 netdev->features |= NETIF_F_HIGHDMA; 1357 1358 if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG) 1359 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; 1360 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP) 1361 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; 1362 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER) 1363 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; 1364 if (lif->hw_features & IONIC_ETH_HW_RX_HASH) 1365 netdev->hw_features |= NETIF_F_RXHASH; 1366 if (lif->hw_features & IONIC_ETH_HW_TX_SG) 1367 netdev->hw_features |= NETIF_F_SG; 1368 1369 if (lif->hw_features & IONIC_ETH_HW_TX_CSUM) 1370 netdev->hw_enc_features |= NETIF_F_HW_CSUM; 1371 if (lif->hw_features & IONIC_ETH_HW_RX_CSUM) 1372 netdev->hw_enc_features |= NETIF_F_RXCSUM; 1373 if (lif->hw_features & IONIC_ETH_HW_TSO) 1374 netdev->hw_enc_features |= NETIF_F_TSO; 1375 if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6) 1376 netdev->hw_enc_features |= NETIF_F_TSO6; 1377 if (lif->hw_features & IONIC_ETH_HW_TSO_ECN) 1378 netdev->hw_enc_features |= NETIF_F_TSO_ECN; 1379 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE) 1380 netdev->hw_enc_features |= NETIF_F_GSO_GRE; 1381 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM) 1382 netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM; 1383 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4) 1384 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4; 1385 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6) 1386 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6; 1387 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP) 1388 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; 1389 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM) 1390 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; 1391 1392 netdev->hw_features |= netdev->hw_enc_features; 1393 netdev->features |= netdev->hw_features; 1394 netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES; 1395 1396 netdev->priv_flags |= IFF_UNICAST_FLT | 1397 IFF_LIVE_ADDR_CHANGE; 1398 1399 return 0; 1400 } 1401 1402 static int ionic_set_features(struct net_device *netdev, 1403 netdev_features_t features) 1404 { 1405 struct ionic_lif *lif = netdev_priv(netdev); 1406 int err; 1407 1408 netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n", 1409 __func__, (u64)lif->netdev->features, (u64)features); 1410 1411 err = ionic_set_nic_features(lif, features); 1412 1413 return err; 1414 } 1415 1416 static int ionic_set_mac_address(struct net_device *netdev, void *sa) 1417 { 1418 struct sockaddr *addr = sa; 1419 u8 *mac; 1420 int err; 1421 1422 mac = (u8 *)addr->sa_data; 1423 if (ether_addr_equal(netdev->dev_addr, mac)) 1424 return 0; 1425 1426 err = eth_prepare_mac_addr_change(netdev, addr); 1427 if (err) 1428 return err; 1429 1430 if (!is_zero_ether_addr(netdev->dev_addr)) { 1431 netdev_info(netdev, "deleting mac addr %pM\n", 1432 netdev->dev_addr); 1433 ionic_addr_del(netdev, netdev->dev_addr); 1434 } 1435 1436 eth_commit_mac_addr_change(netdev, addr); 1437 netdev_info(netdev, "updating mac addr %pM\n", mac); 1438 1439 return ionic_addr_add(netdev, mac); 1440 } 1441 1442 static void ionic_stop_queues_reconfig(struct ionic_lif *lif) 1443 { 1444 /* Stop and clean the queues before reconfiguration */ 1445 mutex_lock(&lif->queue_lock); 1446 netif_device_detach(lif->netdev); 1447 ionic_stop_queues(lif); 1448 ionic_txrx_deinit(lif); 1449 } 1450 1451 static int ionic_start_queues_reconfig(struct ionic_lif *lif) 1452 { 1453 int err; 1454 1455 /* Re-init the queues after reconfiguration */ 1456 1457 /* The only way txrx_init can fail here is if communication 1458 * with FW is suddenly broken. There's not much we can do 1459 * at this point - error messages have already been printed, 1460 * so we can continue on and the user can eventually do a 1461 * DOWN and UP to try to reset and clear the issue. 1462 */ 1463 err = ionic_txrx_init(lif); 1464 mutex_unlock(&lif->queue_lock); 1465 ionic_link_status_check_request(lif, CAN_SLEEP); 1466 netif_device_attach(lif->netdev); 1467 1468 return err; 1469 } 1470 1471 static int ionic_change_mtu(struct net_device *netdev, int new_mtu) 1472 { 1473 struct ionic_lif *lif = netdev_priv(netdev); 1474 struct ionic_admin_ctx ctx = { 1475 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1476 .cmd.lif_setattr = { 1477 .opcode = IONIC_CMD_LIF_SETATTR, 1478 .index = cpu_to_le16(lif->index), 1479 .attr = IONIC_LIF_ATTR_MTU, 1480 .mtu = cpu_to_le32(new_mtu), 1481 }, 1482 }; 1483 int err; 1484 1485 err = ionic_adminq_post_wait(lif, &ctx); 1486 if (err) 1487 return err; 1488 1489 /* if we're not running, nothing more to do */ 1490 if (!netif_running(netdev)) { 1491 netdev->mtu = new_mtu; 1492 return 0; 1493 } 1494 1495 ionic_stop_queues_reconfig(lif); 1496 netdev->mtu = new_mtu; 1497 return ionic_start_queues_reconfig(lif); 1498 } 1499 1500 static void ionic_tx_timeout_work(struct work_struct *ws) 1501 { 1502 struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work); 1503 1504 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 1505 return; 1506 1507 /* if we were stopped before this scheduled job was launched, 1508 * don't bother the queues as they are already stopped. 1509 */ 1510 if (!netif_running(lif->netdev)) 1511 return; 1512 1513 ionic_stop_queues_reconfig(lif); 1514 ionic_start_queues_reconfig(lif); 1515 } 1516 1517 static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue) 1518 { 1519 struct ionic_lif *lif = netdev_priv(netdev); 1520 1521 netdev_info(lif->netdev, "Tx Timeout triggered - txq %d\n", txqueue); 1522 schedule_work(&lif->tx_timeout_work); 1523 } 1524 1525 static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, 1526 u16 vid) 1527 { 1528 struct ionic_lif *lif = netdev_priv(netdev); 1529 struct ionic_admin_ctx ctx = { 1530 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1531 .cmd.rx_filter_add = { 1532 .opcode = IONIC_CMD_RX_FILTER_ADD, 1533 .lif_index = cpu_to_le16(lif->index), 1534 .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN), 1535 .vlan.vlan = cpu_to_le16(vid), 1536 }, 1537 }; 1538 int err; 1539 1540 netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid); 1541 err = ionic_adminq_post_wait(lif, &ctx); 1542 if (err) 1543 return err; 1544 1545 return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx); 1546 } 1547 1548 static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, 1549 u16 vid) 1550 { 1551 struct ionic_lif *lif = netdev_priv(netdev); 1552 struct ionic_admin_ctx ctx = { 1553 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1554 .cmd.rx_filter_del = { 1555 .opcode = IONIC_CMD_RX_FILTER_DEL, 1556 .lif_index = cpu_to_le16(lif->index), 1557 }, 1558 }; 1559 struct ionic_rx_filter *f; 1560 1561 spin_lock_bh(&lif->rx_filters.lock); 1562 1563 f = ionic_rx_filter_by_vlan(lif, vid); 1564 if (!f) { 1565 spin_unlock_bh(&lif->rx_filters.lock); 1566 return -ENOENT; 1567 } 1568 1569 netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", 1570 vid, f->filter_id); 1571 1572 ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); 1573 ionic_rx_filter_free(lif, f); 1574 spin_unlock_bh(&lif->rx_filters.lock); 1575 1576 return ionic_adminq_post_wait(lif, &ctx); 1577 } 1578 1579 int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types, 1580 const u8 *key, const u32 *indir) 1581 { 1582 struct ionic_admin_ctx ctx = { 1583 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1584 .cmd.lif_setattr = { 1585 .opcode = IONIC_CMD_LIF_SETATTR, 1586 .attr = IONIC_LIF_ATTR_RSS, 1587 .rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa), 1588 }, 1589 }; 1590 unsigned int i, tbl_sz; 1591 1592 if (lif->hw_features & IONIC_ETH_HW_RX_HASH) { 1593 lif->rss_types = types; 1594 ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types); 1595 } 1596 1597 if (key) 1598 memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE); 1599 1600 if (indir) { 1601 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 1602 for (i = 0; i < tbl_sz; i++) 1603 lif->rss_ind_tbl[i] = indir[i]; 1604 } 1605 1606 memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key, 1607 IONIC_RSS_HASH_KEY_SIZE); 1608 1609 return ionic_adminq_post_wait(lif, &ctx); 1610 } 1611 1612 static int ionic_lif_rss_init(struct ionic_lif *lif) 1613 { 1614 unsigned int tbl_sz; 1615 unsigned int i; 1616 1617 lif->rss_types = IONIC_RSS_TYPE_IPV4 | 1618 IONIC_RSS_TYPE_IPV4_TCP | 1619 IONIC_RSS_TYPE_IPV4_UDP | 1620 IONIC_RSS_TYPE_IPV6 | 1621 IONIC_RSS_TYPE_IPV6_TCP | 1622 IONIC_RSS_TYPE_IPV6_UDP; 1623 1624 /* Fill indirection table with 'default' values */ 1625 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 1626 for (i = 0; i < tbl_sz; i++) 1627 lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs); 1628 1629 return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL); 1630 } 1631 1632 static void ionic_lif_rss_deinit(struct ionic_lif *lif) 1633 { 1634 int tbl_sz; 1635 1636 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 1637 memset(lif->rss_ind_tbl, 0, tbl_sz); 1638 memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE); 1639 1640 ionic_lif_rss_config(lif, 0x0, NULL, NULL); 1641 } 1642 1643 static void ionic_lif_quiesce(struct ionic_lif *lif) 1644 { 1645 struct ionic_admin_ctx ctx = { 1646 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1647 .cmd.lif_setattr = { 1648 .opcode = IONIC_CMD_LIF_SETATTR, 1649 .index = cpu_to_le16(lif->index), 1650 .attr = IONIC_LIF_ATTR_STATE, 1651 .state = IONIC_LIF_QUIESCE, 1652 }, 1653 }; 1654 int err; 1655 1656 err = ionic_adminq_post_wait(lif, &ctx); 1657 if (err) 1658 netdev_err(lif->netdev, "lif quiesce failed %d\n", err); 1659 } 1660 1661 static void ionic_txrx_disable(struct ionic_lif *lif) 1662 { 1663 unsigned int i; 1664 int err = 0; 1665 1666 if (lif->txqcqs) { 1667 for (i = 0; i < lif->nxqs; i++) 1668 err = ionic_qcq_disable(lif->txqcqs[i], (err != -ETIMEDOUT)); 1669 } 1670 1671 if (lif->rxqcqs) { 1672 for (i = 0; i < lif->nxqs; i++) 1673 err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT)); 1674 } 1675 1676 ionic_lif_quiesce(lif); 1677 } 1678 1679 static void ionic_txrx_deinit(struct ionic_lif *lif) 1680 { 1681 unsigned int i; 1682 1683 if (lif->txqcqs) { 1684 for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) { 1685 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); 1686 ionic_tx_flush(&lif->txqcqs[i]->cq); 1687 ionic_tx_empty(&lif->txqcqs[i]->q); 1688 } 1689 } 1690 1691 if (lif->rxqcqs) { 1692 for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) { 1693 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]); 1694 ionic_rx_empty(&lif->rxqcqs[i]->q); 1695 } 1696 } 1697 lif->rx_mode = 0; 1698 } 1699 1700 static void ionic_txrx_free(struct ionic_lif *lif) 1701 { 1702 unsigned int i; 1703 1704 if (lif->txqcqs) { 1705 for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) { 1706 ionic_qcq_free(lif, lif->txqcqs[i]); 1707 devm_kfree(lif->ionic->dev, lif->txqcqs[i]); 1708 lif->txqcqs[i] = NULL; 1709 } 1710 } 1711 1712 if (lif->rxqcqs) { 1713 for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) { 1714 ionic_qcq_free(lif, lif->rxqcqs[i]); 1715 devm_kfree(lif->ionic->dev, lif->rxqcqs[i]); 1716 lif->rxqcqs[i] = NULL; 1717 } 1718 } 1719 } 1720 1721 static int ionic_txrx_alloc(struct ionic_lif *lif) 1722 { 1723 unsigned int sg_desc_sz; 1724 unsigned int flags; 1725 unsigned int i; 1726 int err = 0; 1727 1728 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 && 1729 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == 1730 sizeof(struct ionic_txq_sg_desc_v1)) 1731 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1); 1732 else 1733 sg_desc_sz = sizeof(struct ionic_txq_sg_desc); 1734 1735 flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; 1736 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) 1737 flags |= IONIC_QCQ_F_INTR; 1738 for (i = 0; i < lif->nxqs; i++) { 1739 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 1740 lif->ntxq_descs, 1741 sizeof(struct ionic_txq_desc), 1742 sizeof(struct ionic_txq_comp), 1743 sg_desc_sz, 1744 lif->kern_pid, &lif->txqcqs[i]); 1745 if (err) 1746 goto err_out; 1747 1748 if (flags & IONIC_QCQ_F_INTR) { 1749 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 1750 lif->txqcqs[i]->intr.index, 1751 lif->tx_coalesce_hw); 1752 if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state)) 1753 lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw; 1754 } 1755 1756 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]); 1757 } 1758 1759 flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR; 1760 for (i = 0; i < lif->nxqs; i++) { 1761 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 1762 lif->nrxq_descs, 1763 sizeof(struct ionic_rxq_desc), 1764 sizeof(struct ionic_rxq_comp), 1765 sizeof(struct ionic_rxq_sg_desc), 1766 lif->kern_pid, &lif->rxqcqs[i]); 1767 if (err) 1768 goto err_out; 1769 1770 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 1771 lif->rxqcqs[i]->intr.index, 1772 lif->rx_coalesce_hw); 1773 if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state)) 1774 lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw; 1775 1776 if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) 1777 ionic_link_qcq_interrupts(lif->rxqcqs[i], 1778 lif->txqcqs[i]); 1779 1780 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]); 1781 } 1782 1783 return 0; 1784 1785 err_out: 1786 ionic_txrx_free(lif); 1787 1788 return err; 1789 } 1790 1791 static int ionic_txrx_init(struct ionic_lif *lif) 1792 { 1793 unsigned int i; 1794 int err; 1795 1796 for (i = 0; i < lif->nxqs; i++) { 1797 err = ionic_lif_txq_init(lif, lif->txqcqs[i]); 1798 if (err) 1799 goto err_out; 1800 1801 err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]); 1802 if (err) { 1803 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); 1804 goto err_out; 1805 } 1806 } 1807 1808 if (lif->netdev->features & NETIF_F_RXHASH) 1809 ionic_lif_rss_init(lif); 1810 1811 ionic_set_rx_mode(lif->netdev, CAN_SLEEP); 1812 1813 return 0; 1814 1815 err_out: 1816 while (i--) { 1817 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); 1818 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]); 1819 } 1820 1821 return err; 1822 } 1823 1824 static int ionic_txrx_enable(struct ionic_lif *lif) 1825 { 1826 int derr = 0; 1827 int i, err; 1828 1829 for (i = 0; i < lif->nxqs; i++) { 1830 if (!(lif->rxqcqs[i] && lif->txqcqs[i])) { 1831 dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i); 1832 err = -ENXIO; 1833 goto err_out; 1834 } 1835 1836 ionic_rx_fill(&lif->rxqcqs[i]->q); 1837 err = ionic_qcq_enable(lif->rxqcqs[i]); 1838 if (err) 1839 goto err_out; 1840 1841 err = ionic_qcq_enable(lif->txqcqs[i]); 1842 if (err) { 1843 derr = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT)); 1844 goto err_out; 1845 } 1846 } 1847 1848 return 0; 1849 1850 err_out: 1851 while (i--) { 1852 derr = ionic_qcq_disable(lif->txqcqs[i], (derr != -ETIMEDOUT)); 1853 derr = ionic_qcq_disable(lif->rxqcqs[i], (derr != -ETIMEDOUT)); 1854 } 1855 1856 return err; 1857 } 1858 1859 static int ionic_start_queues(struct ionic_lif *lif) 1860 { 1861 int err; 1862 1863 if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) 1864 return -EIO; 1865 1866 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 1867 return -EBUSY; 1868 1869 if (test_and_set_bit(IONIC_LIF_F_UP, lif->state)) 1870 return 0; 1871 1872 err = ionic_txrx_enable(lif); 1873 if (err) { 1874 clear_bit(IONIC_LIF_F_UP, lif->state); 1875 return err; 1876 } 1877 netif_tx_wake_all_queues(lif->netdev); 1878 1879 return 0; 1880 } 1881 1882 static int ionic_open(struct net_device *netdev) 1883 { 1884 struct ionic_lif *lif = netdev_priv(netdev); 1885 int err; 1886 1887 /* If recovering from a broken state, clear the bit and we'll try again */ 1888 if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state)) 1889 netdev_info(netdev, "clearing broken state\n"); 1890 1891 err = ionic_txrx_alloc(lif); 1892 if (err) 1893 return err; 1894 1895 err = ionic_txrx_init(lif); 1896 if (err) 1897 goto err_txrx_free; 1898 1899 err = netif_set_real_num_tx_queues(netdev, lif->nxqs); 1900 if (err) 1901 goto err_txrx_deinit; 1902 1903 err = netif_set_real_num_rx_queues(netdev, lif->nxqs); 1904 if (err) 1905 goto err_txrx_deinit; 1906 1907 /* don't start the queues until we have link */ 1908 if (netif_carrier_ok(netdev)) { 1909 err = ionic_start_queues(lif); 1910 if (err) 1911 goto err_txrx_deinit; 1912 } 1913 1914 return 0; 1915 1916 err_txrx_deinit: 1917 ionic_txrx_deinit(lif); 1918 err_txrx_free: 1919 ionic_txrx_free(lif); 1920 return err; 1921 } 1922 1923 static void ionic_stop_queues(struct ionic_lif *lif) 1924 { 1925 if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state)) 1926 return; 1927 1928 netif_tx_disable(lif->netdev); 1929 ionic_txrx_disable(lif); 1930 } 1931 1932 static int ionic_stop(struct net_device *netdev) 1933 { 1934 struct ionic_lif *lif = netdev_priv(netdev); 1935 1936 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 1937 return 0; 1938 1939 ionic_stop_queues(lif); 1940 ionic_txrx_deinit(lif); 1941 ionic_txrx_free(lif); 1942 1943 return 0; 1944 } 1945 1946 static int ionic_get_vf_config(struct net_device *netdev, 1947 int vf, struct ifla_vf_info *ivf) 1948 { 1949 struct ionic_lif *lif = netdev_priv(netdev); 1950 struct ionic *ionic = lif->ionic; 1951 int ret = 0; 1952 1953 if (!netif_device_present(netdev)) 1954 return -EBUSY; 1955 1956 down_read(&ionic->vf_op_lock); 1957 1958 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 1959 ret = -EINVAL; 1960 } else { 1961 ivf->vf = vf; 1962 ivf->vlan = le16_to_cpu(ionic->vfs[vf].vlanid); 1963 ivf->qos = 0; 1964 ivf->spoofchk = ionic->vfs[vf].spoofchk; 1965 ivf->linkstate = ionic->vfs[vf].linkstate; 1966 ivf->max_tx_rate = le32_to_cpu(ionic->vfs[vf].maxrate); 1967 ivf->trusted = ionic->vfs[vf].trusted; 1968 ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr); 1969 } 1970 1971 up_read(&ionic->vf_op_lock); 1972 return ret; 1973 } 1974 1975 static int ionic_get_vf_stats(struct net_device *netdev, int vf, 1976 struct ifla_vf_stats *vf_stats) 1977 { 1978 struct ionic_lif *lif = netdev_priv(netdev); 1979 struct ionic *ionic = lif->ionic; 1980 struct ionic_lif_stats *vs; 1981 int ret = 0; 1982 1983 if (!netif_device_present(netdev)) 1984 return -EBUSY; 1985 1986 down_read(&ionic->vf_op_lock); 1987 1988 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 1989 ret = -EINVAL; 1990 } else { 1991 memset(vf_stats, 0, sizeof(*vf_stats)); 1992 vs = &ionic->vfs[vf].stats; 1993 1994 vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets); 1995 vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets); 1996 vf_stats->rx_bytes = le64_to_cpu(vs->rx_ucast_bytes); 1997 vf_stats->tx_bytes = le64_to_cpu(vs->tx_ucast_bytes); 1998 vf_stats->broadcast = le64_to_cpu(vs->rx_bcast_packets); 1999 vf_stats->multicast = le64_to_cpu(vs->rx_mcast_packets); 2000 vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) + 2001 le64_to_cpu(vs->rx_mcast_drop_packets) + 2002 le64_to_cpu(vs->rx_bcast_drop_packets); 2003 vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) + 2004 le64_to_cpu(vs->tx_mcast_drop_packets) + 2005 le64_to_cpu(vs->tx_bcast_drop_packets); 2006 } 2007 2008 up_read(&ionic->vf_op_lock); 2009 return ret; 2010 } 2011 2012 static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) 2013 { 2014 struct ionic_lif *lif = netdev_priv(netdev); 2015 struct ionic *ionic = lif->ionic; 2016 int ret; 2017 2018 if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac))) 2019 return -EINVAL; 2020 2021 if (!netif_device_present(netdev)) 2022 return -EBUSY; 2023 2024 down_write(&ionic->vf_op_lock); 2025 2026 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2027 ret = -EINVAL; 2028 } else { 2029 ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac); 2030 if (!ret) 2031 ether_addr_copy(ionic->vfs[vf].macaddr, mac); 2032 } 2033 2034 up_write(&ionic->vf_op_lock); 2035 return ret; 2036 } 2037 2038 static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, 2039 u8 qos, __be16 proto) 2040 { 2041 struct ionic_lif *lif = netdev_priv(netdev); 2042 struct ionic *ionic = lif->ionic; 2043 int ret; 2044 2045 /* until someday when we support qos */ 2046 if (qos) 2047 return -EINVAL; 2048 2049 if (vlan > 4095) 2050 return -EINVAL; 2051 2052 if (proto != htons(ETH_P_8021Q)) 2053 return -EPROTONOSUPPORT; 2054 2055 if (!netif_device_present(netdev)) 2056 return -EBUSY; 2057 2058 down_write(&ionic->vf_op_lock); 2059 2060 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2061 ret = -EINVAL; 2062 } else { 2063 ret = ionic_set_vf_config(ionic, vf, 2064 IONIC_VF_ATTR_VLAN, (u8 *)&vlan); 2065 if (!ret) 2066 ionic->vfs[vf].vlanid = cpu_to_le16(vlan); 2067 } 2068 2069 up_write(&ionic->vf_op_lock); 2070 return ret; 2071 } 2072 2073 static int ionic_set_vf_rate(struct net_device *netdev, int vf, 2074 int tx_min, int tx_max) 2075 { 2076 struct ionic_lif *lif = netdev_priv(netdev); 2077 struct ionic *ionic = lif->ionic; 2078 int ret; 2079 2080 /* setting the min just seems silly */ 2081 if (tx_min) 2082 return -EINVAL; 2083 2084 if (!netif_device_present(netdev)) 2085 return -EBUSY; 2086 2087 down_write(&ionic->vf_op_lock); 2088 2089 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2090 ret = -EINVAL; 2091 } else { 2092 ret = ionic_set_vf_config(ionic, vf, 2093 IONIC_VF_ATTR_RATE, (u8 *)&tx_max); 2094 if (!ret) 2095 lif->ionic->vfs[vf].maxrate = cpu_to_le32(tx_max); 2096 } 2097 2098 up_write(&ionic->vf_op_lock); 2099 return ret; 2100 } 2101 2102 static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set) 2103 { 2104 struct ionic_lif *lif = netdev_priv(netdev); 2105 struct ionic *ionic = lif->ionic; 2106 u8 data = set; /* convert to u8 for config */ 2107 int ret; 2108 2109 if (!netif_device_present(netdev)) 2110 return -EBUSY; 2111 2112 down_write(&ionic->vf_op_lock); 2113 2114 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2115 ret = -EINVAL; 2116 } else { 2117 ret = ionic_set_vf_config(ionic, vf, 2118 IONIC_VF_ATTR_SPOOFCHK, &data); 2119 if (!ret) 2120 ionic->vfs[vf].spoofchk = data; 2121 } 2122 2123 up_write(&ionic->vf_op_lock); 2124 return ret; 2125 } 2126 2127 static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set) 2128 { 2129 struct ionic_lif *lif = netdev_priv(netdev); 2130 struct ionic *ionic = lif->ionic; 2131 u8 data = set; /* convert to u8 for config */ 2132 int ret; 2133 2134 if (!netif_device_present(netdev)) 2135 return -EBUSY; 2136 2137 down_write(&ionic->vf_op_lock); 2138 2139 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2140 ret = -EINVAL; 2141 } else { 2142 ret = ionic_set_vf_config(ionic, vf, 2143 IONIC_VF_ATTR_TRUST, &data); 2144 if (!ret) 2145 ionic->vfs[vf].trusted = data; 2146 } 2147 2148 up_write(&ionic->vf_op_lock); 2149 return ret; 2150 } 2151 2152 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set) 2153 { 2154 struct ionic_lif *lif = netdev_priv(netdev); 2155 struct ionic *ionic = lif->ionic; 2156 u8 data; 2157 int ret; 2158 2159 switch (set) { 2160 case IFLA_VF_LINK_STATE_ENABLE: 2161 data = IONIC_VF_LINK_STATUS_UP; 2162 break; 2163 case IFLA_VF_LINK_STATE_DISABLE: 2164 data = IONIC_VF_LINK_STATUS_DOWN; 2165 break; 2166 case IFLA_VF_LINK_STATE_AUTO: 2167 data = IONIC_VF_LINK_STATUS_AUTO; 2168 break; 2169 default: 2170 return -EINVAL; 2171 } 2172 2173 if (!netif_device_present(netdev)) 2174 return -EBUSY; 2175 2176 down_write(&ionic->vf_op_lock); 2177 2178 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2179 ret = -EINVAL; 2180 } else { 2181 ret = ionic_set_vf_config(ionic, vf, 2182 IONIC_VF_ATTR_LINKSTATE, &data); 2183 if (!ret) 2184 ionic->vfs[vf].linkstate = set; 2185 } 2186 2187 up_write(&ionic->vf_op_lock); 2188 return ret; 2189 } 2190 2191 static const struct net_device_ops ionic_netdev_ops = { 2192 .ndo_open = ionic_open, 2193 .ndo_stop = ionic_stop, 2194 .ndo_start_xmit = ionic_start_xmit, 2195 .ndo_get_stats64 = ionic_get_stats64, 2196 .ndo_set_rx_mode = ionic_ndo_set_rx_mode, 2197 .ndo_set_features = ionic_set_features, 2198 .ndo_set_mac_address = ionic_set_mac_address, 2199 .ndo_validate_addr = eth_validate_addr, 2200 .ndo_tx_timeout = ionic_tx_timeout, 2201 .ndo_change_mtu = ionic_change_mtu, 2202 .ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid, 2203 .ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid, 2204 .ndo_set_vf_vlan = ionic_set_vf_vlan, 2205 .ndo_set_vf_trust = ionic_set_vf_trust, 2206 .ndo_set_vf_mac = ionic_set_vf_mac, 2207 .ndo_set_vf_rate = ionic_set_vf_rate, 2208 .ndo_set_vf_spoofchk = ionic_set_vf_spoofchk, 2209 .ndo_get_vf_config = ionic_get_vf_config, 2210 .ndo_set_vf_link_state = ionic_set_vf_link_state, 2211 .ndo_get_vf_stats = ionic_get_vf_stats, 2212 }; 2213 2214 static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b) 2215 { 2216 /* only swapping the queues, not the napi, flags, or other stuff */ 2217 swap(a->q.num_descs, b->q.num_descs); 2218 swap(a->q.base, b->q.base); 2219 swap(a->q.base_pa, b->q.base_pa); 2220 swap(a->q.info, b->q.info); 2221 swap(a->q_base, b->q_base); 2222 swap(a->q_base_pa, b->q_base_pa); 2223 swap(a->q_size, b->q_size); 2224 2225 swap(a->q.sg_base, b->q.sg_base); 2226 swap(a->q.sg_base_pa, b->q.sg_base_pa); 2227 swap(a->sg_base, b->sg_base); 2228 swap(a->sg_base_pa, b->sg_base_pa); 2229 swap(a->sg_size, b->sg_size); 2230 2231 swap(a->cq.num_descs, b->cq.num_descs); 2232 swap(a->cq.base, b->cq.base); 2233 swap(a->cq.base_pa, b->cq.base_pa); 2234 swap(a->cq.info, b->cq.info); 2235 swap(a->cq_base, b->cq_base); 2236 swap(a->cq_base_pa, b->cq_base_pa); 2237 swap(a->cq_size, b->cq_size); 2238 2239 ionic_debugfs_del_qcq(a); 2240 ionic_debugfs_add_qcq(a->q.lif, a); 2241 } 2242 2243 int ionic_reconfigure_queues(struct ionic_lif *lif, 2244 struct ionic_queue_params *qparam) 2245 { 2246 struct ionic_qcq **tx_qcqs = NULL; 2247 struct ionic_qcq **rx_qcqs = NULL; 2248 unsigned int sg_desc_sz; 2249 unsigned int flags; 2250 int err = -ENOMEM; 2251 unsigned int i; 2252 2253 /* allocate temporary qcq arrays to hold new queue structs */ 2254 if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) { 2255 tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif, 2256 sizeof(struct ionic_qcq *), GFP_KERNEL); 2257 if (!tx_qcqs) 2258 goto err_out; 2259 } 2260 if (qparam->nxqs != lif->nxqs || qparam->nrxq_descs != lif->nrxq_descs) { 2261 rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif, 2262 sizeof(struct ionic_qcq *), GFP_KERNEL); 2263 if (!rx_qcqs) 2264 goto err_out; 2265 } 2266 2267 /* allocate new desc_info and rings, but leave the interrupt setup 2268 * until later so as to not mess with the still-running queues 2269 */ 2270 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 && 2271 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == 2272 sizeof(struct ionic_txq_sg_desc_v1)) 2273 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1); 2274 else 2275 sg_desc_sz = sizeof(struct ionic_txq_sg_desc); 2276 2277 if (tx_qcqs) { 2278 for (i = 0; i < qparam->nxqs; i++) { 2279 flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR; 2280 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 2281 qparam->ntxq_descs, 2282 sizeof(struct ionic_txq_desc), 2283 sizeof(struct ionic_txq_comp), 2284 sg_desc_sz, 2285 lif->kern_pid, &tx_qcqs[i]); 2286 if (err) 2287 goto err_out; 2288 } 2289 } 2290 2291 if (rx_qcqs) { 2292 for (i = 0; i < qparam->nxqs; i++) { 2293 flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR; 2294 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 2295 qparam->nrxq_descs, 2296 sizeof(struct ionic_rxq_desc), 2297 sizeof(struct ionic_rxq_comp), 2298 sizeof(struct ionic_rxq_sg_desc), 2299 lif->kern_pid, &rx_qcqs[i]); 2300 if (err) 2301 goto err_out; 2302 } 2303 } 2304 2305 /* stop and clean the queues */ 2306 ionic_stop_queues_reconfig(lif); 2307 2308 if (qparam->nxqs != lif->nxqs) { 2309 err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs); 2310 if (err) 2311 goto err_out_reinit_unlock; 2312 err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs); 2313 if (err) { 2314 netif_set_real_num_tx_queues(lif->netdev, lif->nxqs); 2315 goto err_out_reinit_unlock; 2316 } 2317 } 2318 2319 /* swap new desc_info and rings, keeping existing interrupt config */ 2320 if (tx_qcqs) { 2321 lif->ntxq_descs = qparam->ntxq_descs; 2322 for (i = 0; i < qparam->nxqs; i++) 2323 ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]); 2324 } 2325 2326 if (rx_qcqs) { 2327 lif->nrxq_descs = qparam->nrxq_descs; 2328 for (i = 0; i < qparam->nxqs; i++) 2329 ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]); 2330 } 2331 2332 /* if we need to change the interrupt layout, this is the time */ 2333 if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) || 2334 qparam->nxqs != lif->nxqs) { 2335 if (qparam->intr_split) { 2336 set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); 2337 } else { 2338 clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); 2339 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs; 2340 lif->tx_coalesce_hw = lif->rx_coalesce_hw; 2341 } 2342 2343 /* clear existing interrupt assignments */ 2344 for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) { 2345 ionic_qcq_intr_free(lif, lif->txqcqs[i]); 2346 ionic_qcq_intr_free(lif, lif->rxqcqs[i]); 2347 } 2348 2349 /* re-assign the interrupts */ 2350 for (i = 0; i < qparam->nxqs; i++) { 2351 lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR; 2352 err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]); 2353 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 2354 lif->rxqcqs[i]->intr.index, 2355 lif->rx_coalesce_hw); 2356 2357 if (qparam->intr_split) { 2358 lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR; 2359 err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]); 2360 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 2361 lif->txqcqs[i]->intr.index, 2362 lif->tx_coalesce_hw); 2363 if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state)) 2364 lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw; 2365 } else { 2366 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 2367 ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]); 2368 } 2369 } 2370 } 2371 2372 /* now we can rework the debugfs mappings */ 2373 if (tx_qcqs) { 2374 for (i = 0; i < qparam->nxqs; i++) { 2375 ionic_debugfs_del_qcq(lif->txqcqs[i]); 2376 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]); 2377 } 2378 } 2379 2380 if (rx_qcqs) { 2381 for (i = 0; i < qparam->nxqs; i++) { 2382 ionic_debugfs_del_qcq(lif->rxqcqs[i]); 2383 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]); 2384 } 2385 } 2386 2387 swap(lif->nxqs, qparam->nxqs); 2388 2389 err_out_reinit_unlock: 2390 /* re-init the queues, but don't lose an error code */ 2391 if (err) 2392 ionic_start_queues_reconfig(lif); 2393 else 2394 err = ionic_start_queues_reconfig(lif); 2395 2396 err_out: 2397 /* free old allocs without cleaning intr */ 2398 for (i = 0; i < qparam->nxqs; i++) { 2399 if (tx_qcqs && tx_qcqs[i]) { 2400 tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 2401 ionic_qcq_free(lif, tx_qcqs[i]); 2402 devm_kfree(lif->ionic->dev, tx_qcqs[i]); 2403 tx_qcqs[i] = NULL; 2404 } 2405 if (rx_qcqs && rx_qcqs[i]) { 2406 rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 2407 ionic_qcq_free(lif, rx_qcqs[i]); 2408 devm_kfree(lif->ionic->dev, rx_qcqs[i]); 2409 rx_qcqs[i] = NULL; 2410 } 2411 } 2412 2413 /* free q array */ 2414 if (rx_qcqs) { 2415 devm_kfree(lif->ionic->dev, rx_qcqs); 2416 rx_qcqs = NULL; 2417 } 2418 if (tx_qcqs) { 2419 devm_kfree(lif->ionic->dev, tx_qcqs); 2420 tx_qcqs = NULL; 2421 } 2422 2423 /* clean the unused dma and info allocations when new set is smaller 2424 * than the full array, but leave the qcq shells in place 2425 */ 2426 for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) { 2427 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 2428 ionic_qcq_free(lif, lif->txqcqs[i]); 2429 2430 lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 2431 ionic_qcq_free(lif, lif->rxqcqs[i]); 2432 } 2433 2434 return err; 2435 } 2436 2437 int ionic_lif_alloc(struct ionic *ionic) 2438 { 2439 struct device *dev = ionic->dev; 2440 union ionic_lif_identity *lid; 2441 struct net_device *netdev; 2442 struct ionic_lif *lif; 2443 int tbl_sz; 2444 int err; 2445 2446 lid = kzalloc(sizeof(*lid), GFP_KERNEL); 2447 if (!lid) 2448 return -ENOMEM; 2449 2450 netdev = alloc_etherdev_mqs(sizeof(*lif), 2451 ionic->ntxqs_per_lif, ionic->ntxqs_per_lif); 2452 if (!netdev) { 2453 dev_err(dev, "Cannot allocate netdev, aborting\n"); 2454 err = -ENOMEM; 2455 goto err_out_free_lid; 2456 } 2457 2458 SET_NETDEV_DEV(netdev, dev); 2459 2460 lif = netdev_priv(netdev); 2461 lif->netdev = netdev; 2462 ionic->lif = lif; 2463 netdev->netdev_ops = &ionic_netdev_ops; 2464 ionic_ethtool_set_ops(netdev); 2465 2466 netdev->watchdog_timeo = 2 * HZ; 2467 netif_carrier_off(netdev); 2468 2469 lif->identity = lid; 2470 lif->lif_type = IONIC_LIF_TYPE_CLASSIC; 2471 err = ionic_lif_identify(ionic, lif->lif_type, lif->identity); 2472 if (err) { 2473 dev_err(ionic->dev, "Cannot identify type %d: %d\n", 2474 lif->lif_type, err); 2475 goto err_out_free_netdev; 2476 } 2477 lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU, 2478 le32_to_cpu(lif->identity->eth.min_frame_size)); 2479 lif->netdev->max_mtu = 2480 le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN; 2481 2482 lif->neqs = ionic->neqs_per_lif; 2483 lif->nxqs = ionic->ntxqs_per_lif; 2484 2485 lif->ionic = ionic; 2486 lif->index = 0; 2487 lif->ntxq_descs = IONIC_DEF_TXRX_DESC; 2488 lif->nrxq_descs = IONIC_DEF_TXRX_DESC; 2489 2490 /* Convert the default coalesce value to actual hw resolution */ 2491 lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT; 2492 lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic, 2493 lif->rx_coalesce_usecs); 2494 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs; 2495 lif->tx_coalesce_hw = lif->rx_coalesce_hw; 2496 set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state); 2497 set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state); 2498 2499 snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index); 2500 2501 spin_lock_init(&lif->adminq_lock); 2502 2503 spin_lock_init(&lif->deferred.lock); 2504 INIT_LIST_HEAD(&lif->deferred.list); 2505 INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work); 2506 2507 /* allocate lif info */ 2508 lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE); 2509 lif->info = dma_alloc_coherent(dev, lif->info_sz, 2510 &lif->info_pa, GFP_KERNEL); 2511 if (!lif->info) { 2512 dev_err(dev, "Failed to allocate lif info, aborting\n"); 2513 err = -ENOMEM; 2514 goto err_out_free_netdev; 2515 } 2516 2517 ionic_debugfs_add_lif(lif); 2518 2519 /* allocate control queues and txrx queue arrays */ 2520 ionic_lif_queue_identify(lif); 2521 err = ionic_qcqs_alloc(lif); 2522 if (err) 2523 goto err_out_free_lif_info; 2524 2525 /* allocate rss indirection table */ 2526 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 2527 lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz; 2528 lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz, 2529 &lif->rss_ind_tbl_pa, 2530 GFP_KERNEL); 2531 2532 if (!lif->rss_ind_tbl) { 2533 err = -ENOMEM; 2534 dev_err(dev, "Failed to allocate rss indirection table, aborting\n"); 2535 goto err_out_free_qcqs; 2536 } 2537 netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE); 2538 2539 return 0; 2540 2541 err_out_free_qcqs: 2542 ionic_qcqs_free(lif); 2543 err_out_free_lif_info: 2544 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); 2545 lif->info = NULL; 2546 lif->info_pa = 0; 2547 err_out_free_netdev: 2548 free_netdev(lif->netdev); 2549 lif = NULL; 2550 err_out_free_lid: 2551 kfree(lid); 2552 2553 return err; 2554 } 2555 2556 static void ionic_lif_reset(struct ionic_lif *lif) 2557 { 2558 struct ionic_dev *idev = &lif->ionic->idev; 2559 2560 mutex_lock(&lif->ionic->dev_cmd_lock); 2561 ionic_dev_cmd_lif_reset(idev, lif->index); 2562 ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 2563 mutex_unlock(&lif->ionic->dev_cmd_lock); 2564 } 2565 2566 static void ionic_lif_handle_fw_down(struct ionic_lif *lif) 2567 { 2568 struct ionic *ionic = lif->ionic; 2569 2570 if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state)) 2571 return; 2572 2573 dev_info(ionic->dev, "FW Down: Stopping LIFs\n"); 2574 2575 netif_device_detach(lif->netdev); 2576 2577 if (test_bit(IONIC_LIF_F_UP, lif->state)) { 2578 dev_info(ionic->dev, "Surprise FW stop, stopping queues\n"); 2579 mutex_lock(&lif->queue_lock); 2580 ionic_stop_queues(lif); 2581 mutex_unlock(&lif->queue_lock); 2582 } 2583 2584 if (netif_running(lif->netdev)) { 2585 ionic_txrx_deinit(lif); 2586 ionic_txrx_free(lif); 2587 } 2588 ionic_lif_deinit(lif); 2589 ionic_reset(ionic); 2590 ionic_qcqs_free(lif); 2591 2592 dev_info(ionic->dev, "FW Down: LIFs stopped\n"); 2593 } 2594 2595 static void ionic_lif_handle_fw_up(struct ionic_lif *lif) 2596 { 2597 struct ionic *ionic = lif->ionic; 2598 int err; 2599 2600 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 2601 return; 2602 2603 dev_info(ionic->dev, "FW Up: restarting LIFs\n"); 2604 2605 ionic_init_devinfo(ionic); 2606 err = ionic_identify(ionic); 2607 if (err) 2608 goto err_out; 2609 err = ionic_port_identify(ionic); 2610 if (err) 2611 goto err_out; 2612 err = ionic_port_init(ionic); 2613 if (err) 2614 goto err_out; 2615 err = ionic_qcqs_alloc(lif); 2616 if (err) 2617 goto err_out; 2618 2619 err = ionic_lif_init(lif); 2620 if (err) 2621 goto err_qcqs_free; 2622 2623 if (lif->registered) 2624 ionic_lif_set_netdev_info(lif); 2625 2626 ionic_rx_filter_replay(lif); 2627 2628 if (netif_running(lif->netdev)) { 2629 err = ionic_txrx_alloc(lif); 2630 if (err) 2631 goto err_lifs_deinit; 2632 2633 err = ionic_txrx_init(lif); 2634 if (err) 2635 goto err_txrx_free; 2636 } 2637 2638 clear_bit(IONIC_LIF_F_FW_RESET, lif->state); 2639 ionic_link_status_check_request(lif, CAN_SLEEP); 2640 netif_device_attach(lif->netdev); 2641 dev_info(ionic->dev, "FW Up: LIFs restarted\n"); 2642 2643 return; 2644 2645 err_txrx_free: 2646 ionic_txrx_free(lif); 2647 err_lifs_deinit: 2648 ionic_lif_deinit(lif); 2649 err_qcqs_free: 2650 ionic_qcqs_free(lif); 2651 err_out: 2652 dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err); 2653 } 2654 2655 void ionic_lif_free(struct ionic_lif *lif) 2656 { 2657 struct device *dev = lif->ionic->dev; 2658 2659 /* free rss indirection table */ 2660 dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl, 2661 lif->rss_ind_tbl_pa); 2662 lif->rss_ind_tbl = NULL; 2663 lif->rss_ind_tbl_pa = 0; 2664 2665 /* free queues */ 2666 ionic_qcqs_free(lif); 2667 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 2668 ionic_lif_reset(lif); 2669 2670 /* free lif info */ 2671 kfree(lif->identity); 2672 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); 2673 lif->info = NULL; 2674 lif->info_pa = 0; 2675 2676 /* unmap doorbell page */ 2677 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); 2678 lif->kern_dbpage = NULL; 2679 kfree(lif->dbid_inuse); 2680 lif->dbid_inuse = NULL; 2681 2682 /* free netdev & lif */ 2683 ionic_debugfs_del_lif(lif); 2684 free_netdev(lif->netdev); 2685 } 2686 2687 void ionic_lif_deinit(struct ionic_lif *lif) 2688 { 2689 if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state)) 2690 return; 2691 2692 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { 2693 cancel_work_sync(&lif->deferred.work); 2694 cancel_work_sync(&lif->tx_timeout_work); 2695 ionic_rx_filters_deinit(lif); 2696 if (lif->netdev->features & NETIF_F_RXHASH) 2697 ionic_lif_rss_deinit(lif); 2698 } 2699 2700 napi_disable(&lif->adminqcq->napi); 2701 ionic_lif_qcq_deinit(lif, lif->notifyqcq); 2702 ionic_lif_qcq_deinit(lif, lif->adminqcq); 2703 2704 mutex_destroy(&lif->queue_lock); 2705 ionic_lif_reset(lif); 2706 } 2707 2708 static int ionic_lif_adminq_init(struct ionic_lif *lif) 2709 { 2710 struct device *dev = lif->ionic->dev; 2711 struct ionic_q_init_comp comp; 2712 struct ionic_dev *idev; 2713 struct ionic_qcq *qcq; 2714 struct ionic_queue *q; 2715 int err; 2716 2717 idev = &lif->ionic->idev; 2718 qcq = lif->adminqcq; 2719 q = &qcq->q; 2720 2721 mutex_lock(&lif->ionic->dev_cmd_lock); 2722 ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index); 2723 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 2724 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); 2725 mutex_unlock(&lif->ionic->dev_cmd_lock); 2726 if (err) { 2727 netdev_err(lif->netdev, "adminq init failed %d\n", err); 2728 return err; 2729 } 2730 2731 q->hw_type = comp.hw_type; 2732 q->hw_index = le32_to_cpu(comp.hw_index); 2733 q->dbval = IONIC_DBELL_QID(q->hw_index); 2734 2735 dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type); 2736 dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index); 2737 2738 netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi, 2739 NAPI_POLL_WEIGHT); 2740 2741 napi_enable(&qcq->napi); 2742 2743 if (qcq->flags & IONIC_QCQ_F_INTR) 2744 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 2745 IONIC_INTR_MASK_CLEAR); 2746 2747 qcq->flags |= IONIC_QCQ_F_INITED; 2748 2749 return 0; 2750 } 2751 2752 static int ionic_lif_notifyq_init(struct ionic_lif *lif) 2753 { 2754 struct ionic_qcq *qcq = lif->notifyqcq; 2755 struct device *dev = lif->ionic->dev; 2756 struct ionic_queue *q = &qcq->q; 2757 int err; 2758 2759 struct ionic_admin_ctx ctx = { 2760 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 2761 .cmd.q_init = { 2762 .opcode = IONIC_CMD_Q_INIT, 2763 .lif_index = cpu_to_le16(lif->index), 2764 .type = q->type, 2765 .ver = lif->qtype_info[q->type].version, 2766 .index = cpu_to_le32(q->index), 2767 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | 2768 IONIC_QINIT_F_ENA), 2769 .intr_index = cpu_to_le16(lif->adminqcq->intr.index), 2770 .pid = cpu_to_le16(q->pid), 2771 .ring_size = ilog2(q->num_descs), 2772 .ring_base = cpu_to_le64(q->base_pa), 2773 } 2774 }; 2775 2776 dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid); 2777 dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index); 2778 dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); 2779 dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); 2780 2781 err = ionic_adminq_post_wait(lif, &ctx); 2782 if (err) 2783 return err; 2784 2785 lif->last_eid = 0; 2786 q->hw_type = ctx.comp.q_init.hw_type; 2787 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); 2788 q->dbval = IONIC_DBELL_QID(q->hw_index); 2789 2790 dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type); 2791 dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index); 2792 2793 /* preset the callback info */ 2794 q->info[0].cb_arg = lif; 2795 2796 qcq->flags |= IONIC_QCQ_F_INITED; 2797 2798 return 0; 2799 } 2800 2801 static int ionic_station_set(struct ionic_lif *lif) 2802 { 2803 struct net_device *netdev = lif->netdev; 2804 struct ionic_admin_ctx ctx = { 2805 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 2806 .cmd.lif_getattr = { 2807 .opcode = IONIC_CMD_LIF_GETATTR, 2808 .index = cpu_to_le16(lif->index), 2809 .attr = IONIC_LIF_ATTR_MAC, 2810 }, 2811 }; 2812 struct sockaddr addr; 2813 int err; 2814 2815 err = ionic_adminq_post_wait(lif, &ctx); 2816 if (err) 2817 return err; 2818 netdev_dbg(lif->netdev, "found initial MAC addr %pM\n", 2819 ctx.comp.lif_getattr.mac); 2820 if (is_zero_ether_addr(ctx.comp.lif_getattr.mac)) 2821 return 0; 2822 2823 if (!is_zero_ether_addr(netdev->dev_addr)) { 2824 /* If the netdev mac is non-zero and doesn't match the default 2825 * device address, it was set by something earlier and we're 2826 * likely here again after a fw-upgrade reset. We need to be 2827 * sure the netdev mac is in our filter list. 2828 */ 2829 if (!ether_addr_equal(ctx.comp.lif_getattr.mac, 2830 netdev->dev_addr)) 2831 ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP); 2832 } else { 2833 /* Update the netdev mac with the device's mac */ 2834 memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len); 2835 addr.sa_family = AF_INET; 2836 err = eth_prepare_mac_addr_change(netdev, &addr); 2837 if (err) { 2838 netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n", 2839 addr.sa_data, err); 2840 return 0; 2841 } 2842 2843 eth_commit_mac_addr_change(netdev, &addr); 2844 } 2845 2846 netdev_dbg(lif->netdev, "adding station MAC addr %pM\n", 2847 netdev->dev_addr); 2848 ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP); 2849 2850 return 0; 2851 } 2852 2853 int ionic_lif_init(struct ionic_lif *lif) 2854 { 2855 struct ionic_dev *idev = &lif->ionic->idev; 2856 struct device *dev = lif->ionic->dev; 2857 struct ionic_lif_init_comp comp; 2858 int dbpage_num; 2859 int err; 2860 2861 mutex_lock(&lif->ionic->dev_cmd_lock); 2862 ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa); 2863 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 2864 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); 2865 mutex_unlock(&lif->ionic->dev_cmd_lock); 2866 if (err) 2867 return err; 2868 2869 lif->hw_index = le16_to_cpu(comp.hw_index); 2870 mutex_init(&lif->queue_lock); 2871 2872 /* now that we have the hw_index we can figure out our doorbell page */ 2873 lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif); 2874 if (!lif->dbid_count) { 2875 dev_err(dev, "No doorbell pages, aborting\n"); 2876 return -EINVAL; 2877 } 2878 2879 lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL); 2880 if (!lif->dbid_inuse) { 2881 dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n"); 2882 return -ENOMEM; 2883 } 2884 2885 /* first doorbell id reserved for kernel (dbid aka pid == zero) */ 2886 set_bit(0, lif->dbid_inuse); 2887 lif->kern_pid = 0; 2888 2889 dbpage_num = ionic_db_page_num(lif, lif->kern_pid); 2890 lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num); 2891 if (!lif->kern_dbpage) { 2892 dev_err(dev, "Cannot map dbpage, aborting\n"); 2893 err = -ENOMEM; 2894 goto err_out_free_dbid; 2895 } 2896 2897 err = ionic_lif_adminq_init(lif); 2898 if (err) 2899 goto err_out_adminq_deinit; 2900 2901 if (lif->ionic->nnqs_per_lif) { 2902 err = ionic_lif_notifyq_init(lif); 2903 if (err) 2904 goto err_out_notifyq_deinit; 2905 } 2906 2907 err = ionic_init_nic_features(lif); 2908 if (err) 2909 goto err_out_notifyq_deinit; 2910 2911 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { 2912 err = ionic_rx_filters_init(lif); 2913 if (err) 2914 goto err_out_notifyq_deinit; 2915 } 2916 2917 err = ionic_station_set(lif); 2918 if (err) 2919 goto err_out_notifyq_deinit; 2920 2921 lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT; 2922 2923 set_bit(IONIC_LIF_F_INITED, lif->state); 2924 2925 INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work); 2926 2927 return 0; 2928 2929 err_out_notifyq_deinit: 2930 ionic_lif_qcq_deinit(lif, lif->notifyqcq); 2931 err_out_adminq_deinit: 2932 ionic_lif_qcq_deinit(lif, lif->adminqcq); 2933 ionic_lif_reset(lif); 2934 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); 2935 lif->kern_dbpage = NULL; 2936 err_out_free_dbid: 2937 kfree(lif->dbid_inuse); 2938 lif->dbid_inuse = NULL; 2939 2940 return err; 2941 } 2942 2943 static void ionic_lif_notify_work(struct work_struct *ws) 2944 { 2945 } 2946 2947 static void ionic_lif_set_netdev_info(struct ionic_lif *lif) 2948 { 2949 struct ionic_admin_ctx ctx = { 2950 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 2951 .cmd.lif_setattr = { 2952 .opcode = IONIC_CMD_LIF_SETATTR, 2953 .index = cpu_to_le16(lif->index), 2954 .attr = IONIC_LIF_ATTR_NAME, 2955 }, 2956 }; 2957 2958 strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name, 2959 sizeof(ctx.cmd.lif_setattr.name)); 2960 2961 ionic_adminq_post_wait(lif, &ctx); 2962 } 2963 2964 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev) 2965 { 2966 if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit) 2967 return NULL; 2968 2969 return netdev_priv(netdev); 2970 } 2971 2972 static int ionic_lif_notify(struct notifier_block *nb, 2973 unsigned long event, void *info) 2974 { 2975 struct net_device *ndev = netdev_notifier_info_to_dev(info); 2976 struct ionic *ionic = container_of(nb, struct ionic, nb); 2977 struct ionic_lif *lif = ionic_netdev_lif(ndev); 2978 2979 if (!lif || lif->ionic != ionic) 2980 return NOTIFY_DONE; 2981 2982 switch (event) { 2983 case NETDEV_CHANGENAME: 2984 ionic_lif_set_netdev_info(lif); 2985 break; 2986 } 2987 2988 return NOTIFY_DONE; 2989 } 2990 2991 int ionic_lif_register(struct ionic_lif *lif) 2992 { 2993 int err; 2994 2995 INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work); 2996 2997 lif->ionic->nb.notifier_call = ionic_lif_notify; 2998 2999 err = register_netdevice_notifier(&lif->ionic->nb); 3000 if (err) 3001 lif->ionic->nb.notifier_call = NULL; 3002 3003 /* only register LIF0 for now */ 3004 err = register_netdev(lif->netdev); 3005 if (err) { 3006 dev_err(lif->ionic->dev, "Cannot register net device, aborting\n"); 3007 return err; 3008 } 3009 3010 ionic_link_status_check_request(lif, CAN_SLEEP); 3011 lif->registered = true; 3012 ionic_lif_set_netdev_info(lif); 3013 3014 return 0; 3015 } 3016 3017 void ionic_lif_unregister(struct ionic_lif *lif) 3018 { 3019 if (lif->ionic->nb.notifier_call) { 3020 unregister_netdevice_notifier(&lif->ionic->nb); 3021 cancel_work_sync(&lif->ionic->nb_work); 3022 lif->ionic->nb.notifier_call = NULL; 3023 } 3024 3025 if (lif->netdev->reg_state == NETREG_REGISTERED) 3026 unregister_netdev(lif->netdev); 3027 lif->registered = false; 3028 } 3029 3030 static void ionic_lif_queue_identify(struct ionic_lif *lif) 3031 { 3032 union ionic_q_identity __iomem *q_ident; 3033 struct ionic *ionic = lif->ionic; 3034 struct ionic_dev *idev; 3035 int qtype; 3036 int err; 3037 3038 idev = &lif->ionic->idev; 3039 q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data; 3040 3041 for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) { 3042 struct ionic_qtype_info *qti = &lif->qtype_info[qtype]; 3043 3044 /* filter out the ones we know about */ 3045 switch (qtype) { 3046 case IONIC_QTYPE_ADMINQ: 3047 case IONIC_QTYPE_NOTIFYQ: 3048 case IONIC_QTYPE_RXQ: 3049 case IONIC_QTYPE_TXQ: 3050 break; 3051 default: 3052 continue; 3053 } 3054 3055 memset(qti, 0, sizeof(*qti)); 3056 3057 mutex_lock(&ionic->dev_cmd_lock); 3058 ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype, 3059 ionic_qtype_versions[qtype]); 3060 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); 3061 if (!err) { 3062 qti->version = readb(&q_ident->version); 3063 qti->supported = readb(&q_ident->supported); 3064 qti->features = readq(&q_ident->features); 3065 qti->desc_sz = readw(&q_ident->desc_sz); 3066 qti->comp_sz = readw(&q_ident->comp_sz); 3067 qti->sg_desc_sz = readw(&q_ident->sg_desc_sz); 3068 qti->max_sg_elems = readw(&q_ident->max_sg_elems); 3069 qti->sg_desc_stride = readw(&q_ident->sg_desc_stride); 3070 } 3071 mutex_unlock(&ionic->dev_cmd_lock); 3072 3073 if (err == -EINVAL) { 3074 dev_err(ionic->dev, "qtype %d not supported\n", qtype); 3075 continue; 3076 } else if (err == -EIO) { 3077 dev_err(ionic->dev, "q_ident failed, not supported on older FW\n"); 3078 return; 3079 } else if (err) { 3080 dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n", 3081 qtype, err); 3082 return; 3083 } 3084 3085 dev_dbg(ionic->dev, " qtype[%d].version = %d\n", 3086 qtype, qti->version); 3087 dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n", 3088 qtype, qti->supported); 3089 dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n", 3090 qtype, qti->features); 3091 dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n", 3092 qtype, qti->desc_sz); 3093 dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n", 3094 qtype, qti->comp_sz); 3095 dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n", 3096 qtype, qti->sg_desc_sz); 3097 dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n", 3098 qtype, qti->max_sg_elems); 3099 dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n", 3100 qtype, qti->sg_desc_stride); 3101 } 3102 } 3103 3104 int ionic_lif_identify(struct ionic *ionic, u8 lif_type, 3105 union ionic_lif_identity *lid) 3106 { 3107 struct ionic_dev *idev = &ionic->idev; 3108 size_t sz; 3109 int err; 3110 3111 sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data)); 3112 3113 mutex_lock(&ionic->dev_cmd_lock); 3114 ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1); 3115 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); 3116 memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz); 3117 mutex_unlock(&ionic->dev_cmd_lock); 3118 if (err) 3119 return (err); 3120 3121 dev_dbg(ionic->dev, "capabilities 0x%llx\n", 3122 le64_to_cpu(lid->capabilities)); 3123 3124 dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n", 3125 le32_to_cpu(lid->eth.max_ucast_filters)); 3126 dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n", 3127 le32_to_cpu(lid->eth.max_mcast_filters)); 3128 dev_dbg(ionic->dev, "eth.features 0x%llx\n", 3129 le64_to_cpu(lid->eth.config.features)); 3130 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n", 3131 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ])); 3132 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n", 3133 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ])); 3134 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n", 3135 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ])); 3136 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n", 3137 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ])); 3138 dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name); 3139 dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac); 3140 dev_dbg(ionic->dev, "eth.config.mtu %d\n", 3141 le32_to_cpu(lid->eth.config.mtu)); 3142 3143 return 0; 3144 } 3145 3146 int ionic_lif_size(struct ionic *ionic) 3147 { 3148 struct ionic_identity *ident = &ionic->ident; 3149 unsigned int nintrs, dev_nintrs; 3150 union ionic_lif_config *lc; 3151 unsigned int ntxqs_per_lif; 3152 unsigned int nrxqs_per_lif; 3153 unsigned int neqs_per_lif; 3154 unsigned int nnqs_per_lif; 3155 unsigned int nxqs, neqs; 3156 unsigned int min_intrs; 3157 int err; 3158 3159 lc = &ident->lif.eth.config; 3160 dev_nintrs = le32_to_cpu(ident->dev.nintrs); 3161 neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count); 3162 nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]); 3163 ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]); 3164 nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]); 3165 3166 nxqs = min(ntxqs_per_lif, nrxqs_per_lif); 3167 nxqs = min(nxqs, num_online_cpus()); 3168 neqs = min(neqs_per_lif, num_online_cpus()); 3169 3170 try_again: 3171 /* interrupt usage: 3172 * 1 for master lif adminq/notifyq 3173 * 1 for each CPU for master lif TxRx queue pairs 3174 * whatever's left is for RDMA queues 3175 */ 3176 nintrs = 1 + nxqs + neqs; 3177 min_intrs = 2; /* adminq + 1 TxRx queue pair */ 3178 3179 if (nintrs > dev_nintrs) 3180 goto try_fewer; 3181 3182 err = ionic_bus_alloc_irq_vectors(ionic, nintrs); 3183 if (err < 0 && err != -ENOSPC) { 3184 dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err); 3185 return err; 3186 } 3187 if (err == -ENOSPC) 3188 goto try_fewer; 3189 3190 if (err != nintrs) { 3191 ionic_bus_free_irq_vectors(ionic); 3192 goto try_fewer; 3193 } 3194 3195 ionic->nnqs_per_lif = nnqs_per_lif; 3196 ionic->neqs_per_lif = neqs; 3197 ionic->ntxqs_per_lif = nxqs; 3198 ionic->nrxqs_per_lif = nxqs; 3199 ionic->nintrs = nintrs; 3200 3201 ionic_debugfs_add_sizes(ionic); 3202 3203 return 0; 3204 3205 try_fewer: 3206 if (nnqs_per_lif > 1) { 3207 nnqs_per_lif >>= 1; 3208 goto try_again; 3209 } 3210 if (neqs > 1) { 3211 neqs >>= 1; 3212 goto try_again; 3213 } 3214 if (nxqs > 1) { 3215 nxqs >>= 1; 3216 goto try_again; 3217 } 3218 dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs); 3219 return -ENOSPC; 3220 } 3221