1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ 3 4 #include <linux/ethtool.h> 5 #include <linux/printk.h> 6 #include <linux/dynamic_debug.h> 7 #include <linux/netdevice.h> 8 #include <linux/etherdevice.h> 9 #include <linux/if_vlan.h> 10 #include <linux/rtnetlink.h> 11 #include <linux/interrupt.h> 12 #include <linux/pci.h> 13 #include <linux/cpumask.h> 14 #include <linux/crash_dump.h> 15 16 #include "ionic.h" 17 #include "ionic_bus.h" 18 #include "ionic_lif.h" 19 #include "ionic_txrx.h" 20 #include "ionic_ethtool.h" 21 #include "ionic_debugfs.h" 22 23 /* queuetype support level */ 24 static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = { 25 [IONIC_QTYPE_ADMINQ] = 0, /* 0 = Base version with CQ support */ 26 [IONIC_QTYPE_NOTIFYQ] = 0, /* 0 = Base version */ 27 [IONIC_QTYPE_RXQ] = 0, /* 0 = Base version with CQ+SG support */ 28 [IONIC_QTYPE_TXQ] = 1, /* 0 = Base version with CQ+SG support 29 * 1 = ... with Tx SG version 1 30 */ 31 }; 32 33 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode); 34 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr); 35 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr); 36 static void ionic_link_status_check(struct ionic_lif *lif); 37 static void ionic_lif_handle_fw_down(struct ionic_lif *lif); 38 static void ionic_lif_handle_fw_up(struct ionic_lif *lif); 39 static void ionic_lif_set_netdev_info(struct ionic_lif *lif); 40 41 static void ionic_txrx_deinit(struct ionic_lif *lif); 42 static int ionic_txrx_init(struct ionic_lif *lif); 43 static int ionic_start_queues(struct ionic_lif *lif); 44 static void ionic_stop_queues(struct ionic_lif *lif); 45 static void ionic_lif_queue_identify(struct ionic_lif *lif); 46 47 static void ionic_dim_work(struct work_struct *work) 48 { 49 struct dim *dim = container_of(work, struct dim, work); 50 struct dim_cq_moder cur_moder; 51 struct ionic_qcq *qcq; 52 u32 new_coal; 53 54 cur_moder = 
net_dim_get_rx_moderation(dim->mode, dim->profile_ix); 55 qcq = container_of(dim, struct ionic_qcq, dim); 56 new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec); 57 qcq->intr.dim_coal_hw = new_coal ? new_coal : 1; 58 dim->state = DIM_START_MEASURE; 59 } 60 61 static void ionic_lif_deferred_work(struct work_struct *work) 62 { 63 struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work); 64 struct ionic_deferred *def = &lif->deferred; 65 struct ionic_deferred_work *w = NULL; 66 67 do { 68 spin_lock_bh(&def->lock); 69 if (!list_empty(&def->list)) { 70 w = list_first_entry(&def->list, 71 struct ionic_deferred_work, list); 72 list_del(&w->list); 73 } 74 spin_unlock_bh(&def->lock); 75 76 if (!w) 77 break; 78 79 switch (w->type) { 80 case IONIC_DW_TYPE_RX_MODE: 81 ionic_lif_rx_mode(lif, w->rx_mode); 82 break; 83 case IONIC_DW_TYPE_RX_ADDR_ADD: 84 ionic_lif_addr_add(lif, w->addr); 85 break; 86 case IONIC_DW_TYPE_RX_ADDR_DEL: 87 ionic_lif_addr_del(lif, w->addr); 88 break; 89 case IONIC_DW_TYPE_LINK_STATUS: 90 ionic_link_status_check(lif); 91 break; 92 case IONIC_DW_TYPE_LIF_RESET: 93 if (w->fw_status) 94 ionic_lif_handle_fw_up(lif); 95 else 96 ionic_lif_handle_fw_down(lif); 97 break; 98 default: 99 break; 100 } 101 kfree(w); 102 w = NULL; 103 } while (true); 104 } 105 106 void ionic_lif_deferred_enqueue(struct ionic_deferred *def, 107 struct ionic_deferred_work *work) 108 { 109 spin_lock_bh(&def->lock); 110 list_add_tail(&work->list, &def->list); 111 spin_unlock_bh(&def->lock); 112 schedule_work(&def->work); 113 } 114 115 static void ionic_link_status_check(struct ionic_lif *lif) 116 { 117 struct net_device *netdev = lif->netdev; 118 u16 link_status; 119 bool link_up; 120 121 if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state)) 122 return; 123 124 /* Don't put carrier back up if we're in a broken state */ 125 if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) { 126 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state); 127 return; 128 
} 129 130 link_status = le16_to_cpu(lif->info->status.link_status); 131 link_up = link_status == IONIC_PORT_OPER_STATUS_UP; 132 133 if (link_up) { 134 int err = 0; 135 136 if (netdev->flags & IFF_UP && netif_running(netdev)) { 137 mutex_lock(&lif->queue_lock); 138 err = ionic_start_queues(lif); 139 if (err && err != -EBUSY) { 140 netdev_err(lif->netdev, 141 "Failed to start queues: %d\n", err); 142 set_bit(IONIC_LIF_F_BROKEN, lif->state); 143 netif_carrier_off(lif->netdev); 144 } 145 mutex_unlock(&lif->queue_lock); 146 } 147 148 if (!err && !netif_carrier_ok(netdev)) { 149 ionic_port_identify(lif->ionic); 150 netdev_info(netdev, "Link up - %d Gbps\n", 151 le32_to_cpu(lif->info->status.link_speed) / 1000); 152 netif_carrier_on(netdev); 153 } 154 } else { 155 if (netif_carrier_ok(netdev)) { 156 netdev_info(netdev, "Link down\n"); 157 netif_carrier_off(netdev); 158 } 159 160 if (netdev->flags & IFF_UP && netif_running(netdev)) { 161 mutex_lock(&lif->queue_lock); 162 ionic_stop_queues(lif); 163 mutex_unlock(&lif->queue_lock); 164 } 165 } 166 167 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state); 168 } 169 170 void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep) 171 { 172 struct ionic_deferred_work *work; 173 174 /* we only need one request outstanding at a time */ 175 if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state)) 176 return; 177 178 if (!can_sleep) { 179 work = kzalloc(sizeof(*work), GFP_ATOMIC); 180 if (!work) { 181 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state); 182 return; 183 } 184 185 work->type = IONIC_DW_TYPE_LINK_STATUS; 186 ionic_lif_deferred_enqueue(&lif->deferred, work); 187 } else { 188 ionic_link_status_check(lif); 189 } 190 } 191 192 static irqreturn_t ionic_isr(int irq, void *data) 193 { 194 struct napi_struct *napi = data; 195 196 napi_schedule_irqoff(napi); 197 198 return IRQ_HANDLED; 199 } 200 201 static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq) 202 { 203 struct 
ionic_intr_info *intr = &qcq->intr; 204 struct device *dev = lif->ionic->dev; 205 struct ionic_queue *q = &qcq->q; 206 const char *name; 207 208 if (lif->registered) 209 name = lif->netdev->name; 210 else 211 name = dev_name(dev); 212 213 snprintf(intr->name, sizeof(intr->name), 214 "%s-%s-%s", IONIC_DRV_NAME, name, q->name); 215 216 return devm_request_irq(dev, intr->vector, ionic_isr, 217 0, intr->name, &qcq->napi); 218 } 219 220 static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr) 221 { 222 struct ionic *ionic = lif->ionic; 223 int index; 224 225 index = find_first_zero_bit(ionic->intrs, ionic->nintrs); 226 if (index == ionic->nintrs) { 227 netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n", 228 __func__, index, ionic->nintrs); 229 return -ENOSPC; 230 } 231 232 set_bit(index, ionic->intrs); 233 ionic_intr_init(&ionic->idev, intr, index); 234 235 return 0; 236 } 237 238 static void ionic_intr_free(struct ionic *ionic, int index) 239 { 240 if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs) 241 clear_bit(index, ionic->intrs); 242 } 243 244 static int ionic_qcq_enable(struct ionic_qcq *qcq) 245 { 246 struct ionic_queue *q = &qcq->q; 247 struct ionic_lif *lif = q->lif; 248 struct ionic_dev *idev; 249 struct device *dev; 250 251 struct ionic_admin_ctx ctx = { 252 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 253 .cmd.q_control = { 254 .opcode = IONIC_CMD_Q_CONTROL, 255 .lif_index = cpu_to_le16(lif->index), 256 .type = q->type, 257 .index = cpu_to_le32(q->index), 258 .oper = IONIC_Q_ENABLE, 259 }, 260 }; 261 262 idev = &lif->ionic->idev; 263 dev = lif->ionic->dev; 264 265 dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n", 266 ctx.cmd.q_control.index, ctx.cmd.q_control.type); 267 268 if (qcq->flags & IONIC_QCQ_F_INTR) { 269 irq_set_affinity_hint(qcq->intr.vector, 270 &qcq->intr.affinity_mask); 271 napi_enable(&qcq->napi); 272 ionic_intr_clean(idev->intr_ctrl, qcq->intr.index); 273 
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 274 IONIC_INTR_MASK_CLEAR); 275 } 276 277 return ionic_adminq_post_wait(lif, &ctx); 278 } 279 280 static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw) 281 { 282 struct ionic_queue *q; 283 struct ionic_lif *lif; 284 int err = 0; 285 286 struct ionic_admin_ctx ctx = { 287 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 288 .cmd.q_control = { 289 .opcode = IONIC_CMD_Q_CONTROL, 290 .oper = IONIC_Q_DISABLE, 291 }, 292 }; 293 294 if (!qcq) 295 return -ENXIO; 296 297 q = &qcq->q; 298 lif = q->lif; 299 300 if (qcq->flags & IONIC_QCQ_F_INTR) { 301 struct ionic_dev *idev = &lif->ionic->idev; 302 303 cancel_work_sync(&qcq->dim.work); 304 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 305 IONIC_INTR_MASK_SET); 306 synchronize_irq(qcq->intr.vector); 307 irq_set_affinity_hint(qcq->intr.vector, NULL); 308 napi_disable(&qcq->napi); 309 } 310 311 if (send_to_hw) { 312 ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index); 313 ctx.cmd.q_control.type = q->type; 314 ctx.cmd.q_control.index = cpu_to_le32(q->index); 315 dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n", 316 ctx.cmd.q_control.index, ctx.cmd.q_control.type); 317 318 err = ionic_adminq_post_wait(lif, &ctx); 319 } 320 321 return err; 322 } 323 324 static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq) 325 { 326 struct ionic_dev *idev = &lif->ionic->idev; 327 328 if (!qcq) 329 return; 330 331 if (!(qcq->flags & IONIC_QCQ_F_INITED)) 332 return; 333 334 if (qcq->flags & IONIC_QCQ_F_INTR) { 335 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 336 IONIC_INTR_MASK_SET); 337 netif_napi_del(&qcq->napi); 338 } 339 340 qcq->flags &= ~IONIC_QCQ_F_INITED; 341 } 342 343 static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq) 344 { 345 if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0) 346 return; 347 348 irq_set_affinity_hint(qcq->intr.vector, NULL); 349 devm_free_irq(lif->ionic->dev, 
qcq->intr.vector, &qcq->napi); 350 qcq->intr.vector = 0; 351 ionic_intr_free(lif->ionic, qcq->intr.index); 352 qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED; 353 } 354 355 static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq) 356 { 357 struct device *dev = lif->ionic->dev; 358 359 if (!qcq) 360 return; 361 362 ionic_debugfs_del_qcq(qcq); 363 364 if (qcq->q_base) { 365 dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa); 366 qcq->q_base = NULL; 367 qcq->q_base_pa = 0; 368 } 369 370 if (qcq->cq_base) { 371 dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa); 372 qcq->cq_base = NULL; 373 qcq->cq_base_pa = 0; 374 } 375 376 if (qcq->sg_base) { 377 dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa); 378 qcq->sg_base = NULL; 379 qcq->sg_base_pa = 0; 380 } 381 382 ionic_qcq_intr_free(lif, qcq); 383 384 if (qcq->cq.info) { 385 devm_kfree(dev, qcq->cq.info); 386 qcq->cq.info = NULL; 387 } 388 if (qcq->q.info) { 389 devm_kfree(dev, qcq->q.info); 390 qcq->q.info = NULL; 391 } 392 } 393 394 static void ionic_qcqs_free(struct ionic_lif *lif) 395 { 396 struct device *dev = lif->ionic->dev; 397 struct ionic_qcq *adminqcq; 398 unsigned long irqflags; 399 400 if (lif->notifyqcq) { 401 ionic_qcq_free(lif, lif->notifyqcq); 402 devm_kfree(dev, lif->notifyqcq); 403 lif->notifyqcq = NULL; 404 } 405 406 if (lif->adminqcq) { 407 spin_lock_irqsave(&lif->adminq_lock, irqflags); 408 adminqcq = READ_ONCE(lif->adminqcq); 409 lif->adminqcq = NULL; 410 spin_unlock_irqrestore(&lif->adminq_lock, irqflags); 411 if (adminqcq) { 412 ionic_qcq_free(lif, adminqcq); 413 devm_kfree(dev, adminqcq); 414 } 415 } 416 417 if (lif->rxqcqs) { 418 devm_kfree(dev, lif->rxqstats); 419 lif->rxqstats = NULL; 420 devm_kfree(dev, lif->rxqcqs); 421 lif->rxqcqs = NULL; 422 } 423 424 if (lif->txqcqs) { 425 devm_kfree(dev, lif->txqstats); 426 lif->txqstats = NULL; 427 devm_kfree(dev, lif->txqcqs); 428 lif->txqcqs = NULL; 429 } 430 } 431 432 static void 
ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq, 433 struct ionic_qcq *n_qcq) 434 { 435 if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) { 436 ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index); 437 n_qcq->flags &= ~IONIC_QCQ_F_INTR; 438 } 439 440 n_qcq->intr.vector = src_qcq->intr.vector; 441 n_qcq->intr.index = src_qcq->intr.index; 442 } 443 444 static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq) 445 { 446 int err; 447 448 if (!(qcq->flags & IONIC_QCQ_F_INTR)) { 449 qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED; 450 return 0; 451 } 452 453 err = ionic_intr_alloc(lif, &qcq->intr); 454 if (err) { 455 netdev_warn(lif->netdev, "no intr for %s: %d\n", 456 qcq->q.name, err); 457 goto err_out; 458 } 459 460 err = ionic_bus_get_irq(lif->ionic, qcq->intr.index); 461 if (err < 0) { 462 netdev_warn(lif->netdev, "no vector for %s: %d\n", 463 qcq->q.name, err); 464 goto err_out_free_intr; 465 } 466 qcq->intr.vector = err; 467 ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index, 468 IONIC_INTR_MASK_SET); 469 470 err = ionic_request_irq(lif, qcq); 471 if (err) { 472 netdev_warn(lif->netdev, "irq request failed %d\n", err); 473 goto err_out_free_intr; 474 } 475 476 /* try to get the irq on the local numa node first */ 477 qcq->intr.cpu = cpumask_local_spread(qcq->intr.index, 478 dev_to_node(lif->ionic->dev)); 479 if (qcq->intr.cpu != -1) 480 cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask); 481 482 netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index); 483 return 0; 484 485 err_out_free_intr: 486 ionic_intr_free(lif->ionic, qcq->intr.index); 487 err_out: 488 return err; 489 } 490 491 static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, 492 unsigned int index, 493 const char *name, unsigned int flags, 494 unsigned int num_descs, unsigned int desc_size, 495 unsigned int cq_desc_size, 496 unsigned int sg_desc_size, 497 unsigned int pid, struct ionic_qcq **qcq) 498 { 499 struct 
ionic_dev *idev = &lif->ionic->idev; 500 struct device *dev = lif->ionic->dev; 501 void *q_base, *cq_base, *sg_base; 502 dma_addr_t cq_base_pa = 0; 503 dma_addr_t sg_base_pa = 0; 504 dma_addr_t q_base_pa = 0; 505 struct ionic_qcq *new; 506 int err; 507 508 *qcq = NULL; 509 510 new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL); 511 if (!new) { 512 netdev_err(lif->netdev, "Cannot allocate queue structure\n"); 513 err = -ENOMEM; 514 goto err_out; 515 } 516 517 new->q.dev = dev; 518 new->flags = flags; 519 520 new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info), 521 GFP_KERNEL); 522 if (!new->q.info) { 523 netdev_err(lif->netdev, "Cannot allocate queue info\n"); 524 err = -ENOMEM; 525 goto err_out_free_qcq; 526 } 527 528 new->q.type = type; 529 new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems; 530 531 err = ionic_q_init(lif, idev, &new->q, index, name, num_descs, 532 desc_size, sg_desc_size, pid); 533 if (err) { 534 netdev_err(lif->netdev, "Cannot initialize queue\n"); 535 goto err_out_free_q_info; 536 } 537 538 err = ionic_alloc_qcq_interrupt(lif, new); 539 if (err) 540 goto err_out; 541 542 new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info), 543 GFP_KERNEL); 544 if (!new->cq.info) { 545 netdev_err(lif->netdev, "Cannot allocate completion queue info\n"); 546 err = -ENOMEM; 547 goto err_out_free_irq; 548 } 549 550 err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size); 551 if (err) { 552 netdev_err(lif->netdev, "Cannot initialize completion queue\n"); 553 goto err_out_free_cq_info; 554 } 555 556 if (flags & IONIC_QCQ_F_NOTIFYQ) { 557 int q_size, cq_size; 558 559 /* q & cq need to be contiguous in case of notifyq */ 560 q_size = ALIGN(num_descs * desc_size, PAGE_SIZE); 561 cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE); 562 563 new->q_size = PAGE_SIZE + q_size + cq_size; 564 new->q_base = dma_alloc_coherent(dev, new->q_size, 565 &new->q_base_pa, GFP_KERNEL); 566 if (!new->q_base) { 567 netdev_err(lif->netdev, 
"Cannot allocate qcq DMA memory\n"); 568 err = -ENOMEM; 569 goto err_out_free_cq_info; 570 } 571 q_base = PTR_ALIGN(new->q_base, PAGE_SIZE); 572 q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE); 573 ionic_q_map(&new->q, q_base, q_base_pa); 574 575 cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE); 576 cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE); 577 ionic_cq_map(&new->cq, cq_base, cq_base_pa); 578 ionic_cq_bind(&new->cq, &new->q); 579 } else { 580 new->q_size = PAGE_SIZE + (num_descs * desc_size); 581 new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa, 582 GFP_KERNEL); 583 if (!new->q_base) { 584 netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n"); 585 err = -ENOMEM; 586 goto err_out_free_cq_info; 587 } 588 q_base = PTR_ALIGN(new->q_base, PAGE_SIZE); 589 q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE); 590 ionic_q_map(&new->q, q_base, q_base_pa); 591 592 new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size); 593 new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa, 594 GFP_KERNEL); 595 if (!new->cq_base) { 596 netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n"); 597 err = -ENOMEM; 598 goto err_out_free_q; 599 } 600 cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE); 601 cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE); 602 ionic_cq_map(&new->cq, cq_base, cq_base_pa); 603 ionic_cq_bind(&new->cq, &new->q); 604 } 605 606 if (flags & IONIC_QCQ_F_SG) { 607 new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size); 608 new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa, 609 GFP_KERNEL); 610 if (!new->sg_base) { 611 netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n"); 612 err = -ENOMEM; 613 goto err_out_free_cq; 614 } 615 sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE); 616 sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE); 617 ionic_q_sg_map(&new->q, sg_base, sg_base_pa); 618 } 619 620 INIT_WORK(&new->dim.work, ionic_dim_work); 621 new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 622 623 *qcq = new; 624 625 
return 0; 626 627 err_out_free_cq: 628 dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa); 629 err_out_free_q: 630 dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa); 631 err_out_free_cq_info: 632 devm_kfree(dev, new->cq.info); 633 err_out_free_irq: 634 if (flags & IONIC_QCQ_F_INTR) { 635 devm_free_irq(dev, new->intr.vector, &new->napi); 636 ionic_intr_free(lif->ionic, new->intr.index); 637 } 638 err_out_free_q_info: 639 devm_kfree(dev, new->q.info); 640 err_out_free_qcq: 641 devm_kfree(dev, new); 642 err_out: 643 dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err); 644 return err; 645 } 646 647 static int ionic_qcqs_alloc(struct ionic_lif *lif) 648 { 649 struct device *dev = lif->ionic->dev; 650 unsigned int flags; 651 int err; 652 653 flags = IONIC_QCQ_F_INTR; 654 err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags, 655 IONIC_ADMINQ_LENGTH, 656 sizeof(struct ionic_admin_cmd), 657 sizeof(struct ionic_admin_comp), 658 0, lif->kern_pid, &lif->adminqcq); 659 if (err) 660 return err; 661 ionic_debugfs_add_qcq(lif, lif->adminqcq); 662 663 if (lif->ionic->nnqs_per_lif) { 664 flags = IONIC_QCQ_F_NOTIFYQ; 665 err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq", 666 flags, IONIC_NOTIFYQ_LENGTH, 667 sizeof(struct ionic_notifyq_cmd), 668 sizeof(union ionic_notifyq_comp), 669 0, lif->kern_pid, &lif->notifyqcq); 670 if (err) 671 goto err_out; 672 ionic_debugfs_add_qcq(lif, lif->notifyqcq); 673 674 /* Let the notifyq ride on the adminq interrupt */ 675 ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq); 676 } 677 678 err = -ENOMEM; 679 lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif, 680 sizeof(*lif->txqcqs), GFP_KERNEL); 681 if (!lif->txqcqs) 682 goto err_out; 683 lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif, 684 sizeof(*lif->rxqcqs), GFP_KERNEL); 685 if (!lif->rxqcqs) 686 goto err_out; 687 688 lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif + 1, 689 
sizeof(*lif->txqstats), GFP_KERNEL); 690 if (!lif->txqstats) 691 goto err_out; 692 lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif + 1, 693 sizeof(*lif->rxqstats), GFP_KERNEL); 694 if (!lif->rxqstats) 695 goto err_out; 696 697 return 0; 698 699 err_out: 700 ionic_qcqs_free(lif); 701 return err; 702 } 703 704 static void ionic_qcq_sanitize(struct ionic_qcq *qcq) 705 { 706 qcq->q.tail_idx = 0; 707 qcq->q.head_idx = 0; 708 qcq->cq.tail_idx = 0; 709 qcq->cq.done_color = 1; 710 memset(qcq->q_base, 0, qcq->q_size); 711 memset(qcq->cq_base, 0, qcq->cq_size); 712 memset(qcq->sg_base, 0, qcq->sg_size); 713 } 714 715 static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) 716 { 717 struct device *dev = lif->ionic->dev; 718 struct ionic_queue *q = &qcq->q; 719 struct ionic_cq *cq = &qcq->cq; 720 struct ionic_admin_ctx ctx = { 721 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 722 .cmd.q_init = { 723 .opcode = IONIC_CMD_Q_INIT, 724 .lif_index = cpu_to_le16(lif->index), 725 .type = q->type, 726 .ver = lif->qtype_info[q->type].version, 727 .index = cpu_to_le32(q->index), 728 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | 729 IONIC_QINIT_F_SG), 730 .pid = cpu_to_le16(q->pid), 731 .ring_size = ilog2(q->num_descs), 732 .ring_base = cpu_to_le64(q->base_pa), 733 .cq_ring_base = cpu_to_le64(cq->base_pa), 734 .sg_ring_base = cpu_to_le64(q->sg_base_pa), 735 .features = cpu_to_le64(q->features), 736 }, 737 }; 738 unsigned int intr_index; 739 int err; 740 741 intr_index = qcq->intr.index; 742 743 ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index); 744 745 dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid); 746 dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index); 747 dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); 748 dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); 749 dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags); 750 dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver); 751 dev_dbg(dev, 
"txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index); 752 753 ionic_qcq_sanitize(qcq); 754 755 err = ionic_adminq_post_wait(lif, &ctx); 756 if (err) 757 return err; 758 759 q->hw_type = ctx.comp.q_init.hw_type; 760 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); 761 q->dbval = IONIC_DBELL_QID(q->hw_index); 762 763 dev_dbg(dev, "txq->hw_type %d\n", q->hw_type); 764 dev_dbg(dev, "txq->hw_index %d\n", q->hw_index); 765 766 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) 767 netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi, 768 NAPI_POLL_WEIGHT); 769 770 qcq->flags |= IONIC_QCQ_F_INITED; 771 772 return 0; 773 } 774 775 static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) 776 { 777 struct device *dev = lif->ionic->dev; 778 struct ionic_queue *q = &qcq->q; 779 struct ionic_cq *cq = &qcq->cq; 780 struct ionic_admin_ctx ctx = { 781 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 782 .cmd.q_init = { 783 .opcode = IONIC_CMD_Q_INIT, 784 .lif_index = cpu_to_le16(lif->index), 785 .type = q->type, 786 .ver = lif->qtype_info[q->type].version, 787 .index = cpu_to_le32(q->index), 788 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | 789 IONIC_QINIT_F_SG), 790 .intr_index = cpu_to_le16(cq->bound_intr->index), 791 .pid = cpu_to_le16(q->pid), 792 .ring_size = ilog2(q->num_descs), 793 .ring_base = cpu_to_le64(q->base_pa), 794 .cq_ring_base = cpu_to_le64(cq->base_pa), 795 .sg_ring_base = cpu_to_le64(q->sg_base_pa), 796 .features = cpu_to_le64(q->features), 797 }, 798 }; 799 int err; 800 801 dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid); 802 dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index); 803 dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); 804 dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); 805 dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags); 806 dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver); 807 dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index); 808 809 
ionic_qcq_sanitize(qcq); 810 811 err = ionic_adminq_post_wait(lif, &ctx); 812 if (err) 813 return err; 814 815 q->hw_type = ctx.comp.q_init.hw_type; 816 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); 817 q->dbval = IONIC_DBELL_QID(q->hw_index); 818 819 dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type); 820 dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index); 821 822 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) 823 netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi, 824 NAPI_POLL_WEIGHT); 825 else 826 netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi, 827 NAPI_POLL_WEIGHT); 828 829 qcq->flags |= IONIC_QCQ_F_INITED; 830 831 return 0; 832 } 833 834 int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif) 835 { 836 unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz; 837 unsigned int txq_i, flags; 838 struct ionic_qcq *txq; 839 u64 features; 840 int err; 841 842 mutex_lock(&lif->queue_lock); 843 844 if (lif->hwstamp_txq) 845 goto out; 846 847 features = IONIC_Q_F_2X_CQ_DESC | IONIC_TXQ_F_HWSTAMP; 848 849 num_desc = IONIC_MIN_TXRX_DESC; 850 desc_sz = sizeof(struct ionic_txq_desc); 851 comp_sz = 2 * sizeof(struct ionic_txq_comp); 852 853 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 && 854 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == sizeof(struct ionic_txq_sg_desc_v1)) 855 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1); 856 else 857 sg_desc_sz = sizeof(struct ionic_txq_sg_desc); 858 859 txq_i = lif->ionic->ntxqs_per_lif; 860 flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; 861 862 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags, 863 num_desc, desc_sz, comp_sz, sg_desc_sz, 864 lif->kern_pid, &txq); 865 if (err) 866 goto err_qcq_alloc; 867 868 txq->q.features = features; 869 870 ionic_link_qcq_interrupts(lif->adminqcq, txq); 871 ionic_debugfs_add_qcq(lif, txq); 872 873 lif->hwstamp_txq = txq; 874 875 if (netif_running(lif->netdev)) { 876 err = ionic_lif_txq_init(lif, txq); 877 if (err) 878 goto err_qcq_init; 879 880 if 
(test_bit(IONIC_LIF_F_UP, lif->state)) { 881 err = ionic_qcq_enable(txq); 882 if (err) 883 goto err_qcq_enable; 884 } 885 } 886 887 out: 888 mutex_unlock(&lif->queue_lock); 889 890 return 0; 891 892 err_qcq_enable: 893 ionic_lif_qcq_deinit(lif, txq); 894 err_qcq_init: 895 lif->hwstamp_txq = NULL; 896 ionic_debugfs_del_qcq(txq); 897 ionic_qcq_free(lif, txq); 898 devm_kfree(lif->ionic->dev, txq); 899 err_qcq_alloc: 900 mutex_unlock(&lif->queue_lock); 901 return err; 902 } 903 904 int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif) 905 { 906 unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz; 907 unsigned int rxq_i, flags; 908 struct ionic_qcq *rxq; 909 u64 features; 910 int err; 911 912 mutex_lock(&lif->queue_lock); 913 914 if (lif->hwstamp_rxq) 915 goto out; 916 917 features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP; 918 919 num_desc = IONIC_MIN_TXRX_DESC; 920 desc_sz = sizeof(struct ionic_rxq_desc); 921 comp_sz = 2 * sizeof(struct ionic_rxq_comp); 922 sg_desc_sz = sizeof(struct ionic_rxq_sg_desc); 923 924 rxq_i = lif->ionic->nrxqs_per_lif; 925 flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG; 926 927 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags, 928 num_desc, desc_sz, comp_sz, sg_desc_sz, 929 lif->kern_pid, &rxq); 930 if (err) 931 goto err_qcq_alloc; 932 933 rxq->q.features = features; 934 935 ionic_link_qcq_interrupts(lif->adminqcq, rxq); 936 ionic_debugfs_add_qcq(lif, rxq); 937 938 lif->hwstamp_rxq = rxq; 939 940 if (netif_running(lif->netdev)) { 941 err = ionic_lif_rxq_init(lif, rxq); 942 if (err) 943 goto err_qcq_init; 944 945 if (test_bit(IONIC_LIF_F_UP, lif->state)) { 946 ionic_rx_fill(&rxq->q); 947 err = ionic_qcq_enable(rxq); 948 if (err) 949 goto err_qcq_enable; 950 } 951 } 952 953 out: 954 mutex_unlock(&lif->queue_lock); 955 956 return 0; 957 958 err_qcq_enable: 959 ionic_lif_qcq_deinit(lif, rxq); 960 err_qcq_init: 961 lif->hwstamp_rxq = NULL; 962 ionic_debugfs_del_qcq(rxq); 963 ionic_qcq_free(lif, rxq); 964 
devm_kfree(lif->ionic->dev, rxq); 965 err_qcq_alloc: 966 mutex_unlock(&lif->queue_lock); 967 return err; 968 } 969 970 int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif *lif, bool rx_all) 971 { 972 struct ionic_queue_params qparam; 973 974 ionic_init_queue_params(lif, &qparam); 975 976 if (rx_all) 977 qparam.rxq_features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP; 978 else 979 qparam.rxq_features = 0; 980 981 /* if we're not running, just set the values and return */ 982 if (!netif_running(lif->netdev)) { 983 lif->rxq_features = qparam.rxq_features; 984 return 0; 985 } 986 987 return ionic_reconfigure_queues(lif, &qparam); 988 } 989 990 int ionic_lif_set_hwstamp_txmode(struct ionic_lif *lif, u16 txstamp_mode) 991 { 992 struct ionic_admin_ctx ctx = { 993 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 994 .cmd.lif_setattr = { 995 .opcode = IONIC_CMD_LIF_SETATTR, 996 .index = cpu_to_le16(lif->index), 997 .attr = IONIC_LIF_ATTR_TXSTAMP, 998 .txstamp_mode = cpu_to_le16(txstamp_mode), 999 }, 1000 }; 1001 1002 return ionic_adminq_post_wait(lif, &ctx); 1003 } 1004 1005 static void ionic_lif_del_hwstamp_rxfilt(struct ionic_lif *lif) 1006 { 1007 struct ionic_admin_ctx ctx = { 1008 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1009 .cmd.rx_filter_del = { 1010 .opcode = IONIC_CMD_RX_FILTER_DEL, 1011 .lif_index = cpu_to_le16(lif->index), 1012 }, 1013 }; 1014 struct ionic_rx_filter *f; 1015 u32 filter_id; 1016 int err; 1017 1018 spin_lock_bh(&lif->rx_filters.lock); 1019 1020 f = ionic_rx_filter_rxsteer(lif); 1021 if (!f) { 1022 spin_unlock_bh(&lif->rx_filters.lock); 1023 return; 1024 } 1025 1026 filter_id = f->filter_id; 1027 ionic_rx_filter_free(lif, f); 1028 1029 spin_unlock_bh(&lif->rx_filters.lock); 1030 1031 netdev_dbg(lif->netdev, "rx_filter del RXSTEER (id %d)\n", filter_id); 1032 1033 ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(filter_id); 1034 1035 err = ionic_adminq_post_wait(lif, &ctx); 1036 if (err && err != -EEXIST) 1037 netdev_dbg(lif->netdev, 
"failed to delete rx_filter RXSTEER (id %d)\n", filter_id); 1038 } 1039 1040 static int ionic_lif_add_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class) 1041 { 1042 struct ionic_admin_ctx ctx = { 1043 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1044 .cmd.rx_filter_add = { 1045 .opcode = IONIC_CMD_RX_FILTER_ADD, 1046 .lif_index = cpu_to_le16(lif->index), 1047 .match = cpu_to_le16(IONIC_RX_FILTER_STEER_PKTCLASS), 1048 .pkt_class = cpu_to_le64(pkt_class), 1049 }, 1050 }; 1051 u8 qtype; 1052 u32 qid; 1053 int err; 1054 1055 if (!lif->hwstamp_rxq) 1056 return -EINVAL; 1057 1058 qtype = lif->hwstamp_rxq->q.type; 1059 ctx.cmd.rx_filter_add.qtype = qtype; 1060 1061 qid = lif->hwstamp_rxq->q.index; 1062 ctx.cmd.rx_filter_add.qid = cpu_to_le32(qid); 1063 1064 netdev_dbg(lif->netdev, "rx_filter add RXSTEER\n"); 1065 err = ionic_adminq_post_wait(lif, &ctx); 1066 if (err && err != -EEXIST) 1067 return err; 1068 1069 return ionic_rx_filter_save(lif, 0, qid, 0, &ctx); 1070 } 1071 1072 int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class) 1073 { 1074 ionic_lif_del_hwstamp_rxfilt(lif); 1075 1076 if (!pkt_class) 1077 return 0; 1078 1079 return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class); 1080 } 1081 1082 static bool ionic_notifyq_service(struct ionic_cq *cq, 1083 struct ionic_cq_info *cq_info) 1084 { 1085 union ionic_notifyq_comp *comp = cq_info->cq_desc; 1086 struct ionic_deferred_work *work; 1087 struct net_device *netdev; 1088 struct ionic_queue *q; 1089 struct ionic_lif *lif; 1090 u64 eid; 1091 1092 q = cq->bound_q; 1093 lif = q->info[0].cb_arg; 1094 netdev = lif->netdev; 1095 eid = le64_to_cpu(comp->event.eid); 1096 1097 /* Have we run out of new completions to process? 
*/ 1098 if ((s64)(eid - lif->last_eid) <= 0) 1099 return false; 1100 1101 lif->last_eid = eid; 1102 1103 dev_dbg(lif->ionic->dev, "notifyq event:\n"); 1104 dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1, 1105 comp, sizeof(*comp), true); 1106 1107 switch (le16_to_cpu(comp->event.ecode)) { 1108 case IONIC_EVENT_LINK_CHANGE: 1109 ionic_link_status_check_request(lif, CAN_NOT_SLEEP); 1110 break; 1111 case IONIC_EVENT_RESET: 1112 work = kzalloc(sizeof(*work), GFP_ATOMIC); 1113 if (!work) { 1114 netdev_err(lif->netdev, "Reset event dropped\n"); 1115 } else { 1116 work->type = IONIC_DW_TYPE_LIF_RESET; 1117 ionic_lif_deferred_enqueue(&lif->deferred, work); 1118 } 1119 break; 1120 default: 1121 netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n", 1122 comp->event.ecode, eid); 1123 break; 1124 } 1125 1126 return true; 1127 } 1128 1129 static bool ionic_adminq_service(struct ionic_cq *cq, 1130 struct ionic_cq_info *cq_info) 1131 { 1132 struct ionic_admin_comp *comp = cq_info->cq_desc; 1133 1134 if (!color_match(comp->color, cq->done_color)) 1135 return false; 1136 1137 ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index)); 1138 1139 return true; 1140 } 1141 1142 static int ionic_adminq_napi(struct napi_struct *napi, int budget) 1143 { 1144 struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr; 1145 struct ionic_lif *lif = napi_to_cq(napi)->lif; 1146 struct ionic_dev *idev = &lif->ionic->idev; 1147 unsigned long irqflags; 1148 unsigned int flags = 0; 1149 int rx_work = 0; 1150 int tx_work = 0; 1151 int n_work = 0; 1152 int a_work = 0; 1153 int work_done; 1154 int credits; 1155 1156 if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED) 1157 n_work = ionic_cq_service(&lif->notifyqcq->cq, budget, 1158 ionic_notifyq_service, NULL, NULL); 1159 1160 spin_lock_irqsave(&lif->adminq_lock, irqflags); 1161 if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED) 1162 a_work = ionic_cq_service(&lif->adminqcq->cq, budget, 1163 
ionic_adminq_service, NULL, NULL); 1164 spin_unlock_irqrestore(&lif->adminq_lock, irqflags); 1165 1166 if (lif->hwstamp_rxq) 1167 rx_work = ionic_cq_service(&lif->hwstamp_rxq->cq, budget, 1168 ionic_rx_service, NULL, NULL); 1169 1170 if (lif->hwstamp_txq) 1171 tx_work = ionic_cq_service(&lif->hwstamp_txq->cq, budget, 1172 ionic_tx_service, NULL, NULL); 1173 1174 work_done = max(max(n_work, a_work), max(rx_work, tx_work)); 1175 if (work_done < budget && napi_complete_done(napi, work_done)) { 1176 flags |= IONIC_INTR_CRED_UNMASK; 1177 intr->rearm_count++; 1178 } 1179 1180 if (work_done || flags) { 1181 flags |= IONIC_INTR_CRED_RESET_COALESCE; 1182 credits = n_work + a_work + rx_work + tx_work; 1183 ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags); 1184 } 1185 1186 return work_done; 1187 } 1188 1189 void ionic_get_stats64(struct net_device *netdev, 1190 struct rtnl_link_stats64 *ns) 1191 { 1192 struct ionic_lif *lif = netdev_priv(netdev); 1193 struct ionic_lif_stats *ls; 1194 1195 memset(ns, 0, sizeof(*ns)); 1196 ls = &lif->info->stats; 1197 1198 ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) + 1199 le64_to_cpu(ls->rx_mcast_packets) + 1200 le64_to_cpu(ls->rx_bcast_packets); 1201 1202 ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) + 1203 le64_to_cpu(ls->tx_mcast_packets) + 1204 le64_to_cpu(ls->tx_bcast_packets); 1205 1206 ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) + 1207 le64_to_cpu(ls->rx_mcast_bytes) + 1208 le64_to_cpu(ls->rx_bcast_bytes); 1209 1210 ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) + 1211 le64_to_cpu(ls->tx_mcast_bytes) + 1212 le64_to_cpu(ls->tx_bcast_bytes); 1213 1214 ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) + 1215 le64_to_cpu(ls->rx_mcast_drop_packets) + 1216 le64_to_cpu(ls->rx_bcast_drop_packets); 1217 1218 ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) + 1219 le64_to_cpu(ls->tx_mcast_drop_packets) + 1220 le64_to_cpu(ls->tx_bcast_drop_packets); 1221 1222 ns->multicast = 
le64_to_cpu(ls->rx_mcast_packets); 1223 1224 ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty); 1225 1226 ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) + 1227 le64_to_cpu(ls->rx_queue_disabled) + 1228 le64_to_cpu(ls->rx_desc_fetch_error) + 1229 le64_to_cpu(ls->rx_desc_data_error); 1230 1231 ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) + 1232 le64_to_cpu(ls->tx_queue_disabled) + 1233 le64_to_cpu(ls->tx_desc_fetch_error) + 1234 le64_to_cpu(ls->tx_desc_data_error); 1235 1236 ns->rx_errors = ns->rx_over_errors + 1237 ns->rx_missed_errors; 1238 1239 ns->tx_errors = ns->tx_aborted_errors; 1240 } 1241 1242 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) 1243 { 1244 struct ionic_admin_ctx ctx = { 1245 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1246 .cmd.rx_filter_add = { 1247 .opcode = IONIC_CMD_RX_FILTER_ADD, 1248 .lif_index = cpu_to_le16(lif->index), 1249 .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC), 1250 }, 1251 }; 1252 struct ionic_rx_filter *f; 1253 int err; 1254 1255 /* don't bother if we already have it */ 1256 spin_lock_bh(&lif->rx_filters.lock); 1257 f = ionic_rx_filter_by_addr(lif, addr); 1258 spin_unlock_bh(&lif->rx_filters.lock); 1259 if (f) 1260 return 0; 1261 1262 netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr); 1263 1264 memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); 1265 err = ionic_adminq_post_wait(lif, &ctx); 1266 if (err && err != -EEXIST) 1267 return err; 1268 1269 return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx); 1270 } 1271 1272 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) 1273 { 1274 struct ionic_admin_ctx ctx = { 1275 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1276 .cmd.rx_filter_del = { 1277 .opcode = IONIC_CMD_RX_FILTER_DEL, 1278 .lif_index = cpu_to_le16(lif->index), 1279 }, 1280 }; 1281 struct ionic_rx_filter *f; 1282 int err; 1283 1284 spin_lock_bh(&lif->rx_filters.lock); 1285 f = ionic_rx_filter_by_addr(lif, addr); 1286 if (!f) { 
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n",
		   addr, f->filter_id);

	/* grab the id, then drop the local entry before talking to the FW */
	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	err = ionic_adminq_post_wait(lif, &ctx);
	/* NOTE(review): tolerating -EEXIST on a *delete* looks odd; one
	 * would expect -ENOENT to be the benign "already gone" error --
	 * confirm against the FW's adminq error mapping.
	 */
	if (err && err != -EEXIST)
		return err;

	return 0;
}

/* Account the add/del against the LIF's ucast/mcast filter budget,
 * then either apply the change immediately (can_sleep) or defer it to
 * the deferred-work task (atomic context).  Returns -ENOSPC when an
 * add would exceed the FW-advertised filter limits.
 */
static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add,
			  bool can_sleep)
{
	struct ionic_deferred_work *work;
	unsigned int nmfilters;
	unsigned int nufilters;

	if (add) {
		/* Do we have space for this filter?  We test the counters
		 * here before checking the need for deferral so that we
		 * can return an overflow error to the stack.
		 */
		nmfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
		nufilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);

		if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters))
			lif->nmcast++;
		else if (!is_multicast_ether_addr(addr) &&
			 lif->nucast < nufilters)
			lif->nucast++;
		else
			return -ENOSPC;
	} else {
		if (is_multicast_ether_addr(addr) && lif->nmcast)
			lif->nmcast--;
		else if (!is_multicast_ether_addr(addr) && lif->nucast)
			lif->nucast--;
	}

	if (!can_sleep) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work)
			return -ENOMEM;
		work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
				   IONIC_DW_TYPE_RX_ADDR_DEL;
		memcpy(work->addr, addr, ETH_ALEN);
		netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
			   add ? "add" : "del", addr);
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
			   add ? "add" : "del", addr);
		if (add)
			return ionic_lif_addr_add(lif, addr);
		else
			return ionic_lif_addr_del(lif, addr);
	}

	return 0;
}

/* __dev_uc_sync/__dev_mc_sync callbacks: process-context variants... */
static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_SLEEP);
}

/* ...and atomic-context variants that defer the actual filter work */
static int ionic_ndo_addr_add(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_NOT_SLEEP);
}

static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_SLEEP);
}

static int ionic_ndo_addr_del(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_NOT_SLEEP);
}

/* Push the given rx_mode bits to the FW, logging a human-readable
 * summary; lif->rx_mode is updated only on success.
 */
static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_mode_set = {
			.opcode = IONIC_CMD_RX_MODE_SET,
			.lif_index = cpu_to_le16(lif->index),
			.rx_mode = cpu_to_le16(rx_mode),
		},
	};
	char buf[128];
	int err;
	int i;
#define REMAIN(__x) (sizeof(buf) - (__x))

	i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
		      lif->rx_mode, rx_mode);
	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
	netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
			    rx_mode, err);
	else
		lif->rx_mode = rx_mode;
}

/* Recompute the desired rx_mode from netdev flags and the uc/mc
 * address lists, falling back to PROMISC/ALLMULTI when the filter
 * tables overflow, and apply it (directly or deferred) if changed.
 */
static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_deferred_work *work;
	unsigned int nfilters;
	unsigned int rx_mode;

	rx_mode = IONIC_RX_MODE_F_UNICAST;
	rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
	rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
	rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
	rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;

	/* sync unicast addresses
	 * next check to see if we're in an overflow state
	 *    if so, we track that we overflowed and enable NIC PROMISC
	 * else if the overflow is set and not needed
	 *    we remove our overflow flag and check the netdev flags
	 *    to see if we can disable NIC PROMISC
	 */
	if (can_sleep)
		__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
	else
		__dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
	nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
	/* the +1 presumably reserves a slot for the port's own MAC
	 * filter -- TODO confirm
	 */
	if (netdev_uc_count(netdev) + 1 > nfilters) {
		rx_mode |= IONIC_RX_MODE_F_PROMISC;
		lif->uc_overflow = true;
	} else if (lif->uc_overflow) {
		lif->uc_overflow = false;
		if (!(netdev->flags & IFF_PROMISC))
			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
	}

	/* same for multicast */
	if (can_sleep)
		__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
	else
		__dev_mc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
	nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
	if (netdev_mc_count(netdev) > nfilters) {
		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
		lif->mc_overflow = true;
	} else if (lif->mc_overflow) {
		lif->mc_overflow = false;
		if (!(netdev->flags & IFF_ALLMULTI))
			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
	}

	if (lif->rx_mode != rx_mode) {
		if (!can_sleep) {
			work = kzalloc(sizeof(*work), GFP_ATOMIC);
			if (!work) {
				netdev_err(lif->netdev, "rxmode change dropped\n");
				return;
			}
			work->type = IONIC_DW_TYPE_RX_MODE;
			work->rx_mode = rx_mode;
			netdev_dbg(lif->netdev, "deferred: rx_mode\n");
			ionic_lif_deferred_enqueue(&lif->deferred, work);
		} else {
			ionic_lif_rx_mode(lif, rx_mode);
		}
	}
}

/* ndo_set_rx_mode runs in atomic context, so always defer */
static void ionic_ndo_set_rx_mode(struct net_device *netdev)
{
	ionic_set_rx_mode(netdev, CAN_NOT_SLEEP);
}

/* Translate NETIF_F_* feature bits into the device's IONIC_ETH_HW_*
 * bits, little-endian as the FW expects.
 */
static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
{
	u64 wanted = 0;

	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
	if (features & NETIF_F_RXHASH)
		wanted |= IONIC_ETH_HW_RX_HASH;
	if (features & NETIF_F_RXCSUM)
		wanted |= IONIC_ETH_HW_RX_CSUM;
	if (features & NETIF_F_SG)
		wanted |= IONIC_ETH_HW_TX_SG;
	if (features & NETIF_F_HW_CSUM)
		wanted |= IONIC_ETH_HW_TX_CSUM;
	if (features & NETIF_F_TSO)
		wanted |= IONIC_ETH_HW_TSO;
	if (features & NETIF_F_TSO6)
		wanted |= IONIC_ETH_HW_TSO_IPV6;
	if (features & NETIF_F_TSO_ECN)
		wanted |= IONIC_ETH_HW_TSO_ECN;
	if (features & NETIF_F_GSO_GRE)
		wanted |= IONIC_ETH_HW_TSO_GRE;
	if (features & NETIF_F_GSO_GRE_CSUM)
		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
	if (features & NETIF_F_GSO_IPXIP4)
		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
	if (features & NETIF_F_GSO_IPXIP6)
		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
	if (features & NETIF_F_GSO_UDP_TUNNEL)
		wanted |= IONIC_ETH_HW_TSO_UDP;
	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;

	return cpu_to_le64(wanted);
}

/* Ask the FW to enable the requested feature set.  On success,
 * lif->hw_features becomes the intersection of what was requested and
 * what the FW granted (reported back in the completion).
 */
static int ionic_set_nic_features(struct ionic_lif *lif,
				  netdev_features_t features)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_FEATURES,
		},
	};
	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
			 IONIC_ETH_HW_VLAN_RX_STRIP |
			 IONIC_ETH_HW_VLAN_RX_FILTER;
	u64 old_hw_features;
	int err;

	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);

	/* always request hw timestamping when a PHC is present */
	if (lif->phc)
		ctx.cmd.lif_setattr.features |= cpu_to_le64(IONIC_ETH_HW_TIMESTAMP);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	old_hw_features = lif->hw_features;
	/* granted = requested & completed; ANDing the two __le64 values
	 * before the byte-swap is endian-safe
	 */
	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
				       ctx.comp.lif_setattr.features);

	/* RSS config must be (re)pushed when RX_HASH toggles */
	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);

	/* NOTE(review): vlan_flags holds IONIC_ETH_HW_* bits but is
	 * tested against NETIF_F_* 'features' here -- the two bit spaces
	 * differ, so this condition may not test what it intends;
	 * confirm against the feature bit definitions.
	 */
	if ((vlan_flags & features) &&
	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		dev_dbg(dev, "feature ETH_HW_TSO\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TIMESTAMP)
		dev_dbg(dev, "feature ETH_HW_TIMESTAMP\n");

	return 0;
}

/* Negotiate the default feature set with the FW, then advertise on
 * the netdev only what the FW actually granted.
 */
static int ionic_init_nic_features(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	netdev_features_t features;
	int err;

	/* set up what we expect to support by default */
	features = NETIF_F_HW_VLAN_CTAG_TX |
		   NETIF_F_HW_VLAN_CTAG_RX |
		   NETIF_F_HW_VLAN_CTAG_FILTER |
		   NETIF_F_SG |
		   NETIF_F_HW_CSUM |
		   NETIF_F_RXCSUM |
		   NETIF_F_TSO |
		   NETIF_F_TSO6 |
		   NETIF_F_TSO_ECN;

	/* hashing only makes sense with more than one queue */
	if (lif->nxqs > 1)
		features |= NETIF_F_RXHASH;

	err = ionic_set_nic_features(lif, features);
	if (err)
		return err;

	/* tell the netdev what we actually can support */
	netdev->features |= NETIF_F_HIGHDMA;

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		netdev->hw_features |= NETIF_F_SG;

	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		netdev->hw_enc_features |= NETIF_F_RXCSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		netdev->hw_enc_features |= NETIF_F_TSO;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		netdev->hw_enc_features |= NETIF_F_TSO6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= netdev->hw_enc_features;
	netdev->features |= netdev->hw_features;
	netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;

	netdev->priv_flags |= IFF_UNICAST_FLT |
			      IFF_LIVE_ADDR_CHANGE;

	return 0;
}

/* ndo_set_features: forward the new feature set to the FW */
static int ionic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
		   __func__, (u64)lif->netdev->features, (u64)features);

	err = ionic_set_nic_features(lif, features);

	return err;
}

/* ndo_set_mac_address: drop the old station address filter (best
 * effort -- a delete failure is ignored) and install the new one.
 */
static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
	struct sockaddr *addr = sa;
	u8 *mac;
	int err;

	mac = (u8 *)addr->sa_data;
	if (ether_addr_equal(netdev->dev_addr, mac))
		return 0;

	err = eth_prepare_mac_addr_change(netdev, addr);
	if (err)
		return err;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		netdev_info(netdev, "deleting mac addr %pM\n",
			    netdev->dev_addr);
		ionic_addr_del(netdev, netdev->dev_addr);
	}

	eth_commit_mac_addr_change(netdev, addr);
	netdev_info(netdev, "updating mac addr %pM\n", mac);

	return ionic_addr_add(netdev, mac);
}

/* Stop and clean the queues before reconfiguration.
 * Note: takes lif->queue_lock and returns with it still held;
 * ionic_start_queues_reconfig() is the matching unlock.
 */
static void ionic_stop_queues_reconfig(struct ionic_lif *lif)
{
	/* Stop and clean the queues before reconfiguration */
	mutex_lock(&lif->queue_lock);
	netif_device_detach(lif->netdev);
	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
}

/* Re-init the queues after reconfiguration and release the queue_lock
 * taken by ionic_stop_queues_reconfig().
 */
static int ionic_start_queues_reconfig(struct ionic_lif *lif)
{
	int err;

	/* Re-init the queues after reconfiguration */

	/* The only way txrx_init can fail here is if communication
	 * with FW is suddenly broken.  There's not much we can do
	 * at this point - error messages have already been printed,
	 * so we can continue on and the user can eventually do a
	 * DOWN and UP to try to reset and clear the issue.
	 */
	err = ionic_txrx_init(lif);
	mutex_unlock(&lif->queue_lock);
	ionic_link_status_check_request(lif, CAN_SLEEP);
	netif_device_attach(lif->netdev);

	return err;
}

/* ndo_change_mtu: tell the FW the new MTU, then (if running) restart
 * the queues so they pick up the change.
 */
static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MTU,
			.mtu = cpu_to_le32(new_mtu),
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* if we're not running, nothing more to do */
	if (!netif_running(netdev)) {
		netdev->mtu = new_mtu;
		return 0;
	}

	ionic_stop_queues_reconfig(lif);
	netdev->mtu = new_mtu;
	return ionic_start_queues_reconfig(lif);
}

/* Deferred tx-timeout handler: bounce the queues to recover, unless
 * the FW is resetting or the netdev is already down.
 */
static void ionic_tx_timeout_work(struct work_struct *ws)
{
	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	/* if we were stopped before this scheduled job was launched,
	 * don't bother the queues as they are already stopped.
	 */
	if (!netif_running(lif->netdev))
		return;

	ionic_stop_queues_reconfig(lif);
	ionic_start_queues_reconfig(lif);
}

/* ndo_tx_timeout: runs in atomic context, so defer the real work */
static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	netdev_info(lif->netdev, "Tx Timeout triggered - txq %d\n", txqueue);
	schedule_work(&lif->tx_timeout_work);
}

/* ndo_vlan_rx_add_vid: install a VLAN match filter in the FW and
 * mirror it in the driver's rx_filters list.
 */
static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
			.vlan.vlan = cpu_to_le16(vid),
		},
	};
	int err;

	netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}

/* ndo_vlan_rx_kill_vid: drop the VLAN filter locally, then in the FW */
static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				  u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_by_vlan(lif, vid);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n",
		   vid, f->filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	return ionic_adminq_post_wait(lif, &ctx);
}

/* Push the RSS configuration (hash types, key, indirection table) to
 * the FW.  NULL key/indir means "keep the cached values".
 * NOTE(review): unlike the other LIF_SETATTR users here, .index is
 * left at 0 -- confirm the FW resolves the right LIF in that case.
 */
int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
			 const u8 *key, const u32 *indir)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_RSS,
			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
		},
	};
	unsigned int i, tbl_sz;

	if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
		lif->rss_types = types;
		ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
	}

	if (key)
		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);

	if (indir) {
		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = indir[i];
	}

	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
	       IONIC_RSS_HASH_KEY_SIZE);

	return ionic_adminq_post_wait(lif, &ctx);
}

/* Set the default hash types and a round-robin indirection table,
 * then push the config to the FW.
 */
static int ionic_lif_rss_init(struct ionic_lif *lif)
{
	unsigned int tbl_sz;
	unsigned int i;

	lif->rss_types = IONIC_RSS_TYPE_IPV4 |
			 IONIC_RSS_TYPE_IPV4_TCP |
			 IONIC_RSS_TYPE_IPV4_UDP |
			 IONIC_RSS_TYPE_IPV6 |
			 IONIC_RSS_TYPE_IPV6_TCP |
			 IONIC_RSS_TYPE_IPV6_UDP;

	/* Fill indirection table with 'default' values */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	for (i = 0; i < tbl_sz; i++)
		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);

	return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
}

/* Clear the local RSS state and disable hashing in the FW.
 * The memset of tbl_sz bytes assumes one byte per indirection-table
 * entry -- TODO confirm rss_ind_tbl's element type.
 */
static void ionic_lif_rss_deinit(struct ionic_lif *lif)
{
	int tbl_sz;

	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	memset(lif->rss_ind_tbl, 0, tbl_sz);
	memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);

	ionic_lif_rss_config(lif, 0x0, NULL, NULL);
}

/* Ask the FW to quiesce the LIF; failure is only logged */
static void ionic_lif_quiesce(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_STATE,
			.state = IONIC_LIF_QUIESCE,
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		netdev_err(lif->netdev, "lif quiesce failed %d\n", err);
}

/* Disable all tx/rx queues, then quiesce the LIF.  The running err is
 * threaded through each disable: after a -ETIMEDOUT the remaining
 * disables skip talking to the (unresponsive) hardware.
 */
static void ionic_txrx_disable(struct ionic_lif *lif)
{
	unsigned int i;
	int err = 0;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs; i++)
			err = ionic_qcq_disable(lif->txqcqs[i], (err != -ETIMEDOUT));
	}

	if (lif->hwstamp_txq)
		err = ionic_qcq_disable(lif->hwstamp_txq, (err != -ETIMEDOUT));

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs; i++)
			err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
	}

	if (lif->hwstamp_rxq)
		err = ionic_qcq_disable(lif->hwstamp_rxq, (err != -ETIMEDOUT));

	ionic_lif_quiesce(lif);
}

/* Deinit all tx/rx qcqs and flush/drop any outstanding buffers;
 * also clears the cached rx_mode so it gets re-pushed on re-init.
 */
static void ionic_txrx_deinit(struct ionic_lif *lif)
{
	unsigned int i;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) {
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
			ionic_tx_flush(&lif->txqcqs[i]->cq);
			ionic_tx_empty(&lif->txqcqs[i]->q);
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
			ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
			ionic_rx_empty(&lif->rxqcqs[i]->q);
		}
	}
	lif->rx_mode = 0;

	if (lif->hwstamp_txq) {
		ionic_lif_qcq_deinit(lif, lif->hwstamp_txq);
		ionic_tx_flush(&lif->hwstamp_txq->cq);
		ionic_tx_empty(&lif->hwstamp_txq->q);
	}

	if (lif->hwstamp_rxq) {
		ionic_lif_qcq_deinit(lif, lif->hwstamp_rxq);
		ionic_rx_empty(&lif->hwstamp_rxq->q);
	}
}

/* Free all tx/rx qcqs, including the optional hwstamp queues */
static void ionic_txrx_free(struct ionic_lif *lif)
{
	unsigned int
	i;

	if (lif->txqcqs) {
		for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) {
			ionic_qcq_free(lif, lif->txqcqs[i]);
			devm_kfree(lif->ionic->dev, lif->txqcqs[i]);
			lif->txqcqs[i] = NULL;
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
			ionic_qcq_free(lif, lif->rxqcqs[i]);
			devm_kfree(lif->ionic->dev, lif->rxqcqs[i]);
			lif->rxqcqs[i] = NULL;
		}
	}

	if (lif->hwstamp_txq) {
		ionic_qcq_free(lif, lif->hwstamp_txq);
		devm_kfree(lif->ionic->dev, lif->hwstamp_txq);
		lif->hwstamp_txq = NULL;
	}

	if (lif->hwstamp_rxq) {
		ionic_qcq_free(lif, lif->hwstamp_rxq);
		devm_kfree(lif->ionic->dev, lif->hwstamp_rxq);
		lif->hwstamp_rxq = NULL;
	}
}

/* Allocate tx and rx qcq pairs for each data queue and set up their
 * interrupt coalescing; on any failure everything allocated so far is
 * freed.  Tx queues get their own interrupt only in split-intr mode,
 * otherwise each rx queue's interrupt is shared with its tx partner.
 */
static int ionic_txrx_alloc(struct ionic_lif *lif)
{
	unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
	unsigned int flags, i;
	int err = 0;

	num_desc = lif->ntxq_descs;
	desc_sz = sizeof(struct ionic_txq_desc);
	comp_sz = sizeof(struct ionic_txq_comp);

	/* use the v1 SG descriptor layout when the FW supports it */
	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
					  sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		flags |= IONIC_QCQ_F_INTR;
	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
				      num_desc, desc_sz, comp_sz, sg_desc_sz,
				      lif->kern_pid, &lif->txqcqs[i]);
		if (err)
			goto err_out;

		if (flags & IONIC_QCQ_F_INTR) {
			ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
					     lif->txqcqs[i]->intr.index,
					     lif->tx_coalesce_hw);
			if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
				lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
		}

		ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
	}

	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;

	num_desc = lif->nrxq_descs;
	desc_sz = sizeof(struct ionic_rxq_desc);
	comp_sz = sizeof(struct ionic_rxq_comp);
	sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);

	/* some rx features need a double-wide completion descriptor */
	if (lif->rxq_features & IONIC_Q_F_2X_CQ_DESC)
		comp_sz *= 2;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
				      num_desc, desc_sz, comp_sz, sg_desc_sz,
				      lif->kern_pid, &lif->rxqcqs[i]);
		if (err)
			goto err_out;

		lif->rxqcqs[i]->q.features = lif->rxq_features;

		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     lif->rxqcqs[i]->intr.index,
				     lif->rx_coalesce_hw);
		if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state))
			lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw;

		if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
			ionic_link_qcq_interrupts(lif->rxqcqs[i],
						  lif->txqcqs[i]);

		ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
	}

	return 0;

err_out:
	ionic_txrx_free(lif);

	return err;
}

/* Init each tx/rx qcq pair with the FW, set up RSS if enabled, and
 * push the current rx_mode.  On failure, every pair initialized so
 * far is torn down again.
 */
static int ionic_txrx_init(struct ionic_lif *lif)
{
	unsigned int i;
	int err;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_lif_txq_init(lif, lif->txqcqs[i]);
		if (err)
			goto err_out;

		err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]);
		if (err) {
			/* undo the partial pair before unwinding the rest */
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
			goto err_out;
		}
	}

	if (lif->netdev->features & NETIF_F_RXHASH)
		ionic_lif_rss_init(lif);

	ionic_set_rx_mode(lif->netdev, CAN_SLEEP);

	return 0;

err_out:
	while (i--) {
		ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
	}

	return err;
}

/* Enable all rx/tx queue pairs (rx first, pre-filled), then the
 * optional hwstamp queues.  On failure, everything already enabled is
 * disabled again; derr threads the -ETIMEDOUT "stop talking to hw"
 * state through the unwind, mirroring ionic_txrx_disable().
 */
static int ionic_txrx_enable(struct ionic_lif *lif)
{
	int derr = 0;
	int i, err;

	for (i = 0; i < lif->nxqs; i++) {
		if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
			dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i);
			err = -ENXIO;
			goto err_out;
		}

		ionic_rx_fill(&lif->rxqcqs[i]->q);
		err = ionic_qcq_enable(lif->rxqcqs[i]);
		if (err)
			goto err_out;

		err = ionic_qcq_enable(lif->txqcqs[i]);
		if (err) {
			derr = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
			goto err_out;
		}
	}

	if (lif->hwstamp_rxq) {
		ionic_rx_fill(&lif->hwstamp_rxq->q);
		err = ionic_qcq_enable(lif->hwstamp_rxq);
		if (err)
			goto err_out_hwstamp_rx;
	}

	if (lif->hwstamp_txq) {
		err = ionic_qcq_enable(lif->hwstamp_txq);
		if (err)
			goto err_out_hwstamp_tx;
	}

	return 0;

err_out_hwstamp_tx:
	if (lif->hwstamp_rxq)
		derr = ionic_qcq_disable(lif->hwstamp_rxq, (derr != -ETIMEDOUT));
err_out_hwstamp_rx:
	i = lif->nxqs;		/* fall through to disable all data queues */
err_out:
	while (i--) {
		derr = ionic_qcq_disable(lif->txqcqs[i], (derr != -ETIMEDOUT));
		derr = ionic_qcq_disable(lif->rxqcqs[i], (derr != -ETIMEDOUT));
	}

	return err;
}

/* Enable the datapath and wake the tx queues; idempotent via the
 * IONIC_LIF_F_UP bit, and refused while broken or in FW reset.
 */
static int ionic_start_queues(struct ionic_lif *lif)
{
	int err;

	if (test_bit(IONIC_LIF_F_BROKEN, lif->state))
		return -EIO;

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return -EBUSY;

	if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
		return 0;

	err = ionic_txrx_enable(lif);
	if (err) {
		clear_bit(IONIC_LIF_F_UP, lif->state);
		return err;
	}
	netif_tx_wake_all_queues(lif->netdev);

	return 0;
}

/* ndo_open: allocate and init the datapath; queues are only started
 * once link is up (otherwise the link check will start them later).
 */
static int ionic_open(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	/* If recovering from a broken state, clear the bit and we'll try again */
	if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
		netdev_info(netdev, "clearing broken state\n");

	err = ionic_txrx_alloc(lif);
	if (err)
		return err;

	err = ionic_txrx_init(lif);
	if (err)
		goto err_txrx_free;

	err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
	if (err)
		goto err_txrx_deinit;

	err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
	if (err)
		goto err_txrx_deinit;

	/* don't start the queues until we have link */
	if (netif_carrier_ok(netdev)) {
		err = ionic_start_queues(lif);
		if (err)
			goto err_txrx_deinit;
	}

	return 0;

err_txrx_deinit:
	ionic_txrx_deinit(lif);
err_txrx_free:
	ionic_txrx_free(lif);
	return err;
}

/* Counterpart to ionic_start_queues(); no-op if not currently up */
static void ionic_stop_queues(struct ionic_lif *lif)
{
	if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
		return;

	netif_tx_disable(lif->netdev);
	ionic_txrx_disable(lif);
}

/* ndo_stop: tear down the datapath; during FW reset the teardown is
 * handled by the reset path instead, so just return.
 */
static int ionic_stop(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return 0;

	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
	ionic_txrx_free(lif);

	return 0;
}

/* ndo_eth_ioctl: only the hardware-timestamping ioctls are supported */
static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return ionic_lif_hwstamp_set(lif, ifr);
	case SIOCGHWTSTAMP:
		return ionic_lif_hwstamp_get(lif, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

/* ndo_get_vf_config: report the PF's cached view of a VF's settings,
 * under the vf_op read lock.
 */
static int ionic_get_vf_config(struct net_device *netdev,
			       int vf, struct ifla_vf_info *ivf)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret = 0;

	if (!netif_device_present(netdev))
		return -EBUSY;

	/* read lock: concurrent getters are fine, setters take it for write */
	down_read(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ivf->vf = vf;
		ivf->vlan = le16_to_cpu(ionic->vfs[vf].vlanid);
		ivf->qos = 0;
		ivf->spoofchk = ionic->vfs[vf].spoofchk;
		ivf->linkstate = ionic->vfs[vf].linkstate;
		ivf->max_tx_rate = le32_to_cpu(ionic->vfs[vf].maxrate);
		ivf->trusted = ionic->vfs[vf].trusted;
		ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
	}

	up_read(&ionic->vf_op_lock);
	return ret;
}

/* ndo_get_vf_stats: copy the cached per-VF counters; rx/tx_dropped
 * sum the ucast/mcast/bcast drop counters.
 */
static int ionic_get_vf_stats(struct net_device *netdev, int vf,
			      struct ifla_vf_stats *vf_stats)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	struct ionic_lif_stats *vs;
	int ret = 0;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_read(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		memset(vf_stats, 0, sizeof(*vf_stats));
		vs = &ionic->vfs[vf].stats;

		vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
		vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
		vf_stats->rx_bytes = le64_to_cpu(vs->rx_ucast_bytes);
		vf_stats->tx_bytes = le64_to_cpu(vs->tx_ucast_bytes);
		vf_stats->broadcast = le64_to_cpu(vs->rx_bcast_packets);
		vf_stats->multicast = le64_to_cpu(vs->rx_mcast_packets);
		vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
				       le64_to_cpu(vs->rx_mcast_drop_packets) +
				       le64_to_cpu(vs->rx_bcast_drop_packets);
		vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
				       le64_to_cpu(vs->tx_mcast_drop_packets) +
				       le64_to_cpu(vs->tx_bcast_drop_packets);
	}

	up_read(&ionic->vf_op_lock);
	return ret;
}

/* ndo_set_vf_mac: push a new MAC to the device, cache it on success.
 * An all-zero MAC is allowed (clears the assignment).
 */
static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
		return -EINVAL;

	if (!netif_device_present(netdev))
		return -EBUSY;

	/* write lock: we modify the cached VF config */
	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
		if (!ret)
			ether_addr_copy(ionic->vfs[vf].macaddr, mac);
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

/* ndo_set_vf_vlan: only 802.1Q, no QoS support */
static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
			     u8 qos, __be16 proto)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	/* until someday when we support qos */
	if (qos)
		return -EINVAL;

	if (vlan > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
		if (!ret)
			ionic->vfs[vf].vlanid = cpu_to_le16(vlan);
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

/* ndo_set_vf_rate: max tx rate only; a min rate is rejected */
static int ionic_set_vf_rate(struct net_device *netdev, int vf,
			     int tx_min, int tx_max)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	/* setting the min just seems silly */
	if (tx_min)
		return -EINVAL;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
		if (!ret)
lif->ionic->vfs[vf].maxrate = cpu_to_le32(tx_max); 2434 } 2435 2436 up_write(&ionic->vf_op_lock); 2437 return ret; 2438 } 2439 2440 static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set) 2441 { 2442 struct ionic_lif *lif = netdev_priv(netdev); 2443 struct ionic *ionic = lif->ionic; 2444 u8 data = set; /* convert to u8 for config */ 2445 int ret; 2446 2447 if (!netif_device_present(netdev)) 2448 return -EBUSY; 2449 2450 down_write(&ionic->vf_op_lock); 2451 2452 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2453 ret = -EINVAL; 2454 } else { 2455 ret = ionic_set_vf_config(ionic, vf, 2456 IONIC_VF_ATTR_SPOOFCHK, &data); 2457 if (!ret) 2458 ionic->vfs[vf].spoofchk = data; 2459 } 2460 2461 up_write(&ionic->vf_op_lock); 2462 return ret; 2463 } 2464 2465 static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set) 2466 { 2467 struct ionic_lif *lif = netdev_priv(netdev); 2468 struct ionic *ionic = lif->ionic; 2469 u8 data = set; /* convert to u8 for config */ 2470 int ret; 2471 2472 if (!netif_device_present(netdev)) 2473 return -EBUSY; 2474 2475 down_write(&ionic->vf_op_lock); 2476 2477 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2478 ret = -EINVAL; 2479 } else { 2480 ret = ionic_set_vf_config(ionic, vf, 2481 IONIC_VF_ATTR_TRUST, &data); 2482 if (!ret) 2483 ionic->vfs[vf].trusted = data; 2484 } 2485 2486 up_write(&ionic->vf_op_lock); 2487 return ret; 2488 } 2489 2490 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set) 2491 { 2492 struct ionic_lif *lif = netdev_priv(netdev); 2493 struct ionic *ionic = lif->ionic; 2494 u8 data; 2495 int ret; 2496 2497 switch (set) { 2498 case IFLA_VF_LINK_STATE_ENABLE: 2499 data = IONIC_VF_LINK_STATUS_UP; 2500 break; 2501 case IFLA_VF_LINK_STATE_DISABLE: 2502 data = IONIC_VF_LINK_STATUS_DOWN; 2503 break; 2504 case IFLA_VF_LINK_STATE_AUTO: 2505 data = IONIC_VF_LINK_STATUS_AUTO; 2506 break; 2507 default: 2508 return -EINVAL; 2509 } 2510 2511 if 
(!netif_device_present(netdev)) 2512 return -EBUSY; 2513 2514 down_write(&ionic->vf_op_lock); 2515 2516 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2517 ret = -EINVAL; 2518 } else { 2519 ret = ionic_set_vf_config(ionic, vf, 2520 IONIC_VF_ATTR_LINKSTATE, &data); 2521 if (!ret) 2522 ionic->vfs[vf].linkstate = set; 2523 } 2524 2525 up_write(&ionic->vf_op_lock); 2526 return ret; 2527 } 2528 2529 static const struct net_device_ops ionic_netdev_ops = { 2530 .ndo_open = ionic_open, 2531 .ndo_stop = ionic_stop, 2532 .ndo_eth_ioctl = ionic_eth_ioctl, 2533 .ndo_start_xmit = ionic_start_xmit, 2534 .ndo_get_stats64 = ionic_get_stats64, 2535 .ndo_set_rx_mode = ionic_ndo_set_rx_mode, 2536 .ndo_set_features = ionic_set_features, 2537 .ndo_set_mac_address = ionic_set_mac_address, 2538 .ndo_validate_addr = eth_validate_addr, 2539 .ndo_tx_timeout = ionic_tx_timeout, 2540 .ndo_change_mtu = ionic_change_mtu, 2541 .ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid, 2542 .ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid, 2543 .ndo_set_vf_vlan = ionic_set_vf_vlan, 2544 .ndo_set_vf_trust = ionic_set_vf_trust, 2545 .ndo_set_vf_mac = ionic_set_vf_mac, 2546 .ndo_set_vf_rate = ionic_set_vf_rate, 2547 .ndo_set_vf_spoofchk = ionic_set_vf_spoofchk, 2548 .ndo_get_vf_config = ionic_get_vf_config, 2549 .ndo_set_vf_link_state = ionic_set_vf_link_state, 2550 .ndo_get_vf_stats = ionic_get_vf_stats, 2551 }; 2552 2553 static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b) 2554 { 2555 /* only swapping the queues, not the napi, flags, or other stuff */ 2556 swap(a->q.features, b->q.features); 2557 swap(a->q.num_descs, b->q.num_descs); 2558 swap(a->q.desc_size, b->q.desc_size); 2559 swap(a->q.base, b->q.base); 2560 swap(a->q.base_pa, b->q.base_pa); 2561 swap(a->q.info, b->q.info); 2562 swap(a->q_base, b->q_base); 2563 swap(a->q_base_pa, b->q_base_pa); 2564 swap(a->q_size, b->q_size); 2565 2566 swap(a->q.sg_desc_size, b->q.sg_desc_size); 2567 swap(a->q.sg_base, b->q.sg_base); 2568 
	swap(a->q.sg_base_pa, b->q.sg_base_pa);
	swap(a->sg_base,      b->sg_base);
	swap(a->sg_base_pa,   b->sg_base_pa);
	swap(a->sg_size,      b->sg_size);

	swap(a->cq.num_descs, b->cq.num_descs);
	swap(a->cq.desc_size, b->cq.desc_size);
	swap(a->cq.base,      b->cq.base);
	swap(a->cq.base_pa,   b->cq.base_pa);
	swap(a->cq.info,      b->cq.info);
	swap(a->cq_base,      b->cq_base);
	swap(a->cq_base_pa,   b->cq_base_pa);
	swap(a->cq_size,      b->cq_size);

	/* refresh debugfs so it reflects the swapped-in ring sizes */
	ionic_debugfs_del_qcq(a);
	ionic_debugfs_add_qcq(a->q.lif, a);
}

/* Apply a new queue configuration (count, ring sizes, rx features,
 * interrupt split) with minimal downtime: new rings are fully built
 * while the old ones are still running, then the queues are briefly
 * stopped, rings swapped in, and the queues restarted.
 */
int ionic_reconfigure_queues(struct ionic_lif *lif,
			     struct ionic_queue_params *qparam)
{
	unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
	struct ionic_qcq **tx_qcqs = NULL;
	struct ionic_qcq **rx_qcqs = NULL;
	unsigned int flags, i;
	int err = 0;

	/* allocate temporary qcq arrays to hold new queue structs */
	if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
		tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
				       sizeof(struct ionic_qcq *), GFP_KERNEL);
		if (!tx_qcqs) {
			err = -ENOMEM;
			goto err_out;
		}
	}
	if (qparam->nxqs != lif->nxqs ||
	    qparam->nrxq_descs != lif->nrxq_descs ||
	    qparam->rxq_features != lif->rxq_features) {
		rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
				       sizeof(struct ionic_qcq *), GFP_KERNEL);
		if (!rx_qcqs) {
			err = -ENOMEM;
			goto err_out;
		}
	}

	/* allocate new desc_info and rings, but leave the interrupt setup
	 * until later so as to not mess with the still-running queues
	 */
	if (tx_qcqs) {
		num_desc = qparam->ntxq_descs;
		desc_sz = sizeof(struct ionic_txq_desc);
		comp_sz = sizeof(struct ionic_txq_comp);

		/* v1 TXQ with the v1 SG descriptor size gets the larger SG ring */
		if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
		    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
		    sizeof(struct ionic_txq_sg_desc_v1))
			sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
		else
			sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

		for (i = 0; i < qparam->nxqs; i++) {
			flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
			err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
					      num_desc, desc_sz, comp_sz, sg_desc_sz,
					      lif->kern_pid, &tx_qcqs[i]);
			if (err)
				goto err_out;
		}
	}

	if (rx_qcqs) {
		num_desc = qparam->nrxq_descs;
		desc_sz = sizeof(struct ionic_rxq_desc);
		comp_sz = sizeof(struct ionic_rxq_comp);
		sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);

		if (qparam->rxq_features & IONIC_Q_F_2X_CQ_DESC)
			comp_sz *= 2;

		for (i = 0; i < qparam->nxqs; i++) {
			flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
			err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
					      num_desc, desc_sz, comp_sz, sg_desc_sz,
					      lif->kern_pid, &rx_qcqs[i]);
			if (err)
				goto err_out;

			rx_qcqs[i]->q.features = qparam->rxq_features;
		}
	}

	/* stop and clean the queues */
	ionic_stop_queues_reconfig(lif);

	if (qparam->nxqs != lif->nxqs) {
		err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs);
		if (err)
			goto err_out_reinit_unlock;
		err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs);
		if (err) {
			/* roll back the tx count before bailing */
			netif_set_real_num_tx_queues(lif->netdev, lif->nxqs);
			goto err_out_reinit_unlock;
		}
	}

	/* swap new desc_info and rings, keeping existing interrupt config */
	if (tx_qcqs) {
		lif->ntxq_descs = qparam->ntxq_descs;
		for (i = 0; i < qparam->nxqs; i++)
			ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]);
	}

	if (rx_qcqs) {
		lif->nrxq_descs = qparam->nrxq_descs;
		for (i = 0; i < qparam->nxqs; i++)
			ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]);
	}

	/* if we need to change the interrupt layout, this is the time */
	if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) ||
	    qparam->nxqs != lif->nxqs) {
		if (qparam->intr_split) {
			set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
		} else {
			clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
			/* shared interrupt means tx inherits rx coalesce */
			lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
			lif->tx_coalesce_hw = lif->rx_coalesce_hw;
		}

		/* clear existing interrupt assignments */
		for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
			ionic_qcq_intr_free(lif, lif->txqcqs[i]);
			ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
		}

		/* re-assign the interrupts */
		for (i = 0; i < qparam->nxqs; i++) {
			lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR;
			/* NOTE(review): err from ionic_alloc_qcq_interrupt()
			 * is not checked here; it is only seen by the final
			 * "if (err)" at err_out_reinit_unlock below
			 */
			err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]);
			ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
					     lif->rxqcqs[i]->intr.index,
					     lif->rx_coalesce_hw);

			if (qparam->intr_split) {
				lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR;
				err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]);
				ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
						     lif->txqcqs[i]->intr.index,
						     lif->tx_coalesce_hw);
				if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
					lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
			} else {
				/* shared mode: tx rides on the rx interrupt */
				lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
				ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]);
			}
		}
	}

	/* now we can rework the debugfs mappings */
	if (tx_qcqs) {
		for (i = 0; i < qparam->nxqs; i++) {
			ionic_debugfs_del_qcq(lif->txqcqs[i]);
			ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
		}
	}

	if (rx_qcqs) {
		for (i = 0; i < qparam->nxqs; i++) {
			ionic_debugfs_del_qcq(lif->rxqcqs[i]);
			ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
		}
	}

	swap(lif->nxqs, qparam->nxqs);
	swap(lif->rxq_features, qparam->rxq_features);

err_out_reinit_unlock:
	/* re-init the queues, but don't lose an error code */
	if (err)
		ionic_start_queues_reconfig(lif);
	else
		err = ionic_start_queues_reconfig(lif);

err_out:
	/* free old allocs without cleaning intr */
	for (i = 0; i < qparam->nxqs; i++) {
		if (tx_qcqs && tx_qcqs[i]) {
			tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
			ionic_qcq_free(lif, tx_qcqs[i]);
			devm_kfree(lif->ionic->dev, tx_qcqs[i]);
			tx_qcqs[i] = NULL;
		}
		if (rx_qcqs && rx_qcqs[i]) {
			rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
			ionic_qcq_free(lif, rx_qcqs[i]);
			devm_kfree(lif->ionic->dev, rx_qcqs[i]);
			rx_qcqs[i] = NULL;
		}
	}

	/* free q array */
	if (rx_qcqs) {
		devm_kfree(lif->ionic->dev, rx_qcqs);
		rx_qcqs = NULL;
	}
	if (tx_qcqs) {
		devm_kfree(lif->ionic->dev, tx_qcqs);
		tx_qcqs = NULL;
	}

	/* clean the unused dma and info allocations when new set is smaller
	 * than the full array, but leave the qcq shells in place
	 */
	for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
		lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
		ionic_qcq_free(lif, lif->txqcqs[i]);

		lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
		ionic_qcq_free(lif, lif->rxqcqs[i]);
	}

	if (err)
		netdev_info(lif->netdev, "%s: failed %d\n", __func__, err);

	return err;
}

/* Allocate the netdev/lif and all its supporting structures:
 * identity info, control queues, queue arrays, RSS table, PHC.
 */
int ionic_lif_alloc(struct ionic *ionic)
{
	struct device *dev = ionic->dev;
	union ionic_lif_identity *lid;
	struct net_device *netdev;
	struct ionic_lif *lif;
	int tbl_sz;
	int err;

	lid = kzalloc(sizeof(*lid), GFP_KERNEL);
	if (!lid)
		return -ENOMEM;

	/* NOTE(review): ntxqs_per_lif is passed for both the tx and rx
	 * queue counts - presumably they are provisioned equal; confirm
	 */
	netdev = alloc_etherdev_mqs(sizeof(*lif),
				    ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
	if (!netdev) {
		dev_err(dev, "Cannot allocate netdev, aborting\n");
		err = -ENOMEM;
		goto err_out_free_lid;
	}

	SET_NETDEV_DEV(netdev, dev);

	lif = netdev_priv(netdev);
	lif->netdev = netdev;
	ionic->lif = lif;
	netdev->netdev_ops = &ionic_netdev_ops;
	ionic_ethtool_set_ops(netdev);

	netdev->watchdog_timeo = 2 * HZ;
	netif_carrier_off(netdev);

	lif->identity = lid;
	lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
	err = ionic_lif_identify(ionic, lif->lif_type, lif->identity);
	if (err) {
		dev_err(ionic->dev, "Cannot identify type %d: %d\n",
			lif->lif_type, err);
		goto err_out_free_netdev;
	}
	/* MTU bounds come from the device identity data */
	lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
				     le32_to_cpu(lif->identity->eth.min_frame_size));
	lif->netdev->max_mtu =
		le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;

	lif->neqs = ionic->neqs_per_lif;
	lif->nxqs = ionic->ntxqs_per_lif;

	lif->ionic = ionic;
	lif->index = 0;

	/* shrink the rings in a kdump kernel to conserve memory */
	if (is_kdump_kernel()) {
		lif->ntxq_descs = IONIC_MIN_TXRX_DESC;
		lif->nrxq_descs = IONIC_MIN_TXRX_DESC;
	} else {
		lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
		lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
	}

	/* Convert the default coalesce value to actual hw resolution */
	lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
	lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
						    lif->rx_coalesce_usecs);
	lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
	lif->tx_coalesce_hw = lif->rx_coalesce_hw;
	set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
	set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);

	snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);

	spin_lock_init(&lif->adminq_lock);

	spin_lock_init(&lif->deferred.lock);
	INIT_LIST_HEAD(&lif->deferred.list);
	INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);

	/* allocate lif info */
	lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
	lif->info = dma_alloc_coherent(dev, lif->info_sz,
				       &lif->info_pa, GFP_KERNEL);
	if (!lif->info) {
		dev_err(dev, "Failed to allocate lif info, aborting\n");
		err = -ENOMEM;
		goto err_out_free_netdev;
	}

	ionic_debugfs_add_lif(lif);

	/* allocate control queues and txrx queue arrays */
	ionic_lif_queue_identify(lif);
	err = ionic_qcqs_alloc(lif);
	if (err)
		goto err_out_free_lif_info;

	/* allocate rss indirection table */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
	lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
					      &lif->rss_ind_tbl_pa,
					      GFP_KERNEL);

	if (!lif->rss_ind_tbl) {
		err = -ENOMEM;
		dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
		goto err_out_free_qcqs;
	}
	netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);

	ionic_lif_alloc_phc(lif);

	return 0;

err_out_free_qcqs:
	ionic_qcqs_free(lif);
err_out_free_lif_info:
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;
err_out_free_netdev:
	/* lid is owned by the lif (lif->identity) and freed below */
	free_netdev(lif->netdev);
	lif = NULL;
err_out_free_lid:
	kfree(lid);

	return err;
}

/* Ask the device to reset this LIF via the dev_cmd channel */
static void ionic_lif_reset(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_reset(idev, lif->index);
	ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
}

/* FW went away: detach the netdev and tear down all queue state.
 * Counterpart of ionic_lif_handle_fw_up(); guarded by IONIC_LIF_F_FW_RESET
 * so the down/up pair runs at most once per reset cycle.
 */
static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;

	if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	dev_info(ionic->dev, "FW Down: Stopping LIFs\n");

	netif_device_detach(lif->netdev);

	if (test_bit(IONIC_LIF_F_UP, lif->state)) {
		dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
		mutex_lock(&lif->queue_lock);
		ionic_stop_queues(lif);
		mutex_unlock(&lif->queue_lock);
	}

	if (netif_running(lif->netdev)) {
		ionic_txrx_deinit(lif);
		ionic_txrx_free(lif);
	}
	ionic_lif_deinit(lif);
	ionic_reset(ionic);
	ionic_qcqs_free(lif);

	dev_info(ionic->dev, "FW Down: LIFs stopped\n");
}

/* FW came back: rebuild everything torn down in ionic_lif_handle_fw_down(),
 * in reverse order, and re-attach the netdev.
 */
static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;
	int err;

	/* only meaningful if a fw_down preceded us */
	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	dev_info(ionic->dev, "FW Up: restarting LIFs\n");

	ionic_init_devinfo(ionic);
	err = ionic_identify(ionic);
	if (err)
		goto err_out;
	err = ionic_port_identify(ionic);
	if (err)
		goto err_out;
	err = ionic_port_init(ionic);
	if (err)
		goto err_out;
	err = ionic_qcqs_alloc(lif);
	if (err)
		goto err_out;

	err = ionic_lif_init(lif);
	if (err)
		goto err_qcqs_free;

	if (lif->registered)
		ionic_lif_set_netdev_info(lif);

	/* re-push the rx filters the stack thinks are installed */
	ionic_rx_filter_replay(lif);

	if (netif_running(lif->netdev)) {
		err = ionic_txrx_alloc(lif);
		if (err)
			goto err_lifs_deinit;

		err = ionic_txrx_init(lif);
		if (err)
			goto err_txrx_free;
	}

	clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
	ionic_link_status_check_request(lif, CAN_SLEEP);
	netif_device_attach(lif->netdev);
	dev_info(ionic->dev, "FW Up: LIFs restarted\n");

	/* restore the hardware timestamping queues */
	ionic_lif_hwstamp_replay(lif);

	return;

err_txrx_free:
	ionic_txrx_free(lif);
err_lifs_deinit:
	ionic_lif_deinit(lif);
err_qcqs_free:
	ionic_qcqs_free(lif);
err_out:
	dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
}

/* Final teardown of the LIF: release DMA allocations, doorbells,
 * and the netdev itself. Assumes the LIF is already deinited.
 */
void ionic_lif_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;

	ionic_lif_free_phc(lif);

	/* free rss indirection table */
	dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
			  lif->rss_ind_tbl_pa);
	lif->rss_ind_tbl = NULL;
	lif->rss_ind_tbl_pa = 0;

	/* free queues */
	ionic_qcqs_free(lif);
	/* don't poke dead firmware with a reset command */
	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		ionic_lif_reset(lif);

	/* free lif info */
	kfree(lif->identity);
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;

	/* unmap doorbell page */
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;
	kfree(lif->dbid_inuse);
	lif->dbid_inuse = NULL;

	/* free netdev & lif */
	ionic_debugfs_del_lif(lif);
	free_netdev(lif->netdev);
}

/* Undo ionic_lif_init(): stop workers, drop filters/RSS, shut the
 * control queues, and reset the LIF. No-op if not inited.
 */
void ionic_lif_deinit(struct ionic_lif *lif)
{
	if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
		return;

	/* skip FW interaction when FW is in reset */
	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		cancel_work_sync(&lif->deferred.work);
		cancel_work_sync(&lif->tx_timeout_work);
		ionic_rx_filters_deinit(lif);
		if (lif->netdev->features & NETIF_F_RXHASH)
			ionic_lif_rss_deinit(lif);
	}

	napi_disable(&lif->adminqcq->napi);
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
	ionic_lif_qcq_deinit(lif, lif->adminqcq);

	mutex_destroy(&lif->queue_lock);
	ionic_lif_reset(lif);
}

/* Bring up the admin queue via the dev_cmd channel and enable its napi */
static int ionic_lif_adminq_init(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_q_init_comp comp;
	struct ionic_dev *idev;
	struct ionic_qcq *qcq;
	struct ionic_queue *q;
	int err;

	idev = &lif->ionic->idev;
	qcq = lif->adminqcq;
	q = &qcq->q;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err) {
		netdev_err(lif->netdev, "adminq init failed %d\n", err);
		return err;
	}

	/* record the device's view of the queue from the completion */
	q->hw_type = comp.hw_type;
	q->hw_index = le32_to_cpu(comp.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);

	netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
		       NAPI_POLL_WEIGHT);

	napi_enable(&qcq->napi);

	if (qcq->flags & IONIC_QCQ_F_INTR)
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

/* Bring up the notify queue via an adminq command; it shares the
 * admin queue's interrupt rather than owning one.
 */
static int ionic_lif_notifyq_init(struct ionic_lif *lif)
{
	struct ionic_qcq *qcq = lif->notifyqcq;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	int err;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_ENA),
			.intr_index = cpu_to_le16(lif->adminqcq->intr.index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
		}
	};

	dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* restart event id tracking from scratch */
	lif->last_eid = 0;
	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);

	/* preset the callback info */
	q->info[0].cb_arg = lif;

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

/* Fetch the device's station MAC and reconcile it with the netdev:
 * adopt it if the netdev has no address yet, otherwise make sure the
 * netdev's address is in the device filter list.
 */
static int ionic_station_set(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_getattr = {
			.opcode = IONIC_CMD_LIF_GETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};
	struct sockaddr addr;
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;
	netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
		   ctx.comp.lif_getattr.mac);
	/* device has no MAC for us - leave things as they are */
	if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
		return 0;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		/* If the netdev mac is non-zero and doesn't match the default
		 * device address, it was set by something earlier and we're
		 * likely here again after a fw-upgrade reset. We need to be
		 * sure the netdev mac is in our filter list.
3201 */ 3202 if (!ether_addr_equal(ctx.comp.lif_getattr.mac, 3203 netdev->dev_addr)) 3204 ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP); 3205 } else { 3206 /* Update the netdev mac with the device's mac */ 3207 memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len); 3208 addr.sa_family = AF_INET; 3209 err = eth_prepare_mac_addr_change(netdev, &addr); 3210 if (err) { 3211 netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n", 3212 addr.sa_data, err); 3213 return 0; 3214 } 3215 3216 eth_commit_mac_addr_change(netdev, &addr); 3217 } 3218 3219 netdev_dbg(lif->netdev, "adding station MAC addr %pM\n", 3220 netdev->dev_addr); 3221 ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP); 3222 3223 return 0; 3224 } 3225 3226 int ionic_lif_init(struct ionic_lif *lif) 3227 { 3228 struct ionic_dev *idev = &lif->ionic->idev; 3229 struct device *dev = lif->ionic->dev; 3230 struct ionic_lif_init_comp comp; 3231 int dbpage_num; 3232 int err; 3233 3234 mutex_lock(&lif->ionic->dev_cmd_lock); 3235 ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa); 3236 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 3237 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); 3238 mutex_unlock(&lif->ionic->dev_cmd_lock); 3239 if (err) 3240 return err; 3241 3242 lif->hw_index = le16_to_cpu(comp.hw_index); 3243 mutex_init(&lif->queue_lock); 3244 3245 /* now that we have the hw_index we can figure out our doorbell page */ 3246 lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif); 3247 if (!lif->dbid_count) { 3248 dev_err(dev, "No doorbell pages, aborting\n"); 3249 return -EINVAL; 3250 } 3251 3252 lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL); 3253 if (!lif->dbid_inuse) { 3254 dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n"); 3255 return -ENOMEM; 3256 } 3257 3258 /* first doorbell id reserved for kernel (dbid aka pid == zero) */ 3259 set_bit(0, lif->dbid_inuse); 3260 lif->kern_pid = 0; 3261 3262 
	dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
	lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
	if (!lif->kern_dbpage) {
		dev_err(dev, "Cannot map dbpage, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dbid;
	}

	err = ionic_lif_adminq_init(lif);
	if (err)
		goto err_out_adminq_deinit;

	if (lif->ionic->nnqs_per_lif) {
		err = ionic_lif_notifyq_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_init_nic_features(lif);
	if (err)
		goto err_out_notifyq_deinit;

	/* skip filter init while FW is resetting; replay restores them */
	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		err = ionic_rx_filters_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_station_set(lif);
	if (err)
		goto err_out_notifyq_deinit;

	lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;

	set_bit(IONIC_LIF_F_INITED, lif->state);

	INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);

	return 0;

err_out_notifyq_deinit:
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
err_out_adminq_deinit:
	ionic_lif_qcq_deinit(lif, lif->adminqcq);
	ionic_lif_reset(lif);
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;
err_out_free_dbid:
	kfree(lif->dbid_inuse);
	lif->dbid_inuse = NULL;

	return err;
}

/* placeholder; registered so the notifier has a work struct to cancel */
static void ionic_lif_notify_work(struct work_struct *ws)
{
}

/* Push the netdev's current name to the device (LIF_ATTR_NAME) */
static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_NAME,
		},
	};

	/* NOTE(review): strlcpy is deprecated upstream in favor of
	 * strscpy - same truncation behavior here
	 */
	strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
		sizeof(ctx.cmd.lif_setattr.name));

	/* best effort - errors are ignored */
	ionic_adminq_post_wait(lif, &ctx);
}

static struct ionic_lif
*ionic_netdev_lif(struct net_device *netdev) 3338 { 3339 if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit) 3340 return NULL; 3341 3342 return netdev_priv(netdev); 3343 } 3344 3345 static int ionic_lif_notify(struct notifier_block *nb, 3346 unsigned long event, void *info) 3347 { 3348 struct net_device *ndev = netdev_notifier_info_to_dev(info); 3349 struct ionic *ionic = container_of(nb, struct ionic, nb); 3350 struct ionic_lif *lif = ionic_netdev_lif(ndev); 3351 3352 if (!lif || lif->ionic != ionic) 3353 return NOTIFY_DONE; 3354 3355 switch (event) { 3356 case NETDEV_CHANGENAME: 3357 ionic_lif_set_netdev_info(lif); 3358 break; 3359 } 3360 3361 return NOTIFY_DONE; 3362 } 3363 3364 int ionic_lif_register(struct ionic_lif *lif) 3365 { 3366 int err; 3367 3368 ionic_lif_register_phc(lif); 3369 3370 INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work); 3371 3372 lif->ionic->nb.notifier_call = ionic_lif_notify; 3373 3374 err = register_netdevice_notifier(&lif->ionic->nb); 3375 if (err) 3376 lif->ionic->nb.notifier_call = NULL; 3377 3378 /* only register LIF0 for now */ 3379 err = register_netdev(lif->netdev); 3380 if (err) { 3381 dev_err(lif->ionic->dev, "Cannot register net device, aborting\n"); 3382 ionic_lif_unregister_phc(lif); 3383 return err; 3384 } 3385 3386 ionic_link_status_check_request(lif, CAN_SLEEP); 3387 lif->registered = true; 3388 ionic_lif_set_netdev_info(lif); 3389 3390 return 0; 3391 } 3392 3393 void ionic_lif_unregister(struct ionic_lif *lif) 3394 { 3395 if (lif->ionic->nb.notifier_call) { 3396 unregister_netdevice_notifier(&lif->ionic->nb); 3397 cancel_work_sync(&lif->ionic->nb_work); 3398 lif->ionic->nb.notifier_call = NULL; 3399 } 3400 3401 if (lif->netdev->reg_state == NETREG_REGISTERED) 3402 unregister_netdev(lif->netdev); 3403 3404 ionic_lif_unregister_phc(lif); 3405 3406 lif->registered = false; 3407 } 3408 3409 static void ionic_lif_queue_identify(struct ionic_lif *lif) 3410 { 3411 union ionic_q_identity __iomem *q_ident; 
3412 struct ionic *ionic = lif->ionic; 3413 struct ionic_dev *idev; 3414 int qtype; 3415 int err; 3416 3417 idev = &lif->ionic->idev; 3418 q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data; 3419 3420 for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) { 3421 struct ionic_qtype_info *qti = &lif->qtype_info[qtype]; 3422 3423 /* filter out the ones we know about */ 3424 switch (qtype) { 3425 case IONIC_QTYPE_ADMINQ: 3426 case IONIC_QTYPE_NOTIFYQ: 3427 case IONIC_QTYPE_RXQ: 3428 case IONIC_QTYPE_TXQ: 3429 break; 3430 default: 3431 continue; 3432 } 3433 3434 memset(qti, 0, sizeof(*qti)); 3435 3436 mutex_lock(&ionic->dev_cmd_lock); 3437 ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype, 3438 ionic_qtype_versions[qtype]); 3439 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); 3440 if (!err) { 3441 qti->version = readb(&q_ident->version); 3442 qti->supported = readb(&q_ident->supported); 3443 qti->features = readq(&q_ident->features); 3444 qti->desc_sz = readw(&q_ident->desc_sz); 3445 qti->comp_sz = readw(&q_ident->comp_sz); 3446 qti->sg_desc_sz = readw(&q_ident->sg_desc_sz); 3447 qti->max_sg_elems = readw(&q_ident->max_sg_elems); 3448 qti->sg_desc_stride = readw(&q_ident->sg_desc_stride); 3449 } 3450 mutex_unlock(&ionic->dev_cmd_lock); 3451 3452 if (err == -EINVAL) { 3453 dev_err(ionic->dev, "qtype %d not supported\n", qtype); 3454 continue; 3455 } else if (err == -EIO) { 3456 dev_err(ionic->dev, "q_ident failed, not supported on older FW\n"); 3457 return; 3458 } else if (err) { 3459 dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n", 3460 qtype, err); 3461 return; 3462 } 3463 3464 dev_dbg(ionic->dev, " qtype[%d].version = %d\n", 3465 qtype, qti->version); 3466 dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n", 3467 qtype, qti->supported); 3468 dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n", 3469 qtype, qti->features); 3470 dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n", 3471 qtype, qti->desc_sz); 3472 
dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n", 3473 qtype, qti->comp_sz); 3474 dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n", 3475 qtype, qti->sg_desc_sz); 3476 dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n", 3477 qtype, qti->max_sg_elems); 3478 dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n", 3479 qtype, qti->sg_desc_stride); 3480 } 3481 } 3482 3483 int ionic_lif_identify(struct ionic *ionic, u8 lif_type, 3484 union ionic_lif_identity *lid) 3485 { 3486 struct ionic_dev *idev = &ionic->idev; 3487 size_t sz; 3488 int err; 3489 3490 sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data)); 3491 3492 mutex_lock(&ionic->dev_cmd_lock); 3493 ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1); 3494 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); 3495 memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz); 3496 mutex_unlock(&ionic->dev_cmd_lock); 3497 if (err) 3498 return (err); 3499 3500 dev_dbg(ionic->dev, "capabilities 0x%llx\n", 3501 le64_to_cpu(lid->capabilities)); 3502 3503 dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n", 3504 le32_to_cpu(lid->eth.max_ucast_filters)); 3505 dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n", 3506 le32_to_cpu(lid->eth.max_mcast_filters)); 3507 dev_dbg(ionic->dev, "eth.features 0x%llx\n", 3508 le64_to_cpu(lid->eth.config.features)); 3509 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n", 3510 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ])); 3511 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n", 3512 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ])); 3513 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n", 3514 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ])); 3515 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n", 3516 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ])); 3517 dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name); 3518 dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac); 
3519 dev_dbg(ionic->dev, "eth.config.mtu %d\n", 3520 le32_to_cpu(lid->eth.config.mtu)); 3521 3522 return 0; 3523 } 3524 3525 int ionic_lif_size(struct ionic *ionic) 3526 { 3527 struct ionic_identity *ident = &ionic->ident; 3528 unsigned int nintrs, dev_nintrs; 3529 union ionic_lif_config *lc; 3530 unsigned int ntxqs_per_lif; 3531 unsigned int nrxqs_per_lif; 3532 unsigned int neqs_per_lif; 3533 unsigned int nnqs_per_lif; 3534 unsigned int nxqs, neqs; 3535 unsigned int min_intrs; 3536 int err; 3537 3538 /* retrieve basic values from FW */ 3539 lc = &ident->lif.eth.config; 3540 dev_nintrs = le32_to_cpu(ident->dev.nintrs); 3541 neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count); 3542 nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]); 3543 ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]); 3544 nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]); 3545 3546 /* limit values to play nice with kdump */ 3547 if (is_kdump_kernel()) { 3548 dev_nintrs = 2; 3549 neqs_per_lif = 0; 3550 nnqs_per_lif = 0; 3551 ntxqs_per_lif = 1; 3552 nrxqs_per_lif = 1; 3553 } 3554 3555 /* reserve last queue id for hardware timestamping */ 3556 if (lc->features & cpu_to_le64(IONIC_ETH_HW_TIMESTAMP)) { 3557 if (ntxqs_per_lif <= 1 || nrxqs_per_lif <= 1) { 3558 lc->features &= cpu_to_le64(~IONIC_ETH_HW_TIMESTAMP); 3559 } else { 3560 ntxqs_per_lif -= 1; 3561 nrxqs_per_lif -= 1; 3562 } 3563 } 3564 3565 nxqs = min(ntxqs_per_lif, nrxqs_per_lif); 3566 nxqs = min(nxqs, num_online_cpus()); 3567 neqs = min(neqs_per_lif, num_online_cpus()); 3568 3569 try_again: 3570 /* interrupt usage: 3571 * 1 for master lif adminq/notifyq 3572 * 1 for each CPU for master lif TxRx queue pairs 3573 * whatever's left is for RDMA queues 3574 */ 3575 nintrs = 1 + nxqs + neqs; 3576 min_intrs = 2; /* adminq + 1 TxRx queue pair */ 3577 3578 if (nintrs > dev_nintrs) 3579 goto try_fewer; 3580 3581 err = ionic_bus_alloc_irq_vectors(ionic, nintrs); 3582 if (err < 0 && err != -ENOSPC) { 
3583 dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err); 3584 return err; 3585 } 3586 if (err == -ENOSPC) 3587 goto try_fewer; 3588 3589 if (err != nintrs) { 3590 ionic_bus_free_irq_vectors(ionic); 3591 goto try_fewer; 3592 } 3593 3594 ionic->nnqs_per_lif = nnqs_per_lif; 3595 ionic->neqs_per_lif = neqs; 3596 ionic->ntxqs_per_lif = nxqs; 3597 ionic->nrxqs_per_lif = nxqs; 3598 ionic->nintrs = nintrs; 3599 3600 ionic_debugfs_add_sizes(ionic); 3601 3602 return 0; 3603 3604 try_fewer: 3605 if (nnqs_per_lif > 1) { 3606 nnqs_per_lif >>= 1; 3607 goto try_again; 3608 } 3609 if (neqs > 1) { 3610 neqs >>= 1; 3611 goto try_again; 3612 } 3613 if (nxqs > 1) { 3614 nxqs >>= 1; 3615 goto try_again; 3616 } 3617 dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs); 3618 return -ENOSPC; 3619 } 3620