// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
 *
 * This file is written based on mt76/usb.c.
 *
 * Author: Felix Fietkau <nbd@nbd.name>
 *	   Lorenzo Bianconi <lorenzo@kernel.org>
 *	   Sean Wang <sean.wang@mediatek.com>
 */

#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/sched.h>
#include <linux/kthread.h>

#include "mt76.h"
#include "sdio.h"

static u32 mt76s_read_whisr(struct mt76_dev *dev)
{
	return sdio_readl(dev->sdio.func, MCR_WHISR, NULL);
}

u32 mt76s_read_pcr(struct mt76_dev *dev)
{
	struct mt76_sdio *sdio = &dev->sdio;

	return sdio_readl(sdio->func, MCR_WHLPCR, NULL);
}
EXPORT_SYMBOL_GPL(mt76s_read_pcr);

/* Read a register indirectly through the host-to-device mailbox:
 * post the address in H2DSM0R, raise the read software interrupt,
 * wait for the device to ack it in WHISR and fetch the value from
 * D2HRM1R.
 */
static u32 mt76s_read_mailbox(struct mt76_dev *dev, u32 offset)
{
	struct sdio_func *func = dev->sdio.func;
	u32 val = ~0, status;
	int err;

	sdio_claim_host(func);

	sdio_writel(func, offset, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting address [err=%d]\n", err);
		goto out;
	}

	sdio_writel(func, H2D_SW_INT_READ, MCR_WSICR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
		goto out;
	}

	err = readx_poll_timeout(mt76s_read_whisr, dev, status,
				 status & H2D_SW_INT_READ, 0, 1000000);
	if (err < 0) {
		dev_err(dev->dev, "query whisr timeout\n");
		goto out;
	}

	sdio_writel(func, H2D_SW_INT_READ, MCR_WHISR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
		goto out;
	}

	val = sdio_readl(func, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
		goto out;
	}

	if (val != offset) {
		dev_err(dev->dev, "register mismatch\n");
		val = ~0;
		goto out;
	}

	val = sdio_readl(func, MCR_D2HRM1R, &err);
	if (err < 0)
		dev_err(dev->dev, "failed reading d2hrm1r [err=%d]\n", err);

out:
	sdio_release_host(func);

	return val;
}

/* Write a register indirectly through the mailbox: post address and
 * value in H2DSM0R/H2DSM1R, raise the write software interrupt and
 * wait for the device ack in WHISR.
 */
static void mt76s_write_mailbox(struct mt76_dev *dev, u32 offset, u32 val)
{
	struct sdio_func *func = dev->sdio.func;
	u32 status;
	int err;

	sdio_claim_host(func);

	sdio_writel(func, offset, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting address [err=%d]\n", err);
		goto out;
	}

	sdio_writel(func, val, MCR_H2DSM1R, &err);
	if (err < 0) {
		dev_err(dev->dev,
			"failed setting write value [err=%d]\n", err);
		goto out;
	}

	sdio_writel(func, H2D_SW_INT_WRITE, MCR_WSICR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
		goto out;
	}

	err = readx_poll_timeout(mt76s_read_whisr, dev, status,
				 status & H2D_SW_INT_WRITE, 0, 1000000);
	if (err < 0) {
		dev_err(dev->dev, "query whisr timeout\n");
		goto out;
	}

	sdio_writel(func, H2D_SW_INT_WRITE, MCR_WHISR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
		goto out;
	}

	val = sdio_readl(func, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
		goto out;
	}

	if (val != offset)
		dev_err(dev->dev, "register mismatch\n");

out:
	sdio_release_host(func);
}

u32 mt76s_rr(struct mt76_dev *dev, u32 offset)
{
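	/* Before the MCU firmware is running, register reads are served
	 * through the mailbox helper above; afterwards they must be
	 * routed through the MCU command interface.
	 */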
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_rr(dev, offset);
	else
		return mt76s_read_mailbox(dev, offset);
}
EXPORT_SYMBOL_GPL(mt76s_rr);

void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		dev->mcu_ops->mcu_wr(dev, offset, val);
	else
		mt76s_write_mailbox(dev, offset, val);
}
EXPORT_SYMBOL_GPL(mt76s_wr);

u32 mt76s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
{
	val |= mt76s_rr(dev, offset) & ~mask;
	mt76s_wr(dev, offset, val);

	return val;
}
EXPORT_SYMBOL_GPL(mt76s_rmw);

void mt76s_write_copy(struct mt76_dev *dev, u32 offset,
		      const void *data, int len)
{
	const u32 *val = data;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		mt76s_wr(dev, offset, val[i]);
		offset += sizeof(u32);
	}
}
EXPORT_SYMBOL_GPL(mt76s_write_copy);

void mt76s_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len)
{
	u32 *val = data;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		val[i] = mt76s_rr(dev, offset);
		offset += sizeof(u32);
	}
}
EXPORT_SYMBOL_GPL(mt76s_read_copy);

int mt76s_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data,
		int len)
{
	int i;

	for (i = 0; i < len; i++) {
		mt76s_wr(dev, data->reg, data->value);
		data++;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_wr_rp);

int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
		struct mt76_reg_pair *data, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		data->value = mt76s_rr(dev, data->reg);
		data++;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_rd_rp);

int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func, int hw_ver)
{
	u32 status, ctrl;
	int ret;

	dev->sdio.hw_ver = hw_ver;

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (ret < 0)
		goto release;

	/* Get ownership from the device */
	sdio_writel(func, WHLPCR_INT_EN_CLR | WHLPCR_FW_OWN_REQ_CLR,
		    MCR_WHLPCR, &ret);
	if (ret < 0)
		goto disable_func;

	ret = readx_poll_timeout(mt76s_read_pcr, dev, status,
				 status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
	if (ret < 0) {
		dev_err(dev->dev, "Cannot get ownership from device\n");
		goto disable_func;
	}

	ret = sdio_set_block_size(func, 512);
	if (ret < 0)
		goto disable_func;

	/* Enable interrupt */
	sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, &ret);
	if (ret < 0)
		goto disable_func;

	ctrl = WHIER_RX0_DONE_INT_EN | WHIER_TX_DONE_INT_EN;
	if (hw_ver == MT76_CONNAC2_SDIO)
		ctrl |= WHIER_RX1_DONE_INT_EN;
	sdio_writel(func, ctrl, MCR_WHIER, &ret);
	if (ret < 0)
		goto disable_func;

	switch (hw_ver) {
	case MT76_CONNAC_SDIO:
		/* set WHISR as read clear and Rx aggregation number as 16 */
		ctrl = FIELD_PREP(MAX_HIF_RX_LEN_NUM, 16);
		break;
	default:
		ctrl = sdio_readl(func, MCR_WHCR, &ret);
		if (ret < 0)
			goto disable_func;
		ctrl &= ~MAX_HIF_RX_LEN_NUM_CONNAC2;
		ctrl &= ~W_INT_CLR_CTRL; /* read clear */
		ctrl |= FIELD_PREP(MAX_HIF_RX_LEN_NUM_CONNAC2, 0);
		break;
	}

	sdio_writel(func, ctrl, MCR_WHCR, &ret);
	if (ret < 0)
		goto disable_func;

	ret = sdio_claim_irq(func, mt76s_sdio_irq);
	if (ret < 0)
		goto disable_func;

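	/* The SDIO IRQ handler is registered; it may start running as
	 * soon as the host is released below.
	 */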
	sdio_release_host(func);

	return 0;

disable_func:
	sdio_disable_func(func);
release:
	sdio_release_host(func);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76s_hw_init);

int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT76S_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->ndesc = MT76S_NUM_RX_ENTRIES;
	q->head = q->tail = 0;
	q->queued = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_alloc_rx_queue);

static struct mt76_queue *mt76s_alloc_tx_queue(struct mt76_dev *dev)
{
	struct mt76_queue *q;

	q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT76S_NUM_TX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return ERR_PTR(-ENOMEM);

	q->ndesc = MT76S_NUM_TX_ENTRIES;

	return q;
}

int mt76s_alloc_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		q = mt76s_alloc_tx_queue(dev);
		if (IS_ERR(q))
			return PTR_ERR(q);

		dev->phy.q_tx[i] = q;
	}

	q = mt76s_alloc_tx_queue(dev);
	if (IS_ERR(q))
		return PTR_ERR(q);

	dev->q_mcu[MT_MCUQ_WM] = q;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_alloc_tx);

static struct mt76_queue_entry *
mt76s_get_next_rx_entry(struct mt76_queue *q)
{
	struct mt76_queue_entry *e = NULL;

	spin_lock_bh(&q->lock);
	if (q->queued > 0) {
		e = &q->entry[q->tail];
		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_bh(&q->lock);

	return e;
}

static int
mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int qid = q - &dev->q_rx[MT_RXQ_MAIN];
	int nframes = 0;

	while (true) {
		struct mt76_queue_entry *e;

		if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
			break;

		e = mt76s_get_next_rx_entry(q);
		if (!e || !e->skb)
			break;

		dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb, NULL);
		e->skb = NULL;
		nframes++;
	}
	if (qid == MT_RXQ_MAIN)
		mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	return nframes;
}

static void mt76s_net_worker(struct mt76_worker *w)
{
	struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
					      net_worker);
	struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
	int i, nframes;

	do {
		nframes = 0;

		local_bh_disable();
		rcu_read_lock();

		mt76_for_each_q_rx(dev, i)
			nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]);

		rcu_read_unlock();
		local_bh_enable();
	} while (nframes > 0);
}

static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76_queue_entry entry;
	int nframes = 0;
	bool mcu;

	if (!q)
		return 0;

	mcu = q == dev->q_mcu[MT_MCUQ_WM];
	while (q->queued > 0) {
		if (!q->entry[q->tail].done)
			break;

		entry = q->entry[q->tail];
		q->entry[q->tail].done = false;

		if (mcu) {
			dev_kfree_skb(entry.skb);
			entry.skb = NULL;
		}

		mt76_queue_tx_complete(dev, q, &entry);
		nframes++;
	}

	if (!q->queued)
		wake_up(&dev->tx_wait);

	return nframes;
}

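/* Reap completed entries from the MCU and data tx queues, reschedule
 * the tx worker when data frames have completed and, if the driver
 * provides tx_status_data(), kick the stats worker.
 */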
static void mt76s_status_worker(struct mt76_worker *w)
{
	struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
					      status_worker);
	struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
	bool resched = false;
	int i, nframes;

	do {
		int ndata_frames = 0;

		nframes = mt76s_process_tx_queue(dev, dev->q_mcu[MT_MCUQ_WM]);

		for (i = 0; i <= MT_TXQ_PSD; i++)
			ndata_frames += mt76s_process_tx_queue(dev,
							       dev->phy.q_tx[i]);
		nframes += ndata_frames;
		if (ndata_frames > 0)
			resched = true;

		if (dev->drv->tx_status_data && ndata_frames > 0 &&
		    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state) &&
		    !test_bit(MT76_STATE_SUSPEND, &dev->phy.state))
			mt76_worker_schedule(&sdio->stat_worker);
	} while (nframes > 0);

	if (resched)
		mt76_worker_schedule(&dev->tx_worker);
}

static void mt76s_tx_status_data(struct mt76_worker *worker)
{
	struct mt76_sdio *sdio;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	sdio = container_of(worker, struct mt76_sdio, stat_worker);
	dev = container_of(sdio, struct mt76_dev, sdio);

	while (true) {
		if (test_bit(MT76_RESET, &dev->phy.state) ||
		    test_bit(MT76_REMOVED, &dev->phy.state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
		mt76_worker_schedule(&sdio->status_worker);
	else
		clear_bit(MT76_READING_STATS, &dev->phy.state);
}

static int
mt76s_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
		   enum mt76_txq_id qid, struct sk_buff *skb,
		   struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct mt76_dev *dev = phy->dev;
	int err, len = skb->len;
	u16 idx = q->head;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	q->entry[q->head].skb = tx_info.skb;
	q->entry[q->head].buf_sz = len;
	q->entry[q->head].wcid = 0xffff;

	/* ensure the entry fully updated before bus access */
	smp_wmb();

	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	return idx;
}

static int
mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
		       struct sk_buff *skb, u32 tx_info)
{
	int ret, len = skb->len, pad;

	pad = round_up(skb->len, 4) - skb->len;
	ret = mt76_skb_adjust_pad(skb, pad);
	if (ret)
		goto error;

	spin_lock_bh(&q->lock);

	if (q->queued == q->ndesc) {
		ret = -ENOSPC;
		spin_unlock_bh(&q->lock);
		goto error;
	}

	q->entry[q->head].buf_sz = len;
	q->entry[q->head].skb = skb;

	/* ensure the entry fully updated before bus access */
	smp_wmb();

	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);

	return ret;
}

static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76_sdio *sdio = &dev->sdio;

	mt76_worker_schedule(&sdio->txrx_worker);
}

static const struct mt76_queue_ops sdio_queue_ops = {
	.tx_queue_skb = mt76s_tx_queue_skb,
	.kick = mt76s_tx_kick,
	.tx_queue_skb_raw = mt76s_tx_queue_skb_raw,
};

void mt76s_deinit(struct mt76_dev *dev)
{
	struct mt76_sdio *sdio = &dev->sdio;
	int i;
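
	/* Stop all workers before releasing the IRQ and freeing any
	 * pending rx frames, so nothing touches the queues concurrently.
	 */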
	mt76_worker_teardown(&sdio->txrx_worker);
	mt76_worker_teardown(&sdio->status_worker);
	mt76_worker_teardown(&sdio->net_worker);
	mt76_worker_teardown(&sdio->stat_worker);

	clear_bit(MT76_READING_STATS, &dev->phy.state);

	mt76_tx_status_check(dev, true);

	sdio_claim_host(sdio->func);
	sdio_release_irq(sdio->func);
	sdio_release_host(sdio->func);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int j;

		for (j = 0; j < q->ndesc; j++) {
			struct mt76_queue_entry *e = &q->entry[j];

			if (!e->skb)
				continue;

			dev_kfree_skb(e->skb);
			e->skb = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(mt76s_deinit);

int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
	       const struct mt76_bus_ops *bus_ops)
{
	struct mt76_sdio *sdio = &dev->sdio;
	u32 host_max_cap;
	int err;

	err = mt76_worker_setup(dev->hw, &sdio->status_worker,
				mt76s_status_worker, "sdio-status");
	if (err)
		return err;

	err = mt76_worker_setup(dev->hw, &sdio->net_worker, mt76s_net_worker,
				"sdio-net");
	if (err)
		return err;

	err = mt76_worker_setup(dev->hw, &sdio->stat_worker, mt76s_tx_status_data,
				"sdio-sta");
	if (err)
		return err;

	sched_set_fifo_low(sdio->status_worker.task);
	sched_set_fifo_low(sdio->net_worker.task);
	sched_set_fifo_low(sdio->stat_worker.task);

	dev->queue_ops = &sdio_queue_ops;
	dev->bus = bus_ops;
	dev->sdio.func = func;

	host_max_cap = min_t(u32, func->card->host->max_req_size,
			     func->cur_blksize *
			     func->card->host->max_blk_count);
	dev->sdio.xmit_buf_sz = min_t(u32, host_max_cap, MT76S_XMIT_BUF_SZ);
	dev->sdio.xmit_buf = devm_kmalloc(dev->dev, dev->sdio.xmit_buf_sz,
					  GFP_KERNEL);
	if (!dev->sdio.xmit_buf)
		err = -ENOMEM;

	return err;
}
EXPORT_SYMBOL_GPL(mt76s_init);

MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_DESCRIPTION("MediaTek MT76x SDIO helpers");
MODULE_LICENSE("Dual BSD/GPL");