// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (C) 2021 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
 * Copyright (C) 2021 Jernej Skrabec <jernej.skrabec@gmail.com>
 *
 * Based on rtw88/pci.c:
 * Copyright(c) 2018-2019 Realtek Corporation
 */

#include <linux/module.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include "main.h"
#include "mac.h"
#include "debug.h"
#include "fw.h"
#include "ps.h"
#include "reg.h"
#include "rx.h"
#include "sdio.h"
#include "tx.h"

#define RTW_SDIO_INDIRECT_RW_RETRIES 50

static bool rtw_sdio_is_bus_addr(u32 addr)
{
	return !!(addr & RTW_SDIO_BUS_MSK);
}

static bool rtw_sdio_bus_claim_needed(struct rtw_sdio *rtwsdio)
{
	return !rtwsdio->irq_thread ||
	       rtwsdio->irq_thread != current;
}

static u32 rtw_sdio_to_bus_offset(struct rtw_dev *rtwdev, u32 addr)
{
	switch (addr & RTW_SDIO_BUS_MSK) {
	case WLAN_IOREG_OFFSET:
		addr &= WLAN_IOREG_REG_MSK;
		addr |= FIELD_PREP(REG_SDIO_CMD_ADDR_MSK,
				   REG_SDIO_CMD_ADDR_MAC_REG);
		break;
	case SDIO_LOCAL_OFFSET:
		addr &= SDIO_LOCAL_REG_MSK;
		addr |= FIELD_PREP(REG_SDIO_CMD_ADDR_MSK,
				   REG_SDIO_CMD_ADDR_SDIO_REG);
		break;
	default:
		rtw_warn(rtwdev, "Cannot convert addr 0x%08x to bus offset",
			 addr);
	}

	return addr;
}

static bool rtw_sdio_use_memcpy_io(struct rtw_dev *rtwdev, u32 addr,
				   u8 alignment)
{
	return IS_ALIGNED(addr, alignment) &&
	       test_bit(RTW_FLAG_POWERON, rtwdev->flags);
}

static void rtw_sdio_writel(struct rtw_dev *rtwdev, u32 val, u32 addr,
			    int *err_ret)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	u8 buf[4];
	int i;

	if (rtw_sdio_use_memcpy_io(rtwdev, addr, 4)) {
		sdio_writel(rtwsdio->sdio_func, val, addr, err_ret);
		return;
	}

	*(__le32 *)buf = cpu_to_le32(val);

	for (i = 0; i < 4; i++) {
		sdio_writeb(rtwsdio->sdio_func, buf[i], addr + i, err_ret);
		if (*err_ret)
			return;
	}
}

static void rtw_sdio_writew(struct rtw_dev *rtwdev, u16 val, u32 addr,
			    int *err_ret)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	u8 buf[2];
	int i;

	*(__le16 *)buf = cpu_to_le16(val);

	for (i = 0; i < 2; i++) {
		sdio_writeb(rtwsdio->sdio_func, buf[i], addr + i, err_ret);
		if (*err_ret)
			return;
	}
}

static u32 rtw_sdio_readl(struct rtw_dev *rtwdev, u32 addr, int *err_ret)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	u8 buf[4];
	int i;

	if (rtw_sdio_use_memcpy_io(rtwdev, addr, 4))
		return sdio_readl(rtwsdio->sdio_func, addr, err_ret);

	for (i = 0; i < 4; i++) {
		buf[i] = sdio_readb(rtwsdio->sdio_func, addr + i, err_ret);
		if (*err_ret)
			return 0;
	}

	return le32_to_cpu(*(__le32 *)buf);
}

static u16 rtw_sdio_readw(struct rtw_dev *rtwdev, u32 addr, int *err_ret)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	u8 buf[2];
	int i;

	for (i = 0; i < 2; i++) {
		buf[i] = sdio_readb(rtwsdio->sdio_func, addr + i, err_ret);
		if (*err_ret)
			return 0;
	}

	return le16_to_cpu(*(__le16 *)buf);
}

static u32 rtw_sdio_to_io_address(struct rtw_dev *rtwdev, u32 addr,
				  bool direct)
{
	if (!direct)
		return addr;

	if (!rtw_sdio_is_bus_addr(addr))
		addr |= WLAN_IOREG_OFFSET;

	return rtw_sdio_to_bus_offset(rtwdev, addr);
}

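/* Register accesses come in two flavors: "direct", where the target
 * register is translated into an SDIO bus offset and accessed with the
 * plain sdio_read*()/sdio_write*() accessors, and "indirect", which
 * goes through the REG_SDIO_INDIRECT_REG_CFG/_DATA mailbox. The
 * indirect path is used for plain (non-bus) addresses while the card
 * operates in SDIO 3.0 bus mode.
 */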
static bool rtw_sdio_is_sdio30_supported(struct rtw_dev *rtwdev)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;

	return rtwsdio->sdio3_bus_mode;
}

static bool rtw_sdio_use_direct_io(struct rtw_dev *rtwdev, u32 addr)
{
	return !rtw_sdio_is_sdio30_supported(rtwdev) ||
		rtw_sdio_is_bus_addr(addr);
}

static int rtw_sdio_indirect_reg_cfg(struct rtw_dev *rtwdev, u32 addr, u32 cfg)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	unsigned int retry;
	u32 reg_cfg;
	int ret;
	u8 tmp;

	reg_cfg = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_CFG);

	rtw_sdio_writel(rtwdev, addr | cfg | BIT_SDIO_INDIRECT_REG_CFG_UNK20,
			reg_cfg, &ret);
	if (ret)
		return ret;

	for (retry = 0; retry < RTW_SDIO_INDIRECT_RW_RETRIES; retry++) {
		tmp = sdio_readb(rtwsdio->sdio_func, reg_cfg + 2, &ret);
		if (!ret && (tmp & BIT(4)))
			return 0;
	}

	return -ETIMEDOUT;
}

static u8 rtw_sdio_indirect_read8(struct rtw_dev *rtwdev, u32 addr,
				  int *err_ret)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	u32 reg_data;

	*err_ret = rtw_sdio_indirect_reg_cfg(rtwdev, addr,
					     BIT_SDIO_INDIRECT_REG_CFG_READ);
	if (*err_ret)
		return 0;

	reg_data = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_DATA);
	return sdio_readb(rtwsdio->sdio_func, reg_data, err_ret);
}

static int rtw_sdio_indirect_read_bytes(struct rtw_dev *rtwdev, u32 addr,
					u8 *buf, int count)
{
	int i, ret = 0;

	for (i = 0; i < count; i++) {
		buf[i] = rtw_sdio_indirect_read8(rtwdev, addr + i, &ret);
		if (ret)
			break;
	}

	return ret;
}

static u16 rtw_sdio_indirect_read16(struct rtw_dev *rtwdev, u32 addr,
				    int *err_ret)
{
	u32 reg_data;
	u8 buf[2];

	if (!IS_ALIGNED(addr, 2)) {
		*err_ret = rtw_sdio_indirect_read_bytes(rtwdev, addr, buf, 2);
		if (*err_ret)
			return 0;

		return le16_to_cpu(*(__le16 *)buf);
	}

	*err_ret = rtw_sdio_indirect_reg_cfg(rtwdev, addr,
					     BIT_SDIO_INDIRECT_REG_CFG_READ);
	if (*err_ret)
		return 0;

	reg_data = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_DATA);
	return rtw_sdio_readw(rtwdev, reg_data, err_ret);
}

static u32 rtw_sdio_indirect_read32(struct rtw_dev *rtwdev, u32 addr,
				    int *err_ret)
{
	u32 reg_data;
	u8 buf[4];

	if (!IS_ALIGNED(addr, 4)) {
		*err_ret = rtw_sdio_indirect_read_bytes(rtwdev, addr, buf, 4);
		if (*err_ret)
			return 0;

		return le32_to_cpu(*(__le32 *)buf);
	}

	*err_ret = rtw_sdio_indirect_reg_cfg(rtwdev, addr,
					     BIT_SDIO_INDIRECT_REG_CFG_READ);
	if (*err_ret)
		return 0;

	reg_data = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_DATA);
	return rtw_sdio_readl(rtwdev, reg_data, err_ret);
}

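/* rtw_hci_ops register accessors. They all follow the same pattern:
 * decide between the direct and the indirect path, translate the
 * address accordingly, and claim the host controller unless we are
 * already running on the SDIO IRQ thread, which holds the host claim
 * for the duration of the interrupt handler.
 */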
static u8 rtw_sdio_read8(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool direct, bus_claim;
	int ret;
	u8 val;

	direct = rtw_sdio_use_direct_io(rtwdev, addr);
	addr = rtw_sdio_to_io_address(rtwdev, addr, direct);
	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	if (direct)
		val = sdio_readb(rtwsdio->sdio_func, addr, &ret);
	else
		val = rtw_sdio_indirect_read8(rtwdev, addr, &ret);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev, "sdio read8 failed (0x%x): %d", addr, ret);

	return val;
}

static u16 rtw_sdio_read16(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool direct, bus_claim;
	int ret;
	u16 val;

	direct = rtw_sdio_use_direct_io(rtwdev, addr);
	addr = rtw_sdio_to_io_address(rtwdev, addr, direct);
	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	if (direct)
		val = rtw_sdio_readw(rtwdev, addr, &ret);
	else
		val = rtw_sdio_indirect_read16(rtwdev, addr, &ret);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev, "sdio read16 failed (0x%x): %d", addr, ret);

	return val;
}

static u32 rtw_sdio_read32(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool direct, bus_claim;
	u32 val;
	int ret;

	direct = rtw_sdio_use_direct_io(rtwdev, addr);
	addr = rtw_sdio_to_io_address(rtwdev, addr, direct);
	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	if (direct)
		val = rtw_sdio_readl(rtwdev, addr, &ret);
	else
		val = rtw_sdio_indirect_read32(rtwdev, addr, &ret);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev, "sdio read32 failed (0x%x): %d", addr, ret);

	return val;
}

static void rtw_sdio_indirect_write8(struct rtw_dev *rtwdev, u8 val, u32 addr,
				     int *err_ret)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	u32 reg_data;

	reg_data = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_DATA);
	sdio_writeb(rtwsdio->sdio_func, val, reg_data, err_ret);
	if (*err_ret)
		return;

	*err_ret = rtw_sdio_indirect_reg_cfg(rtwdev, addr,
					     BIT_SDIO_INDIRECT_REG_CFG_WRITE);
}

static void rtw_sdio_indirect_write16(struct rtw_dev *rtwdev, u16 val, u32 addr,
				      int *err_ret)
{
	u32 reg_data;

	if (!IS_ALIGNED(addr, 2)) {
		addr = rtw_sdio_to_io_address(rtwdev, addr, true);
		rtw_sdio_writew(rtwdev, val, addr, err_ret);
		return;
	}

	reg_data = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_DATA);
	rtw_sdio_writew(rtwdev, val, reg_data, err_ret);
	if (*err_ret)
		return;

	*err_ret = rtw_sdio_indirect_reg_cfg(rtwdev, addr,
					     BIT_SDIO_INDIRECT_REG_CFG_WRITE |
					     BIT_SDIO_INDIRECT_REG_CFG_WORD);
}

static void rtw_sdio_indirect_write32(struct rtw_dev *rtwdev, u32 val,
				      u32 addr, int *err_ret)
{
	u32 reg_data;

	if (!IS_ALIGNED(addr, 4)) {
		addr = rtw_sdio_to_io_address(rtwdev, addr, true);
		rtw_sdio_writel(rtwdev, val, addr, err_ret);
		return;
	}

	reg_data = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_DATA);
	rtw_sdio_writel(rtwdev, val, reg_data, err_ret);
	if (*err_ret)
		return;

	*err_ret = rtw_sdio_indirect_reg_cfg(rtwdev, addr,
					     BIT_SDIO_INDIRECT_REG_CFG_WRITE |
					     BIT_SDIO_INDIRECT_REG_CFG_DWORD);
}

static void rtw_sdio_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool direct, bus_claim;
	int ret;

	direct = rtw_sdio_use_direct_io(rtwdev, addr);
	addr = rtw_sdio_to_io_address(rtwdev, addr, direct);
	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	if (direct)
		sdio_writeb(rtwsdio->sdio_func, val, addr, &ret);
	else
		rtw_sdio_indirect_write8(rtwdev, val, addr, &ret);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev, "sdio write8 failed (0x%x): %d", addr, ret);
}

static void rtw_sdio_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool direct, bus_claim;
	int ret;

	direct = rtw_sdio_use_direct_io(rtwdev, addr);
	addr = rtw_sdio_to_io_address(rtwdev, addr, direct);
	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	if (direct)
		rtw_sdio_writew(rtwdev, val, addr, &ret);
	else
		rtw_sdio_indirect_write16(rtwdev, val, addr, &ret);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev, "sdio write16 failed (0x%x): %d", addr, ret);
}

static void rtw_sdio_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool direct, bus_claim;
	int ret;

	direct = rtw_sdio_use_direct_io(rtwdev, addr);
	addr = rtw_sdio_to_io_address(rtwdev, addr, direct);
	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	if (direct)
		rtw_sdio_writel(rtwdev, val, addr, &ret);
	else
		rtw_sdio_indirect_write32(rtwdev, val, addr, &ret);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev, "sdio write32 failed (0x%x): %d", addr, ret);
}

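/* The TX FIFO address encodes both the target FIFO (selected based on
 * the TX queue) and the transfer size in 4 byte words.
 */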
static u32 rtw_sdio_get_tx_addr(struct rtw_dev *rtwdev, size_t size,
				enum rtw_tx_queue_type queue)
{
	u32 txaddr;

	switch (queue) {
	case RTW_TX_QUEUE_BCN:
	case RTW_TX_QUEUE_H2C:
	case RTW_TX_QUEUE_HI0:
		txaddr = FIELD_PREP(REG_SDIO_CMD_ADDR_MSK,
				    REG_SDIO_CMD_ADDR_TXFF_HIGH);
		break;
	case RTW_TX_QUEUE_VI:
	case RTW_TX_QUEUE_VO:
		txaddr = FIELD_PREP(REG_SDIO_CMD_ADDR_MSK,
				    REG_SDIO_CMD_ADDR_TXFF_NORMAL);
		break;
	case RTW_TX_QUEUE_BE:
	case RTW_TX_QUEUE_BK:
		txaddr = FIELD_PREP(REG_SDIO_CMD_ADDR_MSK,
				    REG_SDIO_CMD_ADDR_TXFF_LOW);
		break;
	case RTW_TX_QUEUE_MGMT:
		txaddr = FIELD_PREP(REG_SDIO_CMD_ADDR_MSK,
				    REG_SDIO_CMD_ADDR_TXFF_EXTRA);
		break;
	default:
		rtw_warn(rtwdev, "Unsupported queue for TX addr: 0x%02x\n",
			 queue);
		return 0;
	}

	txaddr += DIV_ROUND_UP(size, 4);

	return txaddr;
}

static int rtw_sdio_read_port(struct rtw_dev *rtwdev, u8 *buf, size_t count)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	struct mmc_host *host = rtwsdio->sdio_func->card->host;
	bool bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);
	u32 rxaddr = rtwsdio->rx_addr++;
	int ret = 0, err;
	size_t bytes;

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	while (count > 0) {
		bytes = min_t(size_t, host->max_req_size, count);

		err = sdio_memcpy_fromio(rtwsdio->sdio_func, buf,
					 RTW_SDIO_ADDR_RX_RX0FF_GEN(rxaddr),
					 bytes);
		if (err) {
			rtw_warn(rtwdev,
				 "Failed to read %zu byte(s) from SDIO port 0x%08x: %d",
				 bytes, rxaddr, err);

			/* Signal to the caller that reading did not work and
			 * that the data in the buffer is short/corrupted.
			 */
			ret = err;

			/* Don't stop here - instead drain the remaining data
			 * from the card's buffer, else the card will return
			 * corrupt data for the next rtw_sdio_read_port() call.
			 */
		}

		count -= bytes;
		buf += bytes;
	}

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	return ret;
}

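/* Check whether the TX FIFO has enough free pages for a transfer of
 * @count bytes. Free pages are tracked per priority queue, with an
 * additional shared "public" pool that all queues may draw from.
 */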
static int rtw_sdio_check_free_txpg(struct rtw_dev *rtwdev, u8 queue,
				    size_t count)
{
	unsigned int pages_free, pages_needed;

	if (rtw_chip_wcpu_11n(rtwdev)) {
		u32 free_txpg;

		free_txpg = rtw_sdio_read32(rtwdev, REG_SDIO_FREE_TXPG);

		switch (queue) {
		case RTW_TX_QUEUE_BCN:
		case RTW_TX_QUEUE_H2C:
		case RTW_TX_QUEUE_HI0:
		case RTW_TX_QUEUE_MGMT:
			/* high */
			pages_free = free_txpg & 0xff;
			break;
		case RTW_TX_QUEUE_VI:
		case RTW_TX_QUEUE_VO:
			/* normal */
			pages_free = (free_txpg >> 8) & 0xff;
			break;
		case RTW_TX_QUEUE_BE:
		case RTW_TX_QUEUE_BK:
			/* low */
			pages_free = (free_txpg >> 16) & 0xff;
			break;
		default:
			rtw_warn(rtwdev, "Unknown mapping for queue %u\n", queue);
			return -EINVAL;
		}

		/* add the pages from the public queue */
		pages_free += (free_txpg >> 24) & 0xff;
	} else {
		u32 free_txpg[3];

		free_txpg[0] = rtw_sdio_read32(rtwdev, REG_SDIO_FREE_TXPG);
		free_txpg[1] = rtw_sdio_read32(rtwdev, REG_SDIO_FREE_TXPG + 4);
		free_txpg[2] = rtw_sdio_read32(rtwdev, REG_SDIO_FREE_TXPG + 8);

		switch (queue) {
		case RTW_TX_QUEUE_BCN:
		case RTW_TX_QUEUE_H2C:
		case RTW_TX_QUEUE_HI0:
			/* high */
			pages_free = free_txpg[0] & 0xfff;
			break;
		case RTW_TX_QUEUE_VI:
		case RTW_TX_QUEUE_VO:
			/* normal */
			pages_free = (free_txpg[0] >> 16) & 0xfff;
			break;
		case RTW_TX_QUEUE_BE:
		case RTW_TX_QUEUE_BK:
			/* low */
			pages_free = free_txpg[1] & 0xfff;
			break;
		case RTW_TX_QUEUE_MGMT:
			/* extra */
			pages_free = free_txpg[2] & 0xfff;
			break;
		default:
			rtw_warn(rtwdev, "Unknown mapping for queue %u\n", queue);
			return -EINVAL;
		}

		/* add the pages from the public queue */
		pages_free += (free_txpg[1] >> 16) & 0xfff;
	}

	pages_needed = DIV_ROUND_UP(count, rtwdev->chip->page_size);

	if (pages_needed > pages_free) {
		rtw_dbg(rtwdev, RTW_DBG_SDIO,
			"Not enough free pages (%u needed, %u free) in queue %u for %zu bytes\n",
			pages_needed, pages_free, queue, count);
		return -EBUSY;
	}

	return 0;
}

static int rtw_sdio_write_port(struct rtw_dev *rtwdev, struct sk_buff *skb,
			       enum rtw_tx_queue_type queue)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool bus_claim;
	size_t txsize;
	u32 txaddr;
	int ret;

	txaddr = rtw_sdio_get_tx_addr(rtwdev, skb->len, queue);
	if (!txaddr)
		return -EINVAL;

	txsize = sdio_align_size(rtwsdio->sdio_func, skb->len);

	ret = rtw_sdio_check_free_txpg(rtwdev, queue, txsize);
	if (ret)
		return ret;

	if (!IS_ALIGNED((unsigned long)skb->data, RTW_SDIO_DATA_PTR_ALIGN))
		rtw_warn(rtwdev, "Got unaligned SKB in %s() for queue %u\n",
			 __func__, queue);

	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	ret = sdio_memcpy_toio(rtwsdio->sdio_func, txaddr, skb->data, txsize);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev,
			 "Failed to write %zu byte(s) to SDIO port 0x%08x",
			 txsize, txaddr);

	return ret;
}

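/* Only the RX request and CPWM1 interrupts are unmasked. TX completion
 * is not signalled through an interrupt; flow control instead relies on
 * the free page counters checked in rtw_sdio_check_free_txpg().
 */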
static void rtw_sdio_init(struct rtw_dev *rtwdev)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;

	rtwsdio->irq_mask = REG_SDIO_HIMR_RX_REQUEST | REG_SDIO_HIMR_CPWM1;
}

static void rtw_sdio_enable_rx_aggregation(struct rtw_dev *rtwdev)
{
	u8 size, timeout;

	switch (rtwdev->chip->id) {
	case RTW_CHIP_TYPE_8703B:
	case RTW_CHIP_TYPE_8821A:
	case RTW_CHIP_TYPE_8812A:
		size = 0x6;
		timeout = 0x6;
		break;
	case RTW_CHIP_TYPE_8723D:
		size = 0xa;
		timeout = 0x3;
		rtw_write8_set(rtwdev, REG_RXDMA_AGG_PG_TH + 3, BIT(7));
		break;
	default:
		size = 0xff;
		timeout = 0x1;
		break;
	}

	/* Make the firmware honor the size limit configured below */
	rtw_write32_set(rtwdev, REG_RXDMA_AGG_PG_TH, BIT_EN_PRE_CALC);

	rtw_write8_set(rtwdev, REG_TXDMA_PQ_MAP, BIT_RXDMA_AGG_EN);

	rtw_write16(rtwdev, REG_RXDMA_AGG_PG_TH,
		    FIELD_PREP(BIT_RXDMA_AGG_PG_TH, size) |
		    FIELD_PREP(BIT_DMA_AGG_TO_V1, timeout));

	rtw_write8_set(rtwdev, REG_RXDMA_MODE, BIT_DMA_MODE);
}

static void rtw_sdio_enable_interrupt(struct rtw_dev *rtwdev)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;

	rtw_write32(rtwdev, REG_SDIO_HIMR, rtwsdio->irq_mask);
}

static void rtw_sdio_disable_interrupt(struct rtw_dev *rtwdev)
{
	rtw_write32(rtwdev, REG_SDIO_HIMR, 0x0);
}

static u8 rtw_sdio_get_tx_qsel(struct rtw_dev *rtwdev, struct sk_buff *skb,
			       u8 queue)
{
	switch (queue) {
	case RTW_TX_QUEUE_BCN:
		return TX_DESC_QSEL_BEACON;
	case RTW_TX_QUEUE_H2C:
		return TX_DESC_QSEL_H2C;
	case RTW_TX_QUEUE_MGMT:
		return TX_DESC_QSEL_MGMT;
	case RTW_TX_QUEUE_HI0:
		return TX_DESC_QSEL_HIGH;
	default:
		return skb->priority;
	}
}

static int rtw_sdio_setup(struct rtw_dev *rtwdev)
{
	/* nothing to do */
	return 0;
}

static int rtw_sdio_start(struct rtw_dev *rtwdev)
{
	rtw_sdio_enable_rx_aggregation(rtwdev);
	rtw_sdio_enable_interrupt(rtwdev);

	return 0;
}

static void rtw_sdio_stop(struct rtw_dev *rtwdev)
{
	rtw_sdio_disable_interrupt(rtwdev);
}

static void rtw_sdio_deep_ps_enter(struct rtw_dev *rtwdev)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool tx_empty = true;
	u8 queue;

	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE)) {
		/* TX DMA is not allowed while in deep PS state */
		for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
			/* BCN queue is rsvd page, does not have DMA interrupt
			 * H2C queue is managed by firmware
			 */
			if (queue == RTW_TX_QUEUE_BCN ||
			    queue == RTW_TX_QUEUE_H2C)
				continue;

			/* check if there is any skb DMAing */
			if (skb_queue_len(&rtwsdio->tx_queue[queue])) {
				tx_empty = false;
				break;
			}
		}
	}

	if (!tx_empty) {
		rtw_dbg(rtwdev, RTW_DBG_PS,
			"TX path not empty, cannot enter deep power save state\n");
		return;
	}

	set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
	rtw_power_mode_change(rtwdev, true);
}

static void rtw_sdio_deep_ps_leave(struct rtw_dev *rtwdev)
{
	if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_power_mode_change(rtwdev, false);
}

static void rtw_sdio_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
	if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_sdio_deep_ps_enter(rtwdev);

	if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_sdio_deep_ps_leave(rtwdev);
}

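/* SDIO transfers can sleep, while packets may be queued from contexts
 * that must not. TX is therefore deferred: rtw_sdio_tx_write() only
 * enqueues the skb and the actual port write happens from the txwq
 * workqueue in process context.
 */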
static void rtw_sdio_tx_kick_off(struct rtw_dev *rtwdev)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;

	queue_work(rtwsdio->txwq, &rtwsdio->tx_handler_data->work);
}

static void rtw_sdio_link_ps(struct rtw_dev *rtwdev, bool enter)
{
	/* nothing to do */
}

static void rtw_sdio_interface_cfg(struct rtw_dev *rtwdev)
{
	u32 val;

	rtw_read32(rtwdev, REG_SDIO_FREE_TXPG);

	val = rtw_read32(rtwdev, REG_SDIO_TX_CTRL);
	val &= 0xfff8;
	rtw_write32(rtwdev, REG_SDIO_TX_CTRL, val);
}

static struct rtw_sdio_tx_data *rtw_sdio_get_tx_data(struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	BUILD_BUG_ON(sizeof(struct rtw_sdio_tx_data) >
		     sizeof(info->status.status_driver_data));

	return (struct rtw_sdio_tx_data *)info->status.status_driver_data;
}

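/* Prepend the TX descriptor to the skb and, if needed, insert padding
 * so the descriptor starts at an address aligned to
 * RTW_SDIO_DATA_PTR_ALIGN.
 */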
static void rtw_sdio_tx_skb_prepare(struct rtw_dev *rtwdev,
				    struct rtw_tx_pkt_info *pkt_info,
				    struct sk_buff *skb,
				    enum rtw_tx_queue_type queue)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	unsigned long data_addr, aligned_addr;
	size_t offset;
	u8 *pkt_desc;

	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);

	data_addr = (unsigned long)pkt_desc;
	aligned_addr = ALIGN(data_addr, RTW_SDIO_DATA_PTR_ALIGN);

	if (data_addr != aligned_addr) {
		/* Ensure that the start of the pkt_desc is always aligned at
		 * RTW_SDIO_DATA_PTR_ALIGN.
		 */
		offset = RTW_SDIO_DATA_PTR_ALIGN - (aligned_addr - data_addr);

		pkt_desc = skb_push(skb, offset);

		/* Since padding was inserted to align the start of the
		 * pkt_desc, the firmware must be informed that the actual
		 * data starts at a different offset than usual.
		 */
		pkt_info->offset += offset;
	}

	memset(pkt_desc, 0, chip->tx_pkt_desc_sz);

	pkt_info->qsel = rtw_sdio_get_tx_qsel(rtwdev, skb, queue);

	rtw_tx_fill_tx_desc(rtwdev, pkt_info, skb);
	rtw_tx_fill_txdesc_checksum(rtwdev, pkt_info, pkt_desc);
}

static int rtw_sdio_write_data(struct rtw_dev *rtwdev,
			       struct rtw_tx_pkt_info *pkt_info,
			       struct sk_buff *skb,
			       enum rtw_tx_queue_type queue)
{
	int ret;

	rtw_sdio_tx_skb_prepare(rtwdev, pkt_info, skb, queue);

	ret = rtw_sdio_write_port(rtwdev, skb, queue);
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw_sdio_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
					 u32 size)
{
	struct rtw_tx_pkt_info pkt_info = {};
	struct sk_buff *skb;

	skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size);
	if (!skb)
		return -ENOMEM;

	return rtw_sdio_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
}

static int rtw_sdio_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	struct rtw_tx_pkt_info pkt_info = {};
	struct sk_buff *skb;

	skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size);
	if (!skb)
		return -ENOMEM;

	return rtw_sdio_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
}

static int rtw_sdio_tx_write(struct rtw_dev *rtwdev,
			     struct rtw_tx_pkt_info *pkt_info,
			     struct sk_buff *skb)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	enum rtw_tx_queue_type queue = rtw_tx_queue_mapping(skb);
	struct rtw_sdio_tx_data *tx_data;

	rtw_sdio_tx_skb_prepare(rtwdev, pkt_info, skb, queue);

	tx_data = rtw_sdio_get_tx_data(skb);
	tx_data->sn = pkt_info->sn;

	skb_queue_tail(&rtwsdio->tx_queue[queue], skb);

	return 0;
}

static void rtw_sdio_tx_err_isr(struct rtw_dev *rtwdev)
{
	u32 val = rtw_read32(rtwdev, REG_TXDMA_STATUS);

	rtw_write32(rtwdev, REG_TXDMA_STATUS, val);
}

static void rtw_sdio_rx_skb(struct rtw_dev *rtwdev, struct sk_buff *skb,
			    u32 pkt_offset, struct rtw_rx_pkt_stat *pkt_stat,
			    struct ieee80211_rx_status *rx_status)
{
	*IEEE80211_SKB_RXCB(skb) = *rx_status;

	if (pkt_stat->is_c2h) {
		skb_put(skb, pkt_stat->pkt_len + pkt_offset);
		rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, skb);
		return;
	}

	skb_put(skb, pkt_stat->pkt_len);
	skb_reserve(skb, pkt_offset);

	rtw_update_rx_freq_for_invalid(rtwdev, skb, rx_status, pkt_stat);
	rtw_rx_stats(rtwdev, pkt_stat->vif, skb);

	ieee80211_rx_irqsafe(rtwdev->hw, skb);
}

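/* One RX request read from the RX FIFO may carry multiple aggregated
 * frames. Walk the buffer and hand each frame to mac80211 as its own
 * skb, copying all but the last frame out of the original buffer.
 */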
static void rtw_sdio_rxfifo_recv(struct rtw_dev *rtwdev, u32 rx_len)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
	struct ieee80211_rx_status rx_status;
	struct rtw_rx_pkt_stat pkt_stat;
	struct sk_buff *skb, *split_skb;
	u32 pkt_offset, curr_pkt_len;
	size_t bufsz;
	u8 *rx_desc;
	int ret;

	bufsz = sdio_align_size(rtwsdio->sdio_func, rx_len);

	skb = dev_alloc_skb(bufsz);
	if (!skb)
		return;

	ret = rtw_sdio_read_port(rtwdev, skb->data, bufsz);
	if (ret) {
		dev_kfree_skb_any(skb);
		return;
	}

	while (true) {
		rx_desc = skb->data;
		rtw_rx_query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);
		pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
			     pkt_stat.shift;

		curr_pkt_len = ALIGN(pkt_offset + pkt_stat.pkt_len,
				     RTW_SDIO_DATA_PTR_ALIGN);

		if ((curr_pkt_len + pkt_desc_sz) >= rx_len) {
			/* Use the original skb (with its adjusted offset)
			 * when processing the last (or even the only) entry to
			 * have its memory freed automatically.
			 */
			rtw_sdio_rx_skb(rtwdev, skb, pkt_offset, &pkt_stat,
					&rx_status);
			break;
		}

		split_skb = dev_alloc_skb(curr_pkt_len);
		if (!split_skb) {
			rtw_sdio_rx_skb(rtwdev, skb, pkt_offset, &pkt_stat,
					&rx_status);
			break;
		}

		skb_copy_header(split_skb, skb);
		memcpy(split_skb->data, skb->data, curr_pkt_len);

		rtw_sdio_rx_skb(rtwdev, split_skb, pkt_offset, &pkt_stat,
				&rx_status);

		/* Move to the start of the next RX descriptor */
		skb_reserve(skb, curr_pkt_len);
		rx_len -= curr_pkt_len;
	}
}

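/* Read RX data as long as the hardware signals pending RX requests,
 * but at most SZ_64K bytes per ISR invocation.
 */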
static void rtw_sdio_rx_isr(struct rtw_dev *rtwdev)
{
	u32 rx_len, hisr, total_rx_bytes = 0;

	do {
		if (rtw_chip_wcpu_11n(rtwdev))
			rx_len = rtw_read16(rtwdev, REG_SDIO_RX0_REQ_LEN);
		else
			rx_len = rtw_read32(rtwdev, REG_SDIO_RX0_REQ_LEN);

		if (!rx_len)
			break;

		rtw_sdio_rxfifo_recv(rtwdev, rx_len);

		total_rx_bytes += rx_len;

		if (rtw_chip_wcpu_11n(rtwdev)) {
			/* Stop if no more RX requests are pending, even if
			 * rx_len could be greater than zero in the next
			 * iteration. This is needed because the RX buffer may
			 * already contain data while either HW or FW are not
			 * done filling that buffer yet. Still reading the
			 * buffer can result in packets where
			 * rtw_rx_pkt_stat.pkt_len is zero or points beyond the
			 * end of the buffer.
			 */
			hisr = rtw_read32(rtwdev, REG_SDIO_HISR);
		} else {
			/* RTW_WCPU_11AC chips have improved hardware or
			 * firmware and can use rx_len unconditionally.
			 */
			hisr = REG_SDIO_HISR_RX_REQUEST;
		}
	} while (total_rx_bytes < SZ_64K && hisr & REG_SDIO_HISR_RX_REQUEST);
}

static void rtw_sdio_handle_interrupt(struct sdio_func *sdio_func)
{
	struct ieee80211_hw *hw = sdio_get_drvdata(sdio_func);
	struct rtw_sdio *rtwsdio;
	struct rtw_dev *rtwdev;
	u32 hisr;

	rtwdev = hw->priv;
	rtwsdio = (struct rtw_sdio *)rtwdev->priv;

	rtwsdio->irq_thread = current;

	hisr = rtw_read32(rtwdev, REG_SDIO_HISR);

	if (hisr & REG_SDIO_HISR_TXERR)
		rtw_sdio_tx_err_isr(rtwdev);
	if (hisr & REG_SDIO_HISR_RX_REQUEST) {
		hisr &= ~REG_SDIO_HISR_RX_REQUEST;
		rtw_sdio_rx_isr(rtwdev);
	}

	rtw_write32(rtwdev, REG_SDIO_HISR, hisr);

	rtwsdio->irq_thread = NULL;
}

static int __maybe_unused rtw_sdio_suspend(struct device *dev)
{
	struct sdio_func *func = dev_to_sdio_func(dev);
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rtw_dev *rtwdev = hw->priv;
	int ret;

	ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
	if (ret)
		rtw_err(rtwdev, "Failed to set host PM flag MMC_PM_KEEP_POWER");

	return ret;
}

static int __maybe_unused rtw_sdio_resume(struct device *dev)
{
	return 0;
}

SIMPLE_DEV_PM_OPS(rtw_sdio_pm_ops, rtw_sdio_suspend, rtw_sdio_resume);
EXPORT_SYMBOL(rtw_sdio_pm_ops);

static int rtw_sdio_claim(struct rtw_dev *rtwdev, struct sdio_func *sdio_func)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	int ret;

	sdio_claim_host(sdio_func);

	ret = sdio_enable_func(sdio_func);
	if (ret) {
		rtw_err(rtwdev, "Failed to enable SDIO func");
		goto err_release_host;
	}

	ret = sdio_set_block_size(sdio_func, RTW_SDIO_BLOCK_SIZE);
	if (ret) {
		rtw_err(rtwdev, "Failed to set SDIO block size to 512");
		goto err_disable_func;
	}

	rtwsdio->sdio_func = sdio_func;

	rtwsdio->sdio3_bus_mode = mmc_card_uhs(sdio_func->card);

	sdio_set_drvdata(sdio_func, rtwdev->hw);
	SET_IEEE80211_DEV(rtwdev->hw, &sdio_func->dev);

	sdio_release_host(sdio_func);

	return 0;

err_disable_func:
	sdio_disable_func(sdio_func);
err_release_host:
	sdio_release_host(sdio_func);
	return ret;
}

static void rtw_sdio_declaim(struct rtw_dev *rtwdev,
			     struct sdio_func *sdio_func)
{
	sdio_claim_host(sdio_func);
	sdio_disable_func(sdio_func);
	sdio_release_host(sdio_func);
}

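/* HCI operations implemented by the SDIO transport and used by the
 * rtw88 core.
 */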
static const struct rtw_hci_ops rtw_sdio_ops = {
	.tx_write = rtw_sdio_tx_write,
	.tx_kick_off = rtw_sdio_tx_kick_off,
	.setup = rtw_sdio_setup,
	.start = rtw_sdio_start,
	.stop = rtw_sdio_stop,
	.deep_ps = rtw_sdio_deep_ps,
	.link_ps = rtw_sdio_link_ps,
	.interface_cfg = rtw_sdio_interface_cfg,
	.dynamic_rx_agg = NULL,
	.write_firmware_page = rtw_write_firmware_page,

	.read8 = rtw_sdio_read8,
	.read16 = rtw_sdio_read16,
	.read32 = rtw_sdio_read32,
	.write8 = rtw_sdio_write8,
	.write16 = rtw_sdio_write16,
	.write32 = rtw_sdio_write32,
	.write_data_rsvd_page = rtw_sdio_write_data_rsvd_page,
	.write_data_h2c = rtw_sdio_write_data_h2c,
};

static int rtw_sdio_request_irq(struct rtw_dev *rtwdev,
				struct sdio_func *sdio_func)
{
	int ret;

	sdio_claim_host(sdio_func);
	ret = sdio_claim_irq(sdio_func, &rtw_sdio_handle_interrupt);
	sdio_release_host(sdio_func);

	if (ret) {
		rtw_err(rtwdev, "failed to claim SDIO IRQ");
		return ret;
	}

	return 0;
}

static void rtw_sdio_indicate_tx_status(struct rtw_dev *rtwdev,
					struct sk_buff *skb)
{
	struct rtw_sdio_tx_data *tx_data = rtw_sdio_get_tx_data(skb);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = rtwdev->hw;

	skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);

	/* enqueue to wait for tx report */
	if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
		rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
		return;
	}

	/* always ACK for others, then they won't be marked as drop */
	ieee80211_tx_info_clear_status(info);
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
	else
		info->flags |= IEEE80211_TX_STAT_ACK;

	ieee80211_tx_status_irqsafe(hw, skb);
}

static void rtw_sdio_process_tx_queue(struct rtw_dev *rtwdev,
				      enum rtw_tx_queue_type queue)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	struct sk_buff *skb;
	int ret;

	skb = skb_dequeue(&rtwsdio->tx_queue[queue]);
	if (!skb)
		return;

	ret = rtw_sdio_write_port(rtwdev, skb, queue);
	if (ret) {
		skb_queue_head(&rtwsdio->tx_queue[queue], skb);
		return;
	}

	rtw_sdio_indicate_tx_status(rtwdev, skb);
}

static void rtw_sdio_tx_handler(struct work_struct *work)
{
	struct rtw_sdio_work_data *work_data =
		container_of(work, struct rtw_sdio_work_data, work);
	struct rtw_sdio *rtwsdio;
	struct rtw_dev *rtwdev;
	int limit, queue;

	rtwdev = work_data->rtwdev;
	rtwsdio = (struct rtw_sdio *)rtwdev->priv;

	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
		rtw_sdio_deep_ps_leave(rtwdev);

	for (queue = RTK_MAX_TX_QUEUE_NUM - 1; queue >= 0; queue--) {
		for (limit = 0; limit < 1000; limit++) {
			rtw_sdio_process_tx_queue(rtwdev, queue);

			if (skb_queue_empty(&rtwsdio->tx_queue[queue]))
				break;
		}
	}
}

static void rtw_sdio_free_irq(struct rtw_dev *rtwdev,
			      struct sdio_func *sdio_func)
{
	sdio_claim_host(sdio_func);
	sdio_release_irq(sdio_func);
	sdio_release_host(sdio_func);
}

static int rtw_sdio_init_tx(struct rtw_dev *rtwdev)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	int i;

	rtwsdio->txwq = create_singlethread_workqueue("rtw88_sdio: tx wq");
	if (!rtwsdio->txwq) {
		rtw_err(rtwdev, "failed to create TX work queue\n");
		return -ENOMEM;
	}

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++)
		skb_queue_head_init(&rtwsdio->tx_queue[i]);
	rtwsdio->tx_handler_data = kmalloc(sizeof(*rtwsdio->tx_handler_data),
					   GFP_KERNEL);
	if (!rtwsdio->tx_handler_data)
		goto err_destroy_wq;

	rtwsdio->tx_handler_data->rtwdev = rtwdev;
	INIT_WORK(&rtwsdio->tx_handler_data->work, rtw_sdio_tx_handler);

	return 0;

err_destroy_wq:
	destroy_workqueue(rtwsdio->txwq);
	return -ENOMEM;
}

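/* destroy_workqueue() flushes and stops the TX worker first, so it can
 * no longer touch tx_handler_data or the queues that are freed and
 * purged below.
 */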
static void rtw_sdio_deinit_tx(struct rtw_dev *rtwdev)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	int i;

	destroy_workqueue(rtwsdio->txwq);
	kfree(rtwsdio->tx_handler_data);

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++)
		ieee80211_purge_tx_queue(rtwdev->hw, &rtwsdio->tx_queue[i]);
}

int rtw_sdio_probe(struct sdio_func *sdio_func,
		   const struct sdio_device_id *id)
{
	struct ieee80211_hw *hw;
	struct rtw_dev *rtwdev;
	int drv_data_size;
	int ret;

	drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_sdio);
	hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
	if (!hw) {
		dev_err(&sdio_func->dev, "failed to allocate hw");
		return -ENOMEM;
	}

	rtwdev = hw->priv;
	rtwdev->hw = hw;
	rtwdev->dev = &sdio_func->dev;
	rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
	rtwdev->hci.ops = &rtw_sdio_ops;
	rtwdev->hci.type = RTW_HCI_TYPE_SDIO;

	ret = rtw_core_init(rtwdev);
	if (ret)
		goto err_release_hw;

	rtw_dbg(rtwdev, RTW_DBG_SDIO,
		"rtw88 SDIO probe: vendor=0x%04x device=0x%04x class=0x%02x",
		id->vendor, id->device, id->class);

	ret = rtw_sdio_claim(rtwdev, sdio_func);
	if (ret) {
		rtw_err(rtwdev, "failed to claim SDIO device");
		goto err_deinit_core;
	}

	rtw_sdio_init(rtwdev);

	ret = rtw_sdio_init_tx(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to init SDIO TX queue\n");
		goto err_sdio_declaim;
	}

	ret = rtw_chip_info_setup(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup chip information");
		goto err_destroy_txwq;
	}

	ret = rtw_sdio_request_irq(rtwdev, sdio_func);
	if (ret)
		goto err_destroy_txwq;

	ret = rtw_register_hw(rtwdev, hw);
	if (ret) {
		rtw_err(rtwdev, "failed to register hw");
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	rtw_sdio_free_irq(rtwdev, sdio_func);
err_destroy_txwq:
	rtw_sdio_deinit_tx(rtwdev);
err_sdio_declaim:
	rtw_sdio_declaim(rtwdev, sdio_func);
err_deinit_core:
	rtw_core_deinit(rtwdev);
err_release_hw:
	ieee80211_free_hw(hw);

	return ret;
}
EXPORT_SYMBOL(rtw_sdio_probe);

void rtw_sdio_remove(struct sdio_func *sdio_func)
{
	struct ieee80211_hw *hw = sdio_get_drvdata(sdio_func);
	struct rtw_dev *rtwdev;

	if (!hw)
		return;

	rtwdev = hw->priv;

	rtw_unregister_hw(rtwdev, hw);
	rtw_sdio_disable_interrupt(rtwdev);
	rtw_sdio_free_irq(rtwdev, sdio_func);
	rtw_sdio_declaim(rtwdev, sdio_func);
	rtw_sdio_deinit_tx(rtwdev);
	rtw_core_deinit(rtwdev);
	ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtw_sdio_remove);

void rtw_sdio_shutdown(struct device *dev)
{
	struct sdio_func *sdio_func = dev_to_sdio_func(dev);
	const struct rtw_chip_info *chip;
	struct ieee80211_hw *hw;
	struct rtw_dev *rtwdev;

	hw = sdio_get_drvdata(sdio_func);
	if (!hw)
		return;

	rtwdev = hw->priv;
	chip = rtwdev->chip;

	if (chip->ops->shutdown)
		chip->ops->shutdown(rtwdev);
}
EXPORT_SYMBOL(rtw_sdio_shutdown);

MODULE_AUTHOR("Martin Blumenstingl");
MODULE_AUTHOR("Jernej Skrabec");
MODULE_DESCRIPTION("Realtek 802.11ac wireless SDIO driver");
MODULE_LICENSE("Dual BSD/GPL");