1 // SPDX-License-Identifier: ISC 2 /* 3 * Copyright (c) 2004-2011 Atheros Communications Inc. 4 * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc. 5 * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com> 6 */ 7 8 #include <linux/module.h> 9 #include <linux/mmc/card.h> 10 #include <linux/mmc/mmc.h> 11 #include <linux/mmc/host.h> 12 #include <linux/mmc/sdio_func.h> 13 #include <linux/mmc/sdio_ids.h> 14 #include <linux/mmc/sdio.h> 15 #include <linux/mmc/sd.h> 16 #include <linux/bitfield.h> 17 #include "core.h" 18 #include "bmi.h" 19 #include "debug.h" 20 #include "hif.h" 21 #include "htc.h" 22 #include "mac.h" 23 #include "targaddrs.h" 24 #include "trace.h" 25 #include "sdio.h" 26 27 /* inlined helper functions */ 28 29 static inline int ath10k_sdio_calc_txrx_padded_len(struct ath10k_sdio *ar_sdio, 30 size_t len) 31 { 32 return __ALIGN_MASK((len), ar_sdio->mbox_info.block_mask); 33 } 34 35 static inline enum ath10k_htc_ep_id pipe_id_to_eid(u8 pipe_id) 36 { 37 return (enum ath10k_htc_ep_id)pipe_id; 38 } 39 40 static inline void ath10k_sdio_mbox_free_rx_pkt(struct ath10k_sdio_rx_data *pkt) 41 { 42 dev_kfree_skb(pkt->skb); 43 pkt->skb = NULL; 44 pkt->alloc_len = 0; 45 pkt->act_len = 0; 46 pkt->trailer_only = false; 47 } 48 49 static inline int ath10k_sdio_mbox_alloc_rx_pkt(struct ath10k_sdio_rx_data *pkt, 50 size_t act_len, size_t full_len, 51 bool part_of_bundle, 52 bool last_in_bundle) 53 { 54 pkt->skb = dev_alloc_skb(full_len); 55 if (!pkt->skb) 56 return -ENOMEM; 57 58 pkt->act_len = act_len; 59 pkt->alloc_len = full_len; 60 pkt->part_of_bundle = part_of_bundle; 61 pkt->last_in_bundle = last_in_bundle; 62 pkt->trailer_only = false; 63 64 return 0; 65 } 66 67 static inline bool is_trailer_only_msg(struct ath10k_sdio_rx_data *pkt) 68 { 69 bool trailer_only = false; 70 struct ath10k_htc_hdr *htc_hdr = 71 (struct ath10k_htc_hdr *)pkt->skb->data; 72 u16 len = __le16_to_cpu(htc_hdr->len); 73 74 if (len == htc_hdr->trailer_len) 75 trailer_only = true; 76 77 return trailer_only; 78 } 79 80 /* sdio/mmc functions */ 81 82 static inline void ath10k_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw, 83 unsigned int address, 84 unsigned char val) 85 { 86 *arg = FIELD_PREP(BIT(31), write) | 87 FIELD_PREP(BIT(27), raw) | 88 FIELD_PREP(BIT(26), 1) | 89 FIELD_PREP(GENMASK(25, 9), address) | 90 FIELD_PREP(BIT(8), 1) | 91 FIELD_PREP(GENMASK(7, 0), val); 92 } 93 94 static int ath10k_sdio_func0_cmd52_wr_byte(struct mmc_card *card, 95 unsigned int address, 96 unsigned char byte) 97 { 98 struct mmc_command io_cmd; 99 100 memset(&io_cmd, 0, sizeof(io_cmd)); 101 ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte); 102 io_cmd.opcode = SD_IO_RW_DIRECT; 103 io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC; 104 105 return mmc_wait_for_cmd(card->host, &io_cmd, 0); 106 } 107 108 static int ath10k_sdio_func0_cmd52_rd_byte(struct mmc_card *card, 109 unsigned int address, 110 unsigned char *byte) 111 { 112 struct mmc_command io_cmd; 113 int ret; 114 115 memset(&io_cmd, 0, sizeof(io_cmd)); 116 ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 0, 0, address, 0); 117 io_cmd.opcode = SD_IO_RW_DIRECT; 118 io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC; 119 120 ret = mmc_wait_for_cmd(card->host, &io_cmd, 0); 121 if (!ret) 122 *byte = io_cmd.resp[0]; 123 124 return ret; 125 } 126 127 static int ath10k_sdio_config(struct ath10k *ar) 128 { 129 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 130 struct sdio_func *func = ar_sdio->func; 131 unsigned char byte, asyncintdelay = 2; 132 int ret; 133 134 ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio configuration\n"); 135 136 sdio_claim_host(func); 137 138 byte = 0; 139 ret = ath10k_sdio_func0_cmd52_rd_byte(func->card, 140 SDIO_CCCR_DRIVE_STRENGTH, 141 &byte); 142 143 byte &= ~ATH10K_SDIO_DRIVE_DTSX_MASK; 144 byte |= FIELD_PREP(ATH10K_SDIO_DRIVE_DTSX_MASK, 145 ATH10K_SDIO_DRIVE_DTSX_TYPE_D); 146 147 ret = ath10k_sdio_func0_cmd52_wr_byte(func->card, 148 SDIO_CCCR_DRIVE_STRENGTH, 149 byte); 150 151 byte = 0; 152 ret = ath10k_sdio_func0_cmd52_rd_byte( 153 func->card, 154 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR, 155 &byte); 156 157 byte |= (CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A | 158 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C | 159 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D); 160 161 ret = ath10k_sdio_func0_cmd52_wr_byte(func->card, 162 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR, 163 byte); 164 if (ret) { 165 ath10k_warn(ar, "failed to enable driver strength: %d\n", ret); 166 goto out; 167 } 168 169 byte = 0; 170 ret = ath10k_sdio_func0_cmd52_rd_byte(func->card, 171 CCCR_SDIO_IRQ_MODE_REG_SDIO3, 172 &byte); 173 174 byte |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3; 175 176 ret = ath10k_sdio_func0_cmd52_wr_byte(func->card, 177 CCCR_SDIO_IRQ_MODE_REG_SDIO3, 178 byte); 179 if (ret) { 180 ath10k_warn(ar, "failed to enable 4-bit async irq mode: %d\n", 181 ret); 182 goto out; 183 } 184 185 byte = 0; 186 ret = ath10k_sdio_func0_cmd52_rd_byte(func->card, 187 CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS, 188 &byte); 189 190 byte &= ~CCCR_SDIO_ASYNC_INT_DELAY_MASK; 191 byte |= FIELD_PREP(CCCR_SDIO_ASYNC_INT_DELAY_MASK, asyncintdelay); 192 193 ret = ath10k_sdio_func0_cmd52_wr_byte(func->card, 194 CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS, 195 byte); 196 197 /* give us some time to enable, in ms */ 198 func->enable_timeout = 100; 199 200 ret = sdio_set_block_size(func, ar_sdio->mbox_info.block_size); 201 if (ret) { 202 ath10k_warn(ar, "failed to set sdio block size to %d: %d\n", 203 ar_sdio->mbox_info.block_size, ret); 204 goto out; 205 } 206 207 out: 208 sdio_release_host(func); 209 return ret; 210 } 211 212 static int ath10k_sdio_write32(struct ath10k *ar, u32 addr, u32 val) 213 { 214 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 215 struct sdio_func *func = ar_sdio->func; 216 int ret; 217 218 sdio_claim_host(func); 219 220 sdio_writel(func, val, addr, &ret); 221 if (ret) { 222 ath10k_warn(ar, "failed to write 0x%x to address 0x%x: %d\n", 223 val, addr, ret); 224 goto out; 225 } 226 227 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write32 addr 0x%x val 0x%x\n", 228 addr, val); 229 230 out: 231 sdio_release_host(func); 232 233 return ret; 234 } 235 236 static int ath10k_sdio_writesb32(struct ath10k *ar, u32 addr, u32 val) 237 { 238 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 239 struct sdio_func *func = ar_sdio->func; 240 __le32 *buf; 241 int ret; 242 243 buf = kzalloc(sizeof(*buf), GFP_KERNEL); 244 if (!buf) 245 return -ENOMEM; 246 247 *buf = cpu_to_le32(val); 248 249 sdio_claim_host(func); 250 251 ret = sdio_writesb(func, addr, buf, sizeof(*buf)); 252 if (ret) { 253 ath10k_warn(ar, "failed to write value 0x%x to fixed sb address 0x%x: %d\n", 254 val, addr, ret); 255 goto out; 256 } 257 258 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio writesb32 addr 0x%x val 0x%x\n", 259 addr, val); 260 261 out: 262 sdio_release_host(func); 263 264 kfree(buf); 265 266 return ret; 267 } 268 269 static int ath10k_sdio_read32(struct ath10k *ar, u32 addr, u32 *val) 270 { 271 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 272 struct sdio_func *func = ar_sdio->func; 273 int ret; 274 275 sdio_claim_host(func); 276 *val = sdio_readl(func, addr, &ret); 277 if (ret) { 278 ath10k_warn(ar, "failed to read from address 0x%x: %d\n", 279 addr, ret); 280 goto out; 281 } 282 283 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read32 addr 0x%x val 0x%x\n", 284 addr, *val); 285 286 out: 287 sdio_release_host(func); 288 289 return ret; 290 } 291 292 static int ath10k_sdio_read(struct ath10k *ar, u32 addr, void *buf, size_t len) 293 { 294 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 295 struct sdio_func *func = ar_sdio->func; 296 int ret; 297 298 sdio_claim_host(func); 299 300 ret = sdio_memcpy_fromio(func, buf, addr, len); 301 if (ret) { 302 ath10k_warn(ar, "failed to read from address 0x%x: %d\n", 303 addr, ret); 304 goto out; 305 } 306 307 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read addr 0x%x buf 0x%p len %zu\n", 308 addr, buf, len); 309 ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio read ", buf, len); 310 311 out: 312 sdio_release_host(func); 313 314 return ret; 315 } 316 317 static int ath10k_sdio_write(struct ath10k *ar, u32 addr, const void *buf, size_t len) 318 { 319 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 320 struct sdio_func *func = ar_sdio->func; 321 int ret; 322 323 sdio_claim_host(func); 324 325 /* For some reason toio() doesn't have const for the buffer, need 326 * an ugly hack to workaround that. 327 */ 328 ret = sdio_memcpy_toio(func, addr, (void *)buf, len); 329 if (ret) { 330 ath10k_warn(ar, "failed to write to address 0x%x: %d\n", 331 addr, ret); 332 goto out; 333 } 334 335 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write addr 0x%x buf 0x%p len %zu\n", 336 addr, buf, len); 337 ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio write ", buf, len); 338 339 out: 340 sdio_release_host(func); 341 342 return ret; 343 } 344 345 static int ath10k_sdio_readsb(struct ath10k *ar, u32 addr, void *buf, size_t len) 346 { 347 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 348 struct sdio_func *func = ar_sdio->func; 349 int ret; 350 351 sdio_claim_host(func); 352 353 len = round_down(len, ar_sdio->mbox_info.block_size); 354 355 ret = sdio_readsb(func, buf, addr, len); 356 if (ret) { 357 ath10k_warn(ar, "failed to read from fixed (sb) address 0x%x: %d\n", 358 addr, ret); 359 goto out; 360 } 361 362 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio readsb addr 0x%x buf 0x%p len %zu\n", 363 addr, buf, len); 364 ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio readsb ", buf, len); 365 366 out: 367 sdio_release_host(func); 368 369 return ret; 370 } 371 372 /* HIF mbox functions */ 373 374 static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar, 375 struct ath10k_sdio_rx_data *pkt, 376 u32 *lookaheads, 377 int *n_lookaheads) 378 { 379 struct ath10k_htc *htc = &ar->htc; 380 struct sk_buff *skb = pkt->skb; 381 struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data; 382 bool trailer_present = htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT; 383 enum ath10k_htc_ep_id eid; 384 u16 payload_len; 385 u8 *trailer; 386 int ret; 387 388 payload_len = le16_to_cpu(htc_hdr->len); 389 skb->len = payload_len + sizeof(struct ath10k_htc_hdr); 390 391 if (trailer_present) { 392 trailer = skb->data + sizeof(*htc_hdr) + 393 payload_len - htc_hdr->trailer_len; 394 395 eid = pipe_id_to_eid(htc_hdr->eid); 396 397 ret = ath10k_htc_process_trailer(htc, 398 trailer, 399 htc_hdr->trailer_len, 400 eid, 401 lookaheads, 402 n_lookaheads); 403 if (ret) 404 return ret; 405 406 if (is_trailer_only_msg(pkt)) 407 pkt->trailer_only = true; 408 409 skb_trim(skb, skb->len - htc_hdr->trailer_len); 410 } 411 412 skb_pull(skb, sizeof(*htc_hdr)); 413 414 return 0; 415 } 416 417 static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar, 418 u32 lookaheads[], 419 int *n_lookahead) 420 { 421 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 422 struct ath10k_htc *htc = &ar->htc; 423 struct ath10k_sdio_rx_data *pkt; 424 struct ath10k_htc_ep *ep; 425 enum ath10k_htc_ep_id id; 426 int ret, i, *n_lookahead_local; 427 u32 *lookaheads_local; 428 int lookahead_idx = 0; 429 430 for (i = 0; i < ar_sdio->n_rx_pkts; i++) { 431 lookaheads_local = lookaheads; 432 n_lookahead_local = n_lookahead; 433 434 id = ((struct ath10k_htc_hdr *) 435 &lookaheads[lookahead_idx++])->eid; 436 437 if (id >= ATH10K_HTC_EP_COUNT) { 438 ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n", 439 id); 440 ret = -ENOMEM; 441 goto out; 442 } 443 444 ep = &htc->endpoint[id]; 445 446 if (ep->service_id == 0) { 447 ath10k_warn(ar, "ep %d is not connected\n", id); 448 ret = -ENOMEM; 449 goto out; 450 } 451 452 pkt = &ar_sdio->rx_pkts[i]; 453 454 if (pkt->part_of_bundle && !pkt->last_in_bundle) { 455 /* Only read lookahead's from RX trailers 456 * for the last packet in a bundle. 457 */ 458 lookahead_idx--; 459 lookaheads_local = NULL; 460 n_lookahead_local = NULL; 461 } 462 463 ret = ath10k_sdio_mbox_rx_process_packet(ar, 464 pkt, 465 lookaheads_local, 466 n_lookahead_local); 467 if (ret) 468 goto out; 469 470 if (!pkt->trailer_only) 471 ep->ep_ops.ep_rx_complete(ar_sdio->ar, pkt->skb); 472 else 473 kfree_skb(pkt->skb); 474 475 /* The RX complete handler now owns the skb...*/ 476 pkt->skb = NULL; 477 pkt->alloc_len = 0; 478 } 479 480 ret = 0; 481 482 out: 483 /* Free all packets that was not passed on to the RX completion 484 * handler... 485 */ 486 for (; i < ar_sdio->n_rx_pkts; i++) 487 ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]); 488 489 return ret; 490 } 491 492 static int ath10k_sdio_mbox_alloc_pkt_bundle(struct ath10k *ar, 493 struct ath10k_sdio_rx_data *rx_pkts, 494 struct ath10k_htc_hdr *htc_hdr, 495 size_t full_len, size_t act_len, 496 size_t *bndl_cnt) 497 { 498 int ret, i; 499 500 *bndl_cnt = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, htc_hdr->flags); 501 502 if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE) { 503 ath10k_warn(ar, 504 "HTC bundle length %u exceeds maximum %u\n", 505 le16_to_cpu(htc_hdr->len), 506 HTC_HOST_MAX_MSG_PER_RX_BUNDLE); 507 return -ENOMEM; 508 } 509 510 /* Allocate bndl_cnt extra skb's for the bundle. 511 * The package containing the 512 * ATH10K_HTC_FLAG_BUNDLE_MASK flag is not included 513 * in bndl_cnt. The skb for that packet will be 514 * allocated separately. 515 */ 516 for (i = 0; i < *bndl_cnt; i++) { 517 ret = ath10k_sdio_mbox_alloc_rx_pkt(&rx_pkts[i], 518 act_len, 519 full_len, 520 true, 521 false); 522 if (ret) 523 return ret; 524 } 525 526 return 0; 527 } 528 529 static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar, 530 u32 lookaheads[], int n_lookaheads) 531 { 532 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 533 struct ath10k_htc_hdr *htc_hdr; 534 size_t full_len, act_len; 535 bool last_in_bundle; 536 int ret, i; 537 538 if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) { 539 ath10k_warn(ar, 540 "the total number of pkgs to be fetched (%u) exceeds maximum %u\n", 541 n_lookaheads, 542 ATH10K_SDIO_MAX_RX_MSGS); 543 ret = -ENOMEM; 544 goto err; 545 } 546 547 for (i = 0; i < n_lookaheads; i++) { 548 htc_hdr = (struct ath10k_htc_hdr *)&lookaheads[i]; 549 last_in_bundle = false; 550 551 if (le16_to_cpu(htc_hdr->len) > 552 ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) { 553 ath10k_warn(ar, 554 "payload length %d exceeds max htc length: %zu\n", 555 le16_to_cpu(htc_hdr->len), 556 ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH); 557 ret = -ENOMEM; 558 goto err; 559 } 560 561 act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr); 562 full_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio, act_len); 563 564 if (full_len > ATH10K_SDIO_MAX_BUFFER_SIZE) { 565 ath10k_warn(ar, 566 "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n", 567 htc_hdr->eid, htc_hdr->flags, 568 le16_to_cpu(htc_hdr->len)); 569 ret = -EINVAL; 570 goto err; 571 } 572 573 if (htc_hdr->flags & ATH10K_HTC_FLAG_BUNDLE_MASK) { 574 /* HTC header indicates that every packet to follow 575 * has the same padded length so that it can be 576 * optimally fetched as a full bundle. 577 */ 578 size_t bndl_cnt; 579 580 ret = ath10k_sdio_mbox_alloc_pkt_bundle(ar, 581 &ar_sdio->rx_pkts[i], 582 htc_hdr, 583 full_len, 584 act_len, 585 &bndl_cnt); 586 587 if (ret) { 588 ath10k_warn(ar, "alloc_bundle error %d\n", ret); 589 goto err; 590 } 591 592 n_lookaheads += bndl_cnt; 593 i += bndl_cnt; 594 /*Next buffer will be the last in the bundle */ 595 last_in_bundle = true; 596 } 597 598 /* Allocate skb for packet. If the packet had the 599 * ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled 600 * packet skb's have been allocated in the previous step. 601 */ 602 if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK) 603 full_len += ATH10K_HIF_MBOX_BLOCK_SIZE; 604 605 ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[i], 606 act_len, 607 full_len, 608 last_in_bundle, 609 last_in_bundle); 610 } 611 612 ar_sdio->n_rx_pkts = i; 613 614 return 0; 615 616 err: 617 for (i = 0; i < ATH10K_SDIO_MAX_RX_MSGS; i++) { 618 if (!ar_sdio->rx_pkts[i].alloc_len) 619 break; 620 ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]); 621 } 622 623 return ret; 624 } 625 626 static int ath10k_sdio_mbox_rx_packet(struct ath10k *ar, 627 struct ath10k_sdio_rx_data *pkt) 628 { 629 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 630 struct sk_buff *skb = pkt->skb; 631 int ret; 632 633 ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr, 634 skb->data, pkt->alloc_len); 635 pkt->status = ret; 636 if (!ret) 637 skb_put(skb, pkt->act_len); 638 639 return ret; 640 } 641 642 static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar) 643 { 644 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 645 int ret, i; 646 647 for (i = 0; i < ar_sdio->n_rx_pkts; i++) { 648 ret = ath10k_sdio_mbox_rx_packet(ar, 649 &ar_sdio->rx_pkts[i]); 650 if (ret) 651 goto err; 652 } 653 654 return 0; 655 656 err: 657 /* Free all packets that was not successfully fetched. */ 658 for (; i < ar_sdio->n_rx_pkts; i++) 659 ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]); 660 661 return ret; 662 } 663 664 /* This is the timeout for mailbox processing done in the sdio irq 665 * handler. The timeout is deliberately set quite high since SDIO dump logs 666 * over serial port can/will add a substantial overhead to the processing 667 * (if enabled). 668 */ 669 #define SDIO_MBOX_PROCESSING_TIMEOUT_HZ (20 * HZ) 670 671 static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar, 672 u32 msg_lookahead, bool *done) 673 { 674 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 675 u32 lookaheads[ATH10K_SDIO_MAX_RX_MSGS]; 676 int n_lookaheads = 1; 677 unsigned long timeout; 678 int ret; 679 680 *done = true; 681 682 /* Copy the lookahead obtained from the HTC register table into our 683 * temp array as a start value. 684 */ 685 lookaheads[0] = msg_lookahead; 686 687 timeout = jiffies + SDIO_MBOX_PROCESSING_TIMEOUT_HZ; 688 do { 689 /* Try to allocate as many HTC RX packets indicated by 690 * n_lookaheads. 691 */ 692 ret = ath10k_sdio_mbox_rx_alloc(ar, lookaheads, 693 n_lookaheads); 694 if (ret) 695 break; 696 697 if (ar_sdio->n_rx_pkts >= 2) 698 /* A recv bundle was detected, force IRQ status 699 * re-check again. 700 */ 701 *done = false; 702 703 ret = ath10k_sdio_mbox_rx_fetch(ar); 704 705 /* Process fetched packets. This will potentially update 706 * n_lookaheads depending on if the packets contain lookahead 707 * reports. 708 */ 709 n_lookaheads = 0; 710 ret = ath10k_sdio_mbox_rx_process_packets(ar, 711 lookaheads, 712 &n_lookaheads); 713 714 if (!n_lookaheads || ret) 715 break; 716 717 /* For SYNCH processing, if we get here, we are running 718 * through the loop again due to updated lookaheads. Set 719 * flag that we should re-check IRQ status registers again 720 * before leaving IRQ processing, this can net better 721 * performance in high throughput situations. 722 */ 723 *done = false; 724 } while (time_before(jiffies, timeout)); 725 726 if (ret && (ret != -ECANCELED)) 727 ath10k_warn(ar, "failed to get pending recv messages: %d\n", 728 ret); 729 730 return ret; 731 } 732 733 static int ath10k_sdio_mbox_proc_dbg_intr(struct ath10k *ar) 734 { 735 u32 val; 736 int ret; 737 738 /* TODO: Add firmware crash handling */ 739 ath10k_warn(ar, "firmware crashed\n"); 740 741 /* read counter to clear the interrupt, the debug error interrupt is 742 * counter 0. 743 */ 744 ret = ath10k_sdio_read32(ar, MBOX_COUNT_DEC_ADDRESS, &val); 745 if (ret) 746 ath10k_warn(ar, "failed to clear debug interrupt: %d\n", ret); 747 748 return ret; 749 } 750 751 static int ath10k_sdio_mbox_proc_counter_intr(struct ath10k *ar) 752 { 753 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 754 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; 755 u8 counter_int_status; 756 int ret; 757 758 mutex_lock(&irq_data->mtx); 759 counter_int_status = irq_data->irq_proc_reg->counter_int_status & 760 irq_data->irq_en_reg->cntr_int_status_en; 761 762 /* NOTE: other modules like GMBOX may use the counter interrupt for 763 * credit flow control on other counters, we only need to check for 764 * the debug assertion counter interrupt. 765 */ 766 if (counter_int_status & ATH10K_SDIO_TARGET_DEBUG_INTR_MASK) 767 ret = ath10k_sdio_mbox_proc_dbg_intr(ar); 768 else 769 ret = 0; 770 771 mutex_unlock(&irq_data->mtx); 772 773 return ret; 774 } 775 776 static int ath10k_sdio_mbox_proc_err_intr(struct ath10k *ar) 777 { 778 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 779 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; 780 u8 error_int_status; 781 int ret; 782 783 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio error interrupt\n"); 784 785 error_int_status = irq_data->irq_proc_reg->error_int_status & 0x0F; 786 if (!error_int_status) { 787 ath10k_warn(ar, "invalid error interrupt status: 0x%x\n", 788 error_int_status); 789 return -EIO; 790 } 791 792 ath10k_dbg(ar, ATH10K_DBG_SDIO, 793 "sdio error_int_status 0x%x\n", error_int_status); 794 795 if (FIELD_GET(MBOX_ERROR_INT_STATUS_WAKEUP_MASK, 796 error_int_status)) 797 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio interrupt error wakeup\n"); 798 799 if (FIELD_GET(MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_MASK, 800 error_int_status)) 801 ath10k_warn(ar, "rx underflow interrupt error\n"); 802 803 if (FIELD_GET(MBOX_ERROR_INT_STATUS_TX_OVERFLOW_MASK, 804 error_int_status)) 805 ath10k_warn(ar, "tx overflow interrupt error\n"); 806 807 /* Clear the interrupt */ 808 irq_data->irq_proc_reg->error_int_status &= ~error_int_status; 809 810 /* set W1C value to clear the interrupt, this hits the register first */ 811 ret = ath10k_sdio_writesb32(ar, MBOX_ERROR_INT_STATUS_ADDRESS, 812 error_int_status); 813 if (ret) { 814 ath10k_warn(ar, "unable to write to error int status address: %d\n", 815 ret); 816 return ret; 817 } 818 819 return 0; 820 } 821 822 static int ath10k_sdio_mbox_proc_cpu_intr(struct ath10k *ar) 823 { 824 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 825 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; 826 u8 cpu_int_status; 827 int ret; 828 829 mutex_lock(&irq_data->mtx); 830 cpu_int_status = irq_data->irq_proc_reg->cpu_int_status & 831 irq_data->irq_en_reg->cpu_int_status_en; 832 if (!cpu_int_status) { 833 ath10k_warn(ar, "CPU interrupt status is zero\n"); 834 ret = -EIO; 835 goto out; 836 } 837 838 /* Clear the interrupt */ 839 irq_data->irq_proc_reg->cpu_int_status &= ~cpu_int_status; 840 841 /* Set up the register transfer buffer to hit the register 4 times, 842 * this is done to make the access 4-byte aligned to mitigate issues 843 * with host bus interconnects that restrict bus transfer lengths to 844 * be a multiple of 4-bytes. 845 * 846 * Set W1C value to clear the interrupt, this hits the register first. 847 */ 848 ret = ath10k_sdio_writesb32(ar, MBOX_CPU_INT_STATUS_ADDRESS, 849 cpu_int_status); 850 if (ret) { 851 ath10k_warn(ar, "unable to write to cpu interrupt status address: %d\n", 852 ret); 853 goto out; 854 } 855 856 out: 857 mutex_unlock(&irq_data->mtx); 858 return ret; 859 } 860 861 static int ath10k_sdio_mbox_read_int_status(struct ath10k *ar, 862 u8 *host_int_status, 863 u32 *lookahead) 864 { 865 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 866 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; 867 struct ath10k_sdio_irq_proc_regs *irq_proc_reg = irq_data->irq_proc_reg; 868 struct ath10k_sdio_irq_enable_regs *irq_en_reg = irq_data->irq_en_reg; 869 u8 htc_mbox = FIELD_PREP(ATH10K_HTC_MAILBOX_MASK, 1); 870 int ret; 871 872 mutex_lock(&irq_data->mtx); 873 874 *lookahead = 0; 875 *host_int_status = 0; 876 877 /* int_status_en is supposed to be non zero, otherwise interrupts 878 * shouldn't be enabled. There is however a short time frame during 879 * initialization between the irq register and int_status_en init 880 * where this can happen. 881 * We silently ignore this condition. 882 */ 883 if (!irq_en_reg->int_status_en) { 884 ret = 0; 885 goto out; 886 } 887 888 /* Read the first sizeof(struct ath10k_irq_proc_registers) 889 * bytes of the HTC register table. This 890 * will yield us the value of different int status 891 * registers and the lookahead registers. 892 */ 893 ret = ath10k_sdio_read(ar, MBOX_HOST_INT_STATUS_ADDRESS, 894 irq_proc_reg, sizeof(*irq_proc_reg)); 895 if (ret) 896 goto out; 897 898 /* Update only those registers that are enabled */ 899 *host_int_status = irq_proc_reg->host_int_status & 900 irq_en_reg->int_status_en; 901 902 /* Look at mbox status */ 903 if (!(*host_int_status & htc_mbox)) { 904 *lookahead = 0; 905 ret = 0; 906 goto out; 907 } 908 909 /* Mask out pending mbox value, we use look ahead as 910 * the real flag for mbox processing. 911 */ 912 *host_int_status &= ~htc_mbox; 913 if (irq_proc_reg->rx_lookahead_valid & htc_mbox) { 914 *lookahead = le32_to_cpu( 915 irq_proc_reg->rx_lookahead[ATH10K_HTC_MAILBOX]); 916 if (!*lookahead) 917 ath10k_warn(ar, "sdio mbox lookahead is zero\n"); 918 } 919 920 out: 921 mutex_unlock(&irq_data->mtx); 922 return ret; 923 } 924 925 static int ath10k_sdio_mbox_proc_pending_irqs(struct ath10k *ar, 926 bool *done) 927 { 928 u8 host_int_status; 929 u32 lookahead; 930 int ret; 931 932 /* NOTE: HIF implementation guarantees that the context of this 933 * call allows us to perform SYNCHRONOUS I/O, that is we can block, 934 * sleep or call any API that can block or switch thread/task 935 * contexts. This is a fully schedulable context. 936 */ 937 938 ret = ath10k_sdio_mbox_read_int_status(ar, 939 &host_int_status, 940 &lookahead); 941 if (ret) { 942 *done = true; 943 goto out; 944 } 945 946 if (!host_int_status && !lookahead) { 947 ret = 0; 948 *done = true; 949 goto out; 950 } 951 952 if (lookahead) { 953 ath10k_dbg(ar, ATH10K_DBG_SDIO, 954 "sdio pending mailbox msg lookahead 0x%08x\n", 955 lookahead); 956 957 ret = ath10k_sdio_mbox_rxmsg_pending_handler(ar, 958 lookahead, 959 done); 960 if (ret) 961 goto out; 962 } 963 964 /* now, handle the rest of the interrupts */ 965 ath10k_dbg(ar, ATH10K_DBG_SDIO, 966 "sdio host_int_status 0x%x\n", host_int_status); 967 968 if (FIELD_GET(MBOX_HOST_INT_STATUS_CPU_MASK, host_int_status)) { 969 /* CPU Interrupt */ 970 ret = ath10k_sdio_mbox_proc_cpu_intr(ar); 971 if (ret) 972 goto out; 973 } 974 975 if (FIELD_GET(MBOX_HOST_INT_STATUS_ERROR_MASK, host_int_status)) { 976 /* Error Interrupt */ 977 ret = ath10k_sdio_mbox_proc_err_intr(ar); 978 if (ret) 979 goto out; 980 } 981 982 if (FIELD_GET(MBOX_HOST_INT_STATUS_COUNTER_MASK, host_int_status)) 983 /* Counter Interrupt */ 984 ret = ath10k_sdio_mbox_proc_counter_intr(ar); 985 986 ret = 0; 987 988 out: 989 /* An optimization to bypass reading the IRQ status registers 990 * unecessarily which can re-wake the target, if upper layers 991 * determine that we are in a low-throughput mode, we can rely on 992 * taking another interrupt rather than re-checking the status 993 * registers which can re-wake the target. 994 * 995 * NOTE : for host interfaces that makes use of detecting pending 996 * mbox messages at hif can not use this optimization due to 997 * possible side effects, SPI requires the host to drain all 998 * messages from the mailbox before exiting the ISR routine. 999 */ 1000 1001 ath10k_dbg(ar, ATH10K_DBG_SDIO, 1002 "sdio pending irqs done %d status %d", 1003 *done, ret); 1004 1005 return ret; 1006 } 1007 1008 static void ath10k_sdio_set_mbox_info(struct ath10k *ar) 1009 { 1010 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 1011 struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info; 1012 u16 device = ar_sdio->func->device, dev_id_base, dev_id_chiprev; 1013 1014 mbox_info->htc_addr = ATH10K_HIF_MBOX_BASE_ADDR; 1015 mbox_info->block_size = ATH10K_HIF_MBOX_BLOCK_SIZE; 1016 mbox_info->block_mask = ATH10K_HIF_MBOX_BLOCK_SIZE - 1; 1017 mbox_info->gmbox_addr = ATH10K_HIF_GMBOX_BASE_ADDR; 1018 mbox_info->gmbox_sz = ATH10K_HIF_GMBOX_WIDTH; 1019 1020 mbox_info->ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR; 1021 1022 dev_id_base = FIELD_GET(QCA_MANUFACTURER_ID_BASE, device); 1023 dev_id_chiprev = FIELD_GET(QCA_MANUFACTURER_ID_REV_MASK, device); 1024 switch (dev_id_base) { 1025 case QCA_MANUFACTURER_ID_AR6005_BASE: 1026 if (dev_id_chiprev < 4) 1027 mbox_info->ext_info[0].htc_ext_sz = 1028 ATH10K_HIF_MBOX0_EXT_WIDTH; 1029 else 1030 /* from QCA6174 2.0(0x504), the width has been extended 1031 * to 56K 1032 */ 1033 mbox_info->ext_info[0].htc_ext_sz = 1034 ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0; 1035 break; 1036 case QCA_MANUFACTURER_ID_QCA9377_BASE: 1037 mbox_info->ext_info[0].htc_ext_sz = 1038 ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0; 1039 break; 1040 default: 1041 mbox_info->ext_info[0].htc_ext_sz = 1042 ATH10K_HIF_MBOX0_EXT_WIDTH; 1043 } 1044 1045 mbox_info->ext_info[1].htc_ext_addr = 1046 mbox_info->ext_info[0].htc_ext_addr + 1047 mbox_info->ext_info[0].htc_ext_sz + 1048 ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE; 1049 mbox_info->ext_info[1].htc_ext_sz = ATH10K_HIF_MBOX1_EXT_WIDTH; 1050 } 1051 1052 /* BMI functions */ 1053 1054 static int ath10k_sdio_bmi_credits(struct ath10k *ar) 1055 { 1056 u32 addr, cmd_credits; 1057 unsigned long timeout; 1058 int ret; 1059 1060 /* Read the counter register to get the command credits */ 1061 addr = MBOX_COUNT_DEC_ADDRESS + ATH10K_HIF_MBOX_NUM_MAX * 4; 1062 timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ; 1063 cmd_credits = 0; 1064 1065 while (time_before(jiffies, timeout) && !cmd_credits) { 1066 /* Hit the credit counter with a 4-byte access, the first byte 1067 * read will hit the counter and cause a decrement, while the 1068 * remaining 3 bytes has no effect. The rationale behind this 1069 * is to make all HIF accesses 4-byte aligned. 1070 */ 1071 ret = ath10k_sdio_read32(ar, addr, &cmd_credits); 1072 if (ret) { 1073 ath10k_warn(ar, 1074 "unable to decrement the command credit count register: %d\n", 1075 ret); 1076 return ret; 1077 } 1078 1079 /* The counter is only 8 bits. 1080 * Ignore anything in the upper 3 bytes 1081 */ 1082 cmd_credits &= 0xFF; 1083 } 1084 1085 if (!cmd_credits) { 1086 ath10k_warn(ar, "bmi communication timeout\n"); 1087 return -ETIMEDOUT; 1088 } 1089 1090 return 0; 1091 } 1092 1093 static int ath10k_sdio_bmi_get_rx_lookahead(struct ath10k *ar) 1094 { 1095 unsigned long timeout; 1096 u32 rx_word; 1097 int ret; 1098 1099 timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ; 1100 rx_word = 0; 1101 1102 while ((time_before(jiffies, timeout)) && !rx_word) { 1103 ret = ath10k_sdio_read32(ar, 1104 MBOX_HOST_INT_STATUS_ADDRESS, 1105 &rx_word); 1106 if (ret) { 1107 ath10k_warn(ar, "unable to read RX_LOOKAHEAD_VALID: %d\n", ret); 1108 return ret; 1109 } 1110 1111 /* all we really want is one bit */ 1112 rx_word &= 1; 1113 } 1114 1115 if (!rx_word) { 1116 ath10k_warn(ar, "bmi_recv_buf FIFO empty\n"); 1117 return -EINVAL; 1118 } 1119 1120 return ret; 1121 } 1122 1123 static int ath10k_sdio_bmi_exchange_msg(struct ath10k *ar, 1124 void *req, u32 req_len, 1125 void *resp, u32 *resp_len) 1126 { 1127 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 1128 u32 addr; 1129 int ret; 1130 1131 if (req) { 1132 ret = ath10k_sdio_bmi_credits(ar); 1133 if (ret) 1134 return ret; 1135 1136 addr = ar_sdio->mbox_info.htc_addr; 1137 1138 memcpy(ar_sdio->bmi_buf, req, req_len); 1139 ret = ath10k_sdio_write(ar, addr, ar_sdio->bmi_buf, req_len); 1140 if (ret) { 1141 ath10k_warn(ar, 1142 "unable to send the bmi data to the device: %d\n", 1143 ret); 1144 return ret; 1145 } 1146 } 1147 1148 if (!resp || !resp_len) 1149 /* No response expected */ 1150 return 0; 1151 1152 /* During normal bootup, small reads may be required. 1153 * Rather than issue an HIF Read and then wait as the Target 1154 * adds successive bytes to the FIFO, we wait here until 1155 * we know that response data is available. 1156 * 1157 * This allows us to cleanly timeout on an unexpected 1158 * Target failure rather than risk problems at the HIF level. 1159 * In particular, this avoids SDIO timeouts and possibly garbage 1160 * data on some host controllers. And on an interconnect 1161 * such as Compact Flash (as well as some SDIO masters) which 1162 * does not provide any indication on data timeout, it avoids 1163 * a potential hang or garbage response. 1164 * 1165 * Synchronization is more difficult for reads larger than the 1166 * size of the MBOX FIFO (128B), because the Target is unable 1167 * to push the 129th byte of data until AFTER the Host posts an 1168 * HIF Read and removes some FIFO data. So for large reads the 1169 * Host proceeds to post an HIF Read BEFORE all the data is 1170 * actually available to read. Fortunately, large BMI reads do 1171 * not occur in practice -- they're supported for debug/development. 1172 * 1173 * So Host/Target BMI synchronization is divided into these cases: 1174 * CASE 1: length < 4 1175 * Should not happen 1176 * 1177 * CASE 2: 4 <= length <= 128 1178 * Wait for first 4 bytes to be in FIFO 1179 * If CONSERVATIVE_BMI_READ is enabled, also wait for 1180 * a BMI command credit, which indicates that the ENTIRE 1181 * response is available in the the FIFO 1182 * 1183 * CASE 3: length > 128 1184 * Wait for the first 4 bytes to be in FIFO 1185 * 1186 * For most uses, a small timeout should be sufficient and we will 1187 * usually see a response quickly; but there may be some unusual 1188 * (debug) cases of BMI_EXECUTE where we want an larger timeout. 1189 * For now, we use an unbounded busy loop while waiting for 1190 * BMI_EXECUTE. 1191 * 1192 * If BMI_EXECUTE ever needs to support longer-latency execution, 1193 * especially in production, this code needs to be enhanced to sleep 1194 * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently 1195 * a function of Host processor speed. 1196 */ 1197 ret = ath10k_sdio_bmi_get_rx_lookahead(ar); 1198 if (ret) 1199 return ret; 1200 1201 /* We always read from the start of the mbox address */ 1202 addr = ar_sdio->mbox_info.htc_addr; 1203 ret = ath10k_sdio_read(ar, addr, ar_sdio->bmi_buf, *resp_len); 1204 if (ret) { 1205 ath10k_warn(ar, 1206 "unable to read the bmi data from the device: %d\n", 1207 ret); 1208 return ret; 1209 } 1210 1211 memcpy(resp, ar_sdio->bmi_buf, *resp_len); 1212 1213 return 0; 1214 } 1215 1216 /* sdio async handling functions */ 1217 1218 static struct ath10k_sdio_bus_request 1219 *ath10k_sdio_alloc_busreq(struct ath10k *ar) 1220 { 1221 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 1222 struct ath10k_sdio_bus_request *bus_req; 1223 1224 spin_lock_bh(&ar_sdio->lock); 1225 1226 if (list_empty(&ar_sdio->bus_req_freeq)) { 1227 bus_req = NULL; 1228 goto out; 1229 } 1230 1231 bus_req = list_first_entry(&ar_sdio->bus_req_freeq, 1232 struct ath10k_sdio_bus_request, list); 1233 list_del(&bus_req->list); 1234 1235 out: 1236 spin_unlock_bh(&ar_sdio->lock); 1237 return bus_req; 1238 } 1239 1240 static void ath10k_sdio_free_bus_req(struct ath10k *ar, 1241 struct ath10k_sdio_bus_request *bus_req) 1242 { 1243 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 1244 1245 memset(bus_req, 0, sizeof(*bus_req)); 1246 1247 spin_lock_bh(&ar_sdio->lock); 1248 list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq); 1249 spin_unlock_bh(&ar_sdio->lock); 1250 } 1251 1252 static void __ath10k_sdio_write_async(struct ath10k *ar, 1253 struct ath10k_sdio_bus_request *req) 1254 { 1255 struct ath10k_htc_ep *ep; 1256 struct sk_buff *skb; 1257 int ret; 1258 1259 skb = req->skb; 1260 ret = ath10k_sdio_write(ar, req->address, skb->data, skb->len); 1261 if (ret) 1262 ath10k_warn(ar, "failed to write skb to 0x%x asynchronously: %d", 1263 req->address, ret); 1264 1265 if (req->htc_msg) { 1266 ep = &ar->htc.endpoint[req->eid]; 1267 ath10k_htc_notify_tx_completion(ep, skb); 1268 } else if (req->comp) { 1269 complete(req->comp); 1270 } 1271 1272 ath10k_sdio_free_bus_req(ar, req); 1273 } 1274 1275 static void ath10k_sdio_write_async_work(struct work_struct *work) 1276 { 1277 struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio, 1278 wr_async_work); 1279 struct ath10k *ar = ar_sdio->ar; 1280 struct ath10k_sdio_bus_request *req, *tmp_req; 1281 1282 spin_lock_bh(&ar_sdio->wr_async_lock); 1283 1284 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { 1285 list_del(&req->list); 1286 spin_unlock_bh(&ar_sdio->wr_async_lock); 1287 __ath10k_sdio_write_async(ar, req); 1288 spin_lock_bh(&ar_sdio->wr_async_lock); 1289 } 1290 1291 spin_unlock_bh(&ar_sdio->wr_async_lock); 1292 } 1293 1294 static int ath10k_sdio_prep_async_req(struct ath10k *ar, u32 addr, 1295 struct sk_buff *skb, 1296 struct completion *comp, 1297 bool htc_msg, enum ath10k_htc_ep_id eid) 1298 { 1299 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 1300 struct ath10k_sdio_bus_request *bus_req; 1301 1302 /* Allocate a bus request for the message and queue it on the 1303 * SDIO workqueue. 1304 */ 1305 bus_req = ath10k_sdio_alloc_busreq(ar); 1306 if (!bus_req) { 1307 ath10k_warn(ar, 1308 "unable to allocate bus request for async request\n"); 1309 return -ENOMEM; 1310 } 1311 1312 bus_req->skb = skb; 1313 bus_req->eid = eid; 1314 bus_req->address = addr; 1315 bus_req->htc_msg = htc_msg; 1316 bus_req->comp = comp; 1317 1318 spin_lock_bh(&ar_sdio->wr_async_lock); 1319 list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq); 1320 spin_unlock_bh(&ar_sdio->wr_async_lock); 1321 1322 return 0; 1323 } 1324 1325 /* IRQ handler */ 1326 1327 static void ath10k_sdio_irq_handler(struct sdio_func *func) 1328 { 1329 struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func); 1330 struct ath10k *ar = ar_sdio->ar; 1331 unsigned long timeout; 1332 bool done = false; 1333 int ret; 1334 1335 /* Release the host during interrupts so we can pick it back up when 1336 * we process commands. 1337 */ 1338 sdio_release_host(ar_sdio->func); 1339 1340 timeout = jiffies + ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ; 1341 do { 1342 ret = ath10k_sdio_mbox_proc_pending_irqs(ar, &done); 1343 if (ret) 1344 break; 1345 } while (time_before(jiffies, timeout) && !done); 1346 1347 ath10k_mac_tx_push_pending(ar); 1348 1349 sdio_claim_host(ar_sdio->func); 1350 1351 if (ret && ret != -ECANCELED) 1352 ath10k_warn(ar, "failed to process pending SDIO interrupts: %d\n", 1353 ret); 1354 } 1355 1356 /* sdio HIF functions */ 1357 1358 static int ath10k_sdio_hif_disable_intrs(struct ath10k *ar) 1359 { 1360 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 1361 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; 1362 struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg; 1363 int ret; 1364 1365 mutex_lock(&irq_data->mtx); 1366 1367 memset(regs, 0, sizeof(*regs)); 1368 ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS, 1369 ®s->int_status_en, sizeof(*regs)); 1370 if (ret) 1371 ath10k_warn(ar, "unable to disable sdio interrupts: %d\n", ret); 1372 1373 mutex_unlock(&irq_data->mtx); 1374 1375 return ret; 1376 } 1377 1378 static int ath10k_sdio_hif_power_up(struct ath10k *ar, 1379 enum ath10k_firmware_mode fw_mode) 1380 { 1381 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 1382 struct sdio_func *func = ar_sdio->func; 1383 int ret; 1384 1385 if (!ar_sdio->is_disabled) 1386 return 0; 1387 1388 ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n"); 1389 1390 ret = ath10k_sdio_config(ar); 1391 if (ret) { 1392 ath10k_err(ar, "failed to config sdio: %d\n", ret); 1393 return ret; 1394 } 1395 1396 sdio_claim_host(func); 1397 1398 ret = sdio_enable_func(func); 1399 if (ret) { 1400 ath10k_warn(ar, "unable to enable sdio function: %d)\n", ret); 1401 sdio_release_host(func); 1402 return ret; 1403 } 1404 1405 sdio_release_host(func); 1406 1407 /* Wait for hardware to initialise. It should take a lot less than 1408 * 20 ms but let's be conservative here. 1409 */ 1410 msleep(20); 1411 1412 ar_sdio->is_disabled = false; 1413 1414 ret = ath10k_sdio_hif_disable_intrs(ar); 1415 if (ret) 1416 return ret; 1417 1418 return 0; 1419 } 1420 1421 static void ath10k_sdio_hif_power_down(struct ath10k *ar) 1422 { 1423 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 1424 int ret; 1425 1426 if (ar_sdio->is_disabled) 1427 return; 1428 1429 ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n"); 1430 1431 /* Disable the card */ 1432 sdio_claim_host(ar_sdio->func); 1433 1434 ret = sdio_disable_func(ar_sdio->func); 1435 if (ret) { 1436 ath10k_warn(ar, "unable to disable sdio function: %d\n", ret); 1437 sdio_release_host(ar_sdio->func); 1438 return; 1439 } 1440 1441 ret = mmc_hw_reset(ar_sdio->func->card->host); 1442 if (ret) 1443 ath10k_warn(ar, "unable to reset sdio: %d\n", ret); 1444 1445 sdio_release_host(ar_sdio->func); 1446 1447 ar_sdio->is_disabled = true; 1448 } 1449 1450 static int ath10k_sdio_hif_tx_sg(struct ath10k *ar, u8 pipe_id, 1451 struct ath10k_hif_sg_item *items, int n_items) 1452 { 1453 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 1454 enum ath10k_htc_ep_id eid; 1455 struct sk_buff *skb; 1456 int ret, i; 1457 1458 eid = pipe_id_to_eid(pipe_id); 1459 1460 for (i = 0; i < n_items; i++) { 1461 size_t padded_len; 1462 u32 address; 1463 1464 skb = items[i].transfer_context; 1465 padded_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio, 1466 skb->len); 1467 skb_trim(skb, padded_len); 1468 1469 /* Write TX data to the end of the mbox address space */ 1470 address = ar_sdio->mbox_addr[eid] + ar_sdio->mbox_size[eid] - 1471 skb->len; 1472 ret = ath10k_sdio_prep_async_req(ar, address, skb, 1473 NULL, true, eid); 1474 if (ret) 1475 return ret; 1476 } 1477 1478 queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work); 1479 1480 return 0; 1481 } 1482 1483 static int ath10k_sdio_hif_enable_intrs(struct ath10k *ar) 1484 { 1485 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 1486 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; 1487 struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg; 1488 int ret; 1489 1490 mutex_lock(&irq_data->mtx); 1491 1492 /* Enable all but CPU interrupts */ 1493 regs->int_status_en = FIELD_PREP(MBOX_INT_STATUS_ENABLE_ERROR_MASK, 1) | 1494 FIELD_PREP(MBOX_INT_STATUS_ENABLE_CPU_MASK, 1) | 1495 FIELD_PREP(MBOX_INT_STATUS_ENABLE_COUNTER_MASK, 1); 1496 1497 /* NOTE: There are some cases where HIF can do detection of 1498 * pending mbox messages which is disabled now. 1499 */ 1500 regs->int_status_en |= 1501 FIELD_PREP(MBOX_INT_STATUS_ENABLE_MBOX_DATA_MASK, 1); 1502 1503 /* Set up the CPU Interrupt status Register */ 1504 regs->cpu_int_status_en = 0; 1505 1506 /* Set up the Error Interrupt status Register */ 1507 regs->err_int_status_en = 1508 FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, 1) | 1509 FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, 1); 1510 1511 /* Enable Counter interrupt status register to get fatal errors for 1512 * debugging. 1513 */ 1514 regs->cntr_int_status_en = 1515 FIELD_PREP(MBOX_COUNTER_INT_STATUS_ENABLE_BIT_MASK, 1516 ATH10K_SDIO_TARGET_DEBUG_INTR_MASK); 1517 1518 ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS, 1519 ®s->int_status_en, sizeof(*regs)); 1520 if (ret) 1521 ath10k_warn(ar, 1522 "failed to update mbox interrupt status register : %d\n", 1523 ret); 1524 1525 mutex_unlock(&irq_data->mtx); 1526 return ret; 1527 } 1528 1529 static int ath10k_sdio_hif_set_mbox_sleep(struct ath10k *ar, bool enable_sleep) 1530 { 1531 u32 val; 1532 int ret; 1533 1534 ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val); 1535 if (ret) { 1536 ath10k_warn(ar, "failed to read fifo/chip control register: %d\n", 1537 ret); 1538 return ret; 1539 } 1540 1541 if (enable_sleep) 1542 val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF; 1543 else 1544 val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON; 1545 1546 ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val); 1547 if (ret) { 1548 ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d", 1549 ret); 1550 return ret; 1551 } 1552 1553 return 0; 1554 } 1555 1556 /* HIF diagnostics */ 1557 1558 static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf, 1559 size_t buf_len) 1560 { 1561 int ret; 1562 1563 /* set window register to start read cycle */ 1564 ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address); 1565 if (ret) { 1566 ath10k_warn(ar, "failed to set mbox window read address: %d", ret); 1567 return ret; 1568 } 1569 1570 /* read the data */ 1571 ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, buf, buf_len); 1572 if (ret) { 1573 ath10k_warn(ar, "failed to read from mbox window data address: %d\n", 1574 ret); 1575 return ret; 1576 } 1577 1578 return 0; 1579 } 1580 1581 static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address, 1582 u32 *value) 1583 { 1584 __le32 *val; 1585 int ret; 1586 1587 val = kzalloc(sizeof(*val), GFP_KERNEL); 1588 if (!val) 1589 return -ENOMEM; 1590 1591 ret = ath10k_sdio_hif_diag_read(ar, address, val, sizeof(*val)); 1592 if (ret) 1593 goto out; 1594 1595 *value = __le32_to_cpu(*val); 1596 1597 out: 1598 kfree(val); 1599 1600 return ret; 1601 } 1602 1603 static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address, 1604 const void *data, int nbytes) 1605 { 1606 int ret; 1607 1608 /* set write data */ 1609 ret = ath10k_sdio_write(ar, MBOX_WINDOW_DATA_ADDRESS, data, nbytes); 1610 if (ret) { 1611 ath10k_warn(ar, 1612 "failed to write 0x%p to mbox window data address: %d\n", 1613 data, ret); 1614 return ret; 1615 } 1616 1617 /* set window register, which starts the write cycle */ 1618 ret = ath10k_sdio_write32(ar, MBOX_WINDOW_WRITE_ADDR_ADDRESS, address); 1619 if (ret) { 1620 ath10k_warn(ar, "failed to set mbox window write address: %d", ret); 1621 return ret; 1622 } 1623 1624 return 0; 1625 } 1626 1627 static int ath10k_sdio_hif_swap_mailbox(struct ath10k *ar) 1628 { 1629 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 1630 u32 addr, val; 1631 int ret = 0; 1632 1633 addr = host_interest_item_address(HI_ITEM(hi_acs_flags)); 1634 1635 ret = ath10k_sdio_hif_diag_read32(ar, addr, &val); 1636 if (ret) { 1637 ath10k_warn(ar, "unable to read hi_acs_flags : %d\n", ret); 1638 return ret; 1639 } 1640 1641 if (val & HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK) { 1642 ath10k_dbg(ar, ATH10K_DBG_SDIO, 1643 "sdio mailbox swap service enabled\n"); 1644 ar_sdio->swap_mbox = true; 1645 } else { 1646 ath10k_dbg(ar, ATH10K_DBG_SDIO, 1647 "sdio mailbox swap service disabled\n"); 1648 ar_sdio->swap_mbox = false; 1649 } 1650 1651 return 0; 1652 } 1653 1654 /* HIF start/stop */ 1655 1656 static int ath10k_sdio_hif_start(struct ath10k *ar) 1657 { 1658 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 1659 int ret; 1660 1661 /* Sleep 20 ms before HIF interrupts are disabled. 1662 * This will give target plenty of time to process the BMI done 1663 * request before interrupts are disabled. 1664 */ 1665 msleep(20); 1666 ret = ath10k_sdio_hif_disable_intrs(ar); 1667 if (ret) 1668 return ret; 1669 1670 /* eid 0 always uses the lower part of the extended mailbox address 1671 * space (ext_info[0].htc_ext_addr). 1672 */ 1673 ar_sdio->mbox_addr[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_addr; 1674 ar_sdio->mbox_size[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_sz; 1675 1676 sdio_claim_host(ar_sdio->func); 1677 1678 /* Register the isr */ 1679 ret = sdio_claim_irq(ar_sdio->func, ath10k_sdio_irq_handler); 1680 if (ret) { 1681 ath10k_warn(ar, "failed to claim sdio interrupt: %d\n", ret); 1682 sdio_release_host(ar_sdio->func); 1683 return ret; 1684 } 1685 1686 sdio_release_host(ar_sdio->func); 1687 1688 ret = ath10k_sdio_hif_enable_intrs(ar); 1689 if (ret) 1690 ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret); 1691 1692 /* Enable sleep and then disable it again */ 1693 ret = ath10k_sdio_hif_set_mbox_sleep(ar, true); 1694 if (ret) 1695 return ret; 1696 1697 /* Wait for 20ms for the written value to take effect */ 1698 msleep(20); 1699 1700 ret = ath10k_sdio_hif_set_mbox_sleep(ar, false); 1701 if (ret) 1702 return ret; 1703 1704 return 0; 1705 } 1706 1707 #define SDIO_IRQ_DISABLE_TIMEOUT_HZ (3 * HZ) 1708 1709 static void ath10k_sdio_irq_disable(struct ath10k *ar) 1710 { 1711 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 1712 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; 1713 struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg; 1714 struct sk_buff *skb; 1715 struct completion irqs_disabled_comp; 1716 int ret; 1717 1718 skb = dev_alloc_skb(sizeof(*regs)); 1719 if (!skb) 1720 return; 1721 1722 mutex_lock(&irq_data->mtx); 1723 1724 memset(regs, 0, sizeof(*regs)); /* disable all interrupts */ 1725 memcpy(skb->data, regs, sizeof(*regs)); 1726 skb_put(skb, sizeof(*regs)); 1727 1728 mutex_unlock(&irq_data->mtx); 1729 1730 init_completion(&irqs_disabled_comp); 1731 ret = ath10k_sdio_prep_async_req(ar, MBOX_INT_STATUS_ENABLE_ADDRESS, 1732 skb, &irqs_disabled_comp, false, 0); 1733 if (ret) 1734 goto out; 1735 1736 queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work); 1737 1738 /* Wait for the completion of the IRQ disable request. 1739 * If there is a timeout we will try to disable irq's anyway. 1740 */ 1741 ret = wait_for_completion_timeout(&irqs_disabled_comp, 1742 SDIO_IRQ_DISABLE_TIMEOUT_HZ); 1743 if (!ret) 1744 ath10k_warn(ar, "sdio irq disable request timed out\n"); 1745 1746 sdio_claim_host(ar_sdio->func); 1747 1748 ret = sdio_release_irq(ar_sdio->func); 1749 if (ret) 1750 ath10k_warn(ar, "failed to release sdio interrupt: %d\n", ret); 1751 1752 sdio_release_host(ar_sdio->func); 1753 1754 out: 1755 kfree_skb(skb); 1756 } 1757 1758 static void ath10k_sdio_hif_stop(struct ath10k *ar) 1759 { 1760 struct ath10k_sdio_bus_request *req, *tmp_req; 1761 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 1762 1763 ath10k_sdio_irq_disable(ar); 1764 1765 cancel_work_sync(&ar_sdio->wr_async_work); 1766 1767 spin_lock_bh(&ar_sdio->wr_async_lock); 1768 1769 /* Free all bus requests that have not been handled */ 1770 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { 1771 struct ath10k_htc_ep *ep; 1772 1773 list_del(&req->list); 1774 1775 if (req->htc_msg) { 1776 ep = &ar->htc.endpoint[req->eid]; 1777 ath10k_htc_notify_tx_completion(ep, req->skb); 1778 } else if (req->skb) { 1779 kfree_skb(req->skb); 1780 } 1781 ath10k_sdio_free_bus_req(ar, req); 1782 } 1783 1784 spin_unlock_bh(&ar_sdio->wr_async_lock); 1785 } 1786 1787 #ifdef CONFIG_PM 1788 1789 static int ath10k_sdio_hif_suspend(struct ath10k *ar) 1790 { 1791 return -EOPNOTSUPP; 1792 } 1793 1794 static int ath10k_sdio_hif_resume(struct ath10k *ar) 1795 { 1796 switch (ar->state) { 1797 case ATH10K_STATE_OFF: 1798 ath10k_dbg(ar, ATH10K_DBG_SDIO, 1799 "sdio resume configuring sdio\n"); 1800 1801 /* need to set sdio settings after power is cut from sdio */ 1802 ath10k_sdio_config(ar); 1803 break; 1804 1805 case ATH10K_STATE_ON: 1806 default: 1807 break; 1808 } 1809 1810 return 0; 1811 } 1812 #endif 1813 1814 static int ath10k_sdio_hif_map_service_to_pipe(struct ath10k *ar, 1815 u16 service_id, 1816 u8 *ul_pipe, u8 *dl_pipe) 1817 { 1818 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); 1819 struct ath10k_htc *htc = &ar->htc; 1820 u32 htt_addr, wmi_addr, htt_mbox_size, wmi_mbox_size; 1821 enum ath10k_htc_ep_id eid; 1822 bool ep_found = false; 1823 int i; 1824 1825 /* For sdio, we are interested in the mapping between eid 1826 * and pipeid rather than service_id to pipe_id. 1827 * First we find out which eid has been allocated to the 1828 * service... 1829 */ 1830 for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) { 1831 if (htc->endpoint[i].service_id == service_id) { 1832 eid = htc->endpoint[i].eid; 1833 ep_found = true; 1834 break; 1835 } 1836 } 1837 1838 if (!ep_found) 1839 return -EINVAL; 1840 1841 /* Then we create the simplest mapping possible between pipeid 1842 * and eid 1843 */ 1844 *ul_pipe = *dl_pipe = (u8)eid; 1845 1846 /* Normally, HTT will use the upper part of the extended 1847 * mailbox address space (ext_info[1].htc_ext_addr) and WMI ctrl 1848 * the lower part (ext_info[0].htc_ext_addr). 1849 * If fw wants swapping of mailbox addresses, the opposite is true. 1850 */ 1851 if (ar_sdio->swap_mbox) { 1852 htt_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr; 1853 wmi_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr; 1854 htt_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz; 1855 wmi_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz; 1856 } else { 1857 htt_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr; 1858 wmi_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr; 1859 htt_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz; 1860 wmi_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz; 1861 } 1862 1863 switch (service_id) { 1864 case ATH10K_HTC_SVC_ID_RSVD_CTRL: 1865 /* HTC ctrl ep mbox address has already been setup in 1866 * ath10k_sdio_hif_start 1867 */ 1868 break; 1869 case ATH10K_HTC_SVC_ID_WMI_CONTROL: 1870 ar_sdio->mbox_addr[eid] = wmi_addr; 1871 ar_sdio->mbox_size[eid] = wmi_mbox_size; 1872 ath10k_dbg(ar, ATH10K_DBG_SDIO, 1873 "sdio wmi ctrl mbox_addr 0x%x mbox_size %d\n", 1874 ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]); 1875 break; 1876 case ATH10K_HTC_SVC_ID_HTT_DATA_MSG: 1877 ar_sdio->mbox_addr[eid] = htt_addr; 1878 ar_sdio->mbox_size[eid] = htt_mbox_size; 1879 ath10k_dbg(ar, ATH10K_DBG_SDIO, 1880 "sdio htt data mbox_addr 0x%x mbox_size %d\n", 1881 ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]); 1882 break; 1883 default: 1884 ath10k_warn(ar, "unsupported HTC service id: %d\n", 1885 service_id); 1886 return -EINVAL; 1887 } 1888 1889 return 0; 1890 } 1891 1892 static void ath10k_sdio_hif_get_default_pipe(struct ath10k *ar, 1893 u8 *ul_pipe, u8 *dl_pipe) 1894 { 1895 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hif get default pipe\n"); 1896 1897 /* HTC ctrl ep (SVC id 1) always has eid (and pipe_id in our 1898 * case) == 0 1899 */ 1900 *ul_pipe = 0; 1901 *dl_pipe = 0; 1902 } 1903 1904 /* This op is currently only used by htc_wait_target if the HTC ready 1905 * message times out. It is not applicable for SDIO since there is nothing 1906 * we can do if the HTC ready message does not arrive in time. 1907 * TODO: Make this op non mandatory by introducing a NULL check in the 1908 * hif op wrapper. 1909 */ 1910 static void ath10k_sdio_hif_send_complete_check(struct ath10k *ar, 1911 u8 pipe, int force) 1912 { 1913 } 1914 1915 static const struct ath10k_hif_ops ath10k_sdio_hif_ops = { 1916 .tx_sg = ath10k_sdio_hif_tx_sg, 1917 .diag_read = ath10k_sdio_hif_diag_read, 1918 .diag_write = ath10k_sdio_hif_diag_write_mem, 1919 .exchange_bmi_msg = ath10k_sdio_bmi_exchange_msg, 1920 .start = ath10k_sdio_hif_start, 1921 .stop = ath10k_sdio_hif_stop, 1922 .swap_mailbox = ath10k_sdio_hif_swap_mailbox, 1923 .map_service_to_pipe = ath10k_sdio_hif_map_service_to_pipe, 1924 .get_default_pipe = ath10k_sdio_hif_get_default_pipe, 1925 .send_complete_check = ath10k_sdio_hif_send_complete_check, 1926 .power_up = ath10k_sdio_hif_power_up, 1927 .power_down = ath10k_sdio_hif_power_down, 1928 #ifdef CONFIG_PM 1929 .suspend = ath10k_sdio_hif_suspend, 1930 .resume = ath10k_sdio_hif_resume, 1931 #endif 1932 }; 1933 1934 #ifdef CONFIG_PM_SLEEP 1935 1936 /* Empty handlers so that mmc subsystem doesn't remove us entirely during 1937 * suspend. We instead follow cfg80211 suspend/resume handlers. 1938 */ 1939 static int ath10k_sdio_pm_suspend(struct device *device) 1940 { 1941 return 0; 1942 } 1943 1944 static int ath10k_sdio_pm_resume(struct device *device) 1945 { 1946 return 0; 1947 } 1948 1949 static SIMPLE_DEV_PM_OPS(ath10k_sdio_pm_ops, ath10k_sdio_pm_suspend, 1950 ath10k_sdio_pm_resume); 1951 1952 #define ATH10K_SDIO_PM_OPS (&ath10k_sdio_pm_ops) 1953 1954 #else 1955 1956 #define ATH10K_SDIO_PM_OPS NULL 1957 1958 #endif /* CONFIG_PM_SLEEP */ 1959 1960 static int ath10k_sdio_probe(struct sdio_func *func, 1961 const struct sdio_device_id *id) 1962 { 1963 struct ath10k_sdio *ar_sdio; 1964 struct ath10k *ar; 1965 enum ath10k_hw_rev hw_rev; 1966 u32 dev_id_base; 1967 struct ath10k_bus_params bus_params = {}; 1968 int ret, i; 1969 1970 /* Assumption: All SDIO based chipsets (so far) are QCA6174 based. 1971 * If there will be newer chipsets that does not use the hw reg 1972 * setup as defined in qca6174_regs and qca6174_values, this 1973 * assumption is no longer valid and hw_rev must be setup differently 1974 * depending on chipset. 1975 */ 1976 hw_rev = ATH10K_HW_QCA6174; 1977 1978 ar = ath10k_core_create(sizeof(*ar_sdio), &func->dev, ATH10K_BUS_SDIO, 1979 hw_rev, &ath10k_sdio_hif_ops); 1980 if (!ar) { 1981 dev_err(&func->dev, "failed to allocate core\n"); 1982 return -ENOMEM; 1983 } 1984 1985 ath10k_dbg(ar, ATH10K_DBG_BOOT, 1986 "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n", 1987 func->num, func->vendor, func->device, 1988 func->max_blksize, func->cur_blksize); 1989 1990 ar_sdio = ath10k_sdio_priv(ar); 1991 1992 ar_sdio->irq_data.irq_proc_reg = 1993 devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_proc_regs), 1994 GFP_KERNEL); 1995 if (!ar_sdio->irq_data.irq_proc_reg) { 1996 ret = -ENOMEM; 1997 goto err_core_destroy; 1998 } 1999 2000 ar_sdio->irq_data.irq_en_reg = 2001 devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs), 2002 GFP_KERNEL); 2003 if (!ar_sdio->irq_data.irq_en_reg) { 2004 ret = -ENOMEM; 2005 goto err_core_destroy; 2006 } 2007 2008 ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_CMDBUF_SIZE, GFP_KERNEL); 2009 if (!ar_sdio->bmi_buf) { 2010 ret = -ENOMEM; 2011 goto err_core_destroy; 2012 } 2013 2014 ar_sdio->func = func; 2015 sdio_set_drvdata(func, ar_sdio); 2016 2017 ar_sdio->is_disabled = true; 2018 ar_sdio->ar = ar; 2019 2020 spin_lock_init(&ar_sdio->lock); 2021 spin_lock_init(&ar_sdio->wr_async_lock); 2022 mutex_init(&ar_sdio->irq_data.mtx); 2023 2024 INIT_LIST_HEAD(&ar_sdio->bus_req_freeq); 2025 INIT_LIST_HEAD(&ar_sdio->wr_asyncq); 2026 2027 INIT_WORK(&ar_sdio->wr_async_work, ath10k_sdio_write_async_work); 2028 ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq"); 2029 if (!ar_sdio->workqueue) { 2030 ret = -ENOMEM; 2031 goto err_core_destroy; 2032 } 2033 2034 for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++) 2035 ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]); 2036 2037 dev_id_base = FIELD_GET(QCA_MANUFACTURER_ID_BASE, id->device); 2038 switch (dev_id_base) { 2039 case QCA_MANUFACTURER_ID_AR6005_BASE: 2040 case QCA_MANUFACTURER_ID_QCA9377_BASE: 2041 ar->dev_id = QCA9377_1_0_DEVICE_ID; 2042 break; 2043 default: 2044 ret = -ENODEV; 2045 ath10k_err(ar, "unsupported device id %u (0x%x)\n", 2046 dev_id_base, id->device); 2047 goto err_free_wq; 2048 } 2049 2050 ar->id.vendor = id->vendor; 2051 ar->id.device = id->device; 2052 2053 ath10k_sdio_set_mbox_info(ar); 2054 2055 bus_params.dev_type = ATH10K_DEV_TYPE_HL; 2056 /* TODO: don't know yet how to get chip_id with SDIO */ 2057 bus_params.chip_id = 0; 2058 bus_params.hl_msdu_ids = true; 2059 2060 ret = ath10k_core_register(ar, &bus_params); 2061 if (ret) { 2062 ath10k_err(ar, "failed to register driver core: %d\n", ret); 2063 goto err_free_wq; 2064 } 2065 2066 /* TODO: remove this once SDIO support is fully implemented */ 2067 ath10k_warn(ar, "WARNING: ath10k SDIO support is work-in-progress, problems may arise!\n"); 2068 2069 return 0; 2070 2071 err_free_wq: 2072 destroy_workqueue(ar_sdio->workqueue); 2073 err_core_destroy: 2074 ath10k_core_destroy(ar); 2075 2076 return ret; 2077 } 2078 2079 static void ath10k_sdio_remove(struct sdio_func *func) 2080 { 2081 struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func); 2082 struct ath10k *ar = ar_sdio->ar; 2083 2084 ath10k_dbg(ar, ATH10K_DBG_BOOT, 2085 "sdio removed func %d vendor 0x%x device 0x%x\n", 2086 func->num, func->vendor, func->device); 2087 2088 ath10k_core_unregister(ar); 2089 ath10k_core_destroy(ar); 2090 } 2091 2092 static const struct sdio_device_id ath10k_sdio_devices[] = { 2093 {SDIO_DEVICE(QCA_MANUFACTURER_CODE, 2094 (QCA_SDIO_ID_AR6005_BASE | 0xA))}, 2095 {SDIO_DEVICE(QCA_MANUFACTURER_CODE, 2096 (QCA_SDIO_ID_QCA9377_BASE | 0x1))}, 2097 {}, 2098 }; 2099 2100 MODULE_DEVICE_TABLE(sdio, ath10k_sdio_devices); 2101 2102 static struct sdio_driver ath10k_sdio_driver = { 2103 .name = "ath10k_sdio", 2104 .id_table = ath10k_sdio_devices, 2105 .probe = ath10k_sdio_probe, 2106 .remove = ath10k_sdio_remove, 2107 .drv = { 2108 .owner = THIS_MODULE, 2109 .pm = ATH10K_SDIO_PM_OPS, 2110 }, 2111 }; 2112 2113 static int __init ath10k_sdio_init(void) 2114 { 2115 int ret; 2116 2117 ret = sdio_register_driver(&ath10k_sdio_driver); 2118 if (ret) 2119 pr_err("sdio driver registration failed: %d\n", ret); 2120 2121 return ret; 2122 } 2123 2124 static void __exit ath10k_sdio_exit(void) 2125 { 2126 sdio_unregister_driver(&ath10k_sdio_driver); 2127 } 2128 2129 module_init(ath10k_sdio_init); 2130 module_exit(ath10k_sdio_exit); 2131 2132 MODULE_AUTHOR("Qualcomm Atheros"); 2133 MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN SDIO devices"); 2134 MODULE_LICENSE("Dual BSD/GPL"); 2135