1 /* 2 * Copyright (c) 2004-2011 Atheros Communications Inc. 3 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for any 6 * purpose with or without fee is hereby granted, provided that the above 7 * copyright notice and this permission notice appear in all copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 */ 17 18 #include <linux/module.h> 19 #include <linux/mmc/card.h> 20 #include <linux/mmc/mmc.h> 21 #include <linux/mmc/host.h> 22 #include <linux/mmc/sdio_func.h> 23 #include <linux/mmc/sdio_ids.h> 24 #include <linux/mmc/sdio.h> 25 #include <linux/mmc/sd.h> 26 #include "hif.h" 27 #include "hif-ops.h" 28 #include "target.h" 29 #include "debug.h" 30 #include "cfg80211.h" 31 32 struct ath6kl_sdio { 33 struct sdio_func *func; 34 35 /* protects access to bus_req_freeq */ 36 spinlock_t lock; 37 38 /* free list */ 39 struct list_head bus_req_freeq; 40 41 /* available bus requests */ 42 struct bus_request bus_req[BUS_REQUEST_MAX_NUM]; 43 44 struct ath6kl *ar; 45 46 u8 *dma_buffer; 47 48 /* protects access to dma_buffer */ 49 struct mutex dma_buffer_mutex; 50 51 /* scatter request list head */ 52 struct list_head scat_req; 53 54 atomic_t irq_handling; 55 wait_queue_head_t irq_wq; 56 57 /* protects access to scat_req */ 58 spinlock_t scat_lock; 59 60 bool scatter_enabled; 61 62 bool is_disabled; 63 const struct sdio_device_id *id; 64 struct work_struct wr_async_work; 65 struct list_head wr_asyncq; 66 67 /* protects access to wr_asyncq */ 68 spinlock_t wr_async_lock; 69 }; 70 71 #define CMD53_ARG_READ 0 72 #define CMD53_ARG_WRITE 1 73 #define CMD53_ARG_BLOCK_BASIS 1 74 #define CMD53_ARG_FIXED_ADDRESS 0 75 #define CMD53_ARG_INCR_ADDRESS 1 76 77 static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar) 78 { 79 return ar->hif_priv; 80 } 81 82 /* 83 * Macro to check if DMA buffer is WORD-aligned and DMA-able. 84 * Most host controllers assume the buffer is DMA'able and will 85 * bug-check otherwise (i.e. buffers on the stack). virt_addr_valid 86 * check fails on stack memory. 87 */ 88 static inline bool buf_needs_bounce(u8 *buf) 89 { 90 return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf); 91 } 92 93 static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar) 94 { 95 struct ath6kl_mbox_info *mbox_info = &ar->mbox_info; 96 97 /* EP1 has an extended range */ 98 mbox_info->htc_addr = HIF_MBOX_BASE_ADDR; 99 mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR; 100 mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH; 101 mbox_info->block_size = HIF_MBOX_BLOCK_SIZE; 102 mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR; 103 mbox_info->gmbox_sz = HIF_GMBOX_WIDTH; 104 } 105 106 static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func, 107 u8 mode, u8 opcode, u32 addr, 108 u16 blksz) 109 { 110 *arg = (((rw & 1) << 31) | 111 ((func & 0x7) << 28) | 112 ((mode & 1) << 27) | 113 ((opcode & 1) << 26) | 114 ((addr & 0x1FFFF) << 9) | 115 (blksz & 0x1FF)); 116 } 117 118 static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw, 119 unsigned int address, 120 unsigned char val) 121 { 122 const u8 func = 0; 123 124 *arg = ((write & 1) << 31) | 125 ((func & 0x7) << 28) | 126 ((raw & 1) << 27) | 127 (1 << 26) | 128 ((address & 0x1FFFF) << 9) | 129 (1 << 8) | 130 (val & 0xFF); 131 } 132 133 static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card, 134 unsigned int address, 135 unsigned char byte) 136 { 137 struct mmc_command io_cmd; 138 139 memset(&io_cmd, 0, sizeof(io_cmd)); 140 ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte); 141 io_cmd.opcode = SD_IO_RW_DIRECT; 142 io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC; 143 144 return mmc_wait_for_cmd(card->host, &io_cmd, 0); 145 } 146 147 static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr, 148 u8 *buf, u32 len) 149 { 150 int ret = 0; 151 152 sdio_claim_host(func); 153 154 if (request & HIF_WRITE) { 155 /* FIXME: looks like ugly workaround for something */ 156 if (addr >= HIF_MBOX_BASE_ADDR && 157 addr <= HIF_MBOX_END_ADDR) 158 addr += (HIF_MBOX_WIDTH - len); 159 160 /* FIXME: this also looks like ugly workaround */ 161 if (addr == HIF_MBOX0_EXT_BASE_ADDR) 162 addr += HIF_MBOX0_EXT_WIDTH - len; 163 164 if (request & HIF_FIXED_ADDRESS) 165 ret = sdio_writesb(func, addr, buf, len); 166 else 167 ret = sdio_memcpy_toio(func, addr, buf, len); 168 } else { 169 if (request & HIF_FIXED_ADDRESS) 170 ret = sdio_readsb(func, buf, addr, len); 171 else 172 ret = sdio_memcpy_fromio(func, buf, addr, len); 173 } 174 175 sdio_release_host(func); 176 177 ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n", 178 request & HIF_WRITE ? "wr" : "rd", addr, 179 request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len); 180 ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len); 181 182 return ret; 183 } 184 185 static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio) 186 { 187 struct bus_request *bus_req; 188 189 spin_lock_bh(&ar_sdio->lock); 190 191 if (list_empty(&ar_sdio->bus_req_freeq)) { 192 spin_unlock_bh(&ar_sdio->lock); 193 return NULL; 194 } 195 196 bus_req = list_first_entry(&ar_sdio->bus_req_freeq, 197 struct bus_request, list); 198 list_del(&bus_req->list); 199 200 spin_unlock_bh(&ar_sdio->lock); 201 ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n", 202 __func__, bus_req); 203 204 return bus_req; 205 } 206 207 static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio, 208 struct bus_request *bus_req) 209 { 210 ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n", 211 __func__, bus_req); 212 213 spin_lock_bh(&ar_sdio->lock); 214 list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq); 215 spin_unlock_bh(&ar_sdio->lock); 216 } 217 218 static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req, 219 struct mmc_data *data) 220 { 221 struct scatterlist *sg; 222 int i; 223 224 data->blksz = HIF_MBOX_BLOCK_SIZE; 225 data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE; 226 227 ath6kl_dbg(ATH6KL_DBG_SCATTER, 228 "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d) , (tot:%d,sg:%d)\n", 229 (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr, 230 data->blksz, data->blocks, scat_req->len, 231 scat_req->scat_entries); 232 233 data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE : 234 MMC_DATA_READ; 235 236 /* fill SG entries */ 237 sg = scat_req->sgentries; 238 sg_init_table(sg, scat_req->scat_entries); 239 240 /* assemble SG list */ 241 for (i = 0; i < scat_req->scat_entries; i++, sg++) { 242 ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n", 243 i, scat_req->scat_list[i].buf, 244 scat_req->scat_list[i].len); 245 246 sg_set_buf(sg, scat_req->scat_list[i].buf, 247 scat_req->scat_list[i].len); 248 } 249 250 /* set scatter-gather table for request */ 251 data->sg = scat_req->sgentries; 252 data->sg_len = scat_req->scat_entries; 253 } 254 255 static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio, 256 struct bus_request *req) 257 { 258 struct mmc_request mmc_req; 259 struct mmc_command cmd; 260 struct mmc_data data; 261 struct hif_scatter_req *scat_req; 262 u8 opcode, rw; 263 int status, len; 264 265 scat_req = req->scat_req; 266 267 if (scat_req->virt_scat) { 268 len = scat_req->len; 269 if (scat_req->req & HIF_BLOCK_BASIS) 270 len = round_down(len, HIF_MBOX_BLOCK_SIZE); 271 272 status = ath6kl_sdio_io(ar_sdio->func, scat_req->req, 273 scat_req->addr, scat_req->virt_dma_buf, 274 len); 275 goto scat_complete; 276 } 277 278 memset(&mmc_req, 0, sizeof(struct mmc_request)); 279 memset(&cmd, 0, sizeof(struct mmc_command)); 280 memset(&data, 0, sizeof(struct mmc_data)); 281 282 ath6kl_sdio_setup_scat_data(scat_req, &data); 283 284 opcode = (scat_req->req & HIF_FIXED_ADDRESS) ? 285 CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS; 286 287 rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ; 288 289 /* Fixup the address so that the last byte will fall on MBOX EOM */ 290 if (scat_req->req & HIF_WRITE) { 291 if (scat_req->addr == HIF_MBOX_BASE_ADDR) 292 scat_req->addr += HIF_MBOX_WIDTH - scat_req->len; 293 else 294 /* Uses extended address range */ 295 scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len; 296 } 297 298 /* set command argument */ 299 ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num, 300 CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr, 301 data.blocks); 302 303 cmd.opcode = SD_IO_RW_EXTENDED; 304 cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC; 305 306 mmc_req.cmd = &cmd; 307 mmc_req.data = &data; 308 309 sdio_claim_host(ar_sdio->func); 310 311 mmc_set_data_timeout(&data, ar_sdio->func->card); 312 /* synchronous call to process request */ 313 mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req); 314 315 sdio_release_host(ar_sdio->func); 316 317 status = cmd.error ? cmd.error : data.error; 318 319 scat_complete: 320 scat_req->status = status; 321 322 if (scat_req->status) 323 ath6kl_err("Scatter write request failed:%d\n", 324 scat_req->status); 325 326 if (scat_req->req & HIF_ASYNCHRONOUS) 327 scat_req->complete(ar_sdio->ar->htc_target, scat_req); 328 329 return status; 330 } 331 332 static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio, 333 int n_scat_entry, int n_scat_req, 334 bool virt_scat) 335 { 336 struct hif_scatter_req *s_req; 337 struct bus_request *bus_req; 338 int i, scat_req_sz, scat_list_sz, sg_sz, buf_sz; 339 u8 *virt_buf; 340 341 scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item); 342 scat_req_sz = sizeof(*s_req) + scat_list_sz; 343 344 if (!virt_scat) 345 sg_sz = sizeof(struct scatterlist) * n_scat_entry; 346 else 347 buf_sz = 2 * L1_CACHE_BYTES + 348 ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER; 349 350 for (i = 0; i < n_scat_req; i++) { 351 /* allocate the scatter request */ 352 s_req = kzalloc(scat_req_sz, GFP_KERNEL); 353 if (!s_req) 354 return -ENOMEM; 355 356 if (virt_scat) { 357 virt_buf = kzalloc(buf_sz, GFP_KERNEL); 358 if (!virt_buf) { 359 kfree(s_req); 360 return -ENOMEM; 361 } 362 363 s_req->virt_dma_buf = 364 (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf); 365 } else { 366 /* allocate sglist */ 367 s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL); 368 369 if (!s_req->sgentries) { 370 kfree(s_req); 371 return -ENOMEM; 372 } 373 } 374 375 /* allocate a bus request for this scatter request */ 376 bus_req = ath6kl_sdio_alloc_busreq(ar_sdio); 377 if (!bus_req) { 378 kfree(s_req->sgentries); 379 kfree(s_req->virt_dma_buf); 380 kfree(s_req); 381 return -ENOMEM; 382 } 383 384 /* assign the scatter request to this bus request */ 385 bus_req->scat_req = s_req; 386 s_req->busrequest = bus_req; 387 388 s_req->virt_scat = virt_scat; 389 390 /* add it to the scatter pool */ 391 hif_scatter_req_add(ar_sdio->ar, s_req); 392 } 393 394 return 0; 395 } 396 397 static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf, 398 u32 len, u32 request) 399 { 400 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 401 u8 *tbuf = NULL; 402 int ret; 403 bool bounced = false; 404 405 if (request & HIF_BLOCK_BASIS) 406 len = round_down(len, HIF_MBOX_BLOCK_SIZE); 407 408 if (buf_needs_bounce(buf)) { 409 if (!ar_sdio->dma_buffer) 410 return -ENOMEM; 411 mutex_lock(&ar_sdio->dma_buffer_mutex); 412 tbuf = ar_sdio->dma_buffer; 413 414 if (request & HIF_WRITE) 415 memcpy(tbuf, buf, len); 416 417 bounced = true; 418 } else 419 tbuf = buf; 420 421 ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len); 422 if ((request & HIF_READ) && bounced) 423 memcpy(buf, tbuf, len); 424 425 if (bounced) 426 mutex_unlock(&ar_sdio->dma_buffer_mutex); 427 428 return ret; 429 } 430 431 static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio, 432 struct bus_request *req) 433 { 434 if (req->scat_req) 435 ath6kl_sdio_scat_rw(ar_sdio, req); 436 else { 437 void *context; 438 int status; 439 440 status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address, 441 req->buffer, req->length, 442 req->request); 443 context = req->packet; 444 ath6kl_sdio_free_bus_req(ar_sdio, req); 445 ath6kl_hif_rw_comp_handler(context, status); 446 } 447 } 448 449 static void ath6kl_sdio_write_async_work(struct work_struct *work) 450 { 451 struct ath6kl_sdio *ar_sdio; 452 struct bus_request *req, *tmp_req; 453 454 ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work); 455 456 spin_lock_bh(&ar_sdio->wr_async_lock); 457 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { 458 list_del(&req->list); 459 spin_unlock_bh(&ar_sdio->wr_async_lock); 460 __ath6kl_sdio_write_async(ar_sdio, req); 461 spin_lock_bh(&ar_sdio->wr_async_lock); 462 } 463 spin_unlock_bh(&ar_sdio->wr_async_lock); 464 } 465 466 static void ath6kl_sdio_irq_handler(struct sdio_func *func) 467 { 468 int status; 469 struct ath6kl_sdio *ar_sdio; 470 471 ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n"); 472 473 ar_sdio = sdio_get_drvdata(func); 474 atomic_set(&ar_sdio->irq_handling, 1); 475 /* 476 * Release the host during interrups so we can pick it back up when 477 * we process commands. 478 */ 479 sdio_release_host(ar_sdio->func); 480 481 status = ath6kl_hif_intr_bh_handler(ar_sdio->ar); 482 sdio_claim_host(ar_sdio->func); 483 484 atomic_set(&ar_sdio->irq_handling, 0); 485 wake_up(&ar_sdio->irq_wq); 486 487 WARN_ON(status && status != -ECANCELED); 488 } 489 490 static int ath6kl_sdio_power_on(struct ath6kl *ar) 491 { 492 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 493 struct sdio_func *func = ar_sdio->func; 494 int ret = 0; 495 496 if (!ar_sdio->is_disabled) 497 return 0; 498 499 ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n"); 500 501 sdio_claim_host(func); 502 503 ret = sdio_enable_func(func); 504 if (ret) { 505 ath6kl_err("Unable to enable sdio func: %d)\n", ret); 506 sdio_release_host(func); 507 return ret; 508 } 509 510 sdio_release_host(func); 511 512 /* 513 * Wait for hardware to initialise. It should take a lot less than 514 * 10 ms but let's be conservative here. 515 */ 516 msleep(10); 517 518 ar_sdio->is_disabled = false; 519 520 return ret; 521 } 522 523 static int ath6kl_sdio_power_off(struct ath6kl *ar) 524 { 525 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 526 int ret; 527 528 if (ar_sdio->is_disabled) 529 return 0; 530 531 ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n"); 532 533 /* Disable the card */ 534 sdio_claim_host(ar_sdio->func); 535 ret = sdio_disable_func(ar_sdio->func); 536 sdio_release_host(ar_sdio->func); 537 538 if (ret) 539 return ret; 540 541 ar_sdio->is_disabled = true; 542 543 return ret; 544 } 545 546 static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer, 547 u32 length, u32 request, 548 struct htc_packet *packet) 549 { 550 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 551 struct bus_request *bus_req; 552 553 bus_req = ath6kl_sdio_alloc_busreq(ar_sdio); 554 555 if (WARN_ON_ONCE(!bus_req)) 556 return -ENOMEM; 557 558 bus_req->address = address; 559 bus_req->buffer = buffer; 560 bus_req->length = length; 561 bus_req->request = request; 562 bus_req->packet = packet; 563 564 spin_lock_bh(&ar_sdio->wr_async_lock); 565 list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq); 566 spin_unlock_bh(&ar_sdio->wr_async_lock); 567 queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work); 568 569 return 0; 570 } 571 572 static void ath6kl_sdio_irq_enable(struct ath6kl *ar) 573 { 574 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 575 int ret; 576 577 sdio_claim_host(ar_sdio->func); 578 579 /* Register the isr */ 580 ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler); 581 if (ret) 582 ath6kl_err("Failed to claim sdio irq: %d\n", ret); 583 584 sdio_release_host(ar_sdio->func); 585 } 586 587 static bool ath6kl_sdio_is_on_irq(struct ath6kl *ar) 588 { 589 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 590 591 return !atomic_read(&ar_sdio->irq_handling); 592 } 593 594 static void ath6kl_sdio_irq_disable(struct ath6kl *ar) 595 { 596 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 597 int ret; 598 599 sdio_claim_host(ar_sdio->func); 600 601 if (atomic_read(&ar_sdio->irq_handling)) { 602 sdio_release_host(ar_sdio->func); 603 604 ret = wait_event_interruptible(ar_sdio->irq_wq, 605 ath6kl_sdio_is_on_irq(ar)); 606 if (ret) 607 return; 608 609 sdio_claim_host(ar_sdio->func); 610 } 611 612 ret = sdio_release_irq(ar_sdio->func); 613 if (ret) 614 ath6kl_err("Failed to release sdio irq: %d\n", ret); 615 616 sdio_release_host(ar_sdio->func); 617 } 618 619 static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar) 620 { 621 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 622 struct hif_scatter_req *node = NULL; 623 624 spin_lock_bh(&ar_sdio->scat_lock); 625 626 if (!list_empty(&ar_sdio->scat_req)) { 627 node = list_first_entry(&ar_sdio->scat_req, 628 struct hif_scatter_req, list); 629 list_del(&node->list); 630 631 node->scat_q_depth = get_queue_depth(&ar_sdio->scat_req); 632 } 633 634 spin_unlock_bh(&ar_sdio->scat_lock); 635 636 return node; 637 } 638 639 static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar, 640 struct hif_scatter_req *s_req) 641 { 642 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 643 644 spin_lock_bh(&ar_sdio->scat_lock); 645 646 list_add_tail(&s_req->list, &ar_sdio->scat_req); 647 648 spin_unlock_bh(&ar_sdio->scat_lock); 649 650 } 651 652 /* scatter gather read write request */ 653 static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar, 654 struct hif_scatter_req *scat_req) 655 { 656 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 657 u32 request = scat_req->req; 658 int status = 0; 659 660 if (!scat_req->len) 661 return -EINVAL; 662 663 ath6kl_dbg(ATH6KL_DBG_SCATTER, 664 "hif-scatter: total len: %d scatter entries: %d\n", 665 scat_req->len, scat_req->scat_entries); 666 667 if (request & HIF_SYNCHRONOUS) 668 status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest); 669 else { 670 spin_lock_bh(&ar_sdio->wr_async_lock); 671 list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq); 672 spin_unlock_bh(&ar_sdio->wr_async_lock); 673 queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work); 674 } 675 676 return status; 677 } 678 679 /* clean up scatter support */ 680 static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar) 681 { 682 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 683 struct hif_scatter_req *s_req, *tmp_req; 684 685 /* empty the free list */ 686 spin_lock_bh(&ar_sdio->scat_lock); 687 list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) { 688 list_del(&s_req->list); 689 spin_unlock_bh(&ar_sdio->scat_lock); 690 691 /* 692 * FIXME: should we also call completion handler with 693 * ath6kl_hif_rw_comp_handler() with status -ECANCELED so 694 * that the packet is properly freed? 695 */ 696 if (s_req->busrequest) 697 ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest); 698 kfree(s_req->virt_dma_buf); 699 kfree(s_req->sgentries); 700 kfree(s_req); 701 702 spin_lock_bh(&ar_sdio->scat_lock); 703 } 704 spin_unlock_bh(&ar_sdio->scat_lock); 705 } 706 707 /* setup of HIF scatter resources */ 708 static int ath6kl_sdio_enable_scatter(struct ath6kl *ar) 709 { 710 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 711 struct htc_target *target = ar->htc_target; 712 int ret = 0; 713 bool virt_scat = false; 714 715 if (ar_sdio->scatter_enabled) 716 return 0; 717 718 ar_sdio->scatter_enabled = true; 719 720 /* check if host supports scatter and it meets our requirements */ 721 if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) { 722 ath6kl_err("host only supports scatter of :%d entries, need: %d\n", 723 ar_sdio->func->card->host->max_segs, 724 MAX_SCATTER_ENTRIES_PER_REQ); 725 virt_scat = true; 726 } 727 728 if (!virt_scat) { 729 ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio, 730 MAX_SCATTER_ENTRIES_PER_REQ, 731 MAX_SCATTER_REQUESTS, virt_scat); 732 733 if (!ret) { 734 ath6kl_dbg(ATH6KL_DBG_BOOT, 735 "hif-scatter enabled requests %d entries %d\n", 736 MAX_SCATTER_REQUESTS, 737 MAX_SCATTER_ENTRIES_PER_REQ); 738 739 target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ; 740 target->max_xfer_szper_scatreq = 741 MAX_SCATTER_REQ_TRANSFER_SIZE; 742 } else { 743 ath6kl_sdio_cleanup_scatter(ar); 744 ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n"); 745 } 746 } 747 748 if (virt_scat || ret) { 749 ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio, 750 ATH6KL_SCATTER_ENTRIES_PER_REQ, 751 ATH6KL_SCATTER_REQS, virt_scat); 752 753 if (ret) { 754 ath6kl_err("failed to alloc virtual scatter resources !\n"); 755 ath6kl_sdio_cleanup_scatter(ar); 756 return ret; 757 } 758 759 ath6kl_dbg(ATH6KL_DBG_BOOT, 760 "virtual scatter enabled requests %d entries %d\n", 761 ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ); 762 763 target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ; 764 target->max_xfer_szper_scatreq = 765 ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER; 766 } 767 768 return 0; 769 } 770 771 static int ath6kl_sdio_config(struct ath6kl *ar) 772 { 773 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 774 struct sdio_func *func = ar_sdio->func; 775 int ret; 776 777 sdio_claim_host(func); 778 779 if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >= 780 MANUFACTURER_ID_AR6003_BASE) { 781 /* enable 4-bit ASYNC interrupt on AR6003 or later */ 782 ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card, 783 CCCR_SDIO_IRQ_MODE_REG, 784 SDIO_IRQ_MODE_ASYNC_4BIT_IRQ); 785 if (ret) { 786 ath6kl_err("Failed to enable 4-bit async irq mode %d\n", 787 ret); 788 goto out; 789 } 790 791 ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n"); 792 } 793 794 /* give us some time to enable, in ms */ 795 func->enable_timeout = 100; 796 797 ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE); 798 if (ret) { 799 ath6kl_err("Set sdio block size %d failed: %d)\n", 800 HIF_MBOX_BLOCK_SIZE, ret); 801 goto out; 802 } 803 804 out: 805 sdio_release_host(func); 806 807 return ret; 808 } 809 810 static int ath6kl_set_sdio_pm_caps(struct ath6kl *ar) 811 { 812 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 813 struct sdio_func *func = ar_sdio->func; 814 mmc_pm_flag_t flags; 815 int ret; 816 817 flags = sdio_get_host_pm_caps(func); 818 819 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags); 820 821 if (!(flags & MMC_PM_WAKE_SDIO_IRQ) || 822 !(flags & MMC_PM_KEEP_POWER)) 823 return -EINVAL; 824 825 ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); 826 if (ret) { 827 ath6kl_err("set sdio keep pwr flag failed: %d\n", ret); 828 return ret; 829 } 830 831 /* sdio irq wakes up host */ 832 ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ); 833 if (ret) 834 ath6kl_err("set sdio wake irq flag failed: %d\n", ret); 835 836 return ret; 837 } 838 839 static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow) 840 { 841 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 842 struct sdio_func *func = ar_sdio->func; 843 mmc_pm_flag_t flags; 844 bool try_deepsleep = false; 845 int ret; 846 847 if (ar->suspend_mode == WLAN_POWER_STATE_WOW || 848 (!ar->suspend_mode && wow)) { 849 850 ret = ath6kl_set_sdio_pm_caps(ar); 851 if (ret) 852 goto cut_pwr; 853 854 ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow); 855 if (ret && ret != -ENOTCONN) 856 ath6kl_err("wow suspend failed: %d\n", ret); 857 858 if (ret && 859 (!ar->wow_suspend_mode || 860 ar->wow_suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP)) 861 try_deepsleep = true; 862 else if (ret && 863 ar->wow_suspend_mode == WLAN_POWER_STATE_CUT_PWR) 864 goto cut_pwr; 865 if (!ret) 866 return 0; 867 } 868 869 if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP || 870 !ar->suspend_mode || try_deepsleep) { 871 872 flags = sdio_get_host_pm_caps(func); 873 if (!(flags & MMC_PM_KEEP_POWER)) 874 goto cut_pwr; 875 876 ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); 877 if (ret) 878 goto cut_pwr; 879 880 /* 881 * Workaround to support Deep Sleep with MSM, set the host pm 882 * flag as MMC_PM_WAKE_SDIO_IRQ to allow SDCC deiver to disable 883 * the sdc2_clock and internally allows MSM to enter 884 * TCXO shutdown properly. 885 */ 886 if ((flags & MMC_PM_WAKE_SDIO_IRQ)) { 887 ret = sdio_set_host_pm_flags(func, 888 MMC_PM_WAKE_SDIO_IRQ); 889 if (ret) 890 goto cut_pwr; 891 } 892 893 ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP, 894 NULL); 895 if (ret) 896 goto cut_pwr; 897 898 return 0; 899 } 900 901 cut_pwr: 902 if (func->card && func->card->host) 903 func->card->host->pm_flags &= ~MMC_PM_KEEP_POWER; 904 905 return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER, NULL); 906 } 907 908 static int ath6kl_sdio_resume(struct ath6kl *ar) 909 { 910 switch (ar->state) { 911 case ATH6KL_STATE_OFF: 912 case ATH6KL_STATE_CUTPOWER: 913 ath6kl_dbg(ATH6KL_DBG_SUSPEND, 914 "sdio resume configuring sdio\n"); 915 916 /* need to set sdio settings after power is cut from sdio */ 917 ath6kl_sdio_config(ar); 918 break; 919 920 case ATH6KL_STATE_ON: 921 break; 922 923 case ATH6KL_STATE_DEEPSLEEP: 924 break; 925 926 case ATH6KL_STATE_WOW: 927 break; 928 929 case ATH6KL_STATE_SUSPENDING: 930 break; 931 932 case ATH6KL_STATE_RESUMING: 933 break; 934 935 case ATH6KL_STATE_RECOVERY: 936 break; 937 } 938 939 ath6kl_cfg80211_resume(ar); 940 941 return 0; 942 } 943 944 /* set the window address register (using 4-byte register access ). */ 945 static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr) 946 { 947 int status; 948 u8 addr_val[4]; 949 s32 i; 950 951 /* 952 * Write bytes 1,2,3 of the register to set the upper address bytes, 953 * the LSB is written last to initiate the access cycle 954 */ 955 956 for (i = 1; i <= 3; i++) { 957 /* 958 * Fill the buffer with the address byte value we want to 959 * hit 4 times. 960 */ 961 memset(addr_val, ((u8 *)&addr)[i], 4); 962 963 /* 964 * Hit each byte of the register address with a 4-byte 965 * write operation to the same address, this is a harmless 966 * operation. 967 */ 968 status = ath6kl_sdio_read_write_sync(ar, reg_addr + i, addr_val, 969 4, HIF_WR_SYNC_BYTE_FIX); 970 if (status) 971 break; 972 } 973 974 if (status) { 975 ath6kl_err("%s: failed to write initial bytes of 0x%x to window reg: 0x%X\n", 976 __func__, addr, reg_addr); 977 return status; 978 } 979 980 /* 981 * Write the address register again, this time write the whole 982 * 4-byte value. The effect here is that the LSB write causes the 983 * cycle to start, the extra 3 byte write to bytes 1,2,3 has no 984 * effect since we are writing the same values again 985 */ 986 status = ath6kl_sdio_read_write_sync(ar, reg_addr, (u8 *)(&addr), 987 4, HIF_WR_SYNC_BYTE_INC); 988 989 if (status) { 990 ath6kl_err("%s: failed to write 0x%x to window reg: 0x%X\n", 991 __func__, addr, reg_addr); 992 return status; 993 } 994 995 return 0; 996 } 997 998 static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data) 999 { 1000 int status; 1001 1002 /* set window register to start read cycle */ 1003 status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS, 1004 address); 1005 1006 if (status) 1007 return status; 1008 1009 /* read the data */ 1010 status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS, 1011 (u8 *)data, sizeof(u32), HIF_RD_SYNC_BYTE_INC); 1012 if (status) { 1013 ath6kl_err("%s: failed to read from window data addr\n", 1014 __func__); 1015 return status; 1016 } 1017 1018 return status; 1019 } 1020 1021 static int ath6kl_sdio_diag_write32(struct ath6kl *ar, u32 address, 1022 __le32 data) 1023 { 1024 int status; 1025 u32 val = (__force u32) data; 1026 1027 /* set write data */ 1028 status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS, 1029 (u8 *) &val, sizeof(u32), HIF_WR_SYNC_BYTE_INC); 1030 if (status) { 1031 ath6kl_err("%s: failed to write 0x%x to window data addr\n", 1032 __func__, data); 1033 return status; 1034 } 1035 1036 /* set window register, which starts the write cycle */ 1037 return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS, 1038 address); 1039 } 1040 1041 static int ath6kl_sdio_bmi_credits(struct ath6kl *ar) 1042 { 1043 u32 addr; 1044 unsigned long timeout; 1045 int ret; 1046 1047 ar->bmi.cmd_credits = 0; 1048 1049 /* Read the counter register to get the command credits */ 1050 addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4; 1051 1052 timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT); 1053 while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) { 1054 1055 /* 1056 * Hit the credit counter with a 4-byte access, the first byte 1057 * read will hit the counter and cause a decrement, while the 1058 * remaining 3 bytes has no effect. The rationale behind this 1059 * is to make all HIF accesses 4-byte aligned. 1060 */ 1061 ret = ath6kl_sdio_read_write_sync(ar, addr, 1062 (u8 *)&ar->bmi.cmd_credits, 4, 1063 HIF_RD_SYNC_BYTE_INC); 1064 if (ret) { 1065 ath6kl_err("Unable to decrement the command credit count register: %d\n", 1066 ret); 1067 return ret; 1068 } 1069 1070 /* The counter is only 8 bits. 1071 * Ignore anything in the upper 3 bytes 1072 */ 1073 ar->bmi.cmd_credits &= 0xFF; 1074 } 1075 1076 if (!ar->bmi.cmd_credits) { 1077 ath6kl_err("bmi communication timeout\n"); 1078 return -ETIMEDOUT; 1079 } 1080 1081 return 0; 1082 } 1083 1084 static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar) 1085 { 1086 unsigned long timeout; 1087 u32 rx_word = 0; 1088 int ret = 0; 1089 1090 timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT); 1091 while ((time_before(jiffies, timeout)) && !rx_word) { 1092 ret = ath6kl_sdio_read_write_sync(ar, 1093 RX_LOOKAHEAD_VALID_ADDRESS, 1094 (u8 *)&rx_word, sizeof(rx_word), 1095 HIF_RD_SYNC_BYTE_INC); 1096 if (ret) { 1097 ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n"); 1098 return ret; 1099 } 1100 1101 /* all we really want is one bit */ 1102 rx_word &= (1 << ENDPOINT1); 1103 } 1104 1105 if (!rx_word) { 1106 ath6kl_err("bmi_recv_buf FIFO empty\n"); 1107 return -EINVAL; 1108 } 1109 1110 return ret; 1111 } 1112 1113 static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len) 1114 { 1115 int ret; 1116 u32 addr; 1117 1118 ret = ath6kl_sdio_bmi_credits(ar); 1119 if (ret) 1120 return ret; 1121 1122 addr = ar->mbox_info.htc_addr; 1123 1124 ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len, 1125 HIF_WR_SYNC_BYTE_INC); 1126 if (ret) 1127 ath6kl_err("unable to send the bmi data to the device\n"); 1128 1129 return ret; 1130 } 1131 1132 static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len) 1133 { 1134 int ret; 1135 u32 addr; 1136 1137 /* 1138 * During normal bootup, small reads may be required. 1139 * Rather than issue an HIF Read and then wait as the Target 1140 * adds successive bytes to the FIFO, we wait here until 1141 * we know that response data is available. 1142 * 1143 * This allows us to cleanly timeout on an unexpected 1144 * Target failure rather than risk problems at the HIF level. 1145 * In particular, this avoids SDIO timeouts and possibly garbage 1146 * data on some host controllers. And on an interconnect 1147 * such as Compact Flash (as well as some SDIO masters) which 1148 * does not provide any indication on data timeout, it avoids 1149 * a potential hang or garbage response. 1150 * 1151 * Synchronization is more difficult for reads larger than the 1152 * size of the MBOX FIFO (128B), because the Target is unable 1153 * to push the 129th byte of data until AFTER the Host posts an 1154 * HIF Read and removes some FIFO data. So for large reads the 1155 * Host proceeds to post an HIF Read BEFORE all the data is 1156 * actually available to read. Fortunately, large BMI reads do 1157 * not occur in practice -- they're supported for debug/development. 1158 * 1159 * So Host/Target BMI synchronization is divided into these cases: 1160 * CASE 1: length < 4 1161 * Should not happen 1162 * 1163 * CASE 2: 4 <= length <= 128 1164 * Wait for first 4 bytes to be in FIFO 1165 * If CONSERVATIVE_BMI_READ is enabled, also wait for 1166 * a BMI command credit, which indicates that the ENTIRE 1167 * response is available in the the FIFO 1168 * 1169 * CASE 3: length > 128 1170 * Wait for the first 4 bytes to be in FIFO 1171 * 1172 * For most uses, a small timeout should be sufficient and we will 1173 * usually see a response quickly; but there may be some unusual 1174 * (debug) cases of BMI_EXECUTE where we want an larger timeout. 1175 * For now, we use an unbounded busy loop while waiting for 1176 * BMI_EXECUTE. 1177 * 1178 * If BMI_EXECUTE ever needs to support longer-latency execution, 1179 * especially in production, this code needs to be enhanced to sleep 1180 * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently 1181 * a function of Host processor speed. 1182 */ 1183 if (len >= 4) { /* NB: Currently, always true */ 1184 ret = ath6kl_bmi_get_rx_lkahd(ar); 1185 if (ret) 1186 return ret; 1187 } 1188 1189 addr = ar->mbox_info.htc_addr; 1190 ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len, 1191 HIF_RD_SYNC_BYTE_INC); 1192 if (ret) { 1193 ath6kl_err("Unable to read the bmi data from the device: %d\n", 1194 ret); 1195 return ret; 1196 } 1197 1198 return 0; 1199 } 1200 1201 static void ath6kl_sdio_stop(struct ath6kl *ar) 1202 { 1203 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 1204 struct bus_request *req, *tmp_req; 1205 void *context; 1206 1207 /* FIXME: make sure that wq is not queued again */ 1208 1209 cancel_work_sync(&ar_sdio->wr_async_work); 1210 1211 spin_lock_bh(&ar_sdio->wr_async_lock); 1212 1213 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { 1214 list_del(&req->list); 1215 1216 if (req->scat_req) { 1217 /* this is a scatter gather request */ 1218 req->scat_req->status = -ECANCELED; 1219 req->scat_req->complete(ar_sdio->ar->htc_target, 1220 req->scat_req); 1221 } else { 1222 context = req->packet; 1223 ath6kl_sdio_free_bus_req(ar_sdio, req); 1224 ath6kl_hif_rw_comp_handler(context, -ECANCELED); 1225 } 1226 } 1227 1228 spin_unlock_bh(&ar_sdio->wr_async_lock); 1229 1230 WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4); 1231 } 1232 1233 static const struct ath6kl_hif_ops ath6kl_sdio_ops = { 1234 .read_write_sync = ath6kl_sdio_read_write_sync, 1235 .write_async = ath6kl_sdio_write_async, 1236 .irq_enable = ath6kl_sdio_irq_enable, 1237 .irq_disable = ath6kl_sdio_irq_disable, 1238 .scatter_req_get = ath6kl_sdio_scatter_req_get, 1239 .scatter_req_add = ath6kl_sdio_scatter_req_add, 1240 .enable_scatter = ath6kl_sdio_enable_scatter, 1241 .scat_req_rw = ath6kl_sdio_async_rw_scatter, 1242 .cleanup_scatter = ath6kl_sdio_cleanup_scatter, 1243 .suspend = ath6kl_sdio_suspend, 1244 .resume = ath6kl_sdio_resume, 1245 .diag_read32 = ath6kl_sdio_diag_read32, 1246 .diag_write32 = ath6kl_sdio_diag_write32, 1247 .bmi_read = ath6kl_sdio_bmi_read, 1248 .bmi_write = ath6kl_sdio_bmi_write, 1249 .power_on = ath6kl_sdio_power_on, 1250 .power_off = ath6kl_sdio_power_off, 1251 .stop = ath6kl_sdio_stop, 1252 }; 1253 1254 #ifdef CONFIG_PM_SLEEP 1255 1256 /* 1257 * Empty handlers so that mmc subsystem doesn't remove us entirely during 1258 * suspend. We instead follow cfg80211 suspend/resume handlers. 1259 */ 1260 static int ath6kl_sdio_pm_suspend(struct device *device) 1261 { 1262 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n"); 1263 1264 return 0; 1265 } 1266 1267 static int ath6kl_sdio_pm_resume(struct device *device) 1268 { 1269 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n"); 1270 1271 return 0; 1272 } 1273 1274 static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend, 1275 ath6kl_sdio_pm_resume); 1276 1277 #define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops) 1278 1279 #else 1280 1281 #define ATH6KL_SDIO_PM_OPS NULL 1282 1283 #endif /* CONFIG_PM_SLEEP */ 1284 1285 static int ath6kl_sdio_probe(struct sdio_func *func, 1286 const struct sdio_device_id *id) 1287 { 1288 int ret; 1289 struct ath6kl_sdio *ar_sdio; 1290 struct ath6kl *ar; 1291 int count; 1292 1293 ath6kl_dbg(ATH6KL_DBG_BOOT, 1294 "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n", 1295 func->num, func->vendor, func->device, 1296 func->max_blksize, func->cur_blksize); 1297 1298 ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL); 1299 if (!ar_sdio) 1300 return -ENOMEM; 1301 1302 ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL); 1303 if (!ar_sdio->dma_buffer) { 1304 ret = -ENOMEM; 1305 goto err_hif; 1306 } 1307 1308 ar_sdio->func = func; 1309 sdio_set_drvdata(func, ar_sdio); 1310 1311 ar_sdio->id = id; 1312 ar_sdio->is_disabled = true; 1313 1314 spin_lock_init(&ar_sdio->lock); 1315 spin_lock_init(&ar_sdio->scat_lock); 1316 spin_lock_init(&ar_sdio->wr_async_lock); 1317 mutex_init(&ar_sdio->dma_buffer_mutex); 1318 1319 INIT_LIST_HEAD(&ar_sdio->scat_req); 1320 INIT_LIST_HEAD(&ar_sdio->bus_req_freeq); 1321 INIT_LIST_HEAD(&ar_sdio->wr_asyncq); 1322 1323 INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work); 1324 1325 init_waitqueue_head(&ar_sdio->irq_wq); 1326 1327 for (count = 0; count < BUS_REQUEST_MAX_NUM; count++) 1328 ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]); 1329 1330 ar = ath6kl_core_create(&ar_sdio->func->dev); 1331 if (!ar) { 1332 ath6kl_err("Failed to alloc ath6kl core\n"); 1333 ret = -ENOMEM; 1334 goto err_dma; 1335 } 1336 1337 ar_sdio->ar = ar; 1338 ar->hif_type = ATH6KL_HIF_TYPE_SDIO; 1339 ar->hif_priv = ar_sdio; 1340 ar->hif_ops = &ath6kl_sdio_ops; 1341 ar->bmi.max_data_size = 256; 1342 1343 ath6kl_sdio_set_mbox_info(ar); 1344 1345 ret = ath6kl_sdio_config(ar); 1346 if (ret) { 1347 ath6kl_err("Failed to config sdio: %d\n", ret); 1348 goto err_core_alloc; 1349 } 1350 1351 ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_MBOX); 1352 if (ret) { 1353 ath6kl_err("Failed to init ath6kl core\n"); 1354 goto err_core_alloc; 1355 } 1356 1357 return ret; 1358 1359 err_core_alloc: 1360 ath6kl_core_destroy(ar_sdio->ar); 1361 err_dma: 1362 kfree(ar_sdio->dma_buffer); 1363 err_hif: 1364 kfree(ar_sdio); 1365 1366 return ret; 1367 } 1368 1369 static void ath6kl_sdio_remove(struct sdio_func *func) 1370 { 1371 struct ath6kl_sdio *ar_sdio; 1372 1373 ath6kl_dbg(ATH6KL_DBG_BOOT, 1374 "sdio removed func %d vendor 0x%x device 0x%x\n", 1375 func->num, func->vendor, func->device); 1376 1377 ar_sdio = sdio_get_drvdata(func); 1378 1379 ath6kl_stop_txrx(ar_sdio->ar); 1380 cancel_work_sync(&ar_sdio->wr_async_work); 1381 1382 ath6kl_core_cleanup(ar_sdio->ar); 1383 ath6kl_core_destroy(ar_sdio->ar); 1384 1385 kfree(ar_sdio->dma_buffer); 1386 kfree(ar_sdio); 1387 } 1388 1389 static const struct sdio_device_id ath6kl_sdio_devices[] = { 1390 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))}, 1391 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))}, 1392 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))}, 1393 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))}, 1394 {}, 1395 }; 1396 1397 MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices); 1398 1399 static struct sdio_driver ath6kl_sdio_driver = { 1400 .name = "ath6kl_sdio", 1401 .id_table = ath6kl_sdio_devices, 1402 .probe = ath6kl_sdio_probe, 1403 .remove = ath6kl_sdio_remove, 1404 .drv.pm = ATH6KL_SDIO_PM_OPS, 1405 }; 1406 1407 static int __init ath6kl_sdio_init(void) 1408 { 1409 int ret; 1410 1411 ret = sdio_register_driver(&ath6kl_sdio_driver); 1412 if (ret) 1413 ath6kl_err("sdio driver registration failed: %d\n", ret); 1414 1415 return ret; 1416 } 1417 1418 static void __exit ath6kl_sdio_exit(void) 1419 { 1420 sdio_unregister_driver(&ath6kl_sdio_driver); 1421 } 1422 1423 module_init(ath6kl_sdio_init); 1424 module_exit(ath6kl_sdio_exit); 1425 1426 MODULE_AUTHOR("Atheros Communications, Inc."); 1427 MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices"); 1428 MODULE_LICENSE("Dual BSD/GPL"); 1429 1430 MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_OTP_FILE); 1431 MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_FIRMWARE_FILE); 1432 MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_PATCH_FILE); 1433 MODULE_FIRMWARE(AR6003_HW_2_0_BOARD_DATA_FILE); 1434 MODULE_FIRMWARE(AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE); 1435 MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_OTP_FILE); 1436 MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_FIRMWARE_FILE); 1437 MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_PATCH_FILE); 1438 MODULE_FIRMWARE(AR6003_HW_2_1_1_BOARD_DATA_FILE); 1439 MODULE_FIRMWARE(AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE); 1440 MODULE_FIRMWARE(AR6004_HW_1_0_FW_DIR "/" AR6004_HW_1_0_FIRMWARE_FILE); 1441 MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE); 1442 MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE); 1443 MODULE_FIRMWARE(AR6004_HW_1_1_FW_DIR "/" AR6004_HW_1_1_FIRMWARE_FILE); 1444 MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE); 1445 MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE); 1446 MODULE_FIRMWARE(AR6004_HW_1_2_FW_DIR "/" AR6004_HW_1_2_FIRMWARE_FILE); 1447 MODULE_FIRMWARE(AR6004_HW_1_2_BOARD_DATA_FILE); 1448 MODULE_FIRMWARE(AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE); 1449 MODULE_FIRMWARE(AR6004_HW_1_3_FW_DIR "/" AR6004_HW_1_3_FIRMWARE_FILE); 1450 MODULE_FIRMWARE(AR6004_HW_1_3_BOARD_DATA_FILE); 1451 MODULE_FIRMWARE(AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE); 1452