1 /* 2 * Copyright (c) 2004-2011 Atheros Communications Inc. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for any 5 * purpose with or without fee is hereby granted, provided that the above 6 * copyright notice and this permission notice appear in all copies. 7 * 8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 */ 16 17 #include <linux/module.h> 18 #include <linux/mmc/card.h> 19 #include <linux/mmc/mmc.h> 20 #include <linux/mmc/host.h> 21 #include <linux/mmc/sdio_func.h> 22 #include <linux/mmc/sdio_ids.h> 23 #include <linux/mmc/sdio.h> 24 #include <linux/mmc/sd.h> 25 #include "hif.h" 26 #include "hif-ops.h" 27 #include "target.h" 28 #include "debug.h" 29 #include "cfg80211.h" 30 31 struct ath6kl_sdio { 32 struct sdio_func *func; 33 34 spinlock_t lock; 35 36 /* free list */ 37 struct list_head bus_req_freeq; 38 39 /* available bus requests */ 40 struct bus_request bus_req[BUS_REQUEST_MAX_NUM]; 41 42 struct ath6kl *ar; 43 44 u8 *dma_buffer; 45 46 /* protects access to dma_buffer */ 47 struct mutex dma_buffer_mutex; 48 49 /* scatter request list head */ 50 struct list_head scat_req; 51 52 /* Avoids disabling irq while the interrupts being handled */ 53 struct mutex mtx_irq; 54 55 spinlock_t scat_lock; 56 bool scatter_enabled; 57 58 bool is_disabled; 59 const struct sdio_device_id *id; 60 struct work_struct wr_async_work; 61 struct list_head wr_asyncq; 62 spinlock_t wr_async_lock; 63 }; 64 65 #define CMD53_ARG_READ 0 66 #define CMD53_ARG_WRITE 1 67 #define CMD53_ARG_BLOCK_BASIS 1 68 #define CMD53_ARG_FIXED_ADDRESS 0 69 #define CMD53_ARG_INCR_ADDRESS 1 70 71 static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar) 72 { 73 return ar->hif_priv; 74 } 75 76 /* 77 * Macro to check if DMA buffer is WORD-aligned and DMA-able. 78 * Most host controllers assume the buffer is DMA'able and will 79 * bug-check otherwise (i.e. buffers on the stack). virt_addr_valid 80 * check fails on stack memory. 81 */ 82 static inline bool buf_needs_bounce(u8 *buf) 83 { 84 return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf); 85 } 86 87 static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar) 88 { 89 struct ath6kl_mbox_info *mbox_info = &ar->mbox_info; 90 91 /* EP1 has an extended range */ 92 mbox_info->htc_addr = HIF_MBOX_BASE_ADDR; 93 mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR; 94 mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH; 95 mbox_info->block_size = HIF_MBOX_BLOCK_SIZE; 96 mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR; 97 mbox_info->gmbox_sz = HIF_GMBOX_WIDTH; 98 } 99 100 static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func, 101 u8 mode, u8 opcode, u32 addr, 102 u16 blksz) 103 { 104 *arg = (((rw & 1) << 31) | 105 ((func & 0x7) << 28) | 106 ((mode & 1) << 27) | 107 ((opcode & 1) << 26) | 108 ((addr & 0x1FFFF) << 9) | 109 (blksz & 0x1FF)); 110 } 111 112 static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw, 113 unsigned int address, 114 unsigned char val) 115 { 116 const u8 func = 0; 117 118 *arg = ((write & 1) << 31) | 119 ((func & 0x7) << 28) | 120 ((raw & 1) << 27) | 121 (1 << 26) | 122 ((address & 0x1FFFF) << 9) | 123 (1 << 8) | 124 (val & 0xFF); 125 } 126 127 static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card, 128 unsigned int address, 129 unsigned char byte) 130 { 131 struct mmc_command io_cmd; 132 133 memset(&io_cmd, 0, sizeof(io_cmd)); 134 ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte); 135 io_cmd.opcode = SD_IO_RW_DIRECT; 136 io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC; 137 138 return mmc_wait_for_cmd(card->host, &io_cmd, 0); 139 } 140 141 static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr, 142 u8 *buf, u32 len) 143 { 144 int ret = 0; 145 146 sdio_claim_host(func); 147 148 if (request & HIF_WRITE) { 149 /* FIXME: looks like ugly workaround for something */ 150 if (addr >= HIF_MBOX_BASE_ADDR && 151 addr <= HIF_MBOX_END_ADDR) 152 addr += (HIF_MBOX_WIDTH - len); 153 154 /* FIXME: this also looks like ugly workaround */ 155 if (addr == HIF_MBOX0_EXT_BASE_ADDR) 156 addr += HIF_MBOX0_EXT_WIDTH - len; 157 158 if (request & HIF_FIXED_ADDRESS) 159 ret = sdio_writesb(func, addr, buf, len); 160 else 161 ret = sdio_memcpy_toio(func, addr, buf, len); 162 } else { 163 if (request & HIF_FIXED_ADDRESS) 164 ret = sdio_readsb(func, buf, addr, len); 165 else 166 ret = sdio_memcpy_fromio(func, buf, addr, len); 167 } 168 169 sdio_release_host(func); 170 171 ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n", 172 request & HIF_WRITE ? "wr" : "rd", addr, 173 request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len); 174 ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len); 175 176 return ret; 177 } 178 179 static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio) 180 { 181 struct bus_request *bus_req; 182 183 spin_lock_bh(&ar_sdio->lock); 184 185 if (list_empty(&ar_sdio->bus_req_freeq)) { 186 spin_unlock_bh(&ar_sdio->lock); 187 return NULL; 188 } 189 190 bus_req = list_first_entry(&ar_sdio->bus_req_freeq, 191 struct bus_request, list); 192 list_del(&bus_req->list); 193 194 spin_unlock_bh(&ar_sdio->lock); 195 ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n", 196 __func__, bus_req); 197 198 return bus_req; 199 } 200 201 static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio, 202 struct bus_request *bus_req) 203 { 204 ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n", 205 __func__, bus_req); 206 207 spin_lock_bh(&ar_sdio->lock); 208 list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq); 209 spin_unlock_bh(&ar_sdio->lock); 210 } 211 212 static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req, 213 struct mmc_data *data) 214 { 215 struct scatterlist *sg; 216 int i; 217 218 data->blksz = HIF_MBOX_BLOCK_SIZE; 219 data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE; 220 221 ath6kl_dbg(ATH6KL_DBG_SCATTER, 222 "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d) , (tot:%d,sg:%d)\n", 223 (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr, 224 data->blksz, data->blocks, scat_req->len, 225 scat_req->scat_entries); 226 227 data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE : 228 MMC_DATA_READ; 229 230 /* fill SG entries */ 231 sg = scat_req->sgentries; 232 sg_init_table(sg, scat_req->scat_entries); 233 234 /* assemble SG list */ 235 for (i = 0; i < scat_req->scat_entries; i++, sg++) { 236 ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n", 237 i, scat_req->scat_list[i].buf, 238 scat_req->scat_list[i].len); 239 240 sg_set_buf(sg, scat_req->scat_list[i].buf, 241 scat_req->scat_list[i].len); 242 } 243 244 /* set scatter-gather table for request */ 245 data->sg = scat_req->sgentries; 246 data->sg_len = scat_req->scat_entries; 247 } 248 249 static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio, 250 struct bus_request *req) 251 { 252 struct mmc_request mmc_req; 253 struct mmc_command cmd; 254 struct mmc_data data; 255 struct hif_scatter_req *scat_req; 256 u8 opcode, rw; 257 int status, len; 258 259 scat_req = req->scat_req; 260 261 if (scat_req->virt_scat) { 262 len = scat_req->len; 263 if (scat_req->req & HIF_BLOCK_BASIS) 264 len = round_down(len, HIF_MBOX_BLOCK_SIZE); 265 266 status = ath6kl_sdio_io(ar_sdio->func, scat_req->req, 267 scat_req->addr, scat_req->virt_dma_buf, 268 len); 269 goto scat_complete; 270 } 271 272 memset(&mmc_req, 0, sizeof(struct mmc_request)); 273 memset(&cmd, 0, sizeof(struct mmc_command)); 274 memset(&data, 0, sizeof(struct mmc_data)); 275 276 ath6kl_sdio_setup_scat_data(scat_req, &data); 277 278 opcode = (scat_req->req & HIF_FIXED_ADDRESS) ? 279 CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS; 280 281 rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ; 282 283 /* Fixup the address so that the last byte will fall on MBOX EOM */ 284 if (scat_req->req & HIF_WRITE) { 285 if (scat_req->addr == HIF_MBOX_BASE_ADDR) 286 scat_req->addr += HIF_MBOX_WIDTH - scat_req->len; 287 else 288 /* Uses extended address range */ 289 scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len; 290 } 291 292 /* set command argument */ 293 ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num, 294 CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr, 295 data.blocks); 296 297 cmd.opcode = SD_IO_RW_EXTENDED; 298 cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC; 299 300 mmc_req.cmd = &cmd; 301 mmc_req.data = &data; 302 303 sdio_claim_host(ar_sdio->func); 304 305 mmc_set_data_timeout(&data, ar_sdio->func->card); 306 /* synchronous call to process request */ 307 mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req); 308 309 sdio_release_host(ar_sdio->func); 310 311 status = cmd.error ? cmd.error : data.error; 312 313 scat_complete: 314 scat_req->status = status; 315 316 if (scat_req->status) 317 ath6kl_err("Scatter write request failed:%d\n", 318 scat_req->status); 319 320 if (scat_req->req & HIF_ASYNCHRONOUS) 321 scat_req->complete(ar_sdio->ar->htc_target, scat_req); 322 323 return status; 324 } 325 326 static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio, 327 int n_scat_entry, int n_scat_req, 328 bool virt_scat) 329 { 330 struct hif_scatter_req *s_req; 331 struct bus_request *bus_req; 332 int i, scat_req_sz, scat_list_sz, sg_sz, buf_sz; 333 u8 *virt_buf; 334 335 scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item); 336 scat_req_sz = sizeof(*s_req) + scat_list_sz; 337 338 if (!virt_scat) 339 sg_sz = sizeof(struct scatterlist) * n_scat_entry; 340 else 341 buf_sz = 2 * L1_CACHE_BYTES + 342 ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER; 343 344 for (i = 0; i < n_scat_req; i++) { 345 /* allocate the scatter request */ 346 s_req = kzalloc(scat_req_sz, GFP_KERNEL); 347 if (!s_req) 348 return -ENOMEM; 349 350 if (virt_scat) { 351 virt_buf = kzalloc(buf_sz, GFP_KERNEL); 352 if (!virt_buf) { 353 kfree(s_req); 354 return -ENOMEM; 355 } 356 357 s_req->virt_dma_buf = 358 (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf); 359 } else { 360 /* allocate sglist */ 361 s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL); 362 363 if (!s_req->sgentries) { 364 kfree(s_req); 365 return -ENOMEM; 366 } 367 } 368 369 /* allocate a bus request for this scatter request */ 370 bus_req = ath6kl_sdio_alloc_busreq(ar_sdio); 371 if (!bus_req) { 372 kfree(s_req->sgentries); 373 kfree(s_req->virt_dma_buf); 374 kfree(s_req); 375 return -ENOMEM; 376 } 377 378 /* assign the scatter request to this bus request */ 379 bus_req->scat_req = s_req; 380 s_req->busrequest = bus_req; 381 382 s_req->virt_scat = virt_scat; 383 384 /* add it to the scatter pool */ 385 hif_scatter_req_add(ar_sdio->ar, s_req); 386 } 387 388 return 0; 389 } 390 391 static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf, 392 u32 len, u32 request) 393 { 394 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 395 u8 *tbuf = NULL; 396 int ret; 397 bool bounced = false; 398 399 if (request & HIF_BLOCK_BASIS) 400 len = round_down(len, HIF_MBOX_BLOCK_SIZE); 401 402 if (buf_needs_bounce(buf)) { 403 if (!ar_sdio->dma_buffer) 404 return -ENOMEM; 405 mutex_lock(&ar_sdio->dma_buffer_mutex); 406 tbuf = ar_sdio->dma_buffer; 407 memcpy(tbuf, buf, len); 408 bounced = true; 409 } else 410 tbuf = buf; 411 412 ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len); 413 if ((request & HIF_READ) && bounced) 414 memcpy(buf, tbuf, len); 415 416 if (bounced) 417 mutex_unlock(&ar_sdio->dma_buffer_mutex); 418 419 return ret; 420 } 421 422 static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio, 423 struct bus_request *req) 424 { 425 if (req->scat_req) 426 ath6kl_sdio_scat_rw(ar_sdio, req); 427 else { 428 void *context; 429 int status; 430 431 status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address, 432 req->buffer, req->length, 433 req->request); 434 context = req->packet; 435 ath6kl_sdio_free_bus_req(ar_sdio, req); 436 ath6kl_hif_rw_comp_handler(context, status); 437 } 438 } 439 440 static void ath6kl_sdio_write_async_work(struct work_struct *work) 441 { 442 struct ath6kl_sdio *ar_sdio; 443 struct bus_request *req, *tmp_req; 444 445 ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work); 446 447 spin_lock_bh(&ar_sdio->wr_async_lock); 448 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { 449 list_del(&req->list); 450 spin_unlock_bh(&ar_sdio->wr_async_lock); 451 __ath6kl_sdio_write_async(ar_sdio, req); 452 spin_lock_bh(&ar_sdio->wr_async_lock); 453 } 454 spin_unlock_bh(&ar_sdio->wr_async_lock); 455 } 456 457 static void ath6kl_sdio_irq_handler(struct sdio_func *func) 458 { 459 int status; 460 struct ath6kl_sdio *ar_sdio; 461 462 ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n"); 463 464 ar_sdio = sdio_get_drvdata(func); 465 mutex_lock(&ar_sdio->mtx_irq); 466 /* 467 * Release the host during interrups so we can pick it back up when 468 * we process commands. 469 */ 470 sdio_release_host(ar_sdio->func); 471 472 status = ath6kl_hif_intr_bh_handler(ar_sdio->ar); 473 sdio_claim_host(ar_sdio->func); 474 mutex_unlock(&ar_sdio->mtx_irq); 475 WARN_ON(status && status != -ECANCELED); 476 } 477 478 static int ath6kl_sdio_power_on(struct ath6kl *ar) 479 { 480 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 481 struct sdio_func *func = ar_sdio->func; 482 int ret = 0; 483 484 if (!ar_sdio->is_disabled) 485 return 0; 486 487 ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n"); 488 489 sdio_claim_host(func); 490 491 ret = sdio_enable_func(func); 492 if (ret) { 493 ath6kl_err("Unable to enable sdio func: %d)\n", ret); 494 sdio_release_host(func); 495 return ret; 496 } 497 498 sdio_release_host(func); 499 500 /* 501 * Wait for hardware to initialise. It should take a lot less than 502 * 10 ms but let's be conservative here. 503 */ 504 msleep(10); 505 506 ar_sdio->is_disabled = false; 507 508 return ret; 509 } 510 511 static int ath6kl_sdio_power_off(struct ath6kl *ar) 512 { 513 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 514 int ret; 515 516 if (ar_sdio->is_disabled) 517 return 0; 518 519 ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n"); 520 521 /* Disable the card */ 522 sdio_claim_host(ar_sdio->func); 523 ret = sdio_disable_func(ar_sdio->func); 524 sdio_release_host(ar_sdio->func); 525 526 if (ret) 527 return ret; 528 529 ar_sdio->is_disabled = true; 530 531 return ret; 532 } 533 534 static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer, 535 u32 length, u32 request, 536 struct htc_packet *packet) 537 { 538 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 539 struct bus_request *bus_req; 540 541 bus_req = ath6kl_sdio_alloc_busreq(ar_sdio); 542 543 if (!bus_req) 544 return -ENOMEM; 545 546 bus_req->address = address; 547 bus_req->buffer = buffer; 548 bus_req->length = length; 549 bus_req->request = request; 550 bus_req->packet = packet; 551 552 spin_lock_bh(&ar_sdio->wr_async_lock); 553 list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq); 554 spin_unlock_bh(&ar_sdio->wr_async_lock); 555 queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work); 556 557 return 0; 558 } 559 560 static void ath6kl_sdio_irq_enable(struct ath6kl *ar) 561 { 562 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 563 int ret; 564 565 sdio_claim_host(ar_sdio->func); 566 567 /* Register the isr */ 568 ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler); 569 if (ret) 570 ath6kl_err("Failed to claim sdio irq: %d\n", ret); 571 572 sdio_release_host(ar_sdio->func); 573 } 574 575 static void ath6kl_sdio_irq_disable(struct ath6kl *ar) 576 { 577 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 578 int ret; 579 580 sdio_claim_host(ar_sdio->func); 581 582 mutex_lock(&ar_sdio->mtx_irq); 583 584 ret = sdio_release_irq(ar_sdio->func); 585 if (ret) 586 ath6kl_err("Failed to release sdio irq: %d\n", ret); 587 588 mutex_unlock(&ar_sdio->mtx_irq); 589 590 sdio_release_host(ar_sdio->func); 591 } 592 593 static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar) 594 { 595 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 596 struct hif_scatter_req *node = NULL; 597 598 spin_lock_bh(&ar_sdio->scat_lock); 599 600 if (!list_empty(&ar_sdio->scat_req)) { 601 node = list_first_entry(&ar_sdio->scat_req, 602 struct hif_scatter_req, list); 603 list_del(&node->list); 604 } 605 606 spin_unlock_bh(&ar_sdio->scat_lock); 607 608 return node; 609 } 610 611 static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar, 612 struct hif_scatter_req *s_req) 613 { 614 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 615 616 spin_lock_bh(&ar_sdio->scat_lock); 617 618 list_add_tail(&s_req->list, &ar_sdio->scat_req); 619 620 spin_unlock_bh(&ar_sdio->scat_lock); 621 622 } 623 624 /* scatter gather read write request */ 625 static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar, 626 struct hif_scatter_req *scat_req) 627 { 628 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 629 u32 request = scat_req->req; 630 int status = 0; 631 632 if (!scat_req->len) 633 return -EINVAL; 634 635 ath6kl_dbg(ATH6KL_DBG_SCATTER, 636 "hif-scatter: total len: %d scatter entries: %d\n", 637 scat_req->len, scat_req->scat_entries); 638 639 if (request & HIF_SYNCHRONOUS) 640 status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest); 641 else { 642 spin_lock_bh(&ar_sdio->wr_async_lock); 643 list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq); 644 spin_unlock_bh(&ar_sdio->wr_async_lock); 645 queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work); 646 } 647 648 return status; 649 } 650 651 /* clean up scatter support */ 652 static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar) 653 { 654 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 655 struct hif_scatter_req *s_req, *tmp_req; 656 657 /* empty the free list */ 658 spin_lock_bh(&ar_sdio->scat_lock); 659 list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) { 660 list_del(&s_req->list); 661 spin_unlock_bh(&ar_sdio->scat_lock); 662 663 /* 664 * FIXME: should we also call completion handler with 665 * ath6kl_hif_rw_comp_handler() with status -ECANCELED so 666 * that the packet is properly freed? 667 */ 668 if (s_req->busrequest) 669 ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest); 670 kfree(s_req->virt_dma_buf); 671 kfree(s_req->sgentries); 672 kfree(s_req); 673 674 spin_lock_bh(&ar_sdio->scat_lock); 675 } 676 spin_unlock_bh(&ar_sdio->scat_lock); 677 } 678 679 /* setup of HIF scatter resources */ 680 static int ath6kl_sdio_enable_scatter(struct ath6kl *ar) 681 { 682 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 683 struct htc_target *target = ar->htc_target; 684 int ret; 685 bool virt_scat = false; 686 687 if (ar_sdio->scatter_enabled) 688 return 0; 689 690 ar_sdio->scatter_enabled = true; 691 692 /* check if host supports scatter and it meets our requirements */ 693 if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) { 694 ath6kl_err("host only supports scatter of :%d entries, need: %d\n", 695 ar_sdio->func->card->host->max_segs, 696 MAX_SCATTER_ENTRIES_PER_REQ); 697 virt_scat = true; 698 } 699 700 if (!virt_scat) { 701 ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio, 702 MAX_SCATTER_ENTRIES_PER_REQ, 703 MAX_SCATTER_REQUESTS, virt_scat); 704 705 if (!ret) { 706 ath6kl_dbg(ATH6KL_DBG_BOOT, 707 "hif-scatter enabled requests %d entries %d\n", 708 MAX_SCATTER_REQUESTS, 709 MAX_SCATTER_ENTRIES_PER_REQ); 710 711 target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ; 712 target->max_xfer_szper_scatreq = 713 MAX_SCATTER_REQ_TRANSFER_SIZE; 714 } else { 715 ath6kl_sdio_cleanup_scatter(ar); 716 ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n"); 717 } 718 } 719 720 if (virt_scat || ret) { 721 ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio, 722 ATH6KL_SCATTER_ENTRIES_PER_REQ, 723 ATH6KL_SCATTER_REQS, virt_scat); 724 725 if (ret) { 726 ath6kl_err("failed to alloc virtual scatter resources !\n"); 727 ath6kl_sdio_cleanup_scatter(ar); 728 return ret; 729 } 730 731 ath6kl_dbg(ATH6KL_DBG_BOOT, 732 "virtual scatter enabled requests %d entries %d\n", 733 ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ); 734 735 target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ; 736 target->max_xfer_szper_scatreq = 737 ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER; 738 } 739 740 return 0; 741 } 742 743 static int ath6kl_sdio_config(struct ath6kl *ar) 744 { 745 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 746 struct sdio_func *func = ar_sdio->func; 747 int ret; 748 749 sdio_claim_host(func); 750 751 if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >= 752 MANUFACTURER_ID_AR6003_BASE) { 753 /* enable 4-bit ASYNC interrupt on AR6003 or later */ 754 ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card, 755 CCCR_SDIO_IRQ_MODE_REG, 756 SDIO_IRQ_MODE_ASYNC_4BIT_IRQ); 757 if (ret) { 758 ath6kl_err("Failed to enable 4-bit async irq mode %d\n", 759 ret); 760 goto out; 761 } 762 763 ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n"); 764 } 765 766 /* give us some time to enable, in ms */ 767 func->enable_timeout = 100; 768 769 ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE); 770 if (ret) { 771 ath6kl_err("Set sdio block size %d failed: %d)\n", 772 HIF_MBOX_BLOCK_SIZE, ret); 773 goto out; 774 } 775 776 out: 777 sdio_release_host(func); 778 779 return ret; 780 } 781 782 static int ath6kl_set_sdio_pm_caps(struct ath6kl *ar) 783 { 784 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 785 struct sdio_func *func = ar_sdio->func; 786 mmc_pm_flag_t flags; 787 int ret; 788 789 flags = sdio_get_host_pm_caps(func); 790 791 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags); 792 793 if (!(flags & MMC_PM_WAKE_SDIO_IRQ) || 794 !(flags & MMC_PM_KEEP_POWER)) 795 return -EINVAL; 796 797 ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); 798 if (ret) { 799 ath6kl_err("set sdio keep pwr flag failed: %d\n", ret); 800 return ret; 801 } 802 803 /* sdio irq wakes up host */ 804 ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ); 805 if (ret) 806 ath6kl_err("set sdio wake irq flag failed: %d\n", ret); 807 808 return ret; 809 } 810 811 static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow) 812 { 813 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 814 struct sdio_func *func = ar_sdio->func; 815 mmc_pm_flag_t flags; 816 int ret; 817 818 if (ar->state == ATH6KL_STATE_SCHED_SCAN) { 819 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sched scan is in progress\n"); 820 821 ret = ath6kl_set_sdio_pm_caps(ar); 822 if (ret) 823 goto cut_pwr; 824 825 ret = ath6kl_cfg80211_suspend(ar, 826 ATH6KL_CFG_SUSPEND_SCHED_SCAN, 827 NULL); 828 if (ret) 829 goto cut_pwr; 830 831 return 0; 832 } 833 834 if (ar->suspend_mode == WLAN_POWER_STATE_WOW || 835 (!ar->suspend_mode && wow)) { 836 837 ret = ath6kl_set_sdio_pm_caps(ar); 838 if (ret) 839 goto cut_pwr; 840 841 ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow); 842 if (ret) 843 goto cut_pwr; 844 845 return 0; 846 } 847 848 if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP || 849 !ar->suspend_mode) { 850 851 flags = sdio_get_host_pm_caps(func); 852 if (!(flags & MMC_PM_KEEP_POWER)) 853 goto cut_pwr; 854 855 ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); 856 if (ret) 857 goto cut_pwr; 858 859 /* 860 * Workaround to support Deep Sleep with MSM, set the host pm 861 * flag as MMC_PM_WAKE_SDIO_IRQ to allow SDCC deiver to disable 862 * the sdc2_clock and internally allows MSM to enter 863 * TCXO shutdown properly. 864 */ 865 if ((flags & MMC_PM_WAKE_SDIO_IRQ)) { 866 ret = sdio_set_host_pm_flags(func, 867 MMC_PM_WAKE_SDIO_IRQ); 868 if (ret) 869 goto cut_pwr; 870 } 871 872 ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP, 873 NULL); 874 if (ret) 875 goto cut_pwr; 876 877 return 0; 878 } 879 880 cut_pwr: 881 return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER, NULL); 882 } 883 884 static int ath6kl_sdio_resume(struct ath6kl *ar) 885 { 886 switch (ar->state) { 887 case ATH6KL_STATE_OFF: 888 case ATH6KL_STATE_CUTPOWER: 889 ath6kl_dbg(ATH6KL_DBG_SUSPEND, 890 "sdio resume configuring sdio\n"); 891 892 /* need to set sdio settings after power is cut from sdio */ 893 ath6kl_sdio_config(ar); 894 break; 895 896 case ATH6KL_STATE_ON: 897 break; 898 899 case ATH6KL_STATE_DEEPSLEEP: 900 break; 901 902 case ATH6KL_STATE_WOW: 903 break; 904 case ATH6KL_STATE_SCHED_SCAN: 905 break; 906 } 907 908 ath6kl_cfg80211_resume(ar); 909 910 return 0; 911 } 912 913 /* set the window address register (using 4-byte register access ). */ 914 static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr) 915 { 916 int status; 917 u8 addr_val[4]; 918 s32 i; 919 920 /* 921 * Write bytes 1,2,3 of the register to set the upper address bytes, 922 * the LSB is written last to initiate the access cycle 923 */ 924 925 for (i = 1; i <= 3; i++) { 926 /* 927 * Fill the buffer with the address byte value we want to 928 * hit 4 times. 929 */ 930 memset(addr_val, ((u8 *)&addr)[i], 4); 931 932 /* 933 * Hit each byte of the register address with a 4-byte 934 * write operation to the same address, this is a harmless 935 * operation. 936 */ 937 status = ath6kl_sdio_read_write_sync(ar, reg_addr + i, addr_val, 938 4, HIF_WR_SYNC_BYTE_FIX); 939 if (status) 940 break; 941 } 942 943 if (status) { 944 ath6kl_err("%s: failed to write initial bytes of 0x%x " 945 "to window reg: 0x%X\n", __func__, 946 addr, reg_addr); 947 return status; 948 } 949 950 /* 951 * Write the address register again, this time write the whole 952 * 4-byte value. The effect here is that the LSB write causes the 953 * cycle to start, the extra 3 byte write to bytes 1,2,3 has no 954 * effect since we are writing the same values again 955 */ 956 status = ath6kl_sdio_read_write_sync(ar, reg_addr, (u8 *)(&addr), 957 4, HIF_WR_SYNC_BYTE_INC); 958 959 if (status) { 960 ath6kl_err("%s: failed to write 0x%x to window reg: 0x%X\n", 961 __func__, addr, reg_addr); 962 return status; 963 } 964 965 return 0; 966 } 967 968 static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data) 969 { 970 int status; 971 972 /* set window register to start read cycle */ 973 status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS, 974 address); 975 976 if (status) 977 return status; 978 979 /* read the data */ 980 status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS, 981 (u8 *)data, sizeof(u32), HIF_RD_SYNC_BYTE_INC); 982 if (status) { 983 ath6kl_err("%s: failed to read from window data addr\n", 984 __func__); 985 return status; 986 } 987 988 return status; 989 } 990 991 static int ath6kl_sdio_diag_write32(struct ath6kl *ar, u32 address, 992 __le32 data) 993 { 994 int status; 995 u32 val = (__force u32) data; 996 997 /* set write data */ 998 status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS, 999 (u8 *) &val, sizeof(u32), HIF_WR_SYNC_BYTE_INC); 1000 if (status) { 1001 ath6kl_err("%s: failed to write 0x%x to window data addr\n", 1002 __func__, data); 1003 return status; 1004 } 1005 1006 /* set window register, which starts the write cycle */ 1007 return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS, 1008 address); 1009 } 1010 1011 static int ath6kl_sdio_bmi_credits(struct ath6kl *ar) 1012 { 1013 u32 addr; 1014 unsigned long timeout; 1015 int ret; 1016 1017 ar->bmi.cmd_credits = 0; 1018 1019 /* Read the counter register to get the command credits */ 1020 addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4; 1021 1022 timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT); 1023 while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) { 1024 1025 /* 1026 * Hit the credit counter with a 4-byte access, the first byte 1027 * read will hit the counter and cause a decrement, while the 1028 * remaining 3 bytes has no effect. The rationale behind this 1029 * is to make all HIF accesses 4-byte aligned. 1030 */ 1031 ret = ath6kl_sdio_read_write_sync(ar, addr, 1032 (u8 *)&ar->bmi.cmd_credits, 4, 1033 HIF_RD_SYNC_BYTE_INC); 1034 if (ret) { 1035 ath6kl_err("Unable to decrement the command credit " 1036 "count register: %d\n", ret); 1037 return ret; 1038 } 1039 1040 /* The counter is only 8 bits. 1041 * Ignore anything in the upper 3 bytes 1042 */ 1043 ar->bmi.cmd_credits &= 0xFF; 1044 } 1045 1046 if (!ar->bmi.cmd_credits) { 1047 ath6kl_err("bmi communication timeout\n"); 1048 return -ETIMEDOUT; 1049 } 1050 1051 return 0; 1052 } 1053 1054 static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar) 1055 { 1056 unsigned long timeout; 1057 u32 rx_word = 0; 1058 int ret = 0; 1059 1060 timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT); 1061 while ((time_before(jiffies, timeout)) && !rx_word) { 1062 ret = ath6kl_sdio_read_write_sync(ar, 1063 RX_LOOKAHEAD_VALID_ADDRESS, 1064 (u8 *)&rx_word, sizeof(rx_word), 1065 HIF_RD_SYNC_BYTE_INC); 1066 if (ret) { 1067 ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n"); 1068 return ret; 1069 } 1070 1071 /* all we really want is one bit */ 1072 rx_word &= (1 << ENDPOINT1); 1073 } 1074 1075 if (!rx_word) { 1076 ath6kl_err("bmi_recv_buf FIFO empty\n"); 1077 return -EINVAL; 1078 } 1079 1080 return ret; 1081 } 1082 1083 static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len) 1084 { 1085 int ret; 1086 u32 addr; 1087 1088 ret = ath6kl_sdio_bmi_credits(ar); 1089 if (ret) 1090 return ret; 1091 1092 addr = ar->mbox_info.htc_addr; 1093 1094 ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len, 1095 HIF_WR_SYNC_BYTE_INC); 1096 if (ret) 1097 ath6kl_err("unable to send the bmi data to the device\n"); 1098 1099 return ret; 1100 } 1101 1102 static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len) 1103 { 1104 int ret; 1105 u32 addr; 1106 1107 /* 1108 * During normal bootup, small reads may be required. 1109 * Rather than issue an HIF Read and then wait as the Target 1110 * adds successive bytes to the FIFO, we wait here until 1111 * we know that response data is available. 1112 * 1113 * This allows us to cleanly timeout on an unexpected 1114 * Target failure rather than risk problems at the HIF level. 1115 * In particular, this avoids SDIO timeouts and possibly garbage 1116 * data on some host controllers. And on an interconnect 1117 * such as Compact Flash (as well as some SDIO masters) which 1118 * does not provide any indication on data timeout, it avoids 1119 * a potential hang or garbage response. 1120 * 1121 * Synchronization is more difficult for reads larger than the 1122 * size of the MBOX FIFO (128B), because the Target is unable 1123 * to push the 129th byte of data until AFTER the Host posts an 1124 * HIF Read and removes some FIFO data. So for large reads the 1125 * Host proceeds to post an HIF Read BEFORE all the data is 1126 * actually available to read. Fortunately, large BMI reads do 1127 * not occur in practice -- they're supported for debug/development. 1128 * 1129 * So Host/Target BMI synchronization is divided into these cases: 1130 * CASE 1: length < 4 1131 * Should not happen 1132 * 1133 * CASE 2: 4 <= length <= 128 1134 * Wait for first 4 bytes to be in FIFO 1135 * If CONSERVATIVE_BMI_READ is enabled, also wait for 1136 * a BMI command credit, which indicates that the ENTIRE 1137 * response is available in the the FIFO 1138 * 1139 * CASE 3: length > 128 1140 * Wait for the first 4 bytes to be in FIFO 1141 * 1142 * For most uses, a small timeout should be sufficient and we will 1143 * usually see a response quickly; but there may be some unusual 1144 * (debug) cases of BMI_EXECUTE where we want an larger timeout. 1145 * For now, we use an unbounded busy loop while waiting for 1146 * BMI_EXECUTE. 1147 * 1148 * If BMI_EXECUTE ever needs to support longer-latency execution, 1149 * especially in production, this code needs to be enhanced to sleep 1150 * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently 1151 * a function of Host processor speed. 1152 */ 1153 if (len >= 4) { /* NB: Currently, always true */ 1154 ret = ath6kl_bmi_get_rx_lkahd(ar); 1155 if (ret) 1156 return ret; 1157 } 1158 1159 addr = ar->mbox_info.htc_addr; 1160 ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len, 1161 HIF_RD_SYNC_BYTE_INC); 1162 if (ret) { 1163 ath6kl_err("Unable to read the bmi data from the device: %d\n", 1164 ret); 1165 return ret; 1166 } 1167 1168 return 0; 1169 } 1170 1171 static void ath6kl_sdio_stop(struct ath6kl *ar) 1172 { 1173 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 1174 struct bus_request *req, *tmp_req; 1175 void *context; 1176 1177 /* FIXME: make sure that wq is not queued again */ 1178 1179 cancel_work_sync(&ar_sdio->wr_async_work); 1180 1181 spin_lock_bh(&ar_sdio->wr_async_lock); 1182 1183 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { 1184 list_del(&req->list); 1185 1186 if (req->scat_req) { 1187 /* this is a scatter gather request */ 1188 req->scat_req->status = -ECANCELED; 1189 req->scat_req->complete(ar_sdio->ar->htc_target, 1190 req->scat_req); 1191 } else { 1192 context = req->packet; 1193 ath6kl_sdio_free_bus_req(ar_sdio, req); 1194 ath6kl_hif_rw_comp_handler(context, -ECANCELED); 1195 } 1196 } 1197 1198 spin_unlock_bh(&ar_sdio->wr_async_lock); 1199 1200 WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4); 1201 } 1202 1203 static const struct ath6kl_hif_ops ath6kl_sdio_ops = { 1204 .read_write_sync = ath6kl_sdio_read_write_sync, 1205 .write_async = ath6kl_sdio_write_async, 1206 .irq_enable = ath6kl_sdio_irq_enable, 1207 .irq_disable = ath6kl_sdio_irq_disable, 1208 .scatter_req_get = ath6kl_sdio_scatter_req_get, 1209 .scatter_req_add = ath6kl_sdio_scatter_req_add, 1210 .enable_scatter = ath6kl_sdio_enable_scatter, 1211 .scat_req_rw = ath6kl_sdio_async_rw_scatter, 1212 .cleanup_scatter = ath6kl_sdio_cleanup_scatter, 1213 .suspend = ath6kl_sdio_suspend, 1214 .resume = ath6kl_sdio_resume, 1215 .diag_read32 = ath6kl_sdio_diag_read32, 1216 .diag_write32 = ath6kl_sdio_diag_write32, 1217 .bmi_read = ath6kl_sdio_bmi_read, 1218 .bmi_write = ath6kl_sdio_bmi_write, 1219 .power_on = ath6kl_sdio_power_on, 1220 .power_off = ath6kl_sdio_power_off, 1221 .stop = ath6kl_sdio_stop, 1222 }; 1223 1224 #ifdef CONFIG_PM_SLEEP 1225 1226 /* 1227 * Empty handlers so that mmc subsystem doesn't remove us entirely during 1228 * suspend. We instead follow cfg80211 suspend/resume handlers. 1229 */ 1230 static int ath6kl_sdio_pm_suspend(struct device *device) 1231 { 1232 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n"); 1233 1234 return 0; 1235 } 1236 1237 static int ath6kl_sdio_pm_resume(struct device *device) 1238 { 1239 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n"); 1240 1241 return 0; 1242 } 1243 1244 static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend, 1245 ath6kl_sdio_pm_resume); 1246 1247 #define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops) 1248 1249 #else 1250 1251 #define ATH6KL_SDIO_PM_OPS NULL 1252 1253 #endif /* CONFIG_PM_SLEEP */ 1254 1255 static int ath6kl_sdio_probe(struct sdio_func *func, 1256 const struct sdio_device_id *id) 1257 { 1258 int ret; 1259 struct ath6kl_sdio *ar_sdio; 1260 struct ath6kl *ar; 1261 int count; 1262 1263 ath6kl_dbg(ATH6KL_DBG_BOOT, 1264 "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n", 1265 func->num, func->vendor, func->device, 1266 func->max_blksize, func->cur_blksize); 1267 1268 ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL); 1269 if (!ar_sdio) 1270 return -ENOMEM; 1271 1272 ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL); 1273 if (!ar_sdio->dma_buffer) { 1274 ret = -ENOMEM; 1275 goto err_hif; 1276 } 1277 1278 ar_sdio->func = func; 1279 sdio_set_drvdata(func, ar_sdio); 1280 1281 ar_sdio->id = id; 1282 ar_sdio->is_disabled = true; 1283 1284 spin_lock_init(&ar_sdio->lock); 1285 spin_lock_init(&ar_sdio->scat_lock); 1286 spin_lock_init(&ar_sdio->wr_async_lock); 1287 mutex_init(&ar_sdio->dma_buffer_mutex); 1288 mutex_init(&ar_sdio->mtx_irq); 1289 1290 INIT_LIST_HEAD(&ar_sdio->scat_req); 1291 INIT_LIST_HEAD(&ar_sdio->bus_req_freeq); 1292 INIT_LIST_HEAD(&ar_sdio->wr_asyncq); 1293 1294 INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work); 1295 1296 for (count = 0; count < BUS_REQUEST_MAX_NUM; count++) 1297 ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]); 1298 1299 ar = ath6kl_core_create(&ar_sdio->func->dev); 1300 if (!ar) { 1301 ath6kl_err("Failed to alloc ath6kl core\n"); 1302 ret = -ENOMEM; 1303 goto err_dma; 1304 } 1305 1306 ar_sdio->ar = ar; 1307 ar->hif_type = ATH6KL_HIF_TYPE_SDIO; 1308 ar->hif_priv = ar_sdio; 1309 ar->hif_ops = &ath6kl_sdio_ops; 1310 ar->bmi.max_data_size = 256; 1311 1312 ath6kl_sdio_set_mbox_info(ar); 1313 1314 ret = ath6kl_sdio_config(ar); 1315 if (ret) { 1316 ath6kl_err("Failed to config sdio: %d\n", ret); 1317 goto err_core_alloc; 1318 } 1319 1320 ret = ath6kl_core_init(ar); 1321 if (ret) { 1322 ath6kl_err("Failed to init ath6kl core\n"); 1323 goto err_core_alloc; 1324 } 1325 1326 return ret; 1327 1328 err_core_alloc: 1329 ath6kl_core_destroy(ar_sdio->ar); 1330 err_dma: 1331 kfree(ar_sdio->dma_buffer); 1332 err_hif: 1333 kfree(ar_sdio); 1334 1335 return ret; 1336 } 1337 1338 static void ath6kl_sdio_remove(struct sdio_func *func) 1339 { 1340 struct ath6kl_sdio *ar_sdio; 1341 1342 ath6kl_dbg(ATH6KL_DBG_BOOT, 1343 "sdio removed func %d vendor 0x%x device 0x%x\n", 1344 func->num, func->vendor, func->device); 1345 1346 ar_sdio = sdio_get_drvdata(func); 1347 1348 ath6kl_stop_txrx(ar_sdio->ar); 1349 cancel_work_sync(&ar_sdio->wr_async_work); 1350 1351 ath6kl_core_cleanup(ar_sdio->ar); 1352 ath6kl_core_destroy(ar_sdio->ar); 1353 1354 kfree(ar_sdio->dma_buffer); 1355 kfree(ar_sdio); 1356 } 1357 1358 static const struct sdio_device_id ath6kl_sdio_devices[] = { 1359 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))}, 1360 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))}, 1361 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))}, 1362 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))}, 1363 {}, 1364 }; 1365 1366 MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices); 1367 1368 static struct sdio_driver ath6kl_sdio_driver = { 1369 .name = "ath6kl_sdio", 1370 .id_table = ath6kl_sdio_devices, 1371 .probe = ath6kl_sdio_probe, 1372 .remove = ath6kl_sdio_remove, 1373 .drv.pm = ATH6KL_SDIO_PM_OPS, 1374 }; 1375 1376 static int __init ath6kl_sdio_init(void) 1377 { 1378 int ret; 1379 1380 ret = sdio_register_driver(&ath6kl_sdio_driver); 1381 if (ret) 1382 ath6kl_err("sdio driver registration failed: %d\n", ret); 1383 1384 return ret; 1385 } 1386 1387 static void __exit ath6kl_sdio_exit(void) 1388 { 1389 sdio_unregister_driver(&ath6kl_sdio_driver); 1390 } 1391 1392 module_init(ath6kl_sdio_init); 1393 module_exit(ath6kl_sdio_exit); 1394 1395 MODULE_AUTHOR("Atheros Communications, Inc."); 1396 MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices"); 1397 MODULE_LICENSE("Dual BSD/GPL"); 1398 1399 MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_OTP_FILE); 1400 MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_FIRMWARE_FILE); 1401 MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_PATCH_FILE); 1402 MODULE_FIRMWARE(AR6003_HW_2_0_BOARD_DATA_FILE); 1403 MODULE_FIRMWARE(AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE); 1404 MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_OTP_FILE); 1405 MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_FIRMWARE_FILE); 1406 MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_PATCH_FILE); 1407 MODULE_FIRMWARE(AR6003_HW_2_1_1_BOARD_DATA_FILE); 1408 MODULE_FIRMWARE(AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE); 1409 MODULE_FIRMWARE(AR6004_HW_1_0_FW_DIR "/" AR6004_HW_1_0_FIRMWARE_FILE); 1410 MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE); 1411 MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE); 1412 MODULE_FIRMWARE(AR6004_HW_1_1_FW_DIR "/" AR6004_HW_1_1_FIRMWARE_FILE); 1413 MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE); 1414 MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE); 1415