1 /* 2 * Copyright (c) 2004-2011 Atheros Communications Inc. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for any 5 * purpose with or without fee is hereby granted, provided that the above 6 * copyright notice and this permission notice appear in all copies. 7 * 8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 */ 16 17 #include <linux/module.h> 18 #include <linux/mmc/card.h> 19 #include <linux/mmc/mmc.h> 20 #include <linux/mmc/host.h> 21 #include <linux/mmc/sdio_func.h> 22 #include <linux/mmc/sdio_ids.h> 23 #include <linux/mmc/sdio.h> 24 #include <linux/mmc/sd.h> 25 #include "hif.h" 26 #include "hif-ops.h" 27 #include "target.h" 28 #include "debug.h" 29 #include "cfg80211.h" 30 31 struct ath6kl_sdio { 32 struct sdio_func *func; 33 34 spinlock_t lock; 35 36 /* free list */ 37 struct list_head bus_req_freeq; 38 39 /* available bus requests */ 40 struct bus_request bus_req[BUS_REQUEST_MAX_NUM]; 41 42 struct ath6kl *ar; 43 44 u8 *dma_buffer; 45 46 /* protects access to dma_buffer */ 47 struct mutex dma_buffer_mutex; 48 49 /* scatter request list head */ 50 struct list_head scat_req; 51 52 /* Avoids disabling irq while the interrupts being handled */ 53 struct mutex mtx_irq; 54 55 spinlock_t scat_lock; 56 bool scatter_enabled; 57 58 bool is_disabled; 59 const struct sdio_device_id *id; 60 struct work_struct wr_async_work; 61 struct list_head wr_asyncq; 62 spinlock_t wr_async_lock; 63 }; 64 65 #define CMD53_ARG_READ 0 66 #define CMD53_ARG_WRITE 1 67 #define CMD53_ARG_BLOCK_BASIS 1 68 #define CMD53_ARG_FIXED_ADDRESS 0 69 #define CMD53_ARG_INCR_ADDRESS 1 70 71 static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar) 72 { 73 return ar->hif_priv; 74 } 75 76 /* 77 * Macro to check if DMA buffer is WORD-aligned and DMA-able. 78 * Most host controllers assume the buffer is DMA'able and will 79 * bug-check otherwise (i.e. buffers on the stack). virt_addr_valid 80 * check fails on stack memory. 81 */ 82 static inline bool buf_needs_bounce(u8 *buf) 83 { 84 return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf); 85 } 86 87 static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar) 88 { 89 struct ath6kl_mbox_info *mbox_info = &ar->mbox_info; 90 91 /* EP1 has an extended range */ 92 mbox_info->htc_addr = HIF_MBOX_BASE_ADDR; 93 mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR; 94 mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH; 95 mbox_info->block_size = HIF_MBOX_BLOCK_SIZE; 96 mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR; 97 mbox_info->gmbox_sz = HIF_GMBOX_WIDTH; 98 } 99 100 static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func, 101 u8 mode, u8 opcode, u32 addr, 102 u16 blksz) 103 { 104 *arg = (((rw & 1) << 31) | 105 ((func & 0x7) << 28) | 106 ((mode & 1) << 27) | 107 ((opcode & 1) << 26) | 108 ((addr & 0x1FFFF) << 9) | 109 (blksz & 0x1FF)); 110 } 111 112 static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw, 113 unsigned int address, 114 unsigned char val) 115 { 116 const u8 func = 0; 117 118 *arg = ((write & 1) << 31) | 119 ((func & 0x7) << 28) | 120 ((raw & 1) << 27) | 121 (1 << 26) | 122 ((address & 0x1FFFF) << 9) | 123 (1 << 8) | 124 (val & 0xFF); 125 } 126 127 static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card, 128 unsigned int address, 129 unsigned char byte) 130 { 131 struct mmc_command io_cmd; 132 133 memset(&io_cmd, 0, sizeof(io_cmd)); 134 ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte); 135 io_cmd.opcode = SD_IO_RW_DIRECT; 136 io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC; 137 138 return mmc_wait_for_cmd(card->host, &io_cmd, 0); 139 } 140 141 static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr, 142 u8 *buf, u32 len) 143 { 144 int ret = 0; 145 146 sdio_claim_host(func); 147 148 if (request & HIF_WRITE) { 149 /* FIXME: looks like ugly workaround for something */ 150 if (addr >= HIF_MBOX_BASE_ADDR && 151 addr <= HIF_MBOX_END_ADDR) 152 addr += (HIF_MBOX_WIDTH - len); 153 154 /* FIXME: this also looks like ugly workaround */ 155 if (addr == HIF_MBOX0_EXT_BASE_ADDR) 156 addr += HIF_MBOX0_EXT_WIDTH - len; 157 158 if (request & HIF_FIXED_ADDRESS) 159 ret = sdio_writesb(func, addr, buf, len); 160 else 161 ret = sdio_memcpy_toio(func, addr, buf, len); 162 } else { 163 if (request & HIF_FIXED_ADDRESS) 164 ret = sdio_readsb(func, buf, addr, len); 165 else 166 ret = sdio_memcpy_fromio(func, buf, addr, len); 167 } 168 169 sdio_release_host(func); 170 171 ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n", 172 request & HIF_WRITE ? "wr" : "rd", addr, 173 request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len); 174 ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len); 175 176 return ret; 177 } 178 179 static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio) 180 { 181 struct bus_request *bus_req; 182 183 spin_lock_bh(&ar_sdio->lock); 184 185 if (list_empty(&ar_sdio->bus_req_freeq)) { 186 spin_unlock_bh(&ar_sdio->lock); 187 return NULL; 188 } 189 190 bus_req = list_first_entry(&ar_sdio->bus_req_freeq, 191 struct bus_request, list); 192 list_del(&bus_req->list); 193 194 spin_unlock_bh(&ar_sdio->lock); 195 ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n", 196 __func__, bus_req); 197 198 return bus_req; 199 } 200 201 static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio, 202 struct bus_request *bus_req) 203 { 204 ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n", 205 __func__, bus_req); 206 207 spin_lock_bh(&ar_sdio->lock); 208 list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq); 209 spin_unlock_bh(&ar_sdio->lock); 210 } 211 212 static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req, 213 struct mmc_data *data) 214 { 215 struct scatterlist *sg; 216 int i; 217 218 data->blksz = HIF_MBOX_BLOCK_SIZE; 219 data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE; 220 221 ath6kl_dbg(ATH6KL_DBG_SCATTER, 222 "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d) , (tot:%d,sg:%d)\n", 223 (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr, 224 data->blksz, data->blocks, scat_req->len, 225 scat_req->scat_entries); 226 227 data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE : 228 MMC_DATA_READ; 229 230 /* fill SG entries */ 231 sg = scat_req->sgentries; 232 sg_init_table(sg, scat_req->scat_entries); 233 234 /* assemble SG list */ 235 for (i = 0; i < scat_req->scat_entries; i++, sg++) { 236 ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n", 237 i, scat_req->scat_list[i].buf, 238 scat_req->scat_list[i].len); 239 240 sg_set_buf(sg, scat_req->scat_list[i].buf, 241 scat_req->scat_list[i].len); 242 } 243 244 /* set scatter-gather table for request */ 245 data->sg = scat_req->sgentries; 246 data->sg_len = scat_req->scat_entries; 247 } 248 249 static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio, 250 struct bus_request *req) 251 { 252 struct mmc_request mmc_req; 253 struct mmc_command cmd; 254 struct mmc_data data; 255 struct hif_scatter_req *scat_req; 256 u8 opcode, rw; 257 int status, len; 258 259 scat_req = req->scat_req; 260 261 if (scat_req->virt_scat) { 262 len = scat_req->len; 263 if (scat_req->req & HIF_BLOCK_BASIS) 264 len = round_down(len, HIF_MBOX_BLOCK_SIZE); 265 266 status = ath6kl_sdio_io(ar_sdio->func, scat_req->req, 267 scat_req->addr, scat_req->virt_dma_buf, 268 len); 269 goto scat_complete; 270 } 271 272 memset(&mmc_req, 0, sizeof(struct mmc_request)); 273 memset(&cmd, 0, sizeof(struct mmc_command)); 274 memset(&data, 0, sizeof(struct mmc_data)); 275 276 ath6kl_sdio_setup_scat_data(scat_req, &data); 277 278 opcode = (scat_req->req & HIF_FIXED_ADDRESS) ? 279 CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS; 280 281 rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ; 282 283 /* Fixup the address so that the last byte will fall on MBOX EOM */ 284 if (scat_req->req & HIF_WRITE) { 285 if (scat_req->addr == HIF_MBOX_BASE_ADDR) 286 scat_req->addr += HIF_MBOX_WIDTH - scat_req->len; 287 else 288 /* Uses extended address range */ 289 scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len; 290 } 291 292 /* set command argument */ 293 ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num, 294 CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr, 295 data.blocks); 296 297 cmd.opcode = SD_IO_RW_EXTENDED; 298 cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC; 299 300 mmc_req.cmd = &cmd; 301 mmc_req.data = &data; 302 303 sdio_claim_host(ar_sdio->func); 304 305 mmc_set_data_timeout(&data, ar_sdio->func->card); 306 /* synchronous call to process request */ 307 mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req); 308 309 sdio_release_host(ar_sdio->func); 310 311 status = cmd.error ? cmd.error : data.error; 312 313 scat_complete: 314 scat_req->status = status; 315 316 if (scat_req->status) 317 ath6kl_err("Scatter write request failed:%d\n", 318 scat_req->status); 319 320 if (scat_req->req & HIF_ASYNCHRONOUS) 321 scat_req->complete(ar_sdio->ar->htc_target, scat_req); 322 323 return status; 324 } 325 326 static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio, 327 int n_scat_entry, int n_scat_req, 328 bool virt_scat) 329 { 330 struct hif_scatter_req *s_req; 331 struct bus_request *bus_req; 332 int i, scat_req_sz, scat_list_sz, sg_sz, buf_sz; 333 u8 *virt_buf; 334 335 scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item); 336 scat_req_sz = sizeof(*s_req) + scat_list_sz; 337 338 if (!virt_scat) 339 sg_sz = sizeof(struct scatterlist) * n_scat_entry; 340 else 341 buf_sz = 2 * L1_CACHE_BYTES + 342 ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER; 343 344 for (i = 0; i < n_scat_req; i++) { 345 /* allocate the scatter request */ 346 s_req = kzalloc(scat_req_sz, GFP_KERNEL); 347 if (!s_req) 348 return -ENOMEM; 349 350 if (virt_scat) { 351 virt_buf = kzalloc(buf_sz, GFP_KERNEL); 352 if (!virt_buf) { 353 kfree(s_req); 354 return -ENOMEM; 355 } 356 357 s_req->virt_dma_buf = 358 (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf); 359 } else { 360 /* allocate sglist */ 361 s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL); 362 363 if (!s_req->sgentries) { 364 kfree(s_req); 365 return -ENOMEM; 366 } 367 } 368 369 /* allocate a bus request for this scatter request */ 370 bus_req = ath6kl_sdio_alloc_busreq(ar_sdio); 371 if (!bus_req) { 372 kfree(s_req->sgentries); 373 kfree(s_req->virt_dma_buf); 374 kfree(s_req); 375 return -ENOMEM; 376 } 377 378 /* assign the scatter request to this bus request */ 379 bus_req->scat_req = s_req; 380 s_req->busrequest = bus_req; 381 382 s_req->virt_scat = virt_scat; 383 384 /* add it to the scatter pool */ 385 hif_scatter_req_add(ar_sdio->ar, s_req); 386 } 387 388 return 0; 389 } 390 391 static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf, 392 u32 len, u32 request) 393 { 394 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 395 u8 *tbuf = NULL; 396 int ret; 397 bool bounced = false; 398 399 if (request & HIF_BLOCK_BASIS) 400 len = round_down(len, HIF_MBOX_BLOCK_SIZE); 401 402 if (buf_needs_bounce(buf)) { 403 if (!ar_sdio->dma_buffer) 404 return -ENOMEM; 405 mutex_lock(&ar_sdio->dma_buffer_mutex); 406 tbuf = ar_sdio->dma_buffer; 407 memcpy(tbuf, buf, len); 408 bounced = true; 409 } else 410 tbuf = buf; 411 412 ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len); 413 if ((request & HIF_READ) && bounced) 414 memcpy(buf, tbuf, len); 415 416 if (bounced) 417 mutex_unlock(&ar_sdio->dma_buffer_mutex); 418 419 return ret; 420 } 421 422 static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio, 423 struct bus_request *req) 424 { 425 if (req->scat_req) 426 ath6kl_sdio_scat_rw(ar_sdio, req); 427 else { 428 void *context; 429 int status; 430 431 status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address, 432 req->buffer, req->length, 433 req->request); 434 context = req->packet; 435 ath6kl_sdio_free_bus_req(ar_sdio, req); 436 ath6kl_hif_rw_comp_handler(context, status); 437 } 438 } 439 440 static void ath6kl_sdio_write_async_work(struct work_struct *work) 441 { 442 struct ath6kl_sdio *ar_sdio; 443 struct bus_request *req, *tmp_req; 444 445 ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work); 446 447 spin_lock_bh(&ar_sdio->wr_async_lock); 448 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { 449 list_del(&req->list); 450 spin_unlock_bh(&ar_sdio->wr_async_lock); 451 __ath6kl_sdio_write_async(ar_sdio, req); 452 spin_lock_bh(&ar_sdio->wr_async_lock); 453 } 454 spin_unlock_bh(&ar_sdio->wr_async_lock); 455 } 456 457 static void ath6kl_sdio_irq_handler(struct sdio_func *func) 458 { 459 int status; 460 struct ath6kl_sdio *ar_sdio; 461 462 ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n"); 463 464 ar_sdio = sdio_get_drvdata(func); 465 mutex_lock(&ar_sdio->mtx_irq); 466 /* 467 * Release the host during interrups so we can pick it back up when 468 * we process commands. 469 */ 470 sdio_release_host(ar_sdio->func); 471 472 status = ath6kl_hif_intr_bh_handler(ar_sdio->ar); 473 sdio_claim_host(ar_sdio->func); 474 mutex_unlock(&ar_sdio->mtx_irq); 475 WARN_ON(status && status != -ECANCELED); 476 } 477 478 static int ath6kl_sdio_power_on(struct ath6kl *ar) 479 { 480 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 481 struct sdio_func *func = ar_sdio->func; 482 int ret = 0; 483 484 if (!ar_sdio->is_disabled) 485 return 0; 486 487 ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n"); 488 489 sdio_claim_host(func); 490 491 ret = sdio_enable_func(func); 492 if (ret) { 493 ath6kl_err("Unable to enable sdio func: %d)\n", ret); 494 sdio_release_host(func); 495 return ret; 496 } 497 498 sdio_release_host(func); 499 500 /* 501 * Wait for hardware to initialise. It should take a lot less than 502 * 10 ms but let's be conservative here. 503 */ 504 msleep(10); 505 506 ar_sdio->is_disabled = false; 507 508 return ret; 509 } 510 511 static int ath6kl_sdio_power_off(struct ath6kl *ar) 512 { 513 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 514 int ret; 515 516 if (ar_sdio->is_disabled) 517 return 0; 518 519 ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n"); 520 521 /* Disable the card */ 522 sdio_claim_host(ar_sdio->func); 523 ret = sdio_disable_func(ar_sdio->func); 524 sdio_release_host(ar_sdio->func); 525 526 if (ret) 527 return ret; 528 529 ar_sdio->is_disabled = true; 530 531 return ret; 532 } 533 534 static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer, 535 u32 length, u32 request, 536 struct htc_packet *packet) 537 { 538 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 539 struct bus_request *bus_req; 540 541 bus_req = ath6kl_sdio_alloc_busreq(ar_sdio); 542 543 if (!bus_req) 544 return -ENOMEM; 545 546 bus_req->address = address; 547 bus_req->buffer = buffer; 548 bus_req->length = length; 549 bus_req->request = request; 550 bus_req->packet = packet; 551 552 spin_lock_bh(&ar_sdio->wr_async_lock); 553 list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq); 554 spin_unlock_bh(&ar_sdio->wr_async_lock); 555 queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work); 556 557 return 0; 558 } 559 560 static void ath6kl_sdio_irq_enable(struct ath6kl *ar) 561 { 562 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 563 int ret; 564 565 sdio_claim_host(ar_sdio->func); 566 567 /* Register the isr */ 568 ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler); 569 if (ret) 570 ath6kl_err("Failed to claim sdio irq: %d\n", ret); 571 572 sdio_release_host(ar_sdio->func); 573 } 574 575 static void ath6kl_sdio_irq_disable(struct ath6kl *ar) 576 { 577 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 578 int ret; 579 580 sdio_claim_host(ar_sdio->func); 581 582 mutex_lock(&ar_sdio->mtx_irq); 583 584 ret = sdio_release_irq(ar_sdio->func); 585 if (ret) 586 ath6kl_err("Failed to release sdio irq: %d\n", ret); 587 588 mutex_unlock(&ar_sdio->mtx_irq); 589 590 sdio_release_host(ar_sdio->func); 591 } 592 593 static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar) 594 { 595 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 596 struct hif_scatter_req *node = NULL; 597 598 spin_lock_bh(&ar_sdio->scat_lock); 599 600 if (!list_empty(&ar_sdio->scat_req)) { 601 node = list_first_entry(&ar_sdio->scat_req, 602 struct hif_scatter_req, list); 603 list_del(&node->list); 604 } 605 606 spin_unlock_bh(&ar_sdio->scat_lock); 607 608 return node; 609 } 610 611 static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar, 612 struct hif_scatter_req *s_req) 613 { 614 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 615 616 spin_lock_bh(&ar_sdio->scat_lock); 617 618 list_add_tail(&s_req->list, &ar_sdio->scat_req); 619 620 spin_unlock_bh(&ar_sdio->scat_lock); 621 622 } 623 624 /* scatter gather read write request */ 625 static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar, 626 struct hif_scatter_req *scat_req) 627 { 628 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 629 u32 request = scat_req->req; 630 int status = 0; 631 632 if (!scat_req->len) 633 return -EINVAL; 634 635 ath6kl_dbg(ATH6KL_DBG_SCATTER, 636 "hif-scatter: total len: %d scatter entries: %d\n", 637 scat_req->len, scat_req->scat_entries); 638 639 if (request & HIF_SYNCHRONOUS) 640 status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest); 641 else { 642 spin_lock_bh(&ar_sdio->wr_async_lock); 643 list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq); 644 spin_unlock_bh(&ar_sdio->wr_async_lock); 645 queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work); 646 } 647 648 return status; 649 } 650 651 /* clean up scatter support */ 652 static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar) 653 { 654 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 655 struct hif_scatter_req *s_req, *tmp_req; 656 657 /* empty the free list */ 658 spin_lock_bh(&ar_sdio->scat_lock); 659 list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) { 660 list_del(&s_req->list); 661 spin_unlock_bh(&ar_sdio->scat_lock); 662 663 /* 664 * FIXME: should we also call completion handler with 665 * ath6kl_hif_rw_comp_handler() with status -ECANCELED so 666 * that the packet is properly freed? 667 */ 668 if (s_req->busrequest) 669 ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest); 670 kfree(s_req->virt_dma_buf); 671 kfree(s_req->sgentries); 672 kfree(s_req); 673 674 spin_lock_bh(&ar_sdio->scat_lock); 675 } 676 spin_unlock_bh(&ar_sdio->scat_lock); 677 } 678 679 /* setup of HIF scatter resources */ 680 static int ath6kl_sdio_enable_scatter(struct ath6kl *ar) 681 { 682 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 683 struct htc_target *target = ar->htc_target; 684 int ret; 685 bool virt_scat = false; 686 687 if (ar_sdio->scatter_enabled) 688 return 0; 689 690 ar_sdio->scatter_enabled = true; 691 692 /* check if host supports scatter and it meets our requirements */ 693 if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) { 694 ath6kl_err("host only supports scatter of :%d entries, need: %d\n", 695 ar_sdio->func->card->host->max_segs, 696 MAX_SCATTER_ENTRIES_PER_REQ); 697 virt_scat = true; 698 } 699 700 if (!virt_scat) { 701 ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio, 702 MAX_SCATTER_ENTRIES_PER_REQ, 703 MAX_SCATTER_REQUESTS, virt_scat); 704 705 if (!ret) { 706 ath6kl_dbg(ATH6KL_DBG_BOOT, 707 "hif-scatter enabled requests %d entries %d\n", 708 MAX_SCATTER_REQUESTS, 709 MAX_SCATTER_ENTRIES_PER_REQ); 710 711 target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ; 712 target->max_xfer_szper_scatreq = 713 MAX_SCATTER_REQ_TRANSFER_SIZE; 714 } else { 715 ath6kl_sdio_cleanup_scatter(ar); 716 ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n"); 717 } 718 } 719 720 if (virt_scat || ret) { 721 ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio, 722 ATH6KL_SCATTER_ENTRIES_PER_REQ, 723 ATH6KL_SCATTER_REQS, virt_scat); 724 725 if (ret) { 726 ath6kl_err("failed to alloc virtual scatter resources !\n"); 727 ath6kl_sdio_cleanup_scatter(ar); 728 return ret; 729 } 730 731 ath6kl_dbg(ATH6KL_DBG_BOOT, 732 "virtual scatter enabled requests %d entries %d\n", 733 ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ); 734 735 target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ; 736 target->max_xfer_szper_scatreq = 737 ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER; 738 } 739 740 return 0; 741 } 742 743 static int ath6kl_sdio_config(struct ath6kl *ar) 744 { 745 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 746 struct sdio_func *func = ar_sdio->func; 747 int ret; 748 749 sdio_claim_host(func); 750 751 if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >= 752 MANUFACTURER_ID_AR6003_BASE) { 753 /* enable 4-bit ASYNC interrupt on AR6003 or later */ 754 ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card, 755 CCCR_SDIO_IRQ_MODE_REG, 756 SDIO_IRQ_MODE_ASYNC_4BIT_IRQ); 757 if (ret) { 758 ath6kl_err("Failed to enable 4-bit async irq mode %d\n", 759 ret); 760 goto out; 761 } 762 763 ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n"); 764 } 765 766 /* give us some time to enable, in ms */ 767 func->enable_timeout = 100; 768 769 ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE); 770 if (ret) { 771 ath6kl_err("Set sdio block size %d failed: %d)\n", 772 HIF_MBOX_BLOCK_SIZE, ret); 773 goto out; 774 } 775 776 out: 777 sdio_release_host(func); 778 779 return ret; 780 } 781 782 static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow) 783 { 784 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 785 struct sdio_func *func = ar_sdio->func; 786 mmc_pm_flag_t flags; 787 int ret; 788 789 flags = sdio_get_host_pm_caps(func); 790 791 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags); 792 793 if (!(flags & MMC_PM_KEEP_POWER) || 794 (ar->conf_flags & ATH6KL_CONF_SUSPEND_CUTPOWER)) { 795 /* as host doesn't support keep power we need to cut power */ 796 return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER, 797 NULL); 798 } 799 800 ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); 801 if (ret) { 802 printk(KERN_ERR "ath6kl: set sdio pm flags failed: %d\n", 803 ret); 804 return ret; 805 } 806 807 if (!(flags & MMC_PM_WAKE_SDIO_IRQ)) 808 goto deepsleep; 809 810 /* sdio irq wakes up host */ 811 812 if (ar->state == ATH6KL_STATE_SCHED_SCAN) { 813 ret = ath6kl_cfg80211_suspend(ar, 814 ATH6KL_CFG_SUSPEND_SCHED_SCAN, 815 NULL); 816 if (ret) { 817 ath6kl_warn("Schedule scan suspend failed: %d", ret); 818 return ret; 819 } 820 821 ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ); 822 if (ret) 823 ath6kl_warn("set sdio wake irq flag failed: %d\n", ret); 824 825 return ret; 826 } 827 828 if (wow) { 829 /* 830 * The host sdio controller is capable of keep power and 831 * sdio irq wake up at this point. It's fine to continue 832 * wow suspend operation. 833 */ 834 ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow); 835 if (ret) 836 return ret; 837 838 ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ); 839 if (ret) 840 ath6kl_err("set sdio wake irq flag failed: %d\n", ret); 841 842 return ret; 843 } 844 845 deepsleep: 846 return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP, NULL); 847 } 848 849 static int ath6kl_sdio_resume(struct ath6kl *ar) 850 { 851 switch (ar->state) { 852 case ATH6KL_STATE_OFF: 853 case ATH6KL_STATE_CUTPOWER: 854 ath6kl_dbg(ATH6KL_DBG_SUSPEND, 855 "sdio resume configuring sdio\n"); 856 857 /* need to set sdio settings after power is cut from sdio */ 858 ath6kl_sdio_config(ar); 859 break; 860 861 case ATH6KL_STATE_ON: 862 break; 863 864 case ATH6KL_STATE_DEEPSLEEP: 865 break; 866 867 case ATH6KL_STATE_WOW: 868 break; 869 case ATH6KL_STATE_SCHED_SCAN: 870 break; 871 } 872 873 ath6kl_cfg80211_resume(ar); 874 875 return 0; 876 } 877 878 /* set the window address register (using 4-byte register access ). */ 879 static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr) 880 { 881 int status; 882 u8 addr_val[4]; 883 s32 i; 884 885 /* 886 * Write bytes 1,2,3 of the register to set the upper address bytes, 887 * the LSB is written last to initiate the access cycle 888 */ 889 890 for (i = 1; i <= 3; i++) { 891 /* 892 * Fill the buffer with the address byte value we want to 893 * hit 4 times. 894 */ 895 memset(addr_val, ((u8 *)&addr)[i], 4); 896 897 /* 898 * Hit each byte of the register address with a 4-byte 899 * write operation to the same address, this is a harmless 900 * operation. 901 */ 902 status = ath6kl_sdio_read_write_sync(ar, reg_addr + i, addr_val, 903 4, HIF_WR_SYNC_BYTE_FIX); 904 if (status) 905 break; 906 } 907 908 if (status) { 909 ath6kl_err("%s: failed to write initial bytes of 0x%x " 910 "to window reg: 0x%X\n", __func__, 911 addr, reg_addr); 912 return status; 913 } 914 915 /* 916 * Write the address register again, this time write the whole 917 * 4-byte value. The effect here is that the LSB write causes the 918 * cycle to start, the extra 3 byte write to bytes 1,2,3 has no 919 * effect since we are writing the same values again 920 */ 921 status = ath6kl_sdio_read_write_sync(ar, reg_addr, (u8 *)(&addr), 922 4, HIF_WR_SYNC_BYTE_INC); 923 924 if (status) { 925 ath6kl_err("%s: failed to write 0x%x to window reg: 0x%X\n", 926 __func__, addr, reg_addr); 927 return status; 928 } 929 930 return 0; 931 } 932 933 static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data) 934 { 935 int status; 936 937 /* set window register to start read cycle */ 938 status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS, 939 address); 940 941 if (status) 942 return status; 943 944 /* read the data */ 945 status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS, 946 (u8 *)data, sizeof(u32), HIF_RD_SYNC_BYTE_INC); 947 if (status) { 948 ath6kl_err("%s: failed to read from window data addr\n", 949 __func__); 950 return status; 951 } 952 953 return status; 954 } 955 956 static int ath6kl_sdio_diag_write32(struct ath6kl *ar, u32 address, 957 __le32 data) 958 { 959 int status; 960 u32 val = (__force u32) data; 961 962 /* set write data */ 963 status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS, 964 (u8 *) &val, sizeof(u32), HIF_WR_SYNC_BYTE_INC); 965 if (status) { 966 ath6kl_err("%s: failed to write 0x%x to window data addr\n", 967 __func__, data); 968 return status; 969 } 970 971 /* set window register, which starts the write cycle */ 972 return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS, 973 address); 974 } 975 976 static int ath6kl_sdio_bmi_credits(struct ath6kl *ar) 977 { 978 u32 addr; 979 unsigned long timeout; 980 int ret; 981 982 ar->bmi.cmd_credits = 0; 983 984 /* Read the counter register to get the command credits */ 985 addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4; 986 987 timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT); 988 while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) { 989 990 /* 991 * Hit the credit counter with a 4-byte access, the first byte 992 * read will hit the counter and cause a decrement, while the 993 * remaining 3 bytes has no effect. The rationale behind this 994 * is to make all HIF accesses 4-byte aligned. 995 */ 996 ret = ath6kl_sdio_read_write_sync(ar, addr, 997 (u8 *)&ar->bmi.cmd_credits, 4, 998 HIF_RD_SYNC_BYTE_INC); 999 if (ret) { 1000 ath6kl_err("Unable to decrement the command credit " 1001 "count register: %d\n", ret); 1002 return ret; 1003 } 1004 1005 /* The counter is only 8 bits. 1006 * Ignore anything in the upper 3 bytes 1007 */ 1008 ar->bmi.cmd_credits &= 0xFF; 1009 } 1010 1011 if (!ar->bmi.cmd_credits) { 1012 ath6kl_err("bmi communication timeout\n"); 1013 return -ETIMEDOUT; 1014 } 1015 1016 return 0; 1017 } 1018 1019 static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar) 1020 { 1021 unsigned long timeout; 1022 u32 rx_word = 0; 1023 int ret = 0; 1024 1025 timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT); 1026 while ((time_before(jiffies, timeout)) && !rx_word) { 1027 ret = ath6kl_sdio_read_write_sync(ar, 1028 RX_LOOKAHEAD_VALID_ADDRESS, 1029 (u8 *)&rx_word, sizeof(rx_word), 1030 HIF_RD_SYNC_BYTE_INC); 1031 if (ret) { 1032 ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n"); 1033 return ret; 1034 } 1035 1036 /* all we really want is one bit */ 1037 rx_word &= (1 << ENDPOINT1); 1038 } 1039 1040 if (!rx_word) { 1041 ath6kl_err("bmi_recv_buf FIFO empty\n"); 1042 return -EINVAL; 1043 } 1044 1045 return ret; 1046 } 1047 1048 static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len) 1049 { 1050 int ret; 1051 u32 addr; 1052 1053 ret = ath6kl_sdio_bmi_credits(ar); 1054 if (ret) 1055 return ret; 1056 1057 addr = ar->mbox_info.htc_addr; 1058 1059 ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len, 1060 HIF_WR_SYNC_BYTE_INC); 1061 if (ret) 1062 ath6kl_err("unable to send the bmi data to the device\n"); 1063 1064 return ret; 1065 } 1066 1067 static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len) 1068 { 1069 int ret; 1070 u32 addr; 1071 1072 /* 1073 * During normal bootup, small reads may be required. 1074 * Rather than issue an HIF Read and then wait as the Target 1075 * adds successive bytes to the FIFO, we wait here until 1076 * we know that response data is available. 1077 * 1078 * This allows us to cleanly timeout on an unexpected 1079 * Target failure rather than risk problems at the HIF level. 1080 * In particular, this avoids SDIO timeouts and possibly garbage 1081 * data on some host controllers. And on an interconnect 1082 * such as Compact Flash (as well as some SDIO masters) which 1083 * does not provide any indication on data timeout, it avoids 1084 * a potential hang or garbage response. 1085 * 1086 * Synchronization is more difficult for reads larger than the 1087 * size of the MBOX FIFO (128B), because the Target is unable 1088 * to push the 129th byte of data until AFTER the Host posts an 1089 * HIF Read and removes some FIFO data. So for large reads the 1090 * Host proceeds to post an HIF Read BEFORE all the data is 1091 * actually available to read. Fortunately, large BMI reads do 1092 * not occur in practice -- they're supported for debug/development. 1093 * 1094 * So Host/Target BMI synchronization is divided into these cases: 1095 * CASE 1: length < 4 1096 * Should not happen 1097 * 1098 * CASE 2: 4 <= length <= 128 1099 * Wait for first 4 bytes to be in FIFO 1100 * If CONSERVATIVE_BMI_READ is enabled, also wait for 1101 * a BMI command credit, which indicates that the ENTIRE 1102 * response is available in the the FIFO 1103 * 1104 * CASE 3: length > 128 1105 * Wait for the first 4 bytes to be in FIFO 1106 * 1107 * For most uses, a small timeout should be sufficient and we will 1108 * usually see a response quickly; but there may be some unusual 1109 * (debug) cases of BMI_EXECUTE where we want an larger timeout. 1110 * For now, we use an unbounded busy loop while waiting for 1111 * BMI_EXECUTE. 1112 * 1113 * If BMI_EXECUTE ever needs to support longer-latency execution, 1114 * especially in production, this code needs to be enhanced to sleep 1115 * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently 1116 * a function of Host processor speed. 1117 */ 1118 if (len >= 4) { /* NB: Currently, always true */ 1119 ret = ath6kl_bmi_get_rx_lkahd(ar); 1120 if (ret) 1121 return ret; 1122 } 1123 1124 addr = ar->mbox_info.htc_addr; 1125 ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len, 1126 HIF_RD_SYNC_BYTE_INC); 1127 if (ret) { 1128 ath6kl_err("Unable to read the bmi data from the device: %d\n", 1129 ret); 1130 return ret; 1131 } 1132 1133 return 0; 1134 } 1135 1136 static void ath6kl_sdio_stop(struct ath6kl *ar) 1137 { 1138 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 1139 struct bus_request *req, *tmp_req; 1140 void *context; 1141 1142 /* FIXME: make sure that wq is not queued again */ 1143 1144 cancel_work_sync(&ar_sdio->wr_async_work); 1145 1146 spin_lock_bh(&ar_sdio->wr_async_lock); 1147 1148 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { 1149 list_del(&req->list); 1150 1151 if (req->scat_req) { 1152 /* this is a scatter gather request */ 1153 req->scat_req->status = -ECANCELED; 1154 req->scat_req->complete(ar_sdio->ar->htc_target, 1155 req->scat_req); 1156 } else { 1157 context = req->packet; 1158 ath6kl_sdio_free_bus_req(ar_sdio, req); 1159 ath6kl_hif_rw_comp_handler(context, -ECANCELED); 1160 } 1161 } 1162 1163 spin_unlock_bh(&ar_sdio->wr_async_lock); 1164 1165 WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4); 1166 } 1167 1168 static const struct ath6kl_hif_ops ath6kl_sdio_ops = { 1169 .read_write_sync = ath6kl_sdio_read_write_sync, 1170 .write_async = ath6kl_sdio_write_async, 1171 .irq_enable = ath6kl_sdio_irq_enable, 1172 .irq_disable = ath6kl_sdio_irq_disable, 1173 .scatter_req_get = ath6kl_sdio_scatter_req_get, 1174 .scatter_req_add = ath6kl_sdio_scatter_req_add, 1175 .enable_scatter = ath6kl_sdio_enable_scatter, 1176 .scat_req_rw = ath6kl_sdio_async_rw_scatter, 1177 .cleanup_scatter = ath6kl_sdio_cleanup_scatter, 1178 .suspend = ath6kl_sdio_suspend, 1179 .resume = ath6kl_sdio_resume, 1180 .diag_read32 = ath6kl_sdio_diag_read32, 1181 .diag_write32 = ath6kl_sdio_diag_write32, 1182 .bmi_read = ath6kl_sdio_bmi_read, 1183 .bmi_write = ath6kl_sdio_bmi_write, 1184 .power_on = ath6kl_sdio_power_on, 1185 .power_off = ath6kl_sdio_power_off, 1186 .stop = ath6kl_sdio_stop, 1187 }; 1188 1189 #ifdef CONFIG_PM_SLEEP 1190 1191 /* 1192 * Empty handlers so that mmc subsystem doesn't remove us entirely during 1193 * suspend. We instead follow cfg80211 suspend/resume handlers. 1194 */ 1195 static int ath6kl_sdio_pm_suspend(struct device *device) 1196 { 1197 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n"); 1198 1199 return 0; 1200 } 1201 1202 static int ath6kl_sdio_pm_resume(struct device *device) 1203 { 1204 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n"); 1205 1206 return 0; 1207 } 1208 1209 static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend, 1210 ath6kl_sdio_pm_resume); 1211 1212 #define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops) 1213 1214 #else 1215 1216 #define ATH6KL_SDIO_PM_OPS NULL 1217 1218 #endif /* CONFIG_PM_SLEEP */ 1219 1220 static int ath6kl_sdio_probe(struct sdio_func *func, 1221 const struct sdio_device_id *id) 1222 { 1223 int ret; 1224 struct ath6kl_sdio *ar_sdio; 1225 struct ath6kl *ar; 1226 int count; 1227 1228 ath6kl_dbg(ATH6KL_DBG_BOOT, 1229 "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n", 1230 func->num, func->vendor, func->device, 1231 func->max_blksize, func->cur_blksize); 1232 1233 ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL); 1234 if (!ar_sdio) 1235 return -ENOMEM; 1236 1237 ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL); 1238 if (!ar_sdio->dma_buffer) { 1239 ret = -ENOMEM; 1240 goto err_hif; 1241 } 1242 1243 ar_sdio->func = func; 1244 sdio_set_drvdata(func, ar_sdio); 1245 1246 ar_sdio->id = id; 1247 ar_sdio->is_disabled = true; 1248 1249 spin_lock_init(&ar_sdio->lock); 1250 spin_lock_init(&ar_sdio->scat_lock); 1251 spin_lock_init(&ar_sdio->wr_async_lock); 1252 mutex_init(&ar_sdio->dma_buffer_mutex); 1253 mutex_init(&ar_sdio->mtx_irq); 1254 1255 INIT_LIST_HEAD(&ar_sdio->scat_req); 1256 INIT_LIST_HEAD(&ar_sdio->bus_req_freeq); 1257 INIT_LIST_HEAD(&ar_sdio->wr_asyncq); 1258 1259 INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work); 1260 1261 for (count = 0; count < BUS_REQUEST_MAX_NUM; count++) 1262 ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]); 1263 1264 ar = ath6kl_core_create(&ar_sdio->func->dev); 1265 if (!ar) { 1266 ath6kl_err("Failed to alloc ath6kl core\n"); 1267 ret = -ENOMEM; 1268 goto err_dma; 1269 } 1270 1271 ar_sdio->ar = ar; 1272 ar->hif_type = ATH6KL_HIF_TYPE_SDIO; 1273 ar->hif_priv = ar_sdio; 1274 ar->hif_ops = &ath6kl_sdio_ops; 1275 ar->bmi.max_data_size = 256; 1276 1277 ath6kl_sdio_set_mbox_info(ar); 1278 1279 ret = ath6kl_sdio_config(ar); 1280 if (ret) { 1281 ath6kl_err("Failed to config sdio: %d\n", ret); 1282 goto err_core_alloc; 1283 } 1284 1285 ret = ath6kl_core_init(ar); 1286 if (ret) { 1287 ath6kl_err("Failed to init ath6kl core\n"); 1288 goto err_core_alloc; 1289 } 1290 1291 return ret; 1292 1293 err_core_alloc: 1294 ath6kl_core_destroy(ar_sdio->ar); 1295 err_dma: 1296 kfree(ar_sdio->dma_buffer); 1297 err_hif: 1298 kfree(ar_sdio); 1299 1300 return ret; 1301 } 1302 1303 static void ath6kl_sdio_remove(struct sdio_func *func) 1304 { 1305 struct ath6kl_sdio *ar_sdio; 1306 1307 ath6kl_dbg(ATH6KL_DBG_BOOT, 1308 "sdio removed func %d vendor 0x%x device 0x%x\n", 1309 func->num, func->vendor, func->device); 1310 1311 ar_sdio = sdio_get_drvdata(func); 1312 1313 ath6kl_stop_txrx(ar_sdio->ar); 1314 cancel_work_sync(&ar_sdio->wr_async_work); 1315 1316 ath6kl_core_cleanup(ar_sdio->ar); 1317 1318 kfree(ar_sdio->dma_buffer); 1319 kfree(ar_sdio); 1320 } 1321 1322 static const struct sdio_device_id ath6kl_sdio_devices[] = { 1323 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))}, 1324 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))}, 1325 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))}, 1326 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))}, 1327 {}, 1328 }; 1329 1330 MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices); 1331 1332 static struct sdio_driver ath6kl_sdio_driver = { 1333 .name = "ath6kl_sdio", 1334 .id_table = ath6kl_sdio_devices, 1335 .probe = ath6kl_sdio_probe, 1336 .remove = ath6kl_sdio_remove, 1337 .drv.pm = ATH6KL_SDIO_PM_OPS, 1338 }; 1339 1340 static int __init ath6kl_sdio_init(void) 1341 { 1342 int ret; 1343 1344 ret = sdio_register_driver(&ath6kl_sdio_driver); 1345 if (ret) 1346 ath6kl_err("sdio driver registration failed: %d\n", ret); 1347 1348 return ret; 1349 } 1350 1351 static void __exit ath6kl_sdio_exit(void) 1352 { 1353 sdio_unregister_driver(&ath6kl_sdio_driver); 1354 } 1355 1356 module_init(ath6kl_sdio_init); 1357 module_exit(ath6kl_sdio_exit); 1358 1359 MODULE_AUTHOR("Atheros Communications, Inc."); 1360 MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices"); 1361 MODULE_LICENSE("Dual BSD/GPL"); 1362 1363 MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_OTP_FILE); 1364 MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_FIRMWARE_FILE); 1365 MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_PATCH_FILE); 1366 MODULE_FIRMWARE(AR6003_HW_2_0_BOARD_DATA_FILE); 1367 MODULE_FIRMWARE(AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE); 1368 MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_OTP_FILE); 1369 MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_FIRMWARE_FILE); 1370 MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_PATCH_FILE); 1371 MODULE_FIRMWARE(AR6003_HW_2_1_1_BOARD_DATA_FILE); 1372 MODULE_FIRMWARE(AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE); 1373 MODULE_FIRMWARE(AR6004_HW_1_0_FW_DIR "/" AR6004_HW_1_0_FIRMWARE_FILE); 1374 MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE); 1375 MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE); 1376 MODULE_FIRMWARE(AR6004_HW_1_1_FW_DIR "/" AR6004_HW_1_1_FIRMWARE_FILE); 1377 MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE); 1378 MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE); 1379