/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* ****************** SDIO CARD Interface Functions **************************/

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/core.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/platform_device.h>
#include <linux/platform_data/brcmfmac-sdio.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/acpi.h>
#include <net/cfg80211.h>

#include <defs.h>
#include <brcm_hw_ids.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include <chipcommon.h>
#include <soc.h>
#include "chip.h"
#include "bus.h"
#include "debug.h"
#include "sdio.h"
#include "of.h"
#include "core.h"
#include "common.h"

#define SDIOH_API_ACCESS_RETRY_LIMIT	2

#define DMA_ALIGN_MASK			0x03

#define SDIO_FUNC1_BLOCKSIZE		64
#define SDIO_FUNC2_BLOCKSIZE		512
/* Maximum milliseconds to wait for F2 to come up */
#define SDIO_WAIT_F2RDY			3000

#define BRCMF_DEFAULT_RXGLOM_SIZE	32	/* max rx frames in glom chain */

struct brcmf_sdiod_freezer {
	atomic_t freezing;
	atomic_t thread_count;
	u32 frozen_count;
	wait_queue_head_t thread_freeze;
	struct completion resumed;
};

static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(INTR, "OOB intr triggered\n");

	/* out-of-band interrupt is level-triggered which won't
	 * be cleared until dpc
	 */
	if (sdiodev->irq_en) {
		disable_irq_nosync(irq);
		sdiodev->irq_en = false;
	}

	brcmf_sdio_isr(sdiodev->bus);

	return IRQ_HANDLED;
}

static void brcmf_sdiod_ib_irqhandler(struct sdio_func *func)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(INTR, "IB intr triggered\n");

	brcmf_sdio_isr(sdiodev->bus);
}

/* dummy handler for SDIO function 2 interrupt */
static void brcmf_sdiod_dummy_irqhandler(struct sdio_func *func)
{
}
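/*
 * Interrupt setup note (descriptive): two signalling schemes are supported.
 * When platform data advertises an out-of-band GPIO interrupt
 * (oob_irq_supported), a host IRQ is requested and the dongle is told to
 * drive its separate interrupt pin via SDIO_CCCR_BRCM_SEPINT. Otherwise the
 * in-band SDIO interrupt is claimed on function 1, with a dummy handler on
 * function 2, apparently so the MMC core also enables the F2 interrupt.
 */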
int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
{
	int ret = 0;
	u8 data;
	u32 addr, gpiocontrol;
	unsigned long flags;

	if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
		brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
			  sdiodev->pdata->oob_irq_nr);
		ret = request_irq(sdiodev->pdata->oob_irq_nr,
				  brcmf_sdiod_oob_irqhandler,
				  sdiodev->pdata->oob_irq_flags,
				  "brcmf_oob_intr",
				  &sdiodev->func[1]->dev);
		if (ret != 0) {
			brcmf_err("request_irq failed %d\n", ret);
			return ret;
		}
		sdiodev->oob_irq_requested = true;
		spin_lock_init(&sdiodev->irq_en_lock);
		spin_lock_irqsave(&sdiodev->irq_en_lock, flags);
		sdiodev->irq_en = true;
		spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags);

		ret = enable_irq_wake(sdiodev->pdata->oob_irq_nr);
		if (ret != 0) {
			brcmf_err("enable_irq_wake failed %d\n", ret);
			return ret;
		}
		sdiodev->irq_wake = true;

		sdio_claim_host(sdiodev->func[1]);

		if (sdiodev->bus_if->chip == BRCM_CC_43362_CHIP_ID) {
			/* assign GPIO to SDIO core */
			addr = CORE_CC_REG(SI_ENUM_BASE, gpiocontrol);
			gpiocontrol = brcmf_sdiod_regrl(sdiodev, addr, &ret);
			gpiocontrol |= 0x2;
			brcmf_sdiod_regwl(sdiodev, addr, gpiocontrol, &ret);

			brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_SELECT, 0xf,
					  &ret);
			brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret);
			brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret);
		}

		/* must configure SDIO_CCCR_IENx to enable irq */
		data = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
		data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);

		/* redirect, configure and enable io for interrupt signal */
		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
		if (sdiodev->pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
			data |= SDIO_SEPINT_ACT_HI;
		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);

		sdio_release_host(sdiodev->func[1]);
	} else {
		brcmf_dbg(SDIO, "Entering\n");
		sdio_claim_host(sdiodev->func[1]);
		sdio_claim_irq(sdiodev->func[1], brcmf_sdiod_ib_irqhandler);
		sdio_claim_irq(sdiodev->func[2], brcmf_sdiod_dummy_irqhandler);
		sdio_release_host(sdiodev->func[1]);
	}

	return 0;
}

int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
{
	brcmf_dbg(SDIO, "Entering\n");

	if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
		sdio_claim_host(sdiodev->func[1]);
		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
		sdio_release_host(sdiodev->func[1]);

		if (sdiodev->oob_irq_requested) {
			sdiodev->oob_irq_requested = false;
			if (sdiodev->irq_wake) {
				disable_irq_wake(sdiodev->pdata->oob_irq_nr);
				sdiodev->irq_wake = false;
			}
			free_irq(sdiodev->pdata->oob_irq_nr,
				 &sdiodev->func[1]->dev);
			sdiodev->irq_en = false;
		}
	} else {
		sdio_claim_host(sdiodev->func[1]);
		sdio_release_irq(sdiodev->func[2]);
		sdio_release_irq(sdiodev->func[1]);
		sdio_release_host(sdiodev->func[1]);
	}

	return 0;
}

void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
			      enum brcmf_sdiod_state state)
{
	if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM ||
	    state == sdiodev->state)
		return;

	brcmf_dbg(TRACE, "%d -> %d\n", sdiodev->state, state);
	switch (sdiodev->state) {
	case BRCMF_SDIOD_DATA:
		/* any other state means bus interface is down */
		brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
		break;
	case BRCMF_SDIOD_DOWN:
		/* transition from DOWN to DATA means bus interface is up */
		if (state == BRCMF_SDIOD_DATA)
			brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_UP);
		break;
	default:
		break;
	}
	sdiodev->state = state;
}
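/*
 * Register access helpers (descriptive note): brcmf_sdiod_regrw_helper()
 * below routes CCCR/FBR addresses (0x0 - 0x7ff) to function 0 and all other
 * addresses to function 1. Backplane core registers are reached through a
 * sliding window: brcmf_sdiod_set_sbaddr_window() programs the window base
 * and brcmf_sdiod_addrprep() converts a full backplane address into that
 * base plus an F1 offset, tagging 32-bit accesses with
 * SBSDIO_SB_ACCESS_2_4B_FLAG.
 */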
static inline int brcmf_sdiod_f0_writeb(struct sdio_func *func,
					uint regaddr, u8 byte)
{
	int err_ret;

	/*
	 * Can only directly write to some F0 registers.
	 * Handle CCCR_IENx and CCCR_ABORT command
	 * as a special case.
	 */
	if ((regaddr == SDIO_CCCR_ABORT) ||
	    (regaddr == SDIO_CCCR_IENx))
		sdio_writeb(func, byte, regaddr, &err_ret);
	else
		sdio_f0_writeb(func, byte, regaddr, &err_ret);

	return err_ret;
}

static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
				    u32 addr, u8 regsz, void *data, bool write)
{
	struct sdio_func *func;
	int ret;

	brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
		  write, fn, addr, regsz);

	/* only allow byte access on F0 */
	if (WARN_ON(regsz > 1 && !fn))
		return -EINVAL;
	func = sdiodev->func[fn];

	switch (regsz) {
	case sizeof(u8):
		if (write) {
			if (fn)
				sdio_writeb(func, *(u8 *)data, addr, &ret);
			else
				ret = brcmf_sdiod_f0_writeb(func, addr,
							    *(u8 *)data);
		} else {
			if (fn)
				*(u8 *)data = sdio_readb(func, addr, &ret);
			else
				*(u8 *)data = sdio_f0_readb(func, addr, &ret);
		}
		break;
	case sizeof(u16):
		if (write)
			sdio_writew(func, *(u16 *)data, addr, &ret);
		else
			*(u16 *)data = sdio_readw(func, addr, &ret);
		break;
	case sizeof(u32):
		if (write)
			sdio_writel(func, *(u32 *)data, addr, &ret);
		else
			*(u32 *)data = sdio_readl(func, addr, &ret);
		break;
	default:
		brcmf_err("invalid size: %d\n", regsz);
		ret = -EINVAL;	/* don't report an uninitialized error code */
		break;
	}

	if (ret)
		brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
			  write ? "write" : "read", fn, addr, ret);

	return ret;
}

static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
				    u8 regsz, void *data, bool write)
{
	u8 func;
	s32 retry = 0;
	int ret;

	if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
		return -ENOMEDIUM;

	/*
	 * figure out how to read the register based on address range
	 * 0x00 ~ 0x7FF: function 0 CCCR and FBR
	 * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
	 * The rest: function 1 silicon backplane core registers
	 */
	if ((addr & ~REG_F0_REG_MASK) == 0)
		func = SDIO_FUNC_0;
	else
		func = SDIO_FUNC_1;

	do {
		if (!write)
			memset(data, 0, regsz);
		/* on retry, wait 1 ms for the bus to settle down */
		if (retry)
			usleep_range(1000, 2000);
		ret = brcmf_sdiod_request_data(sdiodev, func, addr, regsz,
					       data, write);
	} while (ret != 0 && ret != -ENOMEDIUM &&
		 retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);

	if (ret == -ENOMEDIUM)
		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
	else if (ret != 0) {
		/*
		 * SleepCSR register access can fail when
		 * waking up the device so reduce this noise
		 * in the logs.
		 */
		if (addr != SBSDIO_FUNC1_SLEEPCSR)
			brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
				  write ? "write" : "read", func, addr, ret);
		else
			brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
				  write ? "write" : "read", func, addr, ret);
	}
	return ret;
}

static int
brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
{
	int err = 0, i;
	u8 addr[3];

	if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
		return -ENOMEDIUM;

	addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
	addr[1] = (address >> 16) & SBSDIO_SBADDRMID_MASK;
	addr[2] = (address >> 24) & SBSDIO_SBADDRHIGH_MASK;

	for (i = 0; i < 3; i++) {
		err = brcmf_sdiod_regrw_helper(sdiodev,
					       SBSDIO_FUNC1_SBADDRLOW + i,
					       sizeof(u8), &addr[i], true);
		if (err) {
			brcmf_err("failed at addr: 0x%0x\n",
				  SBSDIO_FUNC1_SBADDRLOW + i);
			break;
		}
	}

	return err;
}

static int
brcmf_sdiod_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
{
	uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
	int err = 0;

	if (bar0 != sdiodev->sbwad) {
		err = brcmf_sdiod_set_sbaddr_window(sdiodev, bar0);
		if (err)
			return err;

		sdiodev->sbwad = bar0;
	}

	*addr &= SBSDIO_SB_OFT_ADDR_MASK;

	if (width == 4)
		*addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	return 0;
}
u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
	u8 data;
	int retval;

	brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
					  false);
	brcmf_dbg(SDIO, "data:0x%02x\n", data);

	if (ret)
		*ret = retval;

	return data;
}

u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
	u32 data;
	int retval;

	brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
	retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
	if (retval)
		goto done;
	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
					  false);
	brcmf_dbg(SDIO, "data:0x%08x\n", data);

done:
	if (ret)
		*ret = retval;

	return data;
}

void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
		       u8 data, int *ret)
{
	int retval;

	brcmf_dbg(SDIO, "addr:0x%08x, data:0x%02x\n", addr, data);
	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
					  true);
	if (ret)
		*ret = retval;
}

void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
		       u32 data, int *ret)
{
	int retval;

	brcmf_dbg(SDIO, "addr:0x%08x, data:0x%08x\n", addr, data);
	retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
	if (retval)
		goto done;
	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
					  true);

done:
	if (ret)
		*ret = retval;
}

static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
			      bool write, u32 addr, struct sk_buff *pkt)
{
	unsigned int req_sz;
	int err;

	/* A single skb uses the standard mmc interface */
	req_sz = pkt->len + 3;
	req_sz &= (uint)~3;

	if (write)
		err = sdio_memcpy_toio(sdiodev->func[fn], addr,
				       ((u8 *)(pkt->data)), req_sz);
	else if (fn == 1)
		err = sdio_memcpy_fromio(sdiodev->func[fn], ((u8 *)(pkt->data)),
					 addr, req_sz);
	else
		/* function 2 read is FIFO operation */
		err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr,
				  req_sz);
	if (err == -ENOMEDIUM)
		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
	return err;
}

/**
 * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
 * @sdiodev: brcmfmac sdio device
 * @fn: SDIO function number
 * @write: direction flag
 * @addr: dongle memory address as source/destination
 * @pktlist: skb buffer list pointer
 *
 * This function takes the responsibility as the interface function to MMC
 * stack for block data access. It assumes that the skb passed down by the
 * caller has already been padded and aligned.
 */
static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
				 bool write, u32 addr,
				 struct sk_buff_head *pktlist)
{
	unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
	unsigned int max_req_sz, orig_offset, dst_offset;
	unsigned short max_seg_cnt, seg_sz;
	unsigned char *pkt_data, *orig_data, *dst_data;
	struct sk_buff *pkt_next = NULL, *local_pkt_next;
	struct sk_buff_head local_list, *target_list;
	struct mmc_request mmc_req;
	struct mmc_command mmc_cmd;
	struct mmc_data mmc_dat;
	struct scatterlist *sgl;
	int ret = 0;

	if (!pktlist->qlen)
		return -EINVAL;

	target_list = pktlist;
	/* for host with broken sg support, prepare a page aligned list */
	__skb_queue_head_init(&local_list);
	if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
		req_sz = 0;
		skb_queue_walk(pktlist, pkt_next)
			req_sz += pkt_next->len;
		req_sz = ALIGN(req_sz, sdiodev->func[fn]->cur_blksize);
		while (req_sz > PAGE_SIZE) {
			pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
			if (pkt_next == NULL) {
				ret = -ENOMEM;
				goto exit;
			}
			__skb_queue_tail(&local_list, pkt_next);
			req_sz -= PAGE_SIZE;
		}
		pkt_next = brcmu_pkt_buf_get_skb(req_sz);
		if (pkt_next == NULL) {
			ret = -ENOMEM;
			goto exit;
		}
		__skb_queue_tail(&local_list, pkt_next);
		target_list = &local_list;
	}

	func_blk_sz = sdiodev->func[fn]->cur_blksize;
	max_req_sz = sdiodev->max_request_size;
	max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
			    target_list->qlen);
	seg_sz = target_list->qlen;
	pkt_offset = 0;
	pkt_next = target_list->next;

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&mmc_cmd, 0, sizeof(struct mmc_command));
	memset(&mmc_dat, 0, sizeof(struct mmc_data));

	mmc_dat.sg = sdiodev->sgtable.sgl;
	mmc_dat.blksz = func_blk_sz;
	mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mmc_cmd.opcode = SD_IO_RW_EXTENDED;
	mmc_cmd.arg = write ? 1<<31 : 0;	/* write flag */
	mmc_cmd.arg |= (fn & 0x7) << 28;	/* SDIO func num */
	mmc_cmd.arg |= 1<<27;			/* block mode */
	/* for function 1 the addr will be incremented */
	mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
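	/*
	 * CMD53 (SD_IO_RW_EXTENDED) argument layout, for reference:
	 *   bit 31     R/W flag
	 *   bits 30:28 function number
	 *   bit 27     block mode
	 *   bit 26     OP code (incrementing address)
	 *   bits 25:9  register address (filled in per request below)
	 *   bits 8:0   block count (filled in per request below)
	 */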
	mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
	mmc_req.cmd = &mmc_cmd;
	mmc_req.data = &mmc_dat;

	while (seg_sz) {
		req_sz = 0;
		sg_cnt = 0;
		sgl = sdiodev->sgtable.sgl;
		/* prep sg table */
		while (pkt_next != (struct sk_buff *)target_list) {
			pkt_data = pkt_next->data + pkt_offset;
			sg_data_sz = pkt_next->len - pkt_offset;
			if (sg_data_sz > sdiodev->max_segment_size)
				sg_data_sz = sdiodev->max_segment_size;
			if (sg_data_sz > max_req_sz - req_sz)
				sg_data_sz = max_req_sz - req_sz;

			sg_set_buf(sgl, pkt_data, sg_data_sz);

			sg_cnt++;
			sgl = sg_next(sgl);
			req_sz += sg_data_sz;
			pkt_offset += sg_data_sz;
			if (pkt_offset == pkt_next->len) {
				pkt_offset = 0;
				pkt_next = pkt_next->next;
			}

			if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
				break;
		}
		seg_sz -= sg_cnt;

		if (req_sz % func_blk_sz != 0) {
			brcmf_err("sg request length %u is not %u aligned\n",
				  req_sz, func_blk_sz);
			ret = -ENOTBLK;
			goto exit;
		}

		mmc_dat.sg_len = sg_cnt;
		mmc_dat.blocks = req_sz / func_blk_sz;
		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;	/* address */
		mmc_cmd.arg |= mmc_dat.blocks & 0x1FF;	/* block count */
		/* incrementing addr for function 1 */
		if (fn == 1)
			addr += req_sz;

		mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
		mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);

		ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
		if (ret == -ENOMEDIUM) {
			brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
			break;
		} else if (ret != 0) {
			brcmf_err("CMD53 sg block %s failed %d\n",
				  write ? "write" : "read", ret);
			ret = -EIO;
			break;
		}
	}

	if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
		local_pkt_next = local_list.next;
		orig_offset = 0;
		skb_queue_walk(pktlist, pkt_next) {
			dst_offset = 0;
			do {
				req_sz = local_pkt_next->len - orig_offset;
				req_sz = min_t(uint, pkt_next->len - dst_offset,
					       req_sz);
				orig_data = local_pkt_next->data + orig_offset;
				dst_data = pkt_next->data + dst_offset;
				memcpy(dst_data, orig_data, req_sz);
				orig_offset += req_sz;
				dst_offset += req_sz;
				if (orig_offset == local_pkt_next->len) {
					orig_offset = 0;
					local_pkt_next = local_pkt_next->next;
				}
				if (dst_offset == pkt_next->len)
					break;
			} while (!skb_queue_empty(&local_list));
		}
	}

exit:
	sg_init_table(sdiodev->sgtable.sgl, sdiodev->sgtable.orig_nents);
	while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
		brcmu_pkt_buf_free_skb(pkt_next);

	return ret;
}

int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
	struct sk_buff *mypkt;
	int err;

	mypkt = brcmu_pkt_buf_get_skb(nbytes);
	if (!mypkt) {
		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
			  nbytes);
		return -EIO;
	}

	err = brcmf_sdiod_recv_pkt(sdiodev, mypkt);
	if (!err)
		memcpy(buf, mypkt->data, nbytes);

	brcmu_pkt_buf_free_skb(mypkt);
	return err;
}

int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
{
	u32 addr = sdiodev->sbwad;
	int err = 0;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);

	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
	if (err)
		goto done;

	err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, pkt);

done:
	return err;
}
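/*
 * Receive path note (descriptive): brcmf_sdiod_recv_chain() below picks one
 * of three strategies depending on the queue and host capabilities: a single
 * skb goes through brcmf_sdiod_buffrw(), hosts without scatter-gather read
 * the whole glom into one bounce buffer and copy it out per skb, and
 * sg-capable hosts use brcmf_sdiod_sglist_rw() directly on the queue.
 */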
int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
			   struct sk_buff_head *pktq, uint totlen)
{
	struct sk_buff *glom_skb;
	struct sk_buff *skb;
	u32 addr = sdiodev->sbwad;
	int err = 0;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
		  addr, pktq->qlen);

	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
	if (err)
		goto done;

	if (pktq->qlen == 1)
		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
					 pktq->next);
	else if (!sdiodev->sg_support) {
		glom_skb = brcmu_pkt_buf_get_skb(totlen);
		if (!glom_skb)
			return -ENOMEM;
		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
					 glom_skb);
		if (err)
			goto done;

		skb_queue_walk(pktq, skb) {
			memcpy(skb->data, glom_skb->data, skb->len);
			skb_pull(glom_skb, skb->len);
		}
	} else
		err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, false, addr,
					    pktq);

done:
	return err;
}

int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
	struct sk_buff *mypkt;
	u32 addr = sdiodev->sbwad;
	int err;

	mypkt = brcmu_pkt_buf_get_skb(nbytes);
	if (!mypkt) {
		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
			  nbytes);
		return -EIO;
	}

	memcpy(mypkt->data, buf, nbytes);

	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);

	if (!err)
		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true, addr,
					 mypkt);

	brcmu_pkt_buf_free_skb(mypkt);
	return err;
}

int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
			 struct sk_buff_head *pktq)
{
	struct sk_buff *skb;
	u32 addr = sdiodev->sbwad;
	int err;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);

	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
	if (err)
		return err;

	if (pktq->qlen == 1 || !sdiodev->sg_support)
		skb_queue_walk(pktq, skb) {
			err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true,
						 addr, skb);
			if (err)
				break;
		}
	else
		err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, true, addr,
					    pktq);

	return err;
}
int
brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
		  u8 *data, uint size)
{
	int bcmerror = 0;
	struct sk_buff *pkt;
	u32 sdaddr;
	uint dsize;

	dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
	pkt = dev_alloc_skb(dsize);
	if (!pkt) {
		brcmf_err("dev_alloc_skb failed: len %d\n", dsize);
		return -EIO;
	}
	pkt->priority = 0;

	/* Determine initial transfer parameters */
	sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
	if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
		dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
	else
		dsize = size;

	sdio_claim_host(sdiodev->func[1]);

	/* Do the transfer(s) */
	while (size) {
		/* Set the backplane window to include the start address */
		bcmerror = brcmf_sdiod_set_sbaddr_window(sdiodev, address);
		if (bcmerror)
			break;

		brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
			  write ? "write" : "read", dsize,
			  sdaddr, address & SBSDIO_SBWINDOW_MASK);

		sdaddr &= SBSDIO_SB_OFT_ADDR_MASK;
		sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

		skb_put(pkt, dsize);
		if (write)
			memcpy(pkt->data, data, dsize);
		bcmerror = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_1, write,
					      sdaddr, pkt);
		if (bcmerror) {
			brcmf_err("membytes transfer failed\n");
			break;
		}
		if (!write)
			memcpy(data, pkt->data, dsize);
		skb_trim(pkt, 0);

		/* Adjust for next transfer (if any) */
		size -= dsize;
		if (size) {
			data += dsize;
			address += dsize;
			sdaddr = 0;
			dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
		}
	}

	dev_kfree_skb(pkt);

	/* Return the window to backplane enumeration space for core access */
	if (brcmf_sdiod_set_sbaddr_window(sdiodev, sdiodev->sbwad))
		brcmf_err("FAILED to set window back to 0x%x\n",
			  sdiodev->sbwad);

	sdio_release_host(sdiodev->func[1]);

	return bcmerror;
}

int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
{
	char t_func = (char)fn;

	brcmf_dbg(SDIO, "Enter\n");

	/* issue abort cmd52 command through F0 */
	brcmf_sdiod_request_data(sdiodev, SDIO_FUNC_0, SDIO_CCCR_ABORT,
				 sizeof(t_func), &t_func, true);

	brcmf_dbg(SDIO, "Exit\n");
	return 0;
}

void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
{
	struct sdio_func *func;
	struct mmc_host *host;
	uint max_blocks;
	uint nents;
	int err;

	func = sdiodev->func[2];
	host = func->card->host;
	sdiodev->sg_support = host->max_segs > 1;
	max_blocks = min_t(uint, host->max_blk_count, 511u);
	sdiodev->max_request_size = min_t(uint, host->max_req_size,
					  max_blocks * func->cur_blksize);
	sdiodev->max_segment_count = min_t(uint, host->max_segs,
					   SG_MAX_SINGLE_ALLOC);
	sdiodev->max_segment_size = host->max_seg_size;

	if (!sdiodev->sg_support)
		return;

	nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE,
		      sdiodev->bus_if->drvr->settings->sdiod_txglomsz);
	nents += (nents >> 4) + 1;

	WARN_ON(nents > sdiodev->max_segment_count);

	brcmf_dbg(TRACE, "nents=%d\n", nents);
	err = sg_alloc_table(&sdiodev->sgtable, nents, GFP_KERNEL);
	if (err < 0) {
		brcmf_err("allocation failed: disable scatter-gather");
		sdiodev->sg_support = false;
	}

	sdiodev->txglomsz = sdiodev->bus_if->drvr->settings->sdiod_txglomsz;
}
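/*
 * Suspend/resume freezer (descriptive note): at suspend time
 * brcmf_sdiod_freezer_on() raises the freezing flag and waits until every
 * registered bus thread has parked itself through brcmf_sdiod_try_freeze();
 * thread_count tracks registered threads and frozen_count how many have
 * parked. Parked threads block on the "resumed" completion until
 * brcmf_sdiod_freezer_off() runs during resume.
 */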
#ifdef CONFIG_PM_SLEEP
static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
{
	sdiodev->freezer = kzalloc(sizeof(*sdiodev->freezer), GFP_KERNEL);
	if (!sdiodev->freezer)
		return -ENOMEM;
	atomic_set(&sdiodev->freezer->thread_count, 0);
	atomic_set(&sdiodev->freezer->freezing, 0);
	init_waitqueue_head(&sdiodev->freezer->thread_freeze);
	init_completion(&sdiodev->freezer->resumed);
	return 0;
}

static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
{
	if (sdiodev->freezer) {
		WARN_ON(atomic_read(&sdiodev->freezer->freezing));
		kfree(sdiodev->freezer);
	}
}

static int brcmf_sdiod_freezer_on(struct brcmf_sdio_dev *sdiodev)
{
	atomic_t *expect = &sdiodev->freezer->thread_count;
	int res = 0;

	sdiodev->freezer->frozen_count = 0;
	reinit_completion(&sdiodev->freezer->resumed);
	atomic_set(&sdiodev->freezer->freezing, 1);
	brcmf_sdio_trigger_dpc(sdiodev->bus);
	wait_event(sdiodev->freezer->thread_freeze,
		   atomic_read(expect) == sdiodev->freezer->frozen_count);
	sdio_claim_host(sdiodev->func[1]);
	res = brcmf_sdio_sleep(sdiodev->bus, true);
	sdio_release_host(sdiodev->func[1]);
	return res;
}

static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev)
{
	sdio_claim_host(sdiodev->func[1]);
	brcmf_sdio_sleep(sdiodev->bus, false);
	sdio_release_host(sdiodev->func[1]);
	atomic_set(&sdiodev->freezer->freezing, 0);
	complete_all(&sdiodev->freezer->resumed);
}

bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
{
	return atomic_read(&sdiodev->freezer->freezing);
}

void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
{
	if (!brcmf_sdiod_freezing(sdiodev))
		return;
	sdiodev->freezer->frozen_count++;
	wake_up(&sdiodev->freezer->thread_freeze);
	wait_for_completion(&sdiodev->freezer->resumed);
}

void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
{
	atomic_inc(&sdiodev->freezer->thread_count);
}

void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
{
	atomic_dec(&sdiodev->freezer->thread_count);
}
#else
static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
{
	return 0;
}

static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
{
}
#endif /* CONFIG_PM_SLEEP */

static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
{
	sdiodev->state = BRCMF_SDIOD_DOWN;
	if (sdiodev->bus) {
		brcmf_sdio_remove(sdiodev->bus);
		sdiodev->bus = NULL;
	}

	brcmf_sdiod_freezer_detach(sdiodev);

	/* Disable Function 2 */
	sdio_claim_host(sdiodev->func[2]);
	sdio_disable_func(sdiodev->func[2]);
	sdio_release_host(sdiodev->func[2]);

	/* Disable Function 1 */
	sdio_claim_host(sdiodev->func[1]);
	sdio_disable_func(sdiodev->func[1]);
	sdio_release_host(sdiodev->func[1]);

	sg_free_table(&sdiodev->sgtable);
	sdiodev->sbwad = 0;

	pm_runtime_allow(sdiodev->func[1]->card->host->parent);
	return 0;
}

static void brcmf_sdiod_host_fixup(struct mmc_host *host)
{
	/* runtime-pm powers off the device */
	pm_runtime_forbid(host->parent);
	/* avoid removal detection upon resume */
	host->caps |= MMC_CAP_NONREMOVABLE;
}

static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
{
	int ret = 0;

	sdiodev->num_funcs = 2;

	sdio_claim_host(sdiodev->func[1]);

	ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
	if (ret) {
		brcmf_err("Failed to set F1 blocksize\n");
		sdio_release_host(sdiodev->func[1]);
		goto out;
	}
	ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
	if (ret) {
		brcmf_err("Failed to set F2 blocksize\n");
		sdio_release_host(sdiodev->func[1]);
		goto out;
	}

	/* increase F2 timeout */
	sdiodev->func[2]->enable_timeout = SDIO_WAIT_F2RDY;

	/* Enable Function 1 */
	ret = sdio_enable_func(sdiodev->func[1]);
	sdio_release_host(sdiodev->func[1]);
	if (ret) {
		brcmf_err("Failed to enable F1: err=%d\n", ret);
		goto out;
	}

	ret = brcmf_sdiod_freezer_attach(sdiodev);
	if (ret)
		goto out;

	/* try to attach to the target device */
	sdiodev->bus = brcmf_sdio_probe(sdiodev);
	if (!sdiodev->bus) {
		ret = -ENODEV;
		goto out;
	}
	brcmf_sdiod_host_fixup(sdiodev->func[2]->card->host);
out:
	if (ret)
		brcmf_sdiod_remove(sdiodev);

	return ret;
}

#define BRCMF_SDIO_DEVICE(dev_id)	\
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, dev_id)}

/* devices we support, null terminated */
static const struct sdio_device_id brcmf_sdmmc_ids[] = {
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43143),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43241),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4329),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4330),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4334),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
	{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);

static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;


static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev,
						  int val)
{
#if IS_ENABLED(CONFIG_ACPI)
	struct acpi_device *adev;

	adev = ACPI_COMPANION(dev);
	if (adev)
		adev->flags.power_manageable = 0;
#endif
}

static int brcmf_ops_sdio_probe(struct sdio_func *func,
				const struct sdio_device_id *id)
{
	int err;
	struct brcmf_sdio_dev *sdiodev;
	struct brcmf_bus *bus_if;
	struct device *dev;

	brcmf_dbg(SDIO, "Enter\n");
	brcmf_dbg(SDIO, "Class=%x\n", func->class);
	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
	brcmf_dbg(SDIO, "Function#: %d\n", func->num);

	dev = &func->dev;
	/* prohibit ACPI power management for this device */
	brcmf_sdiod_acpi_set_power_manageable(dev, 0);

	/* Consume func num 1 but don't do anything with it. */
	if (func->num == 1)
		return 0;

	/* Ignore anything but func 2 */
	if (func->num != 2)
		return -ENODEV;

	bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
	if (!bus_if)
		return -ENOMEM;
	sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
	if (!sdiodev) {
		kfree(bus_if);
		return -ENOMEM;
	}

	/* store refs to functions used. mmc_card does
	 * not hold the F0 function pointer.
	 */
	sdiodev->func[0] = kmemdup(func, sizeof(*func), GFP_KERNEL);
	sdiodev->func[0]->num = 0;
	sdiodev->func[1] = func->card->sdio_func[0];
	sdiodev->func[2] = func;

	sdiodev->bus_if = bus_if;
	bus_if->bus_priv.sdio = sdiodev;
	bus_if->proto_type = BRCMF_PROTO_BCDC;
	dev_set_drvdata(&func->dev, bus_if);
	dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
	sdiodev->dev = &sdiodev->func[1]->dev;
	sdiodev->pdata = brcmfmac_sdio_pdata;

	if (!sdiodev->pdata)
		brcmf_of_probe(sdiodev);

#ifdef CONFIG_PM_SLEEP
	/* wowl can be supported when KEEP_POWER is true and (WAKE_SDIO_IRQ
	 * is true or when platform data OOB irq is true).
	 */
	if ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_KEEP_POWER) &&
	    ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_WAKE_SDIO_IRQ) ||
	     (sdiodev->pdata && sdiodev->pdata->oob_irq_supported)))
		bus_if->wowl_supported = true;
#endif
	brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);

	brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
	err = brcmf_sdiod_probe(sdiodev);
	if (err) {
		brcmf_err("F2 error, probe failed %d...\n", err);
		goto fail;
	}

	brcmf_dbg(SDIO, "F2 init completed...\n");
	return 0;

fail:
	dev_set_drvdata(&func->dev, NULL);
	dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
	kfree(sdiodev->func[0]);
	kfree(sdiodev);
	kfree(bus_if);
	return err;
}

static void brcmf_ops_sdio_remove(struct sdio_func *func)
{
	struct brcmf_bus *bus_if;
	struct brcmf_sdio_dev *sdiodev;

	brcmf_dbg(SDIO, "Enter\n");
	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
	brcmf_dbg(SDIO, "Function: %d\n", func->num);

	if (func->num != 1)
		return;

	bus_if = dev_get_drvdata(&func->dev);
	if (bus_if) {
		sdiodev = bus_if->bus_priv.sdio;
		brcmf_sdiod_remove(sdiodev);

		dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
		dev_set_drvdata(&sdiodev->func[2]->dev, NULL);

		kfree(bus_if);
		kfree(sdiodev->func[0]);
		kfree(sdiodev);
	}

	brcmf_dbg(SDIO, "Exit\n");
}

void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled);
	sdiodev->wowl_enabled = enabled;
}

#ifdef CONFIG_PM_SLEEP
static int brcmf_ops_sdio_suspend(struct device *dev)
{
	struct sdio_func *func;
	struct brcmf_bus *bus_if;
	struct brcmf_sdio_dev *sdiodev;
	mmc_pm_flag_t sdio_flags;

	func = container_of(dev, struct sdio_func, dev);
	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
	if (func->num != SDIO_FUNC_1)
		return 0;

	bus_if = dev_get_drvdata(dev);
	sdiodev = bus_if->bus_priv.sdio;

	brcmf_sdiod_freezer_on(sdiodev);
	brcmf_sdio_wd_timer(sdiodev->bus, 0);

	sdio_flags = MMC_PM_KEEP_POWER;
	if (sdiodev->wowl_enabled) {
		if (sdiodev->pdata->oob_irq_supported)
			enable_irq_wake(sdiodev->pdata->oob_irq_nr);
		else
			sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
	}
	if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
		brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
	return 0;
}

static int brcmf_ops_sdio_resume(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct sdio_func *func = container_of(dev, struct sdio_func, dev);

	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
	if (func->num != SDIO_FUNC_2)
		return 0;

	brcmf_sdiod_freezer_off(sdiodev);
	return 0;
}

static const struct dev_pm_ops brcmf_sdio_pm_ops = {
	.suspend	= brcmf_ops_sdio_suspend,
	.resume		= brcmf_ops_sdio_resume,
};
#endif /* CONFIG_PM_SLEEP */
static struct sdio_driver brcmf_sdmmc_driver = {
	.probe = brcmf_ops_sdio_probe,
	.remove = brcmf_ops_sdio_remove,
	.name = BRCMFMAC_SDIO_PDATA_NAME,
	.id_table = brcmf_sdmmc_ids,
	.drv = {
		.owner = THIS_MODULE,
#ifdef CONFIG_PM_SLEEP
		.pm = &brcmf_sdio_pm_ops,
#endif	/* CONFIG_PM_SLEEP */
	},
};

static int __init brcmf_sdio_pd_probe(struct platform_device *pdev)
{
	brcmf_dbg(SDIO, "Enter\n");

	brcmfmac_sdio_pdata = dev_get_platdata(&pdev->dev);

	if (brcmfmac_sdio_pdata->power_on)
		brcmfmac_sdio_pdata->power_on();

	return 0;
}

static int brcmf_sdio_pd_remove(struct platform_device *pdev)
{
	brcmf_dbg(SDIO, "Enter\n");

	if (brcmfmac_sdio_pdata->power_off)
		brcmfmac_sdio_pdata->power_off();

	sdio_unregister_driver(&brcmf_sdmmc_driver);

	return 0;
}

static struct platform_driver brcmf_sdio_pd = {
	.remove		= brcmf_sdio_pd_remove,
	.driver		= {
		.name	= BRCMFMAC_SDIO_PDATA_NAME,
	}
};

void brcmf_sdio_register(void)
{
	int ret;

	ret = sdio_register_driver(&brcmf_sdmmc_driver);
	if (ret)
		brcmf_err("sdio_register_driver failed: %d\n", ret);
}

void brcmf_sdio_exit(void)
{
	brcmf_dbg(SDIO, "Enter\n");

	if (brcmfmac_sdio_pdata)
		platform_driver_unregister(&brcmf_sdio_pd);
	else
		sdio_unregister_driver(&brcmf_sdmmc_driver);
}

void __init brcmf_sdio_init(void)
{
	int ret;

	brcmf_dbg(SDIO, "Enter\n");

	ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe);
	if (ret == -ENODEV)
		brcmf_dbg(SDIO, "No platform data available.\n");
}