Lines Matching +full:no +full:- +full:sdio (ath10k SDIO driver; each entry shows the source line number, the matched code, and the enclosing function)

1 // SPDX-License-Identifier: ISC
3 * Copyright (c) 2004-2011 Atheros Communications Inc.
4 * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
5 * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
6 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
15 #include <linux/mmc/sdio.h>
26 #include "sdio.h"
38 return __ALIGN_MASK((len), ar_sdio->mbox_info.block_mask); in ath10k_sdio_calc_txrx_padded_len()
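
A minimal standalone sketch of the padding arithmetic above: __ALIGN_MASK()
rounds a length up to the next multiple of (mask + 1), and the mbox block
mask is block_size - 1 (a 256-byte block size is assumed here; the real
value comes from mbox_info):

#include <stdio.h>

#define ALIGN_MASK(x, mask) (((x) + (mask)) & ~(unsigned int)(mask))

int main(void)
{
        unsigned int block_size = 256;            /* assumed block size */
        unsigned int block_mask = block_size - 1; /* 0xff */
        unsigned int lens[] = { 1, 255, 256, 257, 1500 };

        for (int i = 0; i < 5; i++)               /* e.g. 257 -> 512 */
                printf("len %4u -> padded %4u\n",
                       lens[i], ALIGN_MASK(lens[i], block_mask));
        return 0;
}
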
48 dev_kfree_skb(pkt->skb); in ath10k_sdio_mbox_free_rx_pkt()
49 pkt->skb = NULL; in ath10k_sdio_mbox_free_rx_pkt()
50 pkt->alloc_len = 0; in ath10k_sdio_mbox_free_rx_pkt()
51 pkt->act_len = 0; in ath10k_sdio_mbox_free_rx_pkt()
52 pkt->trailer_only = false; in ath10k_sdio_mbox_free_rx_pkt()
60 pkt->skb = dev_alloc_skb(full_len); in ath10k_sdio_mbox_alloc_rx_pkt()
61 if (!pkt->skb) in ath10k_sdio_mbox_alloc_rx_pkt()
62 return -ENOMEM; in ath10k_sdio_mbox_alloc_rx_pkt()
64 pkt->act_len = act_len; in ath10k_sdio_mbox_alloc_rx_pkt()
65 pkt->alloc_len = full_len; in ath10k_sdio_mbox_alloc_rx_pkt()
66 pkt->part_of_bundle = part_of_bundle; in ath10k_sdio_mbox_alloc_rx_pkt()
67 pkt->last_in_bundle = last_in_bundle; in ath10k_sdio_mbox_alloc_rx_pkt()
68 pkt->trailer_only = false; in ath10k_sdio_mbox_alloc_rx_pkt()
77 (struct ath10k_htc_hdr *)pkt->skb->data; in is_trailer_only_msg()
78 u16 len = __le16_to_cpu(htc_hdr->len); in is_trailer_only_msg()
80 if (len == htc_hdr->trailer_len) in is_trailer_only_msg()
86 /* sdio/mmc functions */
111 return mmc_wait_for_cmd(card->host, &io_cmd, 0); in ath10k_sdio_func0_cmd52_wr_byte()
126 ret = mmc_wait_for_cmd(card->host, &io_cmd, 0); in ath10k_sdio_func0_cmd52_rd_byte()
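
Both helpers issue a raw CMD52 (SD_IO_RW_DIRECT) so they can reach
function-0 registers directly. A sketch of the argument word such helpers
have to pack by hand; the field positions follow the SDIO spec, and the
register address in main() is purely illustrative:

#include <stdint.h>
#include <stdio.h>

/* CMD52 argument: [31] R/W flag, [30:28] function number,
 * [27] RAW (read-after-write), [25:9] register address,
 * [7:0] write data (stuff bits on a read).
 */
static uint32_t cmd52_arg(int write, unsigned int fn,
                          unsigned int addr, uint8_t data)
{
        return ((uint32_t)(write ? 1 : 0) << 31) |
               ((fn & 0x7u) << 28) |
               ((addr & 0x1ffffu) << 9) |
               data;
}

int main(void)
{
        /* illustrative: write 0x02 to function-0 register 0xf0 */
        printf("CMD52 arg = 0x%08x\n",
               (unsigned int)cmd52_arg(1, 0, 0xf0, 0x02));
        return 0;
}
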
136 struct sdio_func *func = ar_sdio->func; in ath10k_sdio_config()
140 ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio configuration\n"); in ath10k_sdio_config()
145 ret = ath10k_sdio_func0_cmd52_rd_byte(func->card, in ath10k_sdio_config()
153 ret = ath10k_sdio_func0_cmd52_wr_byte(func->card, in ath10k_sdio_config()
159 func->card, in ath10k_sdio_config()
167 ret = ath10k_sdio_func0_cmd52_wr_byte(func->card, in ath10k_sdio_config()
176 ret = ath10k_sdio_func0_cmd52_rd_byte(func->card, in ath10k_sdio_config()
182 ret = ath10k_sdio_func0_cmd52_wr_byte(func->card, in ath10k_sdio_config()
186 ath10k_warn(ar, "failed to enable 4-bit async irq mode: %d\n", in ath10k_sdio_config()
192 ret = ath10k_sdio_func0_cmd52_rd_byte(func->card, in ath10k_sdio_config()
199 ret = ath10k_sdio_func0_cmd52_wr_byte(func->card, in ath10k_sdio_config()
204 func->enable_timeout = 100; in ath10k_sdio_config()
206 ret = sdio_set_block_size(func, ar_sdio->mbox_info.block_size); in ath10k_sdio_config()
208 ath10k_warn(ar, "failed to set sdio block size to %d: %d\n", in ath10k_sdio_config()
209 ar_sdio->mbox_info.block_size, ret); in ath10k_sdio_config()
221 struct sdio_func *func = ar_sdio->func; in ath10k_sdio_write32()
233 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write32 addr 0x%x val 0x%x\n", in ath10k_sdio_write32()
245 struct sdio_func *func = ar_sdio->func; in ath10k_sdio_writesb32()
251 return -ENOMEM; in ath10k_sdio_writesb32()
264 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio writesb32 addr 0x%x val 0x%x\n", in ath10k_sdio_writesb32()
278 struct sdio_func *func = ar_sdio->func; in ath10k_sdio_read32()
289 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read32 addr 0x%x val 0x%x\n", in ath10k_sdio_read32()
301 struct sdio_func *func = ar_sdio->func; in ath10k_sdio_read()
313 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read addr 0x%x buf 0x%p len %zu\n", in ath10k_sdio_read()
315 ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio read ", buf, len); in ath10k_sdio_read()
326 struct sdio_func *func = ar_sdio->func; in ath10k_sdio_write()
341 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write addr 0x%x buf 0x%p len %zu\n", in ath10k_sdio_write()
343 ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio write ", buf, len); in ath10k_sdio_write()
354 struct sdio_func *func = ar_sdio->func; in ath10k_sdio_readsb()
359 len = round_down(len, ar_sdio->mbox_info.block_size); in ath10k_sdio_readsb()
368 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio readsb addr 0x%x buf 0x%p len %zu\n", in ath10k_sdio_readsb()
370 ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio readsb ", buf, len); in ath10k_sdio_readsb()
385 struct ath10k_htc *htc = &ar->htc; in ath10k_sdio_mbox_rx_process_packet()
386 struct sk_buff *skb = pkt->skb; in ath10k_sdio_mbox_rx_process_packet()
387 struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data; in ath10k_sdio_mbox_rx_process_packet()
388 bool trailer_present = htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT; in ath10k_sdio_mbox_rx_process_packet()
394 trailer = skb->data + skb->len - htc_hdr->trailer_len; in ath10k_sdio_mbox_rx_process_packet()
396 eid = pipe_id_to_eid(htc_hdr->eid); in ath10k_sdio_mbox_rx_process_packet()
400 htc_hdr->trailer_len, in ath10k_sdio_mbox_rx_process_packet()
408 pkt->trailer_only = true; in ath10k_sdio_mbox_rx_process_packet()
410 skb_trim(skb, skb->len - htc_hdr->trailer_len); in ath10k_sdio_mbox_rx_process_packet()
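
The trailer handling above tests whether a message is nothing but trailer
(payload length equal to trailer_len) and then trims the trailer off the
tail. A self-contained sketch of both steps, using a simplified header and
an assumed flag value (the real struct ath10k_htc_hdr is little-endian and
carries more fields):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct htc_hdr {                   /* simplified, assumed layout */
        uint8_t  eid;
        uint8_t  flags;
        uint16_t len;              /* payload length, header excluded */
        uint8_t  trailer_len;
};

#define FLAG_TRAILER_PRESENT 0x02  /* assumed flag value */

int main(void)
{
        struct htc_hdr hdr = {
                .eid = 1, .flags = FLAG_TRAILER_PRESENT,
                .len = 12, .trailer_len = 12,   /* trailer-only message */
        };
        size_t msg_len = sizeof(hdr) + hdr.len;

        bool trailer_only = (hdr.flags & FLAG_TRAILER_PRESENT) &&
                            hdr.len == hdr.trailer_len;
        if (hdr.flags & FLAG_TRAILER_PRESENT)
                msg_len -= hdr.trailer_len;     /* the skb_trim() analogue */

        printf("trailer_only=%d remaining=%zu\n", trailer_only, msg_len);
        return 0;
}
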
423 struct ath10k_htc *htc = &ar->htc; in ath10k_sdio_mbox_rx_process_packets()
432 for (i = 0; i < ar_sdio->n_rx_pkts; i++) { in ath10k_sdio_mbox_rx_process_packets()
437 &lookaheads[lookahead_idx++])->eid; in ath10k_sdio_mbox_rx_process_packets()
440 ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n", in ath10k_sdio_mbox_rx_process_packets()
442 ret = -ENOMEM; in ath10k_sdio_mbox_rx_process_packets()
446 ep = &htc->endpoint[id]; in ath10k_sdio_mbox_rx_process_packets()
448 if (ep->service_id == 0) { in ath10k_sdio_mbox_rx_process_packets()
450 ret = -ENOMEM; in ath10k_sdio_mbox_rx_process_packets()
454 pkt = &ar_sdio->rx_pkts[i]; in ath10k_sdio_mbox_rx_process_packets()
456 if (pkt->part_of_bundle && !pkt->last_in_bundle) { in ath10k_sdio_mbox_rx_process_packets()
460 lookahead_idx--; in ath10k_sdio_mbox_rx_process_packets()
472 if (!pkt->trailer_only) { in ath10k_sdio_mbox_rx_process_packets()
473 cb = ATH10K_SKB_RXCB(pkt->skb); in ath10k_sdio_mbox_rx_process_packets()
474 cb->eid = id; in ath10k_sdio_mbox_rx_process_packets()
476 skb_queue_tail(&ar_sdio->rx_head, pkt->skb); in ath10k_sdio_mbox_rx_process_packets()
477 queue_work(ar->workqueue_aux, in ath10k_sdio_mbox_rx_process_packets()
478 &ar_sdio->async_work_rx); in ath10k_sdio_mbox_rx_process_packets()
480 kfree_skb(pkt->skb); in ath10k_sdio_mbox_rx_process_packets()
484 pkt->skb = NULL; in ath10k_sdio_mbox_rx_process_packets()
485 pkt->alloc_len = 0; in ath10k_sdio_mbox_rx_process_packets()
494 for (; i < ar_sdio->n_rx_pkts; i++) in ath10k_sdio_mbox_rx_process_packets()
495 ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]); in ath10k_sdio_mbox_rx_process_packets()
507 u8 max_msgs = ar->htc.max_msgs_per_htc_bundle; in ath10k_sdio_mbox_alloc_bundle()
509 *bndl_cnt = ath10k_htc_get_bundle_count(max_msgs, htc_hdr->flags); in ath10k_sdio_mbox_alloc_bundle()
514 le16_to_cpu(htc_hdr->len), in ath10k_sdio_mbox_alloc_bundle()
516 return -ENOMEM; in ath10k_sdio_mbox_alloc_bundle()
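
ath10k_htc_get_bundle_count() recovers the number of follow-on messages in
the bundle from the HTC flags byte; classically the count sits in the upper
nibble. A hedged sketch of that extraction (mask assumed, and the extended
count handling used by newer firmware is omitted):

#include <stdint.h>
#include <stdio.h>

#define HTC_FLAG_BUNDLE_MASK 0xf0u  /* assumed: count in bits 7:4 */

static unsigned int bundle_count(uint8_t flags)
{
        return (flags & HTC_FLAG_BUNDLE_MASK) >> 4;
}

int main(void)
{
        printf("flags 0x30 -> %u bundled msgs\n", bundle_count(0x30));
        return 0;
}
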
551 ret = -ENOMEM; in ath10k_sdio_mbox_rx_alloc()
559 if (le16_to_cpu(htc_hdr->len) > ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) { in ath10k_sdio_mbox_rx_alloc()
561 le16_to_cpu(htc_hdr->len), in ath10k_sdio_mbox_rx_alloc()
563 ret = -ENOMEM; in ath10k_sdio_mbox_rx_alloc()
571 act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr); in ath10k_sdio_mbox_rx_alloc()
576 htc_hdr->eid, htc_hdr->flags, in ath10k_sdio_mbox_rx_alloc()
577 le16_to_cpu(htc_hdr->len)); in ath10k_sdio_mbox_rx_alloc()
578 ret = -EINVAL; in ath10k_sdio_mbox_rx_alloc()
583 ar->htc.max_msgs_per_htc_bundle, htc_hdr->flags)) { in ath10k_sdio_mbox_rx_alloc()
591 &ar_sdio->rx_pkts[pkt_cnt], in ath10k_sdio_mbox_rx_alloc()
613 if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK) in ath10k_sdio_mbox_rx_alloc()
616 ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[pkt_cnt], in ath10k_sdio_mbox_rx_alloc()
629 ar_sdio->n_rx_pkts = pkt_cnt; in ath10k_sdio_mbox_rx_alloc()
635 if (!ar_sdio->rx_pkts[i].alloc_len) in ath10k_sdio_mbox_rx_alloc()
637 ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]); in ath10k_sdio_mbox_rx_alloc()
646 struct ath10k_sdio_rx_data *pkt = &ar_sdio->rx_pkts[0]; in ath10k_sdio_mbox_rx_fetch()
647 struct sk_buff *skb = pkt->skb; in ath10k_sdio_mbox_rx_fetch()
651 ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr, in ath10k_sdio_mbox_rx_fetch()
652 skb->data, pkt->alloc_len); in ath10k_sdio_mbox_rx_fetch()
656 htc_hdr = (struct ath10k_htc_hdr *)skb->data; in ath10k_sdio_mbox_rx_fetch()
657 pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr); in ath10k_sdio_mbox_rx_fetch()
659 if (pkt->act_len > pkt->alloc_len) { in ath10k_sdio_mbox_rx_fetch()
660 ret = -EINVAL; in ath10k_sdio_mbox_rx_fetch()
664 skb_put(skb, pkt->act_len); in ath10k_sdio_mbox_rx_fetch()
668 ar_sdio->n_rx_pkts = 0; in ath10k_sdio_mbox_rx_fetch()
683 for (i = 0; i < ar_sdio->n_rx_pkts; i++) in ath10k_sdio_mbox_rx_fetch_bundle()
684 virt_pkt_len += ar_sdio->rx_pkts[i].alloc_len; in ath10k_sdio_mbox_rx_fetch_bundle()
687 ath10k_warn(ar, "sdio vsg buffer size limit: %d\n", virt_pkt_len); in ath10k_sdio_mbox_rx_fetch_bundle()
688 ret = -E2BIG; in ath10k_sdio_mbox_rx_fetch_bundle()
692 ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr, in ath10k_sdio_mbox_rx_fetch_bundle()
693 ar_sdio->vsg_buffer, virt_pkt_len); in ath10k_sdio_mbox_rx_fetch_bundle()
700 for (i = 0; i < ar_sdio->n_rx_pkts; i++) { in ath10k_sdio_mbox_rx_fetch_bundle()
701 pkt = &ar_sdio->rx_pkts[i]; in ath10k_sdio_mbox_rx_fetch_bundle()
702 htc_hdr = (struct ath10k_htc_hdr *)(ar_sdio->vsg_buffer + pkt_offset); in ath10k_sdio_mbox_rx_fetch_bundle()
703 pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr); in ath10k_sdio_mbox_rx_fetch_bundle()
705 if (pkt->act_len > pkt->alloc_len) { in ath10k_sdio_mbox_rx_fetch_bundle()
706 ret = -EINVAL; in ath10k_sdio_mbox_rx_fetch_bundle()
710 skb_put_data(pkt->skb, htc_hdr, pkt->act_len); in ath10k_sdio_mbox_rx_fetch_bundle()
711 pkt_offset += pkt->alloc_len; in ath10k_sdio_mbox_rx_fetch_bundle()
718 for (i = 0; i < ar_sdio->n_rx_pkts; i++) in ath10k_sdio_mbox_rx_fetch_bundle()
719 ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]); in ath10k_sdio_mbox_rx_fetch_bundle()
721 ar_sdio->n_rx_pkts = 0; in ath10k_sdio_mbox_rx_fetch_bundle()
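
Bundled receive pulls the whole bundle in one sdio_readsb() into the shared
vsg buffer and then walks it: each packet occupies its padded alloc_len in
the staging buffer, but only act_len bytes (header plus payload) are copied
out to its skb. A sketch of that unwrapping loop over plain buffers,
assuming a little-endian host (the driver uses le16_to_cpu()):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct htc_hdr { uint8_t eid, flags; uint16_t len; }; /* simplified */
struct rx_pkt { uint8_t buf[256]; size_t alloc_len, act_len; };

static int unwrap_bundle(const uint8_t *vsg, struct rx_pkt *pkts, int n)
{
        size_t off = 0;

        for (int i = 0; i < n; i++) {
                struct htc_hdr hdr;

                memcpy(&hdr, vsg + off, sizeof(hdr));
                pkts[i].act_len = hdr.len + sizeof(hdr);
                if (pkts[i].act_len > pkts[i].alloc_len)
                        return -1;                /* -EINVAL above */
                memcpy(pkts[i].buf, vsg + off, pkts[i].act_len);
                off += pkts[i].alloc_len;         /* skip the padding */
        }
        return 0;
}

int main(void)
{
        uint8_t vsg[512] = { 0 };
        struct rx_pkt pkts[2] = { { .alloc_len = 256 }, { .alloc_len = 256 } };
        struct htc_hdr a = { .eid = 2, .len = 100 };
        struct htc_hdr b = { .eid = 2, .len = 60 };

        memcpy(vsg, &a, sizeof(a));
        memcpy(vsg + 256, &b, sizeof(b));

        printf("unwrap=%d act0=%zu act1=%zu\n",
               unwrap_bundle(vsg, pkts, 2), pkts[0].act_len, pkts[1].act_len);
        return 0;
}
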
726 /* This is the timeout for mailbox processing done in the sdio irq
727 * handler. The timeout is deliberately set quite high since SDIO dump logs
759 if (ar_sdio->n_rx_pkts >= 2) in ath10k_sdio_mbox_rxmsg_pending_handler()
761 * re-check again. in ath10k_sdio_mbox_rxmsg_pending_handler()
765 if (ar_sdio->n_rx_pkts > 1) in ath10k_sdio_mbox_rxmsg_pending_handler()
784 * flag that we should re-check IRQ status registers again in ath10k_sdio_mbox_rxmsg_pending_handler()
791 if (ret && (ret != -ECANCELED)) in ath10k_sdio_mbox_rxmsg_pending_handler()
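
The pending handler keeps alternating between fetch and process for as long
as lookaheads keep arriving, bounded by the deliberately generous deadline
described above, and a fetch failing with -ECANCELED is tolerated rather
than treated as fatal. A rough sketch of that loop shape, with wall-clock
time standing in for jiffies:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* stand-in for one alloc + fetch + process pass */
static int process_once(bool *done)
{
        *done = true;           /* pretend the mailbox drained */
        return 0;
}

int main(void)
{
        time_t deadline = time(NULL) + 2;   /* assumed processing budget */
        bool done = false;

        while (!done && time(NULL) < deadline)
                if (process_once(&done) < 0)
                        break;              /* upstream forgives -ECANCELED */

        printf("done=%d timed_out=%d\n", done, !done);
        return 0;
}
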
819 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; in ath10k_sdio_mbox_proc_counter_intr()
823 mutex_lock(&irq_data->mtx); in ath10k_sdio_mbox_proc_counter_intr()
824 counter_int_status = irq_data->irq_proc_reg->counter_int_status & in ath10k_sdio_mbox_proc_counter_intr()
825 irq_data->irq_en_reg->cntr_int_status_en; in ath10k_sdio_mbox_proc_counter_intr()
836 mutex_unlock(&irq_data->mtx); in ath10k_sdio_mbox_proc_counter_intr()
844 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; in ath10k_sdio_mbox_proc_err_intr()
848 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio error interrupt\n"); in ath10k_sdio_mbox_proc_err_intr()
850 error_int_status = irq_data->irq_proc_reg->error_int_status & 0x0F; in ath10k_sdio_mbox_proc_err_intr()
854 return -EIO; in ath10k_sdio_mbox_proc_err_intr()
858 "sdio error_int_status 0x%x\n", error_int_status); in ath10k_sdio_mbox_proc_err_intr()
862 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio interrupt error wakeup\n"); in ath10k_sdio_mbox_proc_err_intr()
873 irq_data->irq_proc_reg->error_int_status &= ~error_int_status; in ath10k_sdio_mbox_proc_err_intr()
890 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; in ath10k_sdio_mbox_proc_cpu_intr()
894 mutex_lock(&irq_data->mtx); in ath10k_sdio_mbox_proc_cpu_intr()
895 cpu_int_status = irq_data->irq_proc_reg->cpu_int_status & in ath10k_sdio_mbox_proc_cpu_intr()
896 irq_data->irq_en_reg->cpu_int_status_en; in ath10k_sdio_mbox_proc_cpu_intr()
899 ret = -EIO; in ath10k_sdio_mbox_proc_cpu_intr()
904 irq_data->irq_proc_reg->cpu_int_status &= ~cpu_int_status; in ath10k_sdio_mbox_proc_cpu_intr()
907 * this is done to make the access 4-byte aligned to mitigate issues in ath10k_sdio_mbox_proc_cpu_intr()
909 * be a multiple of 4-bytes. in ath10k_sdio_mbox_proc_cpu_intr()
922 mutex_unlock(&irq_data->mtx); in ath10k_sdio_mbox_proc_cpu_intr()
934 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; in ath10k_sdio_mbox_read_int_status()
935 struct ath10k_sdio_irq_proc_regs *irq_proc_reg = irq_data->irq_proc_reg; in ath10k_sdio_mbox_read_int_status()
936 struct ath10k_sdio_irq_enable_regs *irq_en_reg = irq_data->irq_en_reg; in ath10k_sdio_mbox_read_int_status()
940 mutex_lock(&irq_data->mtx); in ath10k_sdio_mbox_read_int_status()
951 if (!irq_en_reg->int_status_en) { in ath10k_sdio_mbox_read_int_status()
970 *host_int_status = irq_proc_reg->host_int_status & in ath10k_sdio_mbox_read_int_status()
971 irq_en_reg->int_status_en; in ath10k_sdio_mbox_read_int_status()
984 if (irq_proc_reg->rx_lookahead_valid & htc_mbox) { in ath10k_sdio_mbox_read_int_status()
986 irq_proc_reg->rx_lookahead[ATH10K_HTC_MAILBOX]); in ath10k_sdio_mbox_read_int_status()
988 ath10k_warn(ar, "sdio mbox lookahead is zero\n"); in ath10k_sdio_mbox_read_int_status()
992 mutex_unlock(&irq_data->mtx); in ath10k_sdio_mbox_read_int_status()
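
The rx lookahead register mirrors the first four bytes of the next HTC
header waiting in the mailbox, which is what lets the host size and sanity
check the read before issuing it. A sketch of peeking endpoint and length
out of a little-endian lookahead word (byte layout assumed to match the
HTC header: eid, flags, then a 16-bit length):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t lookahead = 0x00800102; /* eid 2, flags 0x01, len 128 */

        unsigned int eid   = lookahead & 0xff;
        unsigned int flags = (lookahead >> 8) & 0xff;
        unsigned int len   = (lookahead >> 16) & 0xffff;

        printf("eid %u flags 0x%02x len %u\n", eid, flags, len);
        return 0;
}
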
1025 "sdio pending mailbox msg lookahead 0x%08x\n", in ath10k_sdio_mbox_proc_pending_irqs()
1037 "sdio host_int_status 0x%x\n", host_int_status); in ath10k_sdio_mbox_proc_pending_irqs()
1061 * unnecessarily which can re-wake the target, if upper layers in ath10k_sdio_mbox_proc_pending_irqs()
1062 * determine that we are in a low-throughput mode, we can rely on in ath10k_sdio_mbox_proc_pending_irqs()
1063 * taking another interrupt rather than re-checking the status in ath10k_sdio_mbox_proc_pending_irqs()
1064 * registers which can re-wake the target. in ath10k_sdio_mbox_proc_pending_irqs()
1073 "sdio pending irqs done %d status %d", in ath10k_sdio_mbox_proc_pending_irqs()
1082 struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info; in ath10k_sdio_set_mbox_info()
1083 u16 device = ar_sdio->func->device, dev_id_base, dev_id_chiprev; in ath10k_sdio_set_mbox_info()
1085 mbox_info->htc_addr = ATH10K_HIF_MBOX_BASE_ADDR; in ath10k_sdio_set_mbox_info()
1086 mbox_info->block_size = ATH10K_HIF_MBOX_BLOCK_SIZE; in ath10k_sdio_set_mbox_info()
1087 mbox_info->block_mask = ATH10K_HIF_MBOX_BLOCK_SIZE - 1; in ath10k_sdio_set_mbox_info()
1088 mbox_info->gmbox_addr = ATH10K_HIF_GMBOX_BASE_ADDR; in ath10k_sdio_set_mbox_info()
1089 mbox_info->gmbox_sz = ATH10K_HIF_GMBOX_WIDTH; in ath10k_sdio_set_mbox_info()
1091 mbox_info->ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR; in ath10k_sdio_set_mbox_info()
1098 mbox_info->ext_info[0].htc_ext_sz = in ath10k_sdio_set_mbox_info()
1104 mbox_info->ext_info[0].htc_ext_sz = in ath10k_sdio_set_mbox_info()
1108 mbox_info->ext_info[0].htc_ext_sz = in ath10k_sdio_set_mbox_info()
1112 mbox_info->ext_info[0].htc_ext_sz = in ath10k_sdio_set_mbox_info()
1116 mbox_info->ext_info[1].htc_ext_addr = in ath10k_sdio_set_mbox_info()
1117 mbox_info->ext_info[0].htc_ext_addr + in ath10k_sdio_set_mbox_info()
1118 mbox_info->ext_info[0].htc_ext_sz + in ath10k_sdio_set_mbox_info()
1120 mbox_info->ext_info[1].htc_ext_sz = ATH10K_HIF_MBOX1_EXT_WIDTH; in ath10k_sdio_set_mbox_info()
1137 /* Hit the credit counter with a 4-byte access, the first byte in ath10k_sdio_bmi_credits()
1139 * remaining 3 bytes has no effect. The rationale behind this in ath10k_sdio_bmi_credits()
1140 * is to make all HIF accesses 4-byte aligned. in ath10k_sdio_bmi_credits()
1158 return -ETIMEDOUT; in ath10k_sdio_bmi_credits()
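
BMI flow control polls a byte-wide credit counter, read with a 4-byte
access for the alignment reasons the comment above spells out; only the
low byte is meaningful, and the poll gives up with -ETIMEDOUT. A sketch of
the pattern against a fake register:

#include <stdint.h>
#include <stdio.h>

static int polls;

/* fake 4-byte register read; the low byte carries the credit count */
static uint32_t read32_credits(void)
{
        return (++polls >= 3) ? 0x00000001u : 0x00000000u;
}

int main(void)
{
        unsigned int credits = 0;

        for (int tries = 0; tries < 100 && !credits; tries++)
                credits = read32_credits() & 0xff; /* keep the low byte */

        if (!credits) {
                printf("timed out\n");             /* -ETIMEDOUT upstream */
                return 1;
        }
        printf("got %u credit(s) after %d polls\n", credits, polls);
        return 0;
}
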
1188 return -EINVAL; in ath10k_sdio_bmi_get_rx_lookahead()
1207 addr = ar_sdio->mbox_info.htc_addr; in ath10k_sdio_bmi_exchange_msg()
1209 memcpy(ar_sdio->bmi_buf, req, req_len); in ath10k_sdio_bmi_exchange_msg()
1210 ret = ath10k_sdio_write(ar, addr, ar_sdio->bmi_buf, req_len); in ath10k_sdio_bmi_exchange_msg()
1220 /* No response expected */ in ath10k_sdio_bmi_exchange_msg()
1230 * In particular, this avoids SDIO timeouts and possibly garbage in ath10k_sdio_bmi_exchange_msg()
1232 * such as Compact Flash (as well as some SDIO masters) which in ath10k_sdio_bmi_exchange_msg()
1242 * not occur in practice -- they're supported for debug/development. in ath10k_sdio_bmi_exchange_msg()
1263 * If BMI_EXECUTE ever needs to support longer-latency execution, in ath10k_sdio_bmi_exchange_msg()
1273 addr = ar_sdio->mbox_info.htc_addr; in ath10k_sdio_bmi_exchange_msg()
1274 ret = ath10k_sdio_read(ar, addr, ar_sdio->bmi_buf, *resp_len); in ath10k_sdio_bmi_exchange_msg()
1282 memcpy(resp, ar_sdio->bmi_buf, *resp_len); in ath10k_sdio_bmi_exchange_msg()
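
Put together, a BMI exchange is: copy the request into the driver's
preallocated bounce buffer, write it to the mailbox address, and, unless no
response is expected, poll for credits and a lookahead before reading the
response back through the same buffer. A compact sketch of that sequence
with hypothetical bus helpers standing in for ath10k_sdio_write()/read():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t bmi_buf[256];   /* stand-in for ar_sdio->bmi_buf */

static int bus_write(uint32_t addr, const void *buf, size_t len)
{
        (void)addr; (void)buf; (void)len;
        return 0;
}

static int bus_read(uint32_t addr, void *buf, size_t len)
{
        (void)addr;
        memset(buf, 0, len);
        return 0;
}

static int bmi_exchange(uint32_t mbox_addr, const void *req, size_t req_len,
                        void *resp, size_t *resp_len)
{
        memcpy(bmi_buf, req, req_len);   /* bounce via bmi_buf */
        if (bus_write(mbox_addr, bmi_buf, req_len))
                return -1;

        if (!resp || !resp_len)          /* no response expected */
                return 0;

        /* upstream polls credits and an rx lookahead at this point */
        if (bus_read(mbox_addr, bmi_buf, *resp_len))
                return -1;
        memcpy(resp, bmi_buf, *resp_len);
        return 0;
}

int main(void)
{
        uint8_t req[4] = { 1, 0, 0, 0 }, resp[4];
        size_t resp_len = sizeof(resp);

        printf("exchange=%d\n",
               bmi_exchange(0x1000, req, sizeof(req), resp, &resp_len));
        return 0;
}
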
1287 /* sdio async handling functions */
1295 spin_lock_bh(&ar_sdio->lock); in ath10k_sdio_alloc_busreq()
1297 if (list_empty(&ar_sdio->bus_req_freeq)) { in ath10k_sdio_alloc_busreq()
1302 bus_req = list_first_entry(&ar_sdio->bus_req_freeq, in ath10k_sdio_alloc_busreq()
1304 list_del(&bus_req->list); in ath10k_sdio_alloc_busreq()
1307 spin_unlock_bh(&ar_sdio->lock); in ath10k_sdio_alloc_busreq()
1318 spin_lock_bh(&ar_sdio->lock); in ath10k_sdio_free_bus_req()
1319 list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq); in ath10k_sdio_free_bus_req()
1320 spin_unlock_bh(&ar_sdio->lock); in ath10k_sdio_free_bus_req()
1330 skb = req->skb; in __ath10k_sdio_write_async()
1331 ret = ath10k_sdio_write(ar, req->address, skb->data, skb->len); in __ath10k_sdio_write_async()
1334 req->address, ret); in __ath10k_sdio_write_async()
1336 if (req->htc_msg) { in __ath10k_sdio_write_async()
1337 ep = &ar->htc.endpoint[req->eid]; in __ath10k_sdio_write_async()
1339 } else if (req->comp) { in __ath10k_sdio_write_async()
1340 complete(req->comp); in __ath10k_sdio_write_async()
1347 * this way SDIO bus is utilised much better.
1353 struct ath10k *ar = ar_sdio->ar; in ath10k_rx_indication_async_work()
1359 skb = skb_dequeue(&ar_sdio->rx_head); in ath10k_rx_indication_async_work()
1363 ep = &ar->htc.endpoint[cb->eid]; in ath10k_rx_indication_async_work()
1364 ep->ep_ops.ep_rx_complete(ar, skb); in ath10k_rx_indication_async_work()
1367 if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) { in ath10k_rx_indication_async_work()
1369 napi_schedule(&ar->napi); in ath10k_rx_indication_async_work()
1376 struct ath10k *ar = ar_sdio->ar; in ath10k_sdio_read_rtc_state()
1380 rtc_state = sdio_f0_readb(ar_sdio->func, ATH10K_CIS_RTC_STATE_ADDR, &ret); in ath10k_sdio_read_rtc_state()
1398 sdio_claim_host(ar_sdio->func); in ath10k_sdio_set_mbox_sleep()
1409 ar_sdio->mbox_state = SDIO_MBOX_SLEEP_STATE; in ath10k_sdio_set_mbox_sleep()
1412 ar_sdio->mbox_state = SDIO_MBOX_AWAKE_STATE; in ath10k_sdio_set_mbox_sleep()
1431 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read rtc state: %d\n", in ath10k_sdio_set_mbox_sleep()
1438 retry--; in ath10k_sdio_set_mbox_sleep()
1443 sdio_release_host(ar_sdio->func); in ath10k_sdio_set_mbox_sleep()
1453 ar_sdio->mbox_state = SDIO_MBOX_REQUEST_TO_SLEEP_STATE; in ath10k_sdio_sleep_timer_handler()
1454 queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work); in ath10k_sdio_sleep_timer_handler()
1461 struct ath10k *ar = ar_sdio->ar; in ath10k_sdio_write_async_work()
1463 struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info; in ath10k_sdio_write_async_work()
1465 spin_lock_bh(&ar_sdio->wr_async_lock); in ath10k_sdio_write_async_work()
1467 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { in ath10k_sdio_write_async_work()
1468 list_del(&req->list); in ath10k_sdio_write_async_work()
1469 spin_unlock_bh(&ar_sdio->wr_async_lock); in ath10k_sdio_write_async_work()
1471 if (req->address >= mbox_info->htc_addr && in ath10k_sdio_write_async_work()
1472 ar_sdio->mbox_state == SDIO_MBOX_SLEEP_STATE) { in ath10k_sdio_write_async_work()
1474 mod_timer(&ar_sdio->sleep_timer, jiffies + in ath10k_sdio_write_async_work()
1479 spin_lock_bh(&ar_sdio->wr_async_lock); in ath10k_sdio_write_async_work()
1482 spin_unlock_bh(&ar_sdio->wr_async_lock); in ath10k_sdio_write_async_work()
1484 if (ar_sdio->mbox_state == SDIO_MBOX_REQUEST_TO_SLEEP_STATE) in ath10k_sdio_write_async_work()
1497 * SDIO workqueue. in ath10k_sdio_prep_async_req()
1503 return -ENOMEM; in ath10k_sdio_prep_async_req()
1506 bus_req->skb = skb; in ath10k_sdio_prep_async_req()
1507 bus_req->eid = eid; in ath10k_sdio_prep_async_req()
1508 bus_req->address = addr; in ath10k_sdio_prep_async_req()
1509 bus_req->htc_msg = htc_msg; in ath10k_sdio_prep_async_req()
1510 bus_req->comp = comp; in ath10k_sdio_prep_async_req()
1512 spin_lock_bh(&ar_sdio->wr_async_lock); in ath10k_sdio_prep_async_req()
1513 list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq); in ath10k_sdio_prep_async_req()
1514 spin_unlock_bh(&ar_sdio->wr_async_lock); in ath10k_sdio_prep_async_req()
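
Write requests are recycled through a fixed pool: allocation pops from
bus_req_freeq, completion pushes back, and prep fills the request and
appends it to wr_asyncq for the worker. The driver guards both lists with
a spinlock; this single-threaded sketch of the pattern leaves the locking
out:

#include <stdint.h>
#include <stdio.h>

struct bus_req {
        struct bus_req *next;
        uint32_t addr;
        int eid;
};

static struct bus_req pool[4], *freeq, *asyncq_head, *asyncq_tail;

static struct bus_req *req_alloc(void)
{
        struct bus_req *r = freeq;

        if (r)
                freeq = r->next;        /* pop from the free list */
        return r;                       /* NULL means pool exhausted */
}

static void req_queue(struct bus_req *r, uint32_t addr, int eid)
{
        r->addr = addr;
        r->eid = eid;
        r->next = NULL;                 /* append, FIFO order */
        if (asyncq_tail)
                asyncq_tail->next = r;
        else
                asyncq_head = r;
        asyncq_tail = r;
}

int main(void)
{
        for (int i = 0; i < 4; i++) {   /* seed the free list */
                pool[i].next = freeq;
                freeq = &pool[i];
        }

        struct bus_req *r = req_alloc();

        if (r) {
                req_queue(r, 0x1000, 2);
                printf("queued req for eid %d\n", asyncq_head->eid);
        }
        return 0;
}
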
1524 struct ath10k *ar = ar_sdio->ar; in ath10k_sdio_irq_handler()
1532 sdio_release_host(ar_sdio->func); in ath10k_sdio_irq_handler()
1543 sdio_claim_host(ar_sdio->func); in ath10k_sdio_irq_handler()
1545 if (ret && ret != -ECANCELED) in ath10k_sdio_irq_handler()
1546 ath10k_warn(ar, "failed to process pending SDIO interrupts: %d\n", in ath10k_sdio_irq_handler()
1550 /* sdio HIF functions */
1555 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; in ath10k_sdio_disable_intrs()
1556 struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg; in ath10k_sdio_disable_intrs()
1559 mutex_lock(&irq_data->mtx); in ath10k_sdio_disable_intrs()
1563 &regs->int_status_en, sizeof(*regs)); in ath10k_sdio_disable_intrs()
1565 ath10k_warn(ar, "unable to disable sdio interrupts: %d\n", ret); in ath10k_sdio_disable_intrs()
1567 mutex_unlock(&irq_data->mtx); in ath10k_sdio_disable_intrs()
1576 struct sdio_func *func = ar_sdio->func; in ath10k_sdio_hif_power_up()
1579 if (!ar_sdio->is_disabled) in ath10k_sdio_hif_power_up()
1582 ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n"); in ath10k_sdio_hif_power_up()
1586 ath10k_err(ar, "failed to config sdio: %d\n", ret); in ath10k_sdio_hif_power_up()
1594 ath10k_warn(ar, "unable to enable sdio function: %d\n", ret); in ath10k_sdio_hif_power_up()
1606 ar_sdio->is_disabled = false; in ath10k_sdio_hif_power_up()
1620 if (ar_sdio->is_disabled) in ath10k_sdio_hif_power_down()
1623 ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n"); in ath10k_sdio_hif_power_down()
1625 timer_delete_sync(&ar_sdio->sleep_timer); in ath10k_sdio_hif_power_down()
1629 sdio_claim_host(ar_sdio->func); in ath10k_sdio_hif_power_down()
1631 ret = sdio_disable_func(ar_sdio->func); in ath10k_sdio_hif_power_down()
1633 ath10k_warn(ar, "unable to disable sdio function: %d\n", ret); in ath10k_sdio_hif_power_down()
1634 sdio_release_host(ar_sdio->func); in ath10k_sdio_hif_power_down()
1638 ret = mmc_hw_reset(ar_sdio->func->card); in ath10k_sdio_hif_power_down()
1640 ath10k_warn(ar, "unable to reset sdio: %d\n", ret); in ath10k_sdio_hif_power_down()
1642 sdio_release_host(ar_sdio->func); in ath10k_sdio_hif_power_down()
1644 ar_sdio->is_disabled = true; in ath10k_sdio_hif_power_down()
1663 skb->len); in ath10k_sdio_hif_tx_sg()
1667 address = ar_sdio->mbox_addr[eid] + ar_sdio->mbox_size[eid] - in ath10k_sdio_hif_tx_sg()
1668 skb->len; in ath10k_sdio_hif_tx_sg()
1675 queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work); in ath10k_sdio_hif_tx_sg()
1683 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; in ath10k_sdio_enable_intrs()
1684 struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg; in ath10k_sdio_enable_intrs()
1687 mutex_lock(&irq_data->mtx); in ath10k_sdio_enable_intrs()
1690 regs->int_status_en = FIELD_PREP(MBOX_INT_STATUS_ENABLE_ERROR_MASK, 1) | in ath10k_sdio_enable_intrs()
1697 regs->int_status_en |= in ath10k_sdio_enable_intrs()
1703 regs->cpu_int_status_en = FIELD_PREP(MBOX_CPU_STATUS_ENABLE_ASSERT_MASK, 1); in ath10k_sdio_enable_intrs()
1706 regs->err_int_status_en = in ath10k_sdio_enable_intrs()
1713 regs->cntr_int_status_en = in ath10k_sdio_enable_intrs()
1718 &regs->int_status_en, sizeof(*regs)); in ath10k_sdio_enable_intrs()
1724 mutex_unlock(&irq_data->mtx); in ath10k_sdio_enable_intrs()
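
The enable registers are composed bit by bit with FIELD_PREP() and written
to the device in one burst starting at int_status_en. A standalone sketch
of the same single-bit composition; the mask values here are illustrative,
while the real MBOX_*_MASK definitions live in the driver's sdio.h:

#include <stdint.h>
#include <stdio.h>

/* constant-mask FIELD_PREP lookalike (uses a GCC/Clang builtin) */
#define FIELD_PREP(mask, val) (((val) << __builtin_ctz(mask)) & (mask))

#define INT_STATUS_ENABLE_ERROR_MASK 0x80u /* illustrative masks */
#define INT_STATUS_ENABLE_CPU_MASK   0x40u
#define INT_STATUS_ENABLE_MBOX_MASK  0x01u

int main(void)
{
        uint8_t int_status_en =
                FIELD_PREP(INT_STATUS_ENABLE_ERROR_MASK, 1) |
                FIELD_PREP(INT_STATUS_ENABLE_CPU_MASK, 1) |
                FIELD_PREP(INT_STATUS_ENABLE_MBOX_MASK, 1);

        printf("int_status_en = 0x%02x\n", int_status_en);
        return 0;
}
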
1738 return -ENOMEM; in ath10k_sdio_hif_diag_read()
1771 return -ENOMEM; in ath10k_sdio_diag_read32()
1825 "sdio mailbox swap service enabled\n"); in ath10k_sdio_hif_start_post()
1826 ar_sdio->swap_mbox = true; in ath10k_sdio_hif_start_post()
1829 "sdio mailbox swap service disabled\n"); in ath10k_sdio_hif_start_post()
1830 ar_sdio->swap_mbox = false; in ath10k_sdio_hif_start_post()
1854 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio reduce tx complete fw%sack\n", in ath10k_sdio_get_htt_tx_complete()
1881 ar_sdio->mbox_addr[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_addr; in ath10k_sdio_hif_start()
1882 ar_sdio->mbox_size[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_sz; in ath10k_sdio_hif_start()
1884 sdio_claim_host(ar_sdio->func); in ath10k_sdio_hif_start()
1887 ret = sdio_claim_irq(ar_sdio->func, ath10k_sdio_irq_handler); in ath10k_sdio_hif_start()
1889 ath10k_warn(ar, "failed to claim sdio interrupt: %d\n", ret); in ath10k_sdio_hif_start()
1890 sdio_release_host(ar_sdio->func); in ath10k_sdio_hif_start()
1894 sdio_release_host(ar_sdio->func); in ath10k_sdio_hif_start()
1898 ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret); in ath10k_sdio_hif_start()
1920 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; in ath10k_sdio_irq_disable()
1921 struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg; in ath10k_sdio_irq_disable()
1930 mutex_lock(&irq_data->mtx); in ath10k_sdio_irq_disable()
1933 memcpy(skb->data, regs, sizeof(*regs)); in ath10k_sdio_irq_disable()
1936 mutex_unlock(&irq_data->mtx); in ath10k_sdio_irq_disable()
1944 queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work); in ath10k_sdio_irq_disable()
1952 ath10k_warn(ar, "sdio irq disable request timed out\n"); in ath10k_sdio_irq_disable()
1954 sdio_claim_host(ar_sdio->func); in ath10k_sdio_irq_disable()
1956 ret = sdio_release_irq(ar_sdio->func); in ath10k_sdio_irq_disable()
1958 ath10k_warn(ar, "failed to release sdio interrupt: %d\n", ret); in ath10k_sdio_irq_disable()
1960 sdio_release_host(ar_sdio->func); in ath10k_sdio_irq_disable()
1974 cancel_work_sync(&ar_sdio->async_work_rx); in ath10k_sdio_hif_stop()
1976 while ((skb = skb_dequeue(&ar_sdio->rx_head))) in ath10k_sdio_hif_stop()
1979 cancel_work_sync(&ar_sdio->wr_async_work); in ath10k_sdio_hif_stop()
1981 spin_lock_bh(&ar_sdio->wr_async_lock); in ath10k_sdio_hif_stop()
1984 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { in ath10k_sdio_hif_stop()
1987 list_del(&req->list); in ath10k_sdio_hif_stop()
1989 if (req->htc_msg) { in ath10k_sdio_hif_stop()
1990 ep = &ar->htc.endpoint[req->eid]; in ath10k_sdio_hif_stop()
1991 ath10k_htc_notify_tx_completion(ep, req->skb); in ath10k_sdio_hif_stop()
1992 } else if (req->skb) { in ath10k_sdio_hif_stop()
1993 kfree_skb(req->skb); in ath10k_sdio_hif_stop()
1998 spin_unlock_bh(&ar_sdio->wr_async_lock); in ath10k_sdio_hif_stop()
2012 switch (ar->state) { in ath10k_sdio_hif_resume()
2015 "sdio resume configuring sdio\n"); in ath10k_sdio_hif_resume()
2017 /* need to set sdio settings after power is cut from sdio */ in ath10k_sdio_hif_resume()
2035 struct ath10k_htc *htc = &ar->htc; in ath10k_sdio_hif_map_service_to_pipe()
2041 /* For sdio, we are interested in the mapping between eid in ath10k_sdio_hif_map_service_to_pipe()
2047 if (htc->endpoint[i].service_id == service_id) { in ath10k_sdio_hif_map_service_to_pipe()
2048 eid = htc->endpoint[i].eid; in ath10k_sdio_hif_map_service_to_pipe()
2055 return -EINVAL; in ath10k_sdio_hif_map_service_to_pipe()
2067 if (ar_sdio->swap_mbox) { in ath10k_sdio_hif_map_service_to_pipe()
2068 htt_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr; in ath10k_sdio_hif_map_service_to_pipe()
2069 wmi_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr; in ath10k_sdio_hif_map_service_to_pipe()
2070 htt_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz; in ath10k_sdio_hif_map_service_to_pipe()
2071 wmi_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz; in ath10k_sdio_hif_map_service_to_pipe()
2073 htt_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr; in ath10k_sdio_hif_map_service_to_pipe()
2074 wmi_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr; in ath10k_sdio_hif_map_service_to_pipe()
2075 htt_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz; in ath10k_sdio_hif_map_service_to_pipe()
2076 wmi_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz; in ath10k_sdio_hif_map_service_to_pipe()
2086 ar_sdio->mbox_addr[eid] = wmi_addr; in ath10k_sdio_hif_map_service_to_pipe()
2087 ar_sdio->mbox_size[eid] = wmi_mbox_size; in ath10k_sdio_hif_map_service_to_pipe()
2089 "sdio wmi ctrl mbox_addr 0x%x mbox_size %d\n", in ath10k_sdio_hif_map_service_to_pipe()
2090 ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]); in ath10k_sdio_hif_map_service_to_pipe()
2093 ar_sdio->mbox_addr[eid] = htt_addr; in ath10k_sdio_hif_map_service_to_pipe()
2094 ar_sdio->mbox_size[eid] = htt_mbox_size; in ath10k_sdio_hif_map_service_to_pipe()
2096 "sdio htt data mbox_addr 0x%x mbox_size %d\n", in ath10k_sdio_hif_map_service_to_pipe()
2097 ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]); in ath10k_sdio_hif_map_service_to_pipe()
2102 return -EINVAL; in ath10k_sdio_hif_map_service_to_pipe()
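
The mapping above resolves per endpoint: with mailbox swapping in effect
HTT takes extended window 0 and WMI window 1, otherwise the reverse, and
the chosen address/size pair is recorded per eid. A tiny sketch of the
selection with made-up window values:

#include <stdbool.h>
#include <stdio.h>

struct win { unsigned int addr, sz; };

int main(void)
{
        struct win ext[2] = { { 0x5000, 0x800 }, { 0x6000, 0x800 } };
        bool swap_mbox = true;

        struct win htt = swap_mbox ? ext[0] : ext[1];
        struct win wmi = swap_mbox ? ext[1] : ext[0];

        printf("htt 0x%x/%u wmi 0x%x/%u\n",
               htt.addr, htt.sz, wmi.addr, wmi.sz);
        return 0;
}
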
2111 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hif get default pipe\n"); in ath10k_sdio_hif_get_default_pipe()
2148 struct ath10k *ar = ar_sdio->ar; in ath10k_sdio_pm_suspend()
2152 if (!device_may_wakeup(ar->dev)) in ath10k_sdio_pm_suspend()
2162 ath10k_warn(ar, "failed to set sdio host pm flags (0x%x, 0x%x): %d\n", in ath10k_sdio_pm_suspend()
2242 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hi_option_flag2 %x\n", param); in ath10k_sdio_is_fast_dump_supported()
2287 crash_data->registers[i] = __cpu_to_le32(reg_dump_values[i]); in ath10k_sdio_dump_registers()
2301 cur_section = &mem_region->section_table.sections[0]; in ath10k_sdio_dump_memory_section()
2303 if (mem_region->start > cur_section->start) { in ath10k_sdio_dump_memory_section()
2305 mem_region->start, cur_section->start); in ath10k_sdio_dump_memory_section()
2309 skip_size = cur_section->start - mem_region->start; in ath10k_sdio_dump_memory_section()
2322 section_size = cur_section->end - cur_section->start; in ath10k_sdio_dump_memory_section()
2326 cur_section->start, in ath10k_sdio_dump_memory_section()
2327 cur_section->end); in ath10k_sdio_dump_memory_section()
2331 if (++i == mem_region->section_table.size) { in ath10k_sdio_dump_memory_section()
2338 if (cur_section->end > next_section->start) { in ath10k_sdio_dump_memory_section()
2340 next_section->start, in ath10k_sdio_dump_memory_section()
2341 cur_section->end); in ath10k_sdio_dump_memory_section()
2345 skip_size = next_section->start - cur_section->end; in ath10k_sdio_dump_memory_section()
2353 buf_len -= skip_size + section_size; in ath10k_sdio_dump_memory_section()
2356 ret = ath10k_sdio_read_mem(ar, cur_section->start, in ath10k_sdio_dump_memory_section()
2360 cur_section->start, ret); in ath10k_sdio_dump_memory_section()
2387 if (current_region->section_table.size > 0) in ath10k_sdio_dump_memory_generic()
2392 current_region->len); in ath10k_sdio_dump_memory_generic()
2394 /* No individual memory sections defined so we can in ath10k_sdio_dump_memory_generic()
2399 current_region->start, in ath10k_sdio_dump_memory_generic()
2401 current_region->len); in ath10k_sdio_dump_memory_generic()
2404 current_region->start, in ath10k_sdio_dump_memory_generic()
2406 current_region->len); in ath10k_sdio_dump_memory_generic()
2410 current_region->name, ret); in ath10k_sdio_dump_memory_generic()
2414 return current_region->len; in ath10k_sdio_dump_memory_generic()
2436 current_region = &mem_layout->region_table.regions[0]; in ath10k_sdio_dump_memory()
2438 buf = crash_data->ramdump_buf; in ath10k_sdio_dump_memory()
2439 buf_len = crash_data->ramdump_buf_len; in ath10k_sdio_dump_memory()
2443 for (i = 0; i < mem_layout->region_table.size; i++) { in ath10k_sdio_dump_memory()
2446 if (current_region->len > buf_len) { in ath10k_sdio_dump_memory()
2448 current_region->name, in ath10k_sdio_dump_memory()
2449 current_region->len, in ath10k_sdio_dump_memory()
2457 buf_len -= sizeof(*hdr); in ath10k_sdio_dump_memory()
2464 hdr->region_type = cpu_to_le32(current_region->type); in ath10k_sdio_dump_memory()
2465 hdr->start = cpu_to_le32(current_region->start); in ath10k_sdio_dump_memory()
2466 hdr->length = cpu_to_le32(count); in ath10k_sdio_dump_memory()
2473 buf_len -= count; in ath10k_sdio_dump_memory()
2490 ar->stats.fw_crash_counter++; in ath10k_sdio_fw_crashed_dump()
2497 scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid); in ath10k_sdio_fw_crashed_dump()
2521 /* Assumption: All SDIO based chipsets (so far) are QCA6174 based. in ath10k_sdio_probe()
2524 * assumption is no longer valid and hw_rev must be setup differently in ath10k_sdio_probe()
2529 ar = ath10k_core_create(sizeof(*ar_sdio), &func->dev, ATH10K_BUS_SDIO, in ath10k_sdio_probe()
2532 dev_err(&func->dev, "failed to allocate core\n"); in ath10k_sdio_probe()
2533 return -ENOMEM; in ath10k_sdio_probe()
2536 netif_napi_add(ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll); in ath10k_sdio_probe()
2539 "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n", in ath10k_sdio_probe()
2540 func->num, func->vendor, func->device, in ath10k_sdio_probe()
2541 func->max_blksize, func->cur_blksize); in ath10k_sdio_probe()
2545 ar_sdio->irq_data.irq_proc_reg = in ath10k_sdio_probe()
2546 devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_proc_regs), in ath10k_sdio_probe()
2548 if (!ar_sdio->irq_data.irq_proc_reg) { in ath10k_sdio_probe()
2549 ret = -ENOMEM; in ath10k_sdio_probe()
2553 ar_sdio->vsg_buffer = devm_kmalloc(ar->dev, ATH10K_SDIO_VSG_BUF_SIZE, GFP_KERNEL); in ath10k_sdio_probe()
2554 if (!ar_sdio->vsg_buffer) { in ath10k_sdio_probe()
2555 ret = -ENOMEM; in ath10k_sdio_probe()
2559 ar_sdio->irq_data.irq_en_reg = in ath10k_sdio_probe()
2560 devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs), in ath10k_sdio_probe()
2562 if (!ar_sdio->irq_data.irq_en_reg) { in ath10k_sdio_probe()
2563 ret = -ENOMEM; in ath10k_sdio_probe()
2567 ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_LARGE_CMDBUF_SIZE, GFP_KERNEL); in ath10k_sdio_probe()
2568 if (!ar_sdio->bmi_buf) { in ath10k_sdio_probe()
2569 ret = -ENOMEM; in ath10k_sdio_probe()
2573 ar_sdio->func = func; in ath10k_sdio_probe()
2576 ar_sdio->is_disabled = true; in ath10k_sdio_probe()
2577 ar_sdio->ar = ar; in ath10k_sdio_probe()
2579 spin_lock_init(&ar_sdio->lock); in ath10k_sdio_probe()
2580 spin_lock_init(&ar_sdio->wr_async_lock); in ath10k_sdio_probe()
2581 mutex_init(&ar_sdio->irq_data.mtx); in ath10k_sdio_probe()
2583 INIT_LIST_HEAD(&ar_sdio->bus_req_freeq); in ath10k_sdio_probe()
2584 INIT_LIST_HEAD(&ar_sdio->wr_asyncq); in ath10k_sdio_probe()
2586 INIT_WORK(&ar_sdio->wr_async_work, ath10k_sdio_write_async_work); in ath10k_sdio_probe()
2587 ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq"); in ath10k_sdio_probe()
2588 if (!ar_sdio->workqueue) { in ath10k_sdio_probe()
2589 ret = -ENOMEM; in ath10k_sdio_probe()
2594 ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]); in ath10k_sdio_probe()
2596 skb_queue_head_init(&ar_sdio->rx_head); in ath10k_sdio_probe()
2597 INIT_WORK(&ar_sdio->async_work_rx, ath10k_rx_indication_async_work); in ath10k_sdio_probe()
2599 dev_id_base = (id->device & 0x0F00); in ath10k_sdio_probe()
2602 ret = -ENODEV; in ath10k_sdio_probe()
2604 dev_id_base, id->device); in ath10k_sdio_probe()
2608 ar->dev_id = QCA9377_1_0_DEVICE_ID; in ath10k_sdio_probe()
2609 ar->id.vendor = id->vendor; in ath10k_sdio_probe()
2610 ar->id.device = id->device; in ath10k_sdio_probe()
2615 /* TODO: don't know yet how to get chip_id with SDIO */ in ath10k_sdio_probe()
2619 ar->hw->max_mtu = ETH_DATA_LEN; in ath10k_sdio_probe()
2627 timer_setup(&ar_sdio->sleep_timer, ath10k_sdio_sleep_timer_handler, 0); in ath10k_sdio_probe()
2632 destroy_workqueue(ar_sdio->workqueue); in ath10k_sdio_probe()
2642 struct ath10k *ar = ar_sdio->ar; in ath10k_sdio_remove()
2645 "sdio removed func %d vendor 0x%x device 0x%x\n", in ath10k_sdio_remove()
2646 func->num, func->vendor, func->device); in ath10k_sdio_remove()
2650 netif_napi_del(&ar->napi); in ath10k_sdio_remove()
2652 destroy_workqueue(ar_sdio->workqueue); in ath10k_sdio_remove()
2663 MODULE_DEVICE_TABLE(sdio, ath10k_sdio_devices);
2677 MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN SDIO devices");