/* SPDX-License-Identifier: ISC */
/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
 */

#ifndef _SDIO_H_
#define _SDIO_H_

#define ATH10K_HIF_MBOX_BLOCK_SIZE              256

/* SDIO device/manufacturer identification constants */
#define QCA_MANUFACTURER_ID_BASE                GENMASK(11, 8)
#define QCA_MANUFACTURER_ID_AR6005_BASE         0x5
#define QCA_MANUFACTURER_ID_QCA9377_BASE        0x7
#define QCA_SDIO_ID_AR6005_BASE                 0x500
#define QCA_SDIO_ID_QCA9377_BASE                0x700
#define QCA_MANUFACTURER_ID_REV_MASK            0x00FF
#define QCA_MANUFACTURER_CODE                   0x271 /* Qualcomm/Atheros */

/* Maximum size of a single SDIO transfer buffer.
 * NOTE(review): the original comment flagged this value as unsure --
 * confirm against the firmware/HTC interface before relying on it.
 */
#define ATH10K_SDIO_MAX_BUFFER_SIZE             4096

/* Mailbox address in SDIO address space */
#define ATH10K_HIF_MBOX_BASE_ADDR               0x1000
#define ATH10K_HIF_MBOX_WIDTH                   0x800

#define ATH10K_HIF_MBOX_TOT_WIDTH \
	(ATH10K_HIF_MBOX_NUM_MAX * ATH10K_HIF_MBOX_WIDTH)

#define ATH10K_HIF_MBOX0_EXT_BASE_ADDR          0x5000
#define ATH10K_HIF_MBOX0_EXT_WIDTH              (36 * 1024)
#define ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0     (56 * 1024)
#define ATH10K_HIF_MBOX1_EXT_WIDTH              (36 * 1024)
#define ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE        (2 * 1024)

/* Largest HTC payload that fits in one SDIO buffer after the HTC header */
#define ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH \
	(ATH10K_SDIO_MAX_BUFFER_SIZE - sizeof(struct ath10k_htc_hdr))

#define ATH10K_HIF_MBOX_NUM_MAX                 4
#define ATH10K_SDIO_BUS_REQUEST_MAX_NUM         1024

#define ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ (100 * HZ)

/* HTC runs over mailbox 0 */
#define ATH10K_HTC_MAILBOX                      0
#define ATH10K_HTC_MAILBOX_MASK                 BIT(ATH10K_HTC_MAILBOX)

/* GMBOX addresses */
#define ATH10K_HIF_GMBOX_BASE_ADDR              0x7000
#define ATH10K_HIF_GMBOX_WIDTH                  0x4000

/* Modified versions of the sdio.h macros.
 * The macros in sdio.h can't be used easily with the FIELD_{PREP|GET}
 * macros in bitfield.h, so we define our own macros here.
 */
#define ATH10K_SDIO_DRIVE_DTSX_MASK \
	(SDIO_DRIVE_DTSx_MASK << SDIO_DRIVE_DTSx_SHIFT)

#define ATH10K_SDIO_DRIVE_DTSX_TYPE_B           0
#define ATH10K_SDIO_DRIVE_DTSX_TYPE_A           1
#define ATH10K_SDIO_DRIVE_DTSX_TYPE_C           2
#define ATH10K_SDIO_DRIVE_DTSX_TYPE_D           3

/* SDIO CCCR register definitions */
#define CCCR_SDIO_IRQ_MODE_REG                  0xF0
#define CCCR_SDIO_IRQ_MODE_REG_SDIO3            0x16

#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR   0xF2

#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A      0x02
#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C      0x04
#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D      0x08

#define CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS       0xF0
#define CCCR_SDIO_ASYNC_INT_DELAY_MASK          0xC0

/* mode to enable special 4-bit interrupt assertion without clock */
#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ            BIT(0)
#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3      BIT(1)

#define ATH10K_SDIO_TARGET_DEBUG_INTR_MASK      0x01

/* The theoretical maximum number of RX messages that can be fetched
 * from the mbox interrupt handler in one loop is derived in the following
 * way:
 *
 * Let's assume that each packet in a bundle of the maximum bundle size
 * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE) has the HTC header bundle count set
 * to the maximum value (HTC_HOST_MAX_MSG_PER_RX_BUNDLE).
 *
 * In this case the driver must allocate
 * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * 2) skb's.
 */
#define ATH10K_SDIO_MAX_RX_MSGS \
	(HTC_HOST_MAX_MSG_PER_RX_BUNDLE * 2)

#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL                    0x00000868u
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF  0xFFFEFFFF
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON   0x10000

/* States of the mailbox sleep state machine (driven by sleep_timer
 * in struct ath10k_sdio).
 */
enum sdio_mbox_state {
	SDIO_MBOX_UNKNOWN_STATE = 0,
	SDIO_MBOX_REQUEST_TO_SLEEP_STATE = 1,
	SDIO_MBOX_SLEEP_STATE = 2,
	SDIO_MBOX_AWAKE_STATE = 3,
};

/* CIS/RTC polling parameters used when waking the target */
#define ATH10K_CIS_READ_WAIT_4_RTC_CYCLE_IN_US  125
#define ATH10K_CIS_RTC_STATE_ADDR               0x1138
#define ATH10K_CIS_RTC_STATE_ON                 0x01
#define ATH10K_CIS_XTAL_SETTLE_DURATION_IN_US   1500
#define ATH10K_CIS_READ_RETRY                   10
#define ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS     50

/* A single queued SDIO transfer request.
 *
 * TODO: remove this and use skb->cb instead, much cleaner approach
 */
struct ath10k_sdio_bus_request {
	struct list_head list;

	/* sdio address */
	u32 address;

	struct sk_buff *skb;
	enum ath10k_htc_ep_id eid;
	int status;
	/* Specifies if the current request is an HTC message.
	 * If not, the eid is not applicable and the TX completion handler
	 * associated with the endpoint will not be invoked.
	 */
	bool htc_msg;
	/* Completion that (if set) will be invoked for non HTC requests
	 * (htc_msg == false) when the request has been processed.
	 */
	struct completion *comp;
};

/* Bookkeeping for one received packet (possibly part of an RX bundle) */
struct ath10k_sdio_rx_data {
	struct sk_buff *skb;
	size_t alloc_len;
	size_t act_len;
	enum ath10k_htc_ep_id eid;
	bool part_of_bundle;
	bool last_in_bundle;
	bool trailer_only;
};

/* Host-side shadow of the target's interrupt processing registers.
 * Fields are laid out to match the device register block they are
 * read from.
 */
struct ath10k_sdio_irq_proc_regs {
	u8 host_int_status;
	u8 cpu_int_status;
	u8 error_int_status;
	u8 counter_int_status;
	u8 mbox_frame;
	u8 rx_lookahead_valid;
	u8 host_int_status2;
	u8 gmbox_rx_avail;
	__le32 rx_lookahead[2 * ATH10K_HIF_MBOX_NUM_MAX];
	__le32 int_status_enable;
};

/* Host-side shadow of the target's interrupt enable registers */
struct ath10k_sdio_irq_enable_regs {
	u8 int_status_en;
	u8 cpu_int_status_en;
	u8 err_int_status_en;
	u8 cntr_int_status_en;
};

struct ath10k_sdio_irq_data {
	/* protects irq_proc_reg and irq_en_reg below.
	 * We use a mutex here and not a spinlock since we will have the
	 * mutex locked while calling the sdio_memcpy_ functions.
	 * These functions require non atomic context, and hence, spinlocks
	 * cannot be held while calling them.
	 */
	struct mutex mtx;
	struct ath10k_sdio_irq_proc_regs *irq_proc_reg;
	struct ath10k_sdio_irq_enable_regs *irq_en_reg;
};

/* Extended mailbox address range (address and size) */
struct ath10k_mbox_ext_info {
	u32 htc_ext_addr;
	u32 htc_ext_sz;
};

/* Mailbox layout: HTC mailbox address, extended ranges, block geometry
 * and the GMBOX range.
 */
struct ath10k_mbox_info {
	u32 htc_addr;
	struct ath10k_mbox_ext_info ext_info[2];
	u32 block_size;
	u32 block_mask;
	u32 gmbox_addr;
	u32 gmbox_sz;
};

/* Per-device state of the ath10k SDIO HIF layer */
struct ath10k_sdio {
	struct sdio_func *func;

	struct ath10k_mbox_info mbox_info;
	bool swap_mbox;
	u32 mbox_addr[ATH10K_HTC_EP_COUNT];
	u32 mbox_size[ATH10K_HTC_EP_COUNT];

	/* available bus requests */
	struct ath10k_sdio_bus_request bus_req[ATH10K_SDIO_BUS_REQUEST_MAX_NUM];
	/* free list of bus requests */
	struct list_head bus_req_freeq;

	struct sk_buff_head rx_head;

	/* protects access to bus_req_freeq */
	spinlock_t lock;

	struct ath10k_sdio_rx_data rx_pkts[ATH10K_SDIO_MAX_RX_MSGS];
	size_t n_rx_pkts;

	struct ath10k *ar;
	struct ath10k_sdio_irq_data irq_data;

	/* temporary buffer for sdio reads.
	 * It is allocated at probe time and used to receive bundled packets;
	 * reads of bundled packets are not done in parallel, so the buffer
	 * does not need to be protected by a lock.
	 */
	u8 *vsg_buffer;

	/* temporary buffer for BMI requests */
	u8 *bmi_buf;

	bool is_disabled;

	struct workqueue_struct *workqueue;
	struct work_struct wr_async_work;
	struct list_head wr_asyncq;
	/* protects access to wr_asyncq */
	spinlock_t wr_async_lock;

	struct work_struct async_work_rx;
	struct timer_list sleep_timer;
	enum sdio_mbox_state mbox_state;
};

/* Return the SDIO-specific private data embedded in the ath10k core struct. */
static inline struct ath10k_sdio *ath10k_sdio_priv(struct ath10k *ar)
{
	return (struct ath10k_sdio *)ar->drv_priv;
}

#endif