/* SPDX-License-Identifier: ISC */
/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
 */

#ifndef _SDIO_H_
#define _SDIO_H_

#define ATH10K_HIF_MBOX_BLOCK_SIZE              256

#define ATH10K_SDIO_MAX_BUFFER_SIZE             4096 /* unsure of this constant */

/* Mailbox address in SDIO address space */
#define ATH10K_HIF_MBOX_BASE_ADDR               0x1000
#define ATH10K_HIF_MBOX_WIDTH                   0x800

#define ATH10K_HIF_MBOX_TOT_WIDTH \
	(ATH10K_HIF_MBOX_NUM_MAX * ATH10K_HIF_MBOX_WIDTH)
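
/* With ATH10K_HIF_MBOX_NUM_MAX == 4 mailboxes (defined below) of
 * ATH10K_HIF_MBOX_WIDTH (0x800) bytes each, ATH10K_HIF_MBOX_TOT_WIDTH
 * evaluates to 4 * 0x800 = 0x2000 bytes, starting at
 * ATH10K_HIF_MBOX_BASE_ADDR (0x1000).
 */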

#define ATH10K_HIF_MBOX0_EXT_BASE_ADDR          0x5000
#define ATH10K_HIF_MBOX0_EXT_WIDTH              (36 * 1024)
#define ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0     (56 * 1024)
#define ATH10K_HIF_MBOX1_EXT_WIDTH              (36 * 1024)
#define ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE        (2 * 1024)

#define ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH \
	(ATH10K_SDIO_MAX_BUFFER_SIZE - sizeof(struct ath10k_htc_hdr))
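
/* A worked example, assuming sizeof(struct ath10k_htc_hdr) is 8 bytes
 * (its definition lives in htc.h, not in this file): the maximum HTC
 * payload per mailbox message would then be 4096 - 8 = 4088 bytes.
 */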

#define ATH10K_HIF_MBOX_NUM_MAX                 4
#define ATH10K_SDIO_BUS_REQUEST_MAX_NUM         1024

#define ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ (100 * HZ)
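
/* The timeout above is expressed in jiffies; since HZ is the number of
 * jiffies per second, 100 * HZ corresponds to 100 seconds.
 */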

/* HTC runs over mailbox 0 */
#define ATH10K_HTC_MAILBOX                      0
#define ATH10K_HTC_MAILBOX_MASK                 BIT(ATH10K_HTC_MAILBOX)

/* GMBOX addresses */
#define ATH10K_HIF_GMBOX_BASE_ADDR              0x7000
#define ATH10K_HIF_GMBOX_WIDTH                  0x4000

/* Modified versions of the sdio.h macros.
 * The macros in sdio.h can't be used easily with the FIELD_{PREP|GET}
 * macros in bitfield.h, so we define our own macros here.
 */
#define ATH10K_SDIO_DRIVE_DTSX_MASK \
	(SDIO_DRIVE_DTSx_MASK << SDIO_DRIVE_DTSx_SHIFT)

#define ATH10K_SDIO_DRIVE_DTSX_TYPE_B           0
#define ATH10K_SDIO_DRIVE_DTSX_TYPE_A           1
#define ATH10K_SDIO_DRIVE_DTSX_TYPE_C           2
#define ATH10K_SDIO_DRIVE_DTSX_TYPE_D           3
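
/* A minimal sketch of why the shifted mask form is needed: the
 * FIELD_PREP()/FIELD_GET() helpers from <linux/bitfield.h> derive the
 * shift from the mask itself, so the mask has to already be positioned
 * within the register. Updating the drive strength field could then
 * look roughly like this (illustrative only; "val" is a hypothetical
 * local holding the CCCR register value):
 *
 *	u8 val;
 *
 *	val &= ~ATH10K_SDIO_DRIVE_DTSX_MASK;
 *	val |= FIELD_PREP(ATH10K_SDIO_DRIVE_DTSX_MASK,
 *			  ATH10K_SDIO_DRIVE_DTSX_TYPE_D);
 */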

/* SDIO CCCR register definitions */
#define CCCR_SDIO_IRQ_MODE_REG                  0xF0
#define CCCR_SDIO_IRQ_MODE_REG_SDIO3            0x16

#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR   0xF2

#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A      0x02
#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C      0x04
#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D      0x08

#define CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS       0xF0
#define CCCR_SDIO_ASYNC_INT_DELAY_MASK          0xC0

/* mode to enable special 4-bit interrupt assertion without clock */
#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ            BIT(0)
#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3      BIT(1)

#define ATH10K_SDIO_TARGET_DEBUG_INTR_MASK      0x01

/* The theoretical maximum number of RX messages that can be fetched
 * from the mbox interrupt handler in one loop is derived in the
 * following way:
 *
 * Let's assume that each packet in a bundle of the maximum bundle size
 * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE) has the HTC header bundle count set
 * to the maximum value (HTC_HOST_MAX_MSG_PER_RX_BUNDLE).
 *
 * In this case the driver must allocate
 * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * 2) skb's.
 */
#define ATH10K_SDIO_MAX_RX_MSGS \
	(HTC_HOST_MAX_MSG_PER_RX_BUNDLE * 2)
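
/* For example, if HTC_HOST_MAX_MSG_PER_RX_BUNDLE were 32 (it is defined
 * in htc.h, not here), ATH10K_SDIO_MAX_RX_MSGS would evaluate to 64
 * pre-allocated RX slots.
 */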

#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL   0x00000868u
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON 0x10000
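
/* Both values above single out bit 16 of the register: 0x10000 is only
 * bit 16, and 0xFFFEFFFF is its complement. A write sequence would
 * presumably AND with the _OFF mask to clear the bit and OR with the
 * _ON value to set it; the exact usage lives in sdio.c.
 */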

enum sdio_mbox_state {
	SDIO_MBOX_UNKNOWN_STATE = 0,
	SDIO_MBOX_REQUEST_TO_SLEEP_STATE = 1,
	SDIO_MBOX_SLEEP_STATE = 2,
	SDIO_MBOX_AWAKE_STATE = 3,
};

#define ATH10K_CIS_READ_WAIT_4_RTC_CYCLE_IN_US	125
#define ATH10K_CIS_RTC_STATE_ADDR		0x1138
#define ATH10K_CIS_RTC_STATE_ON			0x01
#define ATH10K_CIS_XTAL_SETTLE_DURATION_IN_US	1500
#define ATH10K_CIS_READ_RETRY			10
#define ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS	50

/* TODO: remove this and use skb->cb instead, much cleaner approach */
struct ath10k_sdio_bus_request {
	struct list_head list;

	/* sdio address */
	u32 address;

	struct sk_buff *skb;
	enum ath10k_htc_ep_id eid;
	int status;
	/* Specifies if the current request is an HTC message.
	 * If not, the eid is not applicable and the TX completion handler
	 * associated with the endpoint will not be invoked.
	 */
	bool htc_msg;
	/* Completion that (if set) will be invoked for non HTC requests
	 * (htc_msg == false) when the request has been processed.
	 */
	struct completion *comp;
};
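
/* A minimal sketch of how the completion is meant to be used for a
 * non-HTC request (illustrative only; the queueing mechanics are in
 * sdio.c and are omitted here):
 *
 *	struct ath10k_sdio_bus_request *req;
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	// req obtained from the bus_req_freeq free list
 *	req->htc_msg = false;
 *	req->comp = &done;
 *	// queue req on wr_asyncq and schedule wr_async_work, then:
 *	wait_for_completion(&done);
 */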

struct ath10k_sdio_rx_data {
	struct sk_buff *skb;
	size_t alloc_len;
	size_t act_len;
	enum ath10k_htc_ep_id eid;
	bool part_of_bundle;
	bool last_in_bundle;
	bool trailer_only;
};

struct ath10k_sdio_irq_proc_regs {
	u8 host_int_status;
	u8 cpu_int_status;
	u8 error_int_status;
	u8 counter_int_status;
	u8 mbox_frame;
	u8 rx_lookahead_valid;
	u8 host_int_status2;
	u8 gmbox_rx_avail;
	__le32 rx_lookahead[2 * ATH10K_HIF_MBOX_NUM_MAX];
	__le32 int_status_enable;
};

struct ath10k_sdio_irq_enable_regs {
	u8 int_status_en;
	u8 cpu_int_status_en;
	u8 err_int_status_en;
	u8 cntr_int_status_en;
};

struct ath10k_sdio_irq_data {
	/* protects irq_proc_reg and irq_en_reg below.
	 * We use a mutex here and not a spinlock since we will have the
	 * mutex locked while calling the sdio_memcpy_ functions.
	 * These functions require non-atomic context, and hence, spinlocks
	 * cannot be held while calling these functions.
	 */
	struct mutex mtx;
	struct ath10k_sdio_irq_proc_regs *irq_proc_reg;
	struct ath10k_sdio_irq_enable_regs *irq_en_reg;
};
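
/* A minimal sketch of the locking pattern the comment above describes
 * (illustrative only, error handling omitted; "address" stands for the
 * relevant mailbox register address, defined elsewhere in the driver).
 * sdio_memcpy_fromio() may sleep, which is why a mutex is used:
 *
 *	mutex_lock(&irq_data->mtx);
 *	sdio_claim_host(ar_sdio->func);
 *	ret = sdio_memcpy_fromio(ar_sdio->func, irq_data->irq_proc_reg,
 *				 address, sizeof(*irq_data->irq_proc_reg));
 *	sdio_release_host(ar_sdio->func);
 *	mutex_unlock(&irq_data->mtx);
 */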

struct ath10k_mbox_ext_info {
	u32 htc_ext_addr;
	u32 htc_ext_sz;
};

struct ath10k_mbox_info {
	u32 htc_addr;
	struct ath10k_mbox_ext_info ext_info[2];
	u32 block_size;
	u32 block_mask;
	u32 gmbox_addr;
	u32 gmbox_sz;
};

struct ath10k_sdio {
	struct sdio_func *func;

	struct ath10k_mbox_info mbox_info;
	bool swap_mbox;
	u32 mbox_addr[ATH10K_HTC_EP_COUNT];
	u32 mbox_size[ATH10K_HTC_EP_COUNT];

	/* available bus requests */
	struct ath10k_sdio_bus_request bus_req[ATH10K_SDIO_BUS_REQUEST_MAX_NUM];
	/* free list of bus requests */
	struct list_head bus_req_freeq;

	struct sk_buff_head rx_head;

	/* protects access to bus_req_freeq */
	spinlock_t lock;

	struct ath10k_sdio_rx_data rx_pkts[ATH10K_SDIO_MAX_RX_MSGS];
	size_t n_rx_pkts;

	struct ath10k *ar;
	struct ath10k_sdio_irq_data irq_data;

	/* Temporary buffer for sdio reads.
	 * It is allocated at probe time and used for receiving bundled
	 * packets; the reads of bundled packets are not done in parallel,
	 * so it does not need to be protected by a lock.
	 */
	u8 *vsg_buffer;

	/* temporary buffer for BMI requests */
	u8 *bmi_buf;

	bool is_disabled;

	struct workqueue_struct *workqueue;
	struct work_struct wr_async_work;
	struct list_head wr_asyncq;
	/* protects access to wr_asyncq */
	spinlock_t wr_async_lock;

	struct work_struct async_work_rx;
	struct timer_list sleep_timer;
	enum sdio_mbox_state mbox_state;
};

static inline struct ath10k_sdio *ath10k_sdio_priv(struct ath10k *ar)
{
	return (struct ath10k_sdio *)ar->drv_priv;
}
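
/* Typical usage (a sketch; any function holding a struct ath10k pointer
 * can recover the SDIO-specific state this way):
 *
 *	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 */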

#endif