xref: /linux/drivers/net/wireless/realtek/rtw88/sdio.c (revision 8a5f956a9fb7d74fff681145082acfad5afa6bb8)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (C) 2021 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
3  * Copyright (C) 2021 Jernej Skrabec <jernej.skrabec@gmail.com>
4  *
5  * Based on rtw88/pci.c:
6  *   Copyright(c) 2018-2019  Realtek Corporation
7  */
8 
9 #include <linux/module.h>
10 #include <linux/mmc/host.h>
11 #include <linux/mmc/sdio_func.h>
12 #include "main.h"
13 #include "mac.h"
14 #include "debug.h"
15 #include "fw.h"
16 #include "ps.h"
17 #include "reg.h"
18 #include "rx.h"
19 #include "sdio.h"
20 #include "tx.h"
21 
22 #define RTW_SDIO_INDIRECT_RW_RETRIES			50
23 
24 static bool rtw_sdio_is_bus_addr(u32 addr)
25 {
26 	return !!(addr & RTW_SDIO_BUS_MSK);
27 }
28 
29 static bool rtw_sdio_bus_claim_needed(struct rtw_sdio *rtwsdio)
30 {
31 	return !rtwsdio->irq_thread ||
32 	       rtwsdio->irq_thread != current;
33 }
34 
35 static u32 rtw_sdio_to_bus_offset(struct rtw_dev *rtwdev, u32 addr)
36 {
37 	switch (addr & RTW_SDIO_BUS_MSK) {
38 	case WLAN_IOREG_OFFSET:
39 		addr &= WLAN_IOREG_REG_MSK;
40 		addr |= FIELD_PREP(REG_SDIO_CMD_ADDR_MSK,
41 				   REG_SDIO_CMD_ADDR_MAC_REG);
42 		break;
43 	case SDIO_LOCAL_OFFSET:
44 		addr &= SDIO_LOCAL_REG_MSK;
45 		addr |= FIELD_PREP(REG_SDIO_CMD_ADDR_MSK,
46 				   REG_SDIO_CMD_ADDR_SDIO_REG);
47 		break;
48 	default:
49 		rtw_warn(rtwdev, "Cannot convert addr 0x%08x to bus offset",
50 			 addr);
51 	}
52 
53 	return addr;
54 }
55 
56 static bool rtw_sdio_use_memcpy_io(struct rtw_dev *rtwdev, u32 addr,
57 				   u8 alignment)
58 {
59 	return IS_ALIGNED(addr, alignment) &&
60 	       test_bit(RTW_FLAG_POWERON, rtwdev->flags);
61 }
62 
63 static void rtw_sdio_writel(struct rtw_dev *rtwdev, u32 val, u32 addr,
64 			    int *err_ret)
65 {
66 	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
67 	u8 buf[4];
68 	int i;
69 
70 	if (rtw_sdio_use_memcpy_io(rtwdev, addr, 4)) {
71 		sdio_writel(rtwsdio->sdio_func, val, addr, err_ret);
72 		return;
73 	}
74 
75 	*(__le32 *)buf = cpu_to_le32(val);
76 
77 	for (i = 0; i < 4; i++) {
78 		sdio_writeb(rtwsdio->sdio_func, buf[i], addr + i, err_ret);
79 		if (*err_ret)
80 			return;
81 	}
82 }
83 
84 static void rtw_sdio_writew(struct rtw_dev *rtwdev, u16 val, u32 addr,
85 			    int *err_ret)
86 {
87 	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
88 	u8 buf[2];
89 	int i;
90 
91 	*(__le16 *)buf = cpu_to_le16(val);
92 
93 	for (i = 0; i < 2; i++) {
94 		sdio_writeb(rtwsdio->sdio_func, buf[i], addr + i, err_ret);
95 		if (*err_ret)
96 			return;
97 	}
98 }
99 
100 static u32 rtw_sdio_readl(struct rtw_dev *rtwdev, u32 addr, int *err_ret)
101 {
102 	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
103 	u8 buf[4];
104 	int i;
105 
106 	if (rtw_sdio_use_memcpy_io(rtwdev, addr, 4))
107 		return sdio_readl(rtwsdio->sdio_func, addr, err_ret);
108 
109 	for (i = 0; i < 4; i++) {
110 		buf[i] = sdio_readb(rtwsdio->sdio_func, addr + i, err_ret);
111 		if (*err_ret)
112 			return 0;
113 	}
114 
115 	return le32_to_cpu(*(__le32 *)buf);
116 }
117 
118 static u16 rtw_sdio_readw(struct rtw_dev *rtwdev, u32 addr, int *err_ret)
119 {
120 	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
121 	u8 buf[2];
122 	int i;
123 
124 	for (i = 0; i < 2; i++) {
125 		buf[i] = sdio_readb(rtwsdio->sdio_func, addr + i, err_ret);
126 		if (*err_ret)
127 			return 0;
128 	}
129 
130 	return le16_to_cpu(*(__le16 *)buf);
131 }
132 
133 static u32 rtw_sdio_to_io_address(struct rtw_dev *rtwdev, u32 addr,
134 				  bool direct)
135 {
136 	if (!direct)
137 		return addr;
138 
139 	if (!rtw_sdio_is_bus_addr(addr))
140 		addr |= WLAN_IOREG_OFFSET;
141 
142 	return rtw_sdio_to_bus_offset(rtwdev, addr);
143 }
144 
145 static bool rtw_sdio_use_direct_io(struct rtw_dev *rtwdev, u32 addr)
146 {
147 	if (!test_bit(RTW_FLAG_POWERON, rtwdev->flags) &&
148 	    !rtw_sdio_is_bus_addr(addr))
149 		return false;
150 
151 	return !rtw_sdio_is_sdio30_supported(rtwdev) ||
152 		rtw_sdio_is_bus_addr(addr);
153 }
154 
/* Program the indirect register access config register and poll for
 * completion of the requested operation.
 *
 * @addr: target register address
 * @cfg: BIT_SDIO_INDIRECT_REG_CFG_* flags (read/write, access width)
 *
 * Returns 0 on success, a negative error code if writing the config
 * register failed, or -ETIMEDOUT when the completion bit was not seen
 * within RTW_SDIO_INDIRECT_RW_RETRIES polls.
 */
static int rtw_sdio_indirect_reg_cfg(struct rtw_dev *rtwdev, u32 addr, u32 cfg)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	unsigned int retry;
	u32 reg_cfg;
	int ret;
	u8 tmp;

	reg_cfg = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_CFG);

	/* The UNK20 bit is always set alongside the request; as the macro
	 * name indicates, its exact meaning is not known.
	 */
	rtw_sdio_writel(rtwdev, addr | cfg | BIT_SDIO_INDIRECT_REG_CFG_UNK20,
			reg_cfg, &ret);
	if (ret)
		return ret;

	for (retry = 0; retry < RTW_SDIO_INDIRECT_RW_RETRIES; retry++) {
		/* BIT(4) in the third config byte presumably flags
		 * "operation done" (TODO confirm); read errors simply
		 * consume one retry.
		 */
		tmp = sdio_readb(rtwsdio->sdio_func, reg_cfg + 2, &ret);
		if (!ret && (tmp & BIT(4)))
			return 0;
	}

	return -ETIMEDOUT;
}
178 
/* Read one byte through the indirect register access mechanism: trigger
 * an indirect read of @addr, then fetch the latched value from the data
 * register. On failure *err_ret is set and 0 is returned.
 */
static u8 rtw_sdio_indirect_read8(struct rtw_dev *rtwdev, u32 addr,
				  int *err_ret)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	u32 reg_data;

	*err_ret = rtw_sdio_indirect_reg_cfg(rtwdev, addr,
					     BIT_SDIO_INDIRECT_REG_CFG_READ);
	if (*err_ret)
		return 0;

	reg_data = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_DATA);
	return sdio_readb(rtwsdio->sdio_func, reg_data, err_ret);
}
193 
194 static int rtw_sdio_indirect_read_bytes(struct rtw_dev *rtwdev, u32 addr,
195 					u8 *buf, int count)
196 {
197 	int i, ret = 0;
198 
199 	for (i = 0; i < count; i++) {
200 		buf[i] = rtw_sdio_indirect_read8(rtwdev, addr + i, &ret);
201 		if (ret)
202 			break;
203 	}
204 
205 	return ret;
206 }
207 
/* Read a 16-bit value through the indirect access mechanism.
 *
 * Unaligned addresses fall back to two single-byte indirect reads that
 * are little-endian decoded; aligned addresses use one indirect word
 * access. On failure *err_ret is set and 0 is returned.
 */
static u16 rtw_sdio_indirect_read16(struct rtw_dev *rtwdev, u32 addr,
				    int *err_ret)
{
	u32 reg_data;
	u8 buf[2];

	if (!IS_ALIGNED(addr, 2)) {
		*err_ret = rtw_sdio_indirect_read_bytes(rtwdev, addr, buf, 2);
		if (*err_ret)
			return 0;

		return le16_to_cpu(*(__le16 *)buf);
	}

	*err_ret = rtw_sdio_indirect_reg_cfg(rtwdev, addr,
					     BIT_SDIO_INDIRECT_REG_CFG_READ);
	if (*err_ret)
		return 0;

	/* fetch the latched value from the indirect data register */
	reg_data = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_DATA);
	return rtw_sdio_readw(rtwdev, reg_data, err_ret);
}
230 
/* Read a 32-bit value through the indirect access mechanism.
 *
 * Unaligned addresses fall back to four single-byte indirect reads that
 * are little-endian decoded; aligned addresses use one indirect dword
 * access. On failure *err_ret is set and 0 is returned.
 */
static u32 rtw_sdio_indirect_read32(struct rtw_dev *rtwdev, u32 addr,
				    int *err_ret)
{
	u32 reg_data;
	u8 buf[4];

	if (!IS_ALIGNED(addr, 4)) {
		*err_ret = rtw_sdio_indirect_read_bytes(rtwdev, addr, buf, 4);
		if (*err_ret)
			return 0;

		return le32_to_cpu(*(__le32 *)buf);
	}

	*err_ret = rtw_sdio_indirect_reg_cfg(rtwdev, addr,
					     BIT_SDIO_INDIRECT_REG_CFG_READ);
	if (*err_ret)
		return 0;

	/* fetch the latched value from the indirect data register */
	reg_data = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_DATA);
	return rtw_sdio_readl(rtwdev, reg_data, err_ret);
}
253 
/* Read one byte from the device, choosing direct or indirect access
 * based on the address and chip state. The SDIO host is claimed unless
 * we already run inside the IRQ thread. A failed read is logged; the
 * returned value is then whatever the accessor produced (0 for the
 * indirect path).
 */
static u8 rtw_sdio_read8(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool direct, bus_claim;
	int ret;
	u8 val;

	direct = rtw_sdio_use_direct_io(rtwdev, addr);
	addr = rtw_sdio_to_io_address(rtwdev, addr, direct);
	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	if (direct)
		val = sdio_readb(rtwsdio->sdio_func, addr, &ret);
	else
		val = rtw_sdio_indirect_read8(rtwdev, addr, &ret);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev, "sdio read8 failed (0x%x): %d", addr, ret);

	return val;
}
281 
/* Read a 16-bit value from the device, choosing direct or indirect
 * access based on the address and chip state. The SDIO host is claimed
 * unless we already run inside the IRQ thread. Failures are logged.
 */
static u16 rtw_sdio_read16(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool direct, bus_claim;
	int ret;
	u16 val;

	direct = rtw_sdio_use_direct_io(rtwdev, addr);
	addr = rtw_sdio_to_io_address(rtwdev, addr, direct);
	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	if (direct)
		val = rtw_sdio_readw(rtwdev, addr, &ret);
	else
		val = rtw_sdio_indirect_read16(rtwdev, addr, &ret);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev, "sdio read16 failed (0x%x): %d", addr, ret);

	return val;
}
309 
/* Read a 32-bit value from the device, choosing direct or indirect
 * access based on the address and chip state. The SDIO host is claimed
 * unless we already run inside the IRQ thread. Failures are logged.
 */
static u32 rtw_sdio_read32(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool direct, bus_claim;
	u32 val;
	int ret;

	direct = rtw_sdio_use_direct_io(rtwdev, addr);
	addr = rtw_sdio_to_io_address(rtwdev, addr, direct);
	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	if (direct)
		val = rtw_sdio_readl(rtwdev, addr, &ret);
	else
		val = rtw_sdio_indirect_read32(rtwdev, addr, &ret);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev, "sdio read32 failed (0x%x): %d", addr, ret);

	return val;
}
337 
/* Write one byte through the indirect access mechanism: stage the value
 * in the data register, then trigger the write via the config register.
 * *err_ret carries the first error encountered.
 */
static void rtw_sdio_indirect_write8(struct rtw_dev *rtwdev, u8 val, u32 addr,
				     int *err_ret)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	u32 reg_data;

	reg_data = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_DATA);
	sdio_writeb(rtwsdio->sdio_func, val, reg_data, err_ret);
	if (*err_ret)
		return;

	*err_ret = rtw_sdio_indirect_reg_cfg(rtwdev, addr,
					     BIT_SDIO_INDIRECT_REG_CFG_WRITE);
}
352 
/* Write a 16-bit value through the indirect access mechanism.
 *
 * Unaligned addresses fall back to two direct byte writes through the
 * converted IO address. Aligned writes stage the value in the data
 * register and trigger a word-sized indirect write.
 */
static void rtw_sdio_indirect_write16(struct rtw_dev *rtwdev, u16 val, u32 addr,
				      int *err_ret)
{
	u32 reg_data;

	if (!IS_ALIGNED(addr, 2)) {
		addr = rtw_sdio_to_io_address(rtwdev, addr, true);
		rtw_sdio_writew(rtwdev, val, addr, err_ret);
		return;
	}

	reg_data = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_DATA);
	rtw_sdio_writew(rtwdev, val, reg_data, err_ret);
	if (*err_ret)
		return;

	*err_ret = rtw_sdio_indirect_reg_cfg(rtwdev, addr,
					     BIT_SDIO_INDIRECT_REG_CFG_WRITE |
					     BIT_SDIO_INDIRECT_REG_CFG_WORD);
}
373 
374 static void rtw_sdio_indirect_write32(struct rtw_dev *rtwdev, u32 val,
375 				      u32 addr, int *err_ret)
376 {
377 	u32 reg_data;
378 
379 	if (!IS_ALIGNED(addr, 4)) {
380 		addr = rtw_sdio_to_io_address(rtwdev, addr, true);
381 		rtw_sdio_writel(rtwdev, val, addr, err_ret);
382 		return;
383 	}
384 
385 	reg_data = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_DATA);
386 	rtw_sdio_writel(rtwdev, val, reg_data, err_ret);
387 
388 	*err_ret = rtw_sdio_indirect_reg_cfg(rtwdev, addr,
389 					     BIT_SDIO_INDIRECT_REG_CFG_WRITE |
390 					     BIT_SDIO_INDIRECT_REG_CFG_DWORD);
391 }
392 
/* Write one byte to the device, choosing direct or indirect access based
 * on the address and chip state. The SDIO host is claimed unless we
 * already run inside the IRQ thread. Failures are logged.
 */
static void rtw_sdio_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool direct, bus_claim;
	int ret;

	direct = rtw_sdio_use_direct_io(rtwdev, addr);
	addr = rtw_sdio_to_io_address(rtwdev, addr, direct);
	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	if (direct)
		sdio_writeb(rtwsdio->sdio_func, val, addr, &ret);
	else
		rtw_sdio_indirect_write8(rtwdev, val, addr, &ret);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev, "sdio write8 failed (0x%x): %d", addr, ret);
}
417 
/* Write a 16-bit value to the device, choosing direct or indirect access
 * based on the address and chip state. The SDIO host is claimed unless
 * we already run inside the IRQ thread. Failures are logged.
 */
static void rtw_sdio_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool direct, bus_claim;
	int ret;

	direct = rtw_sdio_use_direct_io(rtwdev, addr);
	addr = rtw_sdio_to_io_address(rtwdev, addr, direct);
	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	if (direct)
		rtw_sdio_writew(rtwdev, val, addr, &ret);
	else
		rtw_sdio_indirect_write16(rtwdev, val, addr, &ret);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev, "sdio write16 failed (0x%x): %d", addr, ret);
}
442 
/* Write a 32-bit value to the device, choosing direct or indirect access
 * based on the address and chip state. The SDIO host is claimed unless
 * we already run inside the IRQ thread. Failures are logged.
 */
static void rtw_sdio_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool direct, bus_claim;
	int ret;

	direct = rtw_sdio_use_direct_io(rtwdev, addr);
	addr = rtw_sdio_to_io_address(rtwdev, addr, direct);
	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	if (direct)
		rtw_sdio_writel(rtwdev, val, addr, &ret);
	else
		rtw_sdio_indirect_write32(rtwdev, val, addr, &ret);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev, "sdio write32 failed (0x%x): %d", addr, ret);
}
467 
468 static u32 rtw_sdio_get_tx_addr(struct rtw_dev *rtwdev, size_t size,
469 				enum rtw_tx_queue_type queue)
470 {
471 	u32 txaddr;
472 
473 	switch (queue) {
474 	case RTW_TX_QUEUE_BCN:
475 	case RTW_TX_QUEUE_H2C:
476 	case RTW_TX_QUEUE_HI0:
477 		txaddr = FIELD_PREP(REG_SDIO_CMD_ADDR_MSK,
478 				    REG_SDIO_CMD_ADDR_TXFF_HIGH);
479 		break;
480 	case RTW_TX_QUEUE_VI:
481 	case RTW_TX_QUEUE_VO:
482 		txaddr = FIELD_PREP(REG_SDIO_CMD_ADDR_MSK,
483 				    REG_SDIO_CMD_ADDR_TXFF_NORMAL);
484 		break;
485 	case RTW_TX_QUEUE_BE:
486 	case RTW_TX_QUEUE_BK:
487 		txaddr = FIELD_PREP(REG_SDIO_CMD_ADDR_MSK,
488 				    REG_SDIO_CMD_ADDR_TXFF_LOW);
489 		break;
490 	case RTW_TX_QUEUE_MGMT:
491 		txaddr = FIELD_PREP(REG_SDIO_CMD_ADDR_MSK,
492 				    REG_SDIO_CMD_ADDR_TXFF_EXTRA);
493 		break;
494 	default:
495 		rtw_warn(rtwdev, "Unsupported queue for TX addr: 0x%02x\n",
496 			 queue);
497 		return 0;
498 	}
499 
500 	txaddr += DIV_ROUND_UP(size, 4);
501 
502 	return txaddr;
503 };
504 
/* Read @count bytes from the card's RX FIFO into @buf.
 *
 * The transfer is split according to the MMC host's maximum request
 * size. Each call consumes one RX address sequence number (rx_addr)
 * which is encoded into the bus address. Returns 0 on success or the
 * first error encountered (the remaining data is drained regardless).
 */
static int rtw_sdio_read_port(struct rtw_dev *rtwdev, u8 *buf, size_t count)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	struct mmc_host *host = rtwsdio->sdio_func->card->host;
	bool bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);
	u32 rxaddr = rtwsdio->rx_addr++;
	int ret = 0, err;
	size_t bytes;

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	while (count > 0) {
		bytes = min_t(size_t, host->max_req_size, count);

		err = sdio_memcpy_fromio(rtwsdio->sdio_func, buf,
					 RTW_SDIO_ADDR_RX_RX0FF_GEN(rxaddr),
					 bytes);
		if (err) {
			rtw_warn(rtwdev,
				 "Failed to read %zu byte(s) from SDIO port 0x%08x: %d",
				 bytes, rxaddr, err);

			 /* Signal to the caller that reading did not work and
			  * that the data in the buffer is short/corrupted.
			  */
			ret = err;

			/* Don't stop here - instead drain the remaining data
			 * from the card's buffer, else the card will return
			 * corrupt data for the next rtw_sdio_read_port() call.
			 */
		}

		count -= bytes;
		buf += bytes;
	}

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	return ret;
}
548 
/* Check whether the TX FIFO has enough free pages for a transfer of
 * @count bytes to @queue.
 *
 * 8051-based chips report per-queue free page counts as four 8-bit
 * fields in a single 32-bit register; newer chips use 12-bit fields
 * spread over three consecutive registers. In both layouts the shared
 * "public" pool is added to the queue-specific count.
 *
 * Returns 0 if the transfer fits, -EBUSY if not enough pages are free,
 * -EINVAL for unknown queues.
 */
static int rtw_sdio_check_free_txpg(struct rtw_dev *rtwdev, u8 queue,
				    size_t count)
{
	unsigned int pages_free, pages_needed;

	if (rtw_chip_wcpu_8051(rtwdev)) {
		u32 free_txpg;

		free_txpg = rtw_sdio_read32(rtwdev, REG_SDIO_FREE_TXPG);

		switch (queue) {
		case RTW_TX_QUEUE_BCN:
		case RTW_TX_QUEUE_H2C:
		case RTW_TX_QUEUE_HI0:
		case RTW_TX_QUEUE_MGMT:
			/* high */
			pages_free = free_txpg & 0xff;
			break;
		case RTW_TX_QUEUE_VI:
		case RTW_TX_QUEUE_VO:
			/* normal */
			pages_free = (free_txpg >> 8) & 0xff;
			break;
		case RTW_TX_QUEUE_BE:
		case RTW_TX_QUEUE_BK:
			/* low */
			pages_free = (free_txpg >> 16) & 0xff;
			break;
		default:
			rtw_warn(rtwdev, "Unknown mapping for queue %u\n", queue);
			return -EINVAL;
		}

		/* add the pages from the public queue */
		pages_free += (free_txpg >> 24) & 0xff;
	} else {
		u32 free_txpg[3];

		free_txpg[0] = rtw_sdio_read32(rtwdev, REG_SDIO_FREE_TXPG);
		free_txpg[1] = rtw_sdio_read32(rtwdev, REG_SDIO_FREE_TXPG + 4);
		free_txpg[2] = rtw_sdio_read32(rtwdev, REG_SDIO_FREE_TXPG + 8);

		switch (queue) {
		case RTW_TX_QUEUE_BCN:
		case RTW_TX_QUEUE_H2C:
		case RTW_TX_QUEUE_HI0:
			/* high */
			pages_free = free_txpg[0] & 0xfff;
			break;
		case RTW_TX_QUEUE_VI:
		case RTW_TX_QUEUE_VO:
			/* normal */
			pages_free = (free_txpg[0] >> 16) & 0xfff;
			break;
		case RTW_TX_QUEUE_BE:
		case RTW_TX_QUEUE_BK:
			/* low */
			pages_free = free_txpg[1] & 0xfff;
			break;
		case RTW_TX_QUEUE_MGMT:
			/* extra */
			pages_free = free_txpg[2] & 0xfff;
			break;
		default:
			rtw_warn(rtwdev, "Unknown mapping for queue %u\n", queue);
			return -EINVAL;
		}

		/* add the pages from the public queue */
		pages_free += (free_txpg[1] >> 16) & 0xfff;
	}

	pages_needed = DIV_ROUND_UP(count, rtwdev->chip->page_size);

	if (pages_needed > pages_free) {
		rtw_dbg(rtwdev, RTW_DBG_SDIO,
			"Not enough free pages (%u needed, %u free) in queue %u for %zu bytes\n",
			pages_needed, pages_free, queue, count);
		return -EBUSY;
	}

	return 0;
}
632 
633 static int rtw_sdio_write_port(struct rtw_dev *rtwdev, struct sk_buff *skb,
634 			       enum rtw_tx_queue_type queue)
635 {
636 	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
637 	bool bus_claim;
638 	size_t txsize;
639 	u32 txaddr;
640 	int ret;
641 
642 	txaddr = rtw_sdio_get_tx_addr(rtwdev, skb->len, queue);
643 	if (!txaddr)
644 		return -EINVAL;
645 
646 	txsize = sdio_align_size(rtwsdio->sdio_func, skb->len);
647 
648 	ret = rtw_sdio_check_free_txpg(rtwdev, queue, txsize);
649 	if (ret)
650 		return ret;
651 
652 	if (!IS_ALIGNED((unsigned long)skb->data, RTW_SDIO_DATA_PTR_ALIGN))
653 		rtw_warn(rtwdev, "Got unaligned SKB in %s() for queue %u\n",
654 			 __func__, queue);
655 
656 	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);
657 
658 	if (bus_claim)
659 		sdio_claim_host(rtwsdio->sdio_func);
660 
661 	ret = sdio_memcpy_toio(rtwsdio->sdio_func, txaddr, skb->data, txsize);
662 
663 	if (bus_claim)
664 		sdio_release_host(rtwsdio->sdio_func);
665 
666 	if (ret)
667 		rtw_warn(rtwdev,
668 			 "Failed to write %zu byte(s) to SDIO port 0x%08x",
669 			 txsize, txaddr);
670 
671 	return ret;
672 }
673 
674 static void rtw_sdio_init(struct rtw_dev *rtwdev)
675 {
676 	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
677 
678 	rtwsdio->irq_mask = REG_SDIO_HIMR_RX_REQUEST | REG_SDIO_HIMR_CPWM1;
679 }
680 
/* Enable RX DMA aggregation with chip-specific size threshold and
 * timeout values (raw register units; exact units are not visible here).
 */
static void rtw_sdio_enable_rx_aggregation(struct rtw_dev *rtwdev)
{
	u8 size, timeout;

	switch (rtwdev->chip->id) {
	case RTW_CHIP_TYPE_8703B:
	case RTW_CHIP_TYPE_8821A:
	case RTW_CHIP_TYPE_8812A:
		size = 0x6;
		timeout = 0x6;
		break;
	case RTW_CHIP_TYPE_8723D:
		size = 0xa;
		timeout = 0x3;
		/* extra chip-specific enable bit - TODO confirm meaning */
		rtw_write8_set(rtwdev, REG_RXDMA_AGG_PG_TH + 3, BIT(7));
		break;
	default:
		size = 0xff;
		timeout = 0x1;
		break;
	}

	/* Make the firmware honor the size limit configured below */
	rtw_write32_set(rtwdev, REG_RXDMA_AGG_PG_TH, BIT_EN_PRE_CALC);

	rtw_write8_set(rtwdev, REG_TXDMA_PQ_MAP, BIT_RXDMA_AGG_EN);

	rtw_write16(rtwdev, REG_RXDMA_AGG_PG_TH,
		    FIELD_PREP(BIT_RXDMA_AGG_PG_TH, size) |
		    FIELD_PREP(BIT_DMA_AGG_TO_V1, timeout));

	rtw_write8_set(rtwdev, REG_RXDMA_MODE, BIT_DMA_MODE);
}
714 
715 static void rtw_sdio_enable_interrupt(struct rtw_dev *rtwdev)
716 {
717 	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
718 
719 	rtw_write32(rtwdev, REG_SDIO_HIMR, rtwsdio->irq_mask);
720 }
721 
722 static void rtw_sdio_disable_interrupt(struct rtw_dev *rtwdev)
723 {
724 	rtw_write32(rtwdev, REG_SDIO_HIMR, 0x0);
725 }
726 
727 static u8 rtw_sdio_get_tx_qsel(struct rtw_dev *rtwdev, struct sk_buff *skb,
728 			       u8 queue)
729 {
730 	switch (queue) {
731 	case RTW_TX_QUEUE_BCN:
732 		return TX_DESC_QSEL_BEACON;
733 	case RTW_TX_QUEUE_H2C:
734 		return TX_DESC_QSEL_H2C;
735 	case RTW_TX_QUEUE_MGMT:
736 		return TX_DESC_QSEL_MGMT;
737 	case RTW_TX_QUEUE_HI0:
738 		return TX_DESC_QSEL_HIGH;
739 	default:
740 		return skb->priority;
741 	}
742 }
743 
/* No bus-specific setup is required for SDIO. */
static int rtw_sdio_setup(struct rtw_dev *rtwdev)
{
	return 0;
}
749 
/* Bring up the data path: enable RX aggregation, then unmask the
 * interrupts we handle.
 */
static int rtw_sdio_start(struct rtw_dev *rtwdev)
{
	rtw_sdio_enable_rx_aggregation(rtwdev);
	rtw_sdio_enable_interrupt(rtwdev);

	return 0;
}
757 
/* Stop the data path by masking all interrupts. */
static void rtw_sdio_stop(struct rtw_dev *rtwdev)
{
	rtw_sdio_disable_interrupt(rtwdev);
}
762 
/* Enter deep power save if the TX path allows it.
 *
 * Without the FW_FEATURE_TX_WAKE firmware feature the chip must not
 * TX-DMA while in deep PS, so entry is skipped while any relevant TX
 * queue still holds pending skbs.
 */
static void rtw_sdio_deep_ps_enter(struct rtw_dev *rtwdev)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool tx_empty = true;
	u8 queue;

	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE)) {
		/* Deep PS state is not allowed to TX-DMA */
		for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
			/* BCN queue is rsvd page, does not have DMA interrupt
			 * H2C queue is managed by firmware
			 */
			if (queue == RTW_TX_QUEUE_BCN ||
			    queue == RTW_TX_QUEUE_H2C)
				continue;

			/* check if there is any skb DMAing */
			if (skb_queue_len(&rtwsdio->tx_queue[queue])) {
				tx_empty = false;
				break;
			}
		}
	}

	if (!tx_empty) {
		rtw_dbg(rtwdev, RTW_DBG_PS,
			"TX path not empty, cannot enter deep power save state\n");
		return;
	}

	set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
	rtw_power_mode_change(rtwdev, true);
}
796 
797 static void rtw_sdio_deep_ps_leave(struct rtw_dev *rtwdev)
798 {
799 	if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
800 		rtw_power_mode_change(rtwdev, false);
801 }
802 
803 static void rtw_sdio_deep_ps(struct rtw_dev *rtwdev, bool enter)
804 {
805 	if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
806 		rtw_sdio_deep_ps_enter(rtwdev);
807 
808 	if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
809 		rtw_sdio_deep_ps_leave(rtwdev);
810 }
811 
812 static void rtw_sdio_tx_kick_off(struct rtw_dev *rtwdev)
813 {
814 	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
815 
816 	queue_work(rtwsdio->txwq, &rtwsdio->tx_handler_data->work);
817 }
818 
819 static void rtw_sdio_link_ps(struct rtw_dev *rtwdev, bool enter)
820 {
821 	/* nothing to do */
822 }
823 
/* Bus-specific interface configuration. */
static void rtw_sdio_interface_cfg(struct rtw_dev *rtwdev)
{
	u32 val;

	/* dummy read; presumably latches/flushes TX page state - TODO confirm */
	rtw_read32(rtwdev, REG_SDIO_FREE_TXPG);

	val = rtw_read32(rtwdev, REG_SDIO_TX_CTRL);
	/* 0xfff8 clears bits 0-2 as well as the upper 16 bits */
	val &= 0xfff8;
	rtw_write32(rtwdev, REG_SDIO_TX_CTRL, val);
}
834 
/* Return the per-skb SDIO TX state stored inside the mac80211 TX info's
 * driver data area; the BUILD_BUG_ON guarantees it fits there.
 */
static struct rtw_sdio_tx_data *rtw_sdio_get_tx_data(struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	BUILD_BUG_ON(sizeof(struct rtw_sdio_tx_data) >
		     sizeof(info->status.status_driver_data));

	return (struct rtw_sdio_tx_data *)info->status.status_driver_data;
}
844 
/* Prepend and fill the TX packet descriptor for @skb.
 *
 * If pushing the descriptor leaves its start unaligned, additional
 * padding is pushed so the descriptor starts on the previous
 * RTW_SDIO_DATA_PTR_ALIGN boundary; pkt_info->offset is bumped by the
 * same amount so the consumer knows where the real data begins.
 */
static void rtw_sdio_tx_skb_prepare(struct rtw_dev *rtwdev,
				    struct rtw_tx_pkt_info *pkt_info,
				    struct sk_buff *skb,
				    enum rtw_tx_queue_type queue)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	unsigned long data_addr, aligned_addr;
	size_t offset;
	u8 *pkt_desc;

	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);

	data_addr = (unsigned long)pkt_desc;
	aligned_addr = ALIGN(data_addr, RTW_SDIO_DATA_PTR_ALIGN);

	if (data_addr != aligned_addr) {
		/* Ensure that the start of the pkt_desc is always aligned at
		 * RTW_SDIO_DATA_PTR_ALIGN.
		 */
		offset = RTW_SDIO_DATA_PTR_ALIGN - (aligned_addr - data_addr);

		pkt_desc = skb_push(skb, offset);

		/* By inserting padding to align the start of the pkt_desc we
		 * need to inform the firmware that the actual data starts at
		 * a different offset than normal.
		 */
		pkt_info->offset += offset;
	}

	memset(pkt_desc, 0, chip->tx_pkt_desc_sz);

	pkt_info->qsel = rtw_sdio_get_tx_qsel(rtwdev, skb, queue);

	rtw_tx_fill_tx_desc(rtwdev, pkt_info, skb);
	rtw_tx_fill_txdesc_checksum(rtwdev, pkt_info, pkt_desc);
}
882 
883 static int rtw_sdio_write_data(struct rtw_dev *rtwdev,
884 			       struct rtw_tx_pkt_info *pkt_info,
885 			       struct sk_buff *skb,
886 			       enum rtw_tx_queue_type queue)
887 {
888 	int ret;
889 
890 	rtw_sdio_tx_skb_prepare(rtwdev, pkt_info, skb, queue);
891 
892 	ret = rtw_sdio_write_port(rtwdev, skb, queue);
893 	dev_kfree_skb_any(skb);
894 
895 	return ret;
896 }
897 
898 static int rtw_sdio_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
899 					 u32 size)
900 {
901 	struct rtw_tx_pkt_info pkt_info = {};
902 	struct sk_buff *skb;
903 
904 	skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size);
905 	if (!skb)
906 		return -ENOMEM;
907 
908 	return rtw_sdio_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
909 }
910 
911 static int rtw_sdio_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
912 {
913 	struct rtw_tx_pkt_info pkt_info = {};
914 	struct sk_buff *skb;
915 
916 	skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size);
917 	if (!skb)
918 		return -ENOMEM;
919 
920 	return rtw_sdio_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
921 }
922 
/* Queue an skb for transmission; the actual port write happens later in
 * the TX worker (see rtw_sdio_tx_kick_off()). The TX sequence number is
 * stashed in the skb's driver data for later status reporting.
 */
static int rtw_sdio_tx_write(struct rtw_dev *rtwdev,
			     struct rtw_tx_pkt_info *pkt_info,
			     struct sk_buff *skb)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	enum rtw_tx_queue_type queue = rtw_tx_queue_mapping(skb);
	struct rtw_sdio_tx_data *tx_data;

	rtw_sdio_tx_skb_prepare(rtwdev, pkt_info, skb, queue);

	tx_data = rtw_sdio_get_tx_data(skb);
	tx_data->sn = pkt_info->sn;

	skb_queue_tail(&rtwsdio->tx_queue[queue], skb);

	return 0;
}
940 
941 static void rtw_sdio_tx_err_isr(struct rtw_dev *rtwdev)
942 {
943 	u32 val = rtw_read32(rtwdev, REG_TXDMA_STATUS);
944 
945 	rtw_write32(rtwdev, REG_TXDMA_STATUS, val);
946 }
947 
/* Hand one received frame to the appropriate upper layer.
 *
 * C2H (card-to-host firmware) packets keep their full header and go to
 * the C2H handler; everything else is trimmed to the payload, accounted
 * in the RX stats, and passed to mac80211.
 */
static void rtw_sdio_rx_skb(struct rtw_dev *rtwdev, struct sk_buff *skb,
			    u32 pkt_offset, struct rtw_rx_pkt_stat *pkt_stat,
			    struct ieee80211_rx_status *rx_status)
{
	*IEEE80211_SKB_RXCB(skb) = *rx_status;

	if (pkt_stat->is_c2h) {
		skb_put(skb, pkt_stat->pkt_len + pkt_offset);
		rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, skb);
		return;
	}

	skb_put(skb, pkt_stat->pkt_len);
	skb_reserve(skb, pkt_offset);

	rtw_update_rx_freq_for_invalid(rtwdev, skb, rx_status, pkt_stat);
	rtw_rx_stats(rtwdev, pkt_stat->vif, skb);

	ieee80211_rx_irqsafe(rtwdev->hw, skb);
}
968 
/* Read one RX transfer (possibly containing several aggregated frames)
 * from the RX FIFO and dispatch each contained frame.
 *
 * All frames arrive in a single bus-aligned buffer; each frame except
 * the last is copied into its own skb, the last one reuses the original
 * buffer so it is freed by the RX path automatically.
 */
static void rtw_sdio_rxfifo_recv(struct rtw_dev *rtwdev, u32 rx_len)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
	struct ieee80211_rx_status rx_status;
	struct rtw_rx_pkt_stat pkt_stat;
	struct sk_buff *skb, *split_skb;
	u32 pkt_offset, curr_pkt_len;
	size_t bufsz;
	u8 *rx_desc;
	int ret;

	bufsz = sdio_align_size(rtwsdio->sdio_func, rx_len);

	skb = dev_alloc_skb(bufsz);
	if (!skb)
		return;

	ret = rtw_sdio_read_port(rtwdev, skb->data, bufsz);
	if (ret) {
		dev_kfree_skb_any(skb);
		return;
	}

	while (true) {
		rx_desc = skb->data;
		rtw_rx_query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);
		pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
			     pkt_stat.shift;

		/* aggregated frames are padded to the alignment boundary */
		curr_pkt_len = ALIGN(pkt_offset + pkt_stat.pkt_len,
				     RTW_SDIO_DATA_PTR_ALIGN);

		if ((curr_pkt_len + pkt_desc_sz) >= rx_len) {
			/* Use the original skb (with it's adjusted offset)
			 * when processing the last (or even the only) entry to
			 * have it's memory freed automatically.
			 */
			rtw_sdio_rx_skb(rtwdev, skb, pkt_offset, &pkt_stat,
					&rx_status);
			break;
		}

		split_skb = dev_alloc_skb(curr_pkt_len);
		if (!split_skb) {
			/* out of memory: deliver this frame from the
			 * original skb and drop the remaining ones
			 */
			rtw_sdio_rx_skb(rtwdev, skb, pkt_offset, &pkt_stat,
					&rx_status);
			break;
		}

		skb_copy_header(split_skb, skb);
		memcpy(split_skb->data, skb->data, curr_pkt_len);

		rtw_sdio_rx_skb(rtwdev, split_skb, pkt_offset, &pkt_stat,
				&rx_status);

		/* Move to the start of the next RX descriptor */
		skb_reserve(skb, curr_pkt_len);
		rx_len -= curr_pkt_len;
	}
}
1031 
/* Service the RX interrupt: drain pending RX requests, bounded to
 * SZ_64K of data per invocation.
 */
static void rtw_sdio_rx_isr(struct rtw_dev *rtwdev)
{
	u32 rx_len, hisr, total_rx_bytes = 0;

	do {
		/* 8051 chips report the pending RX length in 16 bits */
		if (rtw_chip_wcpu_8051(rtwdev))
			rx_len = rtw_read16(rtwdev, REG_SDIO_RX0_REQ_LEN);
		else
			rx_len = rtw_read32(rtwdev, REG_SDIO_RX0_REQ_LEN);

		if (!rx_len)
			break;

		rtw_sdio_rxfifo_recv(rtwdev, rx_len);

		total_rx_bytes += rx_len;

		if (rtw_chip_wcpu_8051(rtwdev)) {
			/* Stop if no more RX requests are pending, even if
			 * rx_len could be greater than zero in the next
			 * iteration. This is needed because the RX buffer may
			 * already contain data while either HW or FW are not
			 * done filling that buffer yet. Still reading the
			 * buffer can result in packets where
			 * rtw_rx_pkt_stat.pkt_len is zero or points beyond the
			 * end of the buffer.
			 */
			hisr = rtw_read32(rtwdev, REG_SDIO_HISR);
		} else {
			/* RTW_WCPU_3081 chips have improved hardware or
			 * firmware and can use rx_len unconditionally.
			 */
			hisr = REG_SDIO_HISR_RX_REQUEST;
		}
	} while (total_rx_bytes < SZ_64K && hisr & REG_SDIO_HISR_RX_REQUEST);
}
1068 
/* SDIO interrupt handler, run from the MMC core's SDIO IRQ thread.
 *
 * irq_thread is recorded so the register accessors can skip re-claiming
 * the host while running in this context - see
 * rtw_sdio_bus_claim_needed().
 */
static void rtw_sdio_handle_interrupt(struct sdio_func *sdio_func)
{
	struct ieee80211_hw *hw = sdio_get_drvdata(sdio_func);
	struct rtw_sdio *rtwsdio;
	struct rtw_dev *rtwdev;
	u32 hisr;

	rtwdev = hw->priv;
	rtwsdio = (struct rtw_sdio *)rtwdev->priv;

	rtwsdio->irq_thread = current;

	hisr = rtw_read32(rtwdev, REG_SDIO_HISR);

	if (hisr & REG_SDIO_HISR_TXERR)
		rtw_sdio_tx_err_isr(rtwdev);
	if (hisr & REG_SDIO_HISR_RX_REQUEST) {
		/* don't write the RX bit back below; the FIFO drain in
		 * rtw_sdio_rx_isr() handles the RX request
		 */
		hisr &= ~REG_SDIO_HISR_RX_REQUEST;
		rtw_sdio_rx_isr(rtwdev);
	}

	/* acknowledge the remaining handled interrupt bits */
	rtw_write32(rtwdev, REG_SDIO_HISR, hisr);

	rtwsdio->irq_thread = NULL;
}
1094 
1095 static int __maybe_unused rtw_sdio_suspend(struct device *dev)
1096 {
1097 	struct sdio_func *func = dev_to_sdio_func(dev);
1098 	struct ieee80211_hw *hw = dev_get_drvdata(dev);
1099 	struct rtw_dev *rtwdev = hw->priv;
1100 	int ret;
1101 
1102 	ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
1103 	if (ret)
1104 		rtw_err(rtwdev, "Failed to host PM flag MMC_PM_KEEP_POWER");
1105 
1106 	return ret;
1107 }
1108 
/* Resume is a no-op: the card kept power over suspend thanks to
 * MMC_PM_KEEP_POWER (see rtw_sdio_suspend()).
 */
static int __maybe_unused rtw_sdio_resume(struct device *dev)
{
	return 0;
}

SIMPLE_DEV_PM_OPS(rtw_sdio_pm_ops, rtw_sdio_suspend, rtw_sdio_resume);
EXPORT_SYMBOL(rtw_sdio_pm_ops);
1116 
/* Enable and configure the SDIO function and link it with the rtw_dev.
 *
 * Sets the 512-byte block size, records whether the card negotiated a
 * UHS (SDIO 3.0) bus mode, and attaches the ieee80211_hw to the SDIO
 * device. On failure the function is unwound in reverse order.
 */
static int rtw_sdio_claim(struct rtw_dev *rtwdev, struct sdio_func *sdio_func)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	int ret;

	sdio_claim_host(sdio_func);

	ret = sdio_enable_func(sdio_func);
	if (ret) {
		rtw_err(rtwdev, "Failed to enable SDIO func");
		goto err_release_host;
	}

	ret = sdio_set_block_size(sdio_func, RTW_SDIO_BLOCK_SIZE);
	if (ret) {
		rtw_err(rtwdev, "Failed to set SDIO block size to 512");
		goto err_disable_func;
	}

	rtwsdio->sdio_func = sdio_func;

	/* a UHS card implies the SDIO 3.0 bus mode */
	rtwsdio->sdio3_bus_mode = mmc_card_uhs(sdio_func->card);

	sdio_set_drvdata(sdio_func, rtwdev->hw);
	SET_IEEE80211_DEV(rtwdev->hw, &sdio_func->dev);

	sdio_release_host(sdio_func);

	return 0;

err_disable_func:
	sdio_disable_func(sdio_func);
err_release_host:
	sdio_release_host(sdio_func);
	return ret;
}
1153 
/* Counterpart of rtw_sdio_claim(): disable the SDIO function again.
 * The host must be claimed around sdio_disable_func().
 */
static void rtw_sdio_declaim(struct rtw_dev *rtwdev,
			     struct sdio_func *sdio_func)
{
	sdio_claim_host(sdio_func);
	sdio_disable_func(sdio_func);
	sdio_release_host(sdio_func);
}
1161 
/* HCI operations handed to the rtw88 core via rtwdev->hci.ops; these map
 * the generic HCI hooks onto their SDIO implementations.
 */
static const struct rtw_hci_ops rtw_sdio_ops = {
	.tx_write = rtw_sdio_tx_write,
	.tx_kick_off = rtw_sdio_tx_kick_off,
	.setup = rtw_sdio_setup,
	.start = rtw_sdio_start,
	.stop = rtw_sdio_stop,
	.deep_ps = rtw_sdio_deep_ps,
	.link_ps = rtw_sdio_link_ps,
	.interface_cfg = rtw_sdio_interface_cfg,
	/* No dynamic RX aggregation hook is provided for SDIO. */
	.dynamic_rx_agg = NULL,
	.write_firmware_page = rtw_write_firmware_page,

	.read8 = rtw_sdio_read8,
	.read16 = rtw_sdio_read16,
	.read32 = rtw_sdio_read32,
	.write8 = rtw_sdio_write8,
	.write16 = rtw_sdio_write16,
	.write32 = rtw_sdio_write32,
	.write_data_rsvd_page = rtw_sdio_write_data_rsvd_page,
	.write_data_h2c = rtw_sdio_write_data_h2c,
};
1183 
1184 static int rtw_sdio_request_irq(struct rtw_dev *rtwdev,
1185 				struct sdio_func *sdio_func)
1186 {
1187 	int ret;
1188 
1189 	sdio_claim_host(sdio_func);
1190 	ret = sdio_claim_irq(sdio_func, &rtw_sdio_handle_interrupt);
1191 	sdio_release_host(sdio_func);
1192 
1193 	if (ret) {
1194 		rtw_err(rtwdev, "failed to claim SDIO IRQ");
1195 		return ret;
1196 	}
1197 
1198 	return 0;
1199 }
1200 
1201 static void rtw_sdio_indicate_tx_status(struct rtw_dev *rtwdev,
1202 					struct sk_buff *skb)
1203 {
1204 	struct rtw_sdio_tx_data *tx_data = rtw_sdio_get_tx_data(skb);
1205 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1206 	struct ieee80211_hw *hw = rtwdev->hw;
1207 
1208 	skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
1209 
1210 	/* enqueue to wait for tx report */
1211 	if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
1212 		rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
1213 		return;
1214 	}
1215 
1216 	/* always ACK for others, then they won't be marked as drop */
1217 	ieee80211_tx_info_clear_status(info);
1218 	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
1219 		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
1220 	else
1221 		info->flags |= IEEE80211_TX_STAT_ACK;
1222 
1223 	ieee80211_tx_status_irqsafe(hw, skb);
1224 }
1225 
1226 static void rtw_sdio_process_tx_queue(struct rtw_dev *rtwdev,
1227 				      enum rtw_tx_queue_type queue)
1228 {
1229 	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
1230 	struct sk_buff *skb;
1231 	int ret;
1232 
1233 	skb = skb_dequeue(&rtwsdio->tx_queue[queue]);
1234 	if (!skb)
1235 		return;
1236 
1237 	ret = rtw_sdio_write_port(rtwdev, skb, queue);
1238 	if (ret) {
1239 		skb_queue_head(&rtwsdio->tx_queue[queue], skb);
1240 		return;
1241 	}
1242 
1243 	rtw_sdio_indicate_tx_status(rtwdev, skb);
1244 }
1245 
1246 static void rtw_sdio_tx_handler(struct work_struct *work)
1247 {
1248 	struct rtw_sdio_work_data *work_data =
1249 		container_of(work, struct rtw_sdio_work_data, work);
1250 	struct rtw_sdio *rtwsdio;
1251 	struct rtw_dev *rtwdev;
1252 	int limit, queue;
1253 
1254 	rtwdev = work_data->rtwdev;
1255 	rtwsdio = (struct rtw_sdio *)rtwdev->priv;
1256 
1257 	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
1258 		rtw_sdio_deep_ps_leave(rtwdev);
1259 
1260 	for (queue = RTK_MAX_TX_QUEUE_NUM - 1; queue >= 0; queue--) {
1261 		for (limit = 0; limit < 1000; limit++) {
1262 			rtw_sdio_process_tx_queue(rtwdev, queue);
1263 
1264 			if (skb_queue_empty(&rtwsdio->tx_queue[queue]))
1265 				break;
1266 		}
1267 	}
1268 }
1269 
/* Counterpart of rtw_sdio_request_irq(): release the SDIO IRQ.
 * The host must be claimed around sdio_release_irq().
 */
static void rtw_sdio_free_irq(struct rtw_dev *rtwdev,
			      struct sdio_func *sdio_func)
{
	sdio_claim_host(sdio_func);
	sdio_release_irq(sdio_func);
	sdio_release_host(sdio_func);
}
1277 
1278 static int rtw_sdio_init_tx(struct rtw_dev *rtwdev)
1279 {
1280 	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
1281 	int i;
1282 
1283 	rtwsdio->txwq = create_singlethread_workqueue("rtw88_sdio: tx wq");
1284 	if (!rtwsdio->txwq) {
1285 		rtw_err(rtwdev, "failed to create TX work queue\n");
1286 		return -ENOMEM;
1287 	}
1288 
1289 	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++)
1290 		skb_queue_head_init(&rtwsdio->tx_queue[i]);
1291 	rtwsdio->tx_handler_data = kmalloc(sizeof(*rtwsdio->tx_handler_data),
1292 					   GFP_KERNEL);
1293 	if (!rtwsdio->tx_handler_data)
1294 		goto err_destroy_wq;
1295 
1296 	rtwsdio->tx_handler_data->rtwdev = rtwdev;
1297 	INIT_WORK(&rtwsdio->tx_handler_data->work, rtw_sdio_tx_handler);
1298 
1299 	return 0;
1300 
1301 err_destroy_wq:
1302 	destroy_workqueue(rtwsdio->txwq);
1303 	return -ENOMEM;
1304 }
1305 
1306 static void rtw_sdio_deinit_tx(struct rtw_dev *rtwdev)
1307 {
1308 	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
1309 	int i;
1310 
1311 	destroy_workqueue(rtwsdio->txwq);
1312 	kfree(rtwsdio->tx_handler_data);
1313 
1314 	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++)
1315 		ieee80211_purge_tx_queue(rtwdev->hw, &rtwsdio->tx_queue[i]);
1316 }
1317 
/* SDIO probe entry point, called by the chip-specific driver modules.
 * Allocates the ieee80211 hardware (with rtw_dev + rtw_sdio as private
 * data), initializes the rtw88 core, claims the SDIO function, sets up the
 * TX path and IRQ, and finally registers with mac80211. Every step is
 * unwound in reverse order on failure.
 * Returns 0 on success or a negative errno.
 */
int rtw_sdio_probe(struct sdio_func *sdio_func,
		   const struct sdio_device_id *id)
{
	struct ieee80211_hw *hw;
	struct rtw_dev *rtwdev;
	int drv_data_size;
	int ret;

	/* rtw_dev and the SDIO-specific rtw_sdio live in hw->priv. */
	drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_sdio);
	hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
	if (!hw) {
		dev_err(&sdio_func->dev, "failed to allocate hw");
		return -ENOMEM;
	}

	rtwdev = hw->priv;
	rtwdev->hw = hw;
	rtwdev->dev = &sdio_func->dev;
	/* The matched device-table entry carries the chip descriptor. */
	rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
	rtwdev->hci.ops = &rtw_sdio_ops;
	rtwdev->hci.type = RTW_HCI_TYPE_SDIO;

	ret = rtw_core_init(rtwdev);
	if (ret)
		goto err_release_hw;

	rtw_dbg(rtwdev, RTW_DBG_SDIO,
		"rtw88 SDIO probe: vendor=0x%04x device=%04x class=%02x",
		id->vendor, id->device, id->class);

	ret = rtw_sdio_claim(rtwdev, sdio_func);
	if (ret) {
		rtw_err(rtwdev, "failed to claim SDIO device");
		goto err_deinit_core;
	}

	rtw_sdio_init(rtwdev);

	ret = rtw_sdio_init_tx(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to init SDIO TX queue\n");
		goto err_sdio_declaim;
	}

	/* Reads chip/eFuse information; needs register access, hence after
	 * rtw_sdio_claim().
	 */
	ret = rtw_chip_info_setup(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup chip information");
		goto err_destroy_txwq;
	}

	ret = rtw_sdio_request_irq(rtwdev, sdio_func);
	if (ret)
		goto err_destroy_txwq;

	ret = rtw_register_hw(rtwdev, hw);
	if (ret) {
		rtw_err(rtwdev, "failed to register hw");
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	rtw_sdio_free_irq(rtwdev, sdio_func);
err_destroy_txwq:
	rtw_sdio_deinit_tx(rtwdev);
err_sdio_declaim:
	rtw_sdio_declaim(rtwdev, sdio_func);
err_deinit_core:
	rtw_core_deinit(rtwdev);
err_release_hw:
	ieee80211_free_hw(hw);

	return ret;
}
1393 EXPORT_SYMBOL(rtw_sdio_probe);
1394 
/* SDIO remove entry point: unwind everything rtw_sdio_probe() set up, in
 * reverse order. Interrupts are disabled before the IRQ is released so no
 * handler runs against a partially torn-down device.
 */
void rtw_sdio_remove(struct sdio_func *sdio_func)
{
	struct ieee80211_hw *hw = sdio_get_drvdata(sdio_func);
	struct rtw_dev *rtwdev;

	/* drvdata is only set once probe succeeded. */
	if (!hw)
		return;

	rtwdev = hw->priv;

	rtw_unregister_hw(rtwdev, hw);
	rtw_sdio_disable_interrupt(rtwdev);
	rtw_sdio_free_irq(rtwdev, sdio_func);
	rtw_sdio_declaim(rtwdev, sdio_func);
	rtw_sdio_deinit_tx(rtwdev);
	rtw_core_deinit(rtwdev);
	ieee80211_free_hw(hw);
}
1414 
1415 void rtw_sdio_shutdown(struct device *dev)
1416 {
1417 	struct sdio_func *sdio_func = dev_to_sdio_func(dev);
1418 	const struct rtw_chip_info *chip;
1419 	struct ieee80211_hw *hw;
1420 	struct rtw_dev *rtwdev;
1421 
1422 	hw = sdio_get_drvdata(sdio_func);
1423 	if (!hw)
1424 		return;
1425 
1426 	rtwdev = hw->priv;
1427 	chip = rtwdev->chip;
1428 
1429 	if (chip->ops->shutdown)
1430 		chip->ops->shutdown(rtwdev);
1431 }
1432 EXPORT_SYMBOL(rtw_sdio_shutdown);
1433 
1434 MODULE_AUTHOR("Martin Blumenstingl");
1435 MODULE_AUTHOR("Jernej Skrabec");
1436 MODULE_DESCRIPTION("Realtek 802.11ac wireless SDIO driver");
1437 MODULE_LICENSE("Dual BSD/GPL");
1438