xref: /linux/drivers/net/wireless/realtek/rtw88/sdio.c (revision dbf8fe85a16a33d6b6bd01f2bc606fc017771465)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (C) 2021 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
3  * Copyright (C) 2021 Jernej Skrabec <jernej.skrabec@gmail.com>
4  *
5  * Based on rtw88/pci.c:
6  *   Copyright(c) 2018-2019  Realtek Corporation
7  */
8 
9 #include <linux/module.h>
10 #include <linux/mmc/host.h>
11 #include <linux/mmc/sdio_func.h>
12 #include "main.h"
13 #include "mac.h"
14 #include "debug.h"
15 #include "fw.h"
16 #include "ps.h"
17 #include "reg.h"
18 #include "rx.h"
19 #include "sdio.h"
20 #include "tx.h"
21 
22 #define RTW_SDIO_INDIRECT_RW_RETRIES			50
23 
rtw_sdio_is_bus_addr(u32 addr)24 static bool rtw_sdio_is_bus_addr(u32 addr)
25 {
26 	return !!(addr & RTW_SDIO_BUS_MSK);
27 }
28 
rtw_sdio_bus_claim_needed(struct rtw_sdio * rtwsdio)29 static bool rtw_sdio_bus_claim_needed(struct rtw_sdio *rtwsdio)
30 {
31 	return !rtwsdio->irq_thread ||
32 	       rtwsdio->irq_thread != current;
33 }
34 
/* Translate a prefixed register address (MAC I/O or SDIO-local domain)
 * into the corresponding SDIO command address. Unknown domains are
 * reported and the address is returned unmodified.
 */
static u32 rtw_sdio_to_bus_offset(struct rtw_dev *rtwdev, u32 addr)
{
	u32 domain = addr & RTW_SDIO_BUS_MSK;

	if (domain == WLAN_IOREG_OFFSET) {
		addr = (addr & WLAN_IOREG_REG_MSK) |
		       FIELD_PREP(REG_SDIO_CMD_ADDR_MSK,
				  REG_SDIO_CMD_ADDR_MAC_REG);
	} else if (domain == SDIO_LOCAL_OFFSET) {
		addr = (addr & SDIO_LOCAL_REG_MSK) |
		       FIELD_PREP(REG_SDIO_CMD_ADDR_MSK,
				  REG_SDIO_CMD_ADDR_SDIO_REG);
	} else {
		rtw_warn(rtwdev, "Cannot convert addr 0x%08x to bus offset",
			 addr);
	}

	return addr;
}
55 
rtw_sdio_use_memcpy_io(struct rtw_dev * rtwdev,u32 addr,u8 alignment)56 static bool rtw_sdio_use_memcpy_io(struct rtw_dev *rtwdev, u32 addr,
57 				   u8 alignment)
58 {
59 	return IS_ALIGNED(addr, alignment) &&
60 	       test_bit(RTW_FLAG_POWERON, rtwdev->flags);
61 }
62 
/* Write a 32-bit value to the card. Uses a single 32-bit access when the
 * address is 4-byte aligned and the card is powered on; otherwise falls
 * back to four single-byte writes in little-endian order. On the byte-wise
 * path *err_ret holds the first error encountered.
 */
static void rtw_sdio_writel(struct rtw_dev *rtwdev, u32 val, u32 addr,
			    int *err_ret)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	u8 buf[4];
	int i;

	if (rtw_sdio_use_memcpy_io(rtwdev, addr, 4)) {
		sdio_writel(rtwsdio->sdio_func, val, addr, err_ret);
		return;
	}

	/* Serialize the value as little-endian for the byte-wise fallback */
	*(__le32 *)buf = cpu_to_le32(val);

	for (i = 0; i < 4; i++) {
		sdio_writeb(rtwsdio->sdio_func, buf[i], addr + i, err_ret);
		if (*err_ret)
			return;
	}
}
83 
/* Write a 16-bit value as two single-byte writes in little-endian order.
 * Unlike rtw_sdio_writel() there is no multi-byte fast path here.
 * *err_ret holds the first error encountered.
 */
static void rtw_sdio_writew(struct rtw_dev *rtwdev, u16 val, u32 addr,
			    int *err_ret)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	u8 buf[2];
	int i;

	*(__le16 *)buf = cpu_to_le16(val);

	for (i = 0; i < 2; i++) {
		sdio_writeb(rtwsdio->sdio_func, buf[i], addr + i, err_ret);
		if (*err_ret)
			return;
	}
}
99 
/* Read a 32-bit value from the card. Uses a single 32-bit access when the
 * address is 4-byte aligned and the card is powered on; otherwise reads
 * four bytes individually and assembles them as little-endian. Returns 0
 * with *err_ret set on failure.
 */
static u32 rtw_sdio_readl(struct rtw_dev *rtwdev, u32 addr, int *err_ret)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	u8 buf[4];
	int i;

	if (rtw_sdio_use_memcpy_io(rtwdev, addr, 4))
		return sdio_readl(rtwsdio->sdio_func, addr, err_ret);

	for (i = 0; i < 4; i++) {
		buf[i] = sdio_readb(rtwsdio->sdio_func, addr + i, err_ret);
		if (*err_ret)
			return 0;
	}

	return le32_to_cpu(*(__le32 *)buf);
}
117 
/* Read a 16-bit value as two single-byte reads assembled little-endian.
 * Returns 0 with *err_ret set on failure.
 */
static u16 rtw_sdio_readw(struct rtw_dev *rtwdev, u32 addr, int *err_ret)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	u8 buf[2];
	int i;

	for (i = 0; i < 2; i++) {
		buf[i] = sdio_readb(rtwsdio->sdio_func, addr + i, err_ret);
		if (*err_ret)
			return 0;
	}

	return le16_to_cpu(*(__le16 *)buf);
}
132 
/* For direct accesses, map a plain MAC register address (no bus domain
 * prefix) into the WLAN I/O-register domain and convert it to an SDIO
 * command address. Indirect accesses use the address as-is.
 */
static u32 rtw_sdio_to_io_address(struct rtw_dev *rtwdev, u32 addr,
				  bool direct)
{
	if (direct) {
		if (!rtw_sdio_is_bus_addr(addr))
			addr |= WLAN_IOREG_OFFSET;

		return rtw_sdio_to_bus_offset(rtwdev, addr);
	}

	return addr;
}
144 
/* Decide whether @addr can be accessed directly or must go through the
 * indirect register access mechanism.
 */
static bool rtw_sdio_use_direct_io(struct rtw_dev *rtwdev, u32 addr)
{
	bool might_indirect_under_power_off = rtwdev->chip->id == RTW_CHIP_TYPE_8822C;

	/* RTL8822C: plain MAC register addresses need indirect access while
	 * the card is powered off.
	 */
	if (!test_bit(RTW_FLAG_POWERON, rtwdev->flags) &&
	    !rtw_sdio_is_bus_addr(addr) && might_indirect_under_power_off)
		return false;

	/* Without SDIO 3.0 support everything is accessed directly;
	 * addresses that already carry a bus domain prefix are always direct.
	 */
	return !rtw_sdio_is_sdio30_supported(rtwdev) ||
		rtw_sdio_is_bus_addr(addr);
}
156 
/* Kick off an indirect register access cycle for @addr with the given
 * read/write/width configuration bits and poll for completion (bit 4 in
 * the third byte of the config register). Returns 0 on success, the
 * error from the initial config write, or -ETIMEDOUT when the completion
 * bit did not show up within RTW_SDIO_INDIRECT_RW_RETRIES polls.
 */
static int rtw_sdio_indirect_reg_cfg(struct rtw_dev *rtwdev, u32 addr, u32 cfg)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	unsigned int retry;
	u32 reg_cfg;
	int ret;
	u8 tmp;

	reg_cfg = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_CFG);

	rtw_sdio_writel(rtwdev, addr | cfg | BIT_SDIO_INDIRECT_REG_CFG_UNK20,
			reg_cfg, &ret);
	if (ret)
		return ret;

	for (retry = 0; retry < RTW_SDIO_INDIRECT_RW_RETRIES; retry++) {
		/* Read errors during the poll are not fatal - just retry */
		tmp = sdio_readb(rtwsdio->sdio_func, reg_cfg + 2, &ret);
		if (!ret && (tmp & BIT(4)))
			return 0;
	}

	return -ETIMEDOUT;
}
180 
/* Read one byte through the indirect register access mechanism: trigger a
 * read cycle for @addr, then fetch the result from the indirect data
 * register. Returns 0 with *err_ret set on failure.
 */
static u8 rtw_sdio_indirect_read8(struct rtw_dev *rtwdev, u32 addr,
				  int *err_ret)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	u32 reg_data;

	*err_ret = rtw_sdio_indirect_reg_cfg(rtwdev, addr,
					     BIT_SDIO_INDIRECT_REG_CFG_READ);
	if (*err_ret)
		return 0;

	reg_data = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_DATA);
	return sdio_readb(rtwsdio->sdio_func, reg_data, err_ret);
}
195 
/* Read @count consecutive bytes into @buf using indirect byte reads,
 * stopping at the first error. Returns 0 on success or the error code.
 */
static int rtw_sdio_indirect_read_bytes(struct rtw_dev *rtwdev, u32 addr,
					u8 *buf, int count)
{
	int ret = 0;
	int i = 0;

	while (i < count && !ret) {
		buf[i] = rtw_sdio_indirect_read8(rtwdev, addr + i, &ret);
		i++;
	}

	return ret;
}
209 
/* Read a 16-bit value through the indirect access mechanism. Unaligned
 * addresses are handled with two indirect byte reads assembled as
 * little-endian; aligned addresses use a single indirect cycle followed
 * by a 16-bit fetch from the indirect data register. Returns 0 with
 * *err_ret set on failure.
 */
static u16 rtw_sdio_indirect_read16(struct rtw_dev *rtwdev, u32 addr,
				    int *err_ret)
{
	u32 reg_data;
	u8 buf[2];

	if (!IS_ALIGNED(addr, 2)) {
		*err_ret = rtw_sdio_indirect_read_bytes(rtwdev, addr, buf, 2);
		if (*err_ret)
			return 0;

		return le16_to_cpu(*(__le16 *)buf);
	}

	*err_ret = rtw_sdio_indirect_reg_cfg(rtwdev, addr,
					     BIT_SDIO_INDIRECT_REG_CFG_READ);
	if (*err_ret)
		return 0;

	reg_data = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_DATA);
	return rtw_sdio_readw(rtwdev, reg_data, err_ret);
}
232 
/* Read a 32-bit value through the indirect access mechanism. Unaligned
 * addresses are handled with four indirect byte reads assembled as
 * little-endian; aligned addresses use a single indirect cycle followed
 * by a 32-bit fetch from the indirect data register. Returns 0 with
 * *err_ret set on failure.
 */
static u32 rtw_sdio_indirect_read32(struct rtw_dev *rtwdev, u32 addr,
				    int *err_ret)
{
	u32 reg_data;
	u8 buf[4];

	if (!IS_ALIGNED(addr, 4)) {
		*err_ret = rtw_sdio_indirect_read_bytes(rtwdev, addr, buf, 4);
		if (*err_ret)
			return 0;

		return le32_to_cpu(*(__le32 *)buf);
	}

	*err_ret = rtw_sdio_indirect_reg_cfg(rtwdev, addr,
					     BIT_SDIO_INDIRECT_REG_CFG_READ);
	if (*err_ret)
		return 0;

	reg_data = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_DATA);
	return rtw_sdio_readl(rtwdev, reg_data, err_ret);
}
255 
/* HCI op: read one byte from the card, selecting direct or indirect
 * access and claiming the SDIO host when required (see
 * rtw_sdio_bus_claim_needed()). Errors are logged, not returned.
 */
static u8 rtw_sdio_read8(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool direct, bus_claim;
	int ret;
	u8 val;

	direct = rtw_sdio_use_direct_io(rtwdev, addr);
	addr = rtw_sdio_to_io_address(rtwdev, addr, direct);
	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	if (direct)
		val = sdio_readb(rtwsdio->sdio_func, addr, &ret);
	else
		val = rtw_sdio_indirect_read8(rtwdev, addr, &ret);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev, "sdio read8 failed (0x%x): %d", addr, ret);

	return val;
}
283 
/* HCI op: read a 16-bit value from the card, selecting direct or indirect
 * access and claiming the SDIO host when required. Errors are logged,
 * not returned.
 */
static u16 rtw_sdio_read16(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool direct, bus_claim;
	int ret;
	u16 val;

	direct = rtw_sdio_use_direct_io(rtwdev, addr);
	addr = rtw_sdio_to_io_address(rtwdev, addr, direct);
	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	if (direct)
		val = rtw_sdio_readw(rtwdev, addr, &ret);
	else
		val = rtw_sdio_indirect_read16(rtwdev, addr, &ret);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev, "sdio read16 failed (0x%x): %d", addr, ret);

	return val;
}
311 
/* HCI op: read a 32-bit value from the card, selecting direct or indirect
 * access and claiming the SDIO host when required. Errors are logged,
 * not returned.
 */
static u32 rtw_sdio_read32(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool direct, bus_claim;
	u32 val;
	int ret;

	direct = rtw_sdio_use_direct_io(rtwdev, addr);
	addr = rtw_sdio_to_io_address(rtwdev, addr, direct);
	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	if (direct)
		val = rtw_sdio_readl(rtwdev, addr, &ret);
	else
		val = rtw_sdio_indirect_read32(rtwdev, addr, &ret);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev, "sdio read32 failed (0x%x): %d", addr, ret);

	return val;
}
339 
/* Write one byte through the indirect access mechanism: place the value
 * in the indirect data register first, then trigger the write cycle for
 * @addr.
 */
static void rtw_sdio_indirect_write8(struct rtw_dev *rtwdev, u8 val, u32 addr,
				     int *err_ret)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	u32 reg_data;

	reg_data = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_DATA);
	sdio_writeb(rtwsdio->sdio_func, val, reg_data, err_ret);
	if (*err_ret)
		return;

	*err_ret = rtw_sdio_indirect_reg_cfg(rtwdev, addr,
					     BIT_SDIO_INDIRECT_REG_CFG_WRITE);
}
354 
/* Write a 16-bit value through the indirect access mechanism. Unaligned
 * addresses fall back to a direct byte-wise write via the translated I/O
 * address; aligned addresses stage the value in the indirect data
 * register and trigger a word-sized write cycle.
 */
static void rtw_sdio_indirect_write16(struct rtw_dev *rtwdev, u16 val, u32 addr,
				      int *err_ret)
{
	u32 reg_data;

	if (!IS_ALIGNED(addr, 2)) {
		addr = rtw_sdio_to_io_address(rtwdev, addr, true);
		rtw_sdio_writew(rtwdev, val, addr, err_ret);
		return;
	}

	reg_data = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_DATA);
	rtw_sdio_writew(rtwdev, val, reg_data, err_ret);
	if (*err_ret)
		return;

	*err_ret = rtw_sdio_indirect_reg_cfg(rtwdev, addr,
					     BIT_SDIO_INDIRECT_REG_CFG_WRITE |
					     BIT_SDIO_INDIRECT_REG_CFG_WORD);
}
375 
/* Write a 32-bit value through the indirect access mechanism. Unaligned
 * addresses fall back to a direct byte-wise write via the translated I/O
 * address; aligned addresses stage the value in the indirect data
 * register and trigger a dword-sized write cycle.
 */
static void rtw_sdio_indirect_write32(struct rtw_dev *rtwdev, u32 val,
				      u32 addr, int *err_ret)
{
	u32 reg_data;

	if (!IS_ALIGNED(addr, 4)) {
		addr = rtw_sdio_to_io_address(rtwdev, addr, true);
		rtw_sdio_writel(rtwdev, val, addr, err_ret);
		return;
	}

	reg_data = rtw_sdio_to_bus_offset(rtwdev, REG_SDIO_INDIRECT_REG_DATA);
	rtw_sdio_writel(rtwdev, val, reg_data, err_ret);
	/* Bail out on error like the 8 and 16 bit variants do. This keeps
	 * the original error code instead of overwriting it with the result
	 * of a config cycle for data that was never staged.
	 */
	if (*err_ret)
		return;

	*err_ret = rtw_sdio_indirect_reg_cfg(rtwdev, addr,
					     BIT_SDIO_INDIRECT_REG_CFG_WRITE |
					     BIT_SDIO_INDIRECT_REG_CFG_DWORD);
}
394 
/* HCI op: write one byte to the card, selecting direct or indirect access
 * and claiming the SDIO host when required. Errors are logged, not
 * returned.
 */
static void rtw_sdio_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool direct, bus_claim;
	int ret;

	direct = rtw_sdio_use_direct_io(rtwdev, addr);
	addr = rtw_sdio_to_io_address(rtwdev, addr, direct);
	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	if (direct)
		sdio_writeb(rtwsdio->sdio_func, val, addr, &ret);
	else
		rtw_sdio_indirect_write8(rtwdev, val, addr, &ret);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev, "sdio write8 failed (0x%x): %d", addr, ret);
}
419 
/* HCI op: write a 16-bit value to the card, selecting direct or indirect
 * access and claiming the SDIO host when required. Errors are logged,
 * not returned.
 */
static void rtw_sdio_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool direct, bus_claim;
	int ret;

	direct = rtw_sdio_use_direct_io(rtwdev, addr);
	addr = rtw_sdio_to_io_address(rtwdev, addr, direct);
	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	if (direct)
		rtw_sdio_writew(rtwdev, val, addr, &ret);
	else
		rtw_sdio_indirect_write16(rtwdev, val, addr, &ret);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev, "sdio write16 failed (0x%x): %d", addr, ret);
}
444 
/* HCI op: write a 32-bit value to the card, selecting direct or indirect
 * access and claiming the SDIO host when required. Errors are logged,
 * not returned.
 */
static void rtw_sdio_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool direct, bus_claim;
	int ret;

	direct = rtw_sdio_use_direct_io(rtwdev, addr);
	addr = rtw_sdio_to_io_address(rtwdev, addr, direct);
	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	if (direct)
		rtw_sdio_writel(rtwdev, val, addr, &ret);
	else
		rtw_sdio_indirect_write32(rtwdev, val, addr, &ret);

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	if (ret)
		rtw_warn(rtwdev, "sdio write32 failed (0x%x): %d", addr, ret);
}
469 
rtw_sdio_get_tx_addr(struct rtw_dev * rtwdev,size_t size,enum rtw_tx_queue_type queue)470 static u32 rtw_sdio_get_tx_addr(struct rtw_dev *rtwdev, size_t size,
471 				enum rtw_tx_queue_type queue)
472 {
473 	u32 txaddr;
474 
475 	switch (queue) {
476 	case RTW_TX_QUEUE_BCN:
477 	case RTW_TX_QUEUE_H2C:
478 	case RTW_TX_QUEUE_HI0:
479 		txaddr = FIELD_PREP(REG_SDIO_CMD_ADDR_MSK,
480 				    REG_SDIO_CMD_ADDR_TXFF_HIGH);
481 		break;
482 	case RTW_TX_QUEUE_VI:
483 	case RTW_TX_QUEUE_VO:
484 		txaddr = FIELD_PREP(REG_SDIO_CMD_ADDR_MSK,
485 				    REG_SDIO_CMD_ADDR_TXFF_NORMAL);
486 		break;
487 	case RTW_TX_QUEUE_BE:
488 	case RTW_TX_QUEUE_BK:
489 		txaddr = FIELD_PREP(REG_SDIO_CMD_ADDR_MSK,
490 				    REG_SDIO_CMD_ADDR_TXFF_LOW);
491 		break;
492 	case RTW_TX_QUEUE_MGMT:
493 		txaddr = FIELD_PREP(REG_SDIO_CMD_ADDR_MSK,
494 				    REG_SDIO_CMD_ADDR_TXFF_EXTRA);
495 		break;
496 	default:
497 		rtw_warn(rtwdev, "Unsupported queue for TX addr: 0x%02x\n",
498 			 queue);
499 		return 0;
500 	}
501 
502 	txaddr += DIV_ROUND_UP(size, 4);
503 
504 	return txaddr;
505 };
506 
/* Read @count bytes from the card's RX FIFO into @buf, splitting the
 * transfer according to the host's maximum request size. The RX0FF
 * sequence number is advanced for every call. Returns 0 on success or
 * the last error encountered; even then the whole length is consumed.
 */
static int rtw_sdio_read_port(struct rtw_dev *rtwdev, u8 *buf, size_t count)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	struct mmc_host *host = rtwsdio->sdio_func->card->host;
	bool bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);
	u32 rxaddr = rtwsdio->rx_addr++;
	int ret = 0, err;
	size_t bytes;

	if (bus_claim)
		sdio_claim_host(rtwsdio->sdio_func);

	while (count > 0) {
		bytes = min_t(size_t, host->max_req_size, count);

		err = sdio_memcpy_fromio(rtwsdio->sdio_func, buf,
					 RTW_SDIO_ADDR_RX_RX0FF_GEN(rxaddr),
					 bytes);
		if (err) {
			rtw_warn(rtwdev,
				 "Failed to read %zu byte(s) from SDIO port 0x%08x: %d",
				 bytes, rxaddr, err);

			/* Signal to the caller that reading did not work and
			 * that the data in the buffer is short/corrupted.
			 */
			ret = err;

			/* Don't stop here - instead drain the remaining data
			 * from the card's buffer, else the card will return
			 * corrupt data for the next rtw_sdio_read_port() call.
			 */
		}

		count -= bytes;
		buf += bytes;
	}

	if (bus_claim)
		sdio_release_host(rtwsdio->sdio_func);

	return ret;
}
550 
/* Check whether enough free TX buffer pages are available in the FIFO
 * serving @queue to hold @count bytes. The free-page register layout
 * differs by WLAN CPU type: 8051 chips pack four 8-bit counters
 * (high/normal/low/public) into one 32-bit register, newer chips use
 * 12-bit counters spread over three registers. Returns 0 when the
 * transfer fits, -EBUSY when not, -EINVAL for unknown queues.
 */
static int rtw_sdio_check_free_txpg(struct rtw_dev *rtwdev, u8 queue,
				    size_t count)
{
	unsigned int pages_free, pages_needed;

	if (rtw_chip_wcpu_8051(rtwdev)) {
		u32 free_txpg;

		free_txpg = rtw_sdio_read32(rtwdev, REG_SDIO_FREE_TXPG);

		switch (queue) {
		case RTW_TX_QUEUE_BCN:
		case RTW_TX_QUEUE_H2C:
		case RTW_TX_QUEUE_HI0:
		case RTW_TX_QUEUE_MGMT:
			/* high */
			pages_free = free_txpg & 0xff;
			break;
		case RTW_TX_QUEUE_VI:
		case RTW_TX_QUEUE_VO:
			/* normal */
			pages_free = (free_txpg >> 8) & 0xff;
			break;
		case RTW_TX_QUEUE_BE:
		case RTW_TX_QUEUE_BK:
			/* low */
			pages_free = (free_txpg >> 16) & 0xff;
			break;
		default:
			rtw_warn(rtwdev, "Unknown mapping for queue %u\n", queue);
			return -EINVAL;
		}

		/* add the pages from the public queue */
		pages_free += (free_txpg >> 24) & 0xff;
	} else {
		u32 free_txpg[3];

		free_txpg[0] = rtw_sdio_read32(rtwdev, REG_SDIO_FREE_TXPG);
		free_txpg[1] = rtw_sdio_read32(rtwdev, REG_SDIO_FREE_TXPG + 4);
		free_txpg[2] = rtw_sdio_read32(rtwdev, REG_SDIO_FREE_TXPG + 8);

		switch (queue) {
		case RTW_TX_QUEUE_BCN:
		case RTW_TX_QUEUE_H2C:
		case RTW_TX_QUEUE_HI0:
			/* high */
			pages_free = free_txpg[0] & 0xfff;
			break;
		case RTW_TX_QUEUE_VI:
		case RTW_TX_QUEUE_VO:
			/* normal */
			pages_free = (free_txpg[0] >> 16) & 0xfff;
			break;
		case RTW_TX_QUEUE_BE:
		case RTW_TX_QUEUE_BK:
			/* low */
			pages_free = free_txpg[1] & 0xfff;
			break;
		case RTW_TX_QUEUE_MGMT:
			/* extra */
			pages_free = free_txpg[2] & 0xfff;
			break;
		default:
			rtw_warn(rtwdev, "Unknown mapping for queue %u\n", queue);
			return -EINVAL;
		}

		/* add the pages from the public queue */
		pages_free += (free_txpg[1] >> 16) & 0xfff;
	}

	pages_needed = DIV_ROUND_UP(count, rtwdev->chip->page_size);

	if (pages_needed > pages_free) {
		rtw_dbg(rtwdev, RTW_DBG_SDIO,
			"Not enough free pages (%u needed, %u free) in queue %u for %zu bytes\n",
			pages_needed, pages_free, queue, count);
		return -EBUSY;
	}

	return 0;
}
634 
rtw_sdio_write_port(struct rtw_dev * rtwdev,struct sk_buff * skb,enum rtw_tx_queue_type queue)635 static int rtw_sdio_write_port(struct rtw_dev *rtwdev, struct sk_buff *skb,
636 			       enum rtw_tx_queue_type queue)
637 {
638 	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
639 	bool bus_claim;
640 	size_t txsize;
641 	u32 txaddr;
642 	int ret;
643 
644 	txaddr = rtw_sdio_get_tx_addr(rtwdev, skb->len, queue);
645 	if (!txaddr)
646 		return -EINVAL;
647 
648 	txsize = sdio_align_size(rtwsdio->sdio_func, skb->len);
649 
650 	ret = rtw_sdio_check_free_txpg(rtwdev, queue, txsize);
651 	if (ret)
652 		return ret;
653 
654 	if (!IS_ALIGNED((unsigned long)skb->data, RTW_SDIO_DATA_PTR_ALIGN))
655 		rtw_warn(rtwdev, "Got unaligned SKB in %s() for queue %u\n",
656 			 __func__, queue);
657 
658 	bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);
659 
660 	if (bus_claim)
661 		sdio_claim_host(rtwsdio->sdio_func);
662 
663 	ret = sdio_memcpy_toio(rtwsdio->sdio_func, txaddr, skb->data, txsize);
664 
665 	if (bus_claim)
666 		sdio_release_host(rtwsdio->sdio_func);
667 
668 	if (ret)
669 		rtw_warn(rtwdev,
670 			 "Failed to write %zu byte(s) to SDIO port 0x%08x",
671 			 txsize, txaddr);
672 
673 	return ret;
674 }
675 
rtw_sdio_init(struct rtw_dev * rtwdev)676 static void rtw_sdio_init(struct rtw_dev *rtwdev)
677 {
678 	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
679 
680 	rtwsdio->irq_mask = REG_SDIO_HIMR_RX_REQUEST | REG_SDIO_HIMR_CPWM1;
681 }
682 
/* Configure RX DMA aggregation. The page-threshold (size) and timeout
 * values are chip specific magic numbers; the meaning of the individual
 * values is not documented here - keep them in sync with the vendor
 * driver when adding chips.
 */
static void rtw_sdio_enable_rx_aggregation(struct rtw_dev *rtwdev)
{
	u8 size, timeout;

	switch (rtwdev->chip->id) {
	case RTW_CHIP_TYPE_8703B:
	case RTW_CHIP_TYPE_8821A:
	case RTW_CHIP_TYPE_8812A:
		size = 0x6;
		timeout = 0x6;
		break;
	case RTW_CHIP_TYPE_8723D:
		size = 0xa;
		timeout = 0x3;
		/* NOTE(review): extra bit set only for 8723D - purpose not
		 * visible here, presumably a chip quirk.
		 */
		rtw_write8_set(rtwdev, REG_RXDMA_AGG_PG_TH + 3, BIT(7));
		break;
	default:
		size = 0xff;
		timeout = 0x1;
		break;
	}

	/* Make the firmware honor the size limit configured below */
	rtw_write32_set(rtwdev, REG_RXDMA_AGG_PG_TH, BIT_EN_PRE_CALC);

	rtw_write8_set(rtwdev, REG_TXDMA_PQ_MAP, BIT_RXDMA_AGG_EN);

	rtw_write16(rtwdev, REG_RXDMA_AGG_PG_TH,
		    FIELD_PREP(BIT_RXDMA_AGG_PG_TH, size) |
		    FIELD_PREP(BIT_DMA_AGG_TO_V1, timeout));

	rtw_write8_set(rtwdev, REG_RXDMA_MODE, BIT_DMA_MODE);
}
716 
rtw_sdio_enable_interrupt(struct rtw_dev * rtwdev)717 static void rtw_sdio_enable_interrupt(struct rtw_dev *rtwdev)
718 {
719 	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
720 
721 	rtw_write32(rtwdev, REG_SDIO_HIMR, rtwsdio->irq_mask);
722 }
723 
rtw_sdio_disable_interrupt(struct rtw_dev * rtwdev)724 static void rtw_sdio_disable_interrupt(struct rtw_dev *rtwdev)
725 {
726 	rtw_write32(rtwdev, REG_SDIO_HIMR, 0x0);
727 }
728 
/* Map a TX queue to the queue-selection value written into the TX
 * descriptor; data queues use the skb priority directly.
 */
static u8 rtw_sdio_get_tx_qsel(struct rtw_dev *rtwdev, struct sk_buff *skb,
			       u8 queue)
{
	if (queue == RTW_TX_QUEUE_BCN)
		return TX_DESC_QSEL_BEACON;
	if (queue == RTW_TX_QUEUE_H2C)
		return TX_DESC_QSEL_H2C;
	if (queue == RTW_TX_QUEUE_MGMT)
		return TX_DESC_QSEL_MGMT;
	if (queue == RTW_TX_QUEUE_HI0)
		return TX_DESC_QSEL_HIGH;

	return skb->priority;
}
745 
/* HCI op: no bus-specific setup is needed for SDIO. */
static int rtw_sdio_setup(struct rtw_dev *rtwdev)
{
	return 0;
}
751 
/* HCI op: bring up the RX path - turn on RX DMA aggregation and unmask
 * the card interrupts.
 */
static int rtw_sdio_start(struct rtw_dev *rtwdev)
{
	rtw_sdio_enable_rx_aggregation(rtwdev);
	rtw_sdio_enable_interrupt(rtwdev);
	return 0;
}
759 
/* HCI op: quiesce the card by masking all of its interrupts. */
static void rtw_sdio_stop(struct rtw_dev *rtwdev)
{
	rtw_sdio_disable_interrupt(rtwdev);
}
764 
/* Enter deep (leisure) power save. Without firmware TX-wake support the
 * TX queues must be drained first, since TX DMA is not allowed in deep
 * PS; otherwise entering is refused until TX is idle.
 */
static void rtw_sdio_deep_ps_enter(struct rtw_dev *rtwdev)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	bool tx_empty = true;
	u8 queue;

	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE)) {
		/* Deep PS state is not allowed to TX-DMA */
		for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
			/* BCN queue is rsvd page, does not have DMA interrupt
			 * H2C queue is managed by firmware
			 */
			if (queue == RTW_TX_QUEUE_BCN ||
			    queue == RTW_TX_QUEUE_H2C)
				continue;

			/* check if there is any skb DMAing */
			if (skb_queue_len(&rtwsdio->tx_queue[queue])) {
				tx_empty = false;
				break;
			}
		}
	}

	if (!tx_empty) {
		rtw_dbg(rtwdev, RTW_DBG_PS,
			"TX path not empty, cannot enter deep power save state\n");
		return;
	}

	set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
	rtw_power_mode_change(rtwdev, true);
}
798 
rtw_sdio_deep_ps_leave(struct rtw_dev * rtwdev)799 static void rtw_sdio_deep_ps_leave(struct rtw_dev *rtwdev)
800 {
801 	if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
802 		rtw_power_mode_change(rtwdev, false);
803 }
804 
/* HCI op: toggle deep power save - only acts on actual state changes. */
static void rtw_sdio_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
	bool in_deep = test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);

	if (enter && !in_deep)
		rtw_sdio_deep_ps_enter(rtwdev);
	else if (!enter && in_deep)
		rtw_sdio_deep_ps_leave(rtwdev);
}
813 
rtw_sdio_tx_kick_off(struct rtw_dev * rtwdev)814 static void rtw_sdio_tx_kick_off(struct rtw_dev *rtwdev)
815 {
816 	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
817 
818 	queue_work(rtwsdio->txwq, &rtwsdio->tx_handler_data->work);
819 }
820 
/* HCI op: link power save is not implemented for the SDIO bus. */
static void rtw_sdio_link_ps(struct rtw_dev *rtwdev, bool enter)
{
	/* nothing to do */
}
825 
/* HCI op: bus interface configuration. */
static void rtw_sdio_interface_cfg(struct rtw_dev *rtwdev)
{
	u32 val;

	/* Dummy read, value discarded - NOTE(review): purpose not visible
	 * here, presumably required by the hardware before touching the TX
	 * control register.
	 */
	rtw_read32(rtwdev, REG_SDIO_FREE_TXPG);

	/* Clear the low three bits (and everything above bit 15) of the TX
	 * control register - exact field semantics undocumented here.
	 */
	val = rtw_read32(rtwdev, REG_SDIO_TX_CTRL);
	val &= 0xfff8;
	rtw_write32(rtwdev, REG_SDIO_TX_CTRL, val);
}
836 
/* Access the driver's per-skb TX bookkeeping, which is stored inside the
 * mac80211 tx_info status driver data area. The BUILD_BUG_ON proves the
 * struct actually fits there.
 */
static struct rtw_sdio_tx_data *rtw_sdio_get_tx_data(struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	BUILD_BUG_ON(sizeof(struct rtw_sdio_tx_data) >
		     sizeof(info->status.status_driver_data));

	return (struct rtw_sdio_tx_data *)info->status.status_driver_data;
}
846 
/* Prepend and fill the TX packet descriptor for @skb, padding the head so
 * that the descriptor start is aligned to RTW_SDIO_DATA_PTR_ALIGN.
 */
static void rtw_sdio_tx_skb_prepare(struct rtw_dev *rtwdev,
				    struct rtw_tx_pkt_info *pkt_info,
				    struct sk_buff *skb,
				    enum rtw_tx_queue_type queue)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	unsigned long data_addr, aligned_addr;
	size_t offset;
	u8 *pkt_desc;

	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);

	data_addr = (unsigned long)pkt_desc;
	aligned_addr = ALIGN(data_addr, RTW_SDIO_DATA_PTR_ALIGN);

	if (data_addr != aligned_addr) {
		/* Ensure that the start of the pkt_desc is always aligned at
		 * RTW_SDIO_DATA_PTR_ALIGN.
		 */
		offset = RTW_SDIO_DATA_PTR_ALIGN - (aligned_addr - data_addr);

		pkt_desc = skb_push(skb, offset);

		/* By inserting padding to align the start of the pkt_desc we
		 * need to inform the firmware that the actual data starts at
		 * a different offset than normal.
		 */
		pkt_info->offset += offset;
	}

	memset(pkt_desc, 0, chip->tx_pkt_desc_sz);

	pkt_info->qsel = rtw_sdio_get_tx_qsel(rtwdev, skb, queue);

	rtw_tx_fill_tx_desc(rtwdev, pkt_info, skb);
	rtw_tx_fill_txdesc_checksum(rtwdev, pkt_info, pkt_desc);
}
884 
/* Prepare the TX descriptor for @skb, write it to the card synchronously
 * and free the skb. Returns the result of the port write.
 */
static int rtw_sdio_write_data(struct rtw_dev *rtwdev,
			       struct rtw_tx_pkt_info *pkt_info,
			       struct sk_buff *skb,
			       enum rtw_tx_queue_type queue)
{
	int err;

	rtw_sdio_tx_skb_prepare(rtwdev, pkt_info, skb, queue);

	err = rtw_sdio_write_port(rtwdev, skb, queue);
	dev_kfree_skb_any(skb);

	return err;
}
899 
/* HCI op: download a reserved page buffer via the beacon queue. */
static int rtw_sdio_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
					 u32 size)
{
	struct rtw_tx_pkt_info pkt_info = {};
	struct sk_buff *skb = rtw_tx_write_data_rsvd_page_get(rtwdev,
							      &pkt_info,
							      buf, size);

	if (!skb)
		return -ENOMEM;

	return rtw_sdio_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
}
912 
/* HCI op: send a host-to-card (H2C) command buffer via the H2C queue. */
static int rtw_sdio_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	struct rtw_tx_pkt_info pkt_info = {};
	struct sk_buff *skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info,
							buf, size);

	if (!skb)
		return -ENOMEM;

	return rtw_sdio_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
}
924 
/* HCI op: enqueue a data frame for transmission. The frame is prepared
 * here but only written to the card later by the TX worker (see
 * rtw_sdio_tx_kick_off()); the sequence number is stashed in the skb's
 * driver data for TX status reporting.
 */
static int rtw_sdio_tx_write(struct rtw_dev *rtwdev,
			     struct rtw_tx_pkt_info *pkt_info,
			     struct sk_buff *skb)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	enum rtw_tx_queue_type queue = rtw_tx_queue_mapping(skb);
	struct rtw_sdio_tx_data *tx_data;

	rtw_sdio_tx_skb_prepare(rtwdev, pkt_info, skb, queue);

	tx_data = rtw_sdio_get_tx_data(skb);
	tx_data->sn = pkt_info->sn;

	skb_queue_tail(&rtwsdio->tx_queue[queue], skb);

	return 0;
}
942 
rtw_sdio_tx_err_isr(struct rtw_dev * rtwdev)943 static void rtw_sdio_tx_err_isr(struct rtw_dev *rtwdev)
944 {
945 	u32 val = rtw_read32(rtwdev, REG_TXDMA_STATUS);
946 
947 	rtw_write32(rtwdev, REG_TXDMA_STATUS, val);
948 }
949 
/* Hand one received frame up the stack: C2H (card-to-host) firmware
 * events go to the C2H handler including their descriptor, regular
 * frames are trimmed to the payload and passed to mac80211.
 */
static void rtw_sdio_rx_skb(struct rtw_dev *rtwdev, struct sk_buff *skb,
			    u32 pkt_offset, struct rtw_rx_pkt_stat *pkt_stat,
			    struct ieee80211_rx_status *rx_status)
{
	*IEEE80211_SKB_RXCB(skb) = *rx_status;

	if (pkt_stat->is_c2h) {
		/* C2H handler needs the descriptor, so keep pkt_offset */
		skb_put(skb, pkt_stat->pkt_len + pkt_offset);
		rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, skb);
		return;
	}

	/* Strip the RX descriptor, leaving only the frame payload */
	skb_put(skb, pkt_stat->pkt_len);
	skb_reserve(skb, pkt_offset);

	rtw_update_rx_freq_for_invalid(rtwdev, skb, rx_status, pkt_stat);
	rtw_rx_stats(rtwdev, pkt_stat->vif, skb);

	ieee80211_rx_irqsafe(rtwdev->hw, skb);
}
970 
/* Read one (possibly aggregated) buffer of @rx_len bytes from the RX FIFO
 * and split it into individual frames. Each frame except the last is
 * copied into its own skb; the last one reuses the original buffer.
 */
static void rtw_sdio_rxfifo_recv(struct rtw_dev *rtwdev, u32 rx_len)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
	struct ieee80211_rx_status rx_status;
	struct rtw_rx_pkt_stat pkt_stat;
	struct sk_buff *skb, *split_skb;
	u32 pkt_offset, curr_pkt_len;
	size_t bufsz;
	u8 *rx_desc;
	int ret;

	bufsz = sdio_align_size(rtwsdio->sdio_func, rx_len);

	skb = dev_alloc_skb(bufsz);
	if (!skb)
		return;

	ret = rtw_sdio_read_port(rtwdev, skb->data, bufsz);
	if (ret) {
		dev_kfree_skb_any(skb);
		return;
	}

	while (true) {
		/* Parse the RX descriptor at the current buffer position */
		rx_desc = skb->data;
		rtw_rx_query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);
		pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
			     pkt_stat.shift;

		curr_pkt_len = ALIGN(pkt_offset + pkt_stat.pkt_len,
				     RTW_SDIO_DATA_PTR_ALIGN);

		if ((curr_pkt_len + pkt_desc_sz) >= rx_len) {
			/* Use the original skb (with it's adjusted offset)
			 * when processing the last (or even the only) entry to
			 * have it's memory freed automatically.
			 */
			rtw_sdio_rx_skb(rtwdev, skb, pkt_offset, &pkt_stat,
					&rx_status);
			break;
		}

		split_skb = dev_alloc_skb(curr_pkt_len);
		if (!split_skb) {
			/* Out of memory: deliver this frame via the original
			 * skb and drop whatever frames follow it.
			 */
			rtw_sdio_rx_skb(rtwdev, skb, pkt_offset, &pkt_stat,
					&rx_status);
			break;
		}

		skb_copy_header(split_skb, skb);
		memcpy(split_skb->data, skb->data, curr_pkt_len);

		rtw_sdio_rx_skb(rtwdev, split_skb, pkt_offset, &pkt_stat,
				&rx_status);

		/* Move to the start of the next RX descriptor */
		skb_reserve(skb, curr_pkt_len);
		rx_len -= curr_pkt_len;
	}
}
1033 
/* Drain pending RX data from the FIFO. Bounded to 64 KiB per invocation so
 * the interrupt thread cannot loop indefinitely under sustained traffic.
 */
static void rtw_sdio_rx_isr(struct rtw_dev *rtwdev)
{
	u32 rx_len, hisr, total_rx_bytes = 0;

	do {
		/* The RX length register is read 16-bit wide on 8051-based
		 * WCPU chips and 32-bit wide on the newer ones.
		 */
		if (rtw_chip_wcpu_8051(rtwdev))
			rx_len = rtw_read16(rtwdev, REG_SDIO_RX0_REQ_LEN);
		else
			rx_len = rtw_read32(rtwdev, REG_SDIO_RX0_REQ_LEN);

		if (!rx_len)
			break;

		rtw_sdio_rxfifo_recv(rtwdev, rx_len);

		total_rx_bytes += rx_len;

		if (rtw_chip_wcpu_8051(rtwdev)) {
			/* Stop if no more RX requests are pending, even if
			 * rx_len could be greater than zero in the next
			 * iteration. This is needed because the RX buffer may
			 * already contain data while either HW or FW are not
			 * done filling that buffer yet. Still reading the
			 * buffer can result in packets where
			 * rtw_rx_pkt_stat.pkt_len is zero or points beyond the
			 * end of the buffer.
			 */
			hisr = rtw_read32(rtwdev, REG_SDIO_HISR);
		} else {
			/* RTW_WCPU_3081 chips have improved hardware or
			 * firmware and can use rx_len unconditionally.
			 */
			hisr = REG_SDIO_HISR_RX_REQUEST;
		}
	} while (total_rx_bytes < SZ_64K && hisr & REG_SDIO_HISR_RX_REQUEST);
}
1070 
/* SDIO IRQ handler: dispatch HISR causes and acknowledge them. Runs in the
 * MMC core's IRQ thread with the SDIO host already claimed.
 */
static void rtw_sdio_handle_interrupt(struct sdio_func *sdio_func)
{
	struct ieee80211_hw *hw = sdio_get_drvdata(sdio_func);
	struct rtw_sdio *rtwsdio;
	struct rtw_dev *rtwdev;
	u32 hisr;

	rtwdev = hw->priv;
	rtwsdio = (struct rtw_sdio *)rtwdev->priv;

	/* Remember the IRQ thread so the register accessors know the host
	 * is already claimed (see rtw_sdio_bus_claim_needed()).
	 */
	rtwsdio->irq_thread = current;

	hisr = rtw_read32(rtwdev, REG_SDIO_HISR);

	if (hisr & REG_SDIO_HISR_TXERR)
		rtw_sdio_tx_err_isr(rtwdev);
	if (hisr & REG_SDIO_HISR_RX_REQUEST) {
		/* RX is handled by draining the FIFO; mask the bit out of
		 * the acknowledge write below. NOTE(review): presumably the
		 * hardware clears RX_REQUEST on FIFO reads — confirm against
		 * the vendor documentation.
		 */
		hisr &= ~REG_SDIO_HISR_RX_REQUEST;
		rtw_sdio_rx_isr(rtwdev);
	}

	/* Acknowledge the remaining interrupt causes. */
	rtw_write32(rtwdev, REG_SDIO_HISR, hisr);

	rtwsdio->irq_thread = NULL;
}
1096 
rtw_sdio_suspend(struct device * dev)1097 static int __maybe_unused rtw_sdio_suspend(struct device *dev)
1098 {
1099 	struct sdio_func *func = dev_to_sdio_func(dev);
1100 	struct ieee80211_hw *hw = dev_get_drvdata(dev);
1101 	struct rtw_dev *rtwdev = hw->priv;
1102 	int ret;
1103 
1104 	ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
1105 	if (ret)
1106 		rtw_err(rtwdev, "Failed to host PM flag MMC_PM_KEEP_POWER");
1107 
1108 	return ret;
1109 }
1110 
static int __maybe_unused rtw_sdio_resume(struct device *dev)
{
	/* Nothing to do: the card kept power across suspend (see the
	 * MMC_PM_KEEP_POWER flag set in rtw_sdio_suspend()).
	 */
	return 0;
}
1115 
/* PM ops exported to the chip-specific rtw88 SDIO drivers. */
SIMPLE_DEV_PM_OPS(rtw_sdio_pm_ops, rtw_sdio_suspend, rtw_sdio_resume);
EXPORT_SYMBOL(rtw_sdio_pm_ops);
1118 
rtw_sdio_claim(struct rtw_dev * rtwdev,struct sdio_func * sdio_func)1119 static int rtw_sdio_claim(struct rtw_dev *rtwdev, struct sdio_func *sdio_func)
1120 {
1121 	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
1122 	int ret;
1123 
1124 	sdio_claim_host(sdio_func);
1125 
1126 	ret = sdio_enable_func(sdio_func);
1127 	if (ret) {
1128 		rtw_err(rtwdev, "Failed to enable SDIO func");
1129 		goto err_release_host;
1130 	}
1131 
1132 	ret = sdio_set_block_size(sdio_func, RTW_SDIO_BLOCK_SIZE);
1133 	if (ret) {
1134 		rtw_err(rtwdev, "Failed to set SDIO block size to 512");
1135 		goto err_disable_func;
1136 	}
1137 
1138 	rtwsdio->sdio_func = sdio_func;
1139 
1140 	rtwsdio->sdio3_bus_mode = mmc_card_uhs(sdio_func->card);
1141 
1142 	sdio_set_drvdata(sdio_func, rtwdev->hw);
1143 	SET_IEEE80211_DEV(rtwdev->hw, &sdio_func->dev);
1144 
1145 	sdio_release_host(sdio_func);
1146 
1147 	return 0;
1148 
1149 err_disable_func:
1150 	sdio_disable_func(sdio_func);
1151 err_release_host:
1152 	sdio_release_host(sdio_func);
1153 	return ret;
1154 }
1155 
/* Counterpart to rtw_sdio_claim(): disable the SDIO function again. */
static void rtw_sdio_declaim(struct rtw_dev *rtwdev,
			     struct sdio_func *sdio_func)
{
	sdio_claim_host(sdio_func);
	sdio_disable_func(sdio_func);
	sdio_release_host(sdio_func);
}
1163 
/* HCI callbacks wiring the rtw88 core to this SDIO bus implementation.
 * .dynamic_rx_agg is not implemented for SDIO.
 */
static const struct rtw_hci_ops rtw_sdio_ops = {
	.tx_write = rtw_sdio_tx_write,
	.tx_kick_off = rtw_sdio_tx_kick_off,
	.setup = rtw_sdio_setup,
	.start = rtw_sdio_start,
	.stop = rtw_sdio_stop,
	.deep_ps = rtw_sdio_deep_ps,
	.link_ps = rtw_sdio_link_ps,
	.interface_cfg = rtw_sdio_interface_cfg,
	.dynamic_rx_agg = NULL,
	.write_firmware_page = rtw_write_firmware_page,

	.read8 = rtw_sdio_read8,
	.read16 = rtw_sdio_read16,
	.read32 = rtw_sdio_read32,
	.write8 = rtw_sdio_write8,
	.write16 = rtw_sdio_write16,
	.write32 = rtw_sdio_write32,
	.write_data_rsvd_page = rtw_sdio_write_data_rsvd_page,
	.write_data_h2c = rtw_sdio_write_data_h2c,
};
1185 
rtw_sdio_request_irq(struct rtw_dev * rtwdev,struct sdio_func * sdio_func)1186 static int rtw_sdio_request_irq(struct rtw_dev *rtwdev,
1187 				struct sdio_func *sdio_func)
1188 {
1189 	int ret;
1190 
1191 	sdio_claim_host(sdio_func);
1192 	ret = sdio_claim_irq(sdio_func, &rtw_sdio_handle_interrupt);
1193 	sdio_release_host(sdio_func);
1194 
1195 	if (ret) {
1196 		rtw_err(rtwdev, "failed to claim SDIO IRQ");
1197 		return ret;
1198 	}
1199 
1200 	return 0;
1201 }
1202 
rtw_sdio_indicate_tx_status(struct rtw_dev * rtwdev,struct sk_buff * skb)1203 static void rtw_sdio_indicate_tx_status(struct rtw_dev *rtwdev,
1204 					struct sk_buff *skb)
1205 {
1206 	struct rtw_sdio_tx_data *tx_data = rtw_sdio_get_tx_data(skb);
1207 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1208 	struct ieee80211_hw *hw = rtwdev->hw;
1209 
1210 	skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
1211 
1212 	/* enqueue to wait for tx report */
1213 	if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
1214 		rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
1215 		return;
1216 	}
1217 
1218 	/* always ACK for others, then they won't be marked as drop */
1219 	ieee80211_tx_info_clear_status(info);
1220 	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
1221 		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
1222 	else
1223 		info->flags |= IEEE80211_TX_STAT_ACK;
1224 
1225 	ieee80211_tx_status_irqsafe(hw, skb);
1226 }
1227 
rtw_sdio_process_tx_queue(struct rtw_dev * rtwdev,enum rtw_tx_queue_type queue)1228 static void rtw_sdio_process_tx_queue(struct rtw_dev *rtwdev,
1229 				      enum rtw_tx_queue_type queue)
1230 {
1231 	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
1232 	struct sk_buff *skb;
1233 	int ret;
1234 
1235 	skb = skb_dequeue(&rtwsdio->tx_queue[queue]);
1236 	if (!skb)
1237 		return;
1238 
1239 	ret = rtw_sdio_write_port(rtwdev, skb, queue);
1240 	if (ret) {
1241 		skb_queue_head(&rtwsdio->tx_queue[queue], skb);
1242 		return;
1243 	}
1244 
1245 	rtw_sdio_indicate_tx_status(rtwdev, skb);
1246 }
1247 
rtw_sdio_tx_handler(struct work_struct * work)1248 static void rtw_sdio_tx_handler(struct work_struct *work)
1249 {
1250 	struct rtw_sdio_work_data *work_data =
1251 		container_of(work, struct rtw_sdio_work_data, work);
1252 	struct rtw_sdio *rtwsdio;
1253 	struct rtw_dev *rtwdev;
1254 	int limit, queue;
1255 
1256 	rtwdev = work_data->rtwdev;
1257 	rtwsdio = (struct rtw_sdio *)rtwdev->priv;
1258 
1259 	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
1260 		rtw_sdio_deep_ps_leave(rtwdev);
1261 
1262 	for (queue = RTK_MAX_TX_QUEUE_NUM - 1; queue >= 0; queue--) {
1263 		for (limit = 0; limit < 1000; limit++) {
1264 			rtw_sdio_process_tx_queue(rtwdev, queue);
1265 
1266 			if (skb_queue_empty(&rtwsdio->tx_queue[queue]))
1267 				break;
1268 		}
1269 	}
1270 }
1271 
/* Release the SDIO IRQ claimed in rtw_sdio_request_irq(). */
static void rtw_sdio_free_irq(struct rtw_dev *rtwdev,
			      struct sdio_func *sdio_func)
{
	sdio_claim_host(sdio_func);
	sdio_release_irq(sdio_func);
	sdio_release_host(sdio_func);
}
1279 
rtw_sdio_init_tx(struct rtw_dev * rtwdev)1280 static int rtw_sdio_init_tx(struct rtw_dev *rtwdev)
1281 {
1282 	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
1283 	int i;
1284 
1285 	rtwsdio->txwq = create_singlethread_workqueue("rtw88_sdio: tx wq");
1286 	if (!rtwsdio->txwq) {
1287 		rtw_err(rtwdev, "failed to create TX work queue\n");
1288 		return -ENOMEM;
1289 	}
1290 
1291 	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++)
1292 		skb_queue_head_init(&rtwsdio->tx_queue[i]);
1293 	rtwsdio->tx_handler_data = kmalloc(sizeof(*rtwsdio->tx_handler_data),
1294 					   GFP_KERNEL);
1295 	if (!rtwsdio->tx_handler_data)
1296 		goto err_destroy_wq;
1297 
1298 	rtwsdio->tx_handler_data->rtwdev = rtwdev;
1299 	INIT_WORK(&rtwsdio->tx_handler_data->work, rtw_sdio_tx_handler);
1300 
1301 	return 0;
1302 
1303 err_destroy_wq:
1304 	destroy_workqueue(rtwsdio->txwq);
1305 	return -ENOMEM;
1306 }
1307 
/* Tear down the TX path created by rtw_sdio_init_tx(). */
static void rtw_sdio_deinit_tx(struct rtw_dev *rtwdev)
{
	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
	int i;

	/* Destroy the workqueue first so rtw_sdio_tx_handler() cannot run
	 * anymore while the queues below are being purged.
	 */
	destroy_workqueue(rtwsdio->txwq);
	kfree(rtwsdio->tx_handler_data);

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++)
		ieee80211_purge_tx_queue(rtwdev->hw, &rtwsdio->tx_queue[i]);
}
1319 
/* Common probe entry for the chip-specific rtw88 SDIO drivers: allocate
 * the ieee80211_hw plus driver private data, initialize the rtw88 core,
 * claim the SDIO function, set up TX and IRQ handling and register with
 * mac80211. Returns 0 on success or a negative error code.
 */
int rtw_sdio_probe(struct sdio_func *sdio_func,
		   const struct sdio_device_id *id)
{
	struct ieee80211_hw *hw;
	struct rtw_dev *rtwdev;
	int drv_data_size;
	int ret;

	/* rtw_sdio lives directly behind rtw_dev inside hw->priv. */
	drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_sdio);
	hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
	if (!hw) {
		dev_err(&sdio_func->dev, "failed to allocate hw");
		return -ENOMEM;
	}

	rtwdev = hw->priv;
	rtwdev->hw = hw;
	rtwdev->dev = &sdio_func->dev;
	/* The chip info is carried in the SDIO device-ID table entry. */
	rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
	rtwdev->hci.ops = &rtw_sdio_ops;
	rtwdev->hci.type = RTW_HCI_TYPE_SDIO;

	ret = rtw_core_init(rtwdev);
	if (ret)
		goto err_release_hw;

	rtw_dbg(rtwdev, RTW_DBG_SDIO,
		"rtw88 SDIO probe: vendor=0x%04x device=%04x class=%02x",
		id->vendor, id->device, id->class);

	ret = rtw_sdio_claim(rtwdev, sdio_func);
	if (ret) {
		rtw_err(rtwdev, "failed to claim SDIO device");
		goto err_deinit_core;
	}

	rtw_sdio_init(rtwdev);

	ret = rtw_sdio_init_tx(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to init SDIO TX queue\n");
		goto err_sdio_declaim;
	}

	ret = rtw_chip_info_setup(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup chip information");
		goto err_destroy_txwq;
	}

	ret = rtw_sdio_request_irq(rtwdev, sdio_func);
	if (ret)
		goto err_destroy_txwq;

	ret = rtw_register_hw(rtwdev, hw);
	if (ret) {
		rtw_err(rtwdev, "failed to register hw");
		goto err_free_irq;
	}

	return 0;

	/* Unwind in reverse order of the setup steps above. */
err_free_irq:
	rtw_sdio_free_irq(rtwdev, sdio_func);
err_destroy_txwq:
	rtw_sdio_deinit_tx(rtwdev);
err_sdio_declaim:
	rtw_sdio_declaim(rtwdev, sdio_func);
err_deinit_core:
	rtw_core_deinit(rtwdev);
err_release_hw:
	ieee80211_free_hw(hw);

	return ret;
}
EXPORT_SYMBOL(rtw_sdio_probe);
1396 
/* Tear down everything rtw_sdio_probe() set up, in reverse order. */
void rtw_sdio_remove(struct sdio_func *sdio_func)
{
	struct ieee80211_hw *hw = sdio_get_drvdata(sdio_func);
	struct rtw_dev *rtwdev;

	if (!hw)
		return;

	rtwdev = hw->priv;

	rtw_unregister_hw(rtwdev, hw);
	/* Mask device interrupts before releasing the IRQ handler. */
	rtw_sdio_disable_interrupt(rtwdev);
	rtw_sdio_free_irq(rtwdev, sdio_func);
	rtw_sdio_declaim(rtwdev, sdio_func);
	rtw_sdio_deinit_tx(rtwdev);
	rtw_core_deinit(rtwdev);
	ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtw_sdio_remove);
1416 
rtw_sdio_shutdown(struct device * dev)1417 void rtw_sdio_shutdown(struct device *dev)
1418 {
1419 	struct sdio_func *sdio_func = dev_to_sdio_func(dev);
1420 	const struct rtw_chip_info *chip;
1421 	struct ieee80211_hw *hw;
1422 	struct rtw_dev *rtwdev;
1423 
1424 	hw = sdio_get_drvdata(sdio_func);
1425 	if (!hw)
1426 		return;
1427 
1428 	rtwdev = hw->priv;
1429 	chip = rtwdev->chip;
1430 
1431 	if (chip->ops->shutdown)
1432 		chip->ops->shutdown(rtwdev);
1433 }
1434 EXPORT_SYMBOL(rtw_sdio_shutdown);
1435 
1436 MODULE_AUTHOR("Martin Blumenstingl");
1437 MODULE_AUTHOR("Jernej Skrabec");
1438 MODULE_DESCRIPTION("Realtek 802.11ac wireless SDIO driver");
1439 MODULE_LICENSE("Dual BSD/GPL");
1440