xref: /linux/drivers/net/wireless/st/cw1200/bh.c (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Device handling thread implementation for mac80211 ST-Ericsson CW1200 drivers
 *
 * Copyright (c) 2010, ST-Ericsson
 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
 *
 * Based on:
 * ST-Ericsson UMAC CW1200 driver, which is
 * Copyright (c) 2010, ST-Ericsson
 * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
 */

#include <linux/module.h>
#include <net/mac80211.h>
#include <linux/kthread.h>
#include <linux/timer.h>

#include "cw1200.h"
#include "bh.h"
#include "hwio.h"
#include "wsm.h"
#include "hwbus.h"
#include "debug.h"
#include "fwio.h"

static int cw1200_bh(void *arg);

#define DOWNLOAD_BLOCK_SIZE_WR	(0x1000 - 4)
/* an SPI message cannot be bigger than (2^12 - 1) * 2 bytes;
 * the "*2" converts 16-bit words to bytes
 */
#define MAX_SZ_RD_WR_BUFFERS	(DOWNLOAD_BLOCK_SIZE_WR*2)
#define PIGGYBACK_CTRL_REG	(2)
#define EFFECTIVE_BUF_SIZE	(MAX_SZ_RD_WR_BUFFERS - PIGGYBACK_CTRL_REG)
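/* Note: every bulk read is sized against EFFECTIVE_BUF_SIZE; the last
 * PIGGYBACK_CTRL_REG bytes of each read carry the control register value
 * that the device piggybacks onto the transfer (see cw1200_bh_rx_helper()).
 */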

/* Suspend state privates */
enum cw1200_bh_pm_state {
	CW1200_BH_RESUMED = 0,
	CW1200_BH_SUSPEND,
	CW1200_BH_SUSPENDED,
	CW1200_BH_RESUME,
};
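/* Handshake: cw1200_bh_suspend() requests CW1200_BH_SUSPEND and waits for
 * the BH loop to acknowledge with CW1200_BH_SUSPENDED; cw1200_bh_resume()
 * requests CW1200_BH_RESUME and waits for CW1200_BH_RESUMED.
 */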

static void cw1200_bh_work(struct work_struct *work)
{
	struct cw1200_common *priv =
	container_of(work, struct cw1200_common, bh_work);
	cw1200_bh(priv);
}

int cw1200_register_bh(struct cw1200_common *priv)
{
	int err = 0;
	/* Realtime workqueue */
	priv->bh_workqueue = alloc_workqueue("cw1200_bh",
				WQ_MEM_RECLAIM | WQ_HIGHPRI
				| WQ_CPU_INTENSIVE, 1);

	if (!priv->bh_workqueue)
		return -ENOMEM;

	INIT_WORK(&priv->bh_work, cw1200_bh_work);

	pr_debug("[BH] register.\n");

	atomic_set(&priv->bh_rx, 0);
	atomic_set(&priv->bh_tx, 0);
	atomic_set(&priv->bh_term, 0);
	atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
	priv->bh_error = 0;
	priv->hw_bufs_used = 0;
	priv->buf_id_tx = 0;
	priv->buf_id_rx = 0;
	init_waitqueue_head(&priv->bh_wq);
	init_waitqueue_head(&priv->bh_evt_wq);

	err = !queue_work(priv->bh_workqueue, &priv->bh_work);
	WARN_ON(err);
	return err;
}

void cw1200_unregister_bh(struct cw1200_common *priv)
{
	atomic_inc(&priv->bh_term);
	wake_up(&priv->bh_wq);

	destroy_workqueue(priv->bh_workqueue);
	priv->bh_workqueue = NULL;

	pr_debug("[BH] unregistered.\n");
}

void cw1200_irq_handler(struct cw1200_common *priv)
{
	pr_debug("[BH] irq.\n");

	/* Disable Interrupts! */
	/* NOTE:  hwbus_ops->lock already held */
	__cw1200_irq_enable(priv, 0);

	if (/* WARN_ON */(priv->bh_error))
		return;

	if (atomic_inc_return(&priv->bh_rx) == 1)
		wake_up(&priv->bh_wq);
}
EXPORT_SYMBOL_GPL(cw1200_irq_handler);

void cw1200_bh_wakeup(struct cw1200_common *priv)
{
	pr_debug("[BH] wakeup.\n");
	if (priv->bh_error) {
		pr_err("[BH] wakeup failed (BH error)\n");
		return;
	}

	if (atomic_inc_return(&priv->bh_tx) == 1)
		wake_up(&priv->bh_wq);
}

int cw1200_bh_suspend(struct cw1200_common *priv)
{
	pr_debug("[BH] suspend.\n");
	if (priv->bh_error) {
		wiphy_warn(priv->hw->wiphy, "BH error -- can't suspend\n");
		return -EINVAL;
	}

	atomic_set(&priv->bh_suspend, CW1200_BH_SUSPEND);
	wake_up(&priv->bh_wq);
	return wait_event_timeout(priv->bh_evt_wq, priv->bh_error ||
		(CW1200_BH_SUSPENDED == atomic_read(&priv->bh_suspend)),
		 1 * HZ) ? 0 : -ETIMEDOUT;
}

int cw1200_bh_resume(struct cw1200_common *priv)
{
	pr_debug("[BH] resume.\n");
	if (priv->bh_error) {
		wiphy_warn(priv->hw->wiphy, "BH error -- can't resume\n");
		return -EINVAL;
	}

	atomic_set(&priv->bh_suspend, CW1200_BH_RESUME);
	wake_up(&priv->bh_wq);
	return wait_event_timeout(priv->bh_evt_wq, priv->bh_error ||
		(CW1200_BH_RESUMED == atomic_read(&priv->bh_suspend)),
		1 * HZ) ? 0 : -ETIMEDOUT;
}

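/* hw_bufs_used counts the device's WSM input buffers currently occupied by
 * frames the host has sent but the firmware has not yet confirmed; it
 * throttles TX and gates powering the device down when it drops to zero.
 */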
static inline void wsm_alloc_tx_buffer(struct cw1200_common *priv)
{
	++priv->hw_bufs_used;
}

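/* Returns 1 when this release frees space while the device buffers had been
 * full (the caller should kick TX), -1 on an accounting underflow, and 0
 * otherwise.
 */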
int wsm_release_tx_buffer(struct cw1200_common *priv, int count)
{
	int ret = 0;
	int hw_bufs_used = priv->hw_bufs_used;

	priv->hw_bufs_used -= count;
	if (WARN_ON(priv->hw_bufs_used < 0))
		ret = -1;
	else if (hw_bufs_used >= priv->wsm_caps.input_buffers)
		ret = 1;
	if (!priv->hw_bufs_used)
		wake_up(&priv->bh_evt_wq);
	return ret;
}

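/* Read the control register, retrying once on failure (the first access is
 * presumably allowed to fail transiently on the SDIO/SPI bus).
 */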
static int cw1200_bh_read_ctrl_reg(struct cw1200_common *priv,
					  u16 *ctrl_reg)
{
	int ret;

	ret = cw1200_reg_read_16(priv,
			ST90TDS_CONTROL_REG_ID, ctrl_reg);
	if (ret) {
		ret = cw1200_reg_read_16(priv,
				ST90TDS_CONTROL_REG_ID, ctrl_reg);
		if (ret)
			pr_err("[BH] Failed to read control register.\n");
	}

	return ret;
}

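/* Request device wake-up. Returns 1 if the device already reports WLAN_RDY
 * (awake and staying awake), 0 if wake-up was requested but the device is
 * not ready yet, or a negative error on bus failure.
 */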
static int cw1200_device_wakeup(struct cw1200_common *priv)
{
	u16 ctrl_reg;
	int ret;

	pr_debug("[BH] Device wakeup.\n");

	/* First, set the dpll register */
	ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID,
				  cw1200_dpll_from_clk(priv->hw_refclk));
	if (WARN_ON(ret))
		return ret;

	/* To force the device to be always-on, the host sets WLAN_UP to 1 */
	ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID,
			ST90TDS_CONT_WUP_BIT);
	if (WARN_ON(ret))
		return ret;

	ret = cw1200_bh_read_ctrl_reg(priv, &ctrl_reg);
	if (WARN_ON(ret))
		return ret;

	/* If the device returns WLAN_RDY as 1, the device is active and will
	 * remain active.
	 */
	if (ctrl_reg & ST90TDS_CONT_RDY_BIT) {
		pr_debug("[BH] Device awake.\n");
		return 1;
	}

	return 0;
}

/* Must be called from BH thread. */
void cw1200_enable_powersave(struct cw1200_common *priv,
			     bool enable)
{
	pr_debug("[BH] Powersave is %s.\n",
		 enable ? "enabled" : "disabled");
	priv->powersave_enabled = enable;
}

static int cw1200_bh_rx_helper(struct cw1200_common *priv,
			       uint16_t *ctrl_reg,
			       int *tx)
{
	size_t read_len = 0;
	struct sk_buff *skb_rx = NULL;
	struct wsm_hdr *wsm;
	size_t wsm_len;
	u16 wsm_id;
	u8 wsm_seq;
	int rx_resync = 1;

	size_t alloc_len;
	u8 *data;

	read_len = (*ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) * 2;
	if (!read_len)
		return 0; /* No more work */

	if (WARN_ON((read_len < sizeof(struct wsm_hdr)) ||
		    (read_len > EFFECTIVE_BUF_SIZE))) {
		pr_debug("Invalid read len: %zu (%04x)",
			 read_len, *ctrl_reg);
		goto err;
	}

	/* Add SIZE of PIGGYBACK reg (CONTROL Reg)
	 * to the NEXT Message length + 2 Bytes for SKB
	 */
	read_len = read_len + 2;

	alloc_len = priv->hwbus_ops->align_size(
		priv->hwbus_priv, read_len);

	/* Check if not exceeding CW1200 capabilities */
	if (WARN_ON_ONCE(alloc_len > EFFECTIVE_BUF_SIZE)) {
		pr_debug("Read aligned len: %zu\n",
			 alloc_len);
	}

	skb_rx = dev_alloc_skb(alloc_len);
	if (WARN_ON(!skb_rx))
		goto err;

	skb_trim(skb_rx, 0);
	skb_put(skb_rx, read_len);
	data = skb_rx->data;
	if (WARN_ON(!data))
		goto err;

	if (WARN_ON(cw1200_data_read(priv, data, alloc_len))) {
		pr_err("rx blew up, len %zu\n", alloc_len);
		goto err;
	}

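	/* The device appends the current value of the control register to
	 * the end of every read (the "piggyback"), which saves a separate
	 * register access; pick it out of the last 16-bit word of the buffer.
	 */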
	/* Piggyback */
	*ctrl_reg = __le16_to_cpu(
		((__le16 *)data)[alloc_len / 2 - 1]);

	wsm = (struct wsm_hdr *)data;
	wsm_len = __le16_to_cpu(wsm->len);
	if (WARN_ON(wsm_len > read_len))
		goto err;

	if (priv->wsm_enable_wsm_dumps)
		print_hex_dump_bytes("<-- ",
				     DUMP_PREFIX_NONE,
				     data, wsm_len);

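	/* WSM header id field as used here: bits 0-11 carry the message id,
	 * bits 13-15 the sequence number.
	 */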
	wsm_id  = __le16_to_cpu(wsm->id) & 0xFFF;
	wsm_seq = (__le16_to_cpu(wsm->id) >> 13) & 7;

	skb_trim(skb_rx, wsm_len);

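	/* 0x0800 is the firmware exception indication; ids with 0x0400 set
	 * are confirmations of earlier requests and release one device
	 * input buffer.
	 */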
	if (wsm_id == 0x0800) {
		wsm_handle_exception(priv,
				     &data[sizeof(*wsm)],
				     wsm_len - sizeof(*wsm));
		goto err;
	} else if (!rx_resync) {
		if (WARN_ON(wsm_seq != priv->wsm_rx_seq))
			goto err;
	}
	priv->wsm_rx_seq = (wsm_seq + 1) & 7;
	rx_resync = 0;

	if (wsm_id & 0x0400) {
		int rc = wsm_release_tx_buffer(priv, 1);
		if (WARN_ON(rc < 0))
			return rc;
		else if (rc > 0)
			*tx = 1;
	}

	/* cw1200_wsm_rx takes care of the SKB lifetime */
	if (WARN_ON(wsm_handle_rx(priv, wsm_id, wsm, &skb_rx)))
		goto err;

	dev_kfree_skb(skb_rx);

	return 0;

err:
	dev_kfree_skb(skb_rx);
	return -1;
}

static int cw1200_bh_tx_helper(struct cw1200_common *priv,
			       int *pending_tx,
			       int *tx_burst)
{
	size_t tx_len;
	u8 *data;
	int ret;
	struct wsm_hdr *wsm;

	if (priv->device_can_sleep) {
		ret = cw1200_device_wakeup(priv);
		if (WARN_ON(ret < 0)) { /* Error in wakeup */
			*pending_tx = 1;
			return 0;
		} else if (ret) { /* Woke up */
			priv->device_can_sleep = false;
		} else { /* Did not awake */
			*pending_tx = 1;
			return 0;
		}
	}

	wsm_alloc_tx_buffer(priv);
	ret = wsm_get_tx(priv, &data, &tx_len, tx_burst);
	if (ret <= 0) {
		wsm_release_tx_buffer(priv, 1);
		if (WARN_ON(ret < 0))
			return ret; /* Error */
		return 0; /* No work */
	}

	wsm = (struct wsm_hdr *)data;
	BUG_ON(tx_len < sizeof(*wsm));
	BUG_ON(__le16_to_cpu(wsm->len) != tx_len);

	atomic_inc(&priv->bh_tx);

	tx_len = priv->hwbus_ops->align_size(
		priv->hwbus_priv, tx_len);

	/* Check if not exceeding CW1200 capabilities */
	if (WARN_ON_ONCE(tx_len > EFFECTIVE_BUF_SIZE))
		pr_debug("Write aligned len: %zu\n", tx_len);

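	/* Stamp the frame: clear the sequence bits in the WSM id and insert
	 * the host's current TX sequence number before writing to the bus.
	 */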
	wsm->id &= __cpu_to_le16(0xffff ^ WSM_TX_SEQ(WSM_TX_SEQ_MAX));
	wsm->id |= __cpu_to_le16(WSM_TX_SEQ(priv->wsm_tx_seq));

	if (WARN_ON(cw1200_data_write(priv, data, tx_len))) {
		pr_err("tx blew up, len %zu\n", tx_len);
		wsm_release_tx_buffer(priv, 1);
		return -1; /* Error */
	}

	if (priv->wsm_enable_wsm_dumps)
		print_hex_dump_bytes("--> ",
				     DUMP_PREFIX_NONE,
				     data,
				     __le16_to_cpu(wsm->len));

	wsm_txed(priv, data);
	priv->wsm_tx_seq = (priv->wsm_tx_seq + 1) & WSM_TX_SEQ_MAX;

	if (*tx_burst > 1) {
		cw1200_debug_tx_burst(priv);
		return 1; /* Work remains */
	}

	return 0;
}

static int cw1200_bh(void *arg)
{
	struct cw1200_common *priv = arg;
	int rx, tx, term, suspend;
	u16 ctrl_reg = 0;
	int tx_allowed;
	int pending_tx = 0;
	int tx_burst;
	long status;
	u32 dummy;
	int ret;

	for (;;) {
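		/* Choose how long to wait for work: one second when the
		 * device is being put to sleep or TX confirmations are still
		 * outstanding (so a lost interrupt is noticed), otherwise
		 * wait indefinitely.
		 */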
		if (!priv->hw_bufs_used &&
		    priv->powersave_enabled &&
		    !priv->device_can_sleep &&
		    !atomic_read(&priv->recent_scan)) {
			status = 1 * HZ;
			pr_debug("[BH] Device wakedown. No data.\n");
			cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, 0);
			priv->device_can_sleep = true;
		} else if (priv->hw_bufs_used) {
			/* Interrupt loss detection */
			status = 1 * HZ;
		} else {
			status = MAX_SCHEDULE_TIMEOUT;
		}

		/* Dummy Read for SDIO retry mechanism */
		if ((priv->hw_type != -1) &&
		    (atomic_read(&priv->bh_rx) == 0) &&
		    (atomic_read(&priv->bh_tx) == 0))
			cw1200_reg_read(priv, ST90TDS_CONFIG_REG_ID,
					&dummy, sizeof(dummy));

		pr_debug("[BH] waiting ...\n");
		status = wait_event_interruptible_timeout(priv->bh_wq, ({
				rx = atomic_xchg(&priv->bh_rx, 0);
				tx = atomic_xchg(&priv->bh_tx, 0);
				term = atomic_xchg(&priv->bh_term, 0);
				suspend = pending_tx ?
					0 : atomic_read(&priv->bh_suspend);
				(rx || tx || term || suspend || priv->bh_error);
			}), status);

		pr_debug("[BH] - rx: %d, tx: %d, term: %d, bh_err: %d, suspend: %d, status: %ld\n",
			 rx, tx, term, suspend, priv->bh_error, status);

		/* Did an error occur? */
		if ((status < 0 && status != -ERESTARTSYS) ||
		    term || priv->bh_error) {
			break;
		}
		if (!status) {  /* wait_event timed out */
			unsigned long timestamp = jiffies;
			long timeout;
			int pending = 0;
			int i;

			/* Check to see if we have any outstanding frames */
			if (priv->hw_bufs_used && (!rx || !tx)) {
				wiphy_warn(priv->hw->wiphy,
					   "Missed interrupt? (%d frames outstanding)\n",
					   priv->hw_bufs_used);
				rx = 1;

				/* Get a timestamp of "oldest" frame */
				for (i = 0; i < 4; ++i)
					pending += cw1200_queue_get_xmit_timestamp(
						&priv->tx_queue[i],
						&timestamp,
						priv->pending_frame_id);

				/* Check if frame transmission is timed out.
				 * Add an extra second with respect to possible
				 * interrupt loss.
				 */
				timeout = timestamp +
					WSM_CMD_LAST_CHANCE_TIMEOUT +
					1 * HZ  -
					jiffies;

				/* And terminate BH thread if the frame is "stuck" */
				if (pending && timeout < 0) {
					wiphy_warn(priv->hw->wiphy,
						   "Timeout waiting for TX confirm (%d/%d pending, %ld vs %lu).\n",
						   priv->hw_bufs_used, pending,
						   timestamp, jiffies);
					break;
				}
			} else if (!priv->device_can_sleep &&
				   !atomic_read(&priv->recent_scan)) {
				pr_debug("[BH] Device wakedown. Timeout.\n");
				cw1200_reg_write_16(priv,
						    ST90TDS_CONTROL_REG_ID, 0);
				priv->device_can_sleep = true;
			}
			goto done;
		} else if (suspend) {
			pr_debug("[BH] Device suspend.\n");
			if (priv->powersave_enabled) {
				pr_debug("[BH] Device wakedown. Suspend.\n");
				cw1200_reg_write_16(priv,
						    ST90TDS_CONTROL_REG_ID, 0);
				priv->device_can_sleep = true;
			}

			atomic_set(&priv->bh_suspend, CW1200_BH_SUSPENDED);
			wake_up(&priv->bh_evt_wq);
			status = wait_event_interruptible(priv->bh_wq,
							  CW1200_BH_RESUME == atomic_read(&priv->bh_suspend));
			if (status < 0) {
				wiphy_err(priv->hw->wiphy,
					  "Failed to wait for resume: %ld.\n",
					  status);
				break;
			}
			pr_debug("[BH] Device resume.\n");
			atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
			wake_up(&priv->bh_evt_wq);
			atomic_inc(&priv->bh_rx);
			goto done;
		}

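	/* Main service loop: drain RX while the control register reports
	 * pending data, then service TX while device input buffers remain,
	 * looping via the rx/tx labels until both are idle.
	 */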
	rx:
		tx += pending_tx;
		pending_tx = 0;

		if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg))
			break;

		/* Don't bother trying to rx unless we have data to read */
		if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) {
			ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx);
			if (ret < 0)
				break;
			/* Double up here if there's more data.. */
			if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) {
				ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx);
				if (ret < 0)
					break;
			}
		}

	tx:
		if (tx) {
			tx = 0;

			BUG_ON(priv->hw_bufs_used > priv->wsm_caps.input_buffers);
			tx_burst = priv->wsm_caps.input_buffers - priv->hw_bufs_used;
			tx_allowed = tx_burst > 0;

			if (!tx_allowed) {
				/* Buffers full.  Ensure we process tx
				 * after we handle rx..
				 */
				pending_tx = tx;
				goto done_rx;
			}
			ret = cw1200_bh_tx_helper(priv, &pending_tx, &tx_burst);
			if (ret < 0)
				break;
			if (ret > 0) /* More to transmit */
				tx = ret;

			/* Re-read ctrl reg */
			if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg))
				break;
		}

	done_rx:
		if (priv->bh_error)
			break;
		if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK)
			goto rx;
		if (tx)
			goto tx;

	done:
		/* Re-enable device interrupts */
		priv->hwbus_ops->lock(priv->hwbus_priv);
		__cw1200_irq_enable(priv, 1);
		priv->hwbus_ops->unlock(priv->hwbus_priv);
	}

	/* Explicitly disable device interrupts */
	priv->hwbus_ops->lock(priv->hwbus_priv);
	__cw1200_irq_enable(priv, 0);
	priv->hwbus_ops->unlock(priv->hwbus_priv);

	if (!term) {
		pr_err("[BH] Fatal error, exiting.\n");
		priv->bh_error = 1;
		/* TODO: schedule_work(recovery) */
	}
	return 0;
}
606