xref: /linux/drivers/net/wireless/ti/wlcore/main.c (revision 7bb377107c72a40ab7505341f8626c8eb79a0cb7)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This file is part of wlcore
4  *
5  * Copyright (C) 2008-2010 Nokia Corporation
6  * Copyright (C) 2011-2013 Texas Instruments Inc.
7  */
8 
9 #include <linux/module.h>
10 #include <linux/firmware.h>
11 #include <linux/etherdevice.h>
12 #include <linux/vmalloc.h>
13 #include <linux/interrupt.h>
14 #include <linux/irq.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/pm_wakeirq.h>
17 
18 #include "wlcore.h"
19 #include "debug.h"
20 #include "wl12xx_80211.h"
21 #include "io.h"
22 #include "tx.h"
23 #include "ps.h"
24 #include "init.h"
25 #include "debugfs.h"
26 #include "testmode.h"
27 #include "vendor_cmd.h"
28 #include "scan.h"
29 #include "hw_ops.h"
30 #include "sysfs.h"
31 
32 #define WL1271_BOOT_RETRIES 3
33 #define WL1271_SUSPEND_SLEEP 100
34 #define WL1271_WAKEUP_TIMEOUT 500
35 
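/*
 * Module parameters: fwlog_param selects the FW log mode ("continuous",
 * "dbgpins" or "disable"), while a value of -1 below leaves the
 * corresponding wl->conf default untouched - see wlcore_adjust_conf().
 */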
36 static char *fwlog_param;
37 static int fwlog_mem_blocks = -1;
38 static int bug_on_recovery = -1;
39 static int no_recovery     = -1;
40 
41 static void __wl1271_op_remove_interface(struct wl1271 *wl,
42 					 struct ieee80211_vif *vif,
43 					 bool reset_tx_queues);
44 static void wlcore_op_stop_locked(struct wl1271 *wl);
45 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
46 
47 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
48 {
49 	int ret;
50 
51 	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
52 		return -EINVAL;
53 
54 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
55 		return 0;
56 
57 	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
58 		return 0;
59 
60 	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
61 	if (ret < 0)
62 		return ret;
63 
64 	wl1271_info("Association completed.");
65 	return 0;
66 }
67 
68 static void wl1271_reg_notify(struct wiphy *wiphy,
69 			      struct regulatory_request *request)
70 {
71 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
72 	struct wl1271 *wl = hw->priv;
73 
74 	/* copy the current dfs region */
75 	if (request)
76 		wl->dfs_region = request->dfs_region;
77 
78 	wlcore_regdomain_config(wl);
79 }
80 
81 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
82 				   bool enable)
83 {
84 	int ret = 0;
85 
86 	/* caller must hold wl->mutex */
87 	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
88 	if (ret < 0)
89 		goto out;
90 
91 	if (enable)
92 		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
93 	else
94 		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
95 out:
96 	return ret;
97 }
98 
99 /*
100  * This function is called when the rx_streaming interval
101  * has been changed or when rx_streaming should be disabled.
102  */
103 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
104 {
105 	int ret = 0;
106 	int period = wl->conf.rx_streaming.interval;
107 
108 	/* don't reconfigure if rx_streaming is disabled */
109 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
110 		goto out;
111 
112 	/* reconfigure/disable according to new streaming_period */
113 	if (period &&
114 	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
115 	    (wl->conf.rx_streaming.always ||
116 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
117 		ret = wl1271_set_rx_streaming(wl, wlvif, true);
118 	else {
119 		ret = wl1271_set_rx_streaming(wl, wlvif, false);
120 		/* don't cancel_work_sync since we might deadlock */
121 		del_timer_sync(&wlvif->rx_streaming_timer);
122 	}
123 out:
124 	return ret;
125 }
126 
127 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
128 {
129 	int ret;
130 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
131 						rx_streaming_enable_work);
132 	struct wl1271 *wl = wlvif->wl;
133 
134 	mutex_lock(&wl->mutex);
135 
136 	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
137 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
138 	    (!wl->conf.rx_streaming.always &&
139 	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
140 		goto out;
141 
142 	if (!wl->conf.rx_streaming.interval)
143 		goto out;
144 
145 	ret = pm_runtime_get_sync(wl->dev);
146 	if (ret < 0) {
147 		pm_runtime_put_noidle(wl->dev);
148 		goto out;
149 	}
150 
151 	ret = wl1271_set_rx_streaming(wl, wlvif, true);
152 	if (ret < 0)
153 		goto out_sleep;
154 
155 	/* stop it after some time of inactivity */
156 	mod_timer(&wlvif->rx_streaming_timer,
157 		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
158 
159 out_sleep:
160 	pm_runtime_mark_last_busy(wl->dev);
161 	pm_runtime_put_autosuspend(wl->dev);
162 out:
163 	mutex_unlock(&wl->mutex);
164 }
165 
166 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
167 {
168 	int ret;
169 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
170 						rx_streaming_disable_work);
171 	struct wl1271 *wl = wlvif->wl;
172 
173 	mutex_lock(&wl->mutex);
174 
175 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
176 		goto out;
177 
178 	ret = pm_runtime_get_sync(wl->dev);
179 	if (ret < 0) {
180 		pm_runtime_put_noidle(wl->dev);
181 		goto out;
182 	}
183 
184 	ret = wl1271_set_rx_streaming(wl, wlvif, false);
185 	if (ret)
186 		goto out_sleep;
187 
188 out_sleep:
189 	pm_runtime_mark_last_busy(wl->dev);
190 	pm_runtime_put_autosuspend(wl->dev);
191 out:
192 	mutex_unlock(&wl->mutex);
193 }
194 
195 static void wl1271_rx_streaming_timer(struct timer_list *t)
196 {
197 	struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
198 	struct wl1271 *wl = wlvif->wl;
199 	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
200 }
201 
202 /* wl->mutex must be taken */
203 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
204 {
205 	/* if the watchdog is not armed, don't do anything */
206 	if (wl->tx_allocated_blocks == 0)
207 		return;
208 
209 	cancel_delayed_work(&wl->tx_watchdog_work);
210 	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
211 		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
212 }
213 
214 static void wlcore_rc_update_work(struct work_struct *work)
215 {
216 	int ret;
217 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
218 						rc_update_work);
219 	struct wl1271 *wl = wlvif->wl;
220 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
221 
222 	mutex_lock(&wl->mutex);
223 
224 	if (unlikely(wl->state != WLCORE_STATE_ON))
225 		goto out;
226 
227 	ret = pm_runtime_get_sync(wl->dev);
228 	if (ret < 0) {
229 		pm_runtime_put_noidle(wl->dev);
230 		goto out;
231 	}
232 
233 	if (ieee80211_vif_is_mesh(vif)) {
234 		ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
235 						     true, wlvif->sta.hlid);
236 		if (ret < 0)
237 			goto out_sleep;
238 	} else {
239 		wlcore_hw_sta_rc_update(wl, wlvif);
240 	}
241 
242 out_sleep:
243 	pm_runtime_mark_last_busy(wl->dev);
244 	pm_runtime_put_autosuspend(wl->dev);
245 out:
246 	mutex_unlock(&wl->mutex);
247 }
248 
249 static void wl12xx_tx_watchdog_work(struct work_struct *work)
250 {
251 	struct delayed_work *dwork;
252 	struct wl1271 *wl;
253 
254 	dwork = to_delayed_work(work);
255 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
256 
257 	mutex_lock(&wl->mutex);
258 
259 	if (unlikely(wl->state != WLCORE_STATE_ON))
260 		goto out;
261 
262 	/* Tx went out in the meantime - everything is ok */
263 	if (unlikely(wl->tx_allocated_blocks == 0))
264 		goto out;
265 
266 	/*
267 	 * if a ROC is in progress, we might not have any Tx for a long
268 	 * time (e.g. pending Tx on the non-ROC channels)
269 	 */
270 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
271 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
272 			     wl->conf.tx.tx_watchdog_timeout);
273 		wl12xx_rearm_tx_watchdog_locked(wl);
274 		goto out;
275 	}
276 
277 	/*
278 	 * if a scan is in progress, we might not have any Tx for a long
279 	 * time
280 	 */
281 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
282 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
283 			     wl->conf.tx.tx_watchdog_timeout);
284 		wl12xx_rearm_tx_watchdog_locked(wl);
285 		goto out;
286 	}
287 
288 	/*
289 	 * An AP might cache a frame for a long time for a sleeping station,
290 	 * so rearm the timer if there's an AP interface with stations. If
291 	 * Tx is genuinely stuck, we will hopefully discover it when all
292 	 * stations are removed due to inactivity.
293 	 */
294 	if (wl->active_sta_count) {
295 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has"
296 			     " %d stations",
297 			      wl->conf.tx.tx_watchdog_timeout,
298 			      wl->active_sta_count);
299 		wl12xx_rearm_tx_watchdog_locked(wl);
300 		goto out;
301 	}
302 
303 	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
304 		     wl->conf.tx.tx_watchdog_timeout);
305 	wl12xx_queue_recovery_work(wl);
306 
307 out:
308 	mutex_unlock(&wl->mutex);
309 }
310 
311 static void wlcore_adjust_conf(struct wl1271 *wl)
312 {
313 
314 	if (fwlog_param) {
315 		if (!strcmp(fwlog_param, "continuous")) {
316 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
317 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
318 		} else if (!strcmp(fwlog_param, "dbgpins")) {
319 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
320 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
321 		} else if (!strcmp(fwlog_param, "disable")) {
322 			wl->conf.fwlog.mem_blocks = 0;
323 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
324 		} else {
325 			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
326 		}
327 	}
328 
329 	if (bug_on_recovery != -1)
330 		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
331 
332 	if (no_recovery != -1)
333 		wl->conf.recovery.no_recovery = (u8) no_recovery;
334 }
335 
336 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
337 					struct wl12xx_vif *wlvif,
338 					u8 hlid, u8 tx_pkts)
339 {
340 	bool fw_ps;
341 
342 	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
343 
344 	/*
345 	 * Wake up from high-level PS if the STA is asleep with too few
346 	 * packets in the FW or if the STA is awake.
347 	 */
348 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
349 		wl12xx_ps_link_end(wl, wlvif, hlid);
350 
351 	/*
352 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
353 	 * Make an exception if this is the only connected link. In this
354 	 * case FW-memory congestion is less of a problem.
355 	 * Note that a single connected STA means 2*ap_count + 1 active links,
356 	 * since we must account for the global and broadcast AP links
357 	 * for each AP. The "fw_ps" check assures us the other link is a STA
358 	 * connected to the AP. Otherwise the FW would not set the PSM bit.
359 	 */
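	/*
	 * Illustrative example: with a single AP (ap_count = 1), the global
	 * and broadcast links plus one connected STA give 2*1 + 1 = 3 active
	 * links, so high-level PS is only started once at least one more
	 * data link exists (active_link_count > 3).
	 */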
360 	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
361 		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
362 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
363 }
364 
365 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
366 					   struct wl12xx_vif *wlvif,
367 					   struct wl_fw_status *status)
368 {
369 	unsigned long cur_fw_ps_map;
370 	u8 hlid;
371 
372 	cur_fw_ps_map = status->link_ps_bitmap;
373 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
374 		wl1271_debug(DEBUG_PSM,
375 			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
376 			     wl->ap_fw_ps_map, cur_fw_ps_map,
377 			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
378 
379 		wl->ap_fw_ps_map = cur_fw_ps_map;
380 	}
381 
382 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
383 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
384 					    wl->links[hlid].allocated_pkts);
385 }
386 
387 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
388 {
389 	struct wl12xx_vif *wlvif;
390 	u32 old_tx_blk_count = wl->tx_blocks_available;
391 	int avail, freed_blocks;
392 	int i;
393 	int ret;
394 	struct wl1271_link *lnk;
395 
396 	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
397 				   wl->raw_fw_status,
398 				   wl->fw_status_len, false);
399 	if (ret < 0)
400 		return ret;
401 
402 	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
403 
404 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
405 		     "drv_rx_counter = %d, tx_results_counter = %d)",
406 		     status->intr,
407 		     status->fw_rx_counter,
408 		     status->drv_rx_counter,
409 		     status->tx_results_counter);
410 
411 	for (i = 0; i < NUM_TX_QUEUES; i++) {
412 		/* prevent wrap-around in freed-packets counter */
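		/* e.g. prev freed = 250, FW counter = 5: (5 - 250) & 0xff = 11 newly freed */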
413 		wl->tx_allocated_pkts[i] -=
414 				(status->counters.tx_released_pkts[i] -
415 				wl->tx_pkts_freed[i]) & 0xff;
416 
417 		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
418 	}
419 
420 
421 	for_each_set_bit(i, wl->links_map, wl->num_links) {
422 		u8 diff;
423 		lnk = &wl->links[i];
424 
425 		/* prevent wrap-around in freed-packets counter */
426 		diff = (status->counters.tx_lnk_free_pkts[i] -
427 		       lnk->prev_freed_pkts) & 0xff;
428 
429 		if (diff == 0)
430 			continue;
431 
432 		lnk->allocated_pkts -= diff;
433 		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
434 
435 		/* accumulate the prev_freed_pkts counter */
436 		lnk->total_freed_pkts += diff;
437 	}
438 
439 	/* prevent wrap-around in total blocks counter */
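	/* e.g. tx_blocks_freed = 0xffffff00, new total = 0x40:
	 * 2^32 - 0xffffff00 + 0x40 = 0x140 blocks were freed */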
440 	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
441 		freed_blocks = status->total_released_blks -
442 			       wl->tx_blocks_freed;
443 	else
444 		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
445 			       status->total_released_blks;
446 
447 	wl->tx_blocks_freed = status->total_released_blks;
448 
449 	wl->tx_allocated_blocks -= freed_blocks;
450 
451 	/*
452 	 * If the FW freed some blocks:
453 	 * If we still have allocated blocks - re-arm the timer, Tx is
454 	 * not stuck. Otherwise, cancel the timer (no Tx currently).
455 	 */
456 	if (freed_blocks) {
457 		if (wl->tx_allocated_blocks)
458 			wl12xx_rearm_tx_watchdog_locked(wl);
459 		else
460 			cancel_delayed_work(&wl->tx_watchdog_work);
461 	}
462 
463 	avail = status->tx_total - wl->tx_allocated_blocks;
464 
465 	/*
466 	 * The FW might change the total number of TX memblocks before
467 	 * we get a notification about blocks being released. Thus, the
468 	 * available blocks calculation might yield a temporary result
469 	 * which is lower than the actual available blocks. Keeping in
470 	 * mind that only blocks that were allocated can be moved from
471 	 * TX to RX, tx_blocks_available should never decrease here.
472 	 */
473 	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
474 				      avail);
475 
476 	/* if more blocks are available now, tx work can be scheduled */
477 	if (wl->tx_blocks_available > old_tx_blk_count)
478 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
479 
480 	/* for AP update num of allocated TX blocks per link and ps status */
481 	wl12xx_for_each_wlvif_ap(wl, wlvif) {
482 		wl12xx_irq_update_links_status(wl, wlvif, status);
483 	}
484 
485 	/* update the host-chipset time offset */
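	/* ">> 10" is a cheap approximation of a ns -> us conversion
	 * (divide by 1024 rather than 1000) */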
486 	wl->time_offset = (ktime_get_boottime_ns() >> 10) -
487 		(s64)(status->fw_localtime);
488 
489 	wl->fw_fast_lnk_map = status->link_fast_bitmap;
490 
491 	return 0;
492 }
493 
494 static void wl1271_flush_deferred_work(struct wl1271 *wl)
495 {
496 	struct sk_buff *skb;
497 
498 	/* Pass all received frames to the network stack */
499 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
500 		ieee80211_rx_ni(wl->hw, skb);
501 
502 	/* Return sent skbs to the network stack */
503 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
504 		ieee80211_tx_status_ni(wl->hw, skb);
505 }
506 
507 static void wl1271_netstack_work(struct work_struct *work)
508 {
509 	struct wl1271 *wl =
510 		container_of(work, struct wl1271, netstack_work);
511 
512 	do {
513 		wl1271_flush_deferred_work(wl);
514 	} while (skb_queue_len(&wl->deferred_rx_queue));
515 }
516 
517 #define WL1271_IRQ_MAX_LOOPS 256
518 
519 static int wlcore_irq_locked(struct wl1271 *wl)
520 {
521 	int ret = 0;
522 	u32 intr;
523 	int loopcount = WL1271_IRQ_MAX_LOOPS;
524 	bool done = false;
525 	unsigned int defer_count;
526 	unsigned long flags;
527 
528 	/*
529 	 * If an edge-triggered interrupt must be used, we cannot iterate
530 	 * more than once without introducing race conditions with the hardirq.
531 	 */
532 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
533 		loopcount = 1;
534 
535 	wl1271_debug(DEBUG_IRQ, "IRQ work");
536 
537 	if (unlikely(wl->state != WLCORE_STATE_ON))
538 		goto out;
539 
540 	ret = pm_runtime_get_sync(wl->dev);
541 	if (ret < 0) {
542 		pm_runtime_put_noidle(wl->dev);
543 		goto out;
544 	}
545 
546 	while (!done && loopcount--) {
547 		smp_mb__after_atomic();
548 
549 		ret = wlcore_fw_status(wl, wl->fw_status);
550 		if (ret < 0)
551 			goto out;
552 
553 		wlcore_hw_tx_immediate_compl(wl);
554 
555 		intr = wl->fw_status->intr;
556 		intr &= WLCORE_ALL_INTR_MASK;
557 		if (!intr) {
558 			done = true;
559 			continue;
560 		}
561 
562 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
563 			wl1271_error("HW watchdog interrupt received! starting recovery.");
564 			wl->watchdog_recovery = true;
565 			ret = -EIO;
566 
567 			/* restarting the chip. ignore any other interrupt. */
568 			goto out;
569 		}
570 
571 		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
572 			wl1271_error("SW watchdog interrupt received! "
573 				     "starting recovery.");
574 			wl->watchdog_recovery = true;
575 			ret = -EIO;
576 
577 			/* restarting the chip. ignore any other interrupt. */
578 			goto out;
579 		}
580 
581 		if (likely(intr & WL1271_ACX_INTR_DATA)) {
582 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
583 
584 			ret = wlcore_rx(wl, wl->fw_status);
585 			if (ret < 0)
586 				goto out;
587 
588 			/* Check if any tx blocks were freed */
589 			spin_lock_irqsave(&wl->wl_lock, flags);
590 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
591 			    wl1271_tx_total_queue_count(wl) > 0) {
592 				spin_unlock_irqrestore(&wl->wl_lock, flags);
593 				/*
594 				 * In order to avoid starvation of the TX path,
595 				 * call the work function directly.
596 				 */
597 				ret = wlcore_tx_work_locked(wl);
598 				if (ret < 0)
599 					goto out;
600 			} else {
601 				spin_unlock_irqrestore(&wl->wl_lock, flags);
602 			}
603 
604 			/* check for tx results */
605 			ret = wlcore_hw_tx_delayed_compl(wl);
606 			if (ret < 0)
607 				goto out;
608 
609 			/* Make sure the deferred queues don't get too long */
610 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
611 				      skb_queue_len(&wl->deferred_rx_queue);
612 			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
613 				wl1271_flush_deferred_work(wl);
614 		}
615 
616 		if (intr & WL1271_ACX_INTR_EVENT_A) {
617 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
618 			ret = wl1271_event_handle(wl, 0);
619 			if (ret < 0)
620 				goto out;
621 		}
622 
623 		if (intr & WL1271_ACX_INTR_EVENT_B) {
624 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
625 			ret = wl1271_event_handle(wl, 1);
626 			if (ret < 0)
627 				goto out;
628 		}
629 
630 		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
631 			wl1271_debug(DEBUG_IRQ,
632 				     "WL1271_ACX_INTR_INIT_COMPLETE");
633 
634 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
635 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
636 	}
637 
638 	pm_runtime_mark_last_busy(wl->dev);
639 	pm_runtime_put_autosuspend(wl->dev);
640 
641 out:
642 	return ret;
643 }
644 
645 static irqreturn_t wlcore_irq(int irq, void *cookie)
646 {
647 	int ret;
648 	unsigned long flags;
649 	struct wl1271 *wl = cookie;
650 
651 	/* signal the ELP wakeup completion, if anyone is waiting for it */
652 	spin_lock_irqsave(&wl->wl_lock, flags);
653 	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
654 	if (wl->elp_compl) {
655 		complete(wl->elp_compl);
656 		wl->elp_compl = NULL;
657 	}
658 
659 	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
660 		/* don't enqueue a work right now. mark it as pending */
661 		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
662 		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
663 		disable_irq_nosync(wl->irq);
664 		pm_wakeup_event(wl->dev, 0);
665 		spin_unlock_irqrestore(&wl->wl_lock, flags);
666 		goto out_handled;
667 	}
668 	spin_unlock_irqrestore(&wl->wl_lock, flags);
669 
670 	/* TX might be handled here, avoid redundant work */
671 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
672 	cancel_work_sync(&wl->tx_work);
673 
674 	mutex_lock(&wl->mutex);
675 
676 	ret = wlcore_irq_locked(wl);
677 	if (ret)
678 		wl12xx_queue_recovery_work(wl);
679 
680 	spin_lock_irqsave(&wl->wl_lock, flags);
681 	/* In case TX was not handled here, queue TX work */
682 	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
683 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
684 	    wl1271_tx_total_queue_count(wl) > 0)
685 		ieee80211_queue_work(wl->hw, &wl->tx_work);
686 	spin_unlock_irqrestore(&wl->wl_lock, flags);
687 
688 	mutex_unlock(&wl->mutex);
689 
690 out_handled:
691 	spin_lock_irqsave(&wl->wl_lock, flags);
692 	clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
693 	spin_unlock_irqrestore(&wl->wl_lock, flags);
694 
695 	return IRQ_HANDLED;
696 }
697 
698 struct vif_counter_data {
699 	u8 counter;
700 
701 	struct ieee80211_vif *cur_vif;
702 	bool cur_vif_running;
703 };
704 
705 static void wl12xx_vif_count_iter(void *data, u8 *mac,
706 				  struct ieee80211_vif *vif)
707 {
708 	struct vif_counter_data *counter = data;
709 
710 	counter->counter++;
711 	if (counter->cur_vif == vif)
712 		counter->cur_vif_running = true;
713 }
714 
715 /* caller must not hold wl->mutex, as it might deadlock */
716 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
717 			       struct ieee80211_vif *cur_vif,
718 			       struct vif_counter_data *data)
719 {
720 	memset(data, 0, sizeof(*data));
721 	data->cur_vif = cur_vif;
722 
723 	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
724 					    wl12xx_vif_count_iter, data);
725 }
726 
727 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
728 {
729 	const struct firmware *fw;
730 	const char *fw_name;
731 	enum wl12xx_fw_type fw_type;
732 	int ret;
733 
734 	if (plt) {
735 		fw_type = WL12XX_FW_TYPE_PLT;
736 		fw_name = wl->plt_fw_name;
737 	} else {
738 		/*
739 		 * we can't call wl12xx_get_vif_count() here because
740 		 * wl->mutex is taken, so use the cached last_vif_count value
741 		 */
742 		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
743 			fw_type = WL12XX_FW_TYPE_MULTI;
744 			fw_name = wl->mr_fw_name;
745 		} else {
746 			fw_type = WL12XX_FW_TYPE_NORMAL;
747 			fw_name = wl->sr_fw_name;
748 		}
749 	}
750 
751 	if (wl->fw_type == fw_type)
752 		return 0;
753 
754 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
755 
756 	ret = request_firmware(&fw, fw_name, wl->dev);
757 
758 	if (ret < 0) {
759 		wl1271_error("could not get firmware %s: %d", fw_name, ret);
760 		return ret;
761 	}
762 
763 	if (fw->size % 4) {
764 		wl1271_error("firmware size is not a multiple of 32 bits: %zu",
765 			     fw->size);
766 		ret = -EILSEQ;
767 		goto out;
768 	}
769 
770 	vfree(wl->fw);
771 	wl->fw_type = WL12XX_FW_TYPE_NONE;
772 	wl->fw_len = fw->size;
773 	wl->fw = vmalloc(wl->fw_len);
774 
775 	if (!wl->fw) {
776 		wl1271_error("could not allocate memory for the firmware");
777 		ret = -ENOMEM;
778 		goto out;
779 	}
780 
781 	memcpy(wl->fw, fw->data, wl->fw_len);
782 	ret = 0;
783 	wl->fw_type = fw_type;
784 out:
785 	release_firmware(fw);
786 
787 	return ret;
788 }
789 
790 void wl12xx_queue_recovery_work(struct wl1271 *wl)
791 {
792 	/* Avoid a recursive recovery */
793 	if (wl->state == WLCORE_STATE_ON) {
794 		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
795 				  &wl->flags));
796 
797 		wl->state = WLCORE_STATE_RESTARTING;
798 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
799 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
800 	}
801 }
802 
803 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
804 {
805 	size_t len;
806 
807 	/* Make sure we have enough room */
808 	len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
809 
810 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
811 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
812 	wl->fwlog_size += len;
813 
814 	return len;
815 }
816 
817 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
818 {
819 	u32 end_of_log = 0;
820 	int error;
821 
822 	if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
823 		return;
824 
825 	wl1271_info("Reading FW panic log");
826 
827 	/*
828 	 * Make sure the chip is awake and the logger isn't active.
829 	 * Do not send a stop fwlog command if the fw is hung or if
830 	 * dbgpins are used (due to some fw bug).
831 	 */
832 	error = pm_runtime_get_sync(wl->dev);
833 	if (error < 0) {
834 		pm_runtime_put_noidle(wl->dev);
835 		return;
836 	}
837 	if (!wl->watchdog_recovery &&
838 	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
839 		wl12xx_cmd_stop_fwlog(wl);
840 
841 	/* Traverse the memory blocks linked list */
842 	do {
843 		end_of_log = wlcore_event_fw_logger(wl);
844 		if (end_of_log == 0) {
845 			msleep(100);
846 			end_of_log = wlcore_event_fw_logger(wl);
847 		}
848 	} while (end_of_log != 0);
849 }
850 
851 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
852 				   u8 hlid, struct ieee80211_sta *sta)
853 {
854 	struct wl1271_station *wl_sta;
855 	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
856 
857 	wl_sta = (void *)sta->drv_priv;
858 	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
859 
860 	/*
861 	 * increment the initial seq number on recovery to account for
862 	 * transmitted packets that we haven't yet got in the FW status
863 	 */
864 	if (wlvif->encryption_type == KEY_GEM)
865 		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
866 
867 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
868 		wl_sta->total_freed_pkts += sqn_recovery_padding;
869 }
870 
871 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
872 					struct wl12xx_vif *wlvif,
873 					u8 hlid, const u8 *addr)
874 {
875 	struct ieee80211_sta *sta;
876 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
877 
878 	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
879 		    is_zero_ether_addr(addr)))
880 		return;
881 
882 	rcu_read_lock();
883 	sta = ieee80211_find_sta(vif, addr);
884 	if (sta)
885 		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
886 	rcu_read_unlock();
887 }
888 
889 static void wlcore_print_recovery(struct wl1271 *wl)
890 {
891 	u32 pc = 0;
892 	u32 hint_sts = 0;
893 	int ret;
894 
895 	wl1271_info("Hardware recovery in progress. FW ver: %s",
896 		    wl->chip.fw_ver_str);
897 
898 	/* change partitions momentarily so we can read the FW pc */
899 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
900 	if (ret < 0)
901 		return;
902 
903 	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
904 	if (ret < 0)
905 		return;
906 
907 	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
908 	if (ret < 0)
909 		return;
910 
911 	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
912 				pc, hint_sts, ++wl->recovery_count);
913 
914 	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
915 }
916 
917 
918 static void wl1271_recovery_work(struct work_struct *work)
919 {
920 	struct wl1271 *wl =
921 		container_of(work, struct wl1271, recovery_work);
922 	struct wl12xx_vif *wlvif;
923 	struct ieee80211_vif *vif;
924 	int error;
925 
926 	mutex_lock(&wl->mutex);
927 
928 	if (wl->state == WLCORE_STATE_OFF || wl->plt)
929 		goto out_unlock;
930 
931 	error = pm_runtime_get_sync(wl->dev);
932 	if (error < 0) {
933 		wl1271_warning("Enable for recovery failed");
934 		pm_runtime_put_noidle(wl->dev);
935 	}
936 	wlcore_disable_interrupts_nosync(wl);
937 
938 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
939 		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
940 			wl12xx_read_fwlog_panic(wl);
941 		wlcore_print_recovery(wl);
942 	}
943 
944 	BUG_ON(wl->conf.recovery.bug_on_recovery &&
945 	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
946 
947 	clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
948 
949 	if (wl->conf.recovery.no_recovery) {
950 		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
951 		goto out_unlock;
952 	}
953 
954 	/* Prevent spurious TX during FW restart */
955 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
956 
957 	/* reboot the chipset */
958 	while (!list_empty(&wl->wlvif_list)) {
959 		wlvif = list_first_entry(&wl->wlvif_list,
960 				       struct wl12xx_vif, list);
961 		vif = wl12xx_wlvif_to_vif(wlvif);
962 
963 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
964 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
965 			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
966 						    vif->bss_conf.bssid);
967 		}
968 
969 		__wl1271_op_remove_interface(wl, vif, false);
970 	}
971 
972 	wlcore_op_stop_locked(wl);
973 	pm_runtime_mark_last_busy(wl->dev);
974 	pm_runtime_put_autosuspend(wl->dev);
975 
976 	ieee80211_restart_hw(wl->hw);
977 
978 	/*
979 	 * It's safe to enable TX now - the queues are stopped after a request
980 	 * to restart the HW.
981 	 */
982 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
983 
984 out_unlock:
985 	wl->watchdog_recovery = false;
986 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
987 	mutex_unlock(&wl->mutex);
988 }
989 
990 static int wlcore_fw_wakeup(struct wl1271 *wl)
991 {
992 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
993 }
994 
995 static int wl1271_setup(struct wl1271 *wl)
996 {
997 	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
998 	if (!wl->raw_fw_status)
999 		goto err;
1000 
1001 	wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1002 	if (!wl->fw_status)
1003 		goto err;
1004 
1005 	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1006 	if (!wl->tx_res_if)
1007 		goto err;
1008 
1009 	return 0;
1010 err:
1011 	kfree(wl->fw_status);
1012 	kfree(wl->raw_fw_status);
1013 	return -ENOMEM;
1014 }
1015 
1016 static int wl12xx_set_power_on(struct wl1271 *wl)
1017 {
1018 	int ret;
1019 
1020 	msleep(WL1271_PRE_POWER_ON_SLEEP);
1021 	ret = wl1271_power_on(wl);
1022 	if (ret < 0)
1023 		goto out;
1024 	msleep(WL1271_POWER_ON_SLEEP);
1025 	wl1271_io_reset(wl);
1026 	wl1271_io_init(wl);
1027 
1028 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1029 	if (ret < 0)
1030 		goto fail;
1031 
1032 	/* ELP module wake up */
1033 	ret = wlcore_fw_wakeup(wl);
1034 	if (ret < 0)
1035 		goto fail;
1036 
1037 out:
1038 	return ret;
1039 
1040 fail:
1041 	wl1271_power_off(wl);
1042 	return ret;
1043 }
1044 
1045 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1046 {
1047 	int ret = 0;
1048 
1049 	ret = wl12xx_set_power_on(wl);
1050 	if (ret < 0)
1051 		goto out;
1052 
1053 	/*
1054 	 * For wl127x based devices we could use the default block
1055 	 * size (512 bytes), but due to a bug in the sdio driver, we
1056 	 * need to set it explicitly after the chip is powered on.  To
1057 	 * simplify the code and since the performance impact is
1058 	 * negligible, we use the same block size for all different
1059 	 * chip types.
1060 	 *
1061 	 * Check if the bus supports blocksize alignment and, if it
1062 	 * doesn't, make sure we don't have the quirk.
1063 	 */
1064 	if (!wl1271_set_block_size(wl))
1065 		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1066 
1067 	/* TODO: make sure the lower driver has set things up correctly */
1068 
1069 	ret = wl1271_setup(wl);
1070 	if (ret < 0)
1071 		goto out;
1072 
1073 	ret = wl12xx_fetch_firmware(wl, plt);
1074 	if (ret < 0) {
1075 		kfree(wl->fw_status);
1076 		kfree(wl->raw_fw_status);
1077 		kfree(wl->tx_res_if);
1078 	}
1079 
1080 out:
1081 	return ret;
1082 }
1083 
1084 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1085 {
1086 	int retries = WL1271_BOOT_RETRIES;
1087 	struct wiphy *wiphy = wl->hw->wiphy;
1088 
1089 	static const char* const PLT_MODE[] = {
1090 		"PLT_OFF",
1091 		"PLT_ON",
1092 		"PLT_FEM_DETECT",
1093 		"PLT_CHIP_AWAKE"
1094 	};
1095 
1096 	int ret;
1097 
1098 	mutex_lock(&wl->mutex);
1099 
1100 	wl1271_notice("power up");
1101 
1102 	if (wl->state != WLCORE_STATE_OFF) {
1103 		wl1271_error("cannot go into PLT state because not "
1104 			     "in off state: %d", wl->state);
1105 		ret = -EBUSY;
1106 		goto out;
1107 	}
1108 
1109 	/* Indicate to lower levels that we are now in PLT mode */
1110 	wl->plt = true;
1111 	wl->plt_mode = plt_mode;
1112 
1113 	while (retries) {
1114 		retries--;
1115 		ret = wl12xx_chip_wakeup(wl, true);
1116 		if (ret < 0)
1117 			goto power_off;
1118 
1119 		if (plt_mode != PLT_CHIP_AWAKE) {
1120 			ret = wl->ops->plt_init(wl);
1121 			if (ret < 0)
1122 				goto power_off;
1123 		}
1124 
1125 		wl->state = WLCORE_STATE_ON;
1126 		wl1271_notice("firmware booted in PLT mode %s (%s)",
1127 			      PLT_MODE[plt_mode],
1128 			      wl->chip.fw_ver_str);
1129 
1130 		/* update hw/fw version info in wiphy struct */
1131 		wiphy->hw_version = wl->chip.id;
1132 		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1133 			sizeof(wiphy->fw_version));
1134 
1135 		goto out;
1136 
1137 power_off:
1138 		wl1271_power_off(wl);
1139 	}
1140 
1141 	wl->plt = false;
1142 	wl->plt_mode = PLT_OFF;
1143 
1144 	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1145 		     WL1271_BOOT_RETRIES);
1146 out:
1147 	mutex_unlock(&wl->mutex);
1148 
1149 	return ret;
1150 }
1151 
1152 int wl1271_plt_stop(struct wl1271 *wl)
1153 {
1154 	int ret = 0;
1155 
1156 	wl1271_notice("power down");
1157 
1158 	/*
1159 	 * Interrupts must be disabled before setting the state to OFF.
1160 	 * Otherwise, the interrupt handler might be called and exit without
1161 	 * reading the interrupt status.
1162 	 */
1163 	wlcore_disable_interrupts(wl);
1164 	mutex_lock(&wl->mutex);
1165 	if (!wl->plt) {
1166 		mutex_unlock(&wl->mutex);
1167 
1168 		/*
1169 		 * This will not necessarily enable interrupts as interrupts
1170 		 * may have been disabled when op_stop was called. It will,
1171 		 * however, balance the above call to disable_interrupts().
1172 		 */
1173 		wlcore_enable_interrupts(wl);
1174 
1175 		wl1271_error("cannot power down because not in PLT "
1176 			     "state: %d", wl->state);
1177 		ret = -EBUSY;
1178 		goto out;
1179 	}
1180 
1181 	mutex_unlock(&wl->mutex);
1182 
1183 	wl1271_flush_deferred_work(wl);
1184 	cancel_work_sync(&wl->netstack_work);
1185 	cancel_work_sync(&wl->recovery_work);
1186 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1187 
1188 	mutex_lock(&wl->mutex);
1189 	wl1271_power_off(wl);
1190 	wl->flags = 0;
1191 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1192 	wl->state = WLCORE_STATE_OFF;
1193 	wl->plt = false;
1194 	wl->plt_mode = PLT_OFF;
1195 	wl->rx_counter = 0;
1196 	mutex_unlock(&wl->mutex);
1197 
1198 out:
1199 	return ret;
1200 }
1201 
1202 static void wl1271_op_tx(struct ieee80211_hw *hw,
1203 			 struct ieee80211_tx_control *control,
1204 			 struct sk_buff *skb)
1205 {
1206 	struct wl1271 *wl = hw->priv;
1207 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1208 	struct ieee80211_vif *vif = info->control.vif;
1209 	struct wl12xx_vif *wlvif = NULL;
1210 	unsigned long flags;
1211 	int q, mapping;
1212 	u8 hlid;
1213 
1214 	if (!vif) {
1215 		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1216 		ieee80211_free_txskb(hw, skb);
1217 		return;
1218 	}
1219 
1220 	wlvif = wl12xx_vif_to_data(vif);
1221 	mapping = skb_get_queue_mapping(skb);
1222 	q = wl1271_tx_get_queue(mapping);
1223 
1224 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1225 
1226 	spin_lock_irqsave(&wl->wl_lock, flags);
1227 
1228 	/*
1229 	 * drop the packet if the link is invalid or the queue is stopped
1230 	 * for any reason but watermark. Watermark is a "soft"-stop so we
1231 	 * allow these packets through.
1232 	 */
1233 	if (hlid == WL12XX_INVALID_LINK_ID ||
1234 	    (!test_bit(hlid, wlvif->links_map)) ||
1235 	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1236 	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1237 			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1238 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1239 		ieee80211_free_txskb(hw, skb);
1240 		goto out;
1241 	}
1242 
1243 	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1244 		     hlid, q, skb->len);
1245 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1246 
1247 	wl->tx_queue_count[q]++;
1248 	wlvif->tx_queue_count[q]++;
1249 
1250 	/*
1251 	 * The workqueue is slow to process the tx_queue and we need to stop
1252 	 * the queue here, otherwise the queue will get too long.
1253 	 */
1254 	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1255 	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1256 					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1257 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1258 		wlcore_stop_queue_locked(wl, wlvif, q,
1259 					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1260 	}
1261 
1262 	/*
1263 	 * The chip specific setup must run before the first TX packet -
1264 	 * before that, the tx_work will not be initialized!
1265 	 */
1266 
1267 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1268 	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1269 		ieee80211_queue_work(wl->hw, &wl->tx_work);
1270 
1271 out:
1272 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1273 }
1274 
1275 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1276 {
1277 	unsigned long flags;
1278 	int q;
1279 
1280 	/* no need to queue a new dummy packet if one is already pending */
1281 	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1282 		return 0;
1283 
1284 	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1285 
1286 	spin_lock_irqsave(&wl->wl_lock, flags);
1287 	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1288 	wl->tx_queue_count[q]++;
1289 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1290 
1291 	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1292 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1293 		return wlcore_tx_work_locked(wl);
1294 
1295 	/*
1296 	 * If the FW TX is busy, TX work will be scheduled by the threaded
1297 	 * interrupt handler function
1298 	 */
1299 	return 0;
1300 }
1301 
1302 /*
1303  * The size of the dummy packet should be at least 1400 bytes. However, in
1304  * order to minimize the number of bus transactions, aligning it to 512-byte
1305  * boundaries could be beneficial, performance-wise.
1306  */
1307 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
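/* i.e. 1400 rounded up to the next 512-byte boundary: 1536 bytes */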
1308 
1309 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1310 {
1311 	struct sk_buff *skb;
1312 	struct ieee80211_hdr_3addr *hdr;
1313 	unsigned int dummy_packet_size;
1314 
1315 	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1316 			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1317 
1318 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1319 	if (!skb) {
1320 		wl1271_warning("Failed to allocate a dummy packet skb");
1321 		return NULL;
1322 	}
1323 
1324 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1325 
1326 	hdr = skb_put_zero(skb, sizeof(*hdr));
1327 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1328 					 IEEE80211_STYPE_NULLFUNC |
1329 					 IEEE80211_FCTL_TODS);
1330 
1331 	skb_put_zero(skb, dummy_packet_size);
1332 
1333 	/* Dummy packets require the TID to be management */
1334 	skb->priority = WL1271_TID_MGMT;
1335 
1336 	/* Initialize all fields that might be used */
1337 	skb_set_queue_mapping(skb, 0);
1338 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1339 
1340 	return skb;
1341 }
1342 
1343 
1344 static int
1345 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1346 {
1347 	int num_fields = 0, in_field = 0, fields_size = 0;
1348 	int i, pattern_len = 0;
1349 
1350 	if (!p->mask) {
1351 		wl1271_warning("No mask in WoWLAN pattern");
1352 		return -EINVAL;
1353 	}
1354 
1355 	/*
1356 	 * The pattern is broken up into segments of bytes at different offsets
1357 	 * that need to be checked by the FW filter. Each segment is called
1358 	 * a field in the FW API. We verify that the total number of fields
1359 	 * required for this pattern won't exceed the FW limit (8), and
1360 	 * that the total fields buffer won't exceed the FW limit either.
1361 	 * Note that a pattern which crosses the Ethernet/IP header
1362 	 * boundary requires an additional field.
1363 	 */
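	/*
	 * Example: a contiguous run of mask bits that straddles the
	 * Ethernet/IP header boundary counts as two fields, and fields_size
	 * grows by the two segment lengths plus 2 * RX_FILTER_FIELD_OVERHEAD.
	 */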
1364 	for (i = 0; i < p->pattern_len; i++) {
1365 		if (test_bit(i, (unsigned long *)p->mask)) {
1366 			if (!in_field) {
1367 				in_field = 1;
1368 				pattern_len = 1;
1369 			} else {
1370 				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1371 					num_fields++;
1372 					fields_size += pattern_len +
1373 						RX_FILTER_FIELD_OVERHEAD;
1374 					pattern_len = 1;
1375 				} else
1376 					pattern_len++;
1377 			}
1378 		} else {
1379 			if (in_field) {
1380 				in_field = 0;
1381 				fields_size += pattern_len +
1382 					RX_FILTER_FIELD_OVERHEAD;
1383 				num_fields++;
1384 			}
1385 		}
1386 	}
1387 
1388 	if (in_field) {
1389 		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1390 		num_fields++;
1391 	}
1392 
1393 	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1394 		wl1271_warning("RX Filter too complex. Too many segments");
1395 		return -EINVAL;
1396 	}
1397 
1398 	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1399 		wl1271_warning("RX filter pattern is too big");
1400 		return -E2BIG;
1401 	}
1402 
1403 	return 0;
1404 }
1405 
1406 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1407 {
1408 	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1409 }
1410 
1411 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1412 {
1413 	int i;
1414 
1415 	if (filter == NULL)
1416 		return;
1417 
1418 	for (i = 0; i < filter->num_fields; i++)
1419 		kfree(filter->fields[i].pattern);
1420 
1421 	kfree(filter);
1422 }
1423 
1424 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1425 				 u16 offset, u8 flags,
1426 				 const u8 *pattern, u8 len)
1427 {
1428 	struct wl12xx_rx_filter_field *field;
1429 
1430 	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1431 		wl1271_warning("Max fields per RX filter. can't alloc another");
1432 		return -EINVAL;
1433 	}
1434 
1435 	field = &filter->fields[filter->num_fields];
1436 
1437 	field->pattern = kmemdup(pattern, len, GFP_KERNEL);
1438 	if (!field->pattern) {
1439 		wl1271_warning("Failed to allocate RX filter pattern");
1440 		return -ENOMEM;
1441 	}
1442 
1443 	filter->num_fields++;
1444 
1445 	field->offset = cpu_to_le16(offset);
1446 	field->flags = flags;
1447 	field->len = len;
1448 
1449 	return 0;
1450 }
1451 
1452 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1453 {
1454 	int i, fields_size = 0;
1455 
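	/*
	 * In the flattened form (see wl1271_rx_filter_flatten_fields()) the
	 * pattern bytes are stored in place of the 'pattern' pointer, hence
	 * the sizeof(u8 *) subtraction from each field's size.
	 */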
1456 	for (i = 0; i < filter->num_fields; i++)
1457 		fields_size += filter->fields[i].len +
1458 			sizeof(struct wl12xx_rx_filter_field) -
1459 			sizeof(u8 *);
1460 
1461 	return fields_size;
1462 }
1463 
1464 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1465 				    u8 *buf)
1466 {
1467 	int i;
1468 	struct wl12xx_rx_filter_field *field;
1469 
1470 	for (i = 0; i < filter->num_fields; i++) {
1471 		field = (struct wl12xx_rx_filter_field *)buf;
1472 
1473 		field->offset = filter->fields[i].offset;
1474 		field->flags = filter->fields[i].flags;
1475 		field->len = filter->fields[i].len;
1476 
1477 		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1478 		buf += sizeof(struct wl12xx_rx_filter_field) -
1479 			sizeof(u8 *) + field->len;
1480 	}
1481 }
1482 
1483 /*
1484  * Allocates an RX filter, returned through f,
1485  * which needs to be freed using wl1271_rx_filter_free()
1486  */
1487 static int
1488 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1489 					   struct wl12xx_rx_filter **f)
1490 {
1491 	int i, j, ret = 0;
1492 	struct wl12xx_rx_filter *filter;
1493 	u16 offset;
1494 	u8 flags, len;
1495 
1496 	filter = wl1271_rx_filter_alloc();
1497 	if (!filter) {
1498 		wl1271_warning("Failed to alloc rx filter");
1499 		ret = -ENOMEM;
1500 		goto err;
1501 	}
1502 
1503 	i = 0;
1504 	while (i < p->pattern_len) {
1505 		if (!test_bit(i, (unsigned long *)p->mask)) {
1506 			i++;
1507 			continue;
1508 		}
1509 
1510 		for (j = i; j < p->pattern_len; j++) {
1511 			if (!test_bit(j, (unsigned long *)p->mask))
1512 				break;
1513 
1514 			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1515 			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1516 				break;
1517 		}
1518 
1519 		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1520 			offset = i;
1521 			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1522 		} else {
1523 			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1524 			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1525 		}
1526 
1527 		len = j - i;
1528 
1529 		ret = wl1271_rx_filter_alloc_field(filter,
1530 						   offset,
1531 						   flags,
1532 						   &p->pattern[i], len);
1533 		if (ret)
1534 			goto err;
1535 
1536 		i = j;
1537 	}
1538 
1539 	filter->action = FILTER_SIGNAL;
1540 
1541 	*f = filter;
1542 	return 0;
1543 
1544 err:
1545 	wl1271_rx_filter_free(filter);
1546 	*f = NULL;
1547 
1548 	return ret;
1549 }
1550 
1551 static int wl1271_configure_wowlan(struct wl1271 *wl,
1552 				   struct cfg80211_wowlan *wow)
1553 {
1554 	int i, ret;
1555 
1556 	if (!wow || wow->any || !wow->n_patterns) {
1557 		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1558 							  FILTER_SIGNAL);
1559 		if (ret)
1560 			goto out;
1561 
1562 		ret = wl1271_rx_filter_clear_all(wl);
1563 		if (ret)
1564 			goto out;
1565 
1566 		return 0;
1567 	}
1568 
1569 	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1570 		return -EINVAL;
1571 
1572 	/* Validate all incoming patterns before clearing current FW state */
1573 	for (i = 0; i < wow->n_patterns; i++) {
1574 		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1575 		if (ret) {
1576 			wl1271_warning("Bad wowlan pattern %d", i);
1577 			return ret;
1578 		}
1579 	}
1580 
1581 	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1582 	if (ret)
1583 		goto out;
1584 
1585 	ret = wl1271_rx_filter_clear_all(wl);
1586 	if (ret)
1587 		goto out;
1588 
1589 	/* Translate WoWLAN patterns into filters */
1590 	for (i = 0; i < wow->n_patterns; i++) {
1591 		struct cfg80211_pkt_pattern *p;
1592 		struct wl12xx_rx_filter *filter = NULL;
1593 
1594 		p = &wow->patterns[i];
1595 
1596 		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1597 		if (ret) {
1598 			wl1271_warning("Failed to create an RX filter from "
1599 				       "wowlan pattern %d", i);
1600 			goto out;
1601 		}
1602 
1603 		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1604 
1605 		wl1271_rx_filter_free(filter);
1606 		if (ret)
1607 			goto out;
1608 	}
1609 
1610 	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1611 
1612 out:
1613 	return ret;
1614 }
1615 
1616 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1617 					struct wl12xx_vif *wlvif,
1618 					struct cfg80211_wowlan *wow)
1619 {
1620 	int ret = 0;
1621 
1622 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1623 		goto out;
1624 
1625 	ret = wl1271_configure_wowlan(wl, wow);
1626 	if (ret < 0)
1627 		goto out;
1628 
1629 	if ((wl->conf.conn.suspend_wake_up_event ==
1630 	     wl->conf.conn.wake_up_event) &&
1631 	    (wl->conf.conn.suspend_listen_interval ==
1632 	     wl->conf.conn.listen_interval))
1633 		goto out;
1634 
1635 	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1636 				    wl->conf.conn.suspend_wake_up_event,
1637 				    wl->conf.conn.suspend_listen_interval);
1638 
1639 	if (ret < 0)
1640 		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1641 out:
1642 	return ret;
1643 
1644 }
1645 
1646 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1647 					struct wl12xx_vif *wlvif,
1648 					struct cfg80211_wowlan *wow)
1649 {
1650 	int ret = 0;
1651 
1652 	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1653 		goto out;
1654 
1655 	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1656 	if (ret < 0)
1657 		goto out;
1658 
1659 	ret = wl1271_configure_wowlan(wl, wow);
1660 	if (ret < 0)
1661 		goto out;
1662 
1663 out:
1664 	return ret;
1665 
1666 }
1667 
1668 static int wl1271_configure_suspend(struct wl1271 *wl,
1669 				    struct wl12xx_vif *wlvif,
1670 				    struct cfg80211_wowlan *wow)
1671 {
1672 	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1673 		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1674 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1675 		return wl1271_configure_suspend_ap(wl, wlvif, wow);
1676 	return 0;
1677 }
1678 
1679 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1680 {
1681 	int ret = 0;
1682 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1683 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1684 
1685 	if ((!is_ap) && (!is_sta))
1686 		return;
1687 
1688 	if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1689 	    (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1690 		return;
1691 
1692 	wl1271_configure_wowlan(wl, NULL);
1693 
1694 	if (is_sta) {
1695 		if ((wl->conf.conn.suspend_wake_up_event ==
1696 		     wl->conf.conn.wake_up_event) &&
1697 		    (wl->conf.conn.suspend_listen_interval ==
1698 		     wl->conf.conn.listen_interval))
1699 			return;
1700 
1701 		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1702 				    wl->conf.conn.wake_up_event,
1703 				    wl->conf.conn.listen_interval);
1704 
1705 		if (ret < 0)
1706 			wl1271_error("resume: wake up conditions failed: %d",
1707 				     ret);
1708 
1709 	} else if (is_ap) {
1710 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1711 	}
1712 }
1713 
1714 static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
1715 					    struct cfg80211_wowlan *wow)
1716 {
1717 	struct wl1271 *wl = hw->priv;
1718 	struct wl12xx_vif *wlvif;
1719 	unsigned long flags;
1720 	int ret;
1721 
1722 	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1723 	WARN_ON(!wow);
1724 
1725 	/* we want to perform the recovery before suspending */
1726 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1727 		wl1271_warning("postponing suspend to perform recovery");
1728 		return -EBUSY;
1729 	}
1730 
1731 	wl1271_tx_flush(wl);
1732 
1733 	mutex_lock(&wl->mutex);
1734 
1735 	ret = pm_runtime_get_sync(wl->dev);
1736 	if (ret < 0) {
1737 		pm_runtime_put_noidle(wl->dev);
1738 		mutex_unlock(&wl->mutex);
1739 		return ret;
1740 	}
1741 
1742 	wl->wow_enabled = true;
1743 	wl12xx_for_each_wlvif(wl, wlvif) {
1744 		if (wlcore_is_p2p_mgmt(wlvif))
1745 			continue;
1746 
1747 		ret = wl1271_configure_suspend(wl, wlvif, wow);
1748 		if (ret < 0) {
1749 			mutex_unlock(&wl->mutex);
1750 			wl1271_warning("couldn't prepare device to suspend");
1751 			return ret;
1752 		}
1753 	}
1754 
1755 	/* disable fast link flow control notifications from FW */
1756 	ret = wlcore_hw_interrupt_notify(wl, false);
1757 	if (ret < 0)
1758 		goto out_sleep;
1759 
1760 	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1761 	ret = wlcore_hw_rx_ba_filter(wl,
1762 				     !!wl->conf.conn.suspend_rx_ba_activity);
1763 	if (ret < 0)
1764 		goto out_sleep;
1765 
1766 out_sleep:
1767 	pm_runtime_put_noidle(wl->dev);
1768 	mutex_unlock(&wl->mutex);
1769 
1770 	if (ret < 0) {
1771 		wl1271_warning("couldn't prepare device to suspend");
1772 		return ret;
1773 	}
1774 
1775 	/* flush any remaining work */
1776 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1777 
1778 	flush_work(&wl->tx_work);
1779 
1780 	/*
1781 	 * Cancel the watchdog even if above tx_flush failed. We will detect
1782 	 * it on resume anyway.
1783 	 */
1784 	cancel_delayed_work(&wl->tx_watchdog_work);
1785 
1786 	/*
1787 	 * set suspended flag to avoid triggering a new threaded_irq
1788 	 * work.
1789 	 */
1790 	spin_lock_irqsave(&wl->wl_lock, flags);
1791 	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1792 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1793 
1794 	return pm_runtime_force_suspend(wl->dev);
1795 }
1796 
1797 static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
1798 {
1799 	struct wl1271 *wl = hw->priv;
1800 	struct wl12xx_vif *wlvif;
1801 	unsigned long flags;
1802 	bool run_irq_work = false, pending_recovery;
1803 	int ret;
1804 
1805 	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1806 		     wl->wow_enabled);
1807 	WARN_ON(!wl->wow_enabled);
1808 
1809 	ret = pm_runtime_force_resume(wl->dev);
1810 	if (ret < 0) {
1811 		wl1271_error("ELP wakeup failure!");
1812 		goto out_sleep;
1813 	}
1814 
1815 	/*
1816 	 * re-enable irq_work enqueuing, and call irq_work directly if
1817 	 * there is a pending work.
1818 	 */
1819 	spin_lock_irqsave(&wl->wl_lock, flags);
1820 	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1821 	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1822 		run_irq_work = true;
1823 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1824 
1825 	mutex_lock(&wl->mutex);
1826 
1827 	/* test the recovery flag before calling any SDIO functions */
1828 	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1829 				    &wl->flags);
1830 
1831 	if (run_irq_work) {
1832 		wl1271_debug(DEBUG_MAC80211,
1833 			     "run postponed irq_work directly");
1834 
1835 		/* don't talk to the HW if recovery is pending */
1836 		if (!pending_recovery) {
1837 			ret = wlcore_irq_locked(wl);
1838 			if (ret)
1839 				wl12xx_queue_recovery_work(wl);
1840 		}
1841 
1842 		wlcore_enable_interrupts(wl);
1843 	}
1844 
1845 	if (pending_recovery) {
1846 		wl1271_warning("queuing forgotten recovery on resume");
1847 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1848 		goto out_sleep;
1849 	}
1850 
1851 	ret = pm_runtime_get_sync(wl->dev);
1852 	if (ret < 0) {
1853 		pm_runtime_put_noidle(wl->dev);
1854 		goto out;
1855 	}
1856 
1857 	wl12xx_for_each_wlvif(wl, wlvif) {
1858 		if (wlcore_is_p2p_mgmt(wlvif))
1859 			continue;
1860 
1861 		wl1271_configure_resume(wl, wlvif);
1862 	}
1863 
1864 	ret = wlcore_hw_interrupt_notify(wl, true);
1865 	if (ret < 0)
1866 		goto out_sleep;
1867 
1868 	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1869 	ret = wlcore_hw_rx_ba_filter(wl, false);
1870 	if (ret < 0)
1871 		goto out_sleep;
1872 
1873 out_sleep:
1874 	pm_runtime_mark_last_busy(wl->dev);
1875 	pm_runtime_put_autosuspend(wl->dev);
1876 
1877 out:
1878 	wl->wow_enabled = false;
1879 
1880 	/*
1881 	 * Set a flag to re-init the watchdog on the first Tx after resume.
1882 	 * That way we avoid possible conditions where Tx-complete interrupts
1883 	 * fail to arrive and we perform a spurious recovery.
1884 	 */
1885 	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1886 	mutex_unlock(&wl->mutex);
1887 
1888 	return 0;
1889 }
1890 
1891 static int wl1271_op_start(struct ieee80211_hw *hw)
1892 {
1893 	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1894 
1895 	/*
1896 	 * We have to delay the booting of the hardware because
1897 	 * we need to know the local MAC address before downloading and
1898 	 * initializing the firmware. The MAC address cannot be changed
1899 	 * after boot, and without the proper MAC address, the firmware
1900 	 * will not function properly.
1901 	 *
1902 	 * The MAC address is first known when the corresponding interface
1903 	 * is added. That is where we will initialize the hardware.
1904 	 */
1905 
1906 	return 0;
1907 }
1908 
1909 static void wlcore_op_stop_locked(struct wl1271 *wl)
1910 {
1911 	int i;
1912 
1913 	if (wl->state == WLCORE_STATE_OFF) {
1914 		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1915 					&wl->flags))
1916 			wlcore_enable_interrupts(wl);
1917 
1918 		return;
1919 	}
1920 
1921 	/*
1922 	 * this must be before the cancel_work calls below, so that the work
1923 	 * functions don't perform further work.
1924 	 */
1925 	wl->state = WLCORE_STATE_OFF;
1926 
1927 	/*
1928 	 * Use the nosync variant to disable interrupts, so the mutex could be
1929 	 * held while doing so without deadlocking.
1930 	 */
1931 	wlcore_disable_interrupts_nosync(wl);
1932 
1933 	mutex_unlock(&wl->mutex);
1934 
1935 	wlcore_synchronize_interrupts(wl);
1936 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1937 		cancel_work_sync(&wl->recovery_work);
1938 	wl1271_flush_deferred_work(wl);
1939 	cancel_delayed_work_sync(&wl->scan_complete_work);
1940 	cancel_work_sync(&wl->netstack_work);
1941 	cancel_work_sync(&wl->tx_work);
1942 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1943 
1944 	/* let's notify MAC80211 about the remaining pending TX frames */
1945 	mutex_lock(&wl->mutex);
1946 	wl12xx_tx_reset(wl);
1947 
1948 	wl1271_power_off(wl);
1949 	/*
1950 	 * In case a recovery was scheduled, interrupts were disabled to avoid
1951 	 * an interrupt storm. Now that the power is down, it is safe to
1952 	 * re-enable interrupts to balance the disable depth
1953 	 */
1954 	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1955 		wlcore_enable_interrupts(wl);
1956 
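	/* reset the global driver state to its defaults */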
1957 	wl->band = NL80211_BAND_2GHZ;
1958 
1959 	wl->rx_counter = 0;
1960 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1961 	wl->channel_type = NL80211_CHAN_NO_HT;
1962 	wl->tx_blocks_available = 0;
1963 	wl->tx_allocated_blocks = 0;
1964 	wl->tx_results_count = 0;
1965 	wl->tx_packets_count = 0;
1966 	wl->time_offset = 0;
1967 	wl->ap_fw_ps_map = 0;
1968 	wl->ap_ps_map = 0;
1969 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1970 	memset(wl->roles_map, 0, sizeof(wl->roles_map));
1971 	memset(wl->links_map, 0, sizeof(wl->links_map));
1972 	memset(wl->roc_map, 0, sizeof(wl->roc_map));
1973 	memset(wl->session_ids, 0, sizeof(wl->session_ids));
1974 	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1975 	wl->active_sta_count = 0;
1976 	wl->active_link_count = 0;
1977 
1978 	/* The system link is always allocated */
1979 	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1980 	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1981 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1982 
1983 	/*
1984 	 * this is performed after the cancel_work calls and the associated
1985 	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1986 	 * get executed before all these vars have been reset.
1987 	 */
1988 	wl->flags = 0;
1989 
1990 	wl->tx_blocks_freed = 0;
1991 
1992 	for (i = 0; i < NUM_TX_QUEUES; i++) {
1993 		wl->tx_pkts_freed[i] = 0;
1994 		wl->tx_allocated_pkts[i] = 0;
1995 	}
1996 
1997 	wl1271_debugfs_reset(wl);
1998 
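	/* free the fw status, tx result and memory map buffers */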
1999 	kfree(wl->raw_fw_status);
2000 	wl->raw_fw_status = NULL;
2001 	kfree(wl->fw_status);
2002 	wl->fw_status = NULL;
2003 	kfree(wl->tx_res_if);
2004 	wl->tx_res_if = NULL;
2005 	kfree(wl->target_mem_map);
2006 	wl->target_mem_map = NULL;
2007 
2008 	/*
	 * FW channels must be re-calibrated after recovery, so save the
	 * current Reg-Domain channel configuration and clear it.
2011 	 */
2012 	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2013 	       sizeof(wl->reg_ch_conf_pending));
2014 	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2015 }
2016 
2017 static void wlcore_op_stop(struct ieee80211_hw *hw)
2018 {
2019 	struct wl1271 *wl = hw->priv;
2020 
2021 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2022 
2023 	mutex_lock(&wl->mutex);
2024 
2025 	wlcore_op_stop_locked(wl);
2026 
2027 	mutex_unlock(&wl->mutex);
2028 }
2029 
2030 static void wlcore_channel_switch_work(struct work_struct *work)
2031 {
2032 	struct delayed_work *dwork;
2033 	struct wl1271 *wl;
2034 	struct ieee80211_vif *vif;
2035 	struct wl12xx_vif *wlvif;
2036 	int ret;
2037 
2038 	dwork = to_delayed_work(work);
2039 	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2040 	wl = wlvif->wl;
2041 
2042 	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2043 
2044 	mutex_lock(&wl->mutex);
2045 
2046 	if (unlikely(wl->state != WLCORE_STATE_ON))
2047 		goto out;
2048 
2049 	/* check the channel switch is still ongoing */
2050 	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2051 		goto out;
2052 
2053 	vif = wl12xx_wlvif_to_vif(wlvif);
2054 	ieee80211_chswitch_done(vif, false);
2055 
2056 	ret = pm_runtime_get_sync(wl->dev);
2057 	if (ret < 0) {
2058 		pm_runtime_put_noidle(wl->dev);
2059 		goto out;
2060 	}
2061 
2062 	wl12xx_cmd_stop_channel_switch(wl, wlvif);
2063 
2064 	pm_runtime_mark_last_busy(wl->dev);
2065 	pm_runtime_put_autosuspend(wl->dev);
2066 out:
2067 	mutex_unlock(&wl->mutex);
2068 }
2069 
2070 static void wlcore_connection_loss_work(struct work_struct *work)
2071 {
2072 	struct delayed_work *dwork;
2073 	struct wl1271 *wl;
2074 	struct ieee80211_vif *vif;
2075 	struct wl12xx_vif *wlvif;
2076 
2077 	dwork = to_delayed_work(work);
2078 	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2079 	wl = wlvif->wl;
2080 
2081 	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2082 
2083 	mutex_lock(&wl->mutex);
2084 
2085 	if (unlikely(wl->state != WLCORE_STATE_ON))
2086 		goto out;
2087 
2088 	/* Call mac80211 connection loss */
2089 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2090 		goto out;
2091 
2092 	vif = wl12xx_wlvif_to_vif(wlvif);
2093 	ieee80211_connection_loss(vif);
2094 out:
2095 	mutex_unlock(&wl->mutex);
2096 }
2097 
2098 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2099 {
2100 	struct delayed_work *dwork;
2101 	struct wl1271 *wl;
2102 	struct wl12xx_vif *wlvif;
2103 	unsigned long time_spare;
2104 	int ret;
2105 
2106 	dwork = to_delayed_work(work);
2107 	wlvif = container_of(dwork, struct wl12xx_vif,
2108 			     pending_auth_complete_work);
2109 	wl = wlvif->wl;
2110 
2111 	mutex_lock(&wl->mutex);
2112 
2113 	if (unlikely(wl->state != WLCORE_STATE_ON))
2114 		goto out;
2115 
2116 	/*
2117 	 * Make sure a second really passed since the last auth reply. Maybe
2118 	 * a second auth reply arrived while we were stuck on the mutex.
2119 	 * Check for a little less than the timeout to protect from scheduler
2120 	 * irregularities.
2121 	 */
2122 	time_spare = jiffies +
2123 			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2124 	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2125 		goto out;
2126 
2127 	ret = pm_runtime_get_sync(wl->dev);
2128 	if (ret < 0) {
2129 		pm_runtime_put_noidle(wl->dev);
2130 		goto out;
2131 	}
2132 
2133 	/* cancel the ROC if active */
2134 	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2135 
2136 	pm_runtime_mark_last_busy(wl->dev);
2137 	pm_runtime_put_autosuspend(wl->dev);
2138 out:
2139 	mutex_unlock(&wl->mutex);
2140 }
2141 
2142 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2143 {
2144 	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2145 					WL12XX_MAX_RATE_POLICIES);
2146 	if (policy >= WL12XX_MAX_RATE_POLICIES)
2147 		return -EBUSY;
2148 
2149 	__set_bit(policy, wl->rate_policies_map);
2150 	*idx = policy;
2151 	return 0;
2152 }
2153 
2154 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2155 {
2156 	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2157 		return;
2158 
2159 	__clear_bit(*idx, wl->rate_policies_map);
2160 	*idx = WL12XX_MAX_RATE_POLICIES;
2161 }
2162 
2163 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2164 {
2165 	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2166 					WLCORE_MAX_KLV_TEMPLATES);
2167 	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2168 		return -EBUSY;
2169 
2170 	__set_bit(policy, wl->klv_templates_map);
2171 	*idx = policy;
2172 	return 0;
2173 }
2174 
2175 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2176 {
2177 	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2178 		return;
2179 
2180 	__clear_bit(*idx, wl->klv_templates_map);
2181 	*idx = WLCORE_MAX_KLV_TEMPLATES;
2182 }
2183 
2184 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2185 {
2186 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2187 
2188 	switch (wlvif->bss_type) {
2189 	case BSS_TYPE_AP_BSS:
2190 		if (wlvif->p2p)
2191 			return WL1271_ROLE_P2P_GO;
2192 		else if (ieee80211_vif_is_mesh(vif))
2193 			return WL1271_ROLE_MESH_POINT;
2194 		else
2195 			return WL1271_ROLE_AP;
2196 
2197 	case BSS_TYPE_STA_BSS:
2198 		if (wlvif->p2p)
2199 			return WL1271_ROLE_P2P_CL;
2200 		else
2201 			return WL1271_ROLE_STA;
2202 
2203 	case BSS_TYPE_IBSS:
2204 		return WL1271_ROLE_IBSS;
2205 
2206 	default:
2207 		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2208 	}
2209 	return WL12XX_INVALID_ROLE_TYPE;
2210 }
2211 
2212 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2213 {
2214 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2215 	int i;
2216 
2217 	/* clear everything but the persistent data */
2218 	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2219 
2220 	switch (ieee80211_vif_type_p2p(vif)) {
2221 	case NL80211_IFTYPE_P2P_CLIENT:
2222 		wlvif->p2p = 1;
2223 		/* fall-through */
2224 	case NL80211_IFTYPE_STATION:
2225 	case NL80211_IFTYPE_P2P_DEVICE:
2226 		wlvif->bss_type = BSS_TYPE_STA_BSS;
2227 		break;
2228 	case NL80211_IFTYPE_ADHOC:
2229 		wlvif->bss_type = BSS_TYPE_IBSS;
2230 		break;
2231 	case NL80211_IFTYPE_P2P_GO:
2232 		wlvif->p2p = 1;
2233 		/* fall-through */
2234 	case NL80211_IFTYPE_AP:
2235 	case NL80211_IFTYPE_MESH_POINT:
2236 		wlvif->bss_type = BSS_TYPE_AP_BSS;
2237 		break;
2238 	default:
2239 		wlvif->bss_type = MAX_BSS_TYPE;
2240 		return -EOPNOTSUPP;
2241 	}
2242 
2243 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2244 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2245 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2246 
2247 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2248 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2249 		/* init sta/ibss data */
2250 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2251 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2252 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2253 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2254 		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2255 		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2256 		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2257 		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2258 	} else {
2259 		/* init ap data */
2260 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2261 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2262 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2263 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2264 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2265 			wl12xx_allocate_rate_policy(wl,
2266 						&wlvif->ap.ucast_rate_idx[i]);
2267 		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2268 		/*
2269 		 * TODO: check if basic_rate shouldn't be
2270 		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2271 		 * instead (the same thing for STA above).
2272 		*/
2273 		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2274 		/* TODO: this seems to be used only for STA, check it */
2275 		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2276 	}
2277 
2278 	wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2279 	wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2280 	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2281 
2282 	/*
2283 	 * mac80211 configures some values globally, while we treat them
2284 	 * per-interface. thus, on init, we have to copy them from wl
2285 	 */
2286 	wlvif->band = wl->band;
2287 	wlvif->channel = wl->channel;
2288 	wlvif->power_level = wl->power_level;
2289 	wlvif->channel_type = wl->channel_type;
2290 
2291 	INIT_WORK(&wlvif->rx_streaming_enable_work,
2292 		  wl1271_rx_streaming_enable_work);
2293 	INIT_WORK(&wlvif->rx_streaming_disable_work,
2294 		  wl1271_rx_streaming_disable_work);
2295 	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2296 	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2297 			  wlcore_channel_switch_work);
2298 	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2299 			  wlcore_connection_loss_work);
2300 	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2301 			  wlcore_pending_auth_complete_work);
2302 	INIT_LIST_HEAD(&wlvif->list);
2303 
2304 	timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
2305 	return 0;
2306 }
2307 
2308 static int wl12xx_init_fw(struct wl1271 *wl)
2309 {
2310 	int retries = WL1271_BOOT_RETRIES;
2311 	bool booted = false;
2312 	struct wiphy *wiphy = wl->hw->wiphy;
2313 	int ret;
2314 
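	/* power the chip on and boot the fw, retrying a few times on failure */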
2315 	while (retries) {
2316 		retries--;
2317 		ret = wl12xx_chip_wakeup(wl, false);
2318 		if (ret < 0)
2319 			goto power_off;
2320 
2321 		ret = wl->ops->boot(wl);
2322 		if (ret < 0)
2323 			goto power_off;
2324 
2325 		ret = wl1271_hw_init(wl);
2326 		if (ret < 0)
2327 			goto irq_disable;
2328 
2329 		booted = true;
2330 		break;
2331 
2332 irq_disable:
2333 		mutex_unlock(&wl->mutex);
2334 		/* Unlocking the mutex in the middle of handling is
2335 		   inherently unsafe. In this case we deem it safe to do,
2336 		   because we need to let any possibly pending IRQ out of
2337 		   the system (and while we are WLCORE_STATE_OFF the IRQ
2338 		   work function will not do anything.) Also, any other
2339 		   possible concurrent operations will fail due to the
2340 		   current state, hence the wl1271 struct should be safe. */
2341 		wlcore_disable_interrupts(wl);
2342 		wl1271_flush_deferred_work(wl);
2343 		cancel_work_sync(&wl->netstack_work);
2344 		mutex_lock(&wl->mutex);
2345 power_off:
2346 		wl1271_power_off(wl);
2347 	}
2348 
2349 	if (!booted) {
2350 		wl1271_error("firmware boot failed despite %d retries",
2351 			     WL1271_BOOT_RETRIES);
2352 		goto out;
2353 	}
2354 
2355 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2356 
2357 	/* update hw/fw version info in wiphy struct */
2358 	wiphy->hw_version = wl->chip.id;
2359 	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2360 		sizeof(wiphy->fw_version));
2361 
2362 	/*
2363 	 * Now we know if 11a is supported (info from the NVS), so disable
2364 	 * 11a channels if not supported
2365 	 */
2366 	if (!wl->enable_11a)
2367 		wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2368 
2369 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2370 		     wl->enable_11a ? "" : "not ");
2371 
2372 	wl->state = WLCORE_STATE_ON;
2373 out:
2374 	return ret;
2375 }
2376 
2377 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2378 {
2379 	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2380 }
2381 
2382 /*
2383  * Check whether a fw switch (i.e. moving from one loaded
2384  * fw to another) is needed. This function is also responsible
2385  * for updating wl->last_vif_count, so it must be called before
2386  * loading a non-plt fw (so the correct fw (single-role/multi-role)
2387  * will be used).
2388  */
2389 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2390 				  struct vif_counter_data vif_counter_data,
2391 				  bool add)
2392 {
2393 	enum wl12xx_fw_type current_fw = wl->fw_type;
2394 	u8 vif_count = vif_counter_data.counter;
2395 
2396 	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2397 		return false;
2398 
2399 	/* increase the vif count if this is a new vif */
2400 	if (add && !vif_counter_data.cur_vif_running)
2401 		vif_count++;
2402 
2403 	wl->last_vif_count = vif_count;
2404 
2405 	/* no need for fw change if the device is OFF */
2406 	if (wl->state == WLCORE_STATE_OFF)
2407 		return false;
2408 
2409 	/* no need for fw change if a single fw is used */
2410 	if (!wl->mr_fw_name)
2411 		return false;
2412 
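	/* switch fw if the vif count crossed the single/multi-role boundary */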
2413 	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2414 		return true;
2415 	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2416 		return true;
2417 
2418 	return false;
2419 }
2420 
2421 /*
2422  * Enter "forced psm". Make sure the sta is in psm against the ap,
2423  * to make the fw switch a bit more disconnection-persistent.
2424  */
2425 static void wl12xx_force_active_psm(struct wl1271 *wl)
2426 {
2427 	struct wl12xx_vif *wlvif;
2428 
2429 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2430 		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2431 	}
2432 }
2433 
2434 struct wlcore_hw_queue_iter_data {
2435 	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2436 	/* current vif */
2437 	struct ieee80211_vif *vif;
2438 	/* is the current vif among those iterated */
2439 	bool cur_running;
2440 };
2441 
2442 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2443 				 struct ieee80211_vif *vif)
2444 {
2445 	struct wlcore_hw_queue_iter_data *iter_data = data;
2446 
2447 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2448 	    WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2449 		return;
2450 
2451 	if (iter_data->cur_running || vif == iter_data->vif) {
2452 		iter_data->cur_running = true;
2453 		return;
2454 	}
2455 
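	/* another running vif: mark its hw queue block as taken */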
2456 	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2457 }
2458 
2459 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2460 					 struct wl12xx_vif *wlvif)
2461 {
2462 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2463 	struct wlcore_hw_queue_iter_data iter_data = {};
2464 	int i, q_base;
2465 
2466 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2467 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2468 		return 0;
2469 	}
2470 
2471 	iter_data.vif = vif;
2472 
2473 	/* mark all bits taken by active interfaces */
2474 	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2475 					IEEE80211_IFACE_ITER_RESUME_ALL,
2476 					wlcore_hw_queue_iter, &iter_data);
2477 
2478 	/* the current vif is already running in mac80211 (resume/recovery) */
2479 	if (iter_data.cur_running) {
2480 		wlvif->hw_queue_base = vif->hw_queue[0];
2481 		wl1271_debug(DEBUG_MAC80211,
2482 			     "using pre-allocated hw queue base %d",
2483 			     wlvif->hw_queue_base);
2484 
		/* the interface might have changed type */
2486 		goto adjust_cab_queue;
2487 	}
2488 
2489 	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2490 				     WLCORE_NUM_MAC_ADDRESSES);
2491 	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2492 		return -EBUSY;
2493 
2494 	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2495 	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2496 		     wlvif->hw_queue_base);
2497 
2498 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2499 		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2500 		/* register hw queues in mac80211 */
2501 		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2502 	}
2503 
2504 adjust_cab_queue:
2505 	/* the last places are reserved for cab queues per interface */
2506 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2507 		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2508 				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2509 	else
2510 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2511 
2512 	return 0;
2513 }
2514 
2515 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2516 				   struct ieee80211_vif *vif)
2517 {
2518 	struct wl1271 *wl = hw->priv;
2519 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2520 	struct vif_counter_data vif_count;
2521 	int ret = 0;
2522 	u8 role_type;
2523 
2524 	if (wl->plt) {
2525 		wl1271_error("Adding Interface not allowed while in PLT mode");
2526 		return -EBUSY;
2527 	}
2528 
2529 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2530 			     IEEE80211_VIF_SUPPORTS_UAPSD |
2531 			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2532 
2533 	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2534 		     ieee80211_vif_type_p2p(vif), vif->addr);
2535 
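	/* count the running vifs; used later to pick single- vs multi-role fw */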
2536 	wl12xx_get_vif_count(hw, vif, &vif_count);
2537 
2538 	mutex_lock(&wl->mutex);
2539 
2540 	/*
	 * in some rare HW recovery corner cases it's possible to get here
	 * before __wl1271_op_remove_interface is complete, so opt out if
	 * that is the case.
2544 	 */
2545 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2546 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2547 		ret = -EBUSY;
2548 		goto out;
2549 	}
2550 
2551 
2552 	ret = wl12xx_init_vif_data(wl, vif);
2553 	if (ret < 0)
2554 		goto out;
2555 
2556 	wlvif->wl = wl;
2557 	role_type = wl12xx_get_role_type(wl, wlvif);
2558 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2559 		ret = -EINVAL;
2560 		goto out;
2561 	}
2562 
2563 	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2564 	if (ret < 0)
2565 		goto out;
2566 
2567 	/*
	 * TODO: once the nvs issue is solved, move this block to start(),
	 * and make sure the driver is ON here.
2570 	 */
2571 	if (wl->state == WLCORE_STATE_OFF) {
2572 		/*
2573 		 * we still need this in order to configure the fw
2574 		 * while uploading the nvs
2575 		 */
2576 		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2577 
2578 		ret = wl12xx_init_fw(wl);
2579 		if (ret < 0)
2580 			goto out;
2581 	}
2582 
2583 	/*
2584 	 * Call runtime PM only after possible wl12xx_init_fw() above
2585 	 * is done. Otherwise we do not have interrupts enabled.
2586 	 */
2587 	ret = pm_runtime_get_sync(wl->dev);
2588 	if (ret < 0) {
2589 		pm_runtime_put_noidle(wl->dev);
2590 		goto out_unlock;
2591 	}
2592 
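	/*
	 * a different (single/multi-role) fw is needed, so restart the
	 * device through an intended recovery
	 */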
2593 	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2594 		wl12xx_force_active_psm(wl);
2595 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2596 		mutex_unlock(&wl->mutex);
2597 		wl1271_recovery_work(&wl->recovery_work);
2598 		return 0;
2599 	}
2600 
2601 	if (!wlcore_is_p2p_mgmt(wlvif)) {
2602 		ret = wl12xx_cmd_role_enable(wl, vif->addr,
2603 					     role_type, &wlvif->role_id);
2604 		if (ret < 0)
2605 			goto out;
2606 
2607 		ret = wl1271_init_vif_specific(wl, vif);
2608 		if (ret < 0)
2609 			goto out;
2610 
2611 	} else {
2612 		ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2613 					     &wlvif->dev_role_id);
2614 		if (ret < 0)
2615 			goto out;
2616 
2617 		/* needed mainly for configuring rate policies */
2618 		ret = wl1271_sta_hw_init(wl, wlvif);
2619 		if (ret < 0)
2620 			goto out;
2621 	}
2622 
2623 	list_add(&wlvif->list, &wl->wlvif_list);
2624 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2625 
2626 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2627 		wl->ap_count++;
2628 	else
2629 		wl->sta_count++;
2630 out:
2631 	pm_runtime_mark_last_busy(wl->dev);
2632 	pm_runtime_put_autosuspend(wl->dev);
2633 out_unlock:
2634 	mutex_unlock(&wl->mutex);
2635 
2636 	return ret;
2637 }
2638 
2639 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2640 					 struct ieee80211_vif *vif,
2641 					 bool reset_tx_queues)
2642 {
2643 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2644 	int i, ret;
2645 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2646 
2647 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2648 
2649 	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2650 		return;
2651 
2652 	/* because of hardware recovery, we may get here twice */
2653 	if (wl->state == WLCORE_STATE_OFF)
2654 		return;
2655 
2656 	wl1271_info("down");
2657 
2658 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2659 	    wl->scan_wlvif == wlvif) {
2660 		struct cfg80211_scan_info info = {
2661 			.aborted = true,
2662 		};
2663 
2664 		/*
2665 		 * Rearm the tx watchdog just before idling scan. This
		 * prevents just-finished scans from triggering the watchdog.
2667 		 */
2668 		wl12xx_rearm_tx_watchdog_locked(wl);
2669 
2670 		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2671 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2672 		wl->scan_wlvif = NULL;
2673 		wl->scan.req = NULL;
2674 		ieee80211_scan_completed(wl->hw, &info);
2675 	}
2676 
2677 	if (wl->sched_vif == wlvif)
2678 		wl->sched_vif = NULL;
2679 
2680 	if (wl->roc_vif == vif) {
2681 		wl->roc_vif = NULL;
2682 		ieee80211_remain_on_channel_expired(wl->hw);
2683 	}
2684 
2685 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2686 		/* disable active roles */
2687 		ret = pm_runtime_get_sync(wl->dev);
2688 		if (ret < 0) {
2689 			pm_runtime_put_noidle(wl->dev);
2690 			goto deinit;
2691 		}
2692 
2693 		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2694 		    wlvif->bss_type == BSS_TYPE_IBSS) {
2695 			if (wl12xx_dev_role_started(wlvif))
2696 				wl12xx_stop_dev(wl, wlvif);
2697 		}
2698 
2699 		if (!wlcore_is_p2p_mgmt(wlvif)) {
2700 			ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2701 			if (ret < 0)
2702 				goto deinit;
2703 		} else {
2704 			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2705 			if (ret < 0)
2706 				goto deinit;
2707 		}
2708 
2709 		pm_runtime_mark_last_busy(wl->dev);
2710 		pm_runtime_put_autosuspend(wl->dev);
2711 	}
2712 deinit:
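	/* drop any frames still queued for this vif */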
2713 	wl12xx_tx_reset_wlvif(wl, wlvif);
2714 
2715 	/* clear all hlids (except system_hlid) */
2716 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2717 
2718 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2719 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2720 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2721 		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2722 		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2723 		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2724 		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2725 	} else {
2726 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2727 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2728 		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2729 		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2730 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2731 			wl12xx_free_rate_policy(wl,
2732 						&wlvif->ap.ucast_rate_idx[i]);
2733 		wl1271_free_ap_keys(wl, wlvif);
2734 	}
2735 
2736 	dev_kfree_skb(wlvif->probereq);
2737 	wlvif->probereq = NULL;
2738 	if (wl->last_wlvif == wlvif)
2739 		wl->last_wlvif = NULL;
2740 	list_del(&wlvif->list);
2741 	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2742 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2743 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2744 
2745 	if (is_ap)
2746 		wl->ap_count--;
2747 	else
2748 		wl->sta_count--;
2749 
2750 	/*
	 * Last AP removed, but stations remain: configure sleep auth
	 * according to the STA setting. Don't do this on unintended recovery.
2753 	 */
2754 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2755 	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2756 		goto unlock;
2757 
2758 	if (wl->ap_count == 0 && is_ap) {
2759 		/* mask ap events */
2760 		wl->event_mask &= ~wl->ap_event_mask;
2761 		wl1271_event_unmask(wl);
2762 	}
2763 
2764 	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2765 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
		/* Configure sleep auth according to the debugfs value */
2767 		if (sta_auth != WL1271_PSM_ILLEGAL)
2768 			wl1271_acx_sleep_auth(wl, sta_auth);
2769 		/* Configure for ELP power saving */
2770 		else
2771 			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2772 	}
2773 
2774 unlock:
2775 	mutex_unlock(&wl->mutex);
2776 
2777 	del_timer_sync(&wlvif->rx_streaming_timer);
2778 	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2779 	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2780 	cancel_work_sync(&wlvif->rc_update_work);
2781 	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2782 	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2783 	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2784 
2785 	mutex_lock(&wl->mutex);
2786 }
2787 
2788 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2789 				       struct ieee80211_vif *vif)
2790 {
2791 	struct wl1271 *wl = hw->priv;
2792 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2793 	struct wl12xx_vif *iter;
2794 	struct vif_counter_data vif_count;
2795 
2796 	wl12xx_get_vif_count(hw, vif, &vif_count);
2797 	mutex_lock(&wl->mutex);
2798 
2799 	if (wl->state == WLCORE_STATE_OFF ||
2800 	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2801 		goto out;
2802 
2803 	/*
2804 	 * wl->vif can be null here if someone shuts down the interface
2805 	 * just when hardware recovery has been started.
2806 	 */
2807 	wl12xx_for_each_wlvif(wl, iter) {
2808 		if (iter != wlvif)
2809 			continue;
2810 
2811 		__wl1271_op_remove_interface(wl, vif, true);
2812 		break;
2813 	}
2814 	WARN_ON(iter != wlvif);
2815 	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2816 		wl12xx_force_active_psm(wl);
2817 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2818 		wl12xx_queue_recovery_work(wl);
2819 	}
2820 out:
2821 	mutex_unlock(&wl->mutex);
2822 }
2823 
2824 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2825 				      struct ieee80211_vif *vif,
2826 				      enum nl80211_iftype new_type, bool p2p)
2827 {
2828 	struct wl1271 *wl = hw->priv;
2829 	int ret;
2830 
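	/* mark the type change so the remove/add below won't trigger a fw switch */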
2831 	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2832 	wl1271_op_remove_interface(hw, vif);
2833 
2834 	vif->type = new_type;
2835 	vif->p2p = p2p;
2836 	ret = wl1271_op_add_interface(hw, vif);
2837 
2838 	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2839 	return ret;
2840 }
2841 
2842 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2843 {
2844 	int ret;
2845 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2846 
2847 	/*
	 * One of the side effects of the JOIN command is that it clears
2849 	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2850 	 * to a WPA/WPA2 access point will therefore kill the data-path.
2851 	 * Currently the only valid scenario for JOIN during association
2852 	 * is on roaming, in which case we will also be given new keys.
2853 	 * Keep the below message for now, unless it starts bothering
2854 	 * users who really like to roam a lot :)
2855 	 */
2856 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2857 		wl1271_info("JOIN while associated.");
2858 
2859 	/* clear encryption type */
2860 	wlvif->encryption_type = KEY_NONE;
2861 
2862 	if (is_ibss)
2863 		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2864 	else {
2865 		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2866 			/*
2867 			 * TODO: this is an ugly workaround for wl12xx fw
2868 			 * bug - we are not able to tx/rx after the first
2869 			 * start_sta, so make dummy start+stop calls,
2870 			 * and then call start_sta again.
2871 			 * this should be fixed in the fw.
2872 			 */
2873 			wl12xx_cmd_role_start_sta(wl, wlvif);
2874 			wl12xx_cmd_role_stop_sta(wl, wlvif);
2875 		}
2876 
2877 		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2878 	}
2879 
2880 	return ret;
2881 }
2882 
2883 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2884 			    int offset)
2885 {
2886 	u8 ssid_len;
2887 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2888 					 skb->len - offset);
2889 
2890 	if (!ptr) {
2891 		wl1271_error("No SSID in IEs!");
2892 		return -ENOENT;
2893 	}
2894 
2895 	ssid_len = ptr[1];
2896 	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2897 		wl1271_error("SSID is too long!");
2898 		return -EINVAL;
2899 	}
2900 
2901 	wlvif->ssid_len = ssid_len;
2902 	memcpy(wlvif->ssid, ptr+2, ssid_len);
2903 	return 0;
2904 }
2905 
2906 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2907 {
2908 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2909 	struct sk_buff *skb;
2910 	int ieoffset;
2911 
2912 	/* we currently only support setting the ssid from the ap probe req */
2913 	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2914 		return -EINVAL;
2915 
2916 	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2917 	if (!skb)
2918 		return -EINVAL;
2919 
2920 	ieoffset = offsetof(struct ieee80211_mgmt,
2921 			    u.probe_req.variable);
2922 	wl1271_ssid_set(wlvif, skb, ieoffset);
2923 	dev_kfree_skb(skb);
2924 
2925 	return 0;
2926 }
2927 
2928 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2929 			    struct ieee80211_bss_conf *bss_conf,
2930 			    u32 sta_rate_set)
2931 {
2932 	int ieoffset;
2933 	int ret;
2934 
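	/* cache the association parameters reported by mac80211 */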
2935 	wlvif->aid = bss_conf->aid;
2936 	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2937 	wlvif->beacon_int = bss_conf->beacon_int;
2938 	wlvif->wmm_enabled = bss_conf->qos;
2939 
2940 	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2941 
2942 	/*
	 * with wl1271, we don't need to update the beacon_int and
	 * dtim_period, because the firmware updates them by itself when
	 * the first beacon is received after a join.
2947 	 */
2948 	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2949 	if (ret < 0)
2950 		return ret;
2951 
2952 	/*
2953 	 * Get a template for hardware connection maintenance
2954 	 */
2955 	dev_kfree_skb(wlvif->probereq);
2956 	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2957 							wlvif,
2958 							NULL);
2959 	ieoffset = offsetof(struct ieee80211_mgmt,
2960 			    u.probe_req.variable);
2961 	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2962 
2963 	/* enable the connection monitoring feature */
2964 	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2965 	if (ret < 0)
2966 		return ret;
2967 
2968 	/*
	 * The join command disables the keep-alive mode, shuts down its
	 * process, and also clears the template config, so we need to reset
	 * it all after the join. The acx_aid starts the keep-alive process,
	 * and the order of the commands below is relevant.
2973 	 */
2974 	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2975 	if (ret < 0)
2976 		return ret;
2977 
2978 	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2979 	if (ret < 0)
2980 		return ret;
2981 
2982 	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2983 	if (ret < 0)
2984 		return ret;
2985 
2986 	ret = wl1271_acx_keep_alive_config(wl, wlvif,
2987 					   wlvif->sta.klv_template_id,
2988 					   ACX_KEEP_ALIVE_TPL_VALID);
2989 	if (ret < 0)
2990 		return ret;
2991 
2992 	/*
2993 	 * The default fw psm configuration is AUTO, while mac80211 default
2994 	 * setting is off (ACTIVE), so sync the fw with the correct value.
2995 	 */
2996 	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2997 	if (ret < 0)
2998 		return ret;
2999 
3000 	if (sta_rate_set) {
3001 		wlvif->rate_set =
3002 			wl1271_tx_enabled_rates_get(wl,
3003 						    sta_rate_set,
3004 						    wlvif->band);
3005 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3006 		if (ret < 0)
3007 			return ret;
3008 	}
3009 
3010 	return ret;
3011 }
3012 
3013 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3014 {
3015 	int ret;
3016 	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3017 
	/* make sure we are associated (sta) */
3019 	if (sta &&
3020 	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3021 		return false;
3022 
3023 	/* make sure we are joined (ibss) */
3024 	if (!sta &&
3025 	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3026 		return false;
3027 
3028 	if (sta) {
3029 		/* use defaults when not associated */
3030 		wlvif->aid = 0;
3031 
3032 		/* free probe-request template */
3033 		dev_kfree_skb(wlvif->probereq);
3034 		wlvif->probereq = NULL;
3035 
3036 		/* disable connection monitor features */
3037 		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3038 		if (ret < 0)
3039 			return ret;
3040 
3041 		/* Disable the keep-alive feature */
3042 		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3043 		if (ret < 0)
3044 			return ret;
3045 
3046 		/* disable beacon filtering */
3047 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3048 		if (ret < 0)
3049 			return ret;
3050 	}
3051 
3052 	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3053 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3054 
3055 		wl12xx_cmd_stop_channel_switch(wl, wlvif);
3056 		ieee80211_chswitch_done(vif, false);
3057 		cancel_delayed_work(&wlvif->channel_switch_work);
3058 	}
3059 
3060 	/* invalidate keep-alive template */
3061 	wl1271_acx_keep_alive_config(wl, wlvif,
3062 				     wlvif->sta.klv_template_id,
3063 				     ACX_KEEP_ALIVE_TPL_INVALID);
3064 
3065 	return 0;
3066 }
3067 
3068 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3069 {
3070 	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3071 	wlvif->rate_set = wlvif->basic_rate_set;
3072 }
3073 
3074 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3075 				   bool idle)
3076 {
3077 	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3078 
3079 	if (idle == cur_idle)
3080 		return;
3081 
3082 	if (idle) {
3083 		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3084 	} else {
3085 		/* The current firmware only supports sched_scan in idle */
3086 		if (wl->sched_vif == wlvif)
3087 			wl->ops->sched_scan_stop(wl, wlvif);
3088 
3089 		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3090 	}
3091 }
3092 
3093 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3094 			     struct ieee80211_conf *conf, u32 changed)
3095 {
3096 	int ret;
3097 
3098 	if (wlcore_is_p2p_mgmt(wlvif))
3099 		return 0;
3100 
3101 	if (conf->power_level != wlvif->power_level) {
3102 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3103 		if (ret < 0)
3104 			return ret;
3105 
3106 		wlvif->power_level = conf->power_level;
3107 	}
3108 
3109 	return 0;
3110 }
3111 
3112 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3113 {
3114 	struct wl1271 *wl = hw->priv;
3115 	struct wl12xx_vif *wlvif;
3116 	struct ieee80211_conf *conf = &hw->conf;
3117 	int ret = 0;
3118 
3119 	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3120 		     " changed 0x%x",
3121 		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3122 		     conf->power_level,
3123 		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3124 			 changed);
3125 
3126 	mutex_lock(&wl->mutex);
3127 
3128 	if (changed & IEEE80211_CONF_CHANGE_POWER)
3129 		wl->power_level = conf->power_level;
3130 
3131 	if (unlikely(wl->state != WLCORE_STATE_ON))
3132 		goto out;
3133 
3134 	ret = pm_runtime_get_sync(wl->dev);
3135 	if (ret < 0) {
3136 		pm_runtime_put_noidle(wl->dev);
3137 		goto out;
3138 	}
3139 
3140 	/* configure each interface */
3141 	wl12xx_for_each_wlvif(wl, wlvif) {
3142 		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3143 		if (ret < 0)
3144 			goto out_sleep;
3145 	}
3146 
3147 out_sleep:
3148 	pm_runtime_mark_last_busy(wl->dev);
3149 	pm_runtime_put_autosuspend(wl->dev);
3150 
3151 out:
3152 	mutex_unlock(&wl->mutex);
3153 
3154 	return ret;
3155 }
3156 
3157 struct wl1271_filter_params {
3158 	bool enabled;
3159 	int mc_list_length;
3160 	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3161 };
3162 
3163 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3164 				       struct netdev_hw_addr_list *mc_list)
3165 {
3166 	struct wl1271_filter_params *fp;
3167 	struct netdev_hw_addr *ha;
3168 
3169 	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3170 	if (!fp) {
3171 		wl1271_error("Out of memory setting filters.");
3172 		return 0;
3173 	}
3174 
3175 	/* update multicast filtering parameters */
3176 	fp->mc_list_length = 0;
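	/* the fw group-address table is limited; disable filtering for longer lists */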
3177 	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3178 		fp->enabled = false;
3179 	} else {
3180 		fp->enabled = true;
3181 		netdev_hw_addr_list_for_each(ha, mc_list) {
3182 			memcpy(fp->mc_list[fp->mc_list_length],
3183 					ha->addr, ETH_ALEN);
3184 			fp->mc_list_length++;
3185 		}
3186 	}
3187 
3188 	return (u64)(unsigned long)fp;
3189 }
3190 
3191 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3192 				  FIF_FCSFAIL | \
3193 				  FIF_BCN_PRBRESP_PROMISC | \
3194 				  FIF_CONTROL | \
3195 				  FIF_OTHER_BSS)
3196 
3197 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3198 				       unsigned int changed,
3199 				       unsigned int *total, u64 multicast)
3200 {
3201 	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3202 	struct wl1271 *wl = hw->priv;
3203 	struct wl12xx_vif *wlvif;
3204 
3205 	int ret;
3206 
3207 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3208 		     " total %x", changed, *total);
3209 
3210 	mutex_lock(&wl->mutex);
3211 
3212 	*total &= WL1271_SUPPORTED_FILTERS;
3213 	changed &= WL1271_SUPPORTED_FILTERS;
3214 
3215 	if (unlikely(wl->state != WLCORE_STATE_ON))
3216 		goto out;
3217 
3218 	ret = pm_runtime_get_sync(wl->dev);
3219 	if (ret < 0) {
3220 		pm_runtime_put_noidle(wl->dev);
3221 		goto out;
3222 	}
3223 
3224 	wl12xx_for_each_wlvif(wl, wlvif) {
3225 		if (wlcore_is_p2p_mgmt(wlvif))
3226 			continue;
3227 
3228 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3229 			if (*total & FIF_ALLMULTI)
3230 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3231 								   false,
3232 								   NULL, 0);
3233 			else if (fp)
3234 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3235 							fp->enabled,
3236 							fp->mc_list,
3237 							fp->mc_list_length);
3238 			if (ret < 0)
3239 				goto out_sleep;
3240 		}
3241 
3242 		/*
		 * If the interface is in AP mode and was created with allmulti,
		 * disable the firmware filters so that all multicast packets
		 * are passed. This is mandatory for mDNS-based discovery
		 * protocols.
		 */
		if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
			if (*total & FIF_ALLMULTI) {
3249 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3250 							false,
3251 							NULL, 0);
3252 				if (ret < 0)
3253 					goto out_sleep;
3254 			}
3255 		}
3256 	}
3257 
3258 	/*
	 * the fw doesn't provide an api to configure the filters. instead,
	 * the filter configuration is based on the active roles / ROC
3261 	 * state.
3262 	 */
3263 
3264 out_sleep:
3265 	pm_runtime_mark_last_busy(wl->dev);
3266 	pm_runtime_put_autosuspend(wl->dev);
3267 
3268 out:
3269 	mutex_unlock(&wl->mutex);
3270 	kfree(fp);
3271 }
3272 
3273 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3274 				u8 id, u8 key_type, u8 key_size,
3275 				const u8 *key, u8 hlid, u32 tx_seq_32,
3276 				u16 tx_seq_16, bool is_pairwise)
3277 {
3278 	struct wl1271_ap_key *ap_key;
3279 	int i;
3280 
3281 	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3282 
3283 	if (key_size > MAX_KEY_SIZE)
3284 		return -EINVAL;
3285 
3286 	/*
3287 	 * Find next free entry in ap_keys. Also check we are not replacing
3288 	 * an existing key.
3289 	 */
3290 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3291 		if (wlvif->ap.recorded_keys[i] == NULL)
3292 			break;
3293 
3294 		if (wlvif->ap.recorded_keys[i]->id == id) {
3295 			wl1271_warning("trying to record key replacement");
3296 			return -EINVAL;
3297 		}
3298 	}
3299 
3300 	if (i == MAX_NUM_KEYS)
3301 		return -EBUSY;
3302 
3303 	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3304 	if (!ap_key)
3305 		return -ENOMEM;
3306 
3307 	ap_key->id = id;
3308 	ap_key->key_type = key_type;
3309 	ap_key->key_size = key_size;
3310 	memcpy(ap_key->key, key, key_size);
3311 	ap_key->hlid = hlid;
3312 	ap_key->tx_seq_32 = tx_seq_32;
3313 	ap_key->tx_seq_16 = tx_seq_16;
3314 	ap_key->is_pairwise = is_pairwise;
3315 
3316 	wlvif->ap.recorded_keys[i] = ap_key;
3317 	return 0;
3318 }
3319 
3320 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3321 {
3322 	int i;
3323 
3324 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3325 		kfree(wlvif->ap.recorded_keys[i]);
3326 		wlvif->ap.recorded_keys[i] = NULL;
3327 	}
3328 }
3329 
3330 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3331 {
3332 	int i, ret = 0;
3333 	struct wl1271_ap_key *key;
3334 	bool wep_key_added = false;
3335 
3336 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3337 		u8 hlid;
3338 		if (wlvif->ap.recorded_keys[i] == NULL)
3339 			break;
3340 
3341 		key = wlvif->ap.recorded_keys[i];
3342 		hlid = key->hlid;
3343 		if (hlid == WL12XX_INVALID_LINK_ID)
3344 			hlid = wlvif->ap.bcast_hlid;
3345 
3346 		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3347 					    key->id, key->key_type,
3348 					    key->key_size, key->key,
3349 					    hlid, key->tx_seq_32,
3350 					    key->tx_seq_16, key->is_pairwise);
3351 		if (ret < 0)
3352 			goto out;
3353 
3354 		if (key->key_type == KEY_WEP)
3355 			wep_key_added = true;
3356 	}
3357 
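	/* a WEP key was programmed, so also set the default WEP key index */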
3358 	if (wep_key_added) {
3359 		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3360 						     wlvif->ap.bcast_hlid);
3361 		if (ret < 0)
3362 			goto out;
3363 	}
3364 
3365 out:
3366 	wl1271_free_ap_keys(wl, wlvif);
3367 	return ret;
3368 }
3369 
3370 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3371 		       u16 action, u8 id, u8 key_type,
3372 		       u8 key_size, const u8 *key, u32 tx_seq_32,
3373 		       u16 tx_seq_16, struct ieee80211_sta *sta,
3374 		       bool is_pairwise)
3375 {
3376 	int ret;
3377 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3378 
3379 	if (is_ap) {
3380 		struct wl1271_station *wl_sta;
3381 		u8 hlid;
3382 
3383 		if (sta) {
3384 			wl_sta = (struct wl1271_station *)sta->drv_priv;
3385 			hlid = wl_sta->hlid;
3386 		} else {
3387 			hlid = wlvif->ap.bcast_hlid;
3388 		}
3389 
3390 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3391 			/*
3392 			 * We do not support removing keys after AP shutdown.
3393 			 * Pretend we do to make mac80211 happy.
3394 			 */
3395 			if (action != KEY_ADD_OR_REPLACE)
3396 				return 0;
3397 
3398 			ret = wl1271_record_ap_key(wl, wlvif, id,
3399 					     key_type, key_size,
3400 					     key, hlid, tx_seq_32,
3401 					     tx_seq_16, is_pairwise);
3402 		} else {
3403 			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3404 					     id, key_type, key_size,
3405 					     key, hlid, tx_seq_32,
3406 					     tx_seq_16, is_pairwise);
3407 		}
3408 
3409 		if (ret < 0)
3410 			return ret;
3411 	} else {
3412 		const u8 *addr;
3413 		static const u8 bcast_addr[ETH_ALEN] = {
3414 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3415 		};
3416 
3417 		addr = sta ? sta->addr : bcast_addr;
3418 
3419 		if (is_zero_ether_addr(addr)) {
			/* We don't support TX-only encryption */
3421 			return -EOPNOTSUPP;
3422 		}
3423 
		/* The wl1271 does not allow removing unicast keys - they
		   will be cleared automatically on the next CMD_JOIN. Ignore
		   the request silently, as we don't want mac80211 to emit
		   an error message. */
3428 		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3429 			return 0;
3430 
3431 		/* don't remove key if hlid was already deleted */
3432 		if (action == KEY_REMOVE &&
3433 		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3434 			return 0;
3435 
3436 		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3437 					     id, key_type, key_size,
3438 					     key, addr, tx_seq_32,
3439 					     tx_seq_16);
3440 		if (ret < 0)
3441 			return ret;
3442 
3443 	}
3444 
3445 	return 0;
3446 }
3447 
3448 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3449 			     struct ieee80211_vif *vif,
3450 			     struct ieee80211_sta *sta,
3451 			     struct ieee80211_key_conf *key_conf)
3452 {
3453 	struct wl1271 *wl = hw->priv;
3454 	int ret;
3455 	bool might_change_spare =
3456 		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3457 		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3458 
3459 	if (might_change_spare) {
3460 		/*
3461 		 * stop the queues and flush to ensure the next packets are
3462 		 * in sync with FW spare block accounting
3463 		 */
3464 		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3465 		wl1271_tx_flush(wl);
3466 	}
3467 
3468 	mutex_lock(&wl->mutex);
3469 
3470 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3471 		ret = -EAGAIN;
3472 		goto out_wake_queues;
3473 	}
3474 
3475 	ret = pm_runtime_get_sync(wl->dev);
3476 	if (ret < 0) {
3477 		pm_runtime_put_noidle(wl->dev);
3478 		goto out_wake_queues;
3479 	}
3480 
3481 	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3482 
3483 	pm_runtime_mark_last_busy(wl->dev);
3484 	pm_runtime_put_autosuspend(wl->dev);
3485 
3486 out_wake_queues:
3487 	if (might_change_spare)
3488 		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3489 
3490 	mutex_unlock(&wl->mutex);
3491 
3492 	return ret;
3493 }
3494 
3495 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3496 		   struct ieee80211_vif *vif,
3497 		   struct ieee80211_sta *sta,
3498 		   struct ieee80211_key_conf *key_conf)
3499 {
3500 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3501 	int ret;
3502 	u32 tx_seq_32 = 0;
3503 	u16 tx_seq_16 = 0;
3504 	u8 key_type;
3505 	u8 hlid;
3506 	bool is_pairwise;
3507 
3508 	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3509 
3510 	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3511 	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3512 		     key_conf->cipher, key_conf->keyidx,
3513 		     key_conf->keylen, key_conf->flags);
3514 	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3515 
3516 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3517 		if (sta) {
3518 			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3519 			hlid = wl_sta->hlid;
3520 		} else {
3521 			hlid = wlvif->ap.bcast_hlid;
3522 		}
3523 	else
3524 		hlid = wlvif->sta.hlid;
3525 
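	/* derive the initial tx security sequence numbers for this link */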
3526 	if (hlid != WL12XX_INVALID_LINK_ID) {
3527 		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3528 		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3529 		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3530 	}
3531 
3532 	switch (key_conf->cipher) {
3533 	case WLAN_CIPHER_SUITE_WEP40:
3534 	case WLAN_CIPHER_SUITE_WEP104:
3535 		key_type = KEY_WEP;
3536 
3537 		key_conf->hw_key_idx = key_conf->keyidx;
3538 		break;
3539 	case WLAN_CIPHER_SUITE_TKIP:
3540 		key_type = KEY_TKIP;
3541 		key_conf->hw_key_idx = key_conf->keyidx;
3542 		break;
3543 	case WLAN_CIPHER_SUITE_CCMP:
3544 		key_type = KEY_AES;
3545 		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3546 		break;
3547 	case WL1271_CIPHER_SUITE_GEM:
3548 		key_type = KEY_GEM;
3549 		break;
3550 	case WLAN_CIPHER_SUITE_AES_CMAC:
3551 		key_type = KEY_IGTK;
3552 		break;
3553 	default:
3554 		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3555 
3556 		return -EOPNOTSUPP;
3557 	}
3558 
3559 	is_pairwise = key_conf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
3560 
3561 	switch (cmd) {
3562 	case SET_KEY:
3563 		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3564 				 key_conf->keyidx, key_type,
3565 				 key_conf->keylen, key_conf->key,
3566 				 tx_seq_32, tx_seq_16, sta, is_pairwise);
3567 		if (ret < 0) {
3568 			wl1271_error("Could not add or replace key");
3569 			return ret;
3570 		}
3571 
3572 		/*
3573 		 * reconfiguring arp response if the unicast (or common)
3574 		 * encryption key type was changed
3575 		 */
3576 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3577 		    (sta || key_type == KEY_WEP) &&
3578 		    wlvif->encryption_type != key_type) {
3579 			wlvif->encryption_type = key_type;
3580 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3581 			if (ret < 0) {
3582 				wl1271_warning("build arp rsp failed: %d", ret);
3583 				return ret;
3584 			}
3585 		}
3586 		break;
3587 
3588 	case DISABLE_KEY:
3589 		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3590 				     key_conf->keyidx, key_type,
3591 				     key_conf->keylen, key_conf->key,
3592 				     0, 0, sta, is_pairwise);
3593 		if (ret < 0) {
3594 			wl1271_error("Could not remove key");
3595 			return ret;
3596 		}
3597 		break;
3598 
3599 	default:
3600 		wl1271_error("Unsupported key cmd 0x%x", cmd);
3601 		return -EOPNOTSUPP;
3602 	}
3603 
3604 	return ret;
3605 }
3606 EXPORT_SYMBOL_GPL(wlcore_set_key);
3607 
3608 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3609 					  struct ieee80211_vif *vif,
3610 					  int key_idx)
3611 {
3612 	struct wl1271 *wl = hw->priv;
3613 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3614 	int ret;
3615 
3616 	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3617 		     key_idx);
3618 
3619 	/* we don't handle unsetting of default key */
3620 	if (key_idx == -1)
3621 		return;
3622 
3623 	mutex_lock(&wl->mutex);
3624 
3625 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3626 		ret = -EAGAIN;
3627 		goto out_unlock;
3628 	}
3629 
3630 	ret = pm_runtime_get_sync(wl->dev);
3631 	if (ret < 0) {
3632 		pm_runtime_put_noidle(wl->dev);
3633 		goto out_unlock;
3634 	}
3635 
3636 	wlvif->default_key = key_idx;
3637 
3638 	/* the default WEP key needs to be configured at least once */
3639 	if (wlvif->encryption_type == KEY_WEP) {
3640 		ret = wl12xx_cmd_set_default_wep_key(wl,
3641 				key_idx,
3642 				wlvif->sta.hlid);
3643 		if (ret < 0)
3644 			goto out_sleep;
3645 	}
3646 
3647 out_sleep:
3648 	pm_runtime_mark_last_busy(wl->dev);
3649 	pm_runtime_put_autosuspend(wl->dev);
3650 
3651 out_unlock:
3652 	mutex_unlock(&wl->mutex);
3653 }
3654 
3655 void wlcore_regdomain_config(struct wl1271 *wl)
3656 {
3657 	int ret;
3658 
3659 	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3660 		return;
3661 
3662 	mutex_lock(&wl->mutex);
3663 
3664 	if (unlikely(wl->state != WLCORE_STATE_ON))
3665 		goto out;
3666 
3667 	ret = pm_runtime_get_sync(wl->dev);
3668 	if (ret < 0)
3669 		goto out;
3670 
3671 	ret = wlcore_cmd_regdomain_config_locked(wl);
3672 	if (ret < 0) {
3673 		wl12xx_queue_recovery_work(wl);
3674 		goto out;
3675 	}
3676 
3677 	pm_runtime_mark_last_busy(wl->dev);
3678 	pm_runtime_put_autosuspend(wl->dev);
3679 out:
3680 	mutex_unlock(&wl->mutex);
3681 }
3682 
3683 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3684 			     struct ieee80211_vif *vif,
3685 			     struct ieee80211_scan_request *hw_req)
3686 {
3687 	struct cfg80211_scan_request *req = &hw_req->req;
3688 	struct wl1271 *wl = hw->priv;
3689 	int ret;
3690 	u8 *ssid = NULL;
3691 	size_t len = 0;
3692 
3693 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3694 
3695 	if (req->n_ssids) {
3696 		ssid = req->ssids[0].ssid;
3697 		len = req->ssids[0].ssid_len;
3698 	}
3699 
3700 	mutex_lock(&wl->mutex);
3701 
3702 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3703 		/*
3704 		 * We cannot return -EBUSY here because cfg80211 will expect
3705 		 * a call to ieee80211_scan_completed if we do - in this case
3706 		 * there won't be any call.
3707 		 */
3708 		ret = -EAGAIN;
3709 		goto out;
3710 	}
3711 
3712 	ret = pm_runtime_get_sync(wl->dev);
3713 	if (ret < 0) {
3714 		pm_runtime_put_noidle(wl->dev);
3715 		goto out;
3716 	}
3717 
3718 	/* fail if there is any role in ROC */
3719 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3720 		/* don't allow scanning right now */
3721 		ret = -EBUSY;
3722 		goto out_sleep;
3723 	}
3724 
3725 	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3726 out_sleep:
3727 	pm_runtime_mark_last_busy(wl->dev);
3728 	pm_runtime_put_autosuspend(wl->dev);
3729 out:
3730 	mutex_unlock(&wl->mutex);
3731 
3732 	return ret;
3733 }
3734 
3735 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3736 				     struct ieee80211_vif *vif)
3737 {
3738 	struct wl1271 *wl = hw->priv;
3739 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3740 	struct cfg80211_scan_info info = {
3741 		.aborted = true,
3742 	};
3743 	int ret;
3744 
3745 	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3746 
3747 	mutex_lock(&wl->mutex);
3748 
3749 	if (unlikely(wl->state != WLCORE_STATE_ON))
3750 		goto out;
3751 
3752 	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3753 		goto out;
3754 
3755 	ret = pm_runtime_get_sync(wl->dev);
3756 	if (ret < 0) {
3757 		pm_runtime_put_noidle(wl->dev);
3758 		goto out;
3759 	}
3760 
3761 	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3762 		ret = wl->ops->scan_stop(wl, wlvif);
3763 		if (ret < 0)
3764 			goto out_sleep;
3765 	}
3766 
3767 	/*
3768 	 * Rearm the tx watchdog just before idling scan. This
	 * prevents just-finished scans from triggering the watchdog.
3770 	 */
3771 	wl12xx_rearm_tx_watchdog_locked(wl);
3772 
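	/* reset the scan state and report the aborted scan to mac80211 */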
3773 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3774 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3775 	wl->scan_wlvif = NULL;
3776 	wl->scan.req = NULL;
3777 	ieee80211_scan_completed(wl->hw, &info);
3778 
3779 out_sleep:
3780 	pm_runtime_mark_last_busy(wl->dev);
3781 	pm_runtime_put_autosuspend(wl->dev);
3782 out:
3783 	mutex_unlock(&wl->mutex);
3784 
3785 	cancel_delayed_work_sync(&wl->scan_complete_work);
3786 }
3787 
3788 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3789 				      struct ieee80211_vif *vif,
3790 				      struct cfg80211_sched_scan_request *req,
3791 				      struct ieee80211_scan_ies *ies)
3792 {
3793 	struct wl1271 *wl = hw->priv;
3794 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3795 	int ret;
3796 
3797 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3798 
3799 	mutex_lock(&wl->mutex);
3800 
3801 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3802 		ret = -EAGAIN;
3803 		goto out;
3804 	}
3805 
3806 	ret = pm_runtime_get_sync(wl->dev);
3807 	if (ret < 0) {
3808 		pm_runtime_put_noidle(wl->dev);
3809 		goto out;
3810 	}
3811 
3812 	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3813 	if (ret < 0)
3814 		goto out_sleep;
3815 
3816 	wl->sched_vif = wlvif;
3817 
3818 out_sleep:
3819 	pm_runtime_mark_last_busy(wl->dev);
3820 	pm_runtime_put_autosuspend(wl->dev);
3821 out:
3822 	mutex_unlock(&wl->mutex);
3823 	return ret;
3824 }
3825 
3826 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3827 				     struct ieee80211_vif *vif)
3828 {
3829 	struct wl1271 *wl = hw->priv;
3830 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3831 	int ret;
3832 
3833 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3834 
3835 	mutex_lock(&wl->mutex);
3836 
3837 	if (unlikely(wl->state != WLCORE_STATE_ON))
3838 		goto out;
3839 
3840 	ret = pm_runtime_get_sync(wl->dev);
3841 	if (ret < 0) {
3842 		pm_runtime_put_noidle(wl->dev);
3843 		goto out;
3844 	}
3845 
3846 	wl->ops->sched_scan_stop(wl, wlvif);
3847 
3848 	pm_runtime_mark_last_busy(wl->dev);
3849 	pm_runtime_put_autosuspend(wl->dev);
3850 out:
3851 	mutex_unlock(&wl->mutex);
3852 
3853 	return 0;
3854 }
3855 
3856 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3857 {
3858 	struct wl1271 *wl = hw->priv;
3859 	int ret = 0;
3860 
3861 	mutex_lock(&wl->mutex);
3862 
3863 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3864 		ret = -EAGAIN;
3865 		goto out;
3866 	}
3867 
3868 	ret = pm_runtime_get_sync(wl->dev);
3869 	if (ret < 0) {
3870 		pm_runtime_put_noidle(wl->dev);
3871 		goto out;
3872 	}
3873 
3874 	ret = wl1271_acx_frag_threshold(wl, value);
3875 	if (ret < 0)
3876 		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3877 
3878 	pm_runtime_mark_last_busy(wl->dev);
3879 	pm_runtime_put_autosuspend(wl->dev);
3880 
3881 out:
3882 	mutex_unlock(&wl->mutex);
3883 
3884 	return ret;
3885 }
3886 
3887 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3888 {
3889 	struct wl1271 *wl = hw->priv;
3890 	struct wl12xx_vif *wlvif;
3891 	int ret = 0;
3892 
3893 	mutex_lock(&wl->mutex);
3894 
3895 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3896 		ret = -EAGAIN;
3897 		goto out;
3898 	}
3899 
3900 	ret = pm_runtime_get_sync(wl->dev);
3901 	if (ret < 0) {
3902 		pm_runtime_put_noidle(wl->dev);
3903 		goto out;
3904 	}
3905 
3906 	wl12xx_for_each_wlvif(wl, wlvif) {
3907 		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3908 		if (ret < 0)
3909 			wl1271_warning("set rts threshold failed: %d", ret);
3910 	}
3911 	pm_runtime_mark_last_busy(wl->dev);
3912 	pm_runtime_put_autosuspend(wl->dev);
3913 
3914 out:
3915 	mutex_unlock(&wl->mutex);
3916 
3917 	return ret;
3918 }
3919 
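/* strip an IE (identified by EID) from an skb, searching from ieoffset */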
3920 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3921 {
3922 	int len;
3923 	const u8 *next, *end = skb->data + skb->len;
3924 	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3925 					skb->len - ieoffset);
3926 	if (!ie)
3927 		return;
3928 	len = ie[1] + 2;
3929 	next = ie + len;
3930 	memmove(ie, next, end - next);
3931 	skb_trim(skb, skb->len - len);
3932 }
3933 
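/* strip a vendor IE (identified by OUI/type) from an skb, searching from ieoffset */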
3934 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3935 					    unsigned int oui, u8 oui_type,
3936 					    int ieoffset)
3937 {
3938 	int len;
3939 	const u8 *next, *end = skb->data + skb->len;
3940 	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3941 					       skb->data + ieoffset,
3942 					       skb->len - ieoffset);
3943 	if (!ie)
3944 		return;
3945 	len = ie[1] + 2;
3946 	next = ie + len;
3947 	memmove(ie, next, end - next);
3948 	skb_trim(skb, skb->len - len);
3949 }
3950 
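/*
 * Upload the probe response generated by mac80211 for this vif as the
 * AP probe response template and set WLVIF_FLAG_AP_PROBE_RESP_SET on
 * success.
 */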
3951 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3952 					 struct ieee80211_vif *vif)
3953 {
3954 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3955 	struct sk_buff *skb;
3956 	int ret;
3957 
3958 	skb = ieee80211_proberesp_get(wl->hw, vif);
3959 	if (!skb)
3960 		return -EOPNOTSUPP;
3961 
3962 	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3963 				      CMD_TEMPL_AP_PROBE_RESPONSE,
3964 				      skb->data,
3965 				      skb->len, 0,
3966 				      rates);
3967 	dev_kfree_skb(skb);
3968 
3969 	if (ret < 0)
3970 		goto out;
3971 
3972 	wl1271_debug(DEBUG_AP, "probe response updated");
3973 	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3974 
3975 out:
3976 	return ret;
3977 }
3978 
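/*
 * Legacy AP probe response path: if the stored SSID is empty (e.g. a
 * hidden SSID beacon), rebuild the template with the SSID taken from
 * bss_conf before uploading it to the firmware.
 */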
3979 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3980 					     struct ieee80211_vif *vif,
3981 					     u8 *probe_rsp_data,
3982 					     size_t probe_rsp_len,
3983 					     u32 rates)
3984 {
3985 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3986 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3987 	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3988 	int ssid_ie_offset, ie_offset, templ_len;
3989 	const u8 *ptr;
3990 
3991 	/* no need to change probe response if the SSID is set correctly */
3992 	if (wlvif->ssid_len > 0)
3993 		return wl1271_cmd_template_set(wl, wlvif->role_id,
3994 					       CMD_TEMPL_AP_PROBE_RESPONSE,
3995 					       probe_rsp_data,
3996 					       probe_rsp_len, 0,
3997 					       rates);
3998 
3999 	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
4000 		wl1271_error("probe_rsp template too big");
4001 		return -EINVAL;
4002 	}
4003 
4004 	/* start searching from IE offset */
4005 	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
4006 
4007 	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
4008 			       probe_rsp_len - ie_offset);
4009 	if (!ptr) {
4010 		wl1271_error("No SSID in beacon!");
4011 		return -EINVAL;
4012 	}
4013 
4014 	ssid_ie_offset = ptr - probe_rsp_data;
4015 	ptr += (ptr[1] + 2);
4016 
4017 	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
4018 
4019 	/* insert SSID from bss_conf */
4020 	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
4021 	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
4022 	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
4023 	       bss_conf->ssid, bss_conf->ssid_len);
4024 	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
4025 
4026 	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
4027 	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
4028 	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
4029 
4030 	return wl1271_cmd_template_set(wl, wlvif->role_id,
4031 				       CMD_TEMPL_AP_PROBE_RESPONSE,
4032 				       probe_rsp_templ,
4033 				       templ_len, 0,
4034 				       rates);
4035 }
4036 
4037 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
4038 				       struct ieee80211_vif *vif,
4039 				       struct ieee80211_bss_conf *bss_conf,
4040 				       u32 changed)
4041 {
4042 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4043 	int ret = 0;
4044 
4045 	if (changed & BSS_CHANGED_ERP_SLOT) {
4046 		if (bss_conf->use_short_slot)
4047 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4048 		else
4049 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4050 		if (ret < 0) {
4051 			wl1271_warning("Set slot time failed %d", ret);
4052 			goto out;
4053 		}
4054 	}
4055 
4056 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4057 		if (bss_conf->use_short_preamble)
4058 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4059 		else
4060 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4061 	}
4062 
4063 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4064 		if (bss_conf->use_cts_prot)
4065 			ret = wl1271_acx_cts_protect(wl, wlvif,
4066 						     CTSPROTECT_ENABLE);
4067 		else
4068 			ret = wl1271_acx_cts_protect(wl, wlvif,
4069 						     CTSPROTECT_DISABLE);
4070 		if (ret < 0) {
4071 			wl1271_warning("Set ctsprotect failed %d", ret);
4072 			goto out;
4073 		}
4074 	}
4075 
4076 out:
4077 	return ret;
4078 }
4079 
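/*
 * Upload the current beacon as a template and, unless userspace already
 * provided a probe response explicitly, derive a probe response template
 * from the beacon with the TIM and P2P IEs stripped.
 */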
4080 static int wlcore_set_beacon_template(struct wl1271 *wl,
4081 				      struct ieee80211_vif *vif,
4082 				      bool is_ap)
4083 {
4084 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4085 	struct ieee80211_hdr *hdr;
4086 	u32 min_rate;
4087 	int ret;
4088 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4089 	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4090 	u16 tmpl_id;
4091 
4092 	if (!beacon) {
4093 		ret = -EINVAL;
4094 		goto out;
4095 	}
4096 
4097 	wl1271_debug(DEBUG_MASTER, "beacon updated");
4098 
4099 	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4100 	if (ret < 0) {
4101 		dev_kfree_skb(beacon);
4102 		goto out;
4103 	}
4104 	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4105 	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4106 		CMD_TEMPL_BEACON;
4107 	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4108 				      beacon->data,
4109 				      beacon->len, 0,
4110 				      min_rate);
4111 	if (ret < 0) {
4112 		dev_kfree_skb(beacon);
4113 		goto out;
4114 	}
4115 
4116 	wlvif->wmm_enabled =
4117 		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4118 					WLAN_OUI_TYPE_MICROSOFT_WMM,
4119 					beacon->data + ieoffset,
4120 					beacon->len - ieoffset);
4121 
4122 	/*
4123 	 * In case we already have a probe-resp beacon set explicitly
4124 	 * In case we already have a probe-resp template set explicitly
4125 	 * by userspace, don't derive one from the beacon data.
4126 	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4127 		goto end_bcn;
4128 
4129 	/* remove TIM ie from probe response */
4130 	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4131 
4132 	/*
4133 	 * remove p2p ie from probe response.
4134 	 * the fw responds to probe requests that don't include
4135 	 * the p2p ie. probe requests with a p2p ie will be passed up,
4136 	 * and will be answered by the supplicant (the spec
4137 	 * forbids including the p2p ie when responding to probe
4138 	 * requests that didn't include it).
4139 	 */
4140 	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4141 				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4142 
4143 	hdr = (struct ieee80211_hdr *) beacon->data;
4144 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4145 					 IEEE80211_STYPE_PROBE_RESP);
4146 	if (is_ap)
4147 		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4148 							   beacon->data,
4149 							   beacon->len,
4150 							   min_rate);
4151 	else
4152 		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4153 					      CMD_TEMPL_PROBE_RESPONSE,
4154 					      beacon->data,
4155 					      beacon->len, 0,
4156 					      min_rate);
4157 end_bcn:
4158 	dev_kfree_skb(beacon);
4159 	if (ret < 0)
4160 		goto out;
4161 
4162 out:
4163 	return ret;
4164 }
4165 
4166 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4167 					  struct ieee80211_vif *vif,
4168 					  struct ieee80211_bss_conf *bss_conf,
4169 					  u32 changed)
4170 {
4171 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4172 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4173 	int ret = 0;
4174 
4175 	if (changed & BSS_CHANGED_BEACON_INT) {
4176 		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4177 			bss_conf->beacon_int);
4178 
4179 		wlvif->beacon_int = bss_conf->beacon_int;
4180 	}
4181 
4182 	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4183 		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4184 
4185 		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4186 	}
4187 
4188 	if (changed & BSS_CHANGED_BEACON) {
4189 		ret = wlcore_set_beacon_template(wl, vif, is_ap);
4190 		if (ret < 0)
4191 			goto out;
4192 
4193 		if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4194 				       &wlvif->flags)) {
4195 			ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4196 			if (ret < 0)
4197 				goto out;
4198 		}
4199 	}
4200 out:
4201 	if (ret != 0)
4202 		wl1271_error("beacon info change failed: %d", ret);
4203 	return ret;
4204 }
4205 
4206 /* AP mode changes */
4207 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4208 				       struct ieee80211_vif *vif,
4209 				       struct ieee80211_bss_conf *bss_conf,
4210 				       u32 changed)
4211 {
4212 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4213 	int ret = 0;
4214 
4215 	if (changed & BSS_CHANGED_BASIC_RATES) {
4216 		u32 rates = bss_conf->basic_rates;
4217 
4218 		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4219 								 wlvif->band);
4220 		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4221 							wlvif->basic_rate_set);
4222 
4223 		ret = wl1271_init_ap_rates(wl, wlvif);
4224 		if (ret < 0) {
4225 			wl1271_error("AP rate policy change failed %d", ret);
4226 			goto out;
4227 		}
4228 
4229 		ret = wl1271_ap_init_templates(wl, vif);
4230 		if (ret < 0)
4231 			goto out;
4232 
4233 		/* No need to set probe resp template for mesh */
4234 		if (!ieee80211_vif_is_mesh(vif)) {
4235 			ret = wl1271_ap_set_probe_resp_tmpl(wl,
4236 							    wlvif->basic_rate,
4237 							    vif);
4238 			if (ret < 0)
4239 				goto out;
4240 		}
4241 
4242 		ret = wlcore_set_beacon_template(wl, vif, true);
4243 		if (ret < 0)
4244 			goto out;
4245 	}
4246 
4247 	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4248 	if (ret < 0)
4249 		goto out;
4250 
4251 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
4252 		if (bss_conf->enable_beacon) {
4253 			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4254 				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4255 				if (ret < 0)
4256 					goto out;
4257 
4258 				ret = wl1271_ap_init_hwenc(wl, wlvif);
4259 				if (ret < 0)
4260 					goto out;
4261 
4262 				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4263 				wl1271_debug(DEBUG_AP, "started AP");
4264 			}
4265 		} else {
4266 			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4267 				/*
4268 				 * The AP might be in ROC in case we have just
4269 				 * sent an auth reply. Handle it.
4270 				 */
4271 				if (test_bit(wlvif->role_id, wl->roc_map))
4272 					wl12xx_croc(wl, wlvif->role_id);
4273 
4274 				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4275 				if (ret < 0)
4276 					goto out;
4277 
4278 				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4279 				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4280 					  &wlvif->flags);
4281 				wl1271_debug(DEBUG_AP, "stopped AP");
4282 			}
4283 		}
4284 	}
4285 
4286 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4287 	if (ret < 0)
4288 		goto out;
4289 
4290 	/* Handle HT information change */
4291 	if ((changed & BSS_CHANGED_HT) &&
4292 	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4293 		ret = wl1271_acx_set_ht_information(wl, wlvif,
4294 					bss_conf->ht_operation_mode);
4295 		if (ret < 0) {
4296 			wl1271_warning("Set ht information failed %d", ret);
4297 			goto out;
4298 		}
4299 	}
4300 
4301 out:
4302 	return;
4303 }
4304 
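/*
 * Apply a new BSSID on a STA/IBSS vif: refresh the rate sets and
 * null-data templates, stop any scheduled scan on this vif and mark
 * the role as in use.
 */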
4305 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4306 			    struct ieee80211_bss_conf *bss_conf,
4307 			    u32 sta_rate_set)
4308 {
4309 	u32 rates;
4310 	int ret;
4311 
4312 	wl1271_debug(DEBUG_MAC80211,
4313 	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4314 	     bss_conf->bssid, bss_conf->aid,
4315 	     bss_conf->beacon_int,
4316 	     bss_conf->basic_rates, sta_rate_set);
4317 
4318 	wlvif->beacon_int = bss_conf->beacon_int;
4319 	rates = bss_conf->basic_rates;
4320 	wlvif->basic_rate_set =
4321 		wl1271_tx_enabled_rates_get(wl, rates,
4322 					    wlvif->band);
4323 	wlvif->basic_rate =
4324 		wl1271_tx_min_rate_get(wl,
4325 				       wlvif->basic_rate_set);
4326 
4327 	if (sta_rate_set)
4328 		wlvif->rate_set =
4329 			wl1271_tx_enabled_rates_get(wl,
4330 						sta_rate_set,
4331 						wlvif->band);
4332 
4333 	/* we only support sched_scan while not connected */
4334 	if (wl->sched_vif == wlvif)
4335 		wl->ops->sched_scan_stop(wl, wlvif);
4336 
4337 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4338 	if (ret < 0)
4339 		return ret;
4340 
4341 	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4342 	if (ret < 0)
4343 		return ret;
4344 
4345 	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4346 	if (ret < 0)
4347 		return ret;
4348 
4349 	wlcore_set_ssid(wl, wlvif);
4350 
4351 	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4352 
4353 	return 0;
4354 }
4355 
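/*
 * Clear the BSSID: revert to the band's minimum rates and stop the
 * STA role if it had been started.
 */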
4356 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4357 {
4358 	int ret;
4359 
4360 	/* revert back to minimum rates for the current band */
4361 	wl1271_set_band_rate(wl, wlvif);
4362 	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4363 
4364 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4365 	if (ret < 0)
4366 		return ret;
4367 
4368 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4369 	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4370 		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4371 		if (ret < 0)
4372 			return ret;
4373 	}
4374 
4375 	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4376 	return 0;
4377 }
4378 /* STA/IBSS mode changes */
4379 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4380 					struct ieee80211_vif *vif,
4381 					struct ieee80211_bss_conf *bss_conf,
4382 					u32 changed)
4383 {
4384 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4385 	bool do_join = false;
4386 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4387 	bool ibss_joined = false;
4388 	u32 sta_rate_set = 0;
4389 	int ret;
4390 	struct ieee80211_sta *sta;
4391 	bool sta_exists = false;
4392 	struct ieee80211_sta_ht_cap sta_ht_cap;
4393 
4394 	if (is_ibss) {
4395 		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4396 						     changed);
4397 		if (ret < 0)
4398 			goto out;
4399 	}
4400 
4401 	if (changed & BSS_CHANGED_IBSS) {
4402 		if (bss_conf->ibss_joined) {
4403 			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4404 			ibss_joined = true;
4405 		} else {
4406 			wlcore_unset_assoc(wl, wlvif);
4407 			wl12xx_cmd_role_stop_sta(wl, wlvif);
4408 		}
4409 	}
4410 
4411 	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4412 		do_join = true;
4413 
4414 	/* Need to update the SSID (for filtering etc) */
4415 	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4416 		do_join = true;
4417 
4418 	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4419 		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4420 			     bss_conf->enable_beacon ? "enabled" : "disabled");
4421 
4422 		do_join = true;
4423 	}
4424 
4425 	if (changed & BSS_CHANGED_IDLE && !is_ibss)
4426 		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4427 
4428 	if (changed & BSS_CHANGED_CQM) {
4429 		bool enable = false;
4430 		if (bss_conf->cqm_rssi_thold)
4431 			enable = true;
4432 		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4433 						  bss_conf->cqm_rssi_thold,
4434 						  bss_conf->cqm_rssi_hyst);
4435 		if (ret < 0)
4436 			goto out;
4437 		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4438 	}
4439 
4440 	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4441 		       BSS_CHANGED_ASSOC)) {
4442 		rcu_read_lock();
4443 		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4444 		if (sta) {
4445 			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4446 
4447 			/* save the supp_rates of the ap */
4448 			sta_rate_set = sta->supp_rates[wlvif->band];
4449 			if (sta->ht_cap.ht_supported)
4450 				sta_rate_set |=
4451 					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4452 					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4453 			sta_ht_cap = sta->ht_cap;
4454 			sta_exists = true;
4455 		}
4456 
4457 		rcu_read_unlock();
4458 	}
4459 
4460 	if (changed & BSS_CHANGED_BSSID) {
4461 		if (!is_zero_ether_addr(bss_conf->bssid)) {
4462 			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4463 					       sta_rate_set);
4464 			if (ret < 0)
4465 				goto out;
4466 
4467 			/* Need to update the BSSID (for filtering etc) */
4468 			do_join = true;
4469 		} else {
4470 			ret = wlcore_clear_bssid(wl, wlvif);
4471 			if (ret < 0)
4472 				goto out;
4473 		}
4474 	}
4475 
4476 	if (changed & BSS_CHANGED_IBSS) {
4477 		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4478 			     bss_conf->ibss_joined);
4479 
4480 		if (bss_conf->ibss_joined) {
4481 			u32 rates = bss_conf->basic_rates;
4482 			wlvif->basic_rate_set =
4483 				wl1271_tx_enabled_rates_get(wl, rates,
4484 							    wlvif->band);
4485 			wlvif->basic_rate =
4486 				wl1271_tx_min_rate_get(wl,
4487 						       wlvif->basic_rate_set);
4488 
4489 			/* by default, use 11b + OFDM rates */
4490 			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4491 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4492 			if (ret < 0)
4493 				goto out;
4494 		}
4495 	}
4496 
4497 	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4498 		/* enable beacon filtering */
4499 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4500 		if (ret < 0)
4501 			goto out;
4502 	}
4503 
4504 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4505 	if (ret < 0)
4506 		goto out;
4507 
4508 	if (do_join) {
4509 		ret = wlcore_join(wl, wlvif);
4510 		if (ret < 0) {
4511 			wl1271_warning("cmd join failed %d", ret);
4512 			goto out;
4513 		}
4514 	}
4515 
4516 	if (changed & BSS_CHANGED_ASSOC) {
4517 		if (bss_conf->assoc) {
4518 			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4519 					       sta_rate_set);
4520 			if (ret < 0)
4521 				goto out;
4522 
4523 			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4524 				wl12xx_set_authorized(wl, wlvif);
4525 		} else {
4526 			wlcore_unset_assoc(wl, wlvif);
4527 		}
4528 	}
4529 
4530 	if (changed & BSS_CHANGED_PS) {
4531 		if ((bss_conf->ps) &&
4532 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4533 		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4534 			int ps_mode;
4535 			char *ps_mode_str;
4536 
4537 			if (wl->conf.conn.forced_ps) {
4538 				ps_mode = STATION_POWER_SAVE_MODE;
4539 				ps_mode_str = "forced";
4540 			} else {
4541 				ps_mode = STATION_AUTO_PS_MODE;
4542 				ps_mode_str = "auto";
4543 			}
4544 
4545 			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4546 
4547 			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4548 			if (ret < 0)
4549 				wl1271_warning("enter %s ps failed %d",
4550 					       ps_mode_str, ret);
4551 		} else if (!bss_conf->ps &&
4552 			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4553 			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4554 
4555 			ret = wl1271_ps_set_mode(wl, wlvif,
4556 						 STATION_ACTIVE_MODE);
4557 			if (ret < 0)
4558 				wl1271_warning("exit auto ps failed %d", ret);
4559 		}
4560 	}
4561 
4562 	/* Handle new association with HT. Do this after join. */
4563 	if (sta_exists) {
4564 		bool enabled =
4565 			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4566 
4567 		ret = wlcore_hw_set_peer_cap(wl,
4568 					     &sta_ht_cap,
4569 					     enabled,
4570 					     wlvif->rate_set,
4571 					     wlvif->sta.hlid);
4572 		if (ret < 0) {
4573 			wl1271_warning("Set ht cap failed %d", ret);
4574 			goto out;
4575 
4576 		}
4577 
4578 		if (enabled) {
4579 			ret = wl1271_acx_set_ht_information(wl, wlvif,
4580 						bss_conf->ht_operation_mode);
4581 			if (ret < 0) {
4582 				wl1271_warning("Set ht information failed %d",
4583 					       ret);
4584 				goto out;
4585 			}
4586 		}
4587 	}
4588 
4589 	/* Handle arp filtering. Done after join. */
4590 	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4591 	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4592 		__be32 addr = bss_conf->arp_addr_list[0];
4593 		wlvif->sta.qos = bss_conf->qos;
4594 		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4595 
4596 		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4597 			wlvif->ip_addr = addr;
4598 			/*
4599 			 * The template should have been configured only upon
4600 			 * association. However, it seems that the correct IP
4601 			 * isn't being set (when sending), so we have to
4602 			 * reconfigure the template upon every IP change.
4603 			 */
4604 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4605 			if (ret < 0) {
4606 				wl1271_warning("build arp rsp failed: %d", ret);
4607 				goto out;
4608 			}
4609 
4610 			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4611 				(ACX_ARP_FILTER_ARP_FILTERING |
4612 				 ACX_ARP_FILTER_AUTO_ARP),
4613 				addr);
4614 		} else {
4615 			wlvif->ip_addr = 0;
4616 			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4617 		}
4618 
4619 		if (ret < 0)
4620 			goto out;
4621 	}
4622 
4623 out:
4624 	return;
4625 }
4626 
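/*
 * mac80211 bss_info_changed callback: handle tx power updates here and
 * dispatch the remaining changes to the AP- or STA-specific handler.
 */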
4627 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4628 				       struct ieee80211_vif *vif,
4629 				       struct ieee80211_bss_conf *bss_conf,
4630 				       u32 changed)
4631 {
4632 	struct wl1271 *wl = hw->priv;
4633 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4634 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4635 	int ret;
4636 
4637 	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4638 		     wlvif->role_id, (int)changed);
4639 
4640 	/*
4641 	 * make sure to cancel pending disconnections if our association
4642 	 * state changed
4643 	 */
4644 	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4645 		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4646 
4647 	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4648 	    !bss_conf->enable_beacon)
4649 		wl1271_tx_flush(wl);
4650 
4651 	mutex_lock(&wl->mutex);
4652 
4653 	if (unlikely(wl->state != WLCORE_STATE_ON))
4654 		goto out;
4655 
4656 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4657 		goto out;
4658 
4659 	ret = pm_runtime_get_sync(wl->dev);
4660 	if (ret < 0) {
4661 		pm_runtime_put_noidle(wl->dev);
4662 		goto out;
4663 	}
4664 
4665 	if ((changed & BSS_CHANGED_TXPOWER) &&
4666 	    bss_conf->txpower != wlvif->power_level) {
4667 
4668 		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4669 		if (ret < 0)
4670 			goto out;
4671 
4672 		wlvif->power_level = bss_conf->txpower;
4673 	}
4674 
4675 	if (is_ap)
4676 		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4677 	else
4678 		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4679 
4680 	pm_runtime_mark_last_busy(wl->dev);
4681 	pm_runtime_put_autosuspend(wl->dev);
4682 
4683 out:
4684 	mutex_unlock(&wl->mutex);
4685 }
4686 
4687 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4688 				 struct ieee80211_chanctx_conf *ctx)
4689 {
4690 	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4691 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4692 		     cfg80211_get_chandef_type(&ctx->def));
4693 	return 0;
4694 }
4695 
4696 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4697 				     struct ieee80211_chanctx_conf *ctx)
4698 {
4699 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4700 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4701 		     cfg80211_get_chandef_type(&ctx->def));
4702 }
4703 
4704 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4705 				     struct ieee80211_chanctx_conf *ctx,
4706 				     u32 changed)
4707 {
4708 	struct wl1271 *wl = hw->priv;
4709 	struct wl12xx_vif *wlvif;
4710 	int ret;
4711 	int channel = ieee80211_frequency_to_channel(
4712 		ctx->def.chan->center_freq);
4713 
4714 	wl1271_debug(DEBUG_MAC80211,
4715 		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4716 		     channel, cfg80211_get_chandef_type(&ctx->def), changed);
4717 
4718 	mutex_lock(&wl->mutex);
4719 
4720 	ret = pm_runtime_get_sync(wl->dev);
4721 	if (ret < 0) {
4722 		pm_runtime_put_noidle(wl->dev);
4723 		goto out;
4724 	}
4725 
4726 	wl12xx_for_each_wlvif(wl, wlvif) {
4727 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4728 
4729 		rcu_read_lock();
4730 		if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4731 			rcu_read_unlock();
4732 			continue;
4733 		}
4734 		rcu_read_unlock();
4735 
4736 		/* start radar if needed */
4737 		if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4738 		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
4739 		    ctx->radar_enabled && !wlvif->radar_enabled &&
4740 		    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4741 			wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4742 			wlcore_hw_set_cac(wl, wlvif, true);
4743 			wlvif->radar_enabled = true;
4744 		}
4745 	}
4746 
4747 	pm_runtime_mark_last_busy(wl->dev);
4748 	pm_runtime_put_autosuspend(wl->dev);
4749 out:
4750 	mutex_unlock(&wl->mutex);
4751 }
4752 
4753 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4754 					struct ieee80211_vif *vif,
4755 					struct ieee80211_chanctx_conf *ctx)
4756 {
4757 	struct wl1271 *wl = hw->priv;
4758 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4759 	int channel = ieee80211_frequency_to_channel(
4760 		ctx->def.chan->center_freq);
4761 	int ret = -EINVAL;
4762 
4763 	wl1271_debug(DEBUG_MAC80211,
4764 		     "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4765 		     wlvif->role_id, channel,
4766 		     cfg80211_get_chandef_type(&ctx->def),
4767 		     ctx->radar_enabled, ctx->def.chan->dfs_state);
4768 
4769 	mutex_lock(&wl->mutex);
4770 
4771 	if (unlikely(wl->state != WLCORE_STATE_ON))
4772 		goto out;
4773 
4774 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4775 		goto out;
4776 
4777 	ret = pm_runtime_get_sync(wl->dev);
4778 	if (ret < 0) {
4779 		pm_runtime_put_noidle(wl->dev);
4780 		goto out;
4781 	}
4782 
4783 	wlvif->band = ctx->def.chan->band;
4784 	wlvif->channel = channel;
4785 	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4786 
4787 	/* update default rates according to the band */
4788 	wl1271_set_band_rate(wl, wlvif);
4789 
4790 	if (ctx->radar_enabled &&
4791 	    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4792 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4793 		wlcore_hw_set_cac(wl, wlvif, true);
4794 		wlvif->radar_enabled = true;
4795 	}
4796 
4797 	pm_runtime_mark_last_busy(wl->dev);
4798 	pm_runtime_put_autosuspend(wl->dev);
4799 out:
4800 	mutex_unlock(&wl->mutex);
4801 
4802 	return 0;
4803 }
4804 
4805 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4806 					   struct ieee80211_vif *vif,
4807 					   struct ieee80211_chanctx_conf *ctx)
4808 {
4809 	struct wl1271 *wl = hw->priv;
4810 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4811 	int ret;
4812 
4813 	wl1271_debug(DEBUG_MAC80211,
4814 		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4815 		     wlvif->role_id,
4816 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4817 		     cfg80211_get_chandef_type(&ctx->def));
4818 
4819 	wl1271_tx_flush(wl);
4820 
4821 	mutex_lock(&wl->mutex);
4822 
4823 	if (unlikely(wl->state != WLCORE_STATE_ON))
4824 		goto out;
4825 
4826 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4827 		goto out;
4828 
4829 	ret = pm_runtime_get_sync(wl->dev);
4830 	if (ret < 0) {
4831 		pm_runtime_put_noidle(wl->dev);
4832 		goto out;
4833 	}
4834 
4835 	if (wlvif->radar_enabled) {
4836 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4837 		wlcore_hw_set_cac(wl, wlvif, false);
4838 		wlvif->radar_enabled = false;
4839 	}
4840 
4841 	pm_runtime_mark_last_busy(wl->dev);
4842 	pm_runtime_put_autosuspend(wl->dev);
4843 out:
4844 	mutex_unlock(&wl->mutex);
4845 }
4846 
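/*
 * Move an AP role to a new channel context: stop radar detection on the
 * old channel, record the new band/channel and restart CAC if the new
 * context has radar enabled. Beaconing is expected to be disabled here.
 */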
4847 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4848 				    struct wl12xx_vif *wlvif,
4849 				    struct ieee80211_chanctx_conf *new_ctx)
4850 {
4851 	int channel = ieee80211_frequency_to_channel(
4852 		new_ctx->def.chan->center_freq);
4853 
4854 	wl1271_debug(DEBUG_MAC80211,
4855 		     "switch vif (role %d) %d -> %d chan_type: %d",
4856 		     wlvif->role_id, wlvif->channel, channel,
4857 		     cfg80211_get_chandef_type(&new_ctx->def));
4858 
4859 	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4860 		return 0;
4861 
4862 	WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4863 
4864 	if (wlvif->radar_enabled) {
4865 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4866 		wlcore_hw_set_cac(wl, wlvif, false);
4867 		wlvif->radar_enabled = false;
4868 	}
4869 
4870 	wlvif->band = new_ctx->def.chan->band;
4871 	wlvif->channel = channel;
4872 	wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4873 
4874 	/* start radar if needed */
4875 	if (new_ctx->radar_enabled) {
4876 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4877 		wlcore_hw_set_cac(wl, wlvif, true);
4878 		wlvif->radar_enabled = true;
4879 	}
4880 
4881 	return 0;
4882 }
4883 
4884 static int
4885 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4886 			     struct ieee80211_vif_chanctx_switch *vifs,
4887 			     int n_vifs,
4888 			     enum ieee80211_chanctx_switch_mode mode)
4889 {
4890 	struct wl1271 *wl = hw->priv;
4891 	int i, ret;
4892 
4893 	wl1271_debug(DEBUG_MAC80211,
4894 		     "mac80211 switch chanctx n_vifs %d mode %d",
4895 		     n_vifs, mode);
4896 
4897 	mutex_lock(&wl->mutex);
4898 
4899 	ret = pm_runtime_get_sync(wl->dev);
4900 	if (ret < 0) {
4901 		pm_runtime_put_noidle(wl->dev);
4902 		goto out;
4903 	}
4904 
4905 	for (i = 0; i < n_vifs; i++) {
4906 		struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4907 
4908 		ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4909 		if (ret)
4910 			goto out_sleep;
4911 	}
4912 out_sleep:
4913 	pm_runtime_mark_last_busy(wl->dev);
4914 	pm_runtime_put_autosuspend(wl->dev);
4915 out:
4916 	mutex_unlock(&wl->mutex);
4917 
4918 	return 0;
4919 }
4920 
4921 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4922 			     struct ieee80211_vif *vif, u16 queue,
4923 			     const struct ieee80211_tx_queue_params *params)
4924 {
4925 	struct wl1271 *wl = hw->priv;
4926 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4927 	u8 ps_scheme;
4928 	int ret = 0;
4929 
4930 	if (wlcore_is_p2p_mgmt(wlvif))
4931 		return 0;
4932 
4933 	mutex_lock(&wl->mutex);
4934 
4935 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4936 
4937 	if (params->uapsd)
4938 		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4939 	else
4940 		ps_scheme = CONF_PS_SCHEME_LEGACY;
4941 
4942 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4943 		goto out;
4944 
4945 	ret = pm_runtime_get_sync(wl->dev);
4946 	if (ret < 0) {
4947 		pm_runtime_put_noidle(wl->dev);
4948 		goto out;
4949 	}
4950 
4951 	/*
4952 	 * the txop is configured in units of 32us by mac80211, but the
4953 	 * firmware expects microseconds, hence the << 5 below
4954 	 */
4955 	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4956 				params->cw_min, params->cw_max,
4957 				params->aifs, params->txop << 5);
4958 	if (ret < 0)
4959 		goto out_sleep;
4960 
4961 	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4962 				 CONF_CHANNEL_TYPE_EDCF,
4963 				 wl1271_tx_get_queue(queue),
4964 				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4965 				 0, 0);
4966 
4967 out_sleep:
4968 	pm_runtime_mark_last_busy(wl->dev);
4969 	pm_runtime_put_autosuspend(wl->dev);
4970 
4971 out:
4972 	mutex_unlock(&wl->mutex);
4973 
4974 	return ret;
4975 }
4976 
4977 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4978 			     struct ieee80211_vif *vif)
4979 {
4980 
4981 	struct wl1271 *wl = hw->priv;
4982 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4983 	u64 mactime = ULLONG_MAX;
4984 	int ret;
4985 
4986 	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4987 
4988 	mutex_lock(&wl->mutex);
4989 
4990 	if (unlikely(wl->state != WLCORE_STATE_ON))
4991 		goto out;
4992 
4993 	ret = pm_runtime_get_sync(wl->dev);
4994 	if (ret < 0) {
4995 		pm_runtime_put_noidle(wl->dev);
4996 		goto out;
4997 	}
4998 
4999 	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
5000 	if (ret < 0)
5001 		goto out_sleep;
5002 
5003 out_sleep:
5004 	pm_runtime_mark_last_busy(wl->dev);
5005 	pm_runtime_put_autosuspend(wl->dev);
5006 
5007 out:
5008 	mutex_unlock(&wl->mutex);
5009 	return mactime;
5010 }
5011 
5012 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
5013 				struct survey_info *survey)
5014 {
5015 	struct ieee80211_conf *conf = &hw->conf;
5016 
5017 	if (idx != 0)
5018 		return -ENOENT;
5019 
5020 	survey->channel = conf->chandef.chan;
5021 	survey->filled = 0;
5022 	return 0;
5023 }
5024 
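/*
 * Allocate a firmware link (HLID) for a new station in AP mode and
 * restore its security sequence counter in case this is a
 * recovery/resume.
 */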
5025 static int wl1271_allocate_sta(struct wl1271 *wl,
5026 			     struct wl12xx_vif *wlvif,
5027 			     struct ieee80211_sta *sta)
5028 {
5029 	struct wl1271_station *wl_sta;
5030 	int ret;
5031 
5032 
5033 	if (wl->active_sta_count >= wl->max_ap_stations) {
5034 		wl1271_warning("could not allocate HLID - too many stations");
5035 		return -EBUSY;
5036 	}
5037 
5038 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5039 	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
5040 	if (ret < 0) {
5041 		wl1271_warning("could not allocate HLID - too many links");
5042 		return -EBUSY;
5043 	}
5044 
5045 	/* use the previous security seq, if this is a recovery/resume */
5046 	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
5047 
5048 	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
5049 	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
5050 	wl->active_sta_count++;
5051 	return 0;
5052 }
5053 
5054 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
5055 {
5056 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
5057 		return;
5058 
5059 	clear_bit(hlid, wlvif->ap.sta_hlid_map);
5060 	__clear_bit(hlid, &wl->ap_ps_map);
5061 	__clear_bit(hlid, &wl->ap_fw_ps_map);
5062 
5063 	/*
5064 	 * save the last used PN in the private part of ieee80211_sta,
5065 	 * in case of recovery/suspend
5066 	 */
5067 	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5068 
5069 	wl12xx_free_link(wl, wlvif, &hlid);
5070 	wl->active_sta_count--;
5071 
5072 	/*
5073 	 * rearm the tx watchdog when the last STA is freed - give the FW a
5074 	 * chance to return STA-buffered packets before complaining.
5075 	 */
5076 	if (wl->active_sta_count == 0)
5077 		wl12xx_rearm_tx_watchdog_locked(wl);
5078 }
5079 
5080 static int wl12xx_sta_add(struct wl1271 *wl,
5081 			  struct wl12xx_vif *wlvif,
5082 			  struct ieee80211_sta *sta)
5083 {
5084 	struct wl1271_station *wl_sta;
5085 	int ret = 0;
5086 	u8 hlid;
5087 
5088 	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5089 
5090 	ret = wl1271_allocate_sta(wl, wlvif, sta);
5091 	if (ret < 0)
5092 		return ret;
5093 
5094 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5095 	hlid = wl_sta->hlid;
5096 
5097 	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5098 	if (ret < 0)
5099 		wl1271_free_sta(wl, wlvif, hlid);
5100 
5101 	return ret;
5102 }
5103 
5104 static int wl12xx_sta_remove(struct wl1271 *wl,
5105 			     struct wl12xx_vif *wlvif,
5106 			     struct ieee80211_sta *sta)
5107 {
5108 	struct wl1271_station *wl_sta;
5109 	int ret = 0, id;
5110 
5111 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5112 
5113 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5114 	id = wl_sta->hlid;
5115 	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5116 		return -EINVAL;
5117 
5118 	ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5119 	if (ret < 0)
5120 		return ret;
5121 
5122 	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5123 	return ret;
5124 }
5125 
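/* ROC on the vif's own channel, unless some role is already in ROC */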
5126 static void wlcore_roc_if_possible(struct wl1271 *wl,
5127 				   struct wl12xx_vif *wlvif)
5128 {
5129 	if (find_first_bit(wl->roc_map,
5130 			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5131 		return;
5132 
5133 	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5134 		return;
5135 
5136 	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5137 }
5138 
5139 /*
5140  * when wl_sta is NULL, we treat this call as if coming from a
5141  * pending auth reply.
5142  * wl->mutex must be taken and the FW must be awake when the call
5143  * takes place.
5144  */
5145 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5146 			      struct wl1271_station *wl_sta, bool in_conn)
5147 {
5148 	if (in_conn) {
5149 		if (WARN_ON(wl_sta && wl_sta->in_connection))
5150 			return;
5151 
5152 		if (!wlvif->ap_pending_auth_reply &&
5153 		    !wlvif->inconn_count)
5154 			wlcore_roc_if_possible(wl, wlvif);
5155 
5156 		if (wl_sta) {
5157 			wl_sta->in_connection = true;
5158 			wlvif->inconn_count++;
5159 		} else {
5160 			wlvif->ap_pending_auth_reply = true;
5161 		}
5162 	} else {
5163 		if (wl_sta && !wl_sta->in_connection)
5164 			return;
5165 
5166 		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5167 			return;
5168 
5169 		if (WARN_ON(wl_sta && !wlvif->inconn_count))
5170 			return;
5171 
5172 		if (wl_sta) {
5173 			wl_sta->in_connection = false;
5174 			wlvif->inconn_count--;
5175 		} else {
5176 			wlvif->ap_pending_auth_reply = false;
5177 		}
5178 
5179 		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5180 		    test_bit(wlvif->role_id, wl->roc_map))
5181 			wl12xx_croc(wl, wlvif->role_id);
5182 	}
5183 }
5184 
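/*
 * Translate mac80211 station state transitions into firmware peer
 * commands: add/remove/authorize peers in AP mode, and handle
 * authorization, sequence-number save/restore and ROC in STA mode.
 */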
5185 static int wl12xx_update_sta_state(struct wl1271 *wl,
5186 				   struct wl12xx_vif *wlvif,
5187 				   struct ieee80211_sta *sta,
5188 				   enum ieee80211_sta_state old_state,
5189 				   enum ieee80211_sta_state new_state)
5190 {
5191 	struct wl1271_station *wl_sta;
5192 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5193 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5194 	int ret;
5195 
5196 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5197 
5198 	/* Add station (AP mode) */
5199 	if (is_ap &&
5200 	    old_state == IEEE80211_STA_NOTEXIST &&
5201 	    new_state == IEEE80211_STA_NONE) {
5202 		ret = wl12xx_sta_add(wl, wlvif, sta);
5203 		if (ret)
5204 			return ret;
5205 
5206 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5207 	}
5208 
5209 	/* Remove station (AP mode) */
5210 	if (is_ap &&
5211 	    old_state == IEEE80211_STA_NONE &&
5212 	    new_state == IEEE80211_STA_NOTEXIST) {
5213 		/* must not fail */
5214 		wl12xx_sta_remove(wl, wlvif, sta);
5215 
5216 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5217 	}
5218 
5219 	/* Authorize station (AP mode) */
5220 	if (is_ap &&
5221 	    new_state == IEEE80211_STA_AUTHORIZED) {
5222 		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5223 		if (ret < 0)
5224 			return ret;
5225 
5226 		/* reconfigure rates */
5227 		ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5228 		if (ret < 0)
5229 			return ret;
5230 
5231 		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5232 						     wl_sta->hlid);
5233 		if (ret)
5234 			return ret;
5235 
5236 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5237 	}
5238 
5239 	/* Authorize station */
5240 	if (is_sta &&
5241 	    new_state == IEEE80211_STA_AUTHORIZED) {
5242 		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5243 		ret = wl12xx_set_authorized(wl, wlvif);
5244 		if (ret)
5245 			return ret;
5246 	}
5247 
5248 	if (is_sta &&
5249 	    old_state == IEEE80211_STA_AUTHORIZED &&
5250 	    new_state == IEEE80211_STA_ASSOC) {
5251 		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5252 		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5253 	}
5254 
5255 	/* save seq number on disassoc (suspend) */
5256 	if (is_sta &&
5257 	    old_state == IEEE80211_STA_ASSOC &&
5258 	    new_state == IEEE80211_STA_AUTH) {
5259 		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5260 		wlvif->total_freed_pkts = 0;
5261 	}
5262 
5263 	/* restore seq number on assoc (resume) */
5264 	if (is_sta &&
5265 	    old_state == IEEE80211_STA_AUTH &&
5266 	    new_state == IEEE80211_STA_ASSOC) {
5267 		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5268 	}
5269 
5270 	/* clear ROCs on failure or authorization */
5271 	if (is_sta &&
5272 	    (new_state == IEEE80211_STA_AUTHORIZED ||
5273 	     new_state == IEEE80211_STA_NOTEXIST)) {
5274 		if (test_bit(wlvif->role_id, wl->roc_map))
5275 			wl12xx_croc(wl, wlvif->role_id);
5276 	}
5277 
5278 	if (is_sta &&
5279 	    old_state == IEEE80211_STA_NOTEXIST &&
5280 	    new_state == IEEE80211_STA_NONE) {
5281 		if (find_first_bit(wl->roc_map,
5282 				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5283 			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5284 			wl12xx_roc(wl, wlvif, wlvif->role_id,
5285 				   wlvif->band, wlvif->channel);
5286 		}
5287 	}
5288 	return 0;
5289 }
5290 
5291 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5292 			       struct ieee80211_vif *vif,
5293 			       struct ieee80211_sta *sta,
5294 			       enum ieee80211_sta_state old_state,
5295 			       enum ieee80211_sta_state new_state)
5296 {
5297 	struct wl1271 *wl = hw->priv;
5298 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5299 	int ret;
5300 
5301 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5302 		     sta->aid, old_state, new_state);
5303 
5304 	mutex_lock(&wl->mutex);
5305 
5306 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5307 		ret = -EBUSY;
5308 		goto out;
5309 	}
5310 
5311 	ret = pm_runtime_get_sync(wl->dev);
5312 	if (ret < 0) {
5313 		pm_runtime_put_noidle(wl->dev);
5314 		goto out;
5315 	}
5316 
5317 	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5318 
5319 	pm_runtime_mark_last_busy(wl->dev);
5320 	pm_runtime_put_autosuspend(wl->dev);
5321 out:
5322 	mutex_unlock(&wl->mutex);
5323 	if (new_state < old_state)
5324 		return 0;
5325 	return ret;
5326 }
5327 
5328 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5329 				  struct ieee80211_vif *vif,
5330 				  struct ieee80211_ampdu_params *params)
5331 {
5332 	struct wl1271 *wl = hw->priv;
5333 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5334 	int ret;
5335 	u8 hlid, *ba_bitmap;
5336 	struct ieee80211_sta *sta = params->sta;
5337 	enum ieee80211_ampdu_mlme_action action = params->action;
5338 	u16 tid = params->tid;
5339 	u16 *ssn = &params->ssn;
5340 
5341 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5342 		     tid);
5343 
5344 	/* sanity check - the fields in FW are only 8bits wide */
5345 	/* sanity check - the fields in FW are only 8 bits wide */
5346 		return -ENOTSUPP;
5347 
5348 	mutex_lock(&wl->mutex);
5349 
5350 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5351 		ret = -EAGAIN;
5352 		goto out;
5353 	}
5354 
5355 	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5356 		hlid = wlvif->sta.hlid;
5357 	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5358 		struct wl1271_station *wl_sta;
5359 
5360 		wl_sta = (struct wl1271_station *)sta->drv_priv;
5361 		hlid = wl_sta->hlid;
5362 	} else {
5363 		ret = -EINVAL;
5364 		goto out;
5365 	}
5366 
5367 	ba_bitmap = &wl->links[hlid].ba_bitmap;
5368 
5369 	ret = pm_runtime_get_sync(wl->dev);
5370 	if (ret < 0) {
5371 		pm_runtime_put_noidle(wl->dev);
5372 		goto out;
5373 	}
5374 
5375 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5376 		     tid, action);
5377 
5378 	switch (action) {
5379 	case IEEE80211_AMPDU_RX_START:
5380 		if (!wlvif->ba_support || !wlvif->ba_allowed) {
5381 			ret = -ENOTSUPP;
5382 			break;
5383 		}
5384 
5385 		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5386 			ret = -EBUSY;
5387 			wl1271_error("exceeded max RX BA sessions");
5388 			break;
5389 		}
5390 
5391 		if (*ba_bitmap & BIT(tid)) {
5392 			ret = -EINVAL;
5393 			wl1271_error("cannot enable RX BA session on active "
5394 				     "tid: %d", tid);
5395 			break;
5396 		}
5397 
5398 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5399 				hlid,
5400 				params->buf_size);
5401 
5402 		if (!ret) {
5403 			*ba_bitmap |= BIT(tid);
5404 			wl->ba_rx_session_count++;
5405 		}
5406 		break;
5407 
5408 	case IEEE80211_AMPDU_RX_STOP:
5409 		if (!(*ba_bitmap & BIT(tid))) {
5410 			/*
5411 			 * this happens on reconfig - so only output a debug
5412 			 * message for now, and don't fail the function.
5413 			 */
5414 			wl1271_debug(DEBUG_MAC80211,
5415 				     "no active RX BA session on tid: %d",
5416 				     tid);
5417 			ret = 0;
5418 			break;
5419 		}
5420 
5421 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5422 							 hlid, 0);
5423 		if (!ret) {
5424 			*ba_bitmap &= ~BIT(tid);
5425 			wl->ba_rx_session_count--;
5426 		}
5427 		break;
5428 
5429 	/*
5430 	 * The BA initiator session is managed by the FW independently.
5431 	 * Fall through here on purpose for all TX AMPDU commands.
5432 	 */
5433 	case IEEE80211_AMPDU_TX_START:
5434 	case IEEE80211_AMPDU_TX_STOP_CONT:
5435 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
5436 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5437 	case IEEE80211_AMPDU_TX_OPERATIONAL:
5438 		ret = -EINVAL;
5439 		break;
5440 
5441 	default:
5442 		wl1271_error("Incorrect ampdu action id=%x\n", action);
5443 		ret = -EINVAL;
5444 	}
5445 
5446 	pm_runtime_mark_last_busy(wl->dev);
5447 	pm_runtime_put_autosuspend(wl->dev);
5448 
5449 out:
5450 	mutex_unlock(&wl->mutex);
5451 
5452 	return ret;
5453 }
5454 
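/*
 * Store the per-band legacy bitrate masks; for an unassociated STA the
 * resulting rate policies are pushed to the firmware right away.
 */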
5455 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5456 				   struct ieee80211_vif *vif,
5457 				   const struct cfg80211_bitrate_mask *mask)
5458 {
5459 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5460 	struct wl1271 *wl = hw->priv;
5461 	int i, ret = 0;
5462 
5463 	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5464 		mask->control[NL80211_BAND_2GHZ].legacy,
5465 		mask->control[NL80211_BAND_5GHZ].legacy);
5466 
5467 	mutex_lock(&wl->mutex);
5468 
5469 	for (i = 0; i < WLCORE_NUM_BANDS; i++)
5470 		wlvif->bitrate_masks[i] =
5471 			wl1271_tx_enabled_rates_get(wl,
5472 						    mask->control[i].legacy,
5473 						    i);
5474 
5475 	if (unlikely(wl->state != WLCORE_STATE_ON))
5476 		goto out;
5477 
5478 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5479 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5480 
5481 		ret = pm_runtime_get_sync(wl->dev);
5482 		if (ret < 0) {
5483 			pm_runtime_put_noidle(wl->dev);
5484 			goto out;
5485 		}
5486 
5487 		wl1271_set_band_rate(wl, wlvif);
5488 		wlvif->basic_rate =
5489 			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5490 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5491 
5492 		pm_runtime_mark_last_busy(wl->dev);
5493 		pm_runtime_put_autosuspend(wl->dev);
5494 	}
5495 out:
5496 	mutex_unlock(&wl->mutex);
5497 
5498 	return ret;
5499 }
5500 
5501 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5502 				     struct ieee80211_vif *vif,
5503 				     struct ieee80211_channel_switch *ch_switch)
5504 {
5505 	struct wl1271 *wl = hw->priv;
5506 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5507 	int ret;
5508 
5509 	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5510 
5511 	wl1271_tx_flush(wl);
5512 
5513 	mutex_lock(&wl->mutex);
5514 
5515 	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5516 		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5517 			ieee80211_chswitch_done(vif, false);
5518 		goto out;
5519 	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5520 		goto out;
5521 	}
5522 
5523 	ret = pm_runtime_get_sync(wl->dev);
5524 	if (ret < 0) {
5525 		pm_runtime_put_noidle(wl->dev);
5526 		goto out;
5527 	}
5528 
5529 	/* TODO: change mac80211 to pass vif as param */
5530 
5531 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5532 		unsigned long delay_usec;
5533 
5534 		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5535 		if (ret)
5536 			goto out_sleep;
5537 
5538 		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5539 
5540 		/* indicate failure 5 seconds after channel switch time */
5541 		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5542 			ch_switch->count;
5543 		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5544 					     usecs_to_jiffies(delay_usec) +
5545 					     msecs_to_jiffies(5000));
5546 	}
5547 
5548 out_sleep:
5549 	pm_runtime_mark_last_busy(wl->dev);
5550 	pm_runtime_put_autosuspend(wl->dev);
5551 
5552 out:
5553 	mutex_unlock(&wl->mutex);
5554 }
5555 
5556 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5557 					struct wl12xx_vif *wlvif,
5558 					u8 eid)
5559 {
5560 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5561 	struct sk_buff *beacon =
5562 		ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5563 
5564 	if (!beacon)
5565 		return NULL;
5566 
5567 	return cfg80211_find_ie(eid,
5568 				beacon->data + ieoffset,
5569 				beacon->len - ieoffset);
5570 }
5571 
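/* read the CSA countdown from the Channel Switch IE of our own beacon */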
5572 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5573 				u8 *csa_count)
5574 {
5575 	const u8 *ie;
5576 	const struct ieee80211_channel_sw_ie *ie_csa;
5577 
5578 	ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5579 	if (!ie)
5580 		return -EINVAL;
5581 
5582 	ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5583 	*csa_count = ie_csa->count;
5584 
5585 	return 0;
5586 }
5587 
5588 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5589 					    struct ieee80211_vif *vif,
5590 					    struct cfg80211_chan_def *chandef)
5591 {
5592 	struct wl1271 *wl = hw->priv;
5593 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5594 	struct ieee80211_channel_switch ch_switch = {
5595 		.block_tx = true,
5596 		.chandef = *chandef,
5597 	};
5598 	int ret;
5599 
5600 	wl1271_debug(DEBUG_MAC80211,
5601 		     "mac80211 channel switch beacon (role %d)",
5602 		     wlvif->role_id);
5603 
5604 	ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5605 	if (ret < 0) {
5606 		wl1271_error("error getting beacon (for CSA counter)");
5607 		return;
5608 	}
5609 
5610 	mutex_lock(&wl->mutex);
5611 
5612 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5613 		ret = -EBUSY;
5614 		goto out;
5615 	}
5616 
5617 	ret = pm_runtime_get_sync(wl->dev);
5618 	if (ret < 0) {
5619 		pm_runtime_put_noidle(wl->dev);
5620 		goto out;
5621 	}
5622 
5623 	ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5624 	if (ret)
5625 		goto out_sleep;
5626 
5627 	set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5628 
5629 out_sleep:
5630 	pm_runtime_mark_last_busy(wl->dev);
5631 	pm_runtime_put_autosuspend(wl->dev);
5632 out:
5633 	mutex_unlock(&wl->mutex);
5634 }
5635 
5636 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5637 			    u32 queues, bool drop)
5638 {
5639 	struct wl1271 *wl = hw->priv;
5640 
5641 	wl1271_tx_flush(wl);
5642 }
5643 
5644 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5645 				       struct ieee80211_vif *vif,
5646 				       struct ieee80211_channel *chan,
5647 				       int duration,
5648 				       enum ieee80211_roc_type type)
5649 {
5650 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5651 	struct wl1271 *wl = hw->priv;
5652 	int channel, active_roc, ret = 0;
5653 
5654 	channel = ieee80211_frequency_to_channel(chan->center_freq);
5655 
5656 	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5657 		     channel, wlvif->role_id);
5658 
5659 	mutex_lock(&wl->mutex);
5660 
5661 	if (unlikely(wl->state != WLCORE_STATE_ON))
5662 		goto out;
5663 
5664 	/* return EBUSY if we can't ROC right now */
5665 	active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5666 	if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5667 		wl1271_warning("active roc on role %d", active_roc);
5668 		ret = -EBUSY;
5669 		goto out;
5670 	}
5671 
5672 	ret = pm_runtime_get_sync(wl->dev);
5673 	if (ret < 0) {
5674 		pm_runtime_put_noidle(wl->dev);
5675 		goto out;
5676 	}
5677 
5678 	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5679 	if (ret < 0)
5680 		goto out_sleep;
5681 
5682 	wl->roc_vif = vif;
5683 	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5684 				     msecs_to_jiffies(duration));
5685 out_sleep:
5686 	pm_runtime_mark_last_busy(wl->dev);
5687 	pm_runtime_put_autosuspend(wl->dev);
5688 out:
5689 	mutex_unlock(&wl->mutex);
5690 	return ret;
5691 }
5692 
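/*
 * Tear down the device role used for the current remain-on-channel, if
 * any. Called under wl->mutex with the chip awake (see
 * wlcore_roc_completed()).
 */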
5693 static int __wlcore_roc_completed(struct wl1271 *wl)
5694 {
5695 	struct wl12xx_vif *wlvif;
5696 	int ret;
5697 
5698 	/* already completed */
5699 	if (unlikely(!wl->roc_vif))
5700 		return 0;
5701 
5702 	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5703 
5704 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5705 		return -EBUSY;
5706 
5707 	ret = wl12xx_stop_dev(wl, wlvif);
5708 	if (ret < 0)
5709 		return ret;
5710 
5711 	wl->roc_vif = NULL;
5712 
5713 	return 0;
5714 }
5715 
5716 static int wlcore_roc_completed(struct wl1271 *wl)
5717 {
5718 	int ret;
5719 
5720 	wl1271_debug(DEBUG_MAC80211, "roc complete");
5721 
5722 	mutex_lock(&wl->mutex);
5723 
5724 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5725 		ret = -EBUSY;
5726 		goto out;
5727 	}
5728 
5729 	ret = pm_runtime_get_sync(wl->dev);
5730 	if (ret < 0) {
5731 		pm_runtime_put_noidle(wl->dev);
5732 		goto out;
5733 	}
5734 
5735 	ret = __wlcore_roc_completed(wl);
5736 
5737 	pm_runtime_mark_last_busy(wl->dev);
5738 	pm_runtime_put_autosuspend(wl->dev);
5739 out:
5740 	mutex_unlock(&wl->mutex);
5741 
5742 	return ret;
5743 }
5744 
5745 static void wlcore_roc_complete_work(struct work_struct *work)
5746 {
5747 	struct delayed_work *dwork;
5748 	struct wl1271 *wl;
5749 	int ret;
5750 
5751 	dwork = to_delayed_work(work);
5752 	wl = container_of(dwork, struct wl1271, roc_complete_work);
5753 
5754 	ret = wlcore_roc_completed(wl);
5755 	if (!ret)
5756 		ieee80211_remain_on_channel_expired(wl->hw);
5757 }
5758 
5759 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
5760 					      struct ieee80211_vif *vif)
5761 {
5762 	struct wl1271 *wl = hw->priv;
5763 
5764 	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5765 
5766 	/* TODO: per-vif */
5767 	wl1271_tx_flush(wl);
5768 
5769 	/*
5770 	 * we can't just flush_work here, because it might deadlock
5771 	 * (as we might get called from the same workqueue)
5772 	 */
5773 	cancel_delayed_work_sync(&wl->roc_complete_work);
5774 	wlcore_roc_completed(wl);
5775 
5776 	return 0;
5777 }
5778 
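/*
 * mac80211 sta_rc_update handler: runs in atomic context, so bandwidth
 * changes are deferred to rc_update_work.
 */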
5779 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5780 				    struct ieee80211_vif *vif,
5781 				    struct ieee80211_sta *sta,
5782 				    u32 changed)
5783 {
5784 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5785 
5786 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5787 
5788 	if (!(changed & IEEE80211_RC_BW_CHANGED))
5789 		return;
5790 
5791 	/* this callback is atomic, so schedule a new work */
5792 	wlvif->rc_update_bw = sta->bandwidth;
5793 	memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5794 	ieee80211_queue_work(hw, &wlvif->rc_update_work);
5795 }
5796 
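/*
 * mac80211 sta_statistics handler: report the firmware's averaged RSSI
 * as the station's signal level.
 */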
5797 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5798 				     struct ieee80211_vif *vif,
5799 				     struct ieee80211_sta *sta,
5800 				     struct station_info *sinfo)
5801 {
5802 	struct wl1271 *wl = hw->priv;
5803 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5804 	s8 rssi_dbm;
5805 	int ret;
5806 
5807 	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5808 
5809 	mutex_lock(&wl->mutex);
5810 
5811 	if (unlikely(wl->state != WLCORE_STATE_ON))
5812 		goto out;
5813 
5814 	ret = pm_runtime_get_sync(wl->dev);
5815 	if (ret < 0) {
5816 		pm_runtime_put_noidle(wl->dev);
5817 		goto out;
5818 	}
5819 
5820 	ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5821 	if (ret < 0)
5822 		goto out_sleep;
5823 
5824 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
5825 	sinfo->signal = rssi_dbm;
5826 
5827 out_sleep:
5828 	pm_runtime_mark_last_busy(wl->dev);
5829 	pm_runtime_put_autosuspend(wl->dev);
5830 
5831 out:
5832 	mutex_unlock(&wl->mutex);
5833 }
5834 
5835 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5836 					     struct ieee80211_sta *sta)
5837 {
5838 	struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5839 	struct wl1271 *wl = hw->priv;
5840 	u8 hlid = wl_sta->hlid;
5841 
5842 	/* return in units of Kbps */
5843 	return (wl->links[hlid].fw_rate_mbps * 1000);
5844 }
5845 
5846 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
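/* report whether any frames are still pending in the driver queues or the FW */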
5847 {
5848 	struct wl1271 *wl = hw->priv;
5849 	bool ret = false;
5850 
5851 	mutex_lock(&wl->mutex);
5852 
5853 	if (unlikely(wl->state != WLCORE_STATE_ON))
5854 		goto out;
5855 
5856 	/* packets are considered pending if in the TX queue or the FW */
5857 	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5858 out:
5859 	mutex_unlock(&wl->mutex);
5860 
5861 	return ret;
5862 }
5863 
5864 /* can't be const, mac80211 writes to this */
5865 static struct ieee80211_rate wl1271_rates[] = {
5866 	{ .bitrate = 10,
5867 	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5868 	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5869 	{ .bitrate = 20,
5870 	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5871 	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5872 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5873 	{ .bitrate = 55,
5874 	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5875 	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5876 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5877 	{ .bitrate = 110,
5878 	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5879 	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5880 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5881 	{ .bitrate = 60,
5882 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5883 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5884 	{ .bitrate = 90,
5885 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5886 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5887 	{ .bitrate = 120,
5888 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5889 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5890 	{ .bitrate = 180,
5891 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5892 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5893 	{ .bitrate = 240,
5894 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5895 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5896 	{ .bitrate = 360,
5897 	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5898 	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5899 	{ .bitrate = 480,
5900 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5901 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5902 	{ .bitrate = 540,
5903 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5904 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5905 };
5906 
5907 /* can't be const, mac80211 writes to this */
5908 static struct ieee80211_channel wl1271_channels[] = {
5909 	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5910 	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5911 	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5912 	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5913 	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5914 	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5915 	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5916 	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5917 	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5918 	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5919 	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5920 	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5921 	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5922 	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5923 };
5924 
5925 /* can't be const, mac80211 writes to this */
5926 static struct ieee80211_supported_band wl1271_band_2ghz = {
5927 	.channels = wl1271_channels,
5928 	.n_channels = ARRAY_SIZE(wl1271_channels),
5929 	.bitrates = wl1271_rates,
5930 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5931 };
5932 
5933 /* 5 GHz data rates for WL1273 */
5934 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5935 	{ .bitrate = 60,
5936 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5937 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5938 	{ .bitrate = 90,
5939 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5940 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5941 	{ .bitrate = 120,
5942 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5943 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5944 	{ .bitrate = 180,
5945 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5946 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5947 	{ .bitrate = 240,
5948 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5949 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5950 	{ .bitrate = 360,
5951 	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5952 	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5953 	{ .bitrate = 480,
5954 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5955 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5956 	{ .bitrate = 540,
5957 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5958 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5959 };
5960 
5961 /* 5 GHz band channels for WL1273 */
5962 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5963 	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5964 	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5965 	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5966 	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5967 	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5968 	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5969 	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5970 	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5971 	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5972 	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5973 	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5974 	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5975 	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5976 	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5977 	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5978 	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5979 	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5980 	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5981 	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5982 	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5983 	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5984 	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5985 	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5986 	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5987 	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5988 	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5989 	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5990 	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5991 	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5992 	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5993 	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5994 };
5995 
5996 static struct ieee80211_supported_band wl1271_band_5ghz = {
5997 	.channels = wl1271_channels_5ghz,
5998 	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5999 	.bitrates = wl1271_rates_5ghz,
6000 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
6001 };
6002 
6003 static const struct ieee80211_ops wl1271_ops = {
6004 	.start = wl1271_op_start,
6005 	.stop = wlcore_op_stop,
6006 	.add_interface = wl1271_op_add_interface,
6007 	.remove_interface = wl1271_op_remove_interface,
6008 	.change_interface = wl12xx_op_change_interface,
6009 #ifdef CONFIG_PM
6010 	.suspend = wl1271_op_suspend,
6011 	.resume = wl1271_op_resume,
6012 #endif
6013 	.config = wl1271_op_config,
6014 	.prepare_multicast = wl1271_op_prepare_multicast,
6015 	.configure_filter = wl1271_op_configure_filter,
6016 	.tx = wl1271_op_tx,
6017 	.set_key = wlcore_op_set_key,
6018 	.hw_scan = wl1271_op_hw_scan,
6019 	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
6020 	.sched_scan_start = wl1271_op_sched_scan_start,
6021 	.sched_scan_stop = wl1271_op_sched_scan_stop,
6022 	.bss_info_changed = wl1271_op_bss_info_changed,
6023 	.set_frag_threshold = wl1271_op_set_frag_threshold,
6024 	.set_rts_threshold = wl1271_op_set_rts_threshold,
6025 	.conf_tx = wl1271_op_conf_tx,
6026 	.get_tsf = wl1271_op_get_tsf,
6027 	.get_survey = wl1271_op_get_survey,
6028 	.sta_state = wl12xx_op_sta_state,
6029 	.ampdu_action = wl1271_op_ampdu_action,
6030 	.tx_frames_pending = wl1271_tx_frames_pending,
6031 	.set_bitrate_mask = wl12xx_set_bitrate_mask,
6032 	.set_default_unicast_key = wl1271_op_set_default_key_idx,
6033 	.channel_switch = wl12xx_op_channel_switch,
6034 	.channel_switch_beacon = wlcore_op_channel_switch_beacon,
6035 	.flush = wlcore_op_flush,
6036 	.remain_on_channel = wlcore_op_remain_on_channel,
6037 	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
6038 	.add_chanctx = wlcore_op_add_chanctx,
6039 	.remove_chanctx = wlcore_op_remove_chanctx,
6040 	.change_chanctx = wlcore_op_change_chanctx,
6041 	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
6042 	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
6043 	.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
6044 	.sta_rc_update = wlcore_op_sta_rc_update,
6045 	.sta_statistics = wlcore_op_sta_statistics,
6046 	.get_expected_throughput = wlcore_op_get_expected_throughput,
6047 	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
6048 };
6049 
6050 
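/* map a HW rate value to the corresponding index in the band's rate table */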
6051 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
6052 {
6053 	u8 idx;
6054 
6055 	BUG_ON(band >= 2);
6056 
6057 	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
6058 		wl1271_error("Illegal RX rate from HW: %d", rate);
6059 		return 0;
6060 	}
6061 
6062 	idx = wl->band_rate_to_idx[band][rate];
6063 	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
6064 		wl1271_error("Unsupported RX rate from HW: %d", rate);
6065 		return 0;
6066 	}
6067 
6068 	return idx;
6069 }
6070 
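/*
 * Fill wl->addresses from the OUI/NIC base address, incrementing the NIC
 * part for each additional address.  If fewer addresses are available than
 * WLCORE_NUM_MAC_ADDRESSES, reuse the first one with the locally
 * administered bit set as the last entry.
 */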
6071 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
6072 {
6073 	int i;
6074 
6075 	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
6076 		     oui, nic);
6077 
6078 	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
6079 		wl1271_warning("NIC part of the MAC address wraps around!");
6080 
6081 	for (i = 0; i < wl->num_mac_addr; i++) {
6082 		wl->addresses[i].addr[0] = (u8)(oui >> 16);
6083 		wl->addresses[i].addr[1] = (u8)(oui >> 8);
6084 		wl->addresses[i].addr[2] = (u8) oui;
6085 		wl->addresses[i].addr[3] = (u8)(nic >> 16);
6086 		wl->addresses[i].addr[4] = (u8)(nic >> 8);
6087 		wl->addresses[i].addr[5] = (u8) nic;
6088 		nic++;
6089 	}
6090 
6091 	/* we may be at most one address short */
6092 	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
6093 
6094 	/*
6095 	 * turn on the LAA bit in the first address and use it as
6096 	 * the last address.
6097 	 */
6098 	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
6099 		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
6100 		memcpy(&wl->addresses[idx], &wl->addresses[0],
6101 		       sizeof(wl->addresses[0]));
6102 		/* LAA bit */
6103 		wl->addresses[idx].addr[0] |= BIT(1);
6104 	}
6105 
6106 	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
6107 	wl->hw->wiphy->addresses = wl->addresses;
6108 }
6109 
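/* read the chip ID and PG version, and the fused MAC address when the chip provides one */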
6110 static int wl12xx_get_hw_info(struct wl1271 *wl)
6111 {
6112 	int ret;
6113 
6114 	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6115 	if (ret < 0)
6116 		goto out;
6117 
6118 	wl->fuse_oui_addr = 0;
6119 	wl->fuse_nic_addr = 0;
6120 
6121 	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6122 	if (ret < 0)
6123 		goto out;
6124 
6125 	if (wl->ops->get_mac)
6126 		ret = wl->ops->get_mac(wl);
6127 
6128 out:
6129 	return ret;
6130 }
6131 
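/*
 * Derive the MAC addresses (from the NVS when present, otherwise from the
 * fuse registers) and register the hardware with mac80211.
 */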
6132 static int wl1271_register_hw(struct wl1271 *wl)
6133 {
6134 	int ret;
6135 	u32 oui_addr = 0, nic_addr = 0;
6136 	struct platform_device *pdev = wl->pdev;
6137 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6138 
6139 	if (wl->mac80211_registered)
6140 		return 0;
6141 
6142 	if (wl->nvs_len >= 12) {
6143 		/* NOTE: to simplify the casting below, we rely on the
6144 		 * wl->nvs->nvs element being first, i.e. at the very
6145 		 * beginning of the wl->nvs structure.
6146 		 */
6147 		u8 *nvs_ptr = (u8 *)wl->nvs;
6148 
6149 		oui_addr =
6150 			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6151 		nic_addr =
6152 			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6153 	}
6154 
6155 	/* if the MAC address is zeroed in the NVS, derive it from fuse */
6156 	if (oui_addr == 0 && nic_addr == 0) {
6157 		oui_addr = wl->fuse_oui_addr;
6158 		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6159 		nic_addr = wl->fuse_nic_addr + 1;
6160 	}
6161 
6162 	if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6163 		wl1271_warning("Detected unconfigured mac address in nvs, deriving from fuse instead.");
6164 		if (!strcmp(pdev_data->family->name, "wl18xx")) {
6165 			wl1271_warning("This default nvs file can be removed from the file system");
6166 		} else {
6167 			wl1271_warning("Your device performance is not optimized.");
6168 			wl1271_warning("Please use the calibrator tool to configure your device.");
6169 		}
6170 
6171 		if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6172 			wl1271_warning("Fuse mac address is zero, using random mac");
6173 			/* Use TI oui and a random nic */
6174 			oui_addr = WLCORE_TI_OUI_ADDRESS;
6175 			nic_addr = get_random_int();
6176 		} else {
6177 			oui_addr = wl->fuse_oui_addr;
6178 			/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6179 			nic_addr = wl->fuse_nic_addr + 1;
6180 		}
6181 	}
6182 
6183 	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6184 
6185 	ret = ieee80211_register_hw(wl->hw);
6186 	if (ret < 0) {
6187 		wl1271_error("unable to register mac80211 hw: %d", ret);
6188 		goto out;
6189 	}
6190 
6191 	wl->mac80211_registered = true;
6192 
6193 	wl1271_debugfs_init(wl);
6194 
6195 	wl1271_notice("loaded");
6196 
6197 out:
6198 	return ret;
6199 }
6200 
6201 static void wl1271_unregister_hw(struct wl1271 *wl)
6202 {
6203 	if (wl->plt)
6204 		wl1271_plt_stop(wl);
6205 
6206 	ieee80211_unregister_hw(wl->hw);
6207 	wl->mac80211_registered = false;
6208 
6209 }
6210 
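/* set up the ieee80211_hw and wiphy capabilities before registration */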
6211 static int wl1271_init_ieee80211(struct wl1271 *wl)
6212 {
6213 	int i;
6214 	static const u32 cipher_suites[] = {
6215 		WLAN_CIPHER_SUITE_WEP40,
6216 		WLAN_CIPHER_SUITE_WEP104,
6217 		WLAN_CIPHER_SUITE_TKIP,
6218 		WLAN_CIPHER_SUITE_CCMP,
6219 		WL1271_CIPHER_SUITE_GEM,
6220 		WLAN_CIPHER_SUITE_AES_CMAC,
6221 	};
6222 
6223 	/* The tx descriptor buffer */
6224 	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6225 
6226 	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6227 		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6228 
6229 	/* unit us */
6230 	/* FIXME: find a proper value */
6231 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6232 
6233 	ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6234 	ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6235 	ieee80211_hw_set(wl->hw, SUPPORTS_PER_STA_GTK);
6236 	ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6237 	ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6238 	ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6239 	ieee80211_hw_set(wl->hw, AP_LINK_PS);
6240 	ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6241 	ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6242 	ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6243 	ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6244 	ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6245 	ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6246 	ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6247 	ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6248 
6249 	wl->hw->wiphy->cipher_suites = cipher_suites;
6250 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6251 
6252 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6253 					 BIT(NL80211_IFTYPE_AP) |
6254 					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6255 					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6256 #ifdef CONFIG_MAC80211_MESH
6257 					 BIT(NL80211_IFTYPE_MESH_POINT) |
6258 #endif
6259 					 BIT(NL80211_IFTYPE_P2P_GO);
6260 
6261 	wl->hw->wiphy->max_scan_ssids = 1;
6262 	wl->hw->wiphy->max_sched_scan_ssids = 16;
6263 	wl->hw->wiphy->max_match_sets = 16;
6264 	/*
6265 	 * The maximum length of the IEs in a scanning probe request template
6266 	 * is the maximum template size, minus the IEEE80211 header of the
6267 	 * template.
6268 	 */
6269 	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6270 			sizeof(struct ieee80211_header);
6271 
6272 	wl->hw->wiphy->max_sched_scan_reqs = 1;
6273 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6274 		sizeof(struct ieee80211_header);
6275 
6276 	wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6277 
6278 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6279 				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6280 				WIPHY_FLAG_HAS_CHANNEL_SWITCH |
6281 				WIPHY_FLAG_IBSS_RSN;
6282 
6283 	wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6284 
6285 	/* make sure all our channels fit in the scanned_ch bitmask */
6286 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6287 		     ARRAY_SIZE(wl1271_channels_5ghz) >
6288 		     WL1271_MAX_CHANNELS);
6289 	/*
6290 	 * clear channel flags from the previous usage
6291 	 * and restore max_power & max_antenna_gain values.
6292 	 */
6293 	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6294 		wl1271_band_2ghz.channels[i].flags = 0;
6295 		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6296 		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6297 	}
6298 
6299 	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6300 		wl1271_band_5ghz.channels[i].flags = 0;
6301 		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6302 		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6303 	}
6304 
6305 	/*
6306 	 * We keep local copies of the band structs because we need to
6307 	 * modify them on a per-device basis.
6308 	 */
6309 	memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6310 	       sizeof(wl1271_band_2ghz));
6311 	memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6312 	       &wl->ht_cap[NL80211_BAND_2GHZ],
6313 	       sizeof(*wl->ht_cap));
6314 	memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6315 	       sizeof(wl1271_band_5ghz));
6316 	memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6317 	       &wl->ht_cap[NL80211_BAND_5GHZ],
6318 	       sizeof(*wl->ht_cap));
6319 
6320 	wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6321 		&wl->bands[NL80211_BAND_2GHZ];
6322 	wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6323 		&wl->bands[NL80211_BAND_5GHZ];
6324 
6325 	/*
6326 	 * allow 4 queues per mac address we support +
6327 	 * 1 cab queue per mac + one global offchannel Tx queue
6328 	 */
6329 	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6330 
6331 	/* the last queue is the offchannel queue */
6332 	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6333 	wl->hw->max_rates = 1;
6334 
6335 	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6336 
6337 	/* the FW answers probe-requests in AP-mode */
6338 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6339 	wl->hw->wiphy->probe_resp_offload =
6340 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6341 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6342 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6343 
6344 	/* allowed interface combinations */
6345 	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6346 	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6347 
6348 	/* register vendor commands */
6349 	wlcore_set_vendor_commands(wl->hw->wiphy);
6350 
6351 	SET_IEEE80211_DEV(wl->hw, wl->dev);
6352 
6353 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
6354 	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6355 
6356 	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6357 
6358 	return 0;
6359 }
6360 
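/*
 * Allocate the ieee80211_hw along with the wlcore private data and the
 * driver buffers (aggregation buffer, dummy packet, FW log page, mailbox).
 * Returns an ERR_PTR() on failure.
 */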
6361 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6362 				     u32 mbox_size)
6363 {
6364 	struct ieee80211_hw *hw;
6365 	struct wl1271 *wl;
6366 	int i, j, ret;
6367 	unsigned int order;
6368 
6369 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6370 	if (!hw) {
6371 		wl1271_error("could not alloc ieee80211_hw");
6372 		ret = -ENOMEM;
6373 		goto err_hw_alloc;
6374 	}
6375 
6376 	wl = hw->priv;
6377 	memset(wl, 0, sizeof(*wl));
6378 
6379 	wl->priv = kzalloc(priv_size, GFP_KERNEL);
6380 	if (!wl->priv) {
6381 		wl1271_error("could not alloc wl priv");
6382 		ret = -ENOMEM;
6383 		goto err_priv_alloc;
6384 	}
6385 
6386 	INIT_LIST_HEAD(&wl->wlvif_list);
6387 
6388 	wl->hw = hw;
6389 
6390 	/*
6391 	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6392 	 * We don't allocate any additional resources here, so that's fine.
6393 	 */
6394 	for (i = 0; i < NUM_TX_QUEUES; i++)
6395 		for (j = 0; j < WLCORE_MAX_LINKS; j++)
6396 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
6397 
6398 	skb_queue_head_init(&wl->deferred_rx_queue);
6399 	skb_queue_head_init(&wl->deferred_tx_queue);
6400 
6401 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6402 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
6403 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6404 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6405 	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6406 	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6407 
6408 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6409 	if (!wl->freezable_wq) {
6410 		ret = -ENOMEM;
6411 		goto err_hw;
6412 	}
6413 
6414 	wl->channel = 0;
6415 	wl->rx_counter = 0;
6416 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6417 	wl->band = NL80211_BAND_2GHZ;
6418 	wl->channel_type = NL80211_CHAN_NO_HT;
6419 	wl->flags = 0;
6420 	wl->sg_enabled = true;
6421 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
6422 	wl->recovery_count = 0;
6423 	wl->hw_pg_ver = -1;
6424 	wl->ap_ps_map = 0;
6425 	wl->ap_fw_ps_map = 0;
6426 	wl->quirks = 0;
6427 	wl->system_hlid = WL12XX_SYSTEM_HLID;
6428 	wl->active_sta_count = 0;
6429 	wl->active_link_count = 0;
6430 	wl->fwlog_size = 0;
6431 
6432 	/* The system link is always allocated */
6433 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6434 
6435 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6436 	for (i = 0; i < wl->num_tx_desc; i++)
6437 		wl->tx_frames[i] = NULL;
6438 
6439 	spin_lock_init(&wl->wl_lock);
6440 
6441 	wl->state = WLCORE_STATE_OFF;
6442 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6443 	mutex_init(&wl->mutex);
6444 	mutex_init(&wl->flush_mutex);
6445 	init_completion(&wl->nvs_loading_complete);
6446 
6447 	order = get_order(aggr_buf_size);
6448 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6449 	if (!wl->aggr_buf) {
6450 		ret = -ENOMEM;
6451 		goto err_wq;
6452 	}
6453 	wl->aggr_buf_size = aggr_buf_size;
6454 
6455 	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6456 	if (!wl->dummy_packet) {
6457 		ret = -ENOMEM;
6458 		goto err_aggr;
6459 	}
6460 
6461 	/* Allocate one page for the FW log */
6462 	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6463 	if (!wl->fwlog) {
6464 		ret = -ENOMEM;
6465 		goto err_dummy_packet;
6466 	}
6467 
6468 	wl->mbox_size = mbox_size;
6469 	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6470 	if (!wl->mbox) {
6471 		ret = -ENOMEM;
6472 		goto err_fwlog;
6473 	}
6474 
6475 	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6476 	if (!wl->buffer_32) {
6477 		ret = -ENOMEM;
6478 		goto err_mbox;
6479 	}
6480 
6481 	return hw;
6482 
6483 err_mbox:
6484 	kfree(wl->mbox);
6485 
6486 err_fwlog:
6487 	free_page((unsigned long)wl->fwlog);
6488 
6489 err_dummy_packet:
6490 	dev_kfree_skb(wl->dummy_packet);
6491 
6492 err_aggr:
6493 	free_pages((unsigned long)wl->aggr_buf, order);
6494 
6495 err_wq:
6496 	destroy_workqueue(wl->freezable_wq);
6497 
6498 err_hw:
6499 	wl1271_debugfs_exit(wl);
6500 	kfree(wl->priv);
6501 
6502 err_priv_alloc:
6503 	ieee80211_free_hw(hw);
6504 
6505 err_hw_alloc:
6506 
6507 	return ERR_PTR(ret);
6508 }
6509 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6510 
6511 int wlcore_free_hw(struct wl1271 *wl)
6512 {
6513 	/* Unblock any fwlog readers */
6514 	mutex_lock(&wl->mutex);
6515 	wl->fwlog_size = -1;
6516 	mutex_unlock(&wl->mutex);
6517 
6518 	wlcore_sysfs_free(wl);
6519 
6520 	kfree(wl->buffer_32);
6521 	kfree(wl->mbox);
6522 	free_page((unsigned long)wl->fwlog);
6523 	dev_kfree_skb(wl->dummy_packet);
6524 	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6525 
6526 	wl1271_debugfs_exit(wl);
6527 
6528 	vfree(wl->fw);
6529 	wl->fw = NULL;
6530 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6531 	kfree(wl->nvs);
6532 	wl->nvs = NULL;
6533 
6534 	kfree(wl->raw_fw_status);
6535 	kfree(wl->fw_status);
6536 	kfree(wl->tx_res_if);
6537 	destroy_workqueue(wl->freezable_wq);
6538 
6539 	kfree(wl->priv);
6540 	ieee80211_free_hw(wl->hw);
6541 
6542 	return 0;
6543 }
6544 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6545 
6546 #ifdef CONFIG_PM
6547 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6548 	.flags = WIPHY_WOWLAN_ANY,
6549 	.n_patterns = WL1271_MAX_RX_FILTERS,
6550 	.pattern_min_len = 1,
6551 	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6552 };
6553 #endif
6554 
6555 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6556 {
6557 	return IRQ_WAKE_THREAD;
6558 }
6559 
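/*
 * Completion callback for the (optional) NVS firmware request: continue the
 * probe sequence with chip setup, IRQ registration, mac80211 registration
 * and sysfs initialization.
 */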
6560 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6561 {
6562 	struct wl1271 *wl = context;
6563 	struct platform_device *pdev = wl->pdev;
6564 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6565 	struct resource *res;
6566 
6567 	int ret;
6568 	irq_handler_t hardirq_fn = NULL;
6569 
6570 	if (fw) {
6571 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6572 		if (!wl->nvs) {
6573 			wl1271_error("Could not allocate nvs data");
6574 			goto out;
6575 		}
6576 		wl->nvs_len = fw->size;
6577 	} else if (pdev_data->family->nvs_name) {
6578 		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6579 			     pdev_data->family->nvs_name);
6580 		wl->nvs = NULL;
6581 		wl->nvs_len = 0;
6582 	} else {
6583 		wl->nvs = NULL;
6584 		wl->nvs_len = 0;
6585 	}
6586 
6587 	ret = wl->ops->setup(wl);
6588 	if (ret < 0)
6589 		goto out_free_nvs;
6590 
6591 	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6592 
6593 	/* adjust some runtime configuration parameters */
6594 	wlcore_adjust_conf(wl);
6595 
6596 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6597 	if (!res) {
6598 		wl1271_error("Could not get IRQ resource");
6599 		goto out_free_nvs;
6600 	}
6601 
6602 	wl->irq = res->start;
6603 	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6604 	wl->if_ops = pdev_data->if_ops;
6605 
6606 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6607 		hardirq_fn = wlcore_hardirq;
6608 	else
6609 		wl->irq_flags |= IRQF_ONESHOT;
6610 
6611 	ret = wl12xx_set_power_on(wl);
6612 	if (ret < 0)
6613 		goto out_free_nvs;
6614 
6615 	ret = wl12xx_get_hw_info(wl);
6616 	if (ret < 0) {
6617 		wl1271_error("couldn't get hw info");
6618 		wl1271_power_off(wl);
6619 		goto out_free_nvs;
6620 	}
6621 
6622 	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6623 				   wl->irq_flags, pdev->name, wl);
6624 	if (ret < 0) {
6625 		wl1271_error("interrupt configuration failed");
6626 		wl1271_power_off(wl);
6627 		goto out_free_nvs;
6628 	}
6629 
6630 #ifdef CONFIG_PM
6631 	device_init_wakeup(wl->dev, true);
6632 
6633 	ret = enable_irq_wake(wl->irq);
6634 	if (!ret) {
6635 		wl->irq_wake_enabled = true;
6636 		if (pdev_data->pwr_in_suspend)
6637 			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6638 	}
6639 
6640 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
6641 	if (res) {
6642 		wl->wakeirq = res->start;
6643 		wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK;
6644 		ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq);
6645 		if (ret)
6646 			wl->wakeirq = -ENODEV;
6647 	} else {
6648 		wl->wakeirq = -ENODEV;
6649 	}
6650 #endif
6651 	disable_irq(wl->irq);
6652 	wl1271_power_off(wl);
6653 
6654 	ret = wl->ops->identify_chip(wl);
6655 	if (ret < 0)
6656 		goto out_irq;
6657 
6658 	ret = wl1271_init_ieee80211(wl);
6659 	if (ret)
6660 		goto out_irq;
6661 
6662 	ret = wl1271_register_hw(wl);
6663 	if (ret)
6664 		goto out_irq;
6665 
6666 	ret = wlcore_sysfs_init(wl);
6667 	if (ret)
6668 		goto out_unreg;
6669 
6670 	wl->initialized = true;
6671 	goto out;
6672 
6673 out_unreg:
6674 	wl1271_unregister_hw(wl);
6675 
6676 out_irq:
6677 	if (wl->wakeirq >= 0)
6678 		dev_pm_clear_wake_irq(wl->dev);
6679 	device_init_wakeup(wl->dev, false);
6680 	free_irq(wl->irq, wl);
6681 
6682 out_free_nvs:
6683 	kfree(wl->nvs);
6684 
6685 out:
6686 	release_firmware(fw);
6687 	complete_all(&wl->nvs_loading_complete);
6688 }
6689 
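/* runtime PM suspend: request ELP, unless an in-use vif is not yet in PS */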
6690 static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
6691 {
6692 	struct wl1271 *wl = dev_get_drvdata(dev);
6693 	struct wl12xx_vif *wlvif;
6694 	int error;
6695 
6696 	/* We do not enter elp sleep in PLT mode */
6697 	if (wl->plt)
6698 		return 0;
6699 
6700 	/* Nothing to do if no ELP mode requested */
6701 	if (wl->sleep_auth != WL1271_PSM_ELP)
6702 		return 0;
6703 
6704 	wl12xx_for_each_wlvif(wl, wlvif) {
6705 		if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
6706 		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
6707 			return -EBUSY;
6708 	}
6709 
6710 	wl1271_debug(DEBUG_PSM, "chip to elp");
6711 	error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
6712 	if (error < 0) {
6713 		wl12xx_queue_recovery_work(wl);
6714 
6715 		return error;
6716 	}
6717 
6718 	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6719 
6720 	return 0;
6721 }
6722 
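/* runtime PM resume: wake the chip from ELP and wait for the wakeup completion */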
6723 static int __maybe_unused wlcore_runtime_resume(struct device *dev)
6724 {
6725 	struct wl1271 *wl = dev_get_drvdata(dev);
6726 	DECLARE_COMPLETION_ONSTACK(compl);
6727 	unsigned long flags;
6728 	int ret;
6729 	unsigned long start_time = jiffies;
6730 	bool pending = false;
6731 	bool recovery = false;
6732 
6733 	/* Nothing to do if no ELP mode requested */
6734 	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
6735 		return 0;
6736 
6737 	wl1271_debug(DEBUG_PSM, "waking up chip from elp");
6738 
6739 	spin_lock_irqsave(&wl->wl_lock, flags);
6740 	if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
6741 		pending = true;
6742 	else
6743 		wl->elp_compl = &compl;
6744 	spin_unlock_irqrestore(&wl->wl_lock, flags);
6745 
6746 	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
6747 	if (ret < 0) {
6748 		recovery = true;
6749 		goto err;
6750 	}
6751 
6752 	if (!pending) {
6753 		ret = wait_for_completion_timeout(&compl,
6754 			msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
6755 		if (ret == 0) {
6756 			wl1271_warning("ELP wakeup timeout!");
6757 
6758 			/* Return success to runtime PM; the recovery work handles it */
6759 			ret = 0;
6760 			recovery = true;
6761 			goto err;
6762 		}
6763 	}
6764 
6765 	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6766 
6767 	wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
6768 		     jiffies_to_msecs(jiffies - start_time));
6769 
6770 	return 0;
6771 
6772 err:
6773 	spin_lock_irqsave(&wl->wl_lock, flags);
6774 	wl->elp_compl = NULL;
6775 	spin_unlock_irqrestore(&wl->wl_lock, flags);
6776 
6777 	if (recovery) {
6778 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
6779 		wl12xx_queue_recovery_work(wl);
6780 	}
6781 
6782 	return ret;
6783 }
6784 
6785 static const struct dev_pm_ops wlcore_pm_ops = {
6786 	SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
6787 			   wlcore_runtime_resume,
6788 			   NULL)
6789 };
6790 
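/*
 * Called by the chip-specific driver: request the NVS file (if the family
 * defines one) and enable runtime PM with autosuspend.
 */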
6791 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6792 {
6793 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6794 	const char *nvs_name;
6795 	int ret = 0;
6796 
6797 	if (!wl->ops || !wl->ptable || !pdev_data)
6798 		return -EINVAL;
6799 
6800 	wl->dev = &pdev->dev;
6801 	wl->pdev = pdev;
6802 	platform_set_drvdata(pdev, wl);
6803 
6804 	if (pdev_data->family && pdev_data->family->nvs_name) {
6805 		nvs_name = pdev_data->family->nvs_name;
6806 		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6807 					      nvs_name, &pdev->dev, GFP_KERNEL,
6808 					      wl, wlcore_nvs_cb);
6809 		if (ret < 0) {
6810 			wl1271_error("request_firmware_nowait failed for %s: %d",
6811 				     nvs_name, ret);
6812 			complete_all(&wl->nvs_loading_complete);
6813 		}
6814 	} else {
6815 		wlcore_nvs_cb(NULL, wl);
6816 	}
6817 
6818 	wl->dev->driver->pm = &wlcore_pm_ops;
6819 	pm_runtime_set_autosuspend_delay(wl->dev, 50);
6820 	pm_runtime_use_autosuspend(wl->dev);
6821 	pm_runtime_enable(wl->dev);
6822 
6823 	return ret;
6824 }
6825 EXPORT_SYMBOL_GPL(wlcore_probe);
6826 
6827 int wlcore_remove(struct platform_device *pdev)
6828 {
6829 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6830 	struct wl1271 *wl = platform_get_drvdata(pdev);
6831 	int error;
6832 
6833 	error = pm_runtime_get_sync(wl->dev);
6834 	if (error < 0)
6835 		dev_warn(wl->dev, "PM runtime failed: %i\n", error);
6836 
6837 	wl->dev->driver->pm = NULL;
6838 
6839 	if (pdev_data->family && pdev_data->family->nvs_name)
6840 		wait_for_completion(&wl->nvs_loading_complete);
6841 	if (!wl->initialized)
6842 		return 0;
6843 
6844 	if (wl->wakeirq >= 0) {
6845 		dev_pm_clear_wake_irq(wl->dev);
6846 		wl->wakeirq = -ENODEV;
6847 	}
6848 
6849 	device_init_wakeup(wl->dev, false);
6850 
6851 	if (wl->irq_wake_enabled)
6852 		disable_irq_wake(wl->irq);
6853 
6854 	wl1271_unregister_hw(wl);
6855 
6856 	pm_runtime_put_sync(wl->dev);
6857 	pm_runtime_dont_use_autosuspend(wl->dev);
6858 	pm_runtime_disable(wl->dev);
6859 
6860 	free_irq(wl->irq, wl);
6861 	wlcore_free_hw(wl);
6862 
6863 	return 0;
6864 }
6865 EXPORT_SYMBOL_GPL(wlcore_remove);
6866 
6867 u32 wl12xx_debug_level = DEBUG_NONE;
6868 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6869 module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
6870 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6871 
6872 module_param_named(fwlog, fwlog_param, charp, 0);
6873 MODULE_PARM_DESC(fwlog,
6874 		 "FW logger options: continuous, dbgpins or disable");
6875 
6876 module_param(fwlog_mem_blocks, int, 0600);
6877 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6878 
6879 module_param(bug_on_recovery, int, 0600);
6880 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6881 
6882 module_param(no_recovery, int, 0600);
6883 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6884 
6885 MODULE_LICENSE("GPL");
6886 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6887 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6888