xref: /linux/drivers/net/wireless/ti/wlcore/main.c (revision 61d0b5a4b2777dcf5daef245e212b3c1fa8091ca)
1 
2 /*
3  * This file is part of wl1271
4  *
5  * Copyright (C) 2008-2010 Nokia Corporation
6  *
7  * Contact: Luciano Coelho <luciano.coelho@nokia.com>
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * version 2 as published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21  * 02110-1301 USA
22  *
23  */
24 
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
37 
38 #include "wlcore.h"
39 #include "debug.h"
40 #include "wl12xx_80211.h"
41 #include "io.h"
42 #include "event.h"
43 #include "tx.h"
44 #include "rx.h"
45 #include "ps.h"
46 #include "init.h"
47 #include "debugfs.h"
48 #include "cmd.h"
49 #include "boot.h"
50 #include "testmode.h"
51 #include "scan.h"
52 #include "hw_ops.h"
53 
54 #define WL1271_BOOT_RETRIES 3
55 
56 #define WL1271_BOOT_RETRIES 3
57 
58 static char *fwlog_param;
59 static int bug_on_recovery = -1;
60 static int no_recovery     = -1;
61 
62 static void __wl1271_op_remove_interface(struct wl1271 *wl,
63 					 struct ieee80211_vif *vif,
64 					 bool reset_tx_queues);
65 static void wlcore_op_stop_locked(struct wl1271 *wl);
66 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
67 
68 static int wl12xx_set_authorized(struct wl1271 *wl,
69 				 struct wl12xx_vif *wlvif)
70 {
71 	int ret;
72 
73 	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
74 		return -EINVAL;
75 
76 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
77 		return 0;
78 
79 	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
80 		return 0;
81 
82 	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
83 	if (ret < 0)
84 		return ret;
85 
86 	wl1271_info("Association completed.");
87 	return 0;
88 }
89 
90 static void wl1271_reg_notify(struct wiphy *wiphy,
91 			      struct regulatory_request *request)
92 {
93 	struct ieee80211_supported_band *band;
94 	struct ieee80211_channel *ch;
95 	int i;
96 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
97 	struct wl1271 *wl = hw->priv;
98 
99 	band = wiphy->bands[IEEE80211_BAND_5GHZ];
100 	for (i = 0; i < band->n_channels; i++) {
101 		ch = &band->channels[i];
102 		if (ch->flags & IEEE80211_CHAN_DISABLED)
103 			continue;
104 
105 		if (ch->flags & IEEE80211_CHAN_RADAR)
106 			ch->flags |= IEEE80211_CHAN_NO_IBSS |
107 				     IEEE80211_CHAN_PASSIVE_SCAN;
108 
109 	}
110 
111 	if (likely(wl->state == WLCORE_STATE_ON))
112 		wlcore_regdomain_config(wl);
113 }
114 
115 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
116 				   bool enable)
117 {
118 	int ret = 0;
119 
120 	/* we should hold wl->mutex */
121 	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
122 	if (ret < 0)
123 		goto out;
124 
125 	if (enable)
126 		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
127 	else
128 		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
129 out:
130 	return ret;
131 }
132 
133 /*
134  * this function is being called when the rx_streaming interval
135  * has beed changed or rx_streaming should be disabled
136  */
137 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
138 {
139 	int ret = 0;
140 	int period = wl->conf.rx_streaming.interval;
141 
142 	/* don't reconfigure if rx_streaming is disabled */
143 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
144 		goto out;
145 
146 	/* reconfigure/disable according to new streaming_period */
147 	if (period &&
148 	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
149 	    (wl->conf.rx_streaming.always ||
150 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
151 		ret = wl1271_set_rx_streaming(wl, wlvif, true);
152 	else {
153 		ret = wl1271_set_rx_streaming(wl, wlvif, false);
154 		/* don't cancel_work_sync since we might deadlock */
155 		del_timer_sync(&wlvif->rx_streaming_timer);
156 	}
157 out:
158 	return ret;
159 }
160 
161 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
162 {
163 	int ret;
164 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
165 						rx_streaming_enable_work);
166 	struct wl1271 *wl = wlvif->wl;
167 
168 	mutex_lock(&wl->mutex);
169 
170 	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
171 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
172 	    (!wl->conf.rx_streaming.always &&
173 	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
174 		goto out;
175 
176 	if (!wl->conf.rx_streaming.interval)
177 		goto out;
178 
179 	ret = wl1271_ps_elp_wakeup(wl);
180 	if (ret < 0)
181 		goto out;
182 
183 	ret = wl1271_set_rx_streaming(wl, wlvif, true);
184 	if (ret < 0)
185 		goto out_sleep;
186 
187 	/* stop it after some time of inactivity */
188 	mod_timer(&wlvif->rx_streaming_timer,
189 		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
190 
191 out_sleep:
192 	wl1271_ps_elp_sleep(wl);
193 out:
194 	mutex_unlock(&wl->mutex);
195 }
196 
197 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
198 {
199 	int ret;
200 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
201 						rx_streaming_disable_work);
202 	struct wl1271 *wl = wlvif->wl;
203 
204 	mutex_lock(&wl->mutex);
205 
206 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
207 		goto out;
208 
209 	ret = wl1271_ps_elp_wakeup(wl);
210 	if (ret < 0)
211 		goto out;
212 
213 	ret = wl1271_set_rx_streaming(wl, wlvif, false);
214 	if (ret)
215 		goto out_sleep;
216 
217 out_sleep:
218 	wl1271_ps_elp_sleep(wl);
219 out:
220 	mutex_unlock(&wl->mutex);
221 }
222 
223 static void wl1271_rx_streaming_timer(unsigned long data)
224 {
225 	struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
226 	struct wl1271 *wl = wlvif->wl;
227 	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
228 }
229 
230 /* wl->mutex must be taken */
231 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
232 {
233 	/* if the watchdog is not armed, don't do anything */
234 	if (wl->tx_allocated_blocks == 0)
235 		return;
236 
237 	cancel_delayed_work(&wl->tx_watchdog_work);
238 	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
239 		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
240 }
241 
242 static void wl12xx_tx_watchdog_work(struct work_struct *work)
243 {
244 	struct delayed_work *dwork;
245 	struct wl1271 *wl;
246 
247 	dwork = container_of(work, struct delayed_work, work);
248 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
249 
250 	mutex_lock(&wl->mutex);
251 
252 	if (unlikely(wl->state != WLCORE_STATE_ON))
253 		goto out;
254 
255 	/* Tx went out in the meantime - everything is ok */
256 	if (unlikely(wl->tx_allocated_blocks == 0))
257 		goto out;
258 
259 	/*
260 	 * if a ROC is in progress, we might not have any Tx for a long
261 	 * time (e.g. pending Tx on the non-ROC channels)
262 	 */
263 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
264 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
265 			     wl->conf.tx.tx_watchdog_timeout);
266 		wl12xx_rearm_tx_watchdog_locked(wl);
267 		goto out;
268 	}
269 
270 	/*
271 	 * if a scan is in progress, we might not have any Tx for a long
272 	 * time
273 	 */
274 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
275 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
276 			     wl->conf.tx.tx_watchdog_timeout);
277 		wl12xx_rearm_tx_watchdog_locked(wl);
278 		goto out;
279 	}
280 
281 	/*
282 	* AP might cache a frame for a long time for a sleeping station,
283 	* so rearm the timer if there's an AP interface with stations. If
284 	* Tx is genuinely stuck we will most hopefully discover it when all
285 	* stations are removed due to inactivity.
286 	*/
287 	if (wl->active_sta_count) {
288 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
289 			     " %d stations",
290 			      wl->conf.tx.tx_watchdog_timeout,
291 			      wl->active_sta_count);
292 		wl12xx_rearm_tx_watchdog_locked(wl);
293 		goto out;
294 	}
295 
296 	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
297 		     wl->conf.tx.tx_watchdog_timeout);
298 	wl12xx_queue_recovery_work(wl);
299 
300 out:
301 	mutex_unlock(&wl->mutex);
302 }
303 
304 static void wlcore_adjust_conf(struct wl1271 *wl)
305 {
306 	/* Adjust settings according to optional module parameters */
307 
308 	if (fwlog_param) {
309 		if (!strcmp(fwlog_param, "continuous")) {
310 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
311 		} else if (!strcmp(fwlog_param, "ondemand")) {
312 			wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
313 		} else if (!strcmp(fwlog_param, "dbgpins")) {
314 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
315 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
316 		} else if (!strcmp(fwlog_param, "disable")) {
317 			wl->conf.fwlog.mem_blocks = 0;
318 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
319 		} else {
320 			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
321 		}
322 	}
323 
324 	if (bug_on_recovery != -1)
325 		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
326 
327 	if (no_recovery != -1)
328 		wl->conf.recovery.no_recovery = (u8) no_recovery;
329 }
330 
331 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
332 					struct wl12xx_vif *wlvif,
333 					u8 hlid, u8 tx_pkts)
334 {
335 	bool fw_ps, single_link;
336 
337 	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
338 	single_link = (wl->active_link_count == 1);
339 
340 	/*
341 	 * Wake up from high level PS if the STA is asleep with too little
342 	 * packets in FW or if the STA is awake.
343 	 */
344 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
345 		wl12xx_ps_link_end(wl, wlvif, hlid);
346 
347 	/*
348 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
349 	 * Make an exception if this is the only connected link. In this
350 	 * case FW-memory congestion is less of a problem.
351 	 */
352 	else if (!single_link && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
353 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
354 }
355 
356 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
357 					   struct wl12xx_vif *wlvif,
358 					   struct wl_fw_status_2 *status)
359 {
360 	u32 cur_fw_ps_map;
361 	u8 hlid;
362 
363 	cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
364 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
365 		wl1271_debug(DEBUG_PSM,
366 			     "link ps prev 0x%x cur 0x%x changed 0x%x",
367 			     wl->ap_fw_ps_map, cur_fw_ps_map,
368 			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
369 
370 		wl->ap_fw_ps_map = cur_fw_ps_map;
371 	}
372 
373 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS)
374 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
375 					    wl->links[hlid].allocated_pkts);
376 }
377 
378 static int wlcore_fw_status(struct wl1271 *wl,
379 			    struct wl_fw_status_1 *status_1,
380 			    struct wl_fw_status_2 *status_2)
381 {
382 	struct wl12xx_vif *wlvif;
383 	struct timespec ts;
384 	u32 old_tx_blk_count = wl->tx_blocks_available;
385 	int avail, freed_blocks;
386 	int i;
387 	size_t status_len;
388 	int ret;
389 	struct wl1271_link *lnk;
390 
391 	status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
392 		sizeof(*status_2) + wl->fw_status_priv_len;
393 
394 	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
395 				   status_len, false);
396 	if (ret < 0)
397 		return ret;
398 
399 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
400 		     "drv_rx_counter = %d, tx_results_counter = %d)",
401 		     status_1->intr,
402 		     status_1->fw_rx_counter,
403 		     status_1->drv_rx_counter,
404 		     status_1->tx_results_counter);
405 
406 	for (i = 0; i < NUM_TX_QUEUES; i++) {
407 		/* prevent wrap-around in freed-packets counter */
408 		wl->tx_allocated_pkts[i] -=
409 				(status_2->counters.tx_released_pkts[i] -
410 				wl->tx_pkts_freed[i]) & 0xff;
411 
412 		wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
413 	}
414 
415 
416 	for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) {
417 		lnk = &wl->links[i];
418 		/* prevent wrap-around in freed-packets counter */
419 		lnk->allocated_pkts -=
420 			(status_2->counters.tx_lnk_free_pkts[i] -
421 			 lnk->prev_freed_pkts) & 0xff;
422 
423 		lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i];
424 	}
425 
426 	/* prevent wrap-around in total blocks counter */
427 	if (likely(wl->tx_blocks_freed <=
428 		   le32_to_cpu(status_2->total_released_blks)))
429 		freed_blocks = le32_to_cpu(status_2->total_released_blks) -
430 			       wl->tx_blocks_freed;
431 	else
432 		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
433 			       le32_to_cpu(status_2->total_released_blks);
434 
435 	wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
436 
437 	wl->tx_allocated_blocks -= freed_blocks;
438 
439 	/*
440 	 * If the FW freed some blocks:
441 	 * If we still have allocated blocks - re-arm the timer, Tx is
442 	 * not stuck. Otherwise, cancel the timer (no Tx currently).
443 	 */
444 	if (freed_blocks) {
445 		if (wl->tx_allocated_blocks)
446 			wl12xx_rearm_tx_watchdog_locked(wl);
447 		else
448 			cancel_delayed_work(&wl->tx_watchdog_work);
449 	}
450 
451 	avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
452 
453 	/*
454 	 * The FW might change the total number of TX memblocks before
455 	 * we get a notification about blocks being released. Thus, the
456 	 * available blocks calculation might yield a temporary result
457 	 * which is lower than the actual available blocks. Keeping in
458 	 * mind that only blocks that were allocated can be moved from
459 	 * TX to RX, tx_blocks_available should never decrease here.
460 	 */
461 	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
462 				      avail);
463 
464 	/* if more blocks are available now, tx work can be scheduled */
465 	if (wl->tx_blocks_available > old_tx_blk_count)
466 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
467 
468 	/* for AP update num of allocated TX blocks per link and ps status */
469 	wl12xx_for_each_wlvif_ap(wl, wlvif) {
470 		wl12xx_irq_update_links_status(wl, wlvif, status_2);
471 	}
472 
473 	/* update the host-chipset time offset */
474 	getnstimeofday(&ts);
475 	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
476 		(s64)le32_to_cpu(status_2->fw_localtime);
477 
478 	wl->fw_fast_lnk_map = le32_to_cpu(status_2->link_fast_bitmap);
479 
480 	return 0;
481 }
482 
483 static void wl1271_flush_deferred_work(struct wl1271 *wl)
484 {
485 	struct sk_buff *skb;
486 
487 	/* Pass all received frames to the network stack */
488 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
489 		ieee80211_rx_ni(wl->hw, skb);
490 
491 	/* Return sent skbs to the network stack */
492 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
493 		ieee80211_tx_status_ni(wl->hw, skb);
494 }
495 
496 static void wl1271_netstack_work(struct work_struct *work)
497 {
498 	struct wl1271 *wl =
499 		container_of(work, struct wl1271, netstack_work);
500 
501 	do {
502 		wl1271_flush_deferred_work(wl);
503 	} while (skb_queue_len(&wl->deferred_rx_queue));
504 }
505 
506 #define WL1271_IRQ_MAX_LOOPS 256
507 
508 static int wlcore_irq_locked(struct wl1271 *wl)
509 {
510 	int ret = 0;
511 	u32 intr;
512 	int loopcount = WL1271_IRQ_MAX_LOOPS;
513 	bool done = false;
514 	unsigned int defer_count;
515 	unsigned long flags;
516 
517 	/*
518 	 * In case edge triggered interrupt must be used, we cannot iterate
519 	 * more than once without introducing race conditions with the hardirq.
520 	 */
521 	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
522 		loopcount = 1;
523 
524 	wl1271_debug(DEBUG_IRQ, "IRQ work");
525 
526 	if (unlikely(wl->state != WLCORE_STATE_ON))
527 		goto out;
528 
529 	ret = wl1271_ps_elp_wakeup(wl);
530 	if (ret < 0)
531 		goto out;
532 
533 	while (!done && loopcount--) {
534 		/*
535 		 * In order to avoid a race with the hardirq, clear the flag
536 		 * before acknowledging the chip. Since the mutex is held,
537 		 * wl1271_ps_elp_wakeup cannot be called concurrently.
538 		 */
539 		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
540 		smp_mb__after_clear_bit();
541 
542 		ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
543 		if (ret < 0)
544 			goto out;
545 
546 		wlcore_hw_tx_immediate_compl(wl);
547 
548 		intr = le32_to_cpu(wl->fw_status_1->intr);
549 		intr &= WLCORE_ALL_INTR_MASK;
550 		if (!intr) {
551 			done = true;
552 			continue;
553 		}
554 
555 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
556 			wl1271_error("HW watchdog interrupt received! starting recovery.");
557 			wl->watchdog_recovery = true;
558 			ret = -EIO;
559 
560 			/* restarting the chip. ignore any other interrupt. */
561 			goto out;
562 		}
563 
564 		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
565 			wl1271_error("SW watchdog interrupt received! "
566 				     "starting recovery.");
567 			wl->watchdog_recovery = true;
568 			ret = -EIO;
569 
570 			/* restarting the chip. ignore any other interrupt. */
571 			goto out;
572 		}
573 
574 		if (likely(intr & WL1271_ACX_INTR_DATA)) {
575 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
576 
577 			ret = wlcore_rx(wl, wl->fw_status_1);
578 			if (ret < 0)
579 				goto out;
580 
581 			/* Check if any tx blocks were freed */
582 			spin_lock_irqsave(&wl->wl_lock, flags);
583 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
584 			    wl1271_tx_total_queue_count(wl) > 0) {
585 				spin_unlock_irqrestore(&wl->wl_lock, flags);
586 				/*
587 				 * In order to avoid starvation of the TX path,
588 				 * call the work function directly.
589 				 */
590 				ret = wlcore_tx_work_locked(wl);
591 				if (ret < 0)
592 					goto out;
593 			} else {
594 				spin_unlock_irqrestore(&wl->wl_lock, flags);
595 			}
596 
597 			/* check for tx results */
598 			ret = wlcore_hw_tx_delayed_compl(wl);
599 			if (ret < 0)
600 				goto out;
601 
602 			/* Make sure the deferred queues don't get too long */
603 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
604 				      skb_queue_len(&wl->deferred_rx_queue);
605 			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
606 				wl1271_flush_deferred_work(wl);
607 		}
608 
609 		if (intr & WL1271_ACX_INTR_EVENT_A) {
610 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
611 			ret = wl1271_event_handle(wl, 0);
612 			if (ret < 0)
613 				goto out;
614 		}
615 
616 		if (intr & WL1271_ACX_INTR_EVENT_B) {
617 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
618 			ret = wl1271_event_handle(wl, 1);
619 			if (ret < 0)
620 				goto out;
621 		}
622 
623 		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
624 			wl1271_debug(DEBUG_IRQ,
625 				     "WL1271_ACX_INTR_INIT_COMPLETE");
626 
627 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
628 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
629 	}
630 
631 	wl1271_ps_elp_sleep(wl);
632 
633 out:
634 	return ret;
635 }
636 
637 static irqreturn_t wlcore_irq(int irq, void *cookie)
638 {
639 	int ret;
640 	unsigned long flags;
641 	struct wl1271 *wl = cookie;
642 
643 	/* TX might be handled here, avoid redundant work */
644 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
645 	cancel_work_sync(&wl->tx_work);
646 
647 	mutex_lock(&wl->mutex);
648 
649 	ret = wlcore_irq_locked(wl);
650 	if (ret)
651 		wl12xx_queue_recovery_work(wl);
652 
653 	spin_lock_irqsave(&wl->wl_lock, flags);
654 	/* In case TX was not handled here, queue TX work */
655 	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
656 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
657 	    wl1271_tx_total_queue_count(wl) > 0)
658 		ieee80211_queue_work(wl->hw, &wl->tx_work);
659 	spin_unlock_irqrestore(&wl->wl_lock, flags);
660 
661 	mutex_unlock(&wl->mutex);
662 
663 	return IRQ_HANDLED;
664 }
665 
666 struct vif_counter_data {
667 	u8 counter;
668 
669 	struct ieee80211_vif *cur_vif;
670 	bool cur_vif_running;
671 };
672 
673 static void wl12xx_vif_count_iter(void *data, u8 *mac,
674 				  struct ieee80211_vif *vif)
675 {
676 	struct vif_counter_data *counter = data;
677 
678 	counter->counter++;
679 	if (counter->cur_vif == vif)
680 		counter->cur_vif_running = true;
681 }
682 
683 /* caller must not hold wl->mutex, as it might deadlock */
684 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
685 			       struct ieee80211_vif *cur_vif,
686 			       struct vif_counter_data *data)
687 {
688 	memset(data, 0, sizeof(*data));
689 	data->cur_vif = cur_vif;
690 
691 	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
692 					    wl12xx_vif_count_iter, data);
693 }
694 
695 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
696 {
697 	const struct firmware *fw;
698 	const char *fw_name;
699 	enum wl12xx_fw_type fw_type;
700 	int ret;
701 
702 	if (plt) {
703 		fw_type = WL12XX_FW_TYPE_PLT;
704 		fw_name = wl->plt_fw_name;
705 	} else {
706 		/*
707 		 * we can't call wl12xx_get_vif_count() here because
708 		 * wl->mutex is taken, so use the cached last_vif_count value
709 		 */
710 		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
711 			fw_type = WL12XX_FW_TYPE_MULTI;
712 			fw_name = wl->mr_fw_name;
713 		} else {
714 			fw_type = WL12XX_FW_TYPE_NORMAL;
715 			fw_name = wl->sr_fw_name;
716 		}
717 	}
718 
719 	if (wl->fw_type == fw_type)
720 		return 0;
721 
722 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
723 
724 	ret = request_firmware(&fw, fw_name, wl->dev);
725 
726 	if (ret < 0) {
727 		wl1271_error("could not get firmware %s: %d", fw_name, ret);
728 		return ret;
729 	}
730 
731 	if (fw->size % 4) {
732 		wl1271_error("firmware size is not multiple of 32 bits: %zu",
733 			     fw->size);
734 		ret = -EILSEQ;
735 		goto out;
736 	}
737 
738 	vfree(wl->fw);
739 	wl->fw_type = WL12XX_FW_TYPE_NONE;
740 	wl->fw_len = fw->size;
741 	wl->fw = vmalloc(wl->fw_len);
742 
743 	if (!wl->fw) {
744 		wl1271_error("could not allocate memory for the firmware");
745 		ret = -ENOMEM;
746 		goto out;
747 	}
748 
749 	memcpy(wl->fw, fw->data, wl->fw_len);
750 	ret = 0;
751 	wl->fw_type = fw_type;
752 out:
753 	release_firmware(fw);
754 
755 	return ret;
756 }
757 
758 void wl12xx_queue_recovery_work(struct wl1271 *wl)
759 {
760 	WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
761 
762 	/* Avoid a recursive recovery */
763 	if (wl->state == WLCORE_STATE_ON) {
764 		wl->state = WLCORE_STATE_RESTARTING;
765 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
766 		wlcore_disable_interrupts_nosync(wl);
767 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
768 	}
769 }
770 
771 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
772 {
773 	size_t len = 0;
774 
775 	/* The FW log is a length-value list, find where the log end */
776 	while (len < maxlen) {
777 		if (memblock[len] == 0)
778 			break;
779 		if (len + memblock[len] + 1 > maxlen)
780 			break;
781 		len += memblock[len] + 1;
782 	}
783 
784 	/* Make sure we have enough room */
785 	len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
786 
787 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
788 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
789 	wl->fwlog_size += len;
790 
791 	return len;
792 }
793 
794 #define WLCORE_FW_LOG_END 0x2000000
795 
796 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
797 {
798 	u32 addr;
799 	u32 offset;
800 	u32 end_of_log;
801 	u8 *block;
802 	int ret;
803 
804 	if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
805 	    (wl->conf.fwlog.mem_blocks == 0))
806 		return;
807 
808 	wl1271_info("Reading FW panic log");
809 
810 	block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
811 	if (!block)
812 		return;
813 
814 	/*
815 	 * Make sure the chip is awake and the logger isn't active.
816 	 * Do not send a stop fwlog command if the fw is hanged or if
817 	 * dbgpins are used (due to some fw bug).
818 	 */
819 	if (wl1271_ps_elp_wakeup(wl))
820 		goto out;
821 	if (!wl->watchdog_recovery &&
822 	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
823 		wl12xx_cmd_stop_fwlog(wl);
824 
825 	/* Read the first memory block address */
826 	ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
827 	if (ret < 0)
828 		goto out;
829 
830 	addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
831 	if (!addr)
832 		goto out;
833 
834 	if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
835 		offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
836 		end_of_log = WLCORE_FW_LOG_END;
837 	} else {
838 		offset = sizeof(addr);
839 		end_of_log = addr;
840 	}
841 
842 	/* Traverse the memory blocks linked list */
843 	do {
844 		memset(block, 0, WL12XX_HW_BLOCK_SIZE);
845 		ret = wlcore_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
846 					 false);
847 		if (ret < 0)
848 			goto out;
849 
850 		/*
851 		 * Memory blocks are linked to one another. The first 4 bytes
852 		 * of each memory block hold the hardware address of the next
853 		 * one. The last memory block points to the first one in
854 		 * on demand mode and is equal to 0x2000000 in continuous mode.
855 		 */
856 		addr = le32_to_cpup((__le32 *)block);
857 		if (!wl12xx_copy_fwlog(wl, block + offset,
858 				       WL12XX_HW_BLOCK_SIZE - offset))
859 			break;
860 	} while (addr && (addr != end_of_log));
861 
862 	wake_up_interruptible(&wl->fwlog_waitq);
863 
864 out:
865 	kfree(block);
866 }
867 
868 static void wlcore_print_recovery(struct wl1271 *wl)
869 {
870 	u32 pc = 0;
871 	u32 hint_sts = 0;
872 	int ret;
873 
874 	wl1271_info("Hardware recovery in progress. FW ver: %s",
875 		    wl->chip.fw_ver_str);
876 
877 	/* change partitions momentarily so we can read the FW pc */
878 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
879 	if (ret < 0)
880 		return;
881 
882 	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
883 	if (ret < 0)
884 		return;
885 
886 	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
887 	if (ret < 0)
888 		return;
889 
890 	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
891 				pc, hint_sts, ++wl->recovery_count);
892 
893 	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
894 }
895 
896 
897 static void wl1271_recovery_work(struct work_struct *work)
898 {
899 	struct wl1271 *wl =
900 		container_of(work, struct wl1271, recovery_work);
901 	struct wl12xx_vif *wlvif;
902 	struct ieee80211_vif *vif;
903 
904 	mutex_lock(&wl->mutex);
905 
906 	if (wl->state == WLCORE_STATE_OFF || wl->plt)
907 		goto out_unlock;
908 
909 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
910 		wl12xx_read_fwlog_panic(wl);
911 		wlcore_print_recovery(wl);
912 	}
913 
914 	BUG_ON(wl->conf.recovery.bug_on_recovery &&
915 	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
916 
917 	if (wl->conf.recovery.no_recovery) {
918 		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
919 		goto out_unlock;
920 	}
921 
922 	/*
923 	 * Advance security sequence number to overcome potential progress
924 	 * in the firmware during recovery. This doens't hurt if the network is
925 	 * not encrypted.
926 	 */
927 	wl12xx_for_each_wlvif(wl, wlvif) {
928 		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
929 		    test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
930 			wlvif->tx_security_seq +=
931 				WL1271_TX_SQN_POST_RECOVERY_PADDING;
932 	}
933 
934 	/* Prevent spurious TX during FW restart */
935 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
936 
937 	/* reboot the chipset */
938 	while (!list_empty(&wl->wlvif_list)) {
939 		wlvif = list_first_entry(&wl->wlvif_list,
940 				       struct wl12xx_vif, list);
941 		vif = wl12xx_wlvif_to_vif(wlvif);
942 		__wl1271_op_remove_interface(wl, vif, false);
943 	}
944 
945 	wlcore_op_stop_locked(wl);
946 
947 	ieee80211_restart_hw(wl->hw);
948 
949 	/*
950 	 * Its safe to enable TX now - the queues are stopped after a request
951 	 * to restart the HW.
952 	 */
953 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
954 
955 out_unlock:
956 	wl->watchdog_recovery = false;
957 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
958 	mutex_unlock(&wl->mutex);
959 }
960 
961 static int wlcore_fw_wakeup(struct wl1271 *wl)
962 {
963 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
964 }
965 
966 static int wl1271_setup(struct wl1271 *wl)
967 {
968 	wl->fw_status_1 = kmalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
969 				  sizeof(*wl->fw_status_2) +
970 				  wl->fw_status_priv_len, GFP_KERNEL);
971 	if (!wl->fw_status_1)
972 		return -ENOMEM;
973 
974 	wl->fw_status_2 = (struct wl_fw_status_2 *)
975 				(((u8 *) wl->fw_status_1) +
976 				WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
977 
978 	wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
979 	if (!wl->tx_res_if) {
980 		kfree(wl->fw_status_1);
981 		return -ENOMEM;
982 	}
983 
984 	return 0;
985 }
986 
987 static int wl12xx_set_power_on(struct wl1271 *wl)
988 {
989 	int ret;
990 
991 	msleep(WL1271_PRE_POWER_ON_SLEEP);
992 	ret = wl1271_power_on(wl);
993 	if (ret < 0)
994 		goto out;
995 	msleep(WL1271_POWER_ON_SLEEP);
996 	wl1271_io_reset(wl);
997 	wl1271_io_init(wl);
998 
999 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1000 	if (ret < 0)
1001 		goto fail;
1002 
1003 	/* ELP module wake up */
1004 	ret = wlcore_fw_wakeup(wl);
1005 	if (ret < 0)
1006 		goto fail;
1007 
1008 out:
1009 	return ret;
1010 
1011 fail:
1012 	wl1271_power_off(wl);
1013 	return ret;
1014 }
1015 
1016 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1017 {
1018 	int ret = 0;
1019 
1020 	ret = wl12xx_set_power_on(wl);
1021 	if (ret < 0)
1022 		goto out;
1023 
1024 	/*
1025 	 * For wl127x based devices we could use the default block
1026 	 * size (512 bytes), but due to a bug in the sdio driver, we
1027 	 * need to set it explicitly after the chip is powered on.  To
1028 	 * simplify the code and since the performance impact is
1029 	 * negligible, we use the same block size for all different
1030 	 * chip types.
1031 	 *
1032 	 * Check if the bus supports blocksize alignment and, if it
1033 	 * doesn't, make sure we don't have the quirk.
1034 	 */
1035 	if (!wl1271_set_block_size(wl))
1036 		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1037 
1038 	/* TODO: make sure the lower driver has set things up correctly */
1039 
1040 	ret = wl1271_setup(wl);
1041 	if (ret < 0)
1042 		goto out;
1043 
1044 	ret = wl12xx_fetch_firmware(wl, plt);
1045 	if (ret < 0)
1046 		goto out;
1047 
1048 out:
1049 	return ret;
1050 }
1051 
1052 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1053 {
1054 	int retries = WL1271_BOOT_RETRIES;
1055 	struct wiphy *wiphy = wl->hw->wiphy;
1056 
1057 	static const char* const PLT_MODE[] = {
1058 		"PLT_OFF",
1059 		"PLT_ON",
1060 		"PLT_FEM_DETECT"
1061 	};
1062 
1063 	int ret;
1064 
1065 	mutex_lock(&wl->mutex);
1066 
1067 	wl1271_notice("power up");
1068 
1069 	if (wl->state != WLCORE_STATE_OFF) {
1070 		wl1271_error("cannot go into PLT state because not "
1071 			     "in off state: %d", wl->state);
1072 		ret = -EBUSY;
1073 		goto out;
1074 	}
1075 
1076 	/* Indicate to lower levels that we are now in PLT mode */
1077 	wl->plt = true;
1078 	wl->plt_mode = plt_mode;
1079 
1080 	while (retries) {
1081 		retries--;
1082 		ret = wl12xx_chip_wakeup(wl, true);
1083 		if (ret < 0)
1084 			goto power_off;
1085 
1086 		ret = wl->ops->plt_init(wl);
1087 		if (ret < 0)
1088 			goto power_off;
1089 
1090 		wl->state = WLCORE_STATE_ON;
1091 		wl1271_notice("firmware booted in PLT mode %s (%s)",
1092 			      PLT_MODE[plt_mode],
1093 			      wl->chip.fw_ver_str);
1094 
1095 		/* update hw/fw version info in wiphy struct */
1096 		wiphy->hw_version = wl->chip.id;
1097 		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1098 			sizeof(wiphy->fw_version));
1099 
1100 		goto out;
1101 
1102 power_off:
1103 		wl1271_power_off(wl);
1104 	}
1105 
1106 	wl->plt = false;
1107 	wl->plt_mode = PLT_OFF;
1108 
1109 	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1110 		     WL1271_BOOT_RETRIES);
1111 out:
1112 	mutex_unlock(&wl->mutex);
1113 
1114 	return ret;
1115 }
1116 
1117 int wl1271_plt_stop(struct wl1271 *wl)
1118 {
1119 	int ret = 0;
1120 
1121 	wl1271_notice("power down");
1122 
1123 	/*
1124 	 * Interrupts must be disabled before setting the state to OFF.
1125 	 * Otherwise, the interrupt handler might be called and exit without
1126 	 * reading the interrupt status.
1127 	 */
1128 	wlcore_disable_interrupts(wl);
1129 	mutex_lock(&wl->mutex);
1130 	if (!wl->plt) {
1131 		mutex_unlock(&wl->mutex);
1132 
1133 		/*
1134 		 * This will not necessarily enable interrupts as interrupts
1135 		 * may have been disabled when op_stop was called. It will,
1136 		 * however, balance the above call to disable_interrupts().
1137 		 */
1138 		wlcore_enable_interrupts(wl);
1139 
1140 		wl1271_error("cannot power down because not in PLT "
1141 			     "state: %d", wl->state);
1142 		ret = -EBUSY;
1143 		goto out;
1144 	}
1145 
1146 	mutex_unlock(&wl->mutex);
1147 
1148 	wl1271_flush_deferred_work(wl);
1149 	cancel_work_sync(&wl->netstack_work);
1150 	cancel_work_sync(&wl->recovery_work);
1151 	cancel_delayed_work_sync(&wl->elp_work);
1152 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1153 
1154 	mutex_lock(&wl->mutex);
1155 	wl1271_power_off(wl);
1156 	wl->flags = 0;
1157 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1158 	wl->state = WLCORE_STATE_OFF;
1159 	wl->plt = false;
1160 	wl->plt_mode = PLT_OFF;
1161 	wl->rx_counter = 0;
1162 	mutex_unlock(&wl->mutex);
1163 
1164 out:
1165 	return ret;
1166 }
1167 
1168 static void wl1271_op_tx(struct ieee80211_hw *hw,
1169 			 struct ieee80211_tx_control *control,
1170 			 struct sk_buff *skb)
1171 {
1172 	struct wl1271 *wl = hw->priv;
1173 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1174 	struct ieee80211_vif *vif = info->control.vif;
1175 	struct wl12xx_vif *wlvif = NULL;
1176 	unsigned long flags;
1177 	int q, mapping;
1178 	u8 hlid;
1179 
1180 	if (!vif) {
1181 		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1182 		ieee80211_free_txskb(hw, skb);
1183 		return;
1184 	}
1185 
1186 	wlvif = wl12xx_vif_to_data(vif);
1187 	mapping = skb_get_queue_mapping(skb);
1188 	q = wl1271_tx_get_queue(mapping);
1189 
1190 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1191 
1192 	spin_lock_irqsave(&wl->wl_lock, flags);
1193 
1194 	/*
1195 	 * drop the packet if the link is invalid or the queue is stopped
1196 	 * for any reason but watermark. Watermark is a "soft"-stop so we
1197 	 * allow these packets through.
1198 	 */
1199 	if (hlid == WL12XX_INVALID_LINK_ID ||
1200 	    (!test_bit(hlid, wlvif->links_map)) ||
1201 	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1202 	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1203 			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1204 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1205 		ieee80211_free_txskb(hw, skb);
1206 		goto out;
1207 	}
1208 
1209 	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1210 		     hlid, q, skb->len);
1211 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1212 
1213 	wl->tx_queue_count[q]++;
1214 	wlvif->tx_queue_count[q]++;
1215 
1216 	/*
1217 	 * The workqueue is slow to process the tx_queue and we need stop
1218 	 * the queue here, otherwise the queue will get too long.
1219 	 */
1220 	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1221 	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1222 					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1223 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1224 		wlcore_stop_queue_locked(wl, wlvif, q,
1225 					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1226 	}
1227 
1228 	/*
1229 	 * The chip specific setup must run before the first TX packet -
1230 	 * before that, the tx_work will not be initialized!
1231 	 */
1232 
1233 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1234 	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1235 		ieee80211_queue_work(wl->hw, &wl->tx_work);
1236 
1237 out:
1238 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1239 }
1240 
1241 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1242 {
1243 	unsigned long flags;
1244 	int q;
1245 
1246 	/* no need to queue a new dummy packet if one is already pending */
1247 	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1248 		return 0;
1249 
1250 	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1251 
1252 	spin_lock_irqsave(&wl->wl_lock, flags);
1253 	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1254 	wl->tx_queue_count[q]++;
1255 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1256 
1257 	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1258 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1259 		return wlcore_tx_work_locked(wl);
1260 
1261 	/*
1262 	 * If the FW TX is busy, TX work will be scheduled by the threaded
1263 	 * interrupt handler function
1264 	 */
1265 	return 0;
1266 }
1267 
1268 /*
1269  * The size of the dummy packet should be at least 1400 bytes. However, in
1270  * order to minimize the number of bus transactions, aligning it to 512 bytes
1271  * boundaries could be beneficial, performance wise
1272  */
1273 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1274 
1275 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1276 {
1277 	struct sk_buff *skb;
1278 	struct ieee80211_hdr_3addr *hdr;
1279 	unsigned int dummy_packet_size;
1280 
1281 	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1282 			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1283 
1284 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1285 	if (!skb) {
1286 		wl1271_warning("Failed to allocate a dummy packet skb");
1287 		return NULL;
1288 	}
1289 
1290 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1291 
1292 	hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1293 	memset(hdr, 0, sizeof(*hdr));
1294 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1295 					 IEEE80211_STYPE_NULLFUNC |
1296 					 IEEE80211_FCTL_TODS);
1297 
1298 	memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1299 
1300 	/* Dummy packets require the TID to be management */
1301 	skb->priority = WL1271_TID_MGMT;
1302 
1303 	/* Initialize all fields that might be used */
1304 	skb_set_queue_mapping(skb, 0);
1305 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1306 
1307 	return skb;
1308 }
1309 
1310 
1311 #ifdef CONFIG_PM
1312 static int
1313 wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
1314 {
1315 	int num_fields = 0, in_field = 0, fields_size = 0;
1316 	int i, pattern_len = 0;
1317 
1318 	if (!p->mask) {
1319 		wl1271_warning("No mask in WoWLAN pattern");
1320 		return -EINVAL;
1321 	}
1322 
1323 	/*
1324 	 * The pattern is broken up into segments of bytes at different offsets
1325 	 * that need to be checked by the FW filter. Each segment is called
1326 	 * a field in the FW API. We verify that the total number of fields
1327 	 * required for this pattern won't exceed FW limits (8)
1328 	 * as well as the total fields buffer won't exceed the FW limit.
1329 	 * Note that if there's a pattern which crosses Ethernet/IP header
1330 	 * boundary a new field is required.
1331 	 */
1332 	for (i = 0; i < p->pattern_len; i++) {
1333 		if (test_bit(i, (unsigned long *)p->mask)) {
1334 			if (!in_field) {
1335 				in_field = 1;
1336 				pattern_len = 1;
1337 			} else {
1338 				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1339 					num_fields++;
1340 					fields_size += pattern_len +
1341 						RX_FILTER_FIELD_OVERHEAD;
1342 					pattern_len = 1;
1343 				} else
1344 					pattern_len++;
1345 			}
1346 		} else {
1347 			if (in_field) {
1348 				in_field = 0;
1349 				fields_size += pattern_len +
1350 					RX_FILTER_FIELD_OVERHEAD;
1351 				num_fields++;
1352 			}
1353 		}
1354 	}
1355 
1356 	if (in_field) {
1357 		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1358 		num_fields++;
1359 	}
1360 
1361 	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1362 		wl1271_warning("RX Filter too complex. Too many segments");
1363 		return -EINVAL;
1364 	}
1365 
1366 	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1367 		wl1271_warning("RX filter pattern is too big");
1368 		return -E2BIG;
1369 	}
1370 
1371 	return 0;
1372 }
1373 
1374 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1375 {
1376 	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1377 }
1378 
1379 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1380 {
1381 	int i;
1382 
1383 	if (filter == NULL)
1384 		return;
1385 
1386 	for (i = 0; i < filter->num_fields; i++)
1387 		kfree(filter->fields[i].pattern);
1388 
1389 	kfree(filter);
1390 }
1391 
1392 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1393 				 u16 offset, u8 flags,
1394 				 u8 *pattern, u8 len)
1395 {
1396 	struct wl12xx_rx_filter_field *field;
1397 
1398 	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1399 		wl1271_warning("Max fields per RX filter. can't alloc another");
1400 		return -EINVAL;
1401 	}
1402 
1403 	field = &filter->fields[filter->num_fields];
1404 
1405 	field->pattern = kzalloc(len, GFP_KERNEL);
1406 	if (!field->pattern) {
1407 		wl1271_warning("Failed to allocate RX filter pattern");
1408 		return -ENOMEM;
1409 	}
1410 
1411 	filter->num_fields++;
1412 
1413 	field->offset = cpu_to_le16(offset);
1414 	field->flags = flags;
1415 	field->len = len;
1416 	memcpy(field->pattern, pattern, len);
1417 
1418 	return 0;
1419 }
1420 
1421 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1422 {
1423 	int i, fields_size = 0;
1424 
1425 	for (i = 0; i < filter->num_fields; i++)
1426 		fields_size += filter->fields[i].len +
1427 			sizeof(struct wl12xx_rx_filter_field) -
1428 			sizeof(u8 *);
1429 
1430 	return fields_size;
1431 }
1432 
1433 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1434 				    u8 *buf)
1435 {
1436 	int i;
1437 	struct wl12xx_rx_filter_field *field;
1438 
1439 	for (i = 0; i < filter->num_fields; i++) {
1440 		field = (struct wl12xx_rx_filter_field *)buf;
1441 
1442 		field->offset = filter->fields[i].offset;
1443 		field->flags = filter->fields[i].flags;
1444 		field->len = filter->fields[i].len;
1445 
1446 		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1447 		buf += sizeof(struct wl12xx_rx_filter_field) -
1448 			sizeof(u8 *) + field->len;
1449 	}
1450 }
1451 
1452 /*
1453  * Allocates an RX filter returned through f
1454  * which needs to be freed using rx_filter_free()
1455  */
1456 static int wl1271_convert_wowlan_pattern_to_rx_filter(
1457 	struct cfg80211_wowlan_trig_pkt_pattern *p,
1458 	struct wl12xx_rx_filter **f)
1459 {
1460 	int i, j, ret = 0;
1461 	struct wl12xx_rx_filter *filter;
1462 	u16 offset;
1463 	u8 flags, len;
1464 
1465 	filter = wl1271_rx_filter_alloc();
1466 	if (!filter) {
1467 		wl1271_warning("Failed to alloc rx filter");
1468 		ret = -ENOMEM;
1469 		goto err;
1470 	}
1471 
1472 	i = 0;
1473 	while (i < p->pattern_len) {
1474 		if (!test_bit(i, (unsigned long *)p->mask)) {
1475 			i++;
1476 			continue;
1477 		}
1478 
1479 		for (j = i; j < p->pattern_len; j++) {
1480 			if (!test_bit(j, (unsigned long *)p->mask))
1481 				break;
1482 
1483 			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1484 			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1485 				break;
1486 		}
1487 
1488 		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1489 			offset = i;
1490 			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1491 		} else {
1492 			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1493 			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1494 		}
1495 
1496 		len = j - i;
1497 
1498 		ret = wl1271_rx_filter_alloc_field(filter,
1499 						   offset,
1500 						   flags,
1501 						   &p->pattern[i], len);
1502 		if (ret)
1503 			goto err;
1504 
1505 		i = j;
1506 	}
1507 
1508 	filter->action = FILTER_SIGNAL;
1509 
1510 	*f = filter;
1511 	return 0;
1512 
1513 err:
1514 	wl1271_rx_filter_free(filter);
1515 	*f = NULL;
1516 
1517 	return ret;
1518 }
1519 
1520 static int wl1271_configure_wowlan(struct wl1271 *wl,
1521 				   struct cfg80211_wowlan *wow)
1522 {
1523 	int i, ret;
1524 
1525 	if (!wow || wow->any || !wow->n_patterns) {
1526 		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1527 							  FILTER_SIGNAL);
1528 		if (ret)
1529 			goto out;
1530 
1531 		ret = wl1271_rx_filter_clear_all(wl);
1532 		if (ret)
1533 			goto out;
1534 
1535 		return 0;
1536 	}
1537 
1538 	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1539 		return -EINVAL;
1540 
1541 	/* Validate all incoming patterns before clearing current FW state */
1542 	for (i = 0; i < wow->n_patterns; i++) {
1543 		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1544 		if (ret) {
1545 			wl1271_warning("Bad wowlan pattern %d", i);
1546 			return ret;
1547 		}
1548 	}
1549 
1550 	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1551 	if (ret)
1552 		goto out;
1553 
1554 	ret = wl1271_rx_filter_clear_all(wl);
1555 	if (ret)
1556 		goto out;
1557 
1558 	/* Translate WoWLAN patterns into filters */
1559 	for (i = 0; i < wow->n_patterns; i++) {
1560 		struct cfg80211_wowlan_trig_pkt_pattern *p;
1561 		struct wl12xx_rx_filter *filter = NULL;
1562 
1563 		p = &wow->patterns[i];
1564 
1565 		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1566 		if (ret) {
1567 			wl1271_warning("Failed to create an RX filter from "
1568 				       "wowlan pattern %d", i);
1569 			goto out;
1570 		}
1571 
1572 		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1573 
1574 		wl1271_rx_filter_free(filter);
1575 		if (ret)
1576 			goto out;
1577 	}
1578 
1579 	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1580 
1581 out:
1582 	return ret;
1583 }
1584 
1585 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1586 					struct wl12xx_vif *wlvif,
1587 					struct cfg80211_wowlan *wow)
1588 {
1589 	int ret = 0;
1590 
1591 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1592 		goto out;
1593 
1594 	ret = wl1271_ps_elp_wakeup(wl);
1595 	if (ret < 0)
1596 		goto out;
1597 
1598 	ret = wl1271_configure_wowlan(wl, wow);
1599 	if (ret < 0)
1600 		goto out_sleep;
1601 
1602 	if ((wl->conf.conn.suspend_wake_up_event ==
1603 	     wl->conf.conn.wake_up_event) &&
1604 	    (wl->conf.conn.suspend_listen_interval ==
1605 	     wl->conf.conn.listen_interval))
1606 		goto out_sleep;
1607 
1608 	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1609 				    wl->conf.conn.suspend_wake_up_event,
1610 				    wl->conf.conn.suspend_listen_interval);
1611 
1612 	if (ret < 0)
1613 		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1614 
1615 out_sleep:
1616 	wl1271_ps_elp_sleep(wl);
1617 out:
1618 	return ret;
1619 
1620 }
1621 
1622 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1623 				       struct wl12xx_vif *wlvif)
1624 {
1625 	int ret = 0;
1626 
1627 	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1628 		goto out;
1629 
1630 	ret = wl1271_ps_elp_wakeup(wl);
1631 	if (ret < 0)
1632 		goto out;
1633 
1634 	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1635 
1636 	wl1271_ps_elp_sleep(wl);
1637 out:
1638 	return ret;
1639 
1640 }
1641 
1642 static int wl1271_configure_suspend(struct wl1271 *wl,
1643 				    struct wl12xx_vif *wlvif,
1644 				    struct cfg80211_wowlan *wow)
1645 {
1646 	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1647 		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1648 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1649 		return wl1271_configure_suspend_ap(wl, wlvif);
1650 	return 0;
1651 }
1652 
1653 static void wl1271_configure_resume(struct wl1271 *wl,
1654 				    struct wl12xx_vif *wlvif)
1655 {
1656 	int ret = 0;
1657 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1658 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1659 
1660 	if ((!is_ap) && (!is_sta))
1661 		return;
1662 
1663 	if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1664 		return;
1665 
1666 	ret = wl1271_ps_elp_wakeup(wl);
1667 	if (ret < 0)
1668 		return;
1669 
1670 	if (is_sta) {
1671 		wl1271_configure_wowlan(wl, NULL);
1672 
1673 		if ((wl->conf.conn.suspend_wake_up_event ==
1674 		     wl->conf.conn.wake_up_event) &&
1675 		    (wl->conf.conn.suspend_listen_interval ==
1676 		     wl->conf.conn.listen_interval))
1677 			goto out_sleep;
1678 
1679 		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1680 				    wl->conf.conn.wake_up_event,
1681 				    wl->conf.conn.listen_interval);
1682 
1683 		if (ret < 0)
1684 			wl1271_error("resume: wake up conditions failed: %d",
1685 				     ret);
1686 
1687 	} else if (is_ap) {
1688 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1689 	}
1690 
1691 out_sleep:
1692 	wl1271_ps_elp_sleep(wl);
1693 }
1694 
1695 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1696 			    struct cfg80211_wowlan *wow)
1697 {
1698 	struct wl1271 *wl = hw->priv;
1699 	struct wl12xx_vif *wlvif;
1700 	int ret;
1701 
1702 	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1703 	WARN_ON(!wow);
1704 
1705 	/* we want to perform the recovery before suspending */
1706 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1707 		wl1271_warning("postponing suspend to perform recovery");
1708 		return -EBUSY;
1709 	}
1710 
1711 	wl1271_tx_flush(wl);
1712 
1713 	mutex_lock(&wl->mutex);
1714 	wl->wow_enabled = true;
1715 	wl12xx_for_each_wlvif(wl, wlvif) {
1716 		ret = wl1271_configure_suspend(wl, wlvif, wow);
1717 		if (ret < 0) {
1718 			mutex_unlock(&wl->mutex);
1719 			wl1271_warning("couldn't prepare device to suspend");
1720 			return ret;
1721 		}
1722 	}
1723 	mutex_unlock(&wl->mutex);
1724 	/* flush any remaining work */
1725 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1726 
1727 	/*
1728 	 * disable and re-enable interrupts in order to flush
1729 	 * the threaded_irq
1730 	 */
1731 	wlcore_disable_interrupts(wl);
1732 
1733 	/*
1734 	 * set suspended flag to avoid triggering a new threaded_irq
1735 	 * work. no need for spinlock as interrupts are disabled.
1736 	 */
1737 	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1738 
1739 	wlcore_enable_interrupts(wl);
1740 	flush_work(&wl->tx_work);
1741 	flush_delayed_work(&wl->elp_work);
1742 
1743 	return 0;
1744 }
1745 
1746 static int wl1271_op_resume(struct ieee80211_hw *hw)
1747 {
1748 	struct wl1271 *wl = hw->priv;
1749 	struct wl12xx_vif *wlvif;
1750 	unsigned long flags;
1751 	bool run_irq_work = false, pending_recovery;
1752 	int ret;
1753 
1754 	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1755 		     wl->wow_enabled);
1756 	WARN_ON(!wl->wow_enabled);
1757 
1758 	/*
1759 	 * re-enable irq_work enqueuing, and call irq_work directly if
1760 	 * there is a pending work.
1761 	 */
1762 	spin_lock_irqsave(&wl->wl_lock, flags);
1763 	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1764 	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1765 		run_irq_work = true;
1766 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1767 
1768 	mutex_lock(&wl->mutex);
1769 
1770 	/* test the recovery flag before calling any SDIO functions */
1771 	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1772 				    &wl->flags);
1773 
1774 	if (run_irq_work) {
1775 		wl1271_debug(DEBUG_MAC80211,
1776 			     "run postponed irq_work directly");
1777 
1778 		/* don't talk to the HW if recovery is pending */
1779 		if (!pending_recovery) {
1780 			ret = wlcore_irq_locked(wl);
1781 			if (ret)
1782 				wl12xx_queue_recovery_work(wl);
1783 		}
1784 
1785 		wlcore_enable_interrupts(wl);
1786 	}
1787 
1788 	if (pending_recovery) {
1789 		wl1271_warning("queuing forgotten recovery on resume");
1790 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1791 		goto out;
1792 	}
1793 
1794 	wl12xx_for_each_wlvif(wl, wlvif) {
1795 		wl1271_configure_resume(wl, wlvif);
1796 	}
1797 
1798 out:
1799 	wl->wow_enabled = false;
1800 	mutex_unlock(&wl->mutex);
1801 
1802 	return 0;
1803 }
1804 #endif
1805 
1806 static int wl1271_op_start(struct ieee80211_hw *hw)
1807 {
1808 	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1809 
1810 	/*
1811 	 * We have to delay the booting of the hardware because
1812 	 * we need to know the local MAC address before downloading and
1813 	 * initializing the firmware. The MAC address cannot be changed
1814 	 * after boot, and without the proper MAC address, the firmware
1815 	 * will not function properly.
1816 	 *
1817 	 * The MAC address is first known when the corresponding interface
1818 	 * is added. That is where we will initialize the hardware.
1819 	 */
1820 
1821 	return 0;
1822 }
1823 
1824 static void wlcore_op_stop_locked(struct wl1271 *wl)
1825 {
1826 	int i;
1827 
1828 	if (wl->state == WLCORE_STATE_OFF) {
1829 		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1830 					&wl->flags))
1831 			wlcore_enable_interrupts(wl);
1832 
1833 		return;
1834 	}
1835 
1836 	/*
1837 	 * this must be before the cancel_work calls below, so that the work
1838 	 * functions don't perform further work.
1839 	 */
1840 	wl->state = WLCORE_STATE_OFF;
1841 
1842 	/*
1843 	 * Use the nosync variant to disable interrupts, so the mutex could be
1844 	 * held while doing so without deadlocking.
1845 	 */
1846 	wlcore_disable_interrupts_nosync(wl);
1847 
1848 	mutex_unlock(&wl->mutex);
1849 
1850 	wlcore_synchronize_interrupts(wl);
1851 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1852 		cancel_work_sync(&wl->recovery_work);
1853 	wl1271_flush_deferred_work(wl);
1854 	cancel_delayed_work_sync(&wl->scan_complete_work);
1855 	cancel_work_sync(&wl->netstack_work);
1856 	cancel_work_sync(&wl->tx_work);
1857 	cancel_delayed_work_sync(&wl->elp_work);
1858 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1859 
1860 	/* let's notify MAC80211 about the remaining pending TX frames */
1861 	mutex_lock(&wl->mutex);
1862 	wl12xx_tx_reset(wl);
1863 
1864 	wl1271_power_off(wl);
1865 	/*
1866 	 * In case a recovery was scheduled, interrupts were disabled to avoid
1867 	 * an interrupt storm. Now that the power is down, it is safe to
1868 	 * re-enable interrupts to balance the disable depth
1869 	 */
1870 	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1871 		wlcore_enable_interrupts(wl);
1872 
1873 	wl->band = IEEE80211_BAND_2GHZ;
1874 
1875 	wl->rx_counter = 0;
1876 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1877 	wl->channel_type = NL80211_CHAN_NO_HT;
1878 	wl->tx_blocks_available = 0;
1879 	wl->tx_allocated_blocks = 0;
1880 	wl->tx_results_count = 0;
1881 	wl->tx_packets_count = 0;
1882 	wl->time_offset = 0;
1883 	wl->ap_fw_ps_map = 0;
1884 	wl->ap_ps_map = 0;
1885 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1886 	memset(wl->roles_map, 0, sizeof(wl->roles_map));
1887 	memset(wl->links_map, 0, sizeof(wl->links_map));
1888 	memset(wl->roc_map, 0, sizeof(wl->roc_map));
1889 	memset(wl->session_ids, 0, sizeof(wl->session_ids));
1890 	wl->active_sta_count = 0;
1891 	wl->active_link_count = 0;
1892 
1893 	/* The system link is always allocated */
1894 	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1895 	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1896 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1897 
1898 	/*
1899 	 * this is performed after the cancel_work calls and the associated
1900 	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1901 	 * get executed before all these vars have been reset.
1902 	 */
1903 	wl->flags = 0;
1904 
1905 	wl->tx_blocks_freed = 0;
1906 
1907 	for (i = 0; i < NUM_TX_QUEUES; i++) {
1908 		wl->tx_pkts_freed[i] = 0;
1909 		wl->tx_allocated_pkts[i] = 0;
1910 	}
1911 
1912 	wl1271_debugfs_reset(wl);
1913 
1914 	kfree(wl->fw_status_1);
1915 	wl->fw_status_1 = NULL;
1916 	wl->fw_status_2 = NULL;
1917 	kfree(wl->tx_res_if);
1918 	wl->tx_res_if = NULL;
1919 	kfree(wl->target_mem_map);
1920 	wl->target_mem_map = NULL;
1921 
1922 	/*
1923 	 * FW channels must be re-calibrated after recovery,
1924 	 * clear the last Reg-Domain channel configuration.
1925 	 */
1926 	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
1927 }
1928 
1929 static void wlcore_op_stop(struct ieee80211_hw *hw)
1930 {
1931 	struct wl1271 *wl = hw->priv;
1932 
1933 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1934 
1935 	mutex_lock(&wl->mutex);
1936 
1937 	wlcore_op_stop_locked(wl);
1938 
1939 	mutex_unlock(&wl->mutex);
1940 }
1941 
1942 static void wlcore_channel_switch_work(struct work_struct *work)
1943 {
1944 	struct delayed_work *dwork;
1945 	struct wl1271 *wl;
1946 	struct ieee80211_vif *vif;
1947 	struct wl12xx_vif *wlvif;
1948 	int ret;
1949 
1950 	dwork = container_of(work, struct delayed_work, work);
1951 	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
1952 	wl = wlvif->wl;
1953 
1954 	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
1955 
1956 	mutex_lock(&wl->mutex);
1957 
1958 	if (unlikely(wl->state != WLCORE_STATE_ON))
1959 		goto out;
1960 
1961 	/* check the channel switch is still ongoing */
1962 	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
1963 		goto out;
1964 
1965 	vif = wl12xx_wlvif_to_vif(wlvif);
1966 	ieee80211_chswitch_done(vif, false);
1967 
1968 	ret = wl1271_ps_elp_wakeup(wl);
1969 	if (ret < 0)
1970 		goto out;
1971 
1972 	wl12xx_cmd_stop_channel_switch(wl, wlvif);
1973 
1974 	wl1271_ps_elp_sleep(wl);
1975 out:
1976 	mutex_unlock(&wl->mutex);
1977 }
1978 
1979 static void wlcore_connection_loss_work(struct work_struct *work)
1980 {
1981 	struct delayed_work *dwork;
1982 	struct wl1271 *wl;
1983 	struct ieee80211_vif *vif;
1984 	struct wl12xx_vif *wlvif;
1985 
1986 	dwork = container_of(work, struct delayed_work, work);
1987 	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
1988 	wl = wlvif->wl;
1989 
1990 	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
1991 
1992 	mutex_lock(&wl->mutex);
1993 
1994 	if (unlikely(wl->state != WLCORE_STATE_ON))
1995 		goto out;
1996 
1997 	/* Call mac80211 connection loss */
1998 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1999 		goto out;
2000 
2001 	vif = wl12xx_wlvif_to_vif(wlvif);
2002 	ieee80211_connection_loss(vif);
2003 out:
2004 	mutex_unlock(&wl->mutex);
2005 }
2006 
2007 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2008 {
2009 	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2010 					WL12XX_MAX_RATE_POLICIES);
2011 	if (policy >= WL12XX_MAX_RATE_POLICIES)
2012 		return -EBUSY;
2013 
2014 	__set_bit(policy, wl->rate_policies_map);
2015 	*idx = policy;
2016 	return 0;
2017 }
2018 
2019 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2020 {
2021 	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2022 		return;
2023 
2024 	__clear_bit(*idx, wl->rate_policies_map);
2025 	*idx = WL12XX_MAX_RATE_POLICIES;
2026 }
2027 
2028 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2029 {
2030 	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2031 					WLCORE_MAX_KLV_TEMPLATES);
2032 	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2033 		return -EBUSY;
2034 
2035 	__set_bit(policy, wl->klv_templates_map);
2036 	*idx = policy;
2037 	return 0;
2038 }
2039 
2040 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2041 {
2042 	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2043 		return;
2044 
2045 	__clear_bit(*idx, wl->klv_templates_map);
2046 	*idx = WLCORE_MAX_KLV_TEMPLATES;
2047 }
2048 
2049 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2050 {
2051 	switch (wlvif->bss_type) {
2052 	case BSS_TYPE_AP_BSS:
2053 		if (wlvif->p2p)
2054 			return WL1271_ROLE_P2P_GO;
2055 		else
2056 			return WL1271_ROLE_AP;
2057 
2058 	case BSS_TYPE_STA_BSS:
2059 		if (wlvif->p2p)
2060 			return WL1271_ROLE_P2P_CL;
2061 		else
2062 			return WL1271_ROLE_STA;
2063 
2064 	case BSS_TYPE_IBSS:
2065 		return WL1271_ROLE_IBSS;
2066 
2067 	default:
2068 		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2069 	}
2070 	return WL12XX_INVALID_ROLE_TYPE;
2071 }
2072 
2073 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2074 {
2075 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2076 	int i;
2077 
2078 	/* clear everything but the persistent data */
2079 	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2080 
2081 	switch (ieee80211_vif_type_p2p(vif)) {
2082 	case NL80211_IFTYPE_P2P_CLIENT:
2083 		wlvif->p2p = 1;
2084 		/* fall-through */
2085 	case NL80211_IFTYPE_STATION:
2086 		wlvif->bss_type = BSS_TYPE_STA_BSS;
2087 		break;
2088 	case NL80211_IFTYPE_ADHOC:
2089 		wlvif->bss_type = BSS_TYPE_IBSS;
2090 		break;
2091 	case NL80211_IFTYPE_P2P_GO:
2092 		wlvif->p2p = 1;
2093 		/* fall-through */
2094 	case NL80211_IFTYPE_AP:
2095 		wlvif->bss_type = BSS_TYPE_AP_BSS;
2096 		break;
2097 	default:
2098 		wlvif->bss_type = MAX_BSS_TYPE;
2099 		return -EOPNOTSUPP;
2100 	}
2101 
2102 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2103 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2104 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2105 
2106 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2107 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2108 		/* init sta/ibss data */
2109 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2110 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2111 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2112 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2113 		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2114 		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2115 		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2116 		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2117 	} else {
2118 		/* init ap data */
2119 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2120 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2121 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2122 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2123 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2124 			wl12xx_allocate_rate_policy(wl,
2125 						&wlvif->ap.ucast_rate_idx[i]);
2126 		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2127 		/*
2128 		 * TODO: check if basic_rate shouldn't be
2129 		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2130 		 * instead (the same thing for STA above).
2131 		*/
2132 		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2133 		/* TODO: this seems to be used only for STA, check it */
2134 		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2135 	}
2136 
2137 	wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2138 	wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2139 	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2140 
2141 	/*
2142 	 * mac80211 configures some values globally, while we treat them
2143 	 * per-interface. thus, on init, we have to copy them from wl
2144 	 */
2145 	wlvif->band = wl->band;
2146 	wlvif->channel = wl->channel;
2147 	wlvif->power_level = wl->power_level;
2148 	wlvif->channel_type = wl->channel_type;
2149 
2150 	INIT_WORK(&wlvif->rx_streaming_enable_work,
2151 		  wl1271_rx_streaming_enable_work);
2152 	INIT_WORK(&wlvif->rx_streaming_disable_work,
2153 		  wl1271_rx_streaming_disable_work);
2154 	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2155 			  wlcore_channel_switch_work);
2156 	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2157 			  wlcore_connection_loss_work);
2158 	INIT_LIST_HEAD(&wlvif->list);
2159 
2160 	setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2161 		    (unsigned long) wlvif);
2162 	return 0;
2163 }
2164 
2165 static int wl12xx_init_fw(struct wl1271 *wl)
2166 {
2167 	int retries = WL1271_BOOT_RETRIES;
2168 	bool booted = false;
2169 	struct wiphy *wiphy = wl->hw->wiphy;
2170 	int ret;
2171 
2172 	while (retries) {
2173 		retries--;
2174 		ret = wl12xx_chip_wakeup(wl, false);
2175 		if (ret < 0)
2176 			goto power_off;
2177 
2178 		ret = wl->ops->boot(wl);
2179 		if (ret < 0)
2180 			goto power_off;
2181 
2182 		ret = wl1271_hw_init(wl);
2183 		if (ret < 0)
2184 			goto irq_disable;
2185 
2186 		booted = true;
2187 		break;
2188 
2189 irq_disable:
2190 		mutex_unlock(&wl->mutex);
2191 		/* Unlocking the mutex in the middle of handling is
2192 		   inherently unsafe. In this case we deem it safe to do,
2193 		   because we need to let any possibly pending IRQ out of
2194 		   the system (and while we are WLCORE_STATE_OFF the IRQ
2195 		   work function will not do anything.) Also, any other
2196 		   possible concurrent operations will fail due to the
2197 		   current state, hence the wl1271 struct should be safe. */
2198 		wlcore_disable_interrupts(wl);
2199 		wl1271_flush_deferred_work(wl);
2200 		cancel_work_sync(&wl->netstack_work);
2201 		mutex_lock(&wl->mutex);
2202 power_off:
2203 		wl1271_power_off(wl);
2204 	}
2205 
2206 	if (!booted) {
2207 		wl1271_error("firmware boot failed despite %d retries",
2208 			     WL1271_BOOT_RETRIES);
2209 		goto out;
2210 	}
2211 
2212 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2213 
2214 	/* update hw/fw version info in wiphy struct */
2215 	wiphy->hw_version = wl->chip.id;
2216 	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2217 		sizeof(wiphy->fw_version));
2218 
2219 	/*
2220 	 * Now we know if 11a is supported (info from the NVS), so disable
2221 	 * 11a channels if not supported
2222 	 */
2223 	if (!wl->enable_11a)
2224 		wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2225 
2226 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2227 		     wl->enable_11a ? "" : "not ");
2228 
2229 	wl->state = WLCORE_STATE_ON;
2230 out:
2231 	return ret;
2232 }
2233 
2234 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2235 {
2236 	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2237 }
2238 
2239 /*
2240  * Check whether a fw switch (i.e. moving from one loaded
2241  * fw to another) is needed. This function is also responsible
2242  * for updating wl->last_vif_count, so it must be called before
2243  * loading a non-plt fw (so the correct fw (single-role/multi-role)
2244  * will be used).
2245  */
2246 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2247 				  struct vif_counter_data vif_counter_data,
2248 				  bool add)
2249 {
2250 	enum wl12xx_fw_type current_fw = wl->fw_type;
2251 	u8 vif_count = vif_counter_data.counter;
2252 
2253 	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2254 		return false;
2255 
2256 	/* increase the vif count if this is a new vif */
2257 	if (add && !vif_counter_data.cur_vif_running)
2258 		vif_count++;
2259 
2260 	wl->last_vif_count = vif_count;
2261 
2262 	/* no need for fw change if the device is OFF */
2263 	if (wl->state == WLCORE_STATE_OFF)
2264 		return false;
2265 
2266 	/* no need for fw change if a single fw is used */
2267 	if (!wl->mr_fw_name)
2268 		return false;
2269 
2270 	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2271 		return true;
2272 	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2273 		return true;
2274 
2275 	return false;
2276 }
2277 
2278 /*
2279  * Enter "forced psm". Make sure the sta is in psm against the ap,
2280  * to make the fw switch a bit more disconnection-persistent.
2281  */
2282 static void wl12xx_force_active_psm(struct wl1271 *wl)
2283 {
2284 	struct wl12xx_vif *wlvif;
2285 
2286 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2287 		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2288 	}
2289 }
2290 
2291 struct wlcore_hw_queue_iter_data {
2292 	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2293 	/* current vif */
2294 	struct ieee80211_vif *vif;
2295 	/* is the current vif among those iterated */
2296 	bool cur_running;
2297 };
2298 
2299 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2300 				 struct ieee80211_vif *vif)
2301 {
2302 	struct wlcore_hw_queue_iter_data *iter_data = data;
2303 
2304 	if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2305 		return;
2306 
2307 	if (iter_data->cur_running || vif == iter_data->vif) {
2308 		iter_data->cur_running = true;
2309 		return;
2310 	}
2311 
2312 	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2313 }
2314 
2315 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2316 					 struct wl12xx_vif *wlvif)
2317 {
2318 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2319 	struct wlcore_hw_queue_iter_data iter_data = {};
2320 	int i, q_base;
2321 
2322 	iter_data.vif = vif;
2323 
2324 	/* mark all bits taken by active interfaces */
2325 	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2326 					IEEE80211_IFACE_ITER_RESUME_ALL,
2327 					wlcore_hw_queue_iter, &iter_data);
2328 
2329 	/* the current vif is already running in mac80211 (resume/recovery) */
2330 	if (iter_data.cur_running) {
2331 		wlvif->hw_queue_base = vif->hw_queue[0];
2332 		wl1271_debug(DEBUG_MAC80211,
2333 			     "using pre-allocated hw queue base %d",
2334 			     wlvif->hw_queue_base);
2335 
2336 		/* interface type might have changed type */
2337 		goto adjust_cab_queue;
2338 	}
2339 
2340 	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2341 				     WLCORE_NUM_MAC_ADDRESSES);
2342 	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2343 		return -EBUSY;
2344 
2345 	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2346 	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2347 		     wlvif->hw_queue_base);
2348 
2349 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2350 		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2351 		/* register hw queues in mac80211 */
2352 		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2353 	}
2354 
2355 adjust_cab_queue:
2356 	/* the last places are reserved for cab queues per interface */
2357 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2358 		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2359 				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2360 	else
2361 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2362 
2363 	return 0;
2364 }
2365 
2366 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2367 				   struct ieee80211_vif *vif)
2368 {
2369 	struct wl1271 *wl = hw->priv;
2370 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2371 	struct vif_counter_data vif_count;
2372 	int ret = 0;
2373 	u8 role_type;
2374 
2375 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2376 			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2377 
2378 	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2379 		     ieee80211_vif_type_p2p(vif), vif->addr);
2380 
2381 	wl12xx_get_vif_count(hw, vif, &vif_count);
2382 
2383 	mutex_lock(&wl->mutex);
2384 	ret = wl1271_ps_elp_wakeup(wl);
2385 	if (ret < 0)
2386 		goto out_unlock;
2387 
2388 	/*
2389 	 * in some very corner case HW recovery scenarios its possible to
2390 	 * get here before __wl1271_op_remove_interface is complete, so
2391 	 * opt out if that is the case.
2392 	 */
2393 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2394 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2395 		ret = -EBUSY;
2396 		goto out;
2397 	}
2398 
2399 
2400 	ret = wl12xx_init_vif_data(wl, vif);
2401 	if (ret < 0)
2402 		goto out;
2403 
2404 	wlvif->wl = wl;
2405 	role_type = wl12xx_get_role_type(wl, wlvif);
2406 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2407 		ret = -EINVAL;
2408 		goto out;
2409 	}
2410 
2411 	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2412 	if (ret < 0)
2413 		goto out;
2414 
2415 	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2416 		wl12xx_force_active_psm(wl);
2417 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2418 		mutex_unlock(&wl->mutex);
2419 		wl1271_recovery_work(&wl->recovery_work);
2420 		return 0;
2421 	}
2422 
2423 	/*
2424 	 * TODO: after the nvs issue will be solved, move this block
2425 	 * to start(), and make sure here the driver is ON.
2426 	 */
2427 	if (wl->state == WLCORE_STATE_OFF) {
2428 		/*
2429 		 * we still need this in order to configure the fw
2430 		 * while uploading the nvs
2431 		 */
2432 		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2433 
2434 		ret = wl12xx_init_fw(wl);
2435 		if (ret < 0)
2436 			goto out;
2437 	}
2438 
2439 	ret = wl12xx_cmd_role_enable(wl, vif->addr,
2440 				     role_type, &wlvif->role_id);
2441 	if (ret < 0)
2442 		goto out;
2443 
2444 	ret = wl1271_init_vif_specific(wl, vif);
2445 	if (ret < 0)
2446 		goto out;
2447 
2448 	list_add(&wlvif->list, &wl->wlvif_list);
2449 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2450 
2451 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2452 		wl->ap_count++;
2453 	else
2454 		wl->sta_count++;
2455 out:
2456 	wl1271_ps_elp_sleep(wl);
2457 out_unlock:
2458 	mutex_unlock(&wl->mutex);
2459 
2460 	return ret;
2461 }
2462 
2463 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2464 					 struct ieee80211_vif *vif,
2465 					 bool reset_tx_queues)
2466 {
2467 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2468 	int i, ret;
2469 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2470 
2471 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2472 
2473 	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2474 		return;
2475 
2476 	/* because of hardware recovery, we may get here twice */
2477 	if (wl->state == WLCORE_STATE_OFF)
2478 		return;
2479 
2480 	wl1271_info("down");
2481 
2482 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2483 	    wl->scan_wlvif == wlvif) {
2484 		/*
2485 		 * Rearm the tx watchdog just before idling scan. This
2486 		 * prevents just-finished scans from triggering the watchdog
2487 		 */
2488 		wl12xx_rearm_tx_watchdog_locked(wl);
2489 
2490 		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2491 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2492 		wl->scan_wlvif = NULL;
2493 		wl->scan.req = NULL;
2494 		ieee80211_scan_completed(wl->hw, true);
2495 	}
2496 
2497 	if (wl->sched_vif == wlvif) {
2498 		ieee80211_sched_scan_stopped(wl->hw);
2499 		wl->sched_vif = NULL;
2500 	}
2501 
2502 	if (wl->roc_vif == vif) {
2503 		wl->roc_vif = NULL;
2504 		ieee80211_remain_on_channel_expired(wl->hw);
2505 	}
2506 
2507 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2508 		/* disable active roles */
2509 		ret = wl1271_ps_elp_wakeup(wl);
2510 		if (ret < 0)
2511 			goto deinit;
2512 
2513 		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2514 		    wlvif->bss_type == BSS_TYPE_IBSS) {
2515 			if (wl12xx_dev_role_started(wlvif))
2516 				wl12xx_stop_dev(wl, wlvif);
2517 		}
2518 
2519 		ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2520 		if (ret < 0)
2521 			goto deinit;
2522 
2523 		wl1271_ps_elp_sleep(wl);
2524 	}
2525 deinit:
2526 	/* clear all hlids (except system_hlid) */
2527 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2528 
2529 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2530 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2531 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2532 		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2533 		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2534 		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2535 		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2536 	} else {
2537 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2538 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2539 		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2540 		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2541 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2542 			wl12xx_free_rate_policy(wl,
2543 						&wlvif->ap.ucast_rate_idx[i]);
2544 		wl1271_free_ap_keys(wl, wlvif);
2545 	}
2546 
2547 	dev_kfree_skb(wlvif->probereq);
2548 	wlvif->probereq = NULL;
2549 	wl12xx_tx_reset_wlvif(wl, wlvif);
2550 	if (wl->last_wlvif == wlvif)
2551 		wl->last_wlvif = NULL;
2552 	list_del(&wlvif->list);
2553 	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2554 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2555 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2556 
2557 	if (is_ap)
2558 		wl->ap_count--;
2559 	else
2560 		wl->sta_count--;
2561 
2562 	/*
2563 	 * Last AP, have more stations. Configure sleep auth according to STA.
2564 	 * Don't do thin on unintended recovery.
2565 	 */
2566 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2567 	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2568 		goto unlock;
2569 
2570 	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2571 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2572 		/* Configure for power according to debugfs */
2573 		if (sta_auth != WL1271_PSM_ILLEGAL)
2574 			wl1271_acx_sleep_auth(wl, sta_auth);
2575 		/* Configure for ELP power saving */
2576 		else
2577 			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2578 	}
2579 
2580 unlock:
2581 	mutex_unlock(&wl->mutex);
2582 
2583 	del_timer_sync(&wlvif->rx_streaming_timer);
2584 	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2585 	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2586 	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2587 
2588 	mutex_lock(&wl->mutex);
2589 }
2590 
2591 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2592 				       struct ieee80211_vif *vif)
2593 {
2594 	struct wl1271 *wl = hw->priv;
2595 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2596 	struct wl12xx_vif *iter;
2597 	struct vif_counter_data vif_count;
2598 
2599 	wl12xx_get_vif_count(hw, vif, &vif_count);
2600 	mutex_lock(&wl->mutex);
2601 
2602 	if (wl->state == WLCORE_STATE_OFF ||
2603 	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2604 		goto out;
2605 
2606 	/*
2607 	 * wl->vif can be null here if someone shuts down the interface
2608 	 * just when hardware recovery has been started.
2609 	 */
2610 	wl12xx_for_each_wlvif(wl, iter) {
2611 		if (iter != wlvif)
2612 			continue;
2613 
2614 		__wl1271_op_remove_interface(wl, vif, true);
2615 		break;
2616 	}
2617 	WARN_ON(iter != wlvif);
2618 	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2619 		wl12xx_force_active_psm(wl);
2620 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2621 		wl12xx_queue_recovery_work(wl);
2622 	}
2623 out:
2624 	mutex_unlock(&wl->mutex);
2625 }
2626 
2627 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2628 				      struct ieee80211_vif *vif,
2629 				      enum nl80211_iftype new_type, bool p2p)
2630 {
2631 	struct wl1271 *wl = hw->priv;
2632 	int ret;
2633 
2634 	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2635 	wl1271_op_remove_interface(hw, vif);
2636 
2637 	vif->type = new_type;
2638 	vif->p2p = p2p;
2639 	ret = wl1271_op_add_interface(hw, vif);
2640 
2641 	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2642 	return ret;
2643 }
2644 
2645 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2646 {
2647 	int ret;
2648 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2649 
2650 	/*
2651 	 * One of the side effects of the JOIN command is that is clears
2652 	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2653 	 * to a WPA/WPA2 access point will therefore kill the data-path.
2654 	 * Currently the only valid scenario for JOIN during association
2655 	 * is on roaming, in which case we will also be given new keys.
2656 	 * Keep the below message for now, unless it starts bothering
2657 	 * users who really like to roam a lot :)
2658 	 */
2659 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2660 		wl1271_info("JOIN while associated.");
2661 
2662 	/* clear encryption type */
2663 	wlvif->encryption_type = KEY_NONE;
2664 
2665 	if (is_ibss)
2666 		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2667 	else {
2668 		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2669 			/*
2670 			 * TODO: this is an ugly workaround for wl12xx fw
2671 			 * bug - we are not able to tx/rx after the first
2672 			 * start_sta, so make dummy start+stop calls,
2673 			 * and then call start_sta again.
2674 			 * this should be fixed in the fw.
2675 			 */
2676 			wl12xx_cmd_role_start_sta(wl, wlvif);
2677 			wl12xx_cmd_role_stop_sta(wl, wlvif);
2678 		}
2679 
2680 		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2681 	}
2682 
2683 	return ret;
2684 }
2685 
2686 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2687 			    int offset)
2688 {
2689 	u8 ssid_len;
2690 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2691 					 skb->len - offset);
2692 
2693 	if (!ptr) {
2694 		wl1271_error("No SSID in IEs!");
2695 		return -ENOENT;
2696 	}
2697 
2698 	ssid_len = ptr[1];
2699 	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2700 		wl1271_error("SSID is too long!");
2701 		return -EINVAL;
2702 	}
2703 
2704 	wlvif->ssid_len = ssid_len;
2705 	memcpy(wlvif->ssid, ptr+2, ssid_len);
2706 	return 0;
2707 }
2708 
2709 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2710 {
2711 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2712 	struct sk_buff *skb;
2713 	int ieoffset;
2714 
2715 	/* we currently only support setting the ssid from the ap probe req */
2716 	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2717 		return -EINVAL;
2718 
2719 	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2720 	if (!skb)
2721 		return -EINVAL;
2722 
2723 	ieoffset = offsetof(struct ieee80211_mgmt,
2724 			    u.probe_req.variable);
2725 	wl1271_ssid_set(wlvif, skb, ieoffset);
2726 	dev_kfree_skb(skb);
2727 
2728 	return 0;
2729 }
2730 
2731 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2732 			    struct ieee80211_bss_conf *bss_conf,
2733 			    u32 sta_rate_set)
2734 {
2735 	int ieoffset;
2736 	int ret;
2737 
2738 	wlvif->aid = bss_conf->aid;
2739 	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2740 	wlvif->beacon_int = bss_conf->beacon_int;
2741 	wlvif->wmm_enabled = bss_conf->qos;
2742 
2743 	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2744 
2745 	/*
2746 	 * with wl1271, we don't need to update the
2747 	 * beacon_int and dtim_period, because the firmware
2748 	 * updates it by itself when the first beacon is
2749 	 * received after a join.
2750 	 */
2751 	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2752 	if (ret < 0)
2753 		return ret;
2754 
2755 	/*
2756 	 * Get a template for hardware connection maintenance
2757 	 */
2758 	dev_kfree_skb(wlvif->probereq);
2759 	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2760 							wlvif,
2761 							NULL);
2762 	ieoffset = offsetof(struct ieee80211_mgmt,
2763 			    u.probe_req.variable);
2764 	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2765 
2766 	/* enable the connection monitoring feature */
2767 	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2768 	if (ret < 0)
2769 		return ret;
2770 
2771 	/*
2772 	 * The join command disable the keep-alive mode, shut down its process,
2773 	 * and also clear the template config, so we need to reset it all after
2774 	 * the join. The acx_aid starts the keep-alive process, and the order
2775 	 * of the commands below is relevant.
2776 	 */
2777 	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2778 	if (ret < 0)
2779 		return ret;
2780 
2781 	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2782 	if (ret < 0)
2783 		return ret;
2784 
2785 	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2786 	if (ret < 0)
2787 		return ret;
2788 
2789 	ret = wl1271_acx_keep_alive_config(wl, wlvif,
2790 					   wlvif->sta.klv_template_id,
2791 					   ACX_KEEP_ALIVE_TPL_VALID);
2792 	if (ret < 0)
2793 		return ret;
2794 
2795 	/*
2796 	 * The default fw psm configuration is AUTO, while mac80211 default
2797 	 * setting is off (ACTIVE), so sync the fw with the correct value.
2798 	 */
2799 	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2800 	if (ret < 0)
2801 		return ret;
2802 
2803 	if (sta_rate_set) {
2804 		wlvif->rate_set =
2805 			wl1271_tx_enabled_rates_get(wl,
2806 						    sta_rate_set,
2807 						    wlvif->band);
2808 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2809 		if (ret < 0)
2810 			return ret;
2811 	}
2812 
2813 	return ret;
2814 }
2815 
2816 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2817 {
2818 	int ret;
2819 	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2820 
2821 	/* make sure we are connected (sta) joined */
2822 	if (sta &&
2823 	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2824 		return false;
2825 
2826 	/* make sure we are joined (ibss) */
2827 	if (!sta &&
2828 	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2829 		return false;
2830 
2831 	if (sta) {
2832 		/* use defaults when not associated */
2833 		wlvif->aid = 0;
2834 
2835 		/* free probe-request template */
2836 		dev_kfree_skb(wlvif->probereq);
2837 		wlvif->probereq = NULL;
2838 
2839 		/* disable connection monitor features */
2840 		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
2841 		if (ret < 0)
2842 			return ret;
2843 
2844 		/* Disable the keep-alive feature */
2845 		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
2846 		if (ret < 0)
2847 			return ret;
2848 	}
2849 
2850 	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2851 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2852 
2853 		wl12xx_cmd_stop_channel_switch(wl, wlvif);
2854 		ieee80211_chswitch_done(vif, false);
2855 		cancel_delayed_work(&wlvif->channel_switch_work);
2856 	}
2857 
2858 	/* invalidate keep-alive template */
2859 	wl1271_acx_keep_alive_config(wl, wlvif,
2860 				     wlvif->sta.klv_template_id,
2861 				     ACX_KEEP_ALIVE_TPL_INVALID);
2862 
2863 	/* reset TX security counters on a clean disconnect */
2864 	wlvif->tx_security_last_seq_lsb = 0;
2865 	wlvif->tx_security_seq = 0;
2866 
2867 	return 0;
2868 }
2869 
2870 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2871 {
2872 	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2873 	wlvif->rate_set = wlvif->basic_rate_set;
2874 }
2875 
2876 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2877 			     struct ieee80211_conf *conf, u32 changed)
2878 {
2879 	int ret;
2880 
2881 	if (conf->power_level != wlvif->power_level) {
2882 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2883 		if (ret < 0)
2884 			return ret;
2885 
2886 		wlvif->power_level = conf->power_level;
2887 	}
2888 
2889 	return 0;
2890 }
2891 
2892 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2893 {
2894 	struct wl1271 *wl = hw->priv;
2895 	struct wl12xx_vif *wlvif;
2896 	struct ieee80211_conf *conf = &hw->conf;
2897 	int ret = 0;
2898 
2899 	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
2900 		     " changed 0x%x",
2901 		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
2902 		     conf->power_level,
2903 		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
2904 			 changed);
2905 
2906 	mutex_lock(&wl->mutex);
2907 
2908 	if (changed & IEEE80211_CONF_CHANGE_POWER)
2909 		wl->power_level = conf->power_level;
2910 
2911 	if (unlikely(wl->state != WLCORE_STATE_ON))
2912 		goto out;
2913 
2914 	ret = wl1271_ps_elp_wakeup(wl);
2915 	if (ret < 0)
2916 		goto out;
2917 
2918 	/* configure each interface */
2919 	wl12xx_for_each_wlvif(wl, wlvif) {
2920 		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
2921 		if (ret < 0)
2922 			goto out_sleep;
2923 	}
2924 
2925 out_sleep:
2926 	wl1271_ps_elp_sleep(wl);
2927 
2928 out:
2929 	mutex_unlock(&wl->mutex);
2930 
2931 	return ret;
2932 }
2933 
2934 struct wl1271_filter_params {
2935 	bool enabled;
2936 	int mc_list_length;
2937 	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
2938 };
2939 
2940 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
2941 				       struct netdev_hw_addr_list *mc_list)
2942 {
2943 	struct wl1271_filter_params *fp;
2944 	struct netdev_hw_addr *ha;
2945 
2946 	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
2947 	if (!fp) {
2948 		wl1271_error("Out of memory setting filters.");
2949 		return 0;
2950 	}
2951 
2952 	/* update multicast filtering parameters */
2953 	fp->mc_list_length = 0;
2954 	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
2955 		fp->enabled = false;
2956 	} else {
2957 		fp->enabled = true;
2958 		netdev_hw_addr_list_for_each(ha, mc_list) {
2959 			memcpy(fp->mc_list[fp->mc_list_length],
2960 					ha->addr, ETH_ALEN);
2961 			fp->mc_list_length++;
2962 		}
2963 	}
2964 
2965 	return (u64)(unsigned long)fp;
2966 }
2967 
2968 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
2969 				  FIF_ALLMULTI | \
2970 				  FIF_FCSFAIL | \
2971 				  FIF_BCN_PRBRESP_PROMISC | \
2972 				  FIF_CONTROL | \
2973 				  FIF_OTHER_BSS)
2974 
2975 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
2976 				       unsigned int changed,
2977 				       unsigned int *total, u64 multicast)
2978 {
2979 	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
2980 	struct wl1271 *wl = hw->priv;
2981 	struct wl12xx_vif *wlvif;
2982 
2983 	int ret;
2984 
2985 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
2986 		     " total %x", changed, *total);
2987 
2988 	mutex_lock(&wl->mutex);
2989 
2990 	*total &= WL1271_SUPPORTED_FILTERS;
2991 	changed &= WL1271_SUPPORTED_FILTERS;
2992 
2993 	if (unlikely(wl->state != WLCORE_STATE_ON))
2994 		goto out;
2995 
2996 	ret = wl1271_ps_elp_wakeup(wl);
2997 	if (ret < 0)
2998 		goto out;
2999 
3000 	wl12xx_for_each_wlvif(wl, wlvif) {
3001 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3002 			if (*total & FIF_ALLMULTI)
3003 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3004 								   false,
3005 								   NULL, 0);
3006 			else if (fp)
3007 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3008 							fp->enabled,
3009 							fp->mc_list,
3010 							fp->mc_list_length);
3011 			if (ret < 0)
3012 				goto out_sleep;
3013 		}
3014 	}
3015 
3016 	/*
3017 	 * the fw doesn't provide an api to configure the filters. instead,
3018 	 * the filters configuration is based on the active roles / ROC
3019 	 * state.
3020 	 */
3021 
3022 out_sleep:
3023 	wl1271_ps_elp_sleep(wl);
3024 
3025 out:
3026 	mutex_unlock(&wl->mutex);
3027 	kfree(fp);
3028 }
3029 
3030 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3031 				u8 id, u8 key_type, u8 key_size,
3032 				const u8 *key, u8 hlid, u32 tx_seq_32,
3033 				u16 tx_seq_16)
3034 {
3035 	struct wl1271_ap_key *ap_key;
3036 	int i;
3037 
3038 	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3039 
3040 	if (key_size > MAX_KEY_SIZE)
3041 		return -EINVAL;
3042 
3043 	/*
3044 	 * Find next free entry in ap_keys. Also check we are not replacing
3045 	 * an existing key.
3046 	 */
3047 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3048 		if (wlvif->ap.recorded_keys[i] == NULL)
3049 			break;
3050 
3051 		if (wlvif->ap.recorded_keys[i]->id == id) {
3052 			wl1271_warning("trying to record key replacement");
3053 			return -EINVAL;
3054 		}
3055 	}
3056 
3057 	if (i == MAX_NUM_KEYS)
3058 		return -EBUSY;
3059 
3060 	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3061 	if (!ap_key)
3062 		return -ENOMEM;
3063 
3064 	ap_key->id = id;
3065 	ap_key->key_type = key_type;
3066 	ap_key->key_size = key_size;
3067 	memcpy(ap_key->key, key, key_size);
3068 	ap_key->hlid = hlid;
3069 	ap_key->tx_seq_32 = tx_seq_32;
3070 	ap_key->tx_seq_16 = tx_seq_16;
3071 
3072 	wlvif->ap.recorded_keys[i] = ap_key;
3073 	return 0;
3074 }
3075 
3076 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3077 {
3078 	int i;
3079 
3080 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3081 		kfree(wlvif->ap.recorded_keys[i]);
3082 		wlvif->ap.recorded_keys[i] = NULL;
3083 	}
3084 }
3085 
3086 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3087 {
3088 	int i, ret = 0;
3089 	struct wl1271_ap_key *key;
3090 	bool wep_key_added = false;
3091 
3092 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3093 		u8 hlid;
3094 		if (wlvif->ap.recorded_keys[i] == NULL)
3095 			break;
3096 
3097 		key = wlvif->ap.recorded_keys[i];
3098 		hlid = key->hlid;
3099 		if (hlid == WL12XX_INVALID_LINK_ID)
3100 			hlid = wlvif->ap.bcast_hlid;
3101 
3102 		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3103 					    key->id, key->key_type,
3104 					    key->key_size, key->key,
3105 					    hlid, key->tx_seq_32,
3106 					    key->tx_seq_16);
3107 		if (ret < 0)
3108 			goto out;
3109 
3110 		if (key->key_type == KEY_WEP)
3111 			wep_key_added = true;
3112 	}
3113 
3114 	if (wep_key_added) {
3115 		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3116 						     wlvif->ap.bcast_hlid);
3117 		if (ret < 0)
3118 			goto out;
3119 	}
3120 
3121 out:
3122 	wl1271_free_ap_keys(wl, wlvif);
3123 	return ret;
3124 }
3125 
3126 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3127 		       u16 action, u8 id, u8 key_type,
3128 		       u8 key_size, const u8 *key, u32 tx_seq_32,
3129 		       u16 tx_seq_16, struct ieee80211_sta *sta)
3130 {
3131 	int ret;
3132 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3133 
3134 	if (is_ap) {
3135 		struct wl1271_station *wl_sta;
3136 		u8 hlid;
3137 
3138 		if (sta) {
3139 			wl_sta = (struct wl1271_station *)sta->drv_priv;
3140 			hlid = wl_sta->hlid;
3141 		} else {
3142 			hlid = wlvif->ap.bcast_hlid;
3143 		}
3144 
3145 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3146 			/*
3147 			 * We do not support removing keys after AP shutdown.
3148 			 * Pretend we do to make mac80211 happy.
3149 			 */
3150 			if (action != KEY_ADD_OR_REPLACE)
3151 				return 0;
3152 
3153 			ret = wl1271_record_ap_key(wl, wlvif, id,
3154 					     key_type, key_size,
3155 					     key, hlid, tx_seq_32,
3156 					     tx_seq_16);
3157 		} else {
3158 			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3159 					     id, key_type, key_size,
3160 					     key, hlid, tx_seq_32,
3161 					     tx_seq_16);
3162 		}
3163 
3164 		if (ret < 0)
3165 			return ret;
3166 	} else {
3167 		const u8 *addr;
3168 		static const u8 bcast_addr[ETH_ALEN] = {
3169 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3170 		};
3171 
3172 		addr = sta ? sta->addr : bcast_addr;
3173 
3174 		if (is_zero_ether_addr(addr)) {
3175 			/* We dont support TX only encryption */
3176 			return -EOPNOTSUPP;
3177 		}
3178 
3179 		/* The wl1271 does not allow to remove unicast keys - they
3180 		   will be cleared automatically on next CMD_JOIN. Ignore the
3181 		   request silently, as we dont want the mac80211 to emit
3182 		   an error message. */
3183 		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3184 			return 0;
3185 
3186 		/* don't remove key if hlid was already deleted */
3187 		if (action == KEY_REMOVE &&
3188 		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3189 			return 0;
3190 
3191 		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3192 					     id, key_type, key_size,
3193 					     key, addr, tx_seq_32,
3194 					     tx_seq_16);
3195 		if (ret < 0)
3196 			return ret;
3197 
3198 		/* the default WEP key needs to be configured at least once */
3199 		if (key_type == KEY_WEP) {
3200 			ret = wl12xx_cmd_set_default_wep_key(wl,
3201 							wlvif->default_key,
3202 							wlvif->sta.hlid);
3203 			if (ret < 0)
3204 				return ret;
3205 		}
3206 	}
3207 
3208 	return 0;
3209 }
3210 
3211 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3212 			     struct ieee80211_vif *vif,
3213 			     struct ieee80211_sta *sta,
3214 			     struct ieee80211_key_conf *key_conf)
3215 {
3216 	struct wl1271 *wl = hw->priv;
3217 	int ret;
3218 	bool might_change_spare =
3219 		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3220 		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3221 
3222 	if (might_change_spare) {
3223 		/*
3224 		 * stop the queues and flush to ensure the next packets are
3225 		 * in sync with FW spare block accounting
3226 		 */
3227 		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3228 		wl1271_tx_flush(wl);
3229 	}
3230 
3231 	mutex_lock(&wl->mutex);
3232 
3233 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3234 		ret = -EAGAIN;
3235 		goto out_wake_queues;
3236 	}
3237 
3238 	ret = wl1271_ps_elp_wakeup(wl);
3239 	if (ret < 0)
3240 		goto out_wake_queues;
3241 
3242 	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3243 
3244 	wl1271_ps_elp_sleep(wl);
3245 
3246 out_wake_queues:
3247 	if (might_change_spare)
3248 		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3249 
3250 	mutex_unlock(&wl->mutex);
3251 
3252 	return ret;
3253 }
3254 
3255 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3256 		   struct ieee80211_vif *vif,
3257 		   struct ieee80211_sta *sta,
3258 		   struct ieee80211_key_conf *key_conf)
3259 {
3260 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3261 	int ret;
3262 	u32 tx_seq_32 = 0;
3263 	u16 tx_seq_16 = 0;
3264 	u8 key_type;
3265 
3266 	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3267 
3268 	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3269 	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3270 		     key_conf->cipher, key_conf->keyidx,
3271 		     key_conf->keylen, key_conf->flags);
3272 	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3273 
3274 	switch (key_conf->cipher) {
3275 	case WLAN_CIPHER_SUITE_WEP40:
3276 	case WLAN_CIPHER_SUITE_WEP104:
3277 		key_type = KEY_WEP;
3278 
3279 		key_conf->hw_key_idx = key_conf->keyidx;
3280 		break;
3281 	case WLAN_CIPHER_SUITE_TKIP:
3282 		key_type = KEY_TKIP;
3283 
3284 		key_conf->hw_key_idx = key_conf->keyidx;
3285 		tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3286 		tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3287 		break;
3288 	case WLAN_CIPHER_SUITE_CCMP:
3289 		key_type = KEY_AES;
3290 
3291 		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3292 		tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3293 		tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3294 		break;
3295 	case WL1271_CIPHER_SUITE_GEM:
3296 		key_type = KEY_GEM;
3297 		tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3298 		tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3299 		break;
3300 	default:
3301 		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3302 
3303 		return -EOPNOTSUPP;
3304 	}
3305 
3306 	switch (cmd) {
3307 	case SET_KEY:
3308 		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3309 				 key_conf->keyidx, key_type,
3310 				 key_conf->keylen, key_conf->key,
3311 				 tx_seq_32, tx_seq_16, sta);
3312 		if (ret < 0) {
3313 			wl1271_error("Could not add or replace key");
3314 			return ret;
3315 		}
3316 
3317 		/*
3318 		 * reconfiguring arp response if the unicast (or common)
3319 		 * encryption key type was changed
3320 		 */
3321 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3322 		    (sta || key_type == KEY_WEP) &&
3323 		    wlvif->encryption_type != key_type) {
3324 			wlvif->encryption_type = key_type;
3325 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3326 			if (ret < 0) {
3327 				wl1271_warning("build arp rsp failed: %d", ret);
3328 				return ret;
3329 			}
3330 		}
3331 		break;
3332 
3333 	case DISABLE_KEY:
3334 		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3335 				     key_conf->keyidx, key_type,
3336 				     key_conf->keylen, key_conf->key,
3337 				     0, 0, sta);
3338 		if (ret < 0) {
3339 			wl1271_error("Could not remove key");
3340 			return ret;
3341 		}
3342 		break;
3343 
3344 	default:
3345 		wl1271_error("Unsupported key cmd 0x%x", cmd);
3346 		return -EOPNOTSUPP;
3347 	}
3348 
3349 	return ret;
3350 }
3351 EXPORT_SYMBOL_GPL(wlcore_set_key);
3352 
3353 void wlcore_regdomain_config(struct wl1271 *wl)
3354 {
3355 	int ret;
3356 
3357 	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3358 		return;
3359 
3360 	mutex_lock(&wl->mutex);
3361 	ret = wl1271_ps_elp_wakeup(wl);
3362 	if (ret < 0)
3363 		goto out;
3364 
3365 	ret = wlcore_cmd_regdomain_config_locked(wl);
3366 	if (ret < 0) {
3367 		wl12xx_queue_recovery_work(wl);
3368 		goto out;
3369 	}
3370 
3371 	wl1271_ps_elp_sleep(wl);
3372 out:
3373 	mutex_unlock(&wl->mutex);
3374 }
3375 
3376 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3377 			     struct ieee80211_vif *vif,
3378 			     struct cfg80211_scan_request *req)
3379 {
3380 	struct wl1271 *wl = hw->priv;
3381 	int ret;
3382 	u8 *ssid = NULL;
3383 	size_t len = 0;
3384 
3385 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3386 
3387 	if (req->n_ssids) {
3388 		ssid = req->ssids[0].ssid;
3389 		len = req->ssids[0].ssid_len;
3390 	}
3391 
3392 	mutex_lock(&wl->mutex);
3393 
3394 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3395 		/*
3396 		 * We cannot return -EBUSY here because cfg80211 will expect
3397 		 * a call to ieee80211_scan_completed if we do - in this case
3398 		 * there won't be any call.
3399 		 */
3400 		ret = -EAGAIN;
3401 		goto out;
3402 	}
3403 
3404 	ret = wl1271_ps_elp_wakeup(wl);
3405 	if (ret < 0)
3406 		goto out;
3407 
3408 	/* fail if there is any role in ROC */
3409 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3410 		/* don't allow scanning right now */
3411 		ret = -EBUSY;
3412 		goto out_sleep;
3413 	}
3414 
3415 	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3416 out_sleep:
3417 	wl1271_ps_elp_sleep(wl);
3418 out:
3419 	mutex_unlock(&wl->mutex);
3420 
3421 	return ret;
3422 }
3423 
3424 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3425 				     struct ieee80211_vif *vif)
3426 {
3427 	struct wl1271 *wl = hw->priv;
3428 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3429 	int ret;
3430 
3431 	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3432 
3433 	mutex_lock(&wl->mutex);
3434 
3435 	if (unlikely(wl->state != WLCORE_STATE_ON))
3436 		goto out;
3437 
3438 	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3439 		goto out;
3440 
3441 	ret = wl1271_ps_elp_wakeup(wl);
3442 	if (ret < 0)
3443 		goto out;
3444 
3445 	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3446 		ret = wl->ops->scan_stop(wl, wlvif);
3447 		if (ret < 0)
3448 			goto out_sleep;
3449 	}
3450 
3451 	/*
3452 	 * Rearm the tx watchdog just before idling scan. This
3453 	 * prevents just-finished scans from triggering the watchdog
3454 	 */
3455 	wl12xx_rearm_tx_watchdog_locked(wl);
3456 
3457 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3458 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3459 	wl->scan_wlvif = NULL;
3460 	wl->scan.req = NULL;
3461 	ieee80211_scan_completed(wl->hw, true);
3462 
3463 out_sleep:
3464 	wl1271_ps_elp_sleep(wl);
3465 out:
3466 	mutex_unlock(&wl->mutex);
3467 
3468 	cancel_delayed_work_sync(&wl->scan_complete_work);
3469 }
3470 
3471 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3472 				      struct ieee80211_vif *vif,
3473 				      struct cfg80211_sched_scan_request *req,
3474 				      struct ieee80211_sched_scan_ies *ies)
3475 {
3476 	struct wl1271 *wl = hw->priv;
3477 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3478 	int ret;
3479 
3480 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3481 
3482 	mutex_lock(&wl->mutex);
3483 
3484 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3485 		ret = -EAGAIN;
3486 		goto out;
3487 	}
3488 
3489 	ret = wl1271_ps_elp_wakeup(wl);
3490 	if (ret < 0)
3491 		goto out;
3492 
3493 	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3494 	if (ret < 0)
3495 		goto out_sleep;
3496 
3497 	wl->sched_vif = wlvif;
3498 
3499 out_sleep:
3500 	wl1271_ps_elp_sleep(wl);
3501 out:
3502 	mutex_unlock(&wl->mutex);
3503 	return ret;
3504 }
3505 
3506 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3507 				      struct ieee80211_vif *vif)
3508 {
3509 	struct wl1271 *wl = hw->priv;
3510 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3511 	int ret;
3512 
3513 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3514 
3515 	mutex_lock(&wl->mutex);
3516 
3517 	if (unlikely(wl->state != WLCORE_STATE_ON))
3518 		goto out;
3519 
3520 	ret = wl1271_ps_elp_wakeup(wl);
3521 	if (ret < 0)
3522 		goto out;
3523 
3524 	wl->ops->sched_scan_stop(wl, wlvif);
3525 
3526 	wl1271_ps_elp_sleep(wl);
3527 out:
3528 	mutex_unlock(&wl->mutex);
3529 }
3530 
3531 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3532 {
3533 	struct wl1271 *wl = hw->priv;
3534 	int ret = 0;
3535 
3536 	mutex_lock(&wl->mutex);
3537 
3538 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3539 		ret = -EAGAIN;
3540 		goto out;
3541 	}
3542 
3543 	ret = wl1271_ps_elp_wakeup(wl);
3544 	if (ret < 0)
3545 		goto out;
3546 
3547 	ret = wl1271_acx_frag_threshold(wl, value);
3548 	if (ret < 0)
3549 		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3550 
3551 	wl1271_ps_elp_sleep(wl);
3552 
3553 out:
3554 	mutex_unlock(&wl->mutex);
3555 
3556 	return ret;
3557 }
3558 
3559 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3560 {
3561 	struct wl1271 *wl = hw->priv;
3562 	struct wl12xx_vif *wlvif;
3563 	int ret = 0;
3564 
3565 	mutex_lock(&wl->mutex);
3566 
3567 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3568 		ret = -EAGAIN;
3569 		goto out;
3570 	}
3571 
3572 	ret = wl1271_ps_elp_wakeup(wl);
3573 	if (ret < 0)
3574 		goto out;
3575 
3576 	wl12xx_for_each_wlvif(wl, wlvif) {
3577 		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3578 		if (ret < 0)
3579 			wl1271_warning("set rts threshold failed: %d", ret);
3580 	}
3581 	wl1271_ps_elp_sleep(wl);
3582 
3583 out:
3584 	mutex_unlock(&wl->mutex);
3585 
3586 	return ret;
3587 }
3588 
3589 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3590 {
3591 	int len;
3592 	const u8 *next, *end = skb->data + skb->len;
3593 	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3594 					skb->len - ieoffset);
3595 	if (!ie)
3596 		return;
3597 	len = ie[1] + 2;
3598 	next = ie + len;
3599 	memmove(ie, next, end - next);
3600 	skb_trim(skb, skb->len - len);
3601 }
3602 
3603 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3604 					    unsigned int oui, u8 oui_type,
3605 					    int ieoffset)
3606 {
3607 	int len;
3608 	const u8 *next, *end = skb->data + skb->len;
3609 	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3610 					       skb->data + ieoffset,
3611 					       skb->len - ieoffset);
3612 	if (!ie)
3613 		return;
3614 	len = ie[1] + 2;
3615 	next = ie + len;
3616 	memmove(ie, next, end - next);
3617 	skb_trim(skb, skb->len - len);
3618 }
3619 
3620 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3621 					 struct ieee80211_vif *vif)
3622 {
3623 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3624 	struct sk_buff *skb;
3625 	int ret;
3626 
3627 	skb = ieee80211_proberesp_get(wl->hw, vif);
3628 	if (!skb)
3629 		return -EOPNOTSUPP;
3630 
3631 	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3632 				      CMD_TEMPL_AP_PROBE_RESPONSE,
3633 				      skb->data,
3634 				      skb->len, 0,
3635 				      rates);
3636 	dev_kfree_skb(skb);
3637 
3638 	if (ret < 0)
3639 		goto out;
3640 
3641 	wl1271_debug(DEBUG_AP, "probe response updated");
3642 	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3643 
3644 out:
3645 	return ret;
3646 }
3647 
3648 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3649 					     struct ieee80211_vif *vif,
3650 					     u8 *probe_rsp_data,
3651 					     size_t probe_rsp_len,
3652 					     u32 rates)
3653 {
3654 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3655 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3656 	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3657 	int ssid_ie_offset, ie_offset, templ_len;
3658 	const u8 *ptr;
3659 
3660 	/* no need to change probe response if the SSID is set correctly */
3661 	if (wlvif->ssid_len > 0)
3662 		return wl1271_cmd_template_set(wl, wlvif->role_id,
3663 					       CMD_TEMPL_AP_PROBE_RESPONSE,
3664 					       probe_rsp_data,
3665 					       probe_rsp_len, 0,
3666 					       rates);
3667 
3668 	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3669 		wl1271_error("probe_rsp template too big");
3670 		return -EINVAL;
3671 	}
3672 
3673 	/* start searching from IE offset */
3674 	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3675 
3676 	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3677 			       probe_rsp_len - ie_offset);
3678 	if (!ptr) {
3679 		wl1271_error("No SSID in beacon!");
3680 		return -EINVAL;
3681 	}
3682 
3683 	ssid_ie_offset = ptr - probe_rsp_data;
3684 	ptr += (ptr[1] + 2);
3685 
3686 	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3687 
3688 	/* insert SSID from bss_conf */
3689 	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3690 	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3691 	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3692 	       bss_conf->ssid, bss_conf->ssid_len);
3693 	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3694 
3695 	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3696 	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
3697 	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3698 
3699 	return wl1271_cmd_template_set(wl, wlvif->role_id,
3700 				       CMD_TEMPL_AP_PROBE_RESPONSE,
3701 				       probe_rsp_templ,
3702 				       templ_len, 0,
3703 				       rates);
3704 }
3705 
3706 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3707 				       struct ieee80211_vif *vif,
3708 				       struct ieee80211_bss_conf *bss_conf,
3709 				       u32 changed)
3710 {
3711 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3712 	int ret = 0;
3713 
3714 	if (changed & BSS_CHANGED_ERP_SLOT) {
3715 		if (bss_conf->use_short_slot)
3716 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3717 		else
3718 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3719 		if (ret < 0) {
3720 			wl1271_warning("Set slot time failed %d", ret);
3721 			goto out;
3722 		}
3723 	}
3724 
3725 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3726 		if (bss_conf->use_short_preamble)
3727 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3728 		else
3729 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3730 	}
3731 
3732 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3733 		if (bss_conf->use_cts_prot)
3734 			ret = wl1271_acx_cts_protect(wl, wlvif,
3735 						     CTSPROTECT_ENABLE);
3736 		else
3737 			ret = wl1271_acx_cts_protect(wl, wlvif,
3738 						     CTSPROTECT_DISABLE);
3739 		if (ret < 0) {
3740 			wl1271_warning("Set ctsprotect failed %d", ret);
3741 			goto out;
3742 		}
3743 	}
3744 
3745 out:
3746 	return ret;
3747 }
3748 
3749 static int wlcore_set_beacon_template(struct wl1271 *wl,
3750 				      struct ieee80211_vif *vif,
3751 				      bool is_ap)
3752 {
3753 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3754 	struct ieee80211_hdr *hdr;
3755 	u32 min_rate;
3756 	int ret;
3757 	int ieoffset = offsetof(struct ieee80211_mgmt,
3758 				u.beacon.variable);
3759 	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3760 	u16 tmpl_id;
3761 
3762 	if (!beacon) {
3763 		ret = -EINVAL;
3764 		goto out;
3765 	}
3766 
3767 	wl1271_debug(DEBUG_MASTER, "beacon updated");
3768 
3769 	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
3770 	if (ret < 0) {
3771 		dev_kfree_skb(beacon);
3772 		goto out;
3773 	}
3774 	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3775 	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3776 		CMD_TEMPL_BEACON;
3777 	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3778 				      beacon->data,
3779 				      beacon->len, 0,
3780 				      min_rate);
3781 	if (ret < 0) {
3782 		dev_kfree_skb(beacon);
3783 		goto out;
3784 	}
3785 
3786 	wlvif->wmm_enabled =
3787 		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
3788 					WLAN_OUI_TYPE_MICROSOFT_WMM,
3789 					beacon->data + ieoffset,
3790 					beacon->len - ieoffset);
3791 
3792 	/*
3793 	 * In case we already have a probe-resp beacon set explicitly
3794 	 * by usermode, don't use the beacon data.
3795 	 */
3796 	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3797 		goto end_bcn;
3798 
3799 	/* remove TIM ie from probe response */
3800 	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3801 
3802 	/*
3803 	 * remove p2p ie from probe response.
3804 	 * the fw reponds to probe requests that don't include
3805 	 * the p2p ie. probe requests with p2p ie will be passed,
3806 	 * and will be responded by the supplicant (the spec
3807 	 * forbids including the p2p ie when responding to probe
3808 	 * requests that didn't include it).
3809 	 */
3810 	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3811 				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
3812 
3813 	hdr = (struct ieee80211_hdr *) beacon->data;
3814 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3815 					 IEEE80211_STYPE_PROBE_RESP);
3816 	if (is_ap)
3817 		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3818 							   beacon->data,
3819 							   beacon->len,
3820 							   min_rate);
3821 	else
3822 		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3823 					      CMD_TEMPL_PROBE_RESPONSE,
3824 					      beacon->data,
3825 					      beacon->len, 0,
3826 					      min_rate);
3827 end_bcn:
3828 	dev_kfree_skb(beacon);
3829 	if (ret < 0)
3830 		goto out;
3831 
3832 out:
3833 	return ret;
3834 }
3835 
3836 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3837 					  struct ieee80211_vif *vif,
3838 					  struct ieee80211_bss_conf *bss_conf,
3839 					  u32 changed)
3840 {
3841 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3842 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3843 	int ret = 0;
3844 
3845 	if (changed & BSS_CHANGED_BEACON_INT) {
3846 		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
3847 			bss_conf->beacon_int);
3848 
3849 		wlvif->beacon_int = bss_conf->beacon_int;
3850 	}
3851 
3852 	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3853 		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3854 
3855 		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
3856 	}
3857 
3858 	if (changed & BSS_CHANGED_BEACON) {
3859 		ret = wlcore_set_beacon_template(wl, vif, is_ap);
3860 		if (ret < 0)
3861 			goto out;
3862 	}
3863 
3864 out:
3865 	if (ret != 0)
3866 		wl1271_error("beacon info change failed: %d", ret);
3867 	return ret;
3868 }
3869 
3870 /* AP mode changes */
3871 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3872 				       struct ieee80211_vif *vif,
3873 				       struct ieee80211_bss_conf *bss_conf,
3874 				       u32 changed)
3875 {
3876 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3877 	int ret = 0;
3878 
3879 	if (changed & BSS_CHANGED_BASIC_RATES) {
3880 		u32 rates = bss_conf->basic_rates;
3881 
3882 		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
3883 								 wlvif->band);
3884 		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
3885 							wlvif->basic_rate_set);
3886 
3887 		ret = wl1271_init_ap_rates(wl, wlvif);
3888 		if (ret < 0) {
3889 			wl1271_error("AP rate policy change failed %d", ret);
3890 			goto out;
3891 		}
3892 
3893 		ret = wl1271_ap_init_templates(wl, vif);
3894 		if (ret < 0)
3895 			goto out;
3896 
3897 		ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
3898 		if (ret < 0)
3899 			goto out;
3900 
3901 		ret = wlcore_set_beacon_template(wl, vif, true);
3902 		if (ret < 0)
3903 			goto out;
3904 	}
3905 
3906 	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
3907 	if (ret < 0)
3908 		goto out;
3909 
3910 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
3911 		if (bss_conf->enable_beacon) {
3912 			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3913 				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
3914 				if (ret < 0)
3915 					goto out;
3916 
3917 				ret = wl1271_ap_init_hwenc(wl, wlvif);
3918 				if (ret < 0)
3919 					goto out;
3920 
3921 				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3922 				wl1271_debug(DEBUG_AP, "started AP");
3923 			}
3924 		} else {
3925 			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3926 				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
3927 				if (ret < 0)
3928 					goto out;
3929 
3930 				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3931 				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
3932 					  &wlvif->flags);
3933 				wl1271_debug(DEBUG_AP, "stopped AP");
3934 			}
3935 		}
3936 	}
3937 
3938 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
3939 	if (ret < 0)
3940 		goto out;
3941 
3942 	/* Handle HT information change */
3943 	if ((changed & BSS_CHANGED_HT) &&
3944 	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
3945 		ret = wl1271_acx_set_ht_information(wl, wlvif,
3946 					bss_conf->ht_operation_mode);
3947 		if (ret < 0) {
3948 			wl1271_warning("Set ht information failed %d", ret);
3949 			goto out;
3950 		}
3951 	}
3952 
3953 out:
3954 	return;
3955 }
3956 
3957 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3958 			    struct ieee80211_bss_conf *bss_conf,
3959 			    u32 sta_rate_set)
3960 {
3961 	u32 rates;
3962 	int ret;
3963 
3964 	wl1271_debug(DEBUG_MAC80211,
3965 	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
3966 	     bss_conf->bssid, bss_conf->aid,
3967 	     bss_conf->beacon_int,
3968 	     bss_conf->basic_rates, sta_rate_set);
3969 
3970 	wlvif->beacon_int = bss_conf->beacon_int;
3971 	rates = bss_conf->basic_rates;
3972 	wlvif->basic_rate_set =
3973 		wl1271_tx_enabled_rates_get(wl, rates,
3974 					    wlvif->band);
3975 	wlvif->basic_rate =
3976 		wl1271_tx_min_rate_get(wl,
3977 				       wlvif->basic_rate_set);
3978 
3979 	if (sta_rate_set)
3980 		wlvif->rate_set =
3981 			wl1271_tx_enabled_rates_get(wl,
3982 						sta_rate_set,
3983 						wlvif->band);
3984 
3985 	/* we only support sched_scan while not connected */
3986 	if (wl->sched_vif == wlvif)
3987 		wl->ops->sched_scan_stop(wl, wlvif);
3988 
3989 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3990 	if (ret < 0)
3991 		return ret;
3992 
3993 	ret = wl12xx_cmd_build_null_data(wl, wlvif);
3994 	if (ret < 0)
3995 		return ret;
3996 
3997 	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
3998 	if (ret < 0)
3999 		return ret;
4000 
4001 	wlcore_set_ssid(wl, wlvif);
4002 
4003 	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4004 
4005 	return 0;
4006 }
4007 
4008 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4009 {
4010 	int ret;
4011 
4012 	/* revert back to minimum rates for the current band */
4013 	wl1271_set_band_rate(wl, wlvif);
4014 	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4015 
4016 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4017 	if (ret < 0)
4018 		return ret;
4019 
4020 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4021 	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4022 		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4023 		if (ret < 0)
4024 			return ret;
4025 	}
4026 
4027 	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4028 	return 0;
4029 }
4030 /* STA/IBSS mode changes */
4031 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4032 					struct ieee80211_vif *vif,
4033 					struct ieee80211_bss_conf *bss_conf,
4034 					u32 changed)
4035 {
4036 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4037 	bool do_join = false;
4038 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4039 	bool ibss_joined = false;
4040 	u32 sta_rate_set = 0;
4041 	int ret;
4042 	struct ieee80211_sta *sta;
4043 	bool sta_exists = false;
4044 	struct ieee80211_sta_ht_cap sta_ht_cap;
4045 
4046 	if (is_ibss) {
4047 		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4048 						     changed);
4049 		if (ret < 0)
4050 			goto out;
4051 	}
4052 
4053 	if (changed & BSS_CHANGED_IBSS) {
4054 		if (bss_conf->ibss_joined) {
4055 			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4056 			ibss_joined = true;
4057 		} else {
4058 			wlcore_unset_assoc(wl, wlvif);
4059 			wl12xx_cmd_role_stop_sta(wl, wlvif);
4060 		}
4061 	}
4062 
4063 	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4064 		do_join = true;
4065 
4066 	/* Need to update the SSID (for filtering etc) */
4067 	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4068 		do_join = true;
4069 
4070 	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4071 		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4072 			     bss_conf->enable_beacon ? "enabled" : "disabled");
4073 
4074 		do_join = true;
4075 	}
4076 
4077 	if (changed & BSS_CHANGED_CQM) {
4078 		bool enable = false;
4079 		if (bss_conf->cqm_rssi_thold)
4080 			enable = true;
4081 		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4082 						  bss_conf->cqm_rssi_thold,
4083 						  bss_conf->cqm_rssi_hyst);
4084 		if (ret < 0)
4085 			goto out;
4086 		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4087 	}
4088 
4089 	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4090 		       BSS_CHANGED_ASSOC)) {
4091 		rcu_read_lock();
4092 		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4093 		if (sta) {
4094 			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4095 
4096 			/* save the supp_rates of the ap */
4097 			sta_rate_set = sta->supp_rates[wlvif->band];
4098 			if (sta->ht_cap.ht_supported)
4099 				sta_rate_set |=
4100 					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4101 					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4102 			sta_ht_cap = sta->ht_cap;
4103 			sta_exists = true;
4104 		}
4105 
4106 		rcu_read_unlock();
4107 	}
4108 
4109 	if (changed & BSS_CHANGED_BSSID) {
4110 		if (!is_zero_ether_addr(bss_conf->bssid)) {
4111 			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4112 					       sta_rate_set);
4113 			if (ret < 0)
4114 				goto out;
4115 
4116 			/* Need to update the BSSID (for filtering etc) */
4117 			do_join = true;
4118 		} else {
4119 			ret = wlcore_clear_bssid(wl, wlvif);
4120 			if (ret < 0)
4121 				goto out;
4122 		}
4123 	}
4124 
4125 	if (changed & BSS_CHANGED_IBSS) {
4126 		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4127 			     bss_conf->ibss_joined);
4128 
4129 		if (bss_conf->ibss_joined) {
4130 			u32 rates = bss_conf->basic_rates;
4131 			wlvif->basic_rate_set =
4132 				wl1271_tx_enabled_rates_get(wl, rates,
4133 							    wlvif->band);
4134 			wlvif->basic_rate =
4135 				wl1271_tx_min_rate_get(wl,
4136 						       wlvif->basic_rate_set);
4137 
4138 			/* by default, use 11b + OFDM rates */
4139 			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4140 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4141 			if (ret < 0)
4142 				goto out;
4143 		}
4144 	}
4145 
4146 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4147 	if (ret < 0)
4148 		goto out;
4149 
4150 	if (do_join) {
4151 		ret = wlcore_join(wl, wlvif);
4152 		if (ret < 0) {
4153 			wl1271_warning("cmd join failed %d", ret);
4154 			goto out;
4155 		}
4156 	}
4157 
4158 	if (changed & BSS_CHANGED_ASSOC) {
4159 		if (bss_conf->assoc) {
4160 			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4161 					       sta_rate_set);
4162 			if (ret < 0)
4163 				goto out;
4164 
4165 			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4166 				wl12xx_set_authorized(wl, wlvif);
4167 		} else {
4168 			wlcore_unset_assoc(wl, wlvif);
4169 		}
4170 	}
4171 
4172 	if (changed & BSS_CHANGED_PS) {
4173 		if ((bss_conf->ps) &&
4174 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4175 		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4176 			int ps_mode;
4177 			char *ps_mode_str;
4178 
4179 			if (wl->conf.conn.forced_ps) {
4180 				ps_mode = STATION_POWER_SAVE_MODE;
4181 				ps_mode_str = "forced";
4182 			} else {
4183 				ps_mode = STATION_AUTO_PS_MODE;
4184 				ps_mode_str = "auto";
4185 			}
4186 
4187 			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4188 
4189 			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4190 			if (ret < 0)
4191 				wl1271_warning("enter %s ps failed %d",
4192 					       ps_mode_str, ret);
4193 		} else if (!bss_conf->ps &&
4194 			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4195 			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4196 
4197 			ret = wl1271_ps_set_mode(wl, wlvif,
4198 						 STATION_ACTIVE_MODE);
4199 			if (ret < 0)
4200 				wl1271_warning("exit auto ps failed %d", ret);
4201 		}
4202 	}
4203 
4204 	/* Handle new association with HT. Do this after join. */
4205 	if (sta_exists &&
4206 	    (changed & BSS_CHANGED_HT)) {
4207 		bool enabled =
4208 			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4209 
4210 		ret = wlcore_hw_set_peer_cap(wl,
4211 					     &sta_ht_cap,
4212 					     enabled,
4213 					     wlvif->rate_set,
4214 					     wlvif->sta.hlid);
4215 		if (ret < 0) {
4216 			wl1271_warning("Set ht cap failed %d", ret);
4217 			goto out;
4218 
4219 		}
4220 
4221 		if (enabled) {
4222 			ret = wl1271_acx_set_ht_information(wl, wlvif,
4223 						bss_conf->ht_operation_mode);
4224 			if (ret < 0) {
4225 				wl1271_warning("Set ht information failed %d",
4226 					       ret);
4227 				goto out;
4228 			}
4229 		}
4230 	}
4231 
4232 	/* Handle arp filtering. Done after join. */
4233 	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4234 	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4235 		__be32 addr = bss_conf->arp_addr_list[0];
4236 		wlvif->sta.qos = bss_conf->qos;
4237 		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4238 
4239 		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4240 			wlvif->ip_addr = addr;
4241 			/*
4242 			 * The template should have been configured only upon
4243 			 * association. however, it seems that the correct ip
4244 			 * isn't being set (when sending), so we have to
4245 			 * reconfigure the template upon every ip change.
4246 			 */
4247 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4248 			if (ret < 0) {
4249 				wl1271_warning("build arp rsp failed: %d", ret);
4250 				goto out;
4251 			}
4252 
4253 			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4254 				(ACX_ARP_FILTER_ARP_FILTERING |
4255 				 ACX_ARP_FILTER_AUTO_ARP),
4256 				addr);
4257 		} else {
4258 			wlvif->ip_addr = 0;
4259 			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4260 		}
4261 
4262 		if (ret < 0)
4263 			goto out;
4264 	}
4265 
4266 out:
4267 	return;
4268 }
4269 
4270 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4271 				       struct ieee80211_vif *vif,
4272 				       struct ieee80211_bss_conf *bss_conf,
4273 				       u32 changed)
4274 {
4275 	struct wl1271 *wl = hw->priv;
4276 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4277 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4278 	int ret;
4279 
4280 	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4281 		     wlvif->role_id, (int)changed);
4282 
4283 	/*
4284 	 * make sure to cancel pending disconnections if our association
4285 	 * state changed
4286 	 */
4287 	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4288 		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4289 
4290 	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4291 	    !bss_conf->enable_beacon)
4292 		wl1271_tx_flush(wl);
4293 
4294 	mutex_lock(&wl->mutex);
4295 
4296 	if (unlikely(wl->state != WLCORE_STATE_ON))
4297 		goto out;
4298 
4299 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4300 		goto out;
4301 
4302 	ret = wl1271_ps_elp_wakeup(wl);
4303 	if (ret < 0)
4304 		goto out;
4305 
4306 	if (is_ap)
4307 		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4308 	else
4309 		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4310 
4311 	wl1271_ps_elp_sleep(wl);
4312 
4313 out:
4314 	mutex_unlock(&wl->mutex);
4315 }
4316 
4317 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4318 				 struct ieee80211_chanctx_conf *ctx)
4319 {
4320 	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4321 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4322 		     cfg80211_get_chandef_type(&ctx->def));
4323 	return 0;
4324 }
4325 
4326 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4327 				     struct ieee80211_chanctx_conf *ctx)
4328 {
4329 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4330 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4331 		     cfg80211_get_chandef_type(&ctx->def));
4332 }
4333 
4334 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4335 				     struct ieee80211_chanctx_conf *ctx,
4336 				     u32 changed)
4337 {
4338 	wl1271_debug(DEBUG_MAC80211,
4339 		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4340 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4341 		     cfg80211_get_chandef_type(&ctx->def), changed);
4342 }
4343 
4344 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4345 					struct ieee80211_vif *vif,
4346 					struct ieee80211_chanctx_conf *ctx)
4347 {
4348 	struct wl1271 *wl = hw->priv;
4349 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4350 	int channel = ieee80211_frequency_to_channel(
4351 		ctx->def.chan->center_freq);
4352 
4353 	wl1271_debug(DEBUG_MAC80211,
4354 		     "mac80211 assign chanctx (role %d) %d (type %d)",
4355 		     wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
4356 
4357 	mutex_lock(&wl->mutex);
4358 
4359 	wlvif->band = ctx->def.chan->band;
4360 	wlvif->channel = channel;
4361 	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4362 
4363 	/* update default rates according to the band */
4364 	wl1271_set_band_rate(wl, wlvif);
4365 
4366 	mutex_unlock(&wl->mutex);
4367 
4368 	return 0;
4369 }
4370 
4371 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4372 					   struct ieee80211_vif *vif,
4373 					   struct ieee80211_chanctx_conf *ctx)
4374 {
4375 	struct wl1271 *wl = hw->priv;
4376 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4377 
4378 	wl1271_debug(DEBUG_MAC80211,
4379 		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4380 		     wlvif->role_id,
4381 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4382 		     cfg80211_get_chandef_type(&ctx->def));
4383 
4384 	wl1271_tx_flush(wl);
4385 }
4386 
4387 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4388 			     struct ieee80211_vif *vif, u16 queue,
4389 			     const struct ieee80211_tx_queue_params *params)
4390 {
4391 	struct wl1271 *wl = hw->priv;
4392 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4393 	u8 ps_scheme;
4394 	int ret = 0;
4395 
4396 	mutex_lock(&wl->mutex);
4397 
4398 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4399 
4400 	if (params->uapsd)
4401 		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4402 	else
4403 		ps_scheme = CONF_PS_SCHEME_LEGACY;
4404 
4405 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4406 		goto out;
4407 
4408 	ret = wl1271_ps_elp_wakeup(wl);
4409 	if (ret < 0)
4410 		goto out;
4411 
4412 	/*
4413 	 * the txop is confed in units of 32us by the mac80211,
4414 	 * we need us
4415 	 */
4416 	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4417 				params->cw_min, params->cw_max,
4418 				params->aifs, params->txop << 5);
4419 	if (ret < 0)
4420 		goto out_sleep;
4421 
4422 	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4423 				 CONF_CHANNEL_TYPE_EDCF,
4424 				 wl1271_tx_get_queue(queue),
4425 				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4426 				 0, 0);
4427 
4428 out_sleep:
4429 	wl1271_ps_elp_sleep(wl);
4430 
4431 out:
4432 	mutex_unlock(&wl->mutex);
4433 
4434 	return ret;
4435 }
4436 
4437 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4438 			     struct ieee80211_vif *vif)
4439 {
4440 
4441 	struct wl1271 *wl = hw->priv;
4442 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4443 	u64 mactime = ULLONG_MAX;
4444 	int ret;
4445 
4446 	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4447 
4448 	mutex_lock(&wl->mutex);
4449 
4450 	if (unlikely(wl->state != WLCORE_STATE_ON))
4451 		goto out;
4452 
4453 	ret = wl1271_ps_elp_wakeup(wl);
4454 	if (ret < 0)
4455 		goto out;
4456 
4457 	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4458 	if (ret < 0)
4459 		goto out_sleep;
4460 
4461 out_sleep:
4462 	wl1271_ps_elp_sleep(wl);
4463 
4464 out:
4465 	mutex_unlock(&wl->mutex);
4466 	return mactime;
4467 }
4468 
4469 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4470 				struct survey_info *survey)
4471 {
4472 	struct ieee80211_conf *conf = &hw->conf;
4473 
4474 	if (idx != 0)
4475 		return -ENOENT;
4476 
4477 	survey->channel = conf->channel;
4478 	survey->filled = 0;
4479 	return 0;
4480 }
4481 
4482 static int wl1271_allocate_sta(struct wl1271 *wl,
4483 			     struct wl12xx_vif *wlvif,
4484 			     struct ieee80211_sta *sta)
4485 {
4486 	struct wl1271_station *wl_sta;
4487 	int ret;
4488 
4489 
4490 	if (wl->active_sta_count >= AP_MAX_STATIONS) {
4491 		wl1271_warning("could not allocate HLID - too much stations");
4492 		return -EBUSY;
4493 	}
4494 
4495 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4496 	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4497 	if (ret < 0) {
4498 		wl1271_warning("could not allocate HLID - too many links");
4499 		return -EBUSY;
4500 	}
4501 
4502 	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4503 	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4504 	wl->active_sta_count++;
4505 	return 0;
4506 }
4507 
4508 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4509 {
4510 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4511 		return;
4512 
4513 	clear_bit(hlid, wlvif->ap.sta_hlid_map);
4514 	__clear_bit(hlid, &wl->ap_ps_map);
4515 	__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4516 	wl12xx_free_link(wl, wlvif, &hlid);
4517 	wl->active_sta_count--;
4518 
4519 	/*
4520 	 * rearm the tx watchdog when the last STA is freed - give the FW a
4521 	 * chance to return STA-buffered packets before complaining.
4522 	 */
4523 	if (wl->active_sta_count == 0)
4524 		wl12xx_rearm_tx_watchdog_locked(wl);
4525 }
4526 
4527 static int wl12xx_sta_add(struct wl1271 *wl,
4528 			  struct wl12xx_vif *wlvif,
4529 			  struct ieee80211_sta *sta)
4530 {
4531 	struct wl1271_station *wl_sta;
4532 	int ret = 0;
4533 	u8 hlid;
4534 
4535 	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4536 
4537 	ret = wl1271_allocate_sta(wl, wlvif, sta);
4538 	if (ret < 0)
4539 		return ret;
4540 
4541 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4542 	hlid = wl_sta->hlid;
4543 
4544 	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4545 	if (ret < 0)
4546 		wl1271_free_sta(wl, wlvif, hlid);
4547 
4548 	return ret;
4549 }
4550 
4551 static int wl12xx_sta_remove(struct wl1271 *wl,
4552 			     struct wl12xx_vif *wlvif,
4553 			     struct ieee80211_sta *sta)
4554 {
4555 	struct wl1271_station *wl_sta;
4556 	int ret = 0, id;
4557 
4558 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4559 
4560 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4561 	id = wl_sta->hlid;
4562 	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4563 		return -EINVAL;
4564 
4565 	ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4566 	if (ret < 0)
4567 		return ret;
4568 
4569 	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
4570 	return ret;
4571 }
4572 
4573 static void wlcore_roc_if_possible(struct wl1271 *wl,
4574 				   struct wl12xx_vif *wlvif)
4575 {
4576 	if (find_first_bit(wl->roc_map,
4577 			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
4578 		return;
4579 
4580 	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
4581 		return;
4582 
4583 	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
4584 }
4585 
4586 static void wlcore_update_inconn_sta(struct wl1271 *wl,
4587 				     struct wl12xx_vif *wlvif,
4588 				     struct wl1271_station *wl_sta,
4589 				     bool in_connection)
4590 {
4591 	if (in_connection) {
4592 		if (WARN_ON(wl_sta->in_connection))
4593 			return;
4594 		wl_sta->in_connection = true;
4595 		if (!wlvif->inconn_count++)
4596 			wlcore_roc_if_possible(wl, wlvif);
4597 	} else {
4598 		if (!wl_sta->in_connection)
4599 			return;
4600 
4601 		wl_sta->in_connection = false;
4602 		wlvif->inconn_count--;
4603 		if (WARN_ON(wlvif->inconn_count < 0))
4604 			return;
4605 
4606 		if (!wlvif->inconn_count)
4607 			if (test_bit(wlvif->role_id, wl->roc_map))
4608 				wl12xx_croc(wl, wlvif->role_id);
4609 	}
4610 }
4611 
4612 static int wl12xx_update_sta_state(struct wl1271 *wl,
4613 				   struct wl12xx_vif *wlvif,
4614 				   struct ieee80211_sta *sta,
4615 				   enum ieee80211_sta_state old_state,
4616 				   enum ieee80211_sta_state new_state)
4617 {
4618 	struct wl1271_station *wl_sta;
4619 	u8 hlid;
4620 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4621 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4622 	int ret;
4623 
4624 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4625 	hlid = wl_sta->hlid;
4626 
4627 	/* Add station (AP mode) */
4628 	if (is_ap &&
4629 	    old_state == IEEE80211_STA_NOTEXIST &&
4630 	    new_state == IEEE80211_STA_NONE) {
4631 		ret = wl12xx_sta_add(wl, wlvif, sta);
4632 		if (ret)
4633 			return ret;
4634 
4635 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
4636 	}
4637 
4638 	/* Remove station (AP mode) */
4639 	if (is_ap &&
4640 	    old_state == IEEE80211_STA_NONE &&
4641 	    new_state == IEEE80211_STA_NOTEXIST) {
4642 		/* must not fail */
4643 		wl12xx_sta_remove(wl, wlvif, sta);
4644 
4645 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4646 	}
4647 
4648 	/* Authorize station (AP mode) */
4649 	if (is_ap &&
4650 	    new_state == IEEE80211_STA_AUTHORIZED) {
4651 		ret = wl12xx_cmd_set_peer_state(wl, wlvif, hlid);
4652 		if (ret < 0)
4653 			return ret;
4654 
4655 		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4656 						     hlid);
4657 		if (ret)
4658 			return ret;
4659 
4660 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4661 	}
4662 
4663 	/* Authorize station */
4664 	if (is_sta &&
4665 	    new_state == IEEE80211_STA_AUTHORIZED) {
4666 		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4667 		ret = wl12xx_set_authorized(wl, wlvif);
4668 		if (ret)
4669 			return ret;
4670 	}
4671 
4672 	if (is_sta &&
4673 	    old_state == IEEE80211_STA_AUTHORIZED &&
4674 	    new_state == IEEE80211_STA_ASSOC) {
4675 		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4676 		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
4677 	}
4678 
4679 	/* clear ROCs on failure or authorization */
4680 	if (is_sta &&
4681 	    (new_state == IEEE80211_STA_AUTHORIZED ||
4682 	     new_state == IEEE80211_STA_NOTEXIST)) {
4683 		if (test_bit(wlvif->role_id, wl->roc_map))
4684 			wl12xx_croc(wl, wlvif->role_id);
4685 	}
4686 
4687 	if (is_sta &&
4688 	    old_state == IEEE80211_STA_NOTEXIST &&
4689 	    new_state == IEEE80211_STA_NONE) {
4690 		if (find_first_bit(wl->roc_map,
4691 				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
4692 			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
4693 			wl12xx_roc(wl, wlvif, wlvif->role_id,
4694 				   wlvif->band, wlvif->channel);
4695 		}
4696 	}
4697 	return 0;
4698 }
4699 
4700 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4701 			       struct ieee80211_vif *vif,
4702 			       struct ieee80211_sta *sta,
4703 			       enum ieee80211_sta_state old_state,
4704 			       enum ieee80211_sta_state new_state)
4705 {
4706 	struct wl1271 *wl = hw->priv;
4707 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4708 	int ret;
4709 
4710 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4711 		     sta->aid, old_state, new_state);
4712 
4713 	mutex_lock(&wl->mutex);
4714 
4715 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
4716 		ret = -EBUSY;
4717 		goto out;
4718 	}
4719 
4720 	ret = wl1271_ps_elp_wakeup(wl);
4721 	if (ret < 0)
4722 		goto out;
4723 
4724 	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
4725 
4726 	wl1271_ps_elp_sleep(wl);
4727 out:
4728 	mutex_unlock(&wl->mutex);
4729 	if (new_state < old_state)
4730 		return 0;
4731 	return ret;
4732 }
4733 
4734 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4735 				  struct ieee80211_vif *vif,
4736 				  enum ieee80211_ampdu_mlme_action action,
4737 				  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4738 				  u8 buf_size)
4739 {
4740 	struct wl1271 *wl = hw->priv;
4741 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4742 	int ret;
4743 	u8 hlid, *ba_bitmap;
4744 
4745 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
4746 		     tid);
4747 
4748 	/* sanity check - the fields in FW are only 8bits wide */
4749 	if (WARN_ON(tid > 0xFF))
4750 		return -ENOTSUPP;
4751 
4752 	mutex_lock(&wl->mutex);
4753 
4754 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
4755 		ret = -EAGAIN;
4756 		goto out;
4757 	}
4758 
4759 	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4760 		hlid = wlvif->sta.hlid;
4761 	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
4762 		struct wl1271_station *wl_sta;
4763 
4764 		wl_sta = (struct wl1271_station *)sta->drv_priv;
4765 		hlid = wl_sta->hlid;
4766 	} else {
4767 		ret = -EINVAL;
4768 		goto out;
4769 	}
4770 
4771 	ba_bitmap = &wl->links[hlid].ba_bitmap;
4772 
4773 	ret = wl1271_ps_elp_wakeup(wl);
4774 	if (ret < 0)
4775 		goto out;
4776 
4777 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
4778 		     tid, action);
4779 
4780 	switch (action) {
4781 	case IEEE80211_AMPDU_RX_START:
4782 		if (!wlvif->ba_support || !wlvif->ba_allowed) {
4783 			ret = -ENOTSUPP;
4784 			break;
4785 		}
4786 
4787 		if (wl->ba_rx_session_count >= RX_BA_MAX_SESSIONS) {
4788 			ret = -EBUSY;
4789 			wl1271_error("exceeded max RX BA sessions");
4790 			break;
4791 		}
4792 
4793 		if (*ba_bitmap & BIT(tid)) {
4794 			ret = -EINVAL;
4795 			wl1271_error("cannot enable RX BA session on active "
4796 				     "tid: %d", tid);
4797 			break;
4798 		}
4799 
4800 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
4801 							 hlid);
4802 		if (!ret) {
4803 			*ba_bitmap |= BIT(tid);
4804 			wl->ba_rx_session_count++;
4805 		}
4806 		break;
4807 
4808 	case IEEE80211_AMPDU_RX_STOP:
4809 		if (!(*ba_bitmap & BIT(tid))) {
4810 			/*
4811 			 * this happens on reconfig - so only output a debug
4812 			 * message for now, and don't fail the function.
4813 			 */
4814 			wl1271_debug(DEBUG_MAC80211,
4815 				     "no active RX BA session on tid: %d",
4816 				     tid);
4817 			ret = 0;
4818 			break;
4819 		}
4820 
4821 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
4822 							 hlid);
4823 		if (!ret) {
4824 			*ba_bitmap &= ~BIT(tid);
4825 			wl->ba_rx_session_count--;
4826 		}
4827 		break;
4828 
4829 	/*
4830 	 * The BA initiator session management in FW independently.
4831 	 * Falling break here on purpose for all TX APDU commands.
4832 	 */
4833 	case IEEE80211_AMPDU_TX_START:
4834 	case IEEE80211_AMPDU_TX_STOP_CONT:
4835 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
4836 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
4837 	case IEEE80211_AMPDU_TX_OPERATIONAL:
4838 		ret = -EINVAL;
4839 		break;
4840 
4841 	default:
4842 		wl1271_error("Incorrect ampdu action id=%x\n", action);
4843 		ret = -EINVAL;
4844 	}
4845 
4846 	wl1271_ps_elp_sleep(wl);
4847 
4848 out:
4849 	mutex_unlock(&wl->mutex);
4850 
4851 	return ret;
4852 }
4853 
4854 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
4855 				   struct ieee80211_vif *vif,
4856 				   const struct cfg80211_bitrate_mask *mask)
4857 {
4858 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4859 	struct wl1271 *wl = hw->priv;
4860 	int i, ret = 0;
4861 
4862 	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
4863 		mask->control[NL80211_BAND_2GHZ].legacy,
4864 		mask->control[NL80211_BAND_5GHZ].legacy);
4865 
4866 	mutex_lock(&wl->mutex);
4867 
4868 	for (i = 0; i < WLCORE_NUM_BANDS; i++)
4869 		wlvif->bitrate_masks[i] =
4870 			wl1271_tx_enabled_rates_get(wl,
4871 						    mask->control[i].legacy,
4872 						    i);
4873 
4874 	if (unlikely(wl->state != WLCORE_STATE_ON))
4875 		goto out;
4876 
4877 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4878 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
4879 
4880 		ret = wl1271_ps_elp_wakeup(wl);
4881 		if (ret < 0)
4882 			goto out;
4883 
4884 		wl1271_set_band_rate(wl, wlvif);
4885 		wlvif->basic_rate =
4886 			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4887 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4888 
4889 		wl1271_ps_elp_sleep(wl);
4890 	}
4891 out:
4892 	mutex_unlock(&wl->mutex);
4893 
4894 	return ret;
4895 }
4896 
4897 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
4898 				     struct ieee80211_channel_switch *ch_switch)
4899 {
4900 	struct wl1271 *wl = hw->priv;
4901 	struct wl12xx_vif *wlvif;
4902 	int ret;
4903 
4904 	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
4905 
4906 	wl1271_tx_flush(wl);
4907 
4908 	mutex_lock(&wl->mutex);
4909 
4910 	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
4911 		wl12xx_for_each_wlvif_sta(wl, wlvif) {
4912 			struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4913 			ieee80211_chswitch_done(vif, false);
4914 		}
4915 		goto out;
4916 	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
4917 		goto out;
4918 	}
4919 
4920 	ret = wl1271_ps_elp_wakeup(wl);
4921 	if (ret < 0)
4922 		goto out;
4923 
4924 	/* TODO: change mac80211 to pass vif as param */
4925 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
4926 		unsigned long delay_usec;
4927 
4928 		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
4929 		if (ret)
4930 			goto out_sleep;
4931 
4932 		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
4933 
4934 		/* indicate failure 5 seconds after channel switch time */
4935 		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
4936 			     ch_switch->count;
4937 		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
4938 				usecs_to_jiffies(delay_usec) +
4939 				msecs_to_jiffies(5000));
4940 	}
4941 
4942 out_sleep:
4943 	wl1271_ps_elp_sleep(wl);
4944 
4945 out:
4946 	mutex_unlock(&wl->mutex);
4947 }
4948 
4949 static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop)
4950 {
4951 	struct wl1271 *wl = hw->priv;
4952 
4953 	wl1271_tx_flush(wl);
4954 }
4955 
4956 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
4957 				       struct ieee80211_vif *vif,
4958 				       struct ieee80211_channel *chan,
4959 				       int duration)
4960 {
4961 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4962 	struct wl1271 *wl = hw->priv;
4963 	int channel, ret = 0;
4964 
4965 	channel = ieee80211_frequency_to_channel(chan->center_freq);
4966 
4967 	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
4968 		     channel, wlvif->role_id);
4969 
4970 	mutex_lock(&wl->mutex);
4971 
4972 	if (unlikely(wl->state != WLCORE_STATE_ON))
4973 		goto out;
4974 
4975 	/* return EBUSY if we can't ROC right now */
4976 	if (WARN_ON(wl->roc_vif ||
4977 		    find_first_bit(wl->roc_map,
4978 				   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
4979 		ret = -EBUSY;
4980 		goto out;
4981 	}
4982 
4983 	ret = wl1271_ps_elp_wakeup(wl);
4984 	if (ret < 0)
4985 		goto out;
4986 
4987 	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
4988 	if (ret < 0)
4989 		goto out_sleep;
4990 
4991 	wl->roc_vif = vif;
4992 	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
4993 				     msecs_to_jiffies(duration));
4994 out_sleep:
4995 	wl1271_ps_elp_sleep(wl);
4996 out:
4997 	mutex_unlock(&wl->mutex);
4998 	return ret;
4999 }
5000 
5001 static int __wlcore_roc_completed(struct wl1271 *wl)
5002 {
5003 	struct wl12xx_vif *wlvif;
5004 	int ret;
5005 
5006 	/* already completed */
5007 	if (unlikely(!wl->roc_vif))
5008 		return 0;
5009 
5010 	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5011 
5012 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5013 		return -EBUSY;
5014 
5015 	ret = wl12xx_stop_dev(wl, wlvif);
5016 	if (ret < 0)
5017 		return ret;
5018 
5019 	wl->roc_vif = NULL;
5020 
5021 	return 0;
5022 }
5023 
5024 static int wlcore_roc_completed(struct wl1271 *wl)
5025 {
5026 	int ret;
5027 
5028 	wl1271_debug(DEBUG_MAC80211, "roc complete");
5029 
5030 	mutex_lock(&wl->mutex);
5031 
5032 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5033 		ret = -EBUSY;
5034 		goto out;
5035 	}
5036 
5037 	ret = wl1271_ps_elp_wakeup(wl);
5038 	if (ret < 0)
5039 		goto out;
5040 
5041 	ret = __wlcore_roc_completed(wl);
5042 
5043 	wl1271_ps_elp_sleep(wl);
5044 out:
5045 	mutex_unlock(&wl->mutex);
5046 
5047 	return ret;
5048 }
5049 
5050 static void wlcore_roc_complete_work(struct work_struct *work)
5051 {
5052 	struct delayed_work *dwork;
5053 	struct wl1271 *wl;
5054 	int ret;
5055 
5056 	dwork = container_of(work, struct delayed_work, work);
5057 	wl = container_of(dwork, struct wl1271, roc_complete_work);
5058 
5059 	ret = wlcore_roc_completed(wl);
5060 	if (!ret)
5061 		ieee80211_remain_on_channel_expired(wl->hw);
5062 }
5063 
5064 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5065 {
5066 	struct wl1271 *wl = hw->priv;
5067 
5068 	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5069 
5070 	/* TODO: per-vif */
5071 	wl1271_tx_flush(wl);
5072 
5073 	/*
5074 	 * we can't just flush_work here, because it might deadlock
5075 	 * (as we might get called from the same workqueue)
5076 	 */
5077 	cancel_delayed_work_sync(&wl->roc_complete_work);
5078 	wlcore_roc_completed(wl);
5079 
5080 	return 0;
5081 }
5082 
5083 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5084 				    struct ieee80211_vif *vif,
5085 				    struct ieee80211_sta *sta,
5086 				    u32 changed)
5087 {
5088 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5089 	struct wl1271 *wl = hw->priv;
5090 
5091 	wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
5092 }
5093 
5094 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5095 {
5096 	struct wl1271 *wl = hw->priv;
5097 	bool ret = false;
5098 
5099 	mutex_lock(&wl->mutex);
5100 
5101 	if (unlikely(wl->state != WLCORE_STATE_ON))
5102 		goto out;
5103 
5104 	/* packets are considered pending if in the TX queue or the FW */
5105 	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5106 out:
5107 	mutex_unlock(&wl->mutex);
5108 
5109 	return ret;
5110 }
5111 
5112 /* can't be const, mac80211 writes to this */
5113 static struct ieee80211_rate wl1271_rates[] = {
5114 	{ .bitrate = 10,
5115 	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5116 	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5117 	{ .bitrate = 20,
5118 	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5119 	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5120 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5121 	{ .bitrate = 55,
5122 	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5123 	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5124 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5125 	{ .bitrate = 110,
5126 	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5127 	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5128 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5129 	{ .bitrate = 60,
5130 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5131 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5132 	{ .bitrate = 90,
5133 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5134 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5135 	{ .bitrate = 120,
5136 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5137 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5138 	{ .bitrate = 180,
5139 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5140 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5141 	{ .bitrate = 240,
5142 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5143 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5144 	{ .bitrate = 360,
5145 	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5146 	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5147 	{ .bitrate = 480,
5148 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5149 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5150 	{ .bitrate = 540,
5151 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5152 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5153 };
5154 
5155 /* can't be const, mac80211 writes to this */
5156 static struct ieee80211_channel wl1271_channels[] = {
5157 	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5158 	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5159 	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5160 	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5161 	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5162 	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5163 	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5164 	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5165 	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5166 	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5167 	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5168 	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5169 	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5170 	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5171 };
5172 
5173 /* can't be const, mac80211 writes to this */
5174 static struct ieee80211_supported_band wl1271_band_2ghz = {
5175 	.channels = wl1271_channels,
5176 	.n_channels = ARRAY_SIZE(wl1271_channels),
5177 	.bitrates = wl1271_rates,
5178 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5179 };
5180 
5181 /* 5 GHz data rates for WL1273 */
5182 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5183 	{ .bitrate = 60,
5184 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5185 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5186 	{ .bitrate = 90,
5187 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5188 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5189 	{ .bitrate = 120,
5190 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5191 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5192 	{ .bitrate = 180,
5193 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5194 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5195 	{ .bitrate = 240,
5196 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5197 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5198 	{ .bitrate = 360,
5199 	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5200 	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5201 	{ .bitrate = 480,
5202 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5203 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5204 	{ .bitrate = 540,
5205 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5206 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5207 };
5208 
5209 /* 5 GHz band channels for WL1273 */
5210 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5211 	{ .hw_value = 7, .center_freq = 5035, .max_power = WLCORE_MAX_TXPWR },
5212 	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5213 	{ .hw_value = 9, .center_freq = 5045, .max_power = WLCORE_MAX_TXPWR },
5214 	{ .hw_value = 11, .center_freq = 5055, .max_power = WLCORE_MAX_TXPWR },
5215 	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5216 	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5217 	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5218 	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5219 	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5220 	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5221 	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5222 	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5223 	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5224 	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5225 	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5226 	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5227 	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5228 	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5229 	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5230 	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5231 	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5232 	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5233 	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5234 	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5235 	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5236 	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5237 	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5238 	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5239 	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5240 	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5241 	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5242 	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5243 	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5244 	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5245 };
5246 
5247 static struct ieee80211_supported_band wl1271_band_5ghz = {
5248 	.channels = wl1271_channels_5ghz,
5249 	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5250 	.bitrates = wl1271_rates_5ghz,
5251 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5252 };
5253 
5254 static const struct ieee80211_ops wl1271_ops = {
5255 	.start = wl1271_op_start,
5256 	.stop = wlcore_op_stop,
5257 	.add_interface = wl1271_op_add_interface,
5258 	.remove_interface = wl1271_op_remove_interface,
5259 	.change_interface = wl12xx_op_change_interface,
5260 #ifdef CONFIG_PM
5261 	.suspend = wl1271_op_suspend,
5262 	.resume = wl1271_op_resume,
5263 #endif
5264 	.config = wl1271_op_config,
5265 	.prepare_multicast = wl1271_op_prepare_multicast,
5266 	.configure_filter = wl1271_op_configure_filter,
5267 	.tx = wl1271_op_tx,
5268 	.set_key = wlcore_op_set_key,
5269 	.hw_scan = wl1271_op_hw_scan,
5270 	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
5271 	.sched_scan_start = wl1271_op_sched_scan_start,
5272 	.sched_scan_stop = wl1271_op_sched_scan_stop,
5273 	.bss_info_changed = wl1271_op_bss_info_changed,
5274 	.set_frag_threshold = wl1271_op_set_frag_threshold,
5275 	.set_rts_threshold = wl1271_op_set_rts_threshold,
5276 	.conf_tx = wl1271_op_conf_tx,
5277 	.get_tsf = wl1271_op_get_tsf,
5278 	.get_survey = wl1271_op_get_survey,
5279 	.sta_state = wl12xx_op_sta_state,
5280 	.ampdu_action = wl1271_op_ampdu_action,
5281 	.tx_frames_pending = wl1271_tx_frames_pending,
5282 	.set_bitrate_mask = wl12xx_set_bitrate_mask,
5283 	.channel_switch = wl12xx_op_channel_switch,
5284 	.flush = wlcore_op_flush,
5285 	.remain_on_channel = wlcore_op_remain_on_channel,
5286 	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5287 	.add_chanctx = wlcore_op_add_chanctx,
5288 	.remove_chanctx = wlcore_op_remove_chanctx,
5289 	.change_chanctx = wlcore_op_change_chanctx,
5290 	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5291 	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5292 	.sta_rc_update = wlcore_op_sta_rc_update,
5293 	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5294 };
5295 
5296 
5297 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5298 {
5299 	u8 idx;
5300 
5301 	BUG_ON(band >= 2);
5302 
5303 	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5304 		wl1271_error("Illegal RX rate from HW: %d", rate);
5305 		return 0;
5306 	}
5307 
5308 	idx = wl->band_rate_to_idx[band][rate];
5309 	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5310 		wl1271_error("Unsupported RX rate from HW: %d", rate);
5311 		return 0;
5312 	}
5313 
5314 	return idx;
5315 }
5316 
5317 static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
5318 					       struct device_attribute *attr,
5319 					       char *buf)
5320 {
5321 	struct wl1271 *wl = dev_get_drvdata(dev);
5322 	ssize_t len;
5323 
5324 	len = PAGE_SIZE;
5325 
5326 	mutex_lock(&wl->mutex);
5327 	len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
5328 		       wl->sg_enabled);
5329 	mutex_unlock(&wl->mutex);
5330 
5331 	return len;
5332 
5333 }
5334 
5335 static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
5336 						struct device_attribute *attr,
5337 						const char *buf, size_t count)
5338 {
5339 	struct wl1271 *wl = dev_get_drvdata(dev);
5340 	unsigned long res;
5341 	int ret;
5342 
5343 	ret = kstrtoul(buf, 10, &res);
5344 	if (ret < 0) {
5345 		wl1271_warning("incorrect value written to bt_coex_mode");
5346 		return count;
5347 	}
5348 
5349 	mutex_lock(&wl->mutex);
5350 
5351 	res = !!res;
5352 
5353 	if (res == wl->sg_enabled)
5354 		goto out;
5355 
5356 	wl->sg_enabled = res;
5357 
5358 	if (unlikely(wl->state != WLCORE_STATE_ON))
5359 		goto out;
5360 
5361 	ret = wl1271_ps_elp_wakeup(wl);
5362 	if (ret < 0)
5363 		goto out;
5364 
5365 	wl1271_acx_sg_enable(wl, wl->sg_enabled);
5366 	wl1271_ps_elp_sleep(wl);
5367 
5368  out:
5369 	mutex_unlock(&wl->mutex);
5370 	return count;
5371 }
5372 
5373 static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
5374 		   wl1271_sysfs_show_bt_coex_state,
5375 		   wl1271_sysfs_store_bt_coex_state);
5376 
5377 static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
5378 					   struct device_attribute *attr,
5379 					   char *buf)
5380 {
5381 	struct wl1271 *wl = dev_get_drvdata(dev);
5382 	ssize_t len;
5383 
5384 	len = PAGE_SIZE;
5385 
5386 	mutex_lock(&wl->mutex);
5387 	if (wl->hw_pg_ver >= 0)
5388 		len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
5389 	else
5390 		len = snprintf(buf, len, "n/a\n");
5391 	mutex_unlock(&wl->mutex);
5392 
5393 	return len;
5394 }
5395 
5396 static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
5397 		   wl1271_sysfs_show_hw_pg_ver, NULL);
5398 
5399 static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
5400 				       struct bin_attribute *bin_attr,
5401 				       char *buffer, loff_t pos, size_t count)
5402 {
5403 	struct device *dev = container_of(kobj, struct device, kobj);
5404 	struct wl1271 *wl = dev_get_drvdata(dev);
5405 	ssize_t len;
5406 	int ret;
5407 
5408 	ret = mutex_lock_interruptible(&wl->mutex);
5409 	if (ret < 0)
5410 		return -ERESTARTSYS;
5411 
5412 	/* Let only one thread read the log at a time, blocking others */
5413 	while (wl->fwlog_size == 0) {
5414 		DEFINE_WAIT(wait);
5415 
5416 		prepare_to_wait_exclusive(&wl->fwlog_waitq,
5417 					  &wait,
5418 					  TASK_INTERRUPTIBLE);
5419 
5420 		if (wl->fwlog_size != 0) {
5421 			finish_wait(&wl->fwlog_waitq, &wait);
5422 			break;
5423 		}
5424 
5425 		mutex_unlock(&wl->mutex);
5426 
5427 		schedule();
5428 		finish_wait(&wl->fwlog_waitq, &wait);
5429 
5430 		if (signal_pending(current))
5431 			return -ERESTARTSYS;
5432 
5433 		ret = mutex_lock_interruptible(&wl->mutex);
5434 		if (ret < 0)
5435 			return -ERESTARTSYS;
5436 	}
5437 
5438 	/* Check if the fwlog is still valid */
5439 	if (wl->fwlog_size < 0) {
5440 		mutex_unlock(&wl->mutex);
5441 		return 0;
5442 	}
5443 
5444 	/* Seeking is not supported - old logs are not kept. Disregard pos. */
5445 	len = min(count, (size_t)wl->fwlog_size);
5446 	wl->fwlog_size -= len;
5447 	memcpy(buffer, wl->fwlog, len);
5448 
5449 	/* Make room for new messages */
5450 	memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
5451 
5452 	mutex_unlock(&wl->mutex);
5453 
5454 	return len;
5455 }
5456 
5457 static struct bin_attribute fwlog_attr = {
5458 	.attr = {.name = "fwlog", .mode = S_IRUSR},
5459 	.read = wl1271_sysfs_read_fwlog,
5460 };
5461 
5462 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5463 {
5464 	int i;
5465 
5466 	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5467 		     oui, nic);
5468 
5469 	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5470 		wl1271_warning("NIC part of the MAC address wraps around!");
5471 
5472 	for (i = 0; i < wl->num_mac_addr; i++) {
5473 		wl->addresses[i].addr[0] = (u8)(oui >> 16);
5474 		wl->addresses[i].addr[1] = (u8)(oui >> 8);
5475 		wl->addresses[i].addr[2] = (u8) oui;
5476 		wl->addresses[i].addr[3] = (u8)(nic >> 16);
5477 		wl->addresses[i].addr[4] = (u8)(nic >> 8);
5478 		wl->addresses[i].addr[5] = (u8) nic;
5479 		nic++;
5480 	}
5481 
5482 	/* we may be one address short at the most */
5483 	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5484 
5485 	/*
5486 	 * turn on the LAA bit in the first address and use it as
5487 	 * the last address.
5488 	 */
5489 	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5490 		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5491 		memcpy(&wl->addresses[idx], &wl->addresses[0],
5492 		       sizeof(wl->addresses[0]));
5493 		/* LAA bit */
5494 		wl->addresses[idx].addr[2] |= BIT(1);
5495 	}
5496 
5497 	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5498 	wl->hw->wiphy->addresses = wl->addresses;
5499 }
5500 
5501 static int wl12xx_get_hw_info(struct wl1271 *wl)
5502 {
5503 	int ret;
5504 
5505 	ret = wl12xx_set_power_on(wl);
5506 	if (ret < 0)
5507 		return ret;
5508 
5509 	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5510 	if (ret < 0)
5511 		goto out;
5512 
5513 	wl->fuse_oui_addr = 0;
5514 	wl->fuse_nic_addr = 0;
5515 
5516 	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5517 	if (ret < 0)
5518 		goto out;
5519 
5520 	if (wl->ops->get_mac)
5521 		ret = wl->ops->get_mac(wl);
5522 
5523 out:
5524 	wl1271_power_off(wl);
5525 	return ret;
5526 }
5527 
5528 static int wl1271_register_hw(struct wl1271 *wl)
5529 {
5530 	int ret;
5531 	u32 oui_addr = 0, nic_addr = 0;
5532 
5533 	if (wl->mac80211_registered)
5534 		return 0;
5535 
5536 	if (wl->nvs_len >= 12) {
5537 		/* NOTE: The wl->nvs->nvs element must be first, in
5538 		 * order to simplify the casting, we assume it is at
5539 		 * the beginning of the wl->nvs structure.
5540 		 */
5541 		u8 *nvs_ptr = (u8 *)wl->nvs;
5542 
5543 		oui_addr =
5544 			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5545 		nic_addr =
5546 			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5547 	}
5548 
5549 	/* if the MAC address is zeroed in the NVS derive from fuse */
5550 	if (oui_addr == 0 && nic_addr == 0) {
5551 		oui_addr = wl->fuse_oui_addr;
5552 		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
5553 		nic_addr = wl->fuse_nic_addr + 1;
5554 	}
5555 
5556 	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5557 
5558 	ret = ieee80211_register_hw(wl->hw);
5559 	if (ret < 0) {
5560 		wl1271_error("unable to register mac80211 hw: %d", ret);
5561 		goto out;
5562 	}
5563 
5564 	wl->mac80211_registered = true;
5565 
5566 	wl1271_debugfs_init(wl);
5567 
5568 	wl1271_notice("loaded");
5569 
5570 out:
5571 	return ret;
5572 }
5573 
5574 static void wl1271_unregister_hw(struct wl1271 *wl)
5575 {
5576 	if (wl->plt)
5577 		wl1271_plt_stop(wl);
5578 
5579 	ieee80211_unregister_hw(wl->hw);
5580 	wl->mac80211_registered = false;
5581 
5582 }
5583 
5584 static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
5585 	{
5586 		.max = 3,
5587 		.types = BIT(NL80211_IFTYPE_STATION),
5588 	},
5589 	{
5590 		.max = 1,
5591 		.types = BIT(NL80211_IFTYPE_AP) |
5592 			 BIT(NL80211_IFTYPE_P2P_GO) |
5593 			 BIT(NL80211_IFTYPE_P2P_CLIENT),
5594 	},
5595 };
5596 
5597 static struct ieee80211_iface_combination
5598 wlcore_iface_combinations[] = {
5599 	{
5600 	  .max_interfaces = 3,
5601 	  .limits = wlcore_iface_limits,
5602 	  .n_limits = ARRAY_SIZE(wlcore_iface_limits),
5603 	},
5604 };
5605 
5606 static int wl1271_init_ieee80211(struct wl1271 *wl)
5607 {
5608 	int i;
5609 	static const u32 cipher_suites[] = {
5610 		WLAN_CIPHER_SUITE_WEP40,
5611 		WLAN_CIPHER_SUITE_WEP104,
5612 		WLAN_CIPHER_SUITE_TKIP,
5613 		WLAN_CIPHER_SUITE_CCMP,
5614 		WL1271_CIPHER_SUITE_GEM,
5615 	};
5616 
5617 	/* The tx descriptor buffer */
5618 	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
5619 
5620 	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5621 		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
5622 
5623 	/* unit us */
5624 	/* FIXME: find a proper value */
5625 	wl->hw->channel_change_time = 10000;
5626 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
5627 
5628 	wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5629 		IEEE80211_HW_SUPPORTS_PS |
5630 		IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5631 		IEEE80211_HW_SUPPORTS_UAPSD |
5632 		IEEE80211_HW_HAS_RATE_CONTROL |
5633 		IEEE80211_HW_CONNECTION_MONITOR |
5634 		IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5635 		IEEE80211_HW_SPECTRUM_MGMT |
5636 		IEEE80211_HW_AP_LINK_PS |
5637 		IEEE80211_HW_AMPDU_AGGREGATION |
5638 		IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5639 		IEEE80211_HW_QUEUE_CONTROL;
5640 
5641 	wl->hw->wiphy->cipher_suites = cipher_suites;
5642 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5643 
5644 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5645 		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5646 		BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
5647 	wl->hw->wiphy->max_scan_ssids = 1;
5648 	wl->hw->wiphy->max_sched_scan_ssids = 16;
5649 	wl->hw->wiphy->max_match_sets = 16;
5650 	/*
5651 	 * Maximum length of elements in scanning probe request templates
5652 	 * should be the maximum length possible for a template, without
5653 	 * the IEEE80211 header of the template
5654 	 */
5655 	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5656 			sizeof(struct ieee80211_header);
5657 
5658 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5659 		sizeof(struct ieee80211_header);
5660 
5661 	wl->hw->wiphy->max_remain_on_channel_duration = 5000;
5662 
5663 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5664 				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
5665 
5666 	/* make sure all our channels fit in the scanned_ch bitmask */
5667 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5668 		     ARRAY_SIZE(wl1271_channels_5ghz) >
5669 		     WL1271_MAX_CHANNELS);
5670 	/*
5671 	* clear channel flags from the previous usage
5672 	* and restore max_power & max_antenna_gain values.
5673 	*/
5674 	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
5675 		wl1271_band_2ghz.channels[i].flags = 0;
5676 		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5677 		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
5678 	}
5679 
5680 	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
5681 		wl1271_band_5ghz.channels[i].flags = 0;
5682 		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5683 		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
5684 	}
5685 
5686 	/*
5687 	 * We keep local copies of the band structs because we need to
5688 	 * modify them on a per-device basis.
5689 	 */
5690 	memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5691 	       sizeof(wl1271_band_2ghz));
5692 	memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5693 	       &wl->ht_cap[IEEE80211_BAND_2GHZ],
5694 	       sizeof(*wl->ht_cap));
5695 	memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5696 	       sizeof(wl1271_band_5ghz));
5697 	memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5698 	       &wl->ht_cap[IEEE80211_BAND_5GHZ],
5699 	       sizeof(*wl->ht_cap));
5700 
5701 	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5702 		&wl->bands[IEEE80211_BAND_2GHZ];
5703 	wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5704 		&wl->bands[IEEE80211_BAND_5GHZ];
5705 
5706 	/*
5707 	 * allow 4 queues per mac address we support +
5708 	 * 1 cab queue per mac + one global offchannel Tx queue
5709 	 */
5710 	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
5711 
5712 	/* the last queue is the offchannel queue */
5713 	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
5714 	wl->hw->max_rates = 1;
5715 
5716 	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5717 
5718 	/* the FW answers probe-requests in AP-mode */
5719 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5720 	wl->hw->wiphy->probe_resp_offload =
5721 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5722 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5723 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5724 
5725 	/* allowed interface combinations */
5726 	wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
5727 	wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
5728 	wl->hw->wiphy->n_iface_combinations =
5729 		ARRAY_SIZE(wlcore_iface_combinations);
5730 
5731 	SET_IEEE80211_DEV(wl->hw, wl->dev);
5732 
5733 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
5734 	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5735 
5736 	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
5737 
5738 	return 0;
5739 }
5740 
5741 #define WL1271_DEFAULT_CHANNEL 0
5742 
5743 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
5744 				     u32 mbox_size)
5745 {
5746 	struct ieee80211_hw *hw;
5747 	struct wl1271 *wl;
5748 	int i, j, ret;
5749 	unsigned int order;
5750 
5751 	BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
5752 
5753 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5754 	if (!hw) {
5755 		wl1271_error("could not alloc ieee80211_hw");
5756 		ret = -ENOMEM;
5757 		goto err_hw_alloc;
5758 	}
5759 
5760 	wl = hw->priv;
5761 	memset(wl, 0, sizeof(*wl));
5762 
5763 	wl->priv = kzalloc(priv_size, GFP_KERNEL);
5764 	if (!wl->priv) {
5765 		wl1271_error("could not alloc wl priv");
5766 		ret = -ENOMEM;
5767 		goto err_priv_alloc;
5768 	}
5769 
5770 	INIT_LIST_HEAD(&wl->wlvif_list);
5771 
5772 	wl->hw = hw;
5773 
5774 	for (i = 0; i < NUM_TX_QUEUES; i++)
5775 		for (j = 0; j < WL12XX_MAX_LINKS; j++)
5776 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
5777 
5778 	skb_queue_head_init(&wl->deferred_rx_queue);
5779 	skb_queue_head_init(&wl->deferred_tx_queue);
5780 
5781 	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5782 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5783 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
5784 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5785 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5786 	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
5787 	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
5788 
5789 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5790 	if (!wl->freezable_wq) {
5791 		ret = -ENOMEM;
5792 		goto err_hw;
5793 	}
5794 
5795 	wl->channel = WL1271_DEFAULT_CHANNEL;
5796 	wl->rx_counter = 0;
5797 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5798 	wl->band = IEEE80211_BAND_2GHZ;
5799 	wl->channel_type = NL80211_CHAN_NO_HT;
5800 	wl->flags = 0;
5801 	wl->sg_enabled = true;
5802 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
5803 	wl->recovery_count = 0;
5804 	wl->hw_pg_ver = -1;
5805 	wl->ap_ps_map = 0;
5806 	wl->ap_fw_ps_map = 0;
5807 	wl->quirks = 0;
5808 	wl->platform_quirks = 0;
5809 	wl->system_hlid = WL12XX_SYSTEM_HLID;
5810 	wl->active_sta_count = 0;
5811 	wl->active_link_count = 0;
5812 	wl->fwlog_size = 0;
5813 	init_waitqueue_head(&wl->fwlog_waitq);
5814 
5815 	/* The system link is always allocated */
5816 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5817 
5818 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5819 	for (i = 0; i < wl->num_tx_desc; i++)
5820 		wl->tx_frames[i] = NULL;
5821 
5822 	spin_lock_init(&wl->wl_lock);
5823 
5824 	wl->state = WLCORE_STATE_OFF;
5825 	wl->fw_type = WL12XX_FW_TYPE_NONE;
5826 	mutex_init(&wl->mutex);
5827 	mutex_init(&wl->flush_mutex);
5828 	init_completion(&wl->nvs_loading_complete);
5829 
5830 	order = get_order(aggr_buf_size);
5831 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5832 	if (!wl->aggr_buf) {
5833 		ret = -ENOMEM;
5834 		goto err_wq;
5835 	}
5836 	wl->aggr_buf_size = aggr_buf_size;
5837 
5838 	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5839 	if (!wl->dummy_packet) {
5840 		ret = -ENOMEM;
5841 		goto err_aggr;
5842 	}
5843 
5844 	/* Allocate one page for the FW log */
5845 	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5846 	if (!wl->fwlog) {
5847 		ret = -ENOMEM;
5848 		goto err_dummy_packet;
5849 	}
5850 
5851 	wl->mbox_size = mbox_size;
5852 	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
5853 	if (!wl->mbox) {
5854 		ret = -ENOMEM;
5855 		goto err_fwlog;
5856 	}
5857 
5858 	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
5859 	if (!wl->buffer_32) {
5860 		ret = -ENOMEM;
5861 		goto err_mbox;
5862 	}
5863 
5864 	return hw;
5865 
5866 err_mbox:
5867 	kfree(wl->mbox);
5868 
5869 err_fwlog:
5870 	free_page((unsigned long)wl->fwlog);
5871 
5872 err_dummy_packet:
5873 	dev_kfree_skb(wl->dummy_packet);
5874 
5875 err_aggr:
5876 	free_pages((unsigned long)wl->aggr_buf, order);
5877 
5878 err_wq:
5879 	destroy_workqueue(wl->freezable_wq);
5880 
5881 err_hw:
5882 	wl1271_debugfs_exit(wl);
5883 	kfree(wl->priv);
5884 
5885 err_priv_alloc:
5886 	ieee80211_free_hw(hw);
5887 
5888 err_hw_alloc:
5889 
5890 	return ERR_PTR(ret);
5891 }
5892 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
5893 
5894 int wlcore_free_hw(struct wl1271 *wl)
5895 {
5896 	/* Unblock any fwlog readers */
5897 	mutex_lock(&wl->mutex);
5898 	wl->fwlog_size = -1;
5899 	wake_up_interruptible_all(&wl->fwlog_waitq);
5900 	mutex_unlock(&wl->mutex);
5901 
5902 	device_remove_bin_file(wl->dev, &fwlog_attr);
5903 
5904 	device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5905 
5906 	device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5907 	kfree(wl->buffer_32);
5908 	kfree(wl->mbox);
5909 	free_page((unsigned long)wl->fwlog);
5910 	dev_kfree_skb(wl->dummy_packet);
5911 	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
5912 
5913 	wl1271_debugfs_exit(wl);
5914 
5915 	vfree(wl->fw);
5916 	wl->fw = NULL;
5917 	wl->fw_type = WL12XX_FW_TYPE_NONE;
5918 	kfree(wl->nvs);
5919 	wl->nvs = NULL;
5920 
5921 	kfree(wl->fw_status_1);
5922 	kfree(wl->tx_res_if);
5923 	destroy_workqueue(wl->freezable_wq);
5924 
5925 	kfree(wl->priv);
5926 	ieee80211_free_hw(wl->hw);
5927 
5928 	return 0;
5929 }
5930 EXPORT_SYMBOL_GPL(wlcore_free_hw);
5931 
5932 static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
5933 {
5934 	struct wl1271 *wl = cookie;
5935 	unsigned long flags;
5936 
5937 	wl1271_debug(DEBUG_IRQ, "IRQ");
5938 
5939 	/* complete the ELP completion */
5940 	spin_lock_irqsave(&wl->wl_lock, flags);
5941 	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
5942 	if (wl->elp_compl) {
5943 		complete(wl->elp_compl);
5944 		wl->elp_compl = NULL;
5945 	}
5946 
5947 	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
5948 		/* don't enqueue a work right now. mark it as pending */
5949 		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
5950 		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
5951 		disable_irq_nosync(wl->irq);
5952 		pm_wakeup_event(wl->dev, 0);
5953 		spin_unlock_irqrestore(&wl->wl_lock, flags);
5954 		return IRQ_HANDLED;
5955 	}
5956 	spin_unlock_irqrestore(&wl->wl_lock, flags);
5957 
5958 	return IRQ_WAKE_THREAD;
5959 }
5960 
5961 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
5962 {
5963 	struct wl1271 *wl = context;
5964 	struct platform_device *pdev = wl->pdev;
5965 	struct wlcore_platdev_data *pdev_data = pdev->dev.platform_data;
5966 	struct wl12xx_platform_data *pdata = pdev_data->pdata;
5967 	unsigned long irqflags;
5968 	int ret;
5969 
5970 	if (fw) {
5971 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
5972 		if (!wl->nvs) {
5973 			wl1271_error("Could not allocate nvs data");
5974 			goto out;
5975 		}
5976 		wl->nvs_len = fw->size;
5977 	} else {
5978 		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
5979 			     WL12XX_NVS_NAME);
5980 		wl->nvs = NULL;
5981 		wl->nvs_len = 0;
5982 	}
5983 
5984 	ret = wl->ops->setup(wl);
5985 	if (ret < 0)
5986 		goto out_free_nvs;
5987 
5988 	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
5989 
5990 	/* adjust some runtime configuration parameters */
5991 	wlcore_adjust_conf(wl);
5992 
5993 	wl->irq = platform_get_irq(pdev, 0);
5994 	wl->platform_quirks = pdata->platform_quirks;
5995 	wl->if_ops = pdev_data->if_ops;
5996 
5997 	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
5998 		irqflags = IRQF_TRIGGER_RISING;
5999 	else
6000 		irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
6001 
6002 	ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wlcore_irq,
6003 				   irqflags,
6004 				   pdev->name, wl);
6005 	if (ret < 0) {
6006 		wl1271_error("request_irq() failed: %d", ret);
6007 		goto out_free_nvs;
6008 	}
6009 
6010 #ifdef CONFIG_PM
6011 	ret = enable_irq_wake(wl->irq);
6012 	if (!ret) {
6013 		wl->irq_wake_enabled = true;
6014 		device_init_wakeup(wl->dev, 1);
6015 		if (pdata->pwr_in_suspend) {
6016 			wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
6017 			wl->hw->wiphy->wowlan.n_patterns =
6018 				WL1271_MAX_RX_FILTERS;
6019 			wl->hw->wiphy->wowlan.pattern_min_len = 1;
6020 			wl->hw->wiphy->wowlan.pattern_max_len =
6021 				WL1271_RX_FILTER_MAX_PATTERN_SIZE;
6022 		}
6023 	}
6024 #endif
6025 	disable_irq(wl->irq);
6026 
6027 	ret = wl12xx_get_hw_info(wl);
6028 	if (ret < 0) {
6029 		wl1271_error("couldn't get hw info");
6030 		goto out_irq;
6031 	}
6032 
6033 	ret = wl->ops->identify_chip(wl);
6034 	if (ret < 0)
6035 		goto out_irq;
6036 
6037 	ret = wl1271_init_ieee80211(wl);
6038 	if (ret)
6039 		goto out_irq;
6040 
6041 	ret = wl1271_register_hw(wl);
6042 	if (ret)
6043 		goto out_irq;
6044 
6045 	/* Create sysfs file to control bt coex state */
6046 	ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
6047 	if (ret < 0) {
6048 		wl1271_error("failed to create sysfs file bt_coex_state");
6049 		goto out_unreg;
6050 	}
6051 
6052 	/* Create sysfs file to get HW PG version */
6053 	ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
6054 	if (ret < 0) {
6055 		wl1271_error("failed to create sysfs file hw_pg_ver");
6056 		goto out_bt_coex_state;
6057 	}
6058 
6059 	/* Create sysfs file for the FW log */
6060 	ret = device_create_bin_file(wl->dev, &fwlog_attr);
6061 	if (ret < 0) {
6062 		wl1271_error("failed to create sysfs file fwlog");
6063 		goto out_hw_pg_ver;
6064 	}
6065 
6066 	wl->initialized = true;
6067 	goto out;
6068 
6069 out_hw_pg_ver:
6070 	device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
6071 
6072 out_bt_coex_state:
6073 	device_remove_file(wl->dev, &dev_attr_bt_coex_state);
6074 
6075 out_unreg:
6076 	wl1271_unregister_hw(wl);
6077 
6078 out_irq:
6079 	free_irq(wl->irq, wl);
6080 
6081 out_free_nvs:
6082 	kfree(wl->nvs);
6083 
6084 out:
6085 	release_firmware(fw);
6086 	complete_all(&wl->nvs_loading_complete);
6087 }
6088 
6089 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6090 {
6091 	int ret;
6092 
6093 	if (!wl->ops || !wl->ptable)
6094 		return -EINVAL;
6095 
6096 	wl->dev = &pdev->dev;
6097 	wl->pdev = pdev;
6098 	platform_set_drvdata(pdev, wl);
6099 
6100 	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6101 				      WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6102 				      wl, wlcore_nvs_cb);
6103 	if (ret < 0) {
6104 		wl1271_error("request_firmware_nowait failed: %d", ret);
6105 		complete_all(&wl->nvs_loading_complete);
6106 	}
6107 
6108 	return ret;
6109 }
6110 EXPORT_SYMBOL_GPL(wlcore_probe);
6111 
6112 int wlcore_remove(struct platform_device *pdev)
6113 {
6114 	struct wl1271 *wl = platform_get_drvdata(pdev);
6115 
6116 	wait_for_completion(&wl->nvs_loading_complete);
6117 	if (!wl->initialized)
6118 		return 0;
6119 
6120 	if (wl->irq_wake_enabled) {
6121 		device_init_wakeup(wl->dev, 0);
6122 		disable_irq_wake(wl->irq);
6123 	}
6124 	wl1271_unregister_hw(wl);
6125 	free_irq(wl->irq, wl);
6126 	wlcore_free_hw(wl);
6127 
6128 	return 0;
6129 }
6130 EXPORT_SYMBOL_GPL(wlcore_remove);
6131 
6132 u32 wl12xx_debug_level = DEBUG_NONE;
6133 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6134 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6135 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6136 
6137 module_param_named(fwlog, fwlog_param, charp, 0);
6138 MODULE_PARM_DESC(fwlog,
6139 		 "FW logger options: continuous, ondemand, dbgpins or disable");
6140 
6141 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6142 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6143 
6144 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6145 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6146 
6147 MODULE_LICENSE("GPL");
6148 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6149 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6150 MODULE_FIRMWARE(WL12XX_NVS_NAME);
6151