xref: /linux/drivers/net/wireless/ti/wlcore/main.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This file is part of wlcore
4  *
5  * Copyright (C) 2008-2010 Nokia Corporation
6  * Copyright (C) 2011-2013 Texas Instruments Inc.
7  */
8 
9 #include <linux/module.h>
10 #include <linux/firmware.h>
11 #include <linux/etherdevice.h>
12 #include <linux/vmalloc.h>
13 #include <linux/interrupt.h>
14 #include <linux/irq.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/pm_wakeirq.h>
17 
18 #include "wlcore.h"
19 #include "debug.h"
20 #include "wl12xx_80211.h"
21 #include "io.h"
22 #include "tx.h"
23 #include "ps.h"
24 #include "init.h"
25 #include "debugfs.h"
26 #include "testmode.h"
27 #include "vendor_cmd.h"
28 #include "scan.h"
29 #include "hw_ops.h"
30 #include "sysfs.h"
31 
32 #define WL1271_BOOT_RETRIES 3
33 #define WL1271_WAKEUP_TIMEOUT 500
34 
35 static char *fwlog_param;
36 static int fwlog_mem_blocks = -1;
37 static int bug_on_recovery = -1;
38 static int no_recovery     = -1;
39 
40 static void __wl1271_op_remove_interface(struct wl1271 *wl,
41 					 struct ieee80211_vif *vif,
42 					 bool reset_tx_queues);
43 static void wlcore_op_stop_locked(struct wl1271 *wl);
44 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
45 
46 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
47 {
48 	int ret;
49 
50 	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
51 		return -EINVAL;
52 
53 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
54 		return 0;
55 
56 	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
57 		return 0;
58 
59 	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
60 	if (ret < 0)
61 		return ret;
62 
63 	wl1271_info("Association completed.");
64 	return 0;
65 }
66 
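/*
 * Regulatory notifier: cache the DFS region from the request and push
 * the updated regulatory domain configuration to the firmware.
 */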
67 static void wl1271_reg_notify(struct wiphy *wiphy,
68 			      struct regulatory_request *request)
69 {
70 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
71 	struct wl1271 *wl = hw->priv;
72 
73 	/* copy the current dfs region */
74 	if (request)
75 		wl->dfs_region = request->dfs_region;
76 
77 	wlcore_regdomain_config(wl);
78 }
79 
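/*
 * Enable or disable PS RX streaming for this vif via ACX and keep the
 * WLVIF_FLAG_RX_STREAMING_STARTED flag in sync with the firmware state.
 */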
80 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
81 				   bool enable)
82 {
83 	int ret = 0;
84 
85 	/* we should hold wl->mutex */
86 	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
87 	if (ret < 0)
88 		goto out;
89 
90 	if (enable)
91 		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
92 	else
93 		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
94 out:
95 	return ret;
96 }
97 
98 /*
99  * this function is being called when the rx_streaming interval
100  * has been changed or rx_streaming should be disabled
101  */
102 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
103 {
104 	int ret = 0;
105 	int period = wl->conf.rx_streaming.interval;
106 
107 	/* don't reconfigure if rx_streaming is disabled */
108 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
109 		goto out;
110 
111 	/* reconfigure/disable according to new streaming_period */
112 	if (period &&
113 	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
114 	    (wl->conf.rx_streaming.always ||
115 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
116 		ret = wl1271_set_rx_streaming(wl, wlvif, true);
117 	else {
118 		ret = wl1271_set_rx_streaming(wl, wlvif, false);
119 		/* don't cancel_work_sync since we might deadlock */
120 		del_timer_sync(&wlvif->rx_streaming_timer);
121 	}
122 out:
123 	return ret;
124 }
125 
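/*
 * Work item that wakes the chip, enables RX streaming for an associated
 * STA vif (when allowed by the configuration or Soft Gemini state) and
 * arms the inactivity timer that later queues the disable work.
 */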
126 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
127 {
128 	int ret;
129 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
130 						rx_streaming_enable_work);
131 	struct wl1271 *wl = wlvif->wl;
132 
133 	mutex_lock(&wl->mutex);
134 
135 	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
136 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
137 	    (!wl->conf.rx_streaming.always &&
138 	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
139 		goto out;
140 
141 	if (!wl->conf.rx_streaming.interval)
142 		goto out;
143 
144 	ret = pm_runtime_resume_and_get(wl->dev);
145 	if (ret < 0)
146 		goto out;
147 
148 	ret = wl1271_set_rx_streaming(wl, wlvif, true);
149 	if (ret < 0)
150 		goto out_sleep;
151 
152 	/* stop it after some time of inactivity */
153 	mod_timer(&wlvif->rx_streaming_timer,
154 		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
155 
156 out_sleep:
157 	pm_runtime_mark_last_busy(wl->dev);
158 	pm_runtime_put_autosuspend(wl->dev);
159 out:
160 	mutex_unlock(&wl->mutex);
161 }
162 
163 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
164 {
165 	int ret;
166 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
167 						rx_streaming_disable_work);
168 	struct wl1271 *wl = wlvif->wl;
169 
170 	mutex_lock(&wl->mutex);
171 
172 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
173 		goto out;
174 
175 	ret = pm_runtime_resume_and_get(wl->dev);
176 	if (ret < 0)
177 		goto out;
178 
179 	ret = wl1271_set_rx_streaming(wl, wlvif, false);
180 	if (ret)
181 		goto out_sleep;
182 
183 out_sleep:
184 	pm_runtime_mark_last_busy(wl->dev);
185 	pm_runtime_put_autosuspend(wl->dev);
186 out:
187 	mutex_unlock(&wl->mutex);
188 }
189 
190 static void wl1271_rx_streaming_timer(struct timer_list *t)
191 {
192 	struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
193 	struct wl1271 *wl = wlvif->wl;
194 	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
195 }
196 
197 /* wl->mutex must be taken */
198 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
199 {
200 	/* if the watchdog is not armed, don't do anything */
201 	if (wl->tx_allocated_blocks == 0)
202 		return;
203 
204 	cancel_delayed_work(&wl->tx_watchdog_work);
205 	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
206 		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
207 }
208 
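/*
 * Rate-control update work: for mesh vifs push the new HT capabilities
 * to the firmware, otherwise defer to the chip-specific
 * wlcore_hw_sta_rc_update() handler.
 */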
209 static void wlcore_rc_update_work(struct work_struct *work)
210 {
211 	int ret;
212 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
213 						rc_update_work);
214 	struct wl1271 *wl = wlvif->wl;
215 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
216 
217 	mutex_lock(&wl->mutex);
218 
219 	if (unlikely(wl->state != WLCORE_STATE_ON))
220 		goto out;
221 
222 	ret = pm_runtime_resume_and_get(wl->dev);
223 	if (ret < 0)
224 		goto out;
225 
226 	if (ieee80211_vif_is_mesh(vif)) {
227 		ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
228 						     true, wlvif->sta.hlid);
229 		if (ret < 0)
230 			goto out_sleep;
231 	} else {
232 		wlcore_hw_sta_rc_update(wl, wlvif);
233 	}
234 
235 out_sleep:
236 	pm_runtime_mark_last_busy(wl->dev);
237 	pm_runtime_put_autosuspend(wl->dev);
238 out:
239 	mutex_unlock(&wl->mutex);
240 }
241 
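/*
 * Tx watchdog: runs when no Tx blocks were freed by the firmware for
 * tx_watchdog_timeout ms.  Re-arms itself if the stall is expected
 * (ROC, scan in progress, or an AP with connected stations that may be
 * sleeping), otherwise triggers a hardware recovery.
 */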
242 static void wl12xx_tx_watchdog_work(struct work_struct *work)
243 {
244 	struct delayed_work *dwork;
245 	struct wl1271 *wl;
246 
247 	dwork = to_delayed_work(work);
248 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
249 
250 	mutex_lock(&wl->mutex);
251 
252 	if (unlikely(wl->state != WLCORE_STATE_ON))
253 		goto out;
254 
255 	/* Tx went out in the meantime - everything is ok */
256 	if (unlikely(wl->tx_allocated_blocks == 0))
257 		goto out;
258 
259 	/*
260 	 * if a ROC is in progress, we might not have any Tx for a long
261 	 * time (e.g. pending Tx on the non-ROC channels)
262 	 */
263 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
264 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
265 			     wl->conf.tx.tx_watchdog_timeout);
266 		wl12xx_rearm_tx_watchdog_locked(wl);
267 		goto out;
268 	}
269 
270 	/*
271 	 * if a scan is in progress, we might not have any Tx for a long
272 	 * time
273 	 */
274 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
275 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
276 			     wl->conf.tx.tx_watchdog_timeout);
277 		wl12xx_rearm_tx_watchdog_locked(wl);
278 		goto out;
279 	}
280 
281 	/*
282 	* AP might cache a frame for a long time for a sleeping station,
283 	* so rearm the timer if there's an AP interface with stations. If
284 	* Tx is genuinely stuck we will hopefully discover it when all
285 	* stations are removed due to inactivity.
286 	*/
287 	if (wl->active_sta_count) {
288 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
289 			     "%d stations",
290 			      wl->conf.tx.tx_watchdog_timeout,
291 			      wl->active_sta_count);
292 		wl12xx_rearm_tx_watchdog_locked(wl);
293 		goto out;
294 	}
295 
296 	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
297 		     wl->conf.tx.tx_watchdog_timeout);
298 	wl12xx_queue_recovery_work(wl);
299 
300 out:
301 	mutex_unlock(&wl->mutex);
302 }
303 
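/*
 * Override the default platform configuration with the fwlog,
 * bug_on_recovery and no_recovery module parameters, if they were set.
 */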
304 static void wlcore_adjust_conf(struct wl1271 *wl)
305 {
306 
307 	if (fwlog_param) {
308 		if (!strcmp(fwlog_param, "continuous")) {
309 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
310 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
311 		} else if (!strcmp(fwlog_param, "dbgpins")) {
312 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
313 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
314 		} else if (!strcmp(fwlog_param, "disable")) {
315 			wl->conf.fwlog.mem_blocks = 0;
316 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
317 		} else {
318 			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
319 		}
320 	}
321 
322 	if (bug_on_recovery != -1)
323 		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
324 
325 	if (no_recovery != -1)
326 		wl->conf.recovery.no_recovery = (u8) no_recovery;
327 }
328 
329 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
330 					struct wl12xx_vif *wlvif,
331 					u8 hlid, u8 tx_pkts)
332 {
333 	bool fw_ps;
334 
335 	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
336 
337 	/*
338 	 * Wake up from high level PS if the STA is asleep with too little
339 	 * Wake up from high level PS if the STA is asleep with too few
340 	 */
341 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
342 		wl12xx_ps_link_end(wl, wlvif, hlid);
343 
344 	/*
345 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
346 	 * Make an exception if this is the only connected link. In this
347 	 * case FW-memory congestion is less of a problem.
348 	 * Note that a single connected STA means 2*ap_count + 1 active links,
349 	 * since we must account for the global and broadcast AP links
350 	 * for each AP. The "fw_ps" check assures us the other link is a STA
351 	 * connected to the AP. Otherwise the FW would not set the PSM bit.
352 	 */
353 	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
354 		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
355 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
356 }
357 
358 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
359 					   struct wl12xx_vif *wlvif,
360 					   struct wl_fw_status *status)
361 {
362 	unsigned long cur_fw_ps_map;
363 	u8 hlid;
364 
365 	cur_fw_ps_map = status->link_ps_bitmap;
366 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
367 		wl1271_debug(DEBUG_PSM,
368 			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
369 			     wl->ap_fw_ps_map, cur_fw_ps_map,
370 			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
371 
372 		wl->ap_fw_ps_map = cur_fw_ps_map;
373 	}
374 
375 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
376 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
377 					    wl->links[hlid].allocated_pkts);
378 }
379 
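/*
 * Read and parse the firmware status block: update the per-queue and
 * per-link freed-packet counters (and security PNs), account for the
 * Tx blocks released by the firmware, re-arm or cancel the Tx watchdog
 * and refresh the per-link PS state of AP vifs.
 */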
380 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
381 {
382 	struct wl12xx_vif *wlvifsta;
383 	struct wl12xx_vif *wlvifap;
384 	struct wl12xx_vif *wlvif;
385 	u32 old_tx_blk_count = wl->tx_blocks_available;
386 	int avail, freed_blocks;
387 	int i;
388 	int ret;
389 	struct wl1271_link *lnk;
390 
391 	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
392 				   wl->raw_fw_status,
393 				   wl->fw_status_len, false);
394 	if (ret < 0)
395 		return ret;
396 
397 	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, status);
398 
399 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
400 		     "drv_rx_counter = %d, tx_results_counter = %d)",
401 		     status->intr,
402 		     status->fw_rx_counter,
403 		     status->drv_rx_counter,
404 		     status->tx_results_counter);
405 
406 	for (i = 0; i < NUM_TX_QUEUES; i++) {
407 		/* prevent wrap-around in freed-packets counter */
408 		wl->tx_allocated_pkts[i] -=
409 				(status->counters.tx_released_pkts[i] -
410 				wl->tx_pkts_freed[i]) & 0xff;
411 
412 		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
413 	}
414 
415 	/* Find an authorized STA vif */
416 	wlvifsta = NULL;
417 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
418 		if (wlvif->sta.hlid != WL12XX_INVALID_LINK_ID &&
419 		    test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags)) {
420 			wlvifsta = wlvif;
421 			break;
422 		}
423 	}
424 
425 	/* Find a started AP vif */
426 	wlvifap = NULL;
427 	wl12xx_for_each_wlvif(wl, wlvif) {
428 		if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
429 		    wlvif->inconn_count == 0 &&
430 		    test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
431 			wlvifap = wlvif;
432 			break;
433 		}
434 	}
435 
436 	for_each_set_bit(i, wl->links_map, wl->num_links) {
437 		u16 diff16, sec_pn16;
438 		u8 diff, tx_lnk_free_pkts;
439 
440 		lnk = &wl->links[i];
441 
442 		/* prevent wrap-around in freed-packets counter */
443 		tx_lnk_free_pkts = status->counters.tx_lnk_free_pkts[i];
444 		diff = (tx_lnk_free_pkts - lnk->prev_freed_pkts) & 0xff;
445 
446 		if (diff) {
447 			lnk->allocated_pkts -= diff;
448 			lnk->prev_freed_pkts = tx_lnk_free_pkts;
449 		}
450 
451 		/* Get the current sec_pn16 value if present */
452 		if (status->counters.tx_lnk_sec_pn16)
453 			sec_pn16 = __le16_to_cpu(status->counters.tx_lnk_sec_pn16[i]);
454 		else
455 			sec_pn16 = 0;
456 		/* prevent wrap-around in pn16 counter */
457 		diff16 = (sec_pn16 - lnk->prev_sec_pn16) & 0xffff;
458 
459 		/* FIXME: since free_pkts is an 8-bit counter of packets that
460 		 * rolls over, it can become zero. If it is zero, then we
461 		 * omit processing below. Is that really correct?
462 		 */
463 		if (tx_lnk_free_pkts <= 0)
464 			continue;
465 
466 		/* For a station that has an authorized link: */
467 		if (wlvifsta && wlvifsta->sta.hlid == i) {
468 			if (wlvifsta->encryption_type == KEY_TKIP ||
469 			    wlvifsta->encryption_type == KEY_AES) {
470 				if (diff16) {
471 					lnk->prev_sec_pn16 = sec_pn16;
472 					/* accumulate the prev_freed_pkts
473 					 * counter according to the PN from
474 					 * firmware
475 					 */
476 					lnk->total_freed_pkts += diff16;
477 				}
478 			} else {
479 				if (diff)
480 					/* accumulate the prev_freed_pkts
481 					 * counter according to the free packets
482 					 * count from firmware
483 					 */
484 					lnk->total_freed_pkts += diff;
485 			}
486 		}
487 
488 		/* For an AP that has been started */
489 		if (wlvifap && test_bit(i, wlvifap->ap.sta_hlid_map)) {
490 			if (wlvifap->encryption_type == KEY_TKIP ||
491 			    wlvifap->encryption_type == KEY_AES) {
492 				if (diff16) {
493 					lnk->prev_sec_pn16 = sec_pn16;
494 					/* accumulate the prev_freed_pkts
495 					 * counter according to the PN from
496 					 * firmware
497 					 */
498 					lnk->total_freed_pkts += diff16;
499 				}
500 			} else {
501 				if (diff)
502 					/* accumulate the prev_freed_pkts
503 					 * counter according to the free packets
504 					 * count from firmware
505 					 */
506 					lnk->total_freed_pkts += diff;
507 			}
508 		}
509 	}
510 
511 	/* prevent wrap-around in total blocks counter */
512 	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
513 		freed_blocks = status->total_released_blks -
514 			       wl->tx_blocks_freed;
515 	else
516 		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
517 			       status->total_released_blks;
518 
519 	wl->tx_blocks_freed = status->total_released_blks;
520 
521 	wl->tx_allocated_blocks -= freed_blocks;
522 
523 	/*
524 	 * If the FW freed some blocks:
525 	 * If we still have allocated blocks - re-arm the timer, Tx is
526 	 * not stuck. Otherwise, cancel the timer (no Tx currently).
527 	 */
528 	if (freed_blocks) {
529 		if (wl->tx_allocated_blocks)
530 			wl12xx_rearm_tx_watchdog_locked(wl);
531 		else
532 			cancel_delayed_work(&wl->tx_watchdog_work);
533 	}
534 
535 	avail = status->tx_total - wl->tx_allocated_blocks;
536 
537 	/*
538 	 * The FW might change the total number of TX memblocks before
539 	 * we get a notification about blocks being released. Thus, the
540 	 * available blocks calculation might yield a temporary result
541 	 * which is lower than the actual available blocks. Keeping in
542 	 * mind that only blocks that were allocated can be moved from
543 	 * TX to RX, tx_blocks_available should never decrease here.
544 	 */
545 	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
546 				      avail);
547 
548 	/* if more blocks are available now, tx work can be scheduled */
549 	if (wl->tx_blocks_available > old_tx_blk_count)
550 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
551 
552 	/* for AP update num of allocated TX blocks per link and ps status */
553 	wl12xx_for_each_wlvif_ap(wl, wlvif) {
554 		wl12xx_irq_update_links_status(wl, wlvif, status);
555 	}
556 
557 	/* update the host-chipset time offset */
558 	wl->time_offset = (ktime_get_boottime_ns() >> 10) -
559 		(s64)(status->fw_localtime);
560 
561 	wl->fw_fast_lnk_map = status->link_fast_bitmap;
562 
563 	return 0;
564 }
565 
566 static void wl1271_flush_deferred_work(struct wl1271 *wl)
567 {
568 	struct sk_buff *skb;
569 
570 	/* Pass all received frames to the network stack */
571 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
572 		ieee80211_rx_ni(wl->hw, skb);
573 
574 	/* Return sent skbs to the network stack */
575 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
576 		ieee80211_tx_status_ni(wl->hw, skb);
577 }
578 
579 static void wl1271_netstack_work(struct work_struct *work)
580 {
581 	struct wl1271 *wl =
582 		container_of(work, struct wl1271, netstack_work);
583 
584 	do {
585 		wl1271_flush_deferred_work(wl);
586 	} while (skb_queue_len(&wl->deferred_rx_queue));
587 }
588 
589 #define WL1271_IRQ_MAX_LOOPS 256
590 
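/*
 * Main interrupt processing loop, called with wl->mutex held: read the
 * firmware status and handle RX, Tx completions and firmware events
 * until no interrupt bits are left or WL1271_IRQ_MAX_LOOPS is reached
 * (only a single pass is done for edge-triggered interrupts).
 */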
591 static int wlcore_irq_locked(struct wl1271 *wl)
592 {
593 	int ret = 0;
594 	u32 intr;
595 	int loopcount = WL1271_IRQ_MAX_LOOPS;
596 	bool run_tx_queue = true;
597 	bool done = false;
598 	unsigned int defer_count;
599 	unsigned long flags;
600 
601 	/*
602 	 * If an edge-triggered interrupt must be used, we cannot iterate
603 	 * more than once without introducing race conditions with the hardirq.
604 	 */
605 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
606 		loopcount = 1;
607 
608 	wl1271_debug(DEBUG_IRQ, "IRQ work");
609 
610 	if (unlikely(wl->state != WLCORE_STATE_ON))
611 		goto out;
612 
613 	ret = pm_runtime_resume_and_get(wl->dev);
614 	if (ret < 0)
615 		goto out;
616 
617 	while (!done && loopcount--) {
618 		smp_mb__after_atomic();
619 
620 		ret = wlcore_fw_status(wl, wl->fw_status);
621 		if (ret < 0)
622 			goto err_ret;
623 
624 		wlcore_hw_tx_immediate_compl(wl);
625 
626 		intr = wl->fw_status->intr;
627 		intr &= WLCORE_ALL_INTR_MASK;
628 		if (!intr) {
629 			done = true;
630 			continue;
631 		}
632 
633 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
634 			wl1271_error("HW watchdog interrupt received! starting recovery.");
635 			wl->watchdog_recovery = true;
636 			ret = -EIO;
637 
638 			/* restarting the chip. ignore any other interrupt. */
639 			goto err_ret;
640 		}
641 
642 		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
643 			wl1271_error("SW watchdog interrupt received! "
644 				     "starting recovery.");
645 			wl->watchdog_recovery = true;
646 			ret = -EIO;
647 
648 			/* restarting the chip. ignore any other interrupt. */
649 			goto err_ret;
650 		}
651 
652 		if (likely(intr & WL1271_ACX_INTR_DATA)) {
653 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
654 
655 			ret = wlcore_rx(wl, wl->fw_status);
656 			if (ret < 0)
657 				goto err_ret;
658 
659 			/* Check if any tx blocks were freed */
660 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
661 				if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
662 					if (!wl1271_tx_total_queue_count(wl))
663 						run_tx_queue = false;
664 					spin_unlock_irqrestore(&wl->wl_lock, flags);
665 				}
666 
667 				/*
668 				 * In order to avoid starvation of the TX path,
669 				 * call the work function directly.
670 				 */
671 				if (run_tx_queue) {
672 					ret = wlcore_tx_work_locked(wl);
673 					if (ret < 0)
674 						goto err_ret;
675 				}
676 			}
677 
678 			/* check for tx results */
679 			ret = wlcore_hw_tx_delayed_compl(wl);
680 			if (ret < 0)
681 				goto err_ret;
682 
683 			/* Make sure the deferred queues don't get too long */
684 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
685 				      skb_queue_len(&wl->deferred_rx_queue);
686 			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
687 				wl1271_flush_deferred_work(wl);
688 		}
689 
690 		if (intr & WL1271_ACX_INTR_EVENT_A) {
691 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
692 			ret = wl1271_event_handle(wl, 0);
693 			if (ret < 0)
694 				goto err_ret;
695 		}
696 
697 		if (intr & WL1271_ACX_INTR_EVENT_B) {
698 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
699 			ret = wl1271_event_handle(wl, 1);
700 			if (ret < 0)
701 				goto err_ret;
702 		}
703 
704 		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
705 			wl1271_debug(DEBUG_IRQ,
706 				     "WL1271_ACX_INTR_INIT_COMPLETE");
707 
708 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
709 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
710 	}
711 
712 err_ret:
713 	pm_runtime_mark_last_busy(wl->dev);
714 	pm_runtime_put_autosuspend(wl->dev);
715 
716 out:
717 	return ret;
718 }
719 
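/*
 * Threaded IRQ handler: complete a pending ELP wakeup, postpone the
 * work if the chip is suspended, otherwise run wlcore_irq_locked() and
 * queue Tx work or a recovery as needed.
 */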
720 static irqreturn_t wlcore_irq(int irq, void *cookie)
721 {
722 	int ret;
723 	unsigned long flags;
724 	struct wl1271 *wl = cookie;
725 	bool queue_tx_work = true;
726 
727 	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
728 
729 	/* complete the ELP completion */
730 	if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) {
731 		spin_lock_irqsave(&wl->wl_lock, flags);
732 		if (wl->elp_compl)
733 			complete(wl->elp_compl);
734 		spin_unlock_irqrestore(&wl->wl_lock, flags);
735 	}
736 
737 	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
738 		/* don't enqueue a work right now. mark it as pending */
739 		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
740 		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
741 		spin_lock_irqsave(&wl->wl_lock, flags);
742 		disable_irq_nosync(wl->irq);
743 		pm_wakeup_event(wl->dev, 0);
744 		spin_unlock_irqrestore(&wl->wl_lock, flags);
745 		goto out_handled;
746 	}
747 
748 	/* TX might be handled here, avoid redundant work */
749 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
750 	cancel_work_sync(&wl->tx_work);
751 
752 	mutex_lock(&wl->mutex);
753 
754 	ret = wlcore_irq_locked(wl);
755 	if (ret)
756 		wl12xx_queue_recovery_work(wl);
757 
758 	/* In case TX was not handled in wlcore_irq_locked(), queue TX work */
759 	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
760 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
761 		if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
762 			if (!wl1271_tx_total_queue_count(wl))
763 				queue_tx_work = false;
764 			spin_unlock_irqrestore(&wl->wl_lock, flags);
765 		}
766 		if (queue_tx_work)
767 			ieee80211_queue_work(wl->hw, &wl->tx_work);
768 	}
769 
770 	mutex_unlock(&wl->mutex);
771 
772 out_handled:
773 	clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
774 
775 	return IRQ_HANDLED;
776 }
777 
778 struct vif_counter_data {
779 	u8 counter;
780 
781 	struct ieee80211_vif *cur_vif;
782 	bool cur_vif_running;
783 };
784 
785 static void wl12xx_vif_count_iter(void *data, u8 *mac,
786 				  struct ieee80211_vif *vif)
787 {
788 	struct vif_counter_data *counter = data;
789 
790 	counter->counter++;
791 	if (counter->cur_vif == vif)
792 		counter->cur_vif_running = true;
793 }
794 
795 /* caller must not hold wl->mutex, as it might deadlock */
796 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
797 			       struct ieee80211_vif *cur_vif,
798 			       struct vif_counter_data *data)
799 {
800 	memset(data, 0, sizeof(*data));
801 	data->cur_vif = cur_vif;
802 
803 	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
804 					    wl12xx_vif_count_iter, data);
805 }
806 
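/*
 * Select the firmware image to use (PLT, multi-role or single-role,
 * based on the cached vif count), load it with request_firmware() and
 * keep a vmalloc'ed copy in wl->fw for later boots.
 */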
807 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
808 {
809 	const struct firmware *fw;
810 	const char *fw_name;
811 	enum wl12xx_fw_type fw_type;
812 	int ret;
813 
814 	if (plt) {
815 		fw_type = WL12XX_FW_TYPE_PLT;
816 		fw_name = wl->plt_fw_name;
817 	} else {
818 		/*
819 		 * we can't call wl12xx_get_vif_count() here because
820 		 * wl->mutex is taken, so use the cached last_vif_count value
821 		 */
822 		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
823 			fw_type = WL12XX_FW_TYPE_MULTI;
824 			fw_name = wl->mr_fw_name;
825 		} else {
826 			fw_type = WL12XX_FW_TYPE_NORMAL;
827 			fw_name = wl->sr_fw_name;
828 		}
829 	}
830 
831 	if (wl->fw_type == fw_type)
832 		return 0;
833 
834 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
835 
836 	ret = request_firmware(&fw, fw_name, wl->dev);
837 
838 	if (ret < 0) {
839 		wl1271_error("could not get firmware %s: %d", fw_name, ret);
840 		return ret;
841 	}
842 
843 	if (fw->size % 4) {
844 		wl1271_error("firmware size is not a multiple of 32 bits: %zu",
845 			     fw->size);
846 		ret = -EILSEQ;
847 		goto out;
848 	}
849 
850 	vfree(wl->fw);
851 	wl->fw_type = WL12XX_FW_TYPE_NONE;
852 	wl->fw_len = fw->size;
853 	wl->fw = vmalloc(wl->fw_len);
854 
855 	if (!wl->fw) {
856 		wl1271_error("could not allocate memory for the firmware");
857 		ret = -ENOMEM;
858 		goto out;
859 	}
860 
861 	memcpy(wl->fw, fw->data, wl->fw_len);
862 	ret = 0;
863 	wl->fw_type = fw_type;
864 out:
865 	release_firmware(fw);
866 
867 	return ret;
868 }
869 
870 void wl12xx_queue_recovery_work(struct wl1271 *wl)
871 {
872 	/* Avoid a recursive recovery */
873 	if (wl->state == WLCORE_STATE_ON) {
874 		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
875 				  &wl->flags));
876 
877 		wl->state = WLCORE_STATE_RESTARTING;
878 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
879 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
880 	}
881 }
882 
883 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
884 {
885 	size_t len;
886 
887 	/* Make sure we have enough room */
888 	len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
889 
890 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
891 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
892 	wl->fwlog_size += len;
893 
894 	return len;
895 }
896 
897 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
898 {
899 	u32 end_of_log = 0;
900 	int error;
901 
902 	if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
903 		return;
904 
905 	wl1271_info("Reading FW panic log");
906 
907 	/*
908 	 * Make sure the chip is awake and the logger isn't active.
909 	 * Do not send a stop fwlog command if the fw is hung or if
910 	 * dbgpins are used (due to some fw bug).
911 	 */
912 	error = pm_runtime_resume_and_get(wl->dev);
913 	if (error < 0)
914 		return;
915 	if (!wl->watchdog_recovery &&
916 	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
917 		wl12xx_cmd_stop_fwlog(wl);
918 
919 	/* Traverse the memory blocks linked list */
920 	do {
921 		end_of_log = wlcore_event_fw_logger(wl);
922 		if (end_of_log == 0) {
923 			msleep(100);
924 			end_of_log = wlcore_event_fw_logger(wl);
925 		}
926 	} while (end_of_log != 0);
927 }
928 
929 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
930 				   u8 hlid, struct ieee80211_sta *sta)
931 {
932 	struct wl1271_station *wl_sta;
933 	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
934 
935 	wl_sta = (void *)sta->drv_priv;
936 	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
937 
938 	/*
939 	 * increment the initial seq number on recovery to account for
940 	 * transmitted packets that we haven't yet got in the FW status
941 	 */
942 	if (wlvif->encryption_type == KEY_GEM)
943 		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
944 
945 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
946 		wl_sta->total_freed_pkts += sqn_recovery_padding;
947 }
948 
949 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
950 					struct wl12xx_vif *wlvif,
951 					u8 hlid, const u8 *addr)
952 {
953 	struct ieee80211_sta *sta;
954 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
955 
956 	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
957 		    is_zero_ether_addr(addr)))
958 		return;
959 
960 	rcu_read_lock();
961 	sta = ieee80211_find_sta(vif, addr);
962 	if (sta)
963 		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
964 	rcu_read_unlock();
965 }
966 
967 static void wlcore_print_recovery(struct wl1271 *wl)
968 {
969 	u32 pc = 0;
970 	u32 hint_sts = 0;
971 	int ret;
972 
973 	wl1271_info("Hardware recovery in progress. FW ver: %s",
974 		    wl->chip.fw_ver_str);
975 
976 	/* change partitions momentarily so we can read the FW pc */
977 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
978 	if (ret < 0)
979 		return;
980 
981 	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
982 	if (ret < 0)
983 		return;
984 
985 	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
986 	if (ret < 0)
987 		return;
988 
989 	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
990 				pc, hint_sts, ++wl->recovery_count);
991 
992 	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
993 }
994 
995 
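/*
 * Recovery work: dump the firmware panic log and program counter
 * (unless the recovery was intended), tear down all interfaces while
 * preserving their Tx sequence counters, stop the hardware and ask
 * mac80211 to restart it.
 */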
996 static void wl1271_recovery_work(struct work_struct *work)
997 {
998 	struct wl1271 *wl =
999 		container_of(work, struct wl1271, recovery_work);
1000 	struct wl12xx_vif *wlvif;
1001 	struct ieee80211_vif *vif;
1002 	int error;
1003 
1004 	mutex_lock(&wl->mutex);
1005 
1006 	if (wl->state == WLCORE_STATE_OFF || wl->plt)
1007 		goto out_unlock;
1008 
1009 	error = pm_runtime_resume_and_get(wl->dev);
1010 	if (error < 0)
1011 		wl1271_warning("Enable for recovery failed");
1012 	wlcore_disable_interrupts_nosync(wl);
1013 
1014 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
1015 		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
1016 			wl12xx_read_fwlog_panic(wl);
1017 		wlcore_print_recovery(wl);
1018 	}
1019 
1020 	BUG_ON(wl->conf.recovery.bug_on_recovery &&
1021 	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
1022 
1023 	clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
1024 
1025 	if (wl->conf.recovery.no_recovery) {
1026 		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
1027 		goto out_unlock;
1028 	}
1029 
1030 	/* Prevent spurious TX during FW restart */
1031 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1032 
1033 	/* reboot the chipset */
1034 	while (!list_empty(&wl->wlvif_list)) {
1035 		wlvif = list_first_entry(&wl->wlvif_list,
1036 				       struct wl12xx_vif, list);
1037 		vif = wl12xx_wlvif_to_vif(wlvif);
1038 
1039 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
1040 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
1041 			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
1042 						    vif->bss_conf.bssid);
1043 		}
1044 
1045 		__wl1271_op_remove_interface(wl, vif, false);
1046 	}
1047 
1048 	wlcore_op_stop_locked(wl);
1049 	pm_runtime_mark_last_busy(wl->dev);
1050 	pm_runtime_put_autosuspend(wl->dev);
1051 
1052 	ieee80211_restart_hw(wl->hw);
1053 
1054 	/*
1055 	 * It's safe to enable TX now - the queues are stopped after a request
1056 	 * to restart the HW.
1057 	 */
1058 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1059 
1060 out_unlock:
1061 	wl->watchdog_recovery = false;
1062 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1063 	mutex_unlock(&wl->mutex);
1064 }
1065 
1066 static int wlcore_fw_wakeup(struct wl1271 *wl)
1067 {
1068 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
1069 }
1070 
1071 static int wl1271_setup(struct wl1271 *wl)
1072 {
1073 	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1074 	if (!wl->raw_fw_status)
1075 		goto err;
1076 
1077 	wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1078 	if (!wl->fw_status)
1079 		goto err;
1080 
1081 	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1082 	if (!wl->tx_res_if)
1083 		goto err;
1084 
1085 	return 0;
1086 err:
1087 	kfree(wl->fw_status);
1088 	kfree(wl->raw_fw_status);
1089 	return -ENOMEM;
1090 }
1091 
1092 static int wl12xx_set_power_on(struct wl1271 *wl)
1093 {
1094 	int ret;
1095 
1096 	msleep(WL1271_PRE_POWER_ON_SLEEP);
1097 	ret = wl1271_power_on(wl);
1098 	if (ret < 0)
1099 		goto out;
1100 	msleep(WL1271_POWER_ON_SLEEP);
1101 	wl1271_io_reset(wl);
1102 	wl1271_io_init(wl);
1103 
1104 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1105 	if (ret < 0)
1106 		goto fail;
1107 
1108 	/* ELP module wake up */
1109 	ret = wlcore_fw_wakeup(wl);
1110 	if (ret < 0)
1111 		goto fail;
1112 
1113 out:
1114 	return ret;
1115 
1116 fail:
1117 	wl1271_power_off(wl);
1118 	return ret;
1119 }
1120 
1121 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1122 {
1123 	int ret = 0;
1124 
1125 	ret = wl12xx_set_power_on(wl);
1126 	if (ret < 0)
1127 		goto out;
1128 
1129 	/*
1130 	 * For wl127x based devices we could use the default block
1131 	 * size (512 bytes), but due to a bug in the sdio driver, we
1132 	 * need to set it explicitly after the chip is powered on.  To
1133 	 * simplify the code and since the performance impact is
1134 	 * negligible, we use the same block size for all different
1135 	 * chip types.
1136 	 *
1137 	 * Check if the bus supports blocksize alignment and, if it
1138 	 * doesn't, make sure we don't have the quirk.
1139 	 */
1140 	if (!wl1271_set_block_size(wl))
1141 		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1142 
1143 	/* TODO: make sure the lower driver has set things up correctly */
1144 
1145 	ret = wl1271_setup(wl);
1146 	if (ret < 0)
1147 		goto out;
1148 
1149 	ret = wl12xx_fetch_firmware(wl, plt);
1150 	if (ret < 0) {
1151 		kfree(wl->fw_status);
1152 		kfree(wl->raw_fw_status);
1153 		kfree(wl->tx_res_if);
1154 	}
1155 
1156 out:
1157 	return ret;
1158 }
1159 
1160 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1161 {
1162 	int retries = WL1271_BOOT_RETRIES;
1163 	struct wiphy *wiphy = wl->hw->wiphy;
1164 
1165 	static const char* const PLT_MODE[] = {
1166 		"PLT_OFF",
1167 		"PLT_ON",
1168 		"PLT_FEM_DETECT",
1169 		"PLT_CHIP_AWAKE"
1170 	};
1171 
1172 	int ret;
1173 
1174 	mutex_lock(&wl->mutex);
1175 
1176 	wl1271_notice("power up");
1177 
1178 	if (wl->state != WLCORE_STATE_OFF) {
1179 		wl1271_error("cannot go into PLT state because not "
1180 			     "in off state: %d", wl->state);
1181 		ret = -EBUSY;
1182 		goto out;
1183 	}
1184 
1185 	/* Indicate to lower levels that we are now in PLT mode */
1186 	wl->plt = true;
1187 	wl->plt_mode = plt_mode;
1188 
1189 	while (retries) {
1190 		retries--;
1191 		ret = wl12xx_chip_wakeup(wl, true);
1192 		if (ret < 0)
1193 			goto power_off;
1194 
1195 		if (plt_mode != PLT_CHIP_AWAKE) {
1196 			ret = wl->ops->plt_init(wl);
1197 			if (ret < 0)
1198 				goto power_off;
1199 		}
1200 
1201 		wl->state = WLCORE_STATE_ON;
1202 		wl1271_notice("firmware booted in PLT mode %s (%s)",
1203 			      PLT_MODE[plt_mode],
1204 			      wl->chip.fw_ver_str);
1205 
1206 		/* update hw/fw version info in wiphy struct */
1207 		wiphy->hw_version = wl->chip.id;
1208 		strscpy(wiphy->fw_version, wl->chip.fw_ver_str,
1209 			sizeof(wiphy->fw_version));
1210 
1211 		goto out;
1212 
1213 power_off:
1214 		wl1271_power_off(wl);
1215 	}
1216 
1217 	wl->plt = false;
1218 	wl->plt_mode = PLT_OFF;
1219 
1220 	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1221 		     WL1271_BOOT_RETRIES);
1222 out:
1223 	mutex_unlock(&wl->mutex);
1224 
1225 	return ret;
1226 }
1227 
1228 int wl1271_plt_stop(struct wl1271 *wl)
1229 {
1230 	int ret = 0;
1231 
1232 	wl1271_notice("power down");
1233 
1234 	/*
1235 	 * Interrupts must be disabled before setting the state to OFF.
1236 	 * Otherwise, the interrupt handler might be called and exit without
1237 	 * reading the interrupt status.
1238 	 */
1239 	wlcore_disable_interrupts(wl);
1240 	mutex_lock(&wl->mutex);
1241 	if (!wl->plt) {
1242 		mutex_unlock(&wl->mutex);
1243 
1244 		/*
1245 		 * This will not necessarily enable interrupts as interrupts
1246 		 * may have been disabled when op_stop was called. It will,
1247 		 * however, balance the above call to disable_interrupts().
1248 		 */
1249 		wlcore_enable_interrupts(wl);
1250 
1251 		wl1271_error("cannot power down because not in PLT "
1252 			     "state: %d", wl->state);
1253 		ret = -EBUSY;
1254 		goto out;
1255 	}
1256 
1257 	mutex_unlock(&wl->mutex);
1258 
1259 	wl1271_flush_deferred_work(wl);
1260 	cancel_work_sync(&wl->netstack_work);
1261 	cancel_work_sync(&wl->recovery_work);
1262 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1263 
1264 	mutex_lock(&wl->mutex);
1265 	wl1271_power_off(wl);
1266 	wl->flags = 0;
1267 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1268 	wl->state = WLCORE_STATE_OFF;
1269 	wl->plt = false;
1270 	wl->plt_mode = PLT_OFF;
1271 	wl->rx_counter = 0;
1272 	mutex_unlock(&wl->mutex);
1273 
1274 out:
1275 	return ret;
1276 }
1277 
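/*
 * mac80211 .tx callback: map the frame to a firmware link (hlid) and AC
 * queue, drop it if the link is invalid or the queue is hard-stopped,
 * apply the high-watermark flow control and schedule the Tx work.
 */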
1278 static void wl1271_op_tx(struct ieee80211_hw *hw,
1279 			 struct ieee80211_tx_control *control,
1280 			 struct sk_buff *skb)
1281 {
1282 	struct wl1271 *wl = hw->priv;
1283 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1284 	struct ieee80211_vif *vif = info->control.vif;
1285 	struct wl12xx_vif *wlvif = NULL;
1286 	unsigned long flags;
1287 	int q, mapping;
1288 	u8 hlid;
1289 
1290 	if (!vif) {
1291 		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1292 		ieee80211_free_txskb(hw, skb);
1293 		return;
1294 	}
1295 
1296 	wlvif = wl12xx_vif_to_data(vif);
1297 	mapping = skb_get_queue_mapping(skb);
1298 	q = wl1271_tx_get_queue(mapping);
1299 
1300 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1301 
1302 	spin_lock_irqsave(&wl->wl_lock, flags);
1303 
1304 	/*
1305 	 * drop the packet if the link is invalid or the queue is stopped
1306 	 * for any reason but watermark. Watermark is a "soft"-stop so we
1307 	 * allow these packets through.
1308 	 */
1309 	if (hlid == WL12XX_INVALID_LINK_ID ||
1310 	    (!test_bit(hlid, wlvif->links_map)) ||
1311 	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1312 	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1313 			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1314 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1315 		ieee80211_free_txskb(hw, skb);
1316 		goto out;
1317 	}
1318 
1319 	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1320 		     hlid, q, skb->len);
1321 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1322 
1323 	wl->tx_queue_count[q]++;
1324 	wlvif->tx_queue_count[q]++;
1325 
1326 	/*
1327 	 * The workqueue is slow to process the tx_queue and we need to stop
1328 	 * the queue here, otherwise the queue will get too long.
1329 	 */
1330 	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1331 	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1332 					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1333 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1334 		wlcore_stop_queue_locked(wl, wlvif, q,
1335 					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1336 	}
1337 
1338 	/*
1339 	 * The chip specific setup must run before the first TX packet -
1340 	 * before that, the tx_work will not be initialized!
1341 	 */
1342 
1343 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1344 	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1345 		ieee80211_queue_work(wl->hw, &wl->tx_work);
1346 
1347 out:
1348 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1349 }
1350 
1351 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1352 {
1353 	unsigned long flags;
1354 	int q;
1355 
1356 	/* no need to queue a new dummy packet if one is already pending */
1357 	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1358 		return 0;
1359 
1360 	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1361 
1362 	spin_lock_irqsave(&wl->wl_lock, flags);
1363 	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1364 	wl->tx_queue_count[q]++;
1365 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1366 
1367 	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1368 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1369 		return wlcore_tx_work_locked(wl);
1370 
1371 	/*
1372 	 * If the FW TX is busy, TX work will be scheduled by the threaded
1373 	 * interrupt handler function
1374 	 */
1375 	return 0;
1376 }
1377 
1378 /*
1379  * The size of the dummy packet should be at least 1400 bytes. However, in
1380  * order to minimize the number of bus transactions, aligning it to 512-byte
1381  * boundaries could be beneficial, performance-wise.
1382  */
1383 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1384 
1385 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1386 {
1387 	struct sk_buff *skb;
1388 	struct ieee80211_hdr_3addr *hdr;
1389 	unsigned int dummy_packet_size;
1390 
1391 	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1392 			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1393 
1394 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1395 	if (!skb) {
1396 		wl1271_warning("Failed to allocate a dummy packet skb");
1397 		return NULL;
1398 	}
1399 
1400 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1401 
1402 	hdr = skb_put_zero(skb, sizeof(*hdr));
1403 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1404 					 IEEE80211_STYPE_NULLFUNC |
1405 					 IEEE80211_FCTL_TODS);
1406 
1407 	skb_put_zero(skb, dummy_packet_size);
1408 
1409 	/* Dummy packets require the TID to be management */
1410 	skb->priority = WL1271_TID_MGMT;
1411 
1412 	/* Initialize all fields that might be used */
1413 	skb_set_queue_mapping(skb, 0);
1414 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1415 
1416 	return skb;
1417 }
1418 
1419 
1420 static int
1421 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1422 {
1423 	int num_fields = 0, in_field = 0, fields_size = 0;
1424 	int i, pattern_len = 0;
1425 
1426 	if (!p->mask) {
1427 		wl1271_warning("No mask in WoWLAN pattern");
1428 		return -EINVAL;
1429 	}
1430 
1431 	/*
1432 	 * The pattern is broken up into segments of bytes at different offsets
1433 	 * that need to be checked by the FW filter. Each segment is called
1434 	 * a field in the FW API. We verify that the total number of fields
1435 	 * required for this pattern won't exceed the FW limit (8)
1436 	 * and that the total fields buffer won't exceed the FW limit.
1437 	 * Note that if there's a pattern which crosses Ethernet/IP header
1438 	 * boundary a new field is required.
1439 	 */
1440 	for (i = 0; i < p->pattern_len; i++) {
1441 		if (test_bit(i, (unsigned long *)p->mask)) {
1442 			if (!in_field) {
1443 				in_field = 1;
1444 				pattern_len = 1;
1445 			} else {
1446 				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1447 					num_fields++;
1448 					fields_size += pattern_len +
1449 						RX_FILTER_FIELD_OVERHEAD;
1450 					pattern_len = 1;
1451 				} else
1452 					pattern_len++;
1453 			}
1454 		} else {
1455 			if (in_field) {
1456 				in_field = 0;
1457 				fields_size += pattern_len +
1458 					RX_FILTER_FIELD_OVERHEAD;
1459 				num_fields++;
1460 			}
1461 		}
1462 	}
1463 
1464 	if (in_field) {
1465 		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1466 		num_fields++;
1467 	}
1468 
1469 	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1470 		wl1271_warning("RX Filter too complex. Too many segments");
1471 		return -EINVAL;
1472 	}
1473 
1474 	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1475 		wl1271_warning("RX filter pattern is too big");
1476 		return -E2BIG;
1477 	}
1478 
1479 	return 0;
1480 }
1481 
1482 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1483 {
1484 	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1485 }
1486 
1487 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1488 {
1489 	int i;
1490 
1491 	if (filter == NULL)
1492 		return;
1493 
1494 	for (i = 0; i < filter->num_fields; i++)
1495 		kfree(filter->fields[i].pattern);
1496 
1497 	kfree(filter);
1498 }
1499 
1500 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1501 				 u16 offset, u8 flags,
1502 				 const u8 *pattern, u8 len)
1503 {
1504 	struct wl12xx_rx_filter_field *field;
1505 
1506 	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1507 		wl1271_warning("Max fields per RX filter. can't alloc another");
1508 		return -EINVAL;
1509 	}
1510 
1511 	field = &filter->fields[filter->num_fields];
1512 
1513 	field->pattern = kmemdup(pattern, len, GFP_KERNEL);
1514 	if (!field->pattern) {
1515 		wl1271_warning("Failed to allocate RX filter pattern");
1516 		return -ENOMEM;
1517 	}
1518 
1519 	filter->num_fields++;
1520 
1521 	field->offset = cpu_to_le16(offset);
1522 	field->flags = flags;
1523 	field->len = len;
1524 
1525 	return 0;
1526 }
1527 
1528 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1529 {
1530 	int i, fields_size = 0;
1531 
1532 	for (i = 0; i < filter->num_fields; i++)
1533 		fields_size += filter->fields[i].len +
1534 			sizeof(struct wl12xx_rx_filter_field) -
1535 			sizeof(u8 *);
1536 
1537 	return fields_size;
1538 }
1539 
1540 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1541 				    u8 *buf)
1542 {
1543 	int i;
1544 	struct wl12xx_rx_filter_field *field;
1545 
1546 	for (i = 0; i < filter->num_fields; i++) {
1547 		field = (struct wl12xx_rx_filter_field *)buf;
1548 
1549 		field->offset = filter->fields[i].offset;
1550 		field->flags = filter->fields[i].flags;
1551 		field->len = filter->fields[i].len;
1552 
1553 		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1554 		buf += sizeof(struct wl12xx_rx_filter_field) -
1555 			sizeof(u8 *) + field->len;
1556 	}
1557 }
1558 
1559 /*
1560  * Allocates an RX filter, returned through f,
1561  * which needs to be freed using wl1271_rx_filter_free()
1562  */
1563 static int
1564 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1565 					   struct wl12xx_rx_filter **f)
1566 {
1567 	int i, j, ret = 0;
1568 	struct wl12xx_rx_filter *filter;
1569 	u16 offset;
1570 	u8 flags, len;
1571 
1572 	filter = wl1271_rx_filter_alloc();
1573 	if (!filter) {
1574 		wl1271_warning("Failed to alloc rx filter");
1575 		ret = -ENOMEM;
1576 		goto err;
1577 	}
1578 
1579 	i = 0;
1580 	while (i < p->pattern_len) {
1581 		if (!test_bit(i, (unsigned long *)p->mask)) {
1582 			i++;
1583 			continue;
1584 		}
1585 
1586 		for (j = i; j < p->pattern_len; j++) {
1587 			if (!test_bit(j, (unsigned long *)p->mask))
1588 				break;
1589 
1590 			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1591 			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1592 				break;
1593 		}
1594 
1595 		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1596 			offset = i;
1597 			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1598 		} else {
1599 			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1600 			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1601 		}
1602 
1603 		len = j - i;
1604 
1605 		ret = wl1271_rx_filter_alloc_field(filter,
1606 						   offset,
1607 						   flags,
1608 						   &p->pattern[i], len);
1609 		if (ret)
1610 			goto err;
1611 
1612 		i = j;
1613 	}
1614 
1615 	filter->action = FILTER_SIGNAL;
1616 
1617 	*f = filter;
1618 	return 0;
1619 
1620 err:
1621 	wl1271_rx_filter_free(filter);
1622 	*f = NULL;
1623 
1624 	return ret;
1625 }
1626 
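/*
 * Program the firmware RX filters for WoWLAN: with no patterns the
 * default filter forwards everything to the host; otherwise each
 * validated pattern is installed as a filter and all non-matching
 * traffic is dropped while suspended.
 */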
1627 static int wl1271_configure_wowlan(struct wl1271 *wl,
1628 				   struct cfg80211_wowlan *wow)
1629 {
1630 	int i, ret;
1631 
1632 	if (!wow || wow->any || !wow->n_patterns) {
1633 		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1634 							  FILTER_SIGNAL);
1635 		if (ret)
1636 			goto out;
1637 
1638 		ret = wl1271_rx_filter_clear_all(wl);
1639 		if (ret)
1640 			goto out;
1641 
1642 		return 0;
1643 	}
1644 
1645 	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1646 		return -EINVAL;
1647 
1648 	/* Validate all incoming patterns before clearing current FW state */
1649 	for (i = 0; i < wow->n_patterns; i++) {
1650 		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1651 		if (ret) {
1652 			wl1271_warning("Bad wowlan pattern %d", i);
1653 			return ret;
1654 		}
1655 	}
1656 
1657 	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1658 	if (ret)
1659 		goto out;
1660 
1661 	ret = wl1271_rx_filter_clear_all(wl);
1662 	if (ret)
1663 		goto out;
1664 
1665 	/* Translate WoWLAN patterns into filters */
1666 	for (i = 0; i < wow->n_patterns; i++) {
1667 		struct cfg80211_pkt_pattern *p;
1668 		struct wl12xx_rx_filter *filter = NULL;
1669 
1670 		p = &wow->patterns[i];
1671 
1672 		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1673 		if (ret) {
1674 			wl1271_warning("Failed to create an RX filter from "
1675 				       "wowlan pattern %d", i);
1676 			goto out;
1677 		}
1678 
1679 		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1680 
1681 		wl1271_rx_filter_free(filter);
1682 		if (ret)
1683 			goto out;
1684 	}
1685 
1686 	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1687 
1688 out:
1689 	return ret;
1690 }
1691 
1692 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1693 					struct wl12xx_vif *wlvif,
1694 					struct cfg80211_wowlan *wow)
1695 {
1696 	int ret = 0;
1697 
1698 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1699 		goto out;
1700 
1701 	ret = wl1271_configure_wowlan(wl, wow);
1702 	if (ret < 0)
1703 		goto out;
1704 
1705 	if ((wl->conf.conn.suspend_wake_up_event ==
1706 	     wl->conf.conn.wake_up_event) &&
1707 	    (wl->conf.conn.suspend_listen_interval ==
1708 	     wl->conf.conn.listen_interval))
1709 		goto out;
1710 
1711 	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1712 				    wl->conf.conn.suspend_wake_up_event,
1713 				    wl->conf.conn.suspend_listen_interval);
1714 
1715 	if (ret < 0)
1716 		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1717 out:
1718 	return ret;
1719 
1720 }
1721 
1722 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1723 					struct wl12xx_vif *wlvif,
1724 					struct cfg80211_wowlan *wow)
1725 {
1726 	int ret = 0;
1727 
1728 	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1729 		goto out;
1730 
1731 	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1732 	if (ret < 0)
1733 		goto out;
1734 
1735 	ret = wl1271_configure_wowlan(wl, wow);
1736 	if (ret < 0)
1737 		goto out;
1738 
1739 out:
1740 	return ret;
1741 
1742 }
1743 
1744 static int wl1271_configure_suspend(struct wl1271 *wl,
1745 				    struct wl12xx_vif *wlvif,
1746 				    struct cfg80211_wowlan *wow)
1747 {
1748 	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1749 		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1750 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1751 		return wl1271_configure_suspend_ap(wl, wlvif, wow);
1752 	return 0;
1753 }
1754 
1755 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1756 {
1757 	int ret = 0;
1758 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1759 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1760 
1761 	if ((!is_ap) && (!is_sta))
1762 		return;
1763 
1764 	if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1765 	    (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1766 		return;
1767 
1768 	wl1271_configure_wowlan(wl, NULL);
1769 
1770 	if (is_sta) {
1771 		if ((wl->conf.conn.suspend_wake_up_event ==
1772 		     wl->conf.conn.wake_up_event) &&
1773 		    (wl->conf.conn.suspend_listen_interval ==
1774 		     wl->conf.conn.listen_interval))
1775 			return;
1776 
1777 		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1778 				    wl->conf.conn.wake_up_event,
1779 				    wl->conf.conn.listen_interval);
1780 
1781 		if (ret < 0)
1782 			wl1271_error("resume: wake up conditions failed: %d",
1783 				     ret);
1784 
1785 	} else if (is_ap) {
1786 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1787 	}
1788 }
1789 
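/*
 * mac80211 suspend handler: configure the WoWLAN wake-up conditions for
 * each vif, quiesce Tx and the watchdog, prevent new IRQ work from being
 * queued and force the device into runtime suspend.
 */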
1790 static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
1791 					    struct cfg80211_wowlan *wow)
1792 {
1793 	struct wl1271 *wl = hw->priv;
1794 	struct wl12xx_vif *wlvif;
1795 	unsigned long flags;
1796 	int ret;
1797 
1798 	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1799 	WARN_ON(!wow);
1800 
1801 	/* we want to perform the recovery before suspending */
1802 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1803 		wl1271_warning("postponing suspend to perform recovery");
1804 		return -EBUSY;
1805 	}
1806 
1807 	wl1271_tx_flush(wl);
1808 
1809 	mutex_lock(&wl->mutex);
1810 
1811 	ret = pm_runtime_resume_and_get(wl->dev);
1812 	if (ret < 0) {
1813 		mutex_unlock(&wl->mutex);
1814 		return ret;
1815 	}
1816 
1817 	wl->wow_enabled = true;
1818 	wl12xx_for_each_wlvif(wl, wlvif) {
1819 		if (wlcore_is_p2p_mgmt(wlvif))
1820 			continue;
1821 
1822 		ret = wl1271_configure_suspend(wl, wlvif, wow);
1823 		if (ret < 0) {
1824 			goto out_sleep;
1825 		}
1826 	}
1827 
1828 	/* disable fast link flow control notifications from FW */
1829 	ret = wlcore_hw_interrupt_notify(wl, false);
1830 	if (ret < 0)
1831 		goto out_sleep;
1832 
1833 	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1834 	ret = wlcore_hw_rx_ba_filter(wl,
1835 				     !!wl->conf.conn.suspend_rx_ba_activity);
1836 	if (ret < 0)
1837 		goto out_sleep;
1838 
1839 out_sleep:
1840 	pm_runtime_put_noidle(wl->dev);
1841 	mutex_unlock(&wl->mutex);
1842 
1843 	if (ret < 0) {
1844 		wl1271_warning("couldn't prepare device to suspend");
1845 		return ret;
1846 	}
1847 
1848 	/* flush any remaining work */
1849 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1850 
1851 	flush_work(&wl->tx_work);
1852 
1853 	/*
1854 	 * Cancel the watchdog even if above tx_flush failed. We will detect
1855 	 * it on resume anyway.
1856 	 */
1857 	cancel_delayed_work(&wl->tx_watchdog_work);
1858 
1859 	/*
1860 	 * set suspended flag to avoid triggering a new threaded_irq
1861 	 * work.
1862 	 */
1863 	spin_lock_irqsave(&wl->wl_lock, flags);
1864 	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1865 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1866 
1867 	return pm_runtime_force_suspend(wl->dev);
1868 }
1869 
1870 static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
1871 {
1872 	struct wl1271 *wl = hw->priv;
1873 	struct wl12xx_vif *wlvif;
1874 	unsigned long flags;
1875 	bool run_irq_work = false, pending_recovery;
1876 	int ret;
1877 
1878 	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1879 		     wl->wow_enabled);
1880 	WARN_ON(!wl->wow_enabled);
1881 
1882 	ret = pm_runtime_force_resume(wl->dev);
1883 	if (ret < 0) {
1884 		wl1271_error("ELP wakeup failure!");
1885 		goto out_sleep;
1886 	}
1887 
1888 	/*
1889 	 * re-enable irq_work enqueuing, and call irq_work directly if
1890 	 * there is a pending work.
1891 	 */
1892 	spin_lock_irqsave(&wl->wl_lock, flags);
1893 	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1894 	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1895 		run_irq_work = true;
1896 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1897 
1898 	mutex_lock(&wl->mutex);
1899 
1900 	/* test the recovery flag before calling any SDIO functions */
1901 	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1902 				    &wl->flags);
1903 
1904 	if (run_irq_work) {
1905 		wl1271_debug(DEBUG_MAC80211,
1906 			     "run postponed irq_work directly");
1907 
1908 		/* don't talk to the HW if recovery is pending */
1909 		if (!pending_recovery) {
1910 			ret = wlcore_irq_locked(wl);
1911 			if (ret)
1912 				wl12xx_queue_recovery_work(wl);
1913 		}
1914 
1915 		wlcore_enable_interrupts(wl);
1916 	}
1917 
1918 	if (pending_recovery) {
1919 		wl1271_warning("queuing forgotten recovery on resume");
1920 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1921 		goto out_sleep;
1922 	}
1923 
1924 	ret = pm_runtime_resume_and_get(wl->dev);
1925 	if (ret < 0)
1926 		goto out;
1927 
1928 	wl12xx_for_each_wlvif(wl, wlvif) {
1929 		if (wlcore_is_p2p_mgmt(wlvif))
1930 			continue;
1931 
1932 		wl1271_configure_resume(wl, wlvif);
1933 	}
1934 
1935 	ret = wlcore_hw_interrupt_notify(wl, true);
1936 	if (ret < 0)
1937 		goto out_sleep;
1938 
1939 	/* stop dropping RX BA frames now that we have resumed */
1940 	ret = wlcore_hw_rx_ba_filter(wl, false);
1941 	if (ret < 0)
1942 		goto out_sleep;
1943 
1944 out_sleep:
1945 	pm_runtime_mark_last_busy(wl->dev);
1946 	pm_runtime_put_autosuspend(wl->dev);
1947 
1948 out:
1949 	wl->wow_enabled = false;
1950 
1951 	/*
1952 	 * Set a flag to re-init the watchdog on the first Tx after resume.
1953 	 * That way we avoid possible conditions where Tx-complete interrupts
1954 	 * fail to arrive and we perform a spurious recovery.
1955 	 */
1956 	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1957 	mutex_unlock(&wl->mutex);
1958 
1959 	return 0;
1960 }
1961 
1962 static int wl1271_op_start(struct ieee80211_hw *hw)
1963 {
1964 	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1965 
1966 	/*
1967 	 * We have to delay the booting of the hardware because
1968 	 * we need to know the local MAC address before downloading and
1969 	 * initializing the firmware. The MAC address cannot be changed
1970 	 * after boot, and without the proper MAC address, the firmware
1971 	 * will not function properly.
1972 	 *
1973 	 * The MAC address is first known when the corresponding interface
1974 	 * is added. That is where we will initialize the hardware.
1975 	 */
1976 
1977 	return 0;
1978 }
1979 
1980 static void wlcore_op_stop_locked(struct wl1271 *wl)
1981 {
1982 	int i;
1983 
1984 	if (wl->state == WLCORE_STATE_OFF) {
1985 		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1986 					&wl->flags))
1987 			wlcore_enable_interrupts(wl);
1988 
1989 		return;
1990 	}
1991 
1992 	/*
1993 	 * this must be before the cancel_work calls below, so that the work
1994 	 * functions don't perform further work.
1995 	 */
1996 	wl->state = WLCORE_STATE_OFF;
1997 
1998 	/*
1999 	 * Use the nosync variant to disable interrupts, so the mutex could be
2000 	 * held while doing so without deadlocking.
2001 	 */
2002 	wlcore_disable_interrupts_nosync(wl);
2003 
2004 	mutex_unlock(&wl->mutex);
2005 
2006 	wlcore_synchronize_interrupts(wl);
2007 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
2008 		cancel_work_sync(&wl->recovery_work);
2009 	wl1271_flush_deferred_work(wl);
2010 	cancel_delayed_work_sync(&wl->scan_complete_work);
2011 	cancel_work_sync(&wl->netstack_work);
2012 	cancel_work_sync(&wl->tx_work);
2013 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
2014 
2015 	/* let's notify MAC80211 about the remaining pending TX frames */
2016 	mutex_lock(&wl->mutex);
2017 	wl12xx_tx_reset(wl);
2018 
2019 	wl1271_power_off(wl);
2020 	/*
2021 	 * In case a recovery was scheduled, interrupts were disabled to avoid
2022 	 * an interrupt storm. Now that the power is down, it is safe to
2023 	 * re-enable interrupts to balance the disable depth
2024 	 */
2025 	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
2026 		wlcore_enable_interrupts(wl);
2027 
2028 	wl->band = NL80211_BAND_2GHZ;
2029 
2030 	wl->rx_counter = 0;
2031 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
2032 	wl->channel_type = NL80211_CHAN_NO_HT;
2033 	wl->tx_blocks_available = 0;
2034 	wl->tx_allocated_blocks = 0;
2035 	wl->tx_results_count = 0;
2036 	wl->tx_packets_count = 0;
2037 	wl->time_offset = 0;
2038 	wl->ap_fw_ps_map = 0;
2039 	wl->ap_ps_map = 0;
2040 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
2041 	memset(wl->roles_map, 0, sizeof(wl->roles_map));
2042 	memset(wl->links_map, 0, sizeof(wl->links_map));
2043 	memset(wl->roc_map, 0, sizeof(wl->roc_map));
2044 	memset(wl->session_ids, 0, sizeof(wl->session_ids));
2045 	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
2046 	wl->active_sta_count = 0;
2047 	wl->active_link_count = 0;
2048 
2049 	/* The system link is always allocated */
2050 	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
2051 	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
2052 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
2053 
2054 	/*
2055 	 * this is performed after the cancel_work calls and the associated
2056 	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
2057 	 * get executed before all these vars have been reset.
2058 	 */
2059 	wl->flags = 0;
2060 
2061 	wl->tx_blocks_freed = 0;
2062 
2063 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2064 		wl->tx_pkts_freed[i] = 0;
2065 		wl->tx_allocated_pkts[i] = 0;
2066 	}
2067 
2068 	wl1271_debugfs_reset(wl);
2069 
2070 	kfree(wl->raw_fw_status);
2071 	wl->raw_fw_status = NULL;
2072 	kfree(wl->fw_status);
2073 	wl->fw_status = NULL;
2074 	kfree(wl->tx_res_if);
2075 	wl->tx_res_if = NULL;
2076 	kfree(wl->target_mem_map);
2077 	wl->target_mem_map = NULL;
2078 
2079 	/*
2080 	 * FW channels must be re-calibrated after recovery,
2081 	 * save current Reg-Domain channel configuration and clear it.
2082 	 */
2083 	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2084 	       sizeof(wl->reg_ch_conf_pending));
2085 	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2086 }
2087 
2088 static void wlcore_op_stop(struct ieee80211_hw *hw, bool suspend)
2089 {
2090 	struct wl1271 *wl = hw->priv;
2091 
2092 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2093 
2094 	mutex_lock(&wl->mutex);
2095 
2096 	wlcore_op_stop_locked(wl);
2097 
2098 	mutex_unlock(&wl->mutex);
2099 }
2100 
2101 static void wlcore_channel_switch_work(struct work_struct *work)
2102 {
2103 	struct delayed_work *dwork;
2104 	struct wl1271 *wl;
2105 	struct ieee80211_vif *vif;
2106 	struct wl12xx_vif *wlvif;
2107 	int ret;
2108 
2109 	dwork = to_delayed_work(work);
2110 	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2111 	wl = wlvif->wl;
2112 
2113 	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2114 
2115 	mutex_lock(&wl->mutex);
2116 
2117 	if (unlikely(wl->state != WLCORE_STATE_ON))
2118 		goto out;
2119 
2120 	/* check the channel switch is still ongoing */
2121 	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2122 		goto out;
2123 
2124 	vif = wl12xx_wlvif_to_vif(wlvif);
2125 	ieee80211_chswitch_done(vif, false, 0);
2126 
2127 	ret = pm_runtime_resume_and_get(wl->dev);
2128 	if (ret < 0)
2129 		goto out;
2130 
2131 	wl12xx_cmd_stop_channel_switch(wl, wlvif);
2132 
2133 	pm_runtime_mark_last_busy(wl->dev);
2134 	pm_runtime_put_autosuspend(wl->dev);
2135 out:
2136 	mutex_unlock(&wl->mutex);
2137 }
2138 
2139 static void wlcore_connection_loss_work(struct work_struct *work)
2140 {
2141 	struct delayed_work *dwork;
2142 	struct wl1271 *wl;
2143 	struct ieee80211_vif *vif;
2144 	struct wl12xx_vif *wlvif;
2145 
2146 	dwork = to_delayed_work(work);
2147 	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2148 	wl = wlvif->wl;
2149 
2150 	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2151 
2152 	mutex_lock(&wl->mutex);
2153 
2154 	if (unlikely(wl->state != WLCORE_STATE_ON))
2155 		goto out;
2156 
2157 	/* Call mac80211 connection loss */
2158 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2159 		goto out;
2160 
2161 	vif = wl12xx_wlvif_to_vif(wlvif);
2162 	ieee80211_connection_loss(vif);
2163 out:
2164 	mutex_unlock(&wl->mutex);
2165 }
2166 
2167 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2168 {
2169 	struct delayed_work *dwork;
2170 	struct wl1271 *wl;
2171 	struct wl12xx_vif *wlvif;
2172 	unsigned long time_spare;
2173 	int ret;
2174 
2175 	dwork = to_delayed_work(work);
2176 	wlvif = container_of(dwork, struct wl12xx_vif,
2177 			     pending_auth_complete_work);
2178 	wl = wlvif->wl;
2179 
2180 	mutex_lock(&wl->mutex);
2181 
2182 	if (unlikely(wl->state != WLCORE_STATE_ON))
2183 		goto out;
2184 
2185 	/*
2186 	 * Make sure a second really passed since the last auth reply. Maybe
2187 	 * a second auth reply arrived while we were stuck on the mutex.
2188 	 * Check for a little less than the timeout to protect from scheduler
2189 	 * irregularities.
2190 	 */
2191 	time_spare = jiffies +
2192 			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2193 	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2194 		goto out;
2195 
2196 	ret = pm_runtime_resume_and_get(wl->dev);
2197 	if (ret < 0)
2198 		goto out;
2199 
2200 	/* cancel the ROC if active */
2201 	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2202 
2203 	pm_runtime_mark_last_busy(wl->dev);
2204 	pm_runtime_put_autosuspend(wl->dev);
2205 out:
2206 	mutex_unlock(&wl->mutex);
2207 }
2208 
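/*
 * Rate policies and keep-alive (KLV) templates are firmware resources tracked
 * with simple bitmaps: the allocators below hand out the lowest free index,
 * and the matching free helpers reset the index to the "invalid" (max) value.
 */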
2209 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2210 {
2211 	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2212 					WL12XX_MAX_RATE_POLICIES);
2213 	if (policy >= WL12XX_MAX_RATE_POLICIES)
2214 		return -EBUSY;
2215 
2216 	__set_bit(policy, wl->rate_policies_map);
2217 	*idx = policy;
2218 	return 0;
2219 }
2220 
2221 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2222 {
2223 	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2224 		return;
2225 
2226 	__clear_bit(*idx, wl->rate_policies_map);
2227 	*idx = WL12XX_MAX_RATE_POLICIES;
2228 }
2229 
2230 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2231 {
2232 	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2233 					WLCORE_MAX_KLV_TEMPLATES);
2234 	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2235 		return -EBUSY;
2236 
2237 	__set_bit(policy, wl->klv_templates_map);
2238 	*idx = policy;
2239 	return 0;
2240 }
2241 
2242 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2243 {
2244 	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2245 		return;
2246 
2247 	__clear_bit(*idx, wl->klv_templates_map);
2248 	*idx = WLCORE_MAX_KLV_TEMPLATES;
2249 }
2250 
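/* Map the mac80211 interface type (and the p2p flag) onto a firmware role. */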
2251 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2252 {
2253 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2254 
2255 	switch (wlvif->bss_type) {
2256 	case BSS_TYPE_AP_BSS:
2257 		if (wlvif->p2p)
2258 			return WL1271_ROLE_P2P_GO;
2259 		else if (ieee80211_vif_is_mesh(vif))
2260 			return WL1271_ROLE_MESH_POINT;
2261 		else
2262 			return WL1271_ROLE_AP;
2263 
2264 	case BSS_TYPE_STA_BSS:
2265 		if (wlvif->p2p)
2266 			return WL1271_ROLE_P2P_CL;
2267 		else
2268 			return WL1271_ROLE_STA;
2269 
2270 	case BSS_TYPE_IBSS:
2271 		return WL1271_ROLE_IBSS;
2272 
2273 	default:
2274 		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2275 	}
2276 	return WL12XX_INVALID_ROLE_TYPE;
2277 }
2278 
2279 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2280 {
2281 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2282 	int i;
2283 
2284 	/* clear everything but the persistent data */
2285 	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2286 
2287 	switch (ieee80211_vif_type_p2p(vif)) {
2288 	case NL80211_IFTYPE_P2P_CLIENT:
2289 		wlvif->p2p = 1;
2290 		fallthrough;
2291 	case NL80211_IFTYPE_STATION:
2292 	case NL80211_IFTYPE_P2P_DEVICE:
2293 		wlvif->bss_type = BSS_TYPE_STA_BSS;
2294 		break;
2295 	case NL80211_IFTYPE_ADHOC:
2296 		wlvif->bss_type = BSS_TYPE_IBSS;
2297 		break;
2298 	case NL80211_IFTYPE_P2P_GO:
2299 		wlvif->p2p = 1;
2300 		fallthrough;
2301 	case NL80211_IFTYPE_AP:
2302 	case NL80211_IFTYPE_MESH_POINT:
2303 		wlvif->bss_type = BSS_TYPE_AP_BSS;
2304 		break;
2305 	default:
2306 		wlvif->bss_type = MAX_BSS_TYPE;
2307 		return -EOPNOTSUPP;
2308 	}
2309 
2310 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2311 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2312 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2313 
2314 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2315 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2316 		/* init sta/ibss data */
2317 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2318 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2319 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2320 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2321 		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2322 		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2323 		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2324 		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2325 	} else {
2326 		/* init ap data */
2327 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2328 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2329 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2330 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2331 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2332 			wl12xx_allocate_rate_policy(wl,
2333 						&wlvif->ap.ucast_rate_idx[i]);
2334 		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2335 		/*
2336 		 * TODO: check if basic_rate shouldn't be
2337 		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2338 		 * instead (the same thing for STA above).
2339 		 */
2340 		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2341 		/* TODO: this seems to be used only for STA, check it */
2342 		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2343 	}
2344 
2345 	wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2346 	wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2347 	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2348 
2349 	/*
2350 	 * mac80211 configures some values globally, while we treat them
2351 	 * per-interface. thus, on init, we have to copy them from wl
2352 	 */
2353 	wlvif->band = wl->band;
2354 	wlvif->channel = wl->channel;
2355 	wlvif->power_level = wl->power_level;
2356 	wlvif->channel_type = wl->channel_type;
2357 
2358 	INIT_WORK(&wlvif->rx_streaming_enable_work,
2359 		  wl1271_rx_streaming_enable_work);
2360 	INIT_WORK(&wlvif->rx_streaming_disable_work,
2361 		  wl1271_rx_streaming_disable_work);
2362 	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2363 	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2364 			  wlcore_channel_switch_work);
2365 	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2366 			  wlcore_connection_loss_work);
2367 	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2368 			  wlcore_pending_auth_complete_work);
2369 	INIT_LIST_HEAD(&wlvif->list);
2370 
2371 	timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
2372 	return 0;
2373 }
2374 
2375 static int wl12xx_init_fw(struct wl1271 *wl)
2376 {
2377 	int retries = WL1271_BOOT_RETRIES;
2378 	bool booted = false;
2379 	struct wiphy *wiphy = wl->hw->wiphy;
2380 	int ret;
2381 
2382 	while (retries) {
2383 		retries--;
2384 		ret = wl12xx_chip_wakeup(wl, false);
2385 		if (ret < 0)
2386 			goto power_off;
2387 
2388 		ret = wl->ops->boot(wl);
2389 		if (ret < 0)
2390 			goto power_off;
2391 
2392 		ret = wl1271_hw_init(wl);
2393 		if (ret < 0)
2394 			goto irq_disable;
2395 
2396 		booted = true;
2397 		break;
2398 
2399 irq_disable:
2400 		mutex_unlock(&wl->mutex);
2401 		/* Unlocking the mutex in the middle of handling is
2402 		   inherently unsafe. In this case we deem it safe to do,
2403 		   because we need to let any possibly pending IRQ out of
2404 		   the system (and while we are WLCORE_STATE_OFF the IRQ
2405 		   work function will not do anything.) Also, any other
2406 		   possible concurrent operations will fail due to the
2407 		   current state, hence the wl1271 struct should be safe. */
2408 		wlcore_disable_interrupts(wl);
2409 		wl1271_flush_deferred_work(wl);
2410 		cancel_work_sync(&wl->netstack_work);
2411 		mutex_lock(&wl->mutex);
2412 power_off:
2413 		wl1271_power_off(wl);
2414 	}
2415 
2416 	if (!booted) {
2417 		wl1271_error("firmware boot failed despite %d retries",
2418 			     WL1271_BOOT_RETRIES);
2419 		goto out;
2420 	}
2421 
2422 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2423 
2424 	/* update hw/fw version info in wiphy struct */
2425 	wiphy->hw_version = wl->chip.id;
2426 	strscpy(wiphy->fw_version, wl->chip.fw_ver_str,
2427 		sizeof(wiphy->fw_version));
2428 
2429 	/*
2430 	 * Now we know if 11a is supported (info from the NVS), so disable
2431 	 * 11a channels if not supported
2432 	 */
2433 	if (!wl->enable_11a)
2434 		wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2435 
2436 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2437 		     wl->enable_11a ? "" : "not ");
2438 
2439 	wl->state = WLCORE_STATE_ON;
2440 out:
2441 	return ret;
2442 }
2443 
2444 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2445 {
2446 	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2447 }
2448 
2449 /*
2450  * Check whether a fw switch (i.e. moving from one loaded
2451  * fw to another) is needed. This function is also responsible
2452  * for updating wl->last_vif_count, so it must be called before
2453  * loading a non-plt fw (so the correct fw (single-role/multi-role)
2454  * will be used).
2455  */
2456 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2457 				  struct vif_counter_data vif_counter_data,
2458 				  bool add)
2459 {
2460 	enum wl12xx_fw_type current_fw = wl->fw_type;
2461 	u8 vif_count = vif_counter_data.counter;
2462 
2463 	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2464 		return false;
2465 
2466 	/* increase the vif count if this is a new vif */
2467 	if (add && !vif_counter_data.cur_vif_running)
2468 		vif_count++;
2469 
2470 	wl->last_vif_count = vif_count;
2471 
2472 	/* no need for fw change if the device is OFF */
2473 	if (wl->state == WLCORE_STATE_OFF)
2474 		return false;
2475 
2476 	/* no need for fw change if a single fw is used */
2477 	if (!wl->mr_fw_name)
2478 		return false;
2479 
2480 	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2481 		return true;
2482 	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2483 		return true;
2484 
2485 	return false;
2486 }
2487 
2488 /*
2489  * Enter "forced psm". Make sure the sta is in psm against the ap,
2490  * to make the fw switch a bit more disconnection-persistent.
2491  */
2492 static void wl12xx_force_active_psm(struct wl1271 *wl)
2493 {
2494 	struct wl12xx_vif *wlvif;
2495 
2496 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2497 		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2498 	}
2499 }
2500 
2501 struct wlcore_hw_queue_iter_data {
2502 	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2503 	/* current vif */
2504 	struct ieee80211_vif *vif;
2505 	/* is the current vif among those iterated */
2506 	bool cur_running;
2507 };
2508 
2509 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2510 				 struct ieee80211_vif *vif)
2511 {
2512 	struct wlcore_hw_queue_iter_data *iter_data = data;
2513 
2514 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2515 	    WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2516 		return;
2517 
2518 	if (iter_data->cur_running || vif == iter_data->vif) {
2519 		iter_data->cur_running = true;
2520 		return;
2521 	}
2522 
2523 	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2524 }
2525 
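/*
 * Each vif owns a contiguous block of NUM_TX_QUEUES mac80211 hw queues;
 * wlcore_hw_queue_iter() above marks the blocks already claimed by running
 * interfaces, and the first free block is handed out here. The cab (content
 * after beacon) queues live after all the per-vif blocks, one per possible
 * interface.
 */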
2526 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2527 					 struct wl12xx_vif *wlvif)
2528 {
2529 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2530 	struct wlcore_hw_queue_iter_data iter_data = {};
2531 	int i, q_base;
2532 
2533 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2534 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2535 		return 0;
2536 	}
2537 
2538 	iter_data.vif = vif;
2539 
2540 	/* mark all bits taken by active interfaces */
2541 	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2542 					IEEE80211_IFACE_ITER_RESUME_ALL,
2543 					wlcore_hw_queue_iter, &iter_data);
2544 
2545 	/* the current vif is already running in mac80211 (resume/recovery) */
2546 	if (iter_data.cur_running) {
2547 		wlvif->hw_queue_base = vif->hw_queue[0];
2548 		wl1271_debug(DEBUG_MAC80211,
2549 			     "using pre-allocated hw queue base %d",
2550 			     wlvif->hw_queue_base);
2551 
2552 		/* the interface might have changed type */
2553 		goto adjust_cab_queue;
2554 	}
2555 
2556 	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2557 				     WLCORE_NUM_MAC_ADDRESSES);
2558 	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2559 		return -EBUSY;
2560 
2561 	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2562 	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2563 		     wlvif->hw_queue_base);
2564 
2565 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2566 		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2567 		/* register hw queues in mac80211 */
2568 		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2569 	}
2570 
2571 adjust_cab_queue:
2572 	/* the last places are reserved for cab queues per interface */
2573 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2574 		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2575 				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2576 	else
2577 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2578 
2579 	return 0;
2580 }
2581 
2582 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2583 				   struct ieee80211_vif *vif)
2584 {
2585 	struct wl1271 *wl = hw->priv;
2586 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2587 	struct vif_counter_data vif_count;
2588 	int ret = 0;
2589 	u8 role_type;
2590 
2591 	if (wl->plt) {
2592 		wl1271_error("Adding Interface not allowed while in PLT mode");
2593 		return -EBUSY;
2594 	}
2595 
2596 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2597 			     IEEE80211_VIF_SUPPORTS_UAPSD |
2598 			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2599 
2600 	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2601 		     ieee80211_vif_type_p2p(vif), vif->addr);
2602 
2603 	wl12xx_get_vif_count(hw, vif, &vif_count);
2604 
2605 	mutex_lock(&wl->mutex);
2606 
2607 	/*
2608 	 * in some rare HW recovery corner cases it's possible to
2609 	 * get here before __wl1271_op_remove_interface is complete, so
2610 	 * opt out if that is the case.
2611 	 */
2612 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2613 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2614 		ret = -EBUSY;
2615 		goto out;
2616 	}
2617 
2618 
2619 	ret = wl12xx_init_vif_data(wl, vif);
2620 	if (ret < 0)
2621 		goto out;
2622 
2623 	wlvif->wl = wl;
2624 	role_type = wl12xx_get_role_type(wl, wlvif);
2625 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2626 		ret = -EINVAL;
2627 		goto out;
2628 	}
2629 
2630 	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2631 	if (ret < 0)
2632 		goto out;
2633 
2634 	/*
2635 	 * TODO: after the nvs issue is solved, move this block
2636 	 * to start(), and make sure here the driver is ON.
2637 	 */
2638 	if (wl->state == WLCORE_STATE_OFF) {
2639 		/*
2640 		 * we still need this in order to configure the fw
2641 		 * while uploading the nvs
2642 		 */
2643 		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2644 
2645 		ret = wl12xx_init_fw(wl);
2646 		if (ret < 0)
2647 			goto out;
2648 	}
2649 
2650 	/*
2651 	 * Call runtime PM only after possible wl12xx_init_fw() above
2652 	 * is done. Otherwise we do not have interrupts enabled.
2653 	 */
2654 	ret = pm_runtime_resume_and_get(wl->dev);
2655 	if (ret < 0)
2656 		goto out_unlock;
2657 
2658 	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2659 		wl12xx_force_active_psm(wl);
2660 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2661 		mutex_unlock(&wl->mutex);
2662 		wl1271_recovery_work(&wl->recovery_work);
2663 		return 0;
2664 	}
2665 
2666 	if (!wlcore_is_p2p_mgmt(wlvif)) {
2667 		ret = wl12xx_cmd_role_enable(wl, vif->addr,
2668 					     role_type, &wlvif->role_id);
2669 		if (ret < 0)
2670 			goto out;
2671 
2672 		ret = wl1271_init_vif_specific(wl, vif);
2673 		if (ret < 0)
2674 			goto out;
2675 
2676 	} else {
2677 		ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2678 					     &wlvif->dev_role_id);
2679 		if (ret < 0)
2680 			goto out;
2681 
2682 		/* needed mainly for configuring rate policies */
2683 		ret = wl1271_sta_hw_init(wl, wlvif);
2684 		if (ret < 0)
2685 			goto out;
2686 	}
2687 
2688 	list_add(&wlvif->list, &wl->wlvif_list);
2689 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2690 
2691 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2692 		wl->ap_count++;
2693 	else
2694 		wl->sta_count++;
2695 out:
2696 	pm_runtime_mark_last_busy(wl->dev);
2697 	pm_runtime_put_autosuspend(wl->dev);
2698 out_unlock:
2699 	mutex_unlock(&wl->mutex);
2700 
2701 	return ret;
2702 }
2703 
2704 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2705 					 struct ieee80211_vif *vif,
2706 					 bool reset_tx_queues)
2707 {
2708 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2709 	int i, ret;
2710 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2711 
2712 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2713 
2714 	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2715 		return;
2716 
2717 	/* because of hardware recovery, we may get here twice */
2718 	if (wl->state == WLCORE_STATE_OFF)
2719 		return;
2720 
2721 	wl1271_info("down");
2722 
2723 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2724 	    wl->scan_wlvif == wlvif) {
2725 		struct cfg80211_scan_info info = {
2726 			.aborted = true,
2727 		};
2728 
2729 		/*
2730 		 * Rearm the tx watchdog just before idling scan. This
2731 		 * prevents just-finished scans from triggering the watchdog
2732 		 */
2733 		wl12xx_rearm_tx_watchdog_locked(wl);
2734 
2735 		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2736 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2737 		wl->scan_wlvif = NULL;
2738 		wl->scan.req = NULL;
2739 		ieee80211_scan_completed(wl->hw, &info);
2740 	}
2741 
2742 	if (wl->sched_vif == wlvif)
2743 		wl->sched_vif = NULL;
2744 
2745 	if (wl->roc_vif == vif) {
2746 		wl->roc_vif = NULL;
2747 		ieee80211_remain_on_channel_expired(wl->hw);
2748 	}
2749 
2750 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2751 		/* disable active roles */
2752 		ret = pm_runtime_resume_and_get(wl->dev);
2753 		if (ret < 0)
2754 			goto deinit;
2755 
2756 		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2757 		    wlvif->bss_type == BSS_TYPE_IBSS) {
2758 			if (wl12xx_dev_role_started(wlvif))
2759 				wl12xx_stop_dev(wl, wlvif);
2760 		}
2761 
2762 		if (!wlcore_is_p2p_mgmt(wlvif)) {
2763 			ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2764 			if (ret < 0) {
2765 				pm_runtime_put_noidle(wl->dev);
2766 				goto deinit;
2767 			}
2768 		} else {
2769 			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2770 			if (ret < 0) {
2771 				pm_runtime_put_noidle(wl->dev);
2772 				goto deinit;
2773 			}
2774 		}
2775 
2776 		pm_runtime_mark_last_busy(wl->dev);
2777 		pm_runtime_put_autosuspend(wl->dev);
2778 	}
2779 deinit:
2780 	wl12xx_tx_reset_wlvif(wl, wlvif);
2781 
2782 	/* clear all hlids (except system_hlid) */
2783 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2784 
2785 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2786 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2787 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2788 		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2789 		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2790 		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2791 		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2792 	} else {
2793 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2794 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2795 		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2796 		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2797 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2798 			wl12xx_free_rate_policy(wl,
2799 						&wlvif->ap.ucast_rate_idx[i]);
2800 		wl1271_free_ap_keys(wl, wlvif);
2801 	}
2802 
2803 	dev_kfree_skb(wlvif->probereq);
2804 	wlvif->probereq = NULL;
2805 	if (wl->last_wlvif == wlvif)
2806 		wl->last_wlvif = NULL;
2807 	list_del(&wlvif->list);
2808 	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2809 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2810 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2811 
2812 	if (is_ap)
2813 		wl->ap_count--;
2814 	else
2815 		wl->sta_count--;
2816 
2817 	/*
2818 	 * Last AP removed, but stations remain: configure sleep auth according to STA.
2819 	 * Don't do this on unintended recovery.
2820 	 */
2821 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2822 	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2823 		goto unlock;
2824 
2825 	if (wl->ap_count == 0 && is_ap) {
2826 		/* mask ap events */
2827 		wl->event_mask &= ~wl->ap_event_mask;
2828 		wl1271_event_unmask(wl);
2829 	}
2830 
2831 	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2832 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2833 		/* Configure for power according to debugfs */
2834 		if (sta_auth != WL1271_PSM_ILLEGAL)
2835 			wl1271_acx_sleep_auth(wl, sta_auth);
2836 		/* Configure for ELP power saving */
2837 		else
2838 			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2839 	}
2840 
2841 unlock:
2842 	mutex_unlock(&wl->mutex);
2843 
2844 	del_timer_sync(&wlvif->rx_streaming_timer);
2845 	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2846 	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2847 	cancel_work_sync(&wlvif->rc_update_work);
2848 	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2849 	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2850 	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2851 
2852 	mutex_lock(&wl->mutex);
2853 }
2854 
2855 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2856 				       struct ieee80211_vif *vif)
2857 {
2858 	struct wl1271 *wl = hw->priv;
2859 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2860 	struct wl12xx_vif *iter;
2861 	struct vif_counter_data vif_count;
2862 
2863 	wl12xx_get_vif_count(hw, vif, &vif_count);
2864 	mutex_lock(&wl->mutex);
2865 
2866 	if (wl->state == WLCORE_STATE_OFF ||
2867 	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2868 		goto out;
2869 
2870 	/*
2871 	 * wl->vif can be null here if someone shuts down the interface
2872 	 * just when hardware recovery has been started.
2873 	 */
2874 	wl12xx_for_each_wlvif(wl, iter) {
2875 		if (iter != wlvif)
2876 			continue;
2877 
2878 		__wl1271_op_remove_interface(wl, vif, true);
2879 		break;
2880 	}
2881 	WARN_ON(iter != wlvif);
2882 	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2883 		wl12xx_force_active_psm(wl);
2884 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2885 		wl12xx_queue_recovery_work(wl);
2886 	}
2887 out:
2888 	mutex_unlock(&wl->mutex);
2889 }
2890 
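/*
 * Changing an interface type is implemented as remove + add. The
 * VIF_CHANGE_IN_PROGRESS flag keeps wl12xx_need_fw_change() from treating
 * this as a real change in the vif count and triggering a firmware switch.
 */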
2891 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2892 				      struct ieee80211_vif *vif,
2893 				      enum nl80211_iftype new_type, bool p2p)
2894 {
2895 	struct wl1271 *wl = hw->priv;
2896 	int ret;
2897 
2898 	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2899 	wl1271_op_remove_interface(hw, vif);
2900 
2901 	vif->type = new_type;
2902 	vif->p2p = p2p;
2903 	ret = wl1271_op_add_interface(hw, vif);
2904 
2905 	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2906 	return ret;
2907 }
2908 
2909 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2910 {
2911 	int ret;
2912 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2913 
2914 	/*
2915 	 * One of the side effects of the JOIN command is that it clears
2916 	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2917 	 * to a WPA/WPA2 access point will therefore kill the data-path.
2918 	 * Currently the only valid scenario for JOIN during association
2919 	 * is on roaming, in which case we will also be given new keys.
2920 	 * Keep the below message for now, unless it starts bothering
2921 	 * users who really like to roam a lot :)
2922 	 */
2923 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2924 		wl1271_info("JOIN while associated.");
2925 
2926 	/* clear encryption type */
2927 	wlvif->encryption_type = KEY_NONE;
2928 
2929 	if (is_ibss)
2930 		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2931 	else
2932 		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2933 
2934 	return ret;
2935 }
2936 
2937 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2938 			    int offset)
2939 {
2940 	u8 ssid_len;
2941 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2942 					 skb->len - offset);
2943 
2944 	if (!ptr) {
2945 		wl1271_error("No SSID in IEs!");
2946 		return -ENOENT;
2947 	}
2948 
2949 	ssid_len = ptr[1];
2950 	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2951 		wl1271_error("SSID is too long!");
2952 		return -EINVAL;
2953 	}
2954 
2955 	wlvif->ssid_len = ssid_len;
2956 	memcpy(wlvif->ssid, ptr+2, ssid_len);
2957 	return 0;
2958 }
2959 
2960 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2961 {
2962 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2963 	struct sk_buff *skb;
2964 	int ieoffset;
2965 
2966 	/* we currently only support setting the ssid from the ap probe req */
2967 	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2968 		return -EINVAL;
2969 
2970 	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2971 	if (!skb)
2972 		return -EINVAL;
2973 
2974 	ieoffset = offsetof(struct ieee80211_mgmt,
2975 			    u.probe_req.variable);
2976 	wl1271_ssid_set(wlvif, skb, ieoffset);
2977 	dev_kfree_skb(skb);
2978 
2979 	return 0;
2980 }
2981 
2982 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2983 			    struct ieee80211_bss_conf *bss_conf,
2984 			    u32 sta_rate_set)
2985 {
2986 	struct ieee80211_vif *vif = container_of(bss_conf, struct ieee80211_vif,
2987 						 bss_conf);
2988 	int ieoffset;
2989 	int ret;
2990 
2991 	wlvif->aid = vif->cfg.aid;
2992 	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chanreq.oper);
2993 	wlvif->beacon_int = bss_conf->beacon_int;
2994 	wlvif->wmm_enabled = bss_conf->qos;
2995 
2996 	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2997 
2998 	/*
2999 	 * with wl1271, we don't need to update the
3000 	 * beacon_int and dtim_period, because the firmware
3001 	 * updates it by itself when the first beacon is
3002 	 * received after a join.
3003 	 */
3004 	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
3005 	if (ret < 0)
3006 		return ret;
3007 
3008 	/*
3009 	 * Get a template for hardware connection maintenance
3010 	 */
3011 	dev_kfree_skb(wlvif->probereq);
3012 	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
3013 							wlvif,
3014 							NULL);
3015 	ieoffset = offsetof(struct ieee80211_mgmt,
3016 			    u.probe_req.variable);
3017 	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
3018 
3019 	/* enable the connection monitoring feature */
3020 	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
3021 	if (ret < 0)
3022 		return ret;
3023 
3024 	/*
3025 	 * The join command disables the keep-alive mode, shuts down its process,
3026 	 * and also clear the template config, so we need to reset it all after
3027 	 * the join. The acx_aid starts the keep-alive process, and the order
3028 	 * of the commands below is relevant.
3029 	 */
3030 	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
3031 	if (ret < 0)
3032 		return ret;
3033 
3034 	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
3035 	if (ret < 0)
3036 		return ret;
3037 
3038 	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
3039 	if (ret < 0)
3040 		return ret;
3041 
3042 	ret = wl1271_acx_keep_alive_config(wl, wlvif,
3043 					   wlvif->sta.klv_template_id,
3044 					   ACX_KEEP_ALIVE_TPL_VALID);
3045 	if (ret < 0)
3046 		return ret;
3047 
3048 	/*
3049 	 * The default fw psm configuration is AUTO, while mac80211 default
3050 	 * setting is off (ACTIVE), so sync the fw with the correct value.
3051 	 */
3052 	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
3053 	if (ret < 0)
3054 		return ret;
3055 
3056 	if (sta_rate_set) {
3057 		wlvif->rate_set =
3058 			wl1271_tx_enabled_rates_get(wl,
3059 						    sta_rate_set,
3060 						    wlvif->band);
3061 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3062 		if (ret < 0)
3063 			return ret;
3064 	}
3065 
3066 	return ret;
3067 }
3068 
3069 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3070 {
3071 	int ret;
3072 	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3073 
3074 	/* make sure we are associated (sta) */
3075 	if (sta &&
3076 	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3077 		return false;
3078 
3079 	/* make sure we are joined (ibss) */
3080 	if (!sta &&
3081 	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3082 		return false;
3083 
3084 	if (sta) {
3085 		/* use defaults when not associated */
3086 		wlvif->aid = 0;
3087 
3088 		/* free probe-request template */
3089 		dev_kfree_skb(wlvif->probereq);
3090 		wlvif->probereq = NULL;
3091 
3092 		/* disable connection monitor features */
3093 		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3094 		if (ret < 0)
3095 			return ret;
3096 
3097 		/* Disable the keep-alive feature */
3098 		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3099 		if (ret < 0)
3100 			return ret;
3101 
3102 		/* disable beacon filtering */
3103 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3104 		if (ret < 0)
3105 			return ret;
3106 	}
3107 
3108 	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3109 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3110 
3111 		wl12xx_cmd_stop_channel_switch(wl, wlvif);
3112 		ieee80211_chswitch_done(vif, false, 0);
3113 		cancel_delayed_work(&wlvif->channel_switch_work);
3114 	}
3115 
3116 	/* invalidate keep-alive template */
3117 	wl1271_acx_keep_alive_config(wl, wlvif,
3118 				     wlvif->sta.klv_template_id,
3119 				     ACX_KEEP_ALIVE_TPL_INVALID);
3120 
3121 	return 0;
3122 }
3123 
3124 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3125 {
3126 	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3127 	wlvif->rate_set = wlvif->basic_rate_set;
3128 }
3129 
3130 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3131 				   bool idle)
3132 {
3133 	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3134 
3135 	if (idle == cur_idle)
3136 		return;
3137 
3138 	if (idle) {
3139 		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3140 	} else {
3141 		/* The current firmware only supports sched_scan in idle */
3142 		if (wl->sched_vif == wlvif)
3143 			wl->ops->sched_scan_stop(wl, wlvif);
3144 
3145 		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3146 	}
3147 }
3148 
3149 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3150 			     struct ieee80211_conf *conf, u32 changed)
3151 {
3152 	int ret;
3153 
3154 	if (wlcore_is_p2p_mgmt(wlvif))
3155 		return 0;
3156 
3157 	if (conf->power_level != wlvif->power_level) {
3158 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3159 		if (ret < 0)
3160 			return ret;
3161 
3162 		wlvif->power_level = conf->power_level;
3163 	}
3164 
3165 	return 0;
3166 }
3167 
3168 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3169 {
3170 	struct wl1271 *wl = hw->priv;
3171 	struct wl12xx_vif *wlvif;
3172 	struct ieee80211_conf *conf = &hw->conf;
3173 	int ret = 0;
3174 
3175 	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3176 		     " changed 0x%x",
3177 		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3178 		     conf->power_level,
3179 		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3180 			 changed);
3181 
3182 	mutex_lock(&wl->mutex);
3183 
3184 	if (changed & IEEE80211_CONF_CHANGE_POWER)
3185 		wl->power_level = conf->power_level;
3186 
3187 	if (unlikely(wl->state != WLCORE_STATE_ON))
3188 		goto out;
3189 
3190 	ret = pm_runtime_resume_and_get(wl->dev);
3191 	if (ret < 0)
3192 		goto out;
3193 
3194 	/* configure each interface */
3195 	wl12xx_for_each_wlvif(wl, wlvif) {
3196 		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3197 		if (ret < 0)
3198 			goto out_sleep;
3199 	}
3200 
3201 out_sleep:
3202 	pm_runtime_mark_last_busy(wl->dev);
3203 	pm_runtime_put_autosuspend(wl->dev);
3204 
3205 out:
3206 	mutex_unlock(&wl->mutex);
3207 
3208 	return ret;
3209 }
3210 
3211 struct wl1271_filter_params {
3212 	bool enabled;
3213 	int mc_list_length;
3214 	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3215 };
3216 
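/*
 * Build a snapshot of the multicast list in atomic context; mac80211 passes
 * the returned value back to wl1271_op_configure_filter() as the opaque
 * 'multicast' cookie, where it is applied and then freed.
 */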
3217 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3218 				       struct netdev_hw_addr_list *mc_list)
3219 {
3220 	struct wl1271_filter_params *fp;
3221 	struct netdev_hw_addr *ha;
3222 
3223 	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3224 	if (!fp) {
3225 		wl1271_error("Out of memory setting filters.");
3226 		return 0;
3227 	}
3228 
3229 	/* update multicast filtering parameters */
3230 	fp->mc_list_length = 0;
3231 	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3232 		fp->enabled = false;
3233 	} else {
3234 		fp->enabled = true;
3235 		netdev_hw_addr_list_for_each(ha, mc_list) {
3236 			memcpy(fp->mc_list[fp->mc_list_length],
3237 					ha->addr, ETH_ALEN);
3238 			fp->mc_list_length++;
3239 		}
3240 	}
3241 
3242 	return (u64)(unsigned long)fp;
3243 }
3244 
3245 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3246 				  FIF_FCSFAIL | \
3247 				  FIF_BCN_PRBRESP_PROMISC | \
3248 				  FIF_CONTROL | \
3249 				  FIF_OTHER_BSS)
3250 
3251 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3252 				       unsigned int changed,
3253 				       unsigned int *total, u64 multicast)
3254 {
3255 	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3256 	struct wl1271 *wl = hw->priv;
3257 	struct wl12xx_vif *wlvif;
3258 
3259 	int ret;
3260 
3261 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3262 		     " total %x", changed, *total);
3263 
3264 	mutex_lock(&wl->mutex);
3265 
3266 	*total &= WL1271_SUPPORTED_FILTERS;
3267 	changed &= WL1271_SUPPORTED_FILTERS;
3268 
3269 	if (unlikely(wl->state != WLCORE_STATE_ON))
3270 		goto out;
3271 
3272 	ret = pm_runtime_resume_and_get(wl->dev);
3273 	if (ret < 0)
3274 		goto out;
3275 
3276 	wl12xx_for_each_wlvif(wl, wlvif) {
3277 		if (wlcore_is_p2p_mgmt(wlvif))
3278 			continue;
3279 
3280 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3281 			if (*total & FIF_ALLMULTI)
3282 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3283 								   false,
3284 								   NULL, 0);
3285 			else if (fp)
3286 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3287 							fp->enabled,
3288 							fp->mc_list,
3289 							fp->mc_list_length);
3290 			if (ret < 0)
3291 				goto out_sleep;
3292 		}
3293 
3294 		/*
3295 		 * If the interface is in AP mode and was created with allmulticast,
3296 		 * disable the firmware filters so that all multicast packets are
3297 		 * passed. This is mandatory for mDNS-based discovery protocols.
3298 		 */
3299 		if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3300 			if (*total & FIF_ALLMULTI) {
3301 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3302 							false,
3303 							NULL, 0);
3304 				if (ret < 0)
3305 					goto out_sleep;
3306 			}
3307 		}
3308 	}
3309 
3310 	/*
3311 	 * the fw doesn't provide an api to configure the filters. instead,
3312 	 * the filters configuration is based on the active roles / ROC
3313 	 * state.
3314 	 */
3315 
3316 out_sleep:
3317 	pm_runtime_mark_last_busy(wl->dev);
3318 	pm_runtime_put_autosuspend(wl->dev);
3319 
3320 out:
3321 	mutex_unlock(&wl->mutex);
3322 	kfree(fp);
3323 }
3324 
3325 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3326 				u8 id, u8 key_type, u8 key_size,
3327 				const u8 *key, u8 hlid, u32 tx_seq_32,
3328 				u16 tx_seq_16, bool is_pairwise)
3329 {
3330 	struct wl1271_ap_key *ap_key;
3331 	int i;
3332 
3333 	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3334 
3335 	if (key_size > MAX_KEY_SIZE)
3336 		return -EINVAL;
3337 
3338 	/*
3339 	 * Find next free entry in ap_keys. Also check we are not replacing
3340 	 * an existing key.
3341 	 */
3342 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3343 		if (wlvif->ap.recorded_keys[i] == NULL)
3344 			break;
3345 
3346 		if (wlvif->ap.recorded_keys[i]->id == id) {
3347 			wl1271_warning("trying to record key replacement");
3348 			return -EINVAL;
3349 		}
3350 	}
3351 
3352 	if (i == MAX_NUM_KEYS)
3353 		return -EBUSY;
3354 
3355 	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3356 	if (!ap_key)
3357 		return -ENOMEM;
3358 
3359 	ap_key->id = id;
3360 	ap_key->key_type = key_type;
3361 	ap_key->key_size = key_size;
3362 	memcpy(ap_key->key, key, key_size);
3363 	ap_key->hlid = hlid;
3364 	ap_key->tx_seq_32 = tx_seq_32;
3365 	ap_key->tx_seq_16 = tx_seq_16;
3366 	ap_key->is_pairwise = is_pairwise;
3367 
3368 	wlvif->ap.recorded_keys[i] = ap_key;
3369 	return 0;
3370 }
3371 
3372 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3373 {
3374 	int i;
3375 
3376 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3377 		kfree(wlvif->ap.recorded_keys[i]);
3378 		wlvif->ap.recorded_keys[i] = NULL;
3379 	}
3380 }
3381 
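/*
 * Replay the keys recorded by wl1271_record_ap_key() once the AP role (and
 * its broadcast hlid) is up, then drop the recorded list. If a WEP key was
 * among them, the default WEP key index is programmed as well.
 */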
3382 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3383 {
3384 	int i, ret = 0;
3385 	struct wl1271_ap_key *key;
3386 	bool wep_key_added = false;
3387 
3388 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3389 		u8 hlid;
3390 		if (wlvif->ap.recorded_keys[i] == NULL)
3391 			break;
3392 
3393 		key = wlvif->ap.recorded_keys[i];
3394 		hlid = key->hlid;
3395 		if (hlid == WL12XX_INVALID_LINK_ID)
3396 			hlid = wlvif->ap.bcast_hlid;
3397 
3398 		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3399 					    key->id, key->key_type,
3400 					    key->key_size, key->key,
3401 					    hlid, key->tx_seq_32,
3402 					    key->tx_seq_16, key->is_pairwise);
3403 		if (ret < 0)
3404 			goto out;
3405 
3406 		if (key->key_type == KEY_WEP)
3407 			wep_key_added = true;
3408 	}
3409 
3410 	if (wep_key_added) {
3411 		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3412 						     wlvif->ap.bcast_hlid);
3413 		if (ret < 0)
3414 			goto out;
3415 	}
3416 
3417 out:
3418 	wl1271_free_ap_keys(wl, wlvif);
3419 	return ret;
3420 }
3421 
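/*
 * Low-level key programming. On the AP side keys are addressed by hlid
 * (per-station or broadcast) and are only recorded for later replay if the
 * AP role has not been started yet; on the STA side keys are addressed by
 * the peer MAC address and some remove requests are silently ignored (see
 * the comments below).
 */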
3422 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3423 		       u16 action, u8 id, u8 key_type,
3424 		       u8 key_size, const u8 *key, u32 tx_seq_32,
3425 		       u16 tx_seq_16, struct ieee80211_sta *sta,
3426 		       bool is_pairwise)
3427 {
3428 	int ret;
3429 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3430 
3431 	if (is_ap) {
3432 		struct wl1271_station *wl_sta;
3433 		u8 hlid;
3434 
3435 		if (sta) {
3436 			wl_sta = (struct wl1271_station *)sta->drv_priv;
3437 			hlid = wl_sta->hlid;
3438 		} else {
3439 			hlid = wlvif->ap.bcast_hlid;
3440 		}
3441 
3442 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3443 			/*
3444 			 * We do not support removing keys after AP shutdown.
3445 			 * Pretend we do to make mac80211 happy.
3446 			 */
3447 			if (action != KEY_ADD_OR_REPLACE)
3448 				return 0;
3449 
3450 			ret = wl1271_record_ap_key(wl, wlvif, id,
3451 					     key_type, key_size,
3452 					     key, hlid, tx_seq_32,
3453 					     tx_seq_16, is_pairwise);
3454 		} else {
3455 			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3456 					     id, key_type, key_size,
3457 					     key, hlid, tx_seq_32,
3458 					     tx_seq_16, is_pairwise);
3459 		}
3460 
3461 		if (ret < 0)
3462 			return ret;
3463 	} else {
3464 		const u8 *addr;
3465 		static const u8 bcast_addr[ETH_ALEN] = {
3466 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3467 		};
3468 
3469 		addr = sta ? sta->addr : bcast_addr;
3470 
3471 		if (is_zero_ether_addr(addr)) {
3472 			/* We don't support TX-only encryption */
3473 			return -EOPNOTSUPP;
3474 		}
3475 
3476 		/* The wl1271 does not allow removing unicast keys - they
3477 		   will be cleared automatically on next CMD_JOIN. Ignore the
3478 		   request silently, as we don't want mac80211 to emit
3479 		   an error message. */
3480 		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3481 			return 0;
3482 
3483 		/* don't remove key if hlid was already deleted */
3484 		if (action == KEY_REMOVE &&
3485 		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3486 			return 0;
3487 
3488 		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3489 					     id, key_type, key_size,
3490 					     key, addr, tx_seq_32,
3491 					     tx_seq_16);
3492 		if (ret < 0)
3493 			return ret;
3494 
3495 	}
3496 
3497 	return 0;
3498 }
3499 
3500 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3501 			     struct ieee80211_vif *vif,
3502 			     struct ieee80211_sta *sta,
3503 			     struct ieee80211_key_conf *key_conf)
3504 {
3505 	struct wl1271 *wl = hw->priv;
3506 	int ret;
3507 	bool might_change_spare =
3508 		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3509 		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3510 
3511 	if (might_change_spare) {
3512 		/*
3513 		 * stop the queues and flush to ensure the next packets are
3514 		 * in sync with FW spare block accounting
3515 		 */
3516 		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3517 		wl1271_tx_flush(wl);
3518 	}
3519 
3520 	mutex_lock(&wl->mutex);
3521 
3522 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3523 		ret = -EAGAIN;
3524 		goto out_wake_queues;
3525 	}
3526 
3527 	ret = pm_runtime_resume_and_get(wl->dev);
3528 	if (ret < 0)
3529 		goto out_wake_queues;
3530 
3531 	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3532 
3533 	pm_runtime_mark_last_busy(wl->dev);
3534 	pm_runtime_put_autosuspend(wl->dev);
3535 
3536 out_wake_queues:
3537 	if (might_change_spare)
3538 		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3539 
3540 	mutex_unlock(&wl->mutex);
3541 
3542 	return ret;
3543 }
3544 
3545 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3546 		   struct ieee80211_vif *vif,
3547 		   struct ieee80211_sta *sta,
3548 		   struct ieee80211_key_conf *key_conf)
3549 {
3550 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3551 	int ret;
3552 	u32 tx_seq_32 = 0;
3553 	u16 tx_seq_16 = 0;
3554 	u8 key_type;
3555 	u8 hlid;
3556 	bool is_pairwise;
3557 
3558 	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3559 
3560 	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3561 	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3562 		     key_conf->cipher, key_conf->keyidx,
3563 		     key_conf->keylen, key_conf->flags);
3564 	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3565 
3566 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3567 		if (sta) {
3568 			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3569 			hlid = wl_sta->hlid;
3570 		} else {
3571 			hlid = wlvif->ap.bcast_hlid;
3572 		}
3573 	else
3574 		hlid = wlvif->sta.hlid;
3575 
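	/*
	 * Seed the key's TX security sequence counter from the per-link
	 * total_freed_pkts counter, split into the 32-bit high and 16-bit
	 * low parts the firmware expects.
	 */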
3576 	if (hlid != WL12XX_INVALID_LINK_ID) {
3577 		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3578 		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3579 		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3580 	}
3581 
3582 	switch (key_conf->cipher) {
3583 	case WLAN_CIPHER_SUITE_WEP40:
3584 	case WLAN_CIPHER_SUITE_WEP104:
3585 		key_type = KEY_WEP;
3586 
3587 		key_conf->hw_key_idx = key_conf->keyidx;
3588 		break;
3589 	case WLAN_CIPHER_SUITE_TKIP:
3590 		key_type = KEY_TKIP;
3591 		key_conf->hw_key_idx = key_conf->keyidx;
3592 		break;
3593 	case WLAN_CIPHER_SUITE_CCMP:
3594 		key_type = KEY_AES;
3595 		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3596 		break;
3597 	case WL1271_CIPHER_SUITE_GEM:
3598 		key_type = KEY_GEM;
3599 		break;
3600 	default:
3601 		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3602 
3603 		return -EOPNOTSUPP;
3604 	}
3605 
3606 	is_pairwise = key_conf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
3607 
3608 	switch (cmd) {
3609 	case SET_KEY:
3610 		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3611 				 key_conf->keyidx, key_type,
3612 				 key_conf->keylen, key_conf->key,
3613 				 tx_seq_32, tx_seq_16, sta, is_pairwise);
3614 		if (ret < 0) {
3615 			wl1271_error("Could not add or replace key");
3616 			return ret;
3617 		}
3618 
3619 		/* Store AP encryption key type */
3620 		if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3621 			wlvif->encryption_type = key_type;
3622 
3623 		/*
3624 		 * reconfiguring arp response if the unicast (or common)
3625 		 * encryption key type was changed
3626 		 */
3627 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3628 		    (sta || key_type == KEY_WEP) &&
3629 		    wlvif->encryption_type != key_type) {
3630 			wlvif->encryption_type = key_type;
3631 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3632 			if (ret < 0) {
3633 				wl1271_warning("build arp rsp failed: %d", ret);
3634 				return ret;
3635 			}
3636 		}
3637 		break;
3638 
3639 	case DISABLE_KEY:
3640 		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3641 				     key_conf->keyidx, key_type,
3642 				     key_conf->keylen, key_conf->key,
3643 				     0, 0, sta, is_pairwise);
3644 		if (ret < 0) {
3645 			wl1271_error("Could not remove key");
3646 			return ret;
3647 		}
3648 		break;
3649 
3650 	default:
3651 		wl1271_error("Unsupported key cmd 0x%x", cmd);
3652 		return -EOPNOTSUPP;
3653 	}
3654 
3655 	return ret;
3656 }
3657 EXPORT_SYMBOL_GPL(wlcore_set_key);
3658 
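/*
 * mac80211 callback: record the default key index and, when WEP is in
 * use, program the default WEP TX key in the FW.
 */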
wl1271_op_set_default_key_idx(struct ieee80211_hw * hw,struct ieee80211_vif * vif,int key_idx)3659 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3660 					  struct ieee80211_vif *vif,
3661 					  int key_idx)
3662 {
3663 	struct wl1271 *wl = hw->priv;
3664 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3665 	int ret;
3666 
3667 	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3668 		     key_idx);
3669 
3670 	/* we don't handle unsetting of default key */
3671 	if (key_idx == -1)
3672 		return;
3673 
3674 	mutex_lock(&wl->mutex);
3675 
3676 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3677 		ret = -EAGAIN;
3678 		goto out_unlock;
3679 	}
3680 
3681 	ret = pm_runtime_resume_and_get(wl->dev);
3682 	if (ret < 0)
3683 		goto out_unlock;
3684 
3685 	wlvif->default_key = key_idx;
3686 
3687 	/* the default WEP key needs to be configured at least once */
3688 	if (wlvif->encryption_type == KEY_WEP) {
3689 		ret = wl12xx_cmd_set_default_wep_key(wl,
3690 				key_idx,
3691 				wlvif->sta.hlid);
3692 		if (ret < 0)
3693 			goto out_sleep;
3694 	}
3695 
3696 out_sleep:
3697 	pm_runtime_mark_last_busy(wl->dev);
3698 	pm_runtime_put_autosuspend(wl->dev);
3699 
3700 out_unlock:
3701 	mutex_unlock(&wl->mutex);
3702 }
3703 
wlcore_regdomain_config(struct wl1271 * wl)3704 void wlcore_regdomain_config(struct wl1271 *wl)
3705 {
3706 	int ret;
3707 
3708 	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3709 		return;
3710 
3711 	mutex_lock(&wl->mutex);
3712 
3713 	if (unlikely(wl->state != WLCORE_STATE_ON))
3714 		goto out;
3715 
3716 	ret = pm_runtime_resume_and_get(wl->dev);
3717 	if (ret < 0)
3718 		goto out;
3719 
3720 	ret = wlcore_cmd_regdomain_config_locked(wl);
3721 	if (ret < 0) {
3722 		wl12xx_queue_recovery_work(wl);
3723 		goto out;
3724 	}
3725 
3726 	pm_runtime_mark_last_busy(wl->dev);
3727 	pm_runtime_put_autosuspend(wl->dev);
3728 out:
3729 	mutex_unlock(&wl->mutex);
3730 }
3731 
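/*
 * mac80211 hw_scan callback: start a one-shot scan. The request is
 * refused with -EBUSY while any role is in ROC.
 */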
wl1271_op_hw_scan(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_scan_request * hw_req)3732 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3733 			     struct ieee80211_vif *vif,
3734 			     struct ieee80211_scan_request *hw_req)
3735 {
3736 	struct cfg80211_scan_request *req = &hw_req->req;
3737 	struct wl1271 *wl = hw->priv;
3738 	int ret;
3739 	u8 *ssid = NULL;
3740 	size_t len = 0;
3741 
3742 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3743 
3744 	if (req->n_ssids) {
3745 		ssid = req->ssids[0].ssid;
3746 		len = req->ssids[0].ssid_len;
3747 	}
3748 
3749 	mutex_lock(&wl->mutex);
3750 
3751 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3752 		/*
3753 		 * We cannot return -EBUSY here because cfg80211 will expect
3754 		 * a call to ieee80211_scan_completed if we do - in this case
3755 		 * there won't be any call.
3756 		 */
3757 		ret = -EAGAIN;
3758 		goto out;
3759 	}
3760 
3761 	ret = pm_runtime_resume_and_get(wl->dev);
3762 	if (ret < 0)
3763 		goto out;
3764 
3765 	/* fail if there is any role in ROC */
3766 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3767 		/* don't allow scanning right now */
3768 		ret = -EBUSY;
3769 		goto out_sleep;
3770 	}
3771 
3772 	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3773 out_sleep:
3774 	pm_runtime_mark_last_busy(wl->dev);
3775 	pm_runtime_put_autosuspend(wl->dev);
3776 out:
3777 	mutex_unlock(&wl->mutex);
3778 
3779 	return ret;
3780 }
3781 
wl1271_op_cancel_hw_scan(struct ieee80211_hw * hw,struct ieee80211_vif * vif)3782 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3783 				     struct ieee80211_vif *vif)
3784 {
3785 	struct wl1271 *wl = hw->priv;
3786 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3787 	struct cfg80211_scan_info info = {
3788 		.aborted = true,
3789 	};
3790 	int ret;
3791 
3792 	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3793 
3794 	mutex_lock(&wl->mutex);
3795 
3796 	if (unlikely(wl->state != WLCORE_STATE_ON))
3797 		goto out;
3798 
3799 	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3800 		goto out;
3801 
3802 	ret = pm_runtime_resume_and_get(wl->dev);
3803 	if (ret < 0)
3804 		goto out;
3805 
3806 	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3807 		ret = wl->ops->scan_stop(wl, wlvif);
3808 		if (ret < 0)
3809 			goto out_sleep;
3810 	}
3811 
3812 	/*
3813 	 * Rearm the tx watchdog just before idling scan. This
3814 	 * prevents just-finished scans from triggering the watchdog
3815 	 */
3816 	wl12xx_rearm_tx_watchdog_locked(wl);
3817 
3818 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3819 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3820 	wl->scan_wlvif = NULL;
3821 	wl->scan.req = NULL;
3822 	ieee80211_scan_completed(wl->hw, &info);
3823 
3824 out_sleep:
3825 	pm_runtime_mark_last_busy(wl->dev);
3826 	pm_runtime_put_autosuspend(wl->dev);
3827 out:
3828 	mutex_unlock(&wl->mutex);
3829 
3830 	cancel_delayed_work_sync(&wl->scan_complete_work);
3831 }
3832 
wl1271_op_sched_scan_start(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct cfg80211_sched_scan_request * req,struct ieee80211_scan_ies * ies)3833 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3834 				      struct ieee80211_vif *vif,
3835 				      struct cfg80211_sched_scan_request *req,
3836 				      struct ieee80211_scan_ies *ies)
3837 {
3838 	struct wl1271 *wl = hw->priv;
3839 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3840 	int ret;
3841 
3842 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3843 
3844 	mutex_lock(&wl->mutex);
3845 
3846 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3847 		ret = -EAGAIN;
3848 		goto out;
3849 	}
3850 
3851 	ret = pm_runtime_resume_and_get(wl->dev);
3852 	if (ret < 0)
3853 		goto out;
3854 
3855 	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3856 	if (ret < 0)
3857 		goto out_sleep;
3858 
3859 	wl->sched_vif = wlvif;
3860 
3861 out_sleep:
3862 	pm_runtime_mark_last_busy(wl->dev);
3863 	pm_runtime_put_autosuspend(wl->dev);
3864 out:
3865 	mutex_unlock(&wl->mutex);
3866 	return ret;
3867 }
3868 
wl1271_op_sched_scan_stop(struct ieee80211_hw * hw,struct ieee80211_vif * vif)3869 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3870 				     struct ieee80211_vif *vif)
3871 {
3872 	struct wl1271 *wl = hw->priv;
3873 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3874 	int ret;
3875 
3876 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3877 
3878 	mutex_lock(&wl->mutex);
3879 
3880 	if (unlikely(wl->state != WLCORE_STATE_ON))
3881 		goto out;
3882 
3883 	ret = pm_runtime_resume_and_get(wl->dev);
3884 	if (ret < 0)
3885 		goto out;
3886 
3887 	wl->ops->sched_scan_stop(wl, wlvif);
3888 
3889 	pm_runtime_mark_last_busy(wl->dev);
3890 	pm_runtime_put_autosuspend(wl->dev);
3891 out:
3892 	mutex_unlock(&wl->mutex);
3893 
3894 	return 0;
3895 }
3896 
wl1271_op_set_frag_threshold(struct ieee80211_hw * hw,u32 value)3897 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3898 {
3899 	struct wl1271 *wl = hw->priv;
3900 	int ret = 0;
3901 
3902 	mutex_lock(&wl->mutex);
3903 
3904 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3905 		ret = -EAGAIN;
3906 		goto out;
3907 	}
3908 
3909 	ret = pm_runtime_resume_and_get(wl->dev);
3910 	if (ret < 0)
3911 		goto out;
3912 
3913 	ret = wl1271_acx_frag_threshold(wl, value);
3914 	if (ret < 0)
3915 		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3916 
3917 	pm_runtime_mark_last_busy(wl->dev);
3918 	pm_runtime_put_autosuspend(wl->dev);
3919 
3920 out:
3921 	mutex_unlock(&wl->mutex);
3922 
3923 	return ret;
3924 }
3925 
wl1271_op_set_rts_threshold(struct ieee80211_hw * hw,u32 value)3926 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3927 {
3928 	struct wl1271 *wl = hw->priv;
3929 	struct wl12xx_vif *wlvif;
3930 	int ret = 0;
3931 
3932 	mutex_lock(&wl->mutex);
3933 
3934 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3935 		ret = -EAGAIN;
3936 		goto out;
3937 	}
3938 
3939 	ret = pm_runtime_resume_and_get(wl->dev);
3940 	if (ret < 0)
3941 		goto out;
3942 
3943 	wl12xx_for_each_wlvif(wl, wlvif) {
3944 		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3945 		if (ret < 0)
3946 			wl1271_warning("set rts threshold failed: %d", ret);
3947 	}
3948 	pm_runtime_mark_last_busy(wl->dev);
3949 	pm_runtime_put_autosuspend(wl->dev);
3950 
3951 out:
3952 	mutex_unlock(&wl->mutex);
3953 
3954 	return ret;
3955 }
3956 
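/* Strip the first IE matching @eid from the frame, searching from @ieoffset */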
wl12xx_remove_ie(struct sk_buff * skb,u8 eid,int ieoffset)3957 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3958 {
3959 	int len;
3960 	const u8 *next, *end = skb->data + skb->len;
3961 	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3962 					skb->len - ieoffset);
3963 	if (!ie)
3964 		return;
3965 	len = ie[1] + 2;
3966 	next = ie + len;
3967 	memmove(ie, next, end - next);
3968 	skb_trim(skb, skb->len - len);
3969 }
3970 
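/* Strip the first vendor IE matching @oui/@oui_type from the frame */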
wl12xx_remove_vendor_ie(struct sk_buff * skb,unsigned int oui,u8 oui_type,int ieoffset)3971 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3972 					    unsigned int oui, u8 oui_type,
3973 					    int ieoffset)
3974 {
3975 	int len;
3976 	const u8 *next, *end = skb->data + skb->len;
3977 	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3978 					       skb->data + ieoffset,
3979 					       skb->len - ieoffset);
3980 	if (!ie)
3981 		return;
3982 	len = ie[1] + 2;
3983 	next = ie + len;
3984 	memmove(ie, next, end - next);
3985 	skb_trim(skb, skb->len - len);
3986 }
3987 
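/*
 * Upload the probe response provided by mac80211 as the AP probe
 * response template.
 */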
wl1271_ap_set_probe_resp_tmpl(struct wl1271 * wl,u32 rates,struct ieee80211_vif * vif)3988 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3989 					 struct ieee80211_vif *vif)
3990 {
3991 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3992 	struct sk_buff *skb;
3993 	int ret;
3994 
3995 	skb = ieee80211_proberesp_get(wl->hw, vif);
3996 	if (!skb)
3997 		return -EOPNOTSUPP;
3998 
3999 	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4000 				      CMD_TEMPL_AP_PROBE_RESPONSE,
4001 				      skb->data,
4002 				      skb->len, 0,
4003 				      rates);
4004 	dev_kfree_skb(skb);
4005 
4006 	if (ret < 0)
4007 		goto out;
4008 
4009 	wl1271_debug(DEBUG_AP, "probe response updated");
4010 	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
4011 
4012 out:
4013 	return ret;
4014 }
4015 
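/*
 * Legacy path used when the vif has no SSID stored (hidden-SSID AP):
 * patch the real SSID from the interface configuration into the probe
 * response data before uploading it as the template.
 */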
wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 * wl,struct ieee80211_vif * vif,u8 * probe_rsp_data,size_t probe_rsp_len,u32 rates)4016 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
4017 					     struct ieee80211_vif *vif,
4018 					     u8 *probe_rsp_data,
4019 					     size_t probe_rsp_len,
4020 					     u32 rates)
4021 {
4022 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4023 	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
4024 	int ssid_ie_offset, ie_offset, templ_len;
4025 	const u8 *ptr;
4026 
4027 	/* no need to change probe response if the SSID is set correctly */
4028 	if (wlvif->ssid_len > 0)
4029 		return wl1271_cmd_template_set(wl, wlvif->role_id,
4030 					       CMD_TEMPL_AP_PROBE_RESPONSE,
4031 					       probe_rsp_data,
4032 					       probe_rsp_len, 0,
4033 					       rates);
4034 
4035 	if (probe_rsp_len + vif->cfg.ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
4036 		wl1271_error("probe_rsp template too big");
4037 		return -EINVAL;
4038 	}
4039 
4040 	/* start searching from IE offset */
4041 	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
4042 
4043 	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
4044 			       probe_rsp_len - ie_offset);
4045 	if (!ptr) {
4046 		wl1271_error("No SSID in beacon!");
4047 		return -EINVAL;
4048 	}
4049 
4050 	ssid_ie_offset = ptr - probe_rsp_data;
4051 	ptr += (ptr[1] + 2);
4052 
4053 	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
4054 
4055 	/* insert SSID from bss_conf */
4056 	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
4057 	probe_rsp_templ[ssid_ie_offset + 1] = vif->cfg.ssid_len;
4058 	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
4059 	       vif->cfg.ssid, vif->cfg.ssid_len);
4060 	templ_len = ssid_ie_offset + 2 + vif->cfg.ssid_len;
4061 
4062 	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + vif->cfg.ssid_len,
4063 	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
4064 	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
4065 
4066 	return wl1271_cmd_template_set(wl, wlvif->role_id,
4067 				       CMD_TEMPL_AP_PROBE_RESPONSE,
4068 				       probe_rsp_templ,
4069 				       templ_len, 0,
4070 				       rates);
4071 }
4072 
wl1271_bss_erp_info_changed(struct wl1271 * wl,struct ieee80211_vif * vif,struct ieee80211_bss_conf * bss_conf,u32 changed)4073 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
4074 				       struct ieee80211_vif *vif,
4075 				       struct ieee80211_bss_conf *bss_conf,
4076 				       u32 changed)
4077 {
4078 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4079 	int ret = 0;
4080 
4081 	if (changed & BSS_CHANGED_ERP_SLOT) {
4082 		if (bss_conf->use_short_slot)
4083 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4084 		else
4085 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4086 		if (ret < 0) {
4087 			wl1271_warning("Set slot time failed %d", ret);
4088 			goto out;
4089 		}
4090 	}
4091 
4092 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4093 		if (bss_conf->use_short_preamble)
4094 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4095 		else
4096 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4097 	}
4098 
4099 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4100 		if (bss_conf->use_cts_prot)
4101 			ret = wl1271_acx_cts_protect(wl, wlvif,
4102 						     CTSPROTECT_ENABLE);
4103 		else
4104 			ret = wl1271_acx_cts_protect(wl, wlvif,
4105 						     CTSPROTECT_DISABLE);
4106 		if (ret < 0) {
4107 			wl1271_warning("Set ctsprotect failed %d", ret);
4108 			goto out;
4109 		}
4110 	}
4111 
4112 out:
4113 	return ret;
4114 }
4115 
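/*
 * Upload the current beacon as the beacon template and, unless userspace
 * has set an explicit probe response, derive a probe response template
 * from the beacon (with the TIM and P2P IEs removed).
 */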
wlcore_set_beacon_template(struct wl1271 * wl,struct ieee80211_vif * vif,bool is_ap)4116 static int wlcore_set_beacon_template(struct wl1271 *wl,
4117 				      struct ieee80211_vif *vif,
4118 				      bool is_ap)
4119 {
4120 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4121 	struct ieee80211_hdr *hdr;
4122 	u32 min_rate;
4123 	int ret;
4124 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4125 	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif, 0);
4126 	u16 tmpl_id;
4127 
4128 	if (!beacon) {
4129 		ret = -EINVAL;
4130 		goto out;
4131 	}
4132 
4133 	wl1271_debug(DEBUG_MASTER, "beacon updated");
4134 
4135 	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4136 	if (ret < 0) {
4137 		dev_kfree_skb(beacon);
4138 		goto out;
4139 	}
4140 	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4141 	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4142 		CMD_TEMPL_BEACON;
4143 	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4144 				      beacon->data,
4145 				      beacon->len, 0,
4146 				      min_rate);
4147 	if (ret < 0) {
4148 		dev_kfree_skb(beacon);
4149 		goto out;
4150 	}
4151 
4152 	wlvif->wmm_enabled =
4153 		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4154 					WLAN_OUI_TYPE_MICROSOFT_WMM,
4155 					beacon->data + ieoffset,
4156 					beacon->len - ieoffset);
4157 
4158 	/*
4159 	 * In case we already have a probe-resp beacon set explicitly
4160 	 * by userspace, don't use the beacon data.
4161 	 */
4162 	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4163 		goto end_bcn;
4164 
4165 	/* remove TIM ie from probe response */
4166 	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4167 
4168 	/*
4169 	 * remove p2p ie from probe response.
4170 	 * the fw responds to probe requests that don't include
4171 	 * the p2p ie. probe requests with the p2p ie will be passed on,
4172 	 * and will be responded to by the supplicant (the spec
4173 	 * forbids including the p2p ie when responding to probe
4174 	 * requests that didn't include it).
4175 	 */
4176 	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4177 				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4178 
4179 	hdr = (struct ieee80211_hdr *) beacon->data;
4180 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4181 					 IEEE80211_STYPE_PROBE_RESP);
4182 	if (is_ap)
4183 		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4184 							   beacon->data,
4185 							   beacon->len,
4186 							   min_rate);
4187 	else
4188 		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4189 					      CMD_TEMPL_PROBE_RESPONSE,
4190 					      beacon->data,
4191 					      beacon->len, 0,
4192 					      min_rate);
4193 end_bcn:
4194 	dev_kfree_skb(beacon);
4195 	if (ret < 0)
4196 		goto out;
4197 
4198 out:
4199 	return ret;
4200 }
4201 
wl1271_bss_beacon_info_changed(struct wl1271 * wl,struct ieee80211_vif * vif,struct ieee80211_bss_conf * bss_conf,u32 changed)4202 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4203 					  struct ieee80211_vif *vif,
4204 					  struct ieee80211_bss_conf *bss_conf,
4205 					  u32 changed)
4206 {
4207 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4208 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4209 	int ret = 0;
4210 
4211 	if (changed & BSS_CHANGED_BEACON_INT) {
4212 		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4213 			bss_conf->beacon_int);
4214 
4215 		wlvif->beacon_int = bss_conf->beacon_int;
4216 	}
4217 
4218 	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4219 		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4220 
4221 		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4222 	}
4223 
4224 	if (changed & BSS_CHANGED_BEACON) {
4225 		ret = wlcore_set_beacon_template(wl, vif, is_ap);
4226 		if (ret < 0)
4227 			goto out;
4228 
4229 		if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4230 				       &wlvif->flags)) {
4231 			ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4232 			if (ret < 0)
4233 				goto out;
4234 		}
4235 	}
4236 out:
4237 	if (ret != 0)
4238 		wl1271_error("beacon info change failed: %d", ret);
4239 	return ret;
4240 }
4241 
4242 /* AP mode changes */
wl1271_bss_info_changed_ap(struct wl1271 * wl,struct ieee80211_vif * vif,struct ieee80211_bss_conf * bss_conf,u32 changed)4243 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4244 				       struct ieee80211_vif *vif,
4245 				       struct ieee80211_bss_conf *bss_conf,
4246 				       u32 changed)
4247 {
4248 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4249 	int ret = 0;
4250 
4251 	if (changed & BSS_CHANGED_BASIC_RATES) {
4252 		u32 rates = bss_conf->basic_rates;
4253 
4254 		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4255 								 wlvif->band);
4256 		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4257 							wlvif->basic_rate_set);
4258 
4259 		ret = wl1271_init_ap_rates(wl, wlvif);
4260 		if (ret < 0) {
4261 			wl1271_error("AP rate policy change failed %d", ret);
4262 			goto out;
4263 		}
4264 
4265 		ret = wl1271_ap_init_templates(wl, vif);
4266 		if (ret < 0)
4267 			goto out;
4268 
4269 		/* No need to set probe resp template for mesh */
4270 		if (!ieee80211_vif_is_mesh(vif)) {
4271 			ret = wl1271_ap_set_probe_resp_tmpl(wl,
4272 							    wlvif->basic_rate,
4273 							    vif);
4274 			if (ret < 0)
4275 				goto out;
4276 		}
4277 
4278 		ret = wlcore_set_beacon_template(wl, vif, true);
4279 		if (ret < 0)
4280 			goto out;
4281 	}
4282 
4283 	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4284 	if (ret < 0)
4285 		goto out;
4286 
4287 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
4288 		if (bss_conf->enable_beacon) {
4289 			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4290 				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4291 				if (ret < 0)
4292 					goto out;
4293 
4294 				ret = wl1271_ap_init_hwenc(wl, wlvif);
4295 				if (ret < 0)
4296 					goto out;
4297 
4298 				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4299 				wl1271_debug(DEBUG_AP, "started AP");
4300 			}
4301 		} else {
4302 			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4303 				/*
4304 				 * AP might be in ROC in case we have just
4305 				 * sent an auth reply. Handle it.
4306 				 */
4307 				if (test_bit(wlvif->role_id, wl->roc_map))
4308 					wl12xx_croc(wl, wlvif->role_id);
4309 
4310 				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4311 				if (ret < 0)
4312 					goto out;
4313 
4314 				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4315 				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4316 					  &wlvif->flags);
4317 				wl1271_debug(DEBUG_AP, "stopped AP");
4318 			}
4319 		}
4320 	}
4321 
4322 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4323 	if (ret < 0)
4324 		goto out;
4325 
4326 	/* Handle HT information change */
4327 	if ((changed & BSS_CHANGED_HT) &&
4328 	    (bss_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4329 		ret = wl1271_acx_set_ht_information(wl, wlvif,
4330 					bss_conf->ht_operation_mode);
4331 		if (ret < 0) {
4332 			wl1271_warning("Set ht information failed %d", ret);
4333 			goto out;
4334 		}
4335 	}
4336 
4337 out:
4338 	return;
4339 }
4340 
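/*
 * Configure rates, null-data/QoS-null templates and SSID for the newly
 * set BSSID, and mark the role as in use.
 */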
wlcore_set_bssid(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_vif * vif,u32 sta_rate_set)4341 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4342 			    struct ieee80211_vif *vif, u32 sta_rate_set)
4343 {
4344 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
4345 	u32 rates;
4346 	int ret;
4347 
4348 	wl1271_debug(DEBUG_MAC80211,
4349 	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4350 	     bss_conf->bssid, vif->cfg.aid,
4351 	     bss_conf->beacon_int,
4352 	     bss_conf->basic_rates, sta_rate_set);
4353 
4354 	wlvif->beacon_int = bss_conf->beacon_int;
4355 	rates = bss_conf->basic_rates;
4356 	wlvif->basic_rate_set =
4357 		wl1271_tx_enabled_rates_get(wl, rates,
4358 					    wlvif->band);
4359 	wlvif->basic_rate =
4360 		wl1271_tx_min_rate_get(wl,
4361 				       wlvif->basic_rate_set);
4362 
4363 	if (sta_rate_set)
4364 		wlvif->rate_set =
4365 			wl1271_tx_enabled_rates_get(wl,
4366 						sta_rate_set,
4367 						wlvif->band);
4368 
4369 	/* we only support sched_scan while not connected */
4370 	if (wl->sched_vif == wlvif)
4371 		wl->ops->sched_scan_stop(wl, wlvif);
4372 
4373 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4374 	if (ret < 0)
4375 		return ret;
4376 
4377 	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4378 	if (ret < 0)
4379 		return ret;
4380 
4381 	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4382 	if (ret < 0)
4383 		return ret;
4384 
4385 	wlcore_set_ssid(wl, wlvif);
4386 
4387 	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4388 
4389 	return 0;
4390 }
4391 
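/*
 * Revert to the per-band minimum rates and stop the STA role when the
 * BSSID is cleared.
 */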
wlcore_clear_bssid(struct wl1271 * wl,struct wl12xx_vif * wlvif)4392 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4393 {
4394 	int ret;
4395 
4396 	/* revert to minimum rates for the current band */
4397 	wl1271_set_band_rate(wl, wlvif);
4398 	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4399 
4400 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4401 	if (ret < 0)
4402 		return ret;
4403 
4404 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4405 	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4406 		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4407 		if (ret < 0)
4408 			return ret;
4409 	}
4410 
4411 	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4412 	return 0;
4413 }
4414 /* STA/IBSS mode changes */
wl1271_bss_info_changed_sta(struct wl1271 * wl,struct ieee80211_vif * vif,struct ieee80211_bss_conf * bss_conf,u32 changed)4415 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4416 					struct ieee80211_vif *vif,
4417 					struct ieee80211_bss_conf *bss_conf,
4418 					u32 changed)
4419 {
4420 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4421 	bool do_join = false;
4422 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4423 	bool ibss_joined = false;
4424 	u32 sta_rate_set = 0;
4425 	int ret;
4426 	struct ieee80211_sta *sta;
4427 	bool sta_exists = false;
4428 	struct ieee80211_sta_ht_cap sta_ht_cap;
4429 
4430 	if (is_ibss) {
4431 		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4432 						     changed);
4433 		if (ret < 0)
4434 			goto out;
4435 	}
4436 
4437 	if (changed & BSS_CHANGED_IBSS) {
4438 		if (vif->cfg.ibss_joined) {
4439 			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4440 			ibss_joined = true;
4441 		} else {
4442 			wlcore_unset_assoc(wl, wlvif);
4443 			wl12xx_cmd_role_stop_sta(wl, wlvif);
4444 		}
4445 	}
4446 
4447 	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4448 		do_join = true;
4449 
4450 	/* Need to update the SSID (for filtering etc) */
4451 	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4452 		do_join = true;
4453 
4454 	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4455 		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4456 			     bss_conf->enable_beacon ? "enabled" : "disabled");
4457 
4458 		do_join = true;
4459 	}
4460 
4461 	if (changed & BSS_CHANGED_IDLE && !is_ibss)
4462 		wl1271_sta_handle_idle(wl, wlvif, vif->cfg.idle);
4463 
4464 	if (changed & BSS_CHANGED_CQM) {
4465 		bool enable = false;
4466 		if (bss_conf->cqm_rssi_thold)
4467 			enable = true;
4468 		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4469 						  bss_conf->cqm_rssi_thold,
4470 						  bss_conf->cqm_rssi_hyst);
4471 		if (ret < 0)
4472 			goto out;
4473 		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4474 	}
4475 
4476 	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4477 		       BSS_CHANGED_ASSOC)) {
4478 		rcu_read_lock();
4479 		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4480 		if (sta) {
4481 			u8 *rx_mask = sta->deflink.ht_cap.mcs.rx_mask;
4482 
4483 			/* save the supp_rates of the ap */
4484 			sta_rate_set = sta->deflink.supp_rates[wlvif->band];
4485 			if (sta->deflink.ht_cap.ht_supported)
4486 				sta_rate_set |=
4487 					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4488 					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4489 			sta_ht_cap = sta->deflink.ht_cap;
4490 			sta_exists = true;
4491 		}
4492 
4493 		rcu_read_unlock();
4494 	}
4495 
4496 	if (changed & BSS_CHANGED_BSSID) {
4497 		if (!is_zero_ether_addr(bss_conf->bssid)) {
4498 			ret = wlcore_set_bssid(wl, wlvif, vif,
4499 					       sta_rate_set);
4500 			if (ret < 0)
4501 				goto out;
4502 
4503 			/* Need to update the BSSID (for filtering etc) */
4504 			do_join = true;
4505 		} else {
4506 			ret = wlcore_clear_bssid(wl, wlvif);
4507 			if (ret < 0)
4508 				goto out;
4509 		}
4510 	}
4511 
4512 	if (changed & BSS_CHANGED_IBSS) {
4513 		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4514 			     vif->cfg.ibss_joined);
4515 
4516 		if (vif->cfg.ibss_joined) {
4517 			u32 rates = bss_conf->basic_rates;
4518 			wlvif->basic_rate_set =
4519 				wl1271_tx_enabled_rates_get(wl, rates,
4520 							    wlvif->band);
4521 			wlvif->basic_rate =
4522 				wl1271_tx_min_rate_get(wl,
4523 						       wlvif->basic_rate_set);
4524 
4525 			/* by default, use 11b + OFDM rates */
4526 			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4527 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4528 			if (ret < 0)
4529 				goto out;
4530 		}
4531 	}
4532 
4533 	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4534 		/* enable beacon filtering */
4535 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4536 		if (ret < 0)
4537 			goto out;
4538 	}
4539 
4540 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4541 	if (ret < 0)
4542 		goto out;
4543 
4544 	if (do_join) {
4545 		ret = wlcore_join(wl, wlvif);
4546 		if (ret < 0) {
4547 			wl1271_warning("cmd join failed %d", ret);
4548 			goto out;
4549 		}
4550 	}
4551 
4552 	if (changed & BSS_CHANGED_ASSOC) {
4553 		if (vif->cfg.assoc) {
4554 			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4555 					       sta_rate_set);
4556 			if (ret < 0)
4557 				goto out;
4558 
4559 			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4560 				wl12xx_set_authorized(wl, wlvif);
4561 		} else {
4562 			wlcore_unset_assoc(wl, wlvif);
4563 		}
4564 	}
4565 
4566 	if (changed & BSS_CHANGED_PS) {
4567 		if (vif->cfg.ps &&
4568 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4569 		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4570 			int ps_mode;
4571 			char *ps_mode_str;
4572 
4573 			if (wl->conf.conn.forced_ps) {
4574 				ps_mode = STATION_POWER_SAVE_MODE;
4575 				ps_mode_str = "forced";
4576 			} else {
4577 				ps_mode = STATION_AUTO_PS_MODE;
4578 				ps_mode_str = "auto";
4579 			}
4580 
4581 			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4582 
4583 			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4584 			if (ret < 0)
4585 				wl1271_warning("enter %s ps failed %d",
4586 					       ps_mode_str, ret);
4587 		} else if (!vif->cfg.ps &&
4588 			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4589 			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4590 
4591 			ret = wl1271_ps_set_mode(wl, wlvif,
4592 						 STATION_ACTIVE_MODE);
4593 			if (ret < 0)
4594 				wl1271_warning("exit auto ps failed %d", ret);
4595 		}
4596 	}
4597 
4598 	/* Handle new association with HT. Do this after join. */
4599 	if (sta_exists) {
4600 		bool enabled =
4601 			bss_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT;
4602 
4603 		ret = wlcore_hw_set_peer_cap(wl,
4604 					     &sta_ht_cap,
4605 					     enabled,
4606 					     wlvif->rate_set,
4607 					     wlvif->sta.hlid);
4608 		if (ret < 0) {
4609 			wl1271_warning("Set ht cap failed %d", ret);
4610 			goto out;
4611 
4612 		}
4613 
4614 		if (enabled) {
4615 			ret = wl1271_acx_set_ht_information(wl, wlvif,
4616 						bss_conf->ht_operation_mode);
4617 			if (ret < 0) {
4618 				wl1271_warning("Set ht information failed %d",
4619 					       ret);
4620 				goto out;
4621 			}
4622 		}
4623 	}
4624 
4625 	/* Handle arp filtering. Done after join. */
4626 	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4627 	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4628 		__be32 addr = vif->cfg.arp_addr_list[0];
4629 		wlvif->sta.qos = bss_conf->qos;
4630 		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4631 
4632 		if (vif->cfg.arp_addr_cnt == 1 && vif->cfg.assoc) {
4633 			wlvif->ip_addr = addr;
4634 			/*
4635 			 * The template should have been configured only upon
4636 			 * association. However, it seems that the correct IP
4637 			 * isn't being set (when sending), so we have to
4638 			 * reconfigure the template upon every IP change.
4639 			 */
4640 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4641 			if (ret < 0) {
4642 				wl1271_warning("build arp rsp failed: %d", ret);
4643 				goto out;
4644 			}
4645 
4646 			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4647 				(ACX_ARP_FILTER_ARP_FILTERING |
4648 				 ACX_ARP_FILTER_AUTO_ARP),
4649 				addr);
4650 		} else {
4651 			wlvif->ip_addr = 0;
4652 			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4653 		}
4654 
4655 		if (ret < 0)
4656 			goto out;
4657 	}
4658 
4659 out:
4660 	return;
4661 }
4662 
wl1271_op_bss_info_changed(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_bss_conf * bss_conf,u64 changed)4663 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4664 				       struct ieee80211_vif *vif,
4665 				       struct ieee80211_bss_conf *bss_conf,
4666 				       u64 changed)
4667 {
4668 	struct wl1271 *wl = hw->priv;
4669 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4670 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4671 	int ret;
4672 
4673 	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4674 		     wlvif->role_id, (int)changed);
4675 
4676 	/*
4677 	 * make sure to cancel pending disconnections if our association
4678 	 * state changed
4679 	 */
4680 	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4681 		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4682 
4683 	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4684 	    !bss_conf->enable_beacon)
4685 		wl1271_tx_flush(wl);
4686 
4687 	mutex_lock(&wl->mutex);
4688 
4689 	if (unlikely(wl->state != WLCORE_STATE_ON))
4690 		goto out;
4691 
4692 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4693 		goto out;
4694 
4695 	ret = pm_runtime_resume_and_get(wl->dev);
4696 	if (ret < 0)
4697 		goto out;
4698 
4699 	if ((changed & BSS_CHANGED_TXPOWER) &&
4700 	    bss_conf->txpower != wlvif->power_level) {
4701 
4702 		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4703 		if (ret < 0)
4704 			goto out;
4705 
4706 		wlvif->power_level = bss_conf->txpower;
4707 	}
4708 
4709 	if (is_ap)
4710 		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4711 	else
4712 		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4713 
4714 	pm_runtime_mark_last_busy(wl->dev);
4715 	pm_runtime_put_autosuspend(wl->dev);
4716 
4717 out:
4718 	mutex_unlock(&wl->mutex);
4719 }
4720 
wlcore_op_add_chanctx(struct ieee80211_hw * hw,struct ieee80211_chanctx_conf * ctx)4721 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4722 				 struct ieee80211_chanctx_conf *ctx)
4723 {
4724 	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4725 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4726 		     cfg80211_get_chandef_type(&ctx->def));
4727 	return 0;
4728 }
4729 
wlcore_op_remove_chanctx(struct ieee80211_hw * hw,struct ieee80211_chanctx_conf * ctx)4730 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4731 				     struct ieee80211_chanctx_conf *ctx)
4732 {
4733 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4734 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4735 		     cfg80211_get_chandef_type(&ctx->def));
4736 }
4737 
wlcore_op_change_chanctx(struct ieee80211_hw * hw,struct ieee80211_chanctx_conf * ctx,u32 changed)4738 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4739 				     struct ieee80211_chanctx_conf *ctx,
4740 				     u32 changed)
4741 {
4742 	struct wl1271 *wl = hw->priv;
4743 	struct wl12xx_vif *wlvif;
4744 	int ret;
4745 	int channel = ieee80211_frequency_to_channel(
4746 		ctx->def.chan->center_freq);
4747 
4748 	wl1271_debug(DEBUG_MAC80211,
4749 		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4750 		     channel, cfg80211_get_chandef_type(&ctx->def), changed);
4751 
4752 	mutex_lock(&wl->mutex);
4753 
4754 	ret = pm_runtime_resume_and_get(wl->dev);
4755 	if (ret < 0)
4756 		goto out;
4757 
4758 	wl12xx_for_each_wlvif(wl, wlvif) {
4759 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4760 
4761 		rcu_read_lock();
4762 		if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != ctx) {
4763 			rcu_read_unlock();
4764 			continue;
4765 		}
4766 		rcu_read_unlock();
4767 
4768 		/* start radar if needed */
4769 		if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4770 		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
4771 		    ctx->radar_enabled && !wlvif->radar_enabled &&
4772 		    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4773 			wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4774 			wlcore_hw_set_cac(wl, wlvif, true);
4775 			wlvif->radar_enabled = true;
4776 		}
4777 	}
4778 
4779 	pm_runtime_mark_last_busy(wl->dev);
4780 	pm_runtime_put_autosuspend(wl->dev);
4781 out:
4782 	mutex_unlock(&wl->mutex);
4783 }
4784 
wlcore_op_assign_vif_chanctx(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_bss_conf * link_conf,struct ieee80211_chanctx_conf * ctx)4785 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4786 					struct ieee80211_vif *vif,
4787 					struct ieee80211_bss_conf *link_conf,
4788 					struct ieee80211_chanctx_conf *ctx)
4789 {
4790 	struct wl1271 *wl = hw->priv;
4791 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4792 	int channel = ieee80211_frequency_to_channel(
4793 		ctx->def.chan->center_freq);
4794 	int ret = -EINVAL;
4795 
4796 	wl1271_debug(DEBUG_MAC80211,
4797 		     "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4798 		     wlvif->role_id, channel,
4799 		     cfg80211_get_chandef_type(&ctx->def),
4800 		     ctx->radar_enabled, ctx->def.chan->dfs_state);
4801 
4802 	mutex_lock(&wl->mutex);
4803 
4804 	if (unlikely(wl->state != WLCORE_STATE_ON))
4805 		goto out;
4806 
4807 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4808 		goto out;
4809 
4810 	ret = pm_runtime_resume_and_get(wl->dev);
4811 	if (ret < 0)
4812 		goto out;
4813 
4814 	wlvif->band = ctx->def.chan->band;
4815 	wlvif->channel = channel;
4816 	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4817 
4818 	/* update default rates according to the band */
4819 	wl1271_set_band_rate(wl, wlvif);
4820 
4821 	if (ctx->radar_enabled &&
4822 	    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4823 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4824 		wlcore_hw_set_cac(wl, wlvif, true);
4825 		wlvif->radar_enabled = true;
4826 	}
4827 
4828 	pm_runtime_mark_last_busy(wl->dev);
4829 	pm_runtime_put_autosuspend(wl->dev);
4830 out:
4831 	mutex_unlock(&wl->mutex);
4832 
4833 	return 0;
4834 }
4835 
wlcore_op_unassign_vif_chanctx(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_bss_conf * link_conf,struct ieee80211_chanctx_conf * ctx)4836 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4837 					   struct ieee80211_vif *vif,
4838 					   struct ieee80211_bss_conf *link_conf,
4839 					   struct ieee80211_chanctx_conf *ctx)
4840 {
4841 	struct wl1271 *wl = hw->priv;
4842 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4843 	int ret;
4844 
4845 	wl1271_debug(DEBUG_MAC80211,
4846 		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4847 		     wlvif->role_id,
4848 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4849 		     cfg80211_get_chandef_type(&ctx->def));
4850 
4851 	wl1271_tx_flush(wl);
4852 
4853 	mutex_lock(&wl->mutex);
4854 
4855 	if (unlikely(wl->state != WLCORE_STATE_ON))
4856 		goto out;
4857 
4858 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4859 		goto out;
4860 
4861 	ret = pm_runtime_resume_and_get(wl->dev);
4862 	if (ret < 0)
4863 		goto out;
4864 
4865 	if (wlvif->radar_enabled) {
4866 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4867 		wlcore_hw_set_cac(wl, wlvif, false);
4868 		wlvif->radar_enabled = false;
4869 	}
4870 
4871 	pm_runtime_mark_last_busy(wl->dev);
4872 	pm_runtime_put_autosuspend(wl->dev);
4873 out:
4874 	mutex_unlock(&wl->mutex);
4875 }
4876 
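/*
 * Move an AP vif to a new channel context: stop any running radar
 * detection, switch band/channel, and restart radar detection if the new
 * context requires it.
 */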
__wlcore_switch_vif_chan(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_chanctx_conf * new_ctx)4877 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4878 				    struct wl12xx_vif *wlvif,
4879 				    struct ieee80211_chanctx_conf *new_ctx)
4880 {
4881 	int channel = ieee80211_frequency_to_channel(
4882 		new_ctx->def.chan->center_freq);
4883 
4884 	wl1271_debug(DEBUG_MAC80211,
4885 		     "switch vif (role %d) %d -> %d chan_type: %d",
4886 		     wlvif->role_id, wlvif->channel, channel,
4887 		     cfg80211_get_chandef_type(&new_ctx->def));
4888 
4889 	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4890 		return 0;
4891 
4892 	WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4893 
4894 	if (wlvif->radar_enabled) {
4895 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4896 		wlcore_hw_set_cac(wl, wlvif, false);
4897 		wlvif->radar_enabled = false;
4898 	}
4899 
4900 	wlvif->band = new_ctx->def.chan->band;
4901 	wlvif->channel = channel;
4902 	wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4903 
4904 	/* start radar if needed */
4905 	if (new_ctx->radar_enabled) {
4906 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4907 		wlcore_hw_set_cac(wl, wlvif, true);
4908 		wlvif->radar_enabled = true;
4909 	}
4910 
4911 	return 0;
4912 }
4913 
4914 static int
wlcore_op_switch_vif_chanctx(struct ieee80211_hw * hw,struct ieee80211_vif_chanctx_switch * vifs,int n_vifs,enum ieee80211_chanctx_switch_mode mode)4915 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4916 			     struct ieee80211_vif_chanctx_switch *vifs,
4917 			     int n_vifs,
4918 			     enum ieee80211_chanctx_switch_mode mode)
4919 {
4920 	struct wl1271 *wl = hw->priv;
4921 	int i, ret;
4922 
4923 	wl1271_debug(DEBUG_MAC80211,
4924 		     "mac80211 switch chanctx n_vifs %d mode %d",
4925 		     n_vifs, mode);
4926 
4927 	mutex_lock(&wl->mutex);
4928 
4929 	ret = pm_runtime_resume_and_get(wl->dev);
4930 	if (ret < 0)
4931 		goto out;
4932 
4933 	for (i = 0; i < n_vifs; i++) {
4934 		struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4935 
4936 		ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4937 		if (ret)
4938 			goto out_sleep;
4939 	}
4940 out_sleep:
4941 	pm_runtime_mark_last_busy(wl->dev);
4942 	pm_runtime_put_autosuspend(wl->dev);
4943 out:
4944 	mutex_unlock(&wl->mutex);
4945 
4946 	return 0;
4947 }
4948 
wl1271_op_conf_tx(struct ieee80211_hw * hw,struct ieee80211_vif * vif,unsigned int link_id,u16 queue,const struct ieee80211_tx_queue_params * params)4949 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4950 			     struct ieee80211_vif *vif,
4951 			     unsigned int link_id, u16 queue,
4952 			     const struct ieee80211_tx_queue_params *params)
4953 {
4954 	struct wl1271 *wl = hw->priv;
4955 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4956 	u8 ps_scheme;
4957 	int ret = 0;
4958 
4959 	if (wlcore_is_p2p_mgmt(wlvif))
4960 		return 0;
4961 
4962 	mutex_lock(&wl->mutex);
4963 
4964 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4965 
4966 	if (params->uapsd)
4967 		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4968 	else
4969 		ps_scheme = CONF_PS_SCHEME_LEGACY;
4970 
4971 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4972 		goto out;
4973 
4974 	ret = pm_runtime_resume_and_get(wl->dev);
4975 	if (ret < 0)
4976 		goto out;
4977 
4978 	/*
4979 	 * the txop is configured in units of 32us by mac80211,
4980 	 * but we need it in microseconds
4981 	 */
4982 	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4983 				params->cw_min, params->cw_max,
4984 				params->aifs, params->txop << 5);
4985 	if (ret < 0)
4986 		goto out_sleep;
4987 
4988 	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4989 				 CONF_CHANNEL_TYPE_EDCF,
4990 				 wl1271_tx_get_queue(queue),
4991 				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4992 				 0, 0);
4993 
4994 out_sleep:
4995 	pm_runtime_mark_last_busy(wl->dev);
4996 	pm_runtime_put_autosuspend(wl->dev);
4997 
4998 out:
4999 	mutex_unlock(&wl->mutex);
5000 
5001 	return ret;
5002 }
5003 
wl1271_op_get_tsf(struct ieee80211_hw * hw,struct ieee80211_vif * vif)5004 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
5005 			     struct ieee80211_vif *vif)
5006 {
5007 
5008 	struct wl1271 *wl = hw->priv;
5009 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5010 	u64 mactime = ULLONG_MAX;
5011 	int ret;
5012 
5013 	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
5014 
5015 	mutex_lock(&wl->mutex);
5016 
5017 	if (unlikely(wl->state != WLCORE_STATE_ON))
5018 		goto out;
5019 
5020 	ret = pm_runtime_resume_and_get(wl->dev);
5021 	if (ret < 0)
5022 		goto out;
5023 
5024 	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
5025 	if (ret < 0)
5026 		goto out_sleep;
5027 
5028 out_sleep:
5029 	pm_runtime_mark_last_busy(wl->dev);
5030 	pm_runtime_put_autosuspend(wl->dev);
5031 
5032 out:
5033 	mutex_unlock(&wl->mutex);
5034 	return mactime;
5035 }
5036 
wl1271_op_get_survey(struct ieee80211_hw * hw,int idx,struct survey_info * survey)5037 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
5038 				struct survey_info *survey)
5039 {
5040 	struct ieee80211_conf *conf = &hw->conf;
5041 
5042 	if (idx != 0)
5043 		return -ENOENT;
5044 
5045 	survey->channel = conf->chandef.chan;
5046 	survey->filled = 0;
5047 	return 0;
5048 }
5049 
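/* Allocate a HLID for a new AP-mode station and record its address */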
wl1271_allocate_sta(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_sta * sta)5050 static int wl1271_allocate_sta(struct wl1271 *wl,
5051 			     struct wl12xx_vif *wlvif,
5052 			     struct ieee80211_sta *sta)
5053 {
5054 	struct wl1271_station *wl_sta;
5055 	int ret;
5056 
5057 
5058 	if (wl->active_sta_count >= wl->max_ap_stations) {
5059 		wl1271_warning("could not allocate HLID - too many stations");
5060 		return -EBUSY;
5061 	}
5062 
5063 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5064 	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
5065 	if (ret < 0) {
5066 		wl1271_warning("could not allocate HLID - too many links");
5067 		return -EBUSY;
5068 	}
5069 
5070 	/* use the previous security seq, if this is a recovery/resume */
5071 	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
5072 
5073 	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
5074 	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
5075 	wl->active_sta_count++;
5076 	return 0;
5077 }
5078 
wl1271_free_sta(struct wl1271 * wl,struct wl12xx_vif * wlvif,u8 hlid)5079 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
5080 {
5081 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
5082 		return;
5083 
5084 	clear_bit(hlid, wlvif->ap.sta_hlid_map);
5085 	__clear_bit(hlid, &wl->ap_ps_map);
5086 	__clear_bit(hlid, &wl->ap_fw_ps_map);
5087 
5088 	/*
5089 	 * save the last used PN in the private part of ieee80211_sta,
5090 	 * in case of recovery/suspend
5091 	 */
5092 	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5093 
5094 	wl12xx_free_link(wl, wlvif, &hlid);
5095 	wl->active_sta_count--;
5096 
5097 	/*
5098 	 * rearm the tx watchdog when the last STA is freed - give the FW a
5099 	 * chance to return STA-buffered packets before complaining.
5100 	 */
5101 	if (wl->active_sta_count == 0)
5102 		wl12xx_rearm_tx_watchdog_locked(wl);
5103 }
5104 
wl12xx_sta_add(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_sta * sta)5105 static int wl12xx_sta_add(struct wl1271 *wl,
5106 			  struct wl12xx_vif *wlvif,
5107 			  struct ieee80211_sta *sta)
5108 {
5109 	struct wl1271_station *wl_sta;
5110 	int ret = 0;
5111 	u8 hlid;
5112 
5113 	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5114 
5115 	ret = wl1271_allocate_sta(wl, wlvif, sta);
5116 	if (ret < 0)
5117 		return ret;
5118 
5119 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5120 	hlid = wl_sta->hlid;
5121 
5122 	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5123 	if (ret < 0)
5124 		wl1271_free_sta(wl, wlvif, hlid);
5125 
5126 	return ret;
5127 }
5128 
wl12xx_sta_remove(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_sta * sta)5129 static int wl12xx_sta_remove(struct wl1271 *wl,
5130 			     struct wl12xx_vif *wlvif,
5131 			     struct ieee80211_sta *sta)
5132 {
5133 	struct wl1271_station *wl_sta;
5134 	int ret = 0, id;
5135 
5136 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5137 
5138 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5139 	id = wl_sta->hlid;
5140 	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5141 		return -EINVAL;
5142 
5143 	ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5144 	if (ret < 0)
5145 		return ret;
5146 
5147 	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5148 	return ret;
5149 }
5150 
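/* Request ROC on the vif's own role, unless some role is already in ROC */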
wlcore_roc_if_possible(struct wl1271 * wl,struct wl12xx_vif * wlvif)5151 static void wlcore_roc_if_possible(struct wl1271 *wl,
5152 				   struct wl12xx_vif *wlvif)
5153 {
5154 	if (find_first_bit(wl->roc_map,
5155 			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5156 		return;
5157 
5158 	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5159 		return;
5160 
5161 	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5162 }
5163 
5164 /*
5165  * when wl_sta is NULL, we treat this call as if coming from a
5166  * pending auth reply.
5167  * wl->mutex must be taken and the FW must be awake when the call
5168  * takes place.
5169  */
wlcore_update_inconn_sta(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct wl1271_station * wl_sta,bool in_conn)5170 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5171 			      struct wl1271_station *wl_sta, bool in_conn)
5172 {
5173 	if (in_conn) {
5174 		if (WARN_ON(wl_sta && wl_sta->in_connection))
5175 			return;
5176 
5177 		if (!wlvif->ap_pending_auth_reply &&
5178 		    !wlvif->inconn_count)
5179 			wlcore_roc_if_possible(wl, wlvif);
5180 
5181 		if (wl_sta) {
5182 			wl_sta->in_connection = true;
5183 			wlvif->inconn_count++;
5184 		} else {
5185 			wlvif->ap_pending_auth_reply = true;
5186 		}
5187 	} else {
5188 		if (wl_sta && !wl_sta->in_connection)
5189 			return;
5190 
5191 		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5192 			return;
5193 
5194 		if (WARN_ON(wl_sta && !wlvif->inconn_count))
5195 			return;
5196 
5197 		if (wl_sta) {
5198 			wl_sta->in_connection = false;
5199 			wlvif->inconn_count--;
5200 		} else {
5201 			wlvif->ap_pending_auth_reply = false;
5202 		}
5203 
5204 		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5205 		    test_bit(wlvif->role_id, wl->roc_map))
5206 			wl12xx_croc(wl, wlvif->role_id);
5207 	}
5208 }
5209 
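/*
 * Handle mac80211 station state transitions: add/remove/authorize peers
 * in AP mode, track authorization state and saved TX sequence numbers in
 * STA mode, and manage ROC during connection setup.
 */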
wl12xx_update_sta_state(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_sta * sta,enum ieee80211_sta_state old_state,enum ieee80211_sta_state new_state)5210 static int wl12xx_update_sta_state(struct wl1271 *wl,
5211 				   struct wl12xx_vif *wlvif,
5212 				   struct ieee80211_sta *sta,
5213 				   enum ieee80211_sta_state old_state,
5214 				   enum ieee80211_sta_state new_state)
5215 {
5216 	struct wl1271_station *wl_sta;
5217 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5218 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5219 	int ret;
5220 
5221 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5222 
5223 	/* Add station (AP mode) */
5224 	if (is_ap &&
5225 	    old_state == IEEE80211_STA_AUTH &&
5226 	    new_state == IEEE80211_STA_ASSOC) {
5227 		ret = wl12xx_sta_add(wl, wlvif, sta);
5228 		if (ret)
5229 			return ret;
5230 
5231 		wl_sta->fw_added = true;
5232 
5233 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5234 	}
5235 
5236 	/* Remove station (AP mode) */
5237 	if (is_ap &&
5238 	    old_state == IEEE80211_STA_ASSOC &&
5239 	    new_state == IEEE80211_STA_AUTH) {
5240 		wl_sta->fw_added = false;
5241 
5242 		/* must not fail */
5243 		wl12xx_sta_remove(wl, wlvif, sta);
5244 
5245 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5246 	}
5247 
5248 	/* Authorize station (AP mode) */
5249 	if (is_ap &&
5250 	    new_state == IEEE80211_STA_AUTHORIZED) {
5251 		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5252 		if (ret < 0)
5253 			return ret;
5254 
5255 		ret = wl1271_acx_set_ht_capabilities(wl, &sta->deflink.ht_cap,
5256 						     true,
5257 						     wl_sta->hlid);
5258 		if (ret)
5259 			return ret;
5260 
5261 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5262 	}
5263 
5264 	/* Authorize station */
5265 	if (is_sta &&
5266 	    new_state == IEEE80211_STA_AUTHORIZED) {
5267 		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5268 		ret = wl12xx_set_authorized(wl, wlvif);
5269 		if (ret)
5270 			return ret;
5271 	}
5272 
5273 	if (is_sta &&
5274 	    old_state == IEEE80211_STA_AUTHORIZED &&
5275 	    new_state == IEEE80211_STA_ASSOC) {
5276 		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5277 		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5278 	}
5279 
5280 	/* save seq number on disassoc (suspend) */
5281 	if (is_sta &&
5282 	    old_state == IEEE80211_STA_ASSOC &&
5283 	    new_state == IEEE80211_STA_AUTH) {
5284 		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5285 		wlvif->total_freed_pkts = 0;
5286 	}
5287 
5288 	/* restore seq number on assoc (resume) */
5289 	if (is_sta &&
5290 	    old_state == IEEE80211_STA_AUTH &&
5291 	    new_state == IEEE80211_STA_ASSOC) {
5292 		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5293 	}
5294 
5295 	/* clear ROCs on failure or authorization */
5296 	if (is_sta &&
5297 	    (new_state == IEEE80211_STA_AUTHORIZED ||
5298 	     new_state == IEEE80211_STA_NOTEXIST)) {
5299 		if (test_bit(wlvif->role_id, wl->roc_map))
5300 			wl12xx_croc(wl, wlvif->role_id);
5301 	}
5302 
5303 	if (is_sta &&
5304 	    old_state == IEEE80211_STA_NOTEXIST &&
5305 	    new_state == IEEE80211_STA_NONE) {
5306 		if (find_first_bit(wl->roc_map,
5307 				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5308 			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5309 			wl12xx_roc(wl, wlvif, wlvif->role_id,
5310 				   wlvif->band, wlvif->channel);
5311 		}
5312 	}
5313 	return 0;
5314 }
5315 
wl12xx_op_sta_state(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_sta * sta,enum ieee80211_sta_state old_state,enum ieee80211_sta_state new_state)5316 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5317 			       struct ieee80211_vif *vif,
5318 			       struct ieee80211_sta *sta,
5319 			       enum ieee80211_sta_state old_state,
5320 			       enum ieee80211_sta_state new_state)
5321 {
5322 	struct wl1271 *wl = hw->priv;
5323 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5324 	int ret;
5325 
5326 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5327 		     sta->aid, old_state, new_state);
5328 
5329 	mutex_lock(&wl->mutex);
5330 
5331 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5332 		ret = -EBUSY;
5333 		goto out;
5334 	}
5335 
5336 	ret = pm_runtime_resume_and_get(wl->dev);
5337 	if (ret < 0)
5338 		goto out;
5339 
5340 	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5341 
5342 	pm_runtime_mark_last_busy(wl->dev);
5343 	pm_runtime_put_autosuspend(wl->dev);
5344 out:
5345 	mutex_unlock(&wl->mutex);
5346 	if (new_state < old_state)
5347 		return 0;
5348 	return ret;
5349 }
5350 
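/*
 * mac80211 ampdu_action callback: only RX BA sessions are managed by the
 * host; TX BA sessions are handled entirely by the FW.
 */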
wl1271_op_ampdu_action(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_ampdu_params * params)5351 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5352 				  struct ieee80211_vif *vif,
5353 				  struct ieee80211_ampdu_params *params)
5354 {
5355 	struct wl1271 *wl = hw->priv;
5356 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5357 	int ret;
5358 	u8 hlid, *ba_bitmap;
5359 	struct ieee80211_sta *sta = params->sta;
5360 	enum ieee80211_ampdu_mlme_action action = params->action;
5361 	u16 tid = params->tid;
5362 	u16 *ssn = &params->ssn;
5363 
5364 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5365 		     tid);
5366 
5367 	/* sanity check - the fields in FW are only 8bits wide */
5368 	/* sanity check - the fields in FW are only 8 bits wide */
5369 		return -ENOTSUPP;
5370 
5371 	mutex_lock(&wl->mutex);
5372 
5373 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5374 		ret = -EAGAIN;
5375 		goto out;
5376 	}
5377 
5378 	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5379 		hlid = wlvif->sta.hlid;
5380 	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5381 		struct wl1271_station *wl_sta;
5382 
5383 		wl_sta = (struct wl1271_station *)sta->drv_priv;
5384 		hlid = wl_sta->hlid;
5385 	} else {
5386 		ret = -EINVAL;
5387 		goto out;
5388 	}
5389 
5390 	ba_bitmap = &wl->links[hlid].ba_bitmap;
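	/* one bit per TID (0-7) tracks this link's active RX BA sessions */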
5391 
5392 	ret = pm_runtime_resume_and_get(wl->dev);
5393 	if (ret < 0)
5394 		goto out;
5395 
5396 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5397 		     tid, action);
5398 
5399 	switch (action) {
5400 	case IEEE80211_AMPDU_RX_START:
5401 		if (!wlvif->ba_support || !wlvif->ba_allowed) {
5402 			ret = -ENOTSUPP;
5403 			break;
5404 		}
5405 
5406 		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5407 			ret = -EBUSY;
5408 			wl1271_debug(DEBUG_RX, "exceeded max RX BA sessions");
5409 			break;
5410 		}
5411 
5412 		if (*ba_bitmap & BIT(tid)) {
5413 			ret = -EINVAL;
5414 			wl1271_error("cannot enable RX BA session on active "
5415 				     "tid: %d", tid);
5416 			break;
5417 		}
5418 
5419 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5420 				hlid,
5421 				params->buf_size);
5422 
5423 		if (!ret) {
5424 			*ba_bitmap |= BIT(tid);
5425 			wl->ba_rx_session_count++;
5426 		}
5427 		break;
5428 
5429 	case IEEE80211_AMPDU_RX_STOP:
5430 		if (!(*ba_bitmap & BIT(tid))) {
5431 			/*
5432 			 * this happens on reconfig - so only output a debug
5433 			 * message for now, and don't fail the function.
5434 			 */
5435 			wl1271_debug(DEBUG_MAC80211,
5436 				     "no active RX BA session on tid: %d",
5437 				     tid);
5438 			ret = 0;
5439 			break;
5440 		}
5441 
5442 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5443 							 hlid, 0);
5444 		if (!ret) {
5445 			*ba_bitmap &= ~BIT(tid);
5446 			wl->ba_rx_session_count--;
5447 		}
5448 		break;
5449 
5450 	/*
5451 	 * The BA initiator (TX) session is managed by the FW independently,
5452 	 * so all TX AMPDU actions are rejected here on purpose.
5453 	 */
5454 	case IEEE80211_AMPDU_TX_START:
5455 	case IEEE80211_AMPDU_TX_STOP_CONT:
5456 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
5457 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5458 	case IEEE80211_AMPDU_TX_OPERATIONAL:
5459 		ret = -EINVAL;
5460 		break;
5461 
5462 	default:
5463 		wl1271_error("Incorrect ampdu action id=%x\n", action);
5464 		ret = -EINVAL;
5465 	}
5466 
5467 	pm_runtime_mark_last_busy(wl->dev);
5468 	pm_runtime_put_autosuspend(wl->dev);
5469 
5470 out:
5471 	mutex_unlock(&wl->mutex);
5472 
5473 	return ret;
5474 }
5475 
wl12xx_set_bitrate_mask(struct ieee80211_hw * hw,struct ieee80211_vif * vif,const struct cfg80211_bitrate_mask * mask)5476 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5477 				   struct ieee80211_vif *vif,
5478 				   const struct cfg80211_bitrate_mask *mask)
5479 {
5480 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5481 	struct wl1271 *wl = hw->priv;
5482 	int i, ret = 0;
5483 
5484 	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5485 		mask->control[NL80211_BAND_2GHZ].legacy,
5486 		mask->control[NL80211_BAND_5GHZ].legacy);
5487 
5488 	mutex_lock(&wl->mutex);
5489 
5490 	for (i = 0; i < WLCORE_NUM_BANDS; i++)
5491 		wlvif->bitrate_masks[i] =
5492 			wl1271_tx_enabled_rates_get(wl,
5493 						    mask->control[i].legacy,
5494 						    i);
5495 
5496 	if (unlikely(wl->state != WLCORE_STATE_ON))
5497 		goto out;
5498 
5499 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5500 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5501 
5502 		ret = pm_runtime_resume_and_get(wl->dev);
5503 		if (ret < 0)
5504 			goto out;
5505 
5506 		wl1271_set_band_rate(wl, wlvif);
5507 		wlvif->basic_rate =
5508 			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5509 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5510 
5511 		pm_runtime_mark_last_busy(wl->dev);
5512 		pm_runtime_put_autosuspend(wl->dev);
5513 	}
5514 out:
5515 	mutex_unlock(&wl->mutex);
5516 
5517 	return ret;
5518 }
5519 
wl12xx_op_channel_switch(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_channel_switch * ch_switch)5520 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5521 				     struct ieee80211_vif *vif,
5522 				     struct ieee80211_channel_switch *ch_switch)
5523 {
5524 	struct wl1271 *wl = hw->priv;
5525 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5526 	int ret;
5527 
5528 	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5529 
5530 	wl1271_tx_flush(wl);
5531 
5532 	mutex_lock(&wl->mutex);
5533 
5534 	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5535 		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5536 			ieee80211_chswitch_done(vif, false, 0);
5537 		goto out;
5538 	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5539 		goto out;
5540 	}
5541 
5542 	ret = pm_runtime_resume_and_get(wl->dev);
5543 	if (ret < 0)
5544 		goto out;
5545 
5546 	/* TODO: change mac80211 to pass vif as param */
5547 
5548 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5549 		unsigned long delay_usec;
5550 
5551 		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5552 		if (ret)
5553 			goto out_sleep;
5554 
5555 		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5556 
5557 		/* indicate failure 5 seconds after channel switch time */
5558 		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5559 			ch_switch->count;
5560 		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5561 					     usecs_to_jiffies(delay_usec) +
5562 					     msecs_to_jiffies(5000));
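		/*
		 * Worked example (illustrative values): with a 100 TU beacon
		 * interval (102400 us) and a CSA count of 10, delay_usec is
		 * ~1.02 s, so the failure indication fires roughly 6 s from
		 * now unless the switch completes first.
		 */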
5563 	}
5564 
5565 out_sleep:
5566 	pm_runtime_mark_last_busy(wl->dev);
5567 	pm_runtime_put_autosuspend(wl->dev);
5568 
5569 out:
5570 	mutex_unlock(&wl->mutex);
5571 }
5572 
wlcore_get_beacon_ie(struct wl1271 * wl,struct wl12xx_vif * wlvif,u8 eid)5573 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5574 					struct wl12xx_vif *wlvif,
5575 					u8 eid)
5576 {
5577 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5578 	struct sk_buff *beacon =
5579 		ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif), 0);
5580 
5581 	if (!beacon)
5582 		return NULL;
5583 
5584 	return cfg80211_find_ie(eid,
5585 				beacon->data + ieoffset,
5586 				beacon->len - ieoffset);
5587 }
5588 
wlcore_get_csa_count(struct wl1271 * wl,struct wl12xx_vif * wlvif,u8 * csa_count)5589 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5590 				u8 *csa_count)
5591 {
5592 	const u8 *ie;
5593 	const struct ieee80211_channel_sw_ie *ie_csa;
5594 
5595 	ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5596 	if (!ie)
5597 		return -EINVAL;
5598 
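	/*
	 * ie points at the Element ID octet; ie[1] is the length, so the
	 * CSA payload (switch mode, new channel number, switch count)
	 * starts at ie[2].
	 */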
5599 	ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5600 	*csa_count = ie_csa->count;
5601 
5602 	return 0;
5603 }
5604 
wlcore_op_channel_switch_beacon(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct cfg80211_chan_def * chandef)5605 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5606 					    struct ieee80211_vif *vif,
5607 					    struct cfg80211_chan_def *chandef)
5608 {
5609 	struct wl1271 *wl = hw->priv;
5610 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5611 	struct ieee80211_channel_switch ch_switch = {
5612 		.block_tx = true,
5613 		.chandef = *chandef,
5614 	};
5615 	int ret;
5616 
5617 	wl1271_debug(DEBUG_MAC80211,
5618 		     "mac80211 channel switch beacon (role %d)",
5619 		     wlvif->role_id);
5620 
5621 	ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5622 	if (ret < 0) {
5623 		wl1271_error("error getting beacon (for CSA counter)");
5624 		return;
5625 	}
5626 
5627 	mutex_lock(&wl->mutex);
5628 
5629 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5630 		ret = -EBUSY;
5631 		goto out;
5632 	}
5633 
5634 	ret = pm_runtime_resume_and_get(wl->dev);
5635 	if (ret < 0)
5636 		goto out;
5637 
5638 	ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5639 	if (ret)
5640 		goto out_sleep;
5641 
5642 	set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5643 
5644 out_sleep:
5645 	pm_runtime_mark_last_busy(wl->dev);
5646 	pm_runtime_put_autosuspend(wl->dev);
5647 out:
5648 	mutex_unlock(&wl->mutex);
5649 }
5650 
wlcore_op_flush(struct ieee80211_hw * hw,struct ieee80211_vif * vif,u32 queues,bool drop)5651 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5652 			    u32 queues, bool drop)
5653 {
5654 	struct wl1271 *wl = hw->priv;
5655 
5656 	wl1271_tx_flush(wl);
5657 }
5658 
wlcore_op_remain_on_channel(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_channel * chan,int duration,enum ieee80211_roc_type type)5659 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5660 				       struct ieee80211_vif *vif,
5661 				       struct ieee80211_channel *chan,
5662 				       int duration,
5663 				       enum ieee80211_roc_type type)
5664 {
5665 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5666 	struct wl1271 *wl = hw->priv;
5667 	int channel, active_roc, ret = 0;
5668 
5669 	channel = ieee80211_frequency_to_channel(chan->center_freq);
5670 
5671 	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5672 		     channel, wlvif->role_id);
5673 
5674 	mutex_lock(&wl->mutex);
5675 
5676 	if (unlikely(wl->state != WLCORE_STATE_ON))
5677 		goto out;
5678 
5679 	/* return EBUSY if we can't ROC right now */
5680 	active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5681 	if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5682 		wl1271_warning("active roc on role %d", active_roc);
5683 		ret = -EBUSY;
5684 		goto out;
5685 	}
5686 
5687 	ret = pm_runtime_resume_and_get(wl->dev);
5688 	if (ret < 0)
5689 		goto out;
5690 
5691 	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5692 	if (ret < 0)
5693 		goto out_sleep;
5694 
5695 	wl->roc_vif = vif;
5696 	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5697 				     msecs_to_jiffies(duration));
5698 out_sleep:
5699 	pm_runtime_mark_last_busy(wl->dev);
5700 	pm_runtime_put_autosuspend(wl->dev);
5701 out:
5702 	mutex_unlock(&wl->mutex);
5703 	return ret;
5704 }
5705 
__wlcore_roc_completed(struct wl1271 * wl)5706 static int __wlcore_roc_completed(struct wl1271 *wl)
5707 {
5708 	struct wl12xx_vif *wlvif;
5709 	int ret;
5710 
5711 	/* already completed */
5712 	if (unlikely(!wl->roc_vif))
5713 		return 0;
5714 
5715 	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5716 
5717 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5718 		return -EBUSY;
5719 
5720 	ret = wl12xx_stop_dev(wl, wlvif);
5721 	if (ret < 0)
5722 		return ret;
5723 
5724 	wl->roc_vif = NULL;
5725 
5726 	return 0;
5727 }
5728 
wlcore_roc_completed(struct wl1271 * wl)5729 static int wlcore_roc_completed(struct wl1271 *wl)
5730 {
5731 	int ret;
5732 
5733 	wl1271_debug(DEBUG_MAC80211, "roc complete");
5734 
5735 	mutex_lock(&wl->mutex);
5736 
5737 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5738 		ret = -EBUSY;
5739 		goto out;
5740 	}
5741 
5742 	ret = pm_runtime_resume_and_get(wl->dev);
5743 	if (ret < 0)
5744 		goto out;
5745 
5746 	ret = __wlcore_roc_completed(wl);
5747 
5748 	pm_runtime_mark_last_busy(wl->dev);
5749 	pm_runtime_put_autosuspend(wl->dev);
5750 out:
5751 	mutex_unlock(&wl->mutex);
5752 
5753 	return ret;
5754 }
5755 
wlcore_roc_complete_work(struct work_struct * work)5756 static void wlcore_roc_complete_work(struct work_struct *work)
5757 {
5758 	struct delayed_work *dwork;
5759 	struct wl1271 *wl;
5760 	int ret;
5761 
5762 	dwork = to_delayed_work(work);
5763 	wl = container_of(dwork, struct wl1271, roc_complete_work);
5764 
5765 	ret = wlcore_roc_completed(wl);
5766 	if (!ret)
5767 		ieee80211_remain_on_channel_expired(wl->hw);
5768 }
5769 
wlcore_op_cancel_remain_on_channel(struct ieee80211_hw * hw,struct ieee80211_vif * vif)5770 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
5771 					      struct ieee80211_vif *vif)
5772 {
5773 	struct wl1271 *wl = hw->priv;
5774 
5775 	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5776 
5777 	/* TODO: per-vif */
5778 	wl1271_tx_flush(wl);
5779 
5780 	/*
5781 	 * we can't just flush_work here, because it might deadlock
5782 	 * (as we might get called from the same workqueue)
5783 	 */
5784 	cancel_delayed_work_sync(&wl->roc_complete_work);
5785 	wlcore_roc_completed(wl);
5786 
5787 	return 0;
5788 }
5789 
wlcore_op_sta_rc_update(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_sta * sta,u32 changed)5790 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5791 				    struct ieee80211_vif *vif,
5792 				    struct ieee80211_sta *sta,
5793 				    u32 changed)
5794 {
5795 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5796 
5797 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5798 
5799 	if (!(changed & IEEE80211_RC_BW_CHANGED))
5800 		return;
5801 
5802 	/* this callback is atomic, so defer the update to a work item */
5803 	wlvif->rc_update_bw = sta->deflink.bandwidth;
5804 	memcpy(&wlvif->rc_ht_cap, &sta->deflink.ht_cap,
5805 	       sizeof(sta->deflink.ht_cap));
5806 	ieee80211_queue_work(hw, &wlvif->rc_update_work);
5807 }
5808 
wlcore_op_sta_statistics(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_sta * sta,struct station_info * sinfo)5809 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5810 				     struct ieee80211_vif *vif,
5811 				     struct ieee80211_sta *sta,
5812 				     struct station_info *sinfo)
5813 {
5814 	struct wl1271 *wl = hw->priv;
5815 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5816 	s8 rssi_dbm;
5817 	int ret;
5818 
5819 	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5820 
5821 	mutex_lock(&wl->mutex);
5822 
5823 	if (unlikely(wl->state != WLCORE_STATE_ON))
5824 		goto out;
5825 
5826 	ret = pm_runtime_resume_and_get(wl->dev);
5827 	if (ret < 0)
5828 		goto out_sleep;
5829 
5830 	ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5831 	if (ret < 0)
5832 		goto out_sleep;
5833 
5834 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
5835 	sinfo->signal = rssi_dbm;
5836 
5837 out_sleep:
5838 	pm_runtime_mark_last_busy(wl->dev);
5839 	pm_runtime_put_autosuspend(wl->dev);
5840 
5841 out:
5842 	mutex_unlock(&wl->mutex);
5843 }
5844 
wlcore_op_get_expected_throughput(struct ieee80211_hw * hw,struct ieee80211_sta * sta)5845 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5846 					     struct ieee80211_sta *sta)
5847 {
5848 	struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5849 	struct wl1271 *wl = hw->priv;
5850 	u8 hlid = wl_sta->hlid;
5851 
5852 	/* return in units of Kbps */
5853 	return (wl->links[hlid].fw_rate_mbps * 1000);
5854 }
5855 
wl1271_tx_frames_pending(struct ieee80211_hw * hw)5856 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5857 {
5858 	struct wl1271 *wl = hw->priv;
5859 	bool ret = false;
5860 
5861 	mutex_lock(&wl->mutex);
5862 
5863 	if (unlikely(wl->state != WLCORE_STATE_ON))
5864 		goto out;
5865 
5866 	/* packets are considered pending if in the TX queue or the FW */
5867 	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5868 out:
5869 	mutex_unlock(&wl->mutex);
5870 
5871 	return ret;
5872 }
5873 
5874 /* can't be const, mac80211 writes to this */
5875 static struct ieee80211_rate wl1271_rates[] = {
5876 	{ .bitrate = 10,
5877 	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5878 	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5879 	{ .bitrate = 20,
5880 	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5881 	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5882 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5883 	{ .bitrate = 55,
5884 	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5885 	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5886 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5887 	{ .bitrate = 110,
5888 	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5889 	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5890 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5891 	{ .bitrate = 60,
5892 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5893 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5894 	{ .bitrate = 90,
5895 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5896 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5897 	{ .bitrate = 120,
5898 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5899 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5900 	{ .bitrate = 180,
5901 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5902 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5903 	{ .bitrate = 240,
5904 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5905 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5906 	{ .bitrate = 360,
5907 	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5908 	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5909 	{ .bitrate = 480,
5910 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5911 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5912 	{ .bitrate = 540,
5913 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5914 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5915 };
5916 
5917 /* can't be const, mac80211 writes to this */
5918 static struct ieee80211_channel wl1271_channels[] = {
5919 	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5920 	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5921 	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5922 	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5923 	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5924 	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5925 	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5926 	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5927 	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5928 	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5929 	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5930 	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5931 	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5932 	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5933 };
5934 
5935 /* can't be const, mac80211 writes to this */
5936 static struct ieee80211_supported_band wl1271_band_2ghz = {
5937 	.channels = wl1271_channels,
5938 	.n_channels = ARRAY_SIZE(wl1271_channels),
5939 	.bitrates = wl1271_rates,
5940 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5941 };
5942 
5943 /* 5 GHz data rates for WL1273 */
5944 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5945 	{ .bitrate = 60,
5946 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5947 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5948 	{ .bitrate = 90,
5949 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5950 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5951 	{ .bitrate = 120,
5952 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5953 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5954 	{ .bitrate = 180,
5955 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5956 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5957 	{ .bitrate = 240,
5958 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5959 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5960 	{ .bitrate = 360,
5961 	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5962 	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5963 	{ .bitrate = 480,
5964 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5965 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5966 	{ .bitrate = 540,
5967 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5968 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5969 };
5970 
5971 /* 5 GHz band channels for WL1273 */
5972 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5973 	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5974 	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5975 	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5976 	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5977 	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5978 	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5979 	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5980 	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5981 	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5982 	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5983 	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5984 	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5985 	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5986 	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5987 	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5988 	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5989 	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5990 	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5991 	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5992 	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5993 	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5994 	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5995 	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5996 	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5997 	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5998 	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5999 	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
6000 	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
6001 	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
6002 	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
6003 	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
6004 };
6005 
6006 static struct ieee80211_supported_band wl1271_band_5ghz = {
6007 	.channels = wl1271_channels_5ghz,
6008 	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
6009 	.bitrates = wl1271_rates_5ghz,
6010 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
6011 };
6012 
6013 static const struct ieee80211_ops wl1271_ops = {
6014 	.start = wl1271_op_start,
6015 	.stop = wlcore_op_stop,
6016 	.add_interface = wl1271_op_add_interface,
6017 	.remove_interface = wl1271_op_remove_interface,
6018 	.change_interface = wl12xx_op_change_interface,
6019 #ifdef CONFIG_PM
6020 	.suspend = wl1271_op_suspend,
6021 	.resume = wl1271_op_resume,
6022 #endif
6023 	.config = wl1271_op_config,
6024 	.prepare_multicast = wl1271_op_prepare_multicast,
6025 	.configure_filter = wl1271_op_configure_filter,
6026 	.tx = wl1271_op_tx,
6027 	.wake_tx_queue = ieee80211_handle_wake_tx_queue,
6028 	.set_key = wlcore_op_set_key,
6029 	.hw_scan = wl1271_op_hw_scan,
6030 	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
6031 	.sched_scan_start = wl1271_op_sched_scan_start,
6032 	.sched_scan_stop = wl1271_op_sched_scan_stop,
6033 	.bss_info_changed = wl1271_op_bss_info_changed,
6034 	.set_frag_threshold = wl1271_op_set_frag_threshold,
6035 	.set_rts_threshold = wl1271_op_set_rts_threshold,
6036 	.conf_tx = wl1271_op_conf_tx,
6037 	.get_tsf = wl1271_op_get_tsf,
6038 	.get_survey = wl1271_op_get_survey,
6039 	.sta_state = wl12xx_op_sta_state,
6040 	.ampdu_action = wl1271_op_ampdu_action,
6041 	.tx_frames_pending = wl1271_tx_frames_pending,
6042 	.set_bitrate_mask = wl12xx_set_bitrate_mask,
6043 	.set_default_unicast_key = wl1271_op_set_default_key_idx,
6044 	.channel_switch = wl12xx_op_channel_switch,
6045 	.channel_switch_beacon = wlcore_op_channel_switch_beacon,
6046 	.flush = wlcore_op_flush,
6047 	.remain_on_channel = wlcore_op_remain_on_channel,
6048 	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
6049 	.add_chanctx = wlcore_op_add_chanctx,
6050 	.remove_chanctx = wlcore_op_remove_chanctx,
6051 	.change_chanctx = wlcore_op_change_chanctx,
6052 	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
6053 	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
6054 	.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
6055 	.sta_rc_update = wlcore_op_sta_rc_update,
6056 	.sta_statistics = wlcore_op_sta_statistics,
6057 	.get_expected_throughput = wlcore_op_get_expected_throughput,
6058 	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
6059 };
6060 
6061 
wlcore_rate_to_idx(struct wl1271 * wl,u8 rate,enum nl80211_band band)6062 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
6063 {
6064 	u8 idx;
6065 
6066 	BUG_ON(band >= 2);
6067 
6068 	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
6069 		wl1271_error("Illegal RX rate from HW: %d", rate);
6070 		return 0;
6071 	}
6072 
6073 	idx = wl->band_rate_to_idx[band][rate];
6074 	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
6075 		wl1271_error("Unsupported RX rate from HW: %d", rate);
6076 		return 0;
6077 	}
6078 
6079 	return idx;
6080 }
6081 
wl12xx_derive_mac_addresses(struct wl1271 * wl,u32 oui,u32 nic)6082 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
6083 {
6084 	int i;
6085 
6086 	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
6087 		     oui, nic);
6088 
6089 	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
6090 		wl1271_warning("NIC part of the MAC address wraps around!");
6091 
6092 	for (i = 0; i < wl->num_mac_addr; i++) {
6093 		wl->addresses[i].addr[0] = (u8)(oui >> 16);
6094 		wl->addresses[i].addr[1] = (u8)(oui >> 8);
6095 		wl->addresses[i].addr[2] = (u8) oui;
6096 		wl->addresses[i].addr[3] = (u8)(nic >> 16);
6097 		wl->addresses[i].addr[4] = (u8)(nic >> 8);
6098 		wl->addresses[i].addr[5] = (u8) nic;
6099 		nic++;
6100 	}
6101 
6102 	/* we may be short at most one address */
6103 	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
6104 
6105 	/*
6106 	 * copy the first address into the last slot and set the locally
6107 	 * administered (LAA) bit in it, to use as an extra address.
6108 	 */
6109 	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
6110 		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
6111 		memcpy(&wl->addresses[idx], &wl->addresses[0],
6112 		       sizeof(wl->addresses[0]));
6113 		/* LAA bit */
6114 		wl->addresses[idx].addr[0] |= BIT(1);
6115 	}
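	/*
	 * Illustrative example (hypothetical values): oui 0x080028 and
	 * nic 0x000001 with two fused addresses yield 08:00:28:00:00:01
	 * and 08:00:28:00:00:02; the extra wiphy address is then the
	 * first one with the LAA bit set, i.e. 0a:00:28:00:00:01.
	 */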
6116 
6117 	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
6118 	wl->hw->wiphy->addresses = wl->addresses;
6119 }
6120 
wl12xx_get_hw_info(struct wl1271 * wl)6121 static int wl12xx_get_hw_info(struct wl1271 *wl)
6122 {
6123 	int ret;
6124 
6125 	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6126 	if (ret < 0)
6127 		goto out;
6128 
6129 	wl->fuse_oui_addr = 0;
6130 	wl->fuse_nic_addr = 0;
6131 
6132 	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6133 	if (ret < 0)
6134 		goto out;
6135 
6136 	if (wl->ops->get_mac)
6137 		ret = wl->ops->get_mac(wl);
6138 
6139 out:
6140 	return ret;
6141 }
6142 
wl1271_register_hw(struct wl1271 * wl)6143 static int wl1271_register_hw(struct wl1271 *wl)
6144 {
6145 	int ret;
6146 	u32 oui_addr = 0, nic_addr = 0;
6147 	struct platform_device *pdev = wl->pdev;
6148 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6149 
6150 	if (wl->mac80211_registered)
6151 		return 0;
6152 
6153 	if (wl->nvs_len >= 12) {
6154 		/* NOTE: the wl->nvs->nvs element must come first; to simplify
6155 		 * the casting, we assume it sits at the beginning of the
6156 		 * wl->nvs structure.
6157 		 */
6158 		u8 *nvs_ptr = (u8 *)wl->nvs;
6159 
6160 		oui_addr =
6161 			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6162 		nic_addr =
6163 			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6164 	}
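	/*
	 * For example (hypothetical NVS contents), nvs bytes 3..6 of
	 * 78 56 34 28 and bytes 10..11 of 00 08 decode to oui 0x080028
	 * and nic 0x345678.
	 */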
6165 
6166 	/* if the MAC address is zeroed in the NVS, derive it from the fuse */
6167 	if (oui_addr == 0 && nic_addr == 0) {
6168 		oui_addr = wl->fuse_oui_addr;
6169 		/* the fuse holds the BD_ADDR; the WLAN addresses are the next two */
6170 		nic_addr = wl->fuse_nic_addr + 1;
6171 	}
6172 
6173 	if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6174 		wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
6175 		if (!strcmp(pdev_data->family->name, "wl18xx")) {
6176 			wl1271_warning("This default nvs file can be removed from the file system");
6177 		} else {
6178 			wl1271_warning("Your device performance is not optimized.");
6179 			wl1271_warning("Please use the calibrator tool to configure your device.");
6180 		}
6181 
6182 		if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6183 			wl1271_warning("Fuse mac address is zero. using random mac");
6184 			/* Use TI oui and a random nic */
6185 			oui_addr = WLCORE_TI_OUI_ADDRESS;
6186 			nic_addr = get_random_u32();
6187 		} else {
6188 			oui_addr = wl->fuse_oui_addr;
6189 			/* the fuse holds the BD_ADDR; the WLAN addresses are the next two */
6190 			nic_addr = wl->fuse_nic_addr + 1;
6191 		}
6192 	}
6193 
6194 	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6195 
6196 	ret = ieee80211_register_hw(wl->hw);
6197 	if (ret < 0) {
6198 		wl1271_error("unable to register mac80211 hw: %d", ret);
6199 		goto out;
6200 	}
6201 
6202 	wl->mac80211_registered = true;
6203 
6204 	wl1271_debugfs_init(wl);
6205 
6206 	wl1271_notice("loaded");
6207 
6208 out:
6209 	return ret;
6210 }
6211 
wl1271_unregister_hw(struct wl1271 * wl)6212 static void wl1271_unregister_hw(struct wl1271 *wl)
6213 {
6214 	if (wl->plt)
6215 		wl1271_plt_stop(wl);
6216 
6217 	ieee80211_unregister_hw(wl->hw);
6218 	wl->mac80211_registered = false;
6219 
6220 }
6221 
wl1271_init_ieee80211(struct wl1271 * wl)6222 static int wl1271_init_ieee80211(struct wl1271 *wl)
6223 {
6224 	int i;
6225 	static const u32 cipher_suites[] = {
6226 		WLAN_CIPHER_SUITE_WEP40,
6227 		WLAN_CIPHER_SUITE_WEP104,
6228 		WLAN_CIPHER_SUITE_TKIP,
6229 		WLAN_CIPHER_SUITE_CCMP,
6230 		WL1271_CIPHER_SUITE_GEM,
6231 	};
6232 
6233 	/* The tx descriptor buffer */
6234 	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6235 
6236 	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6237 		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6238 
6239 	/* unit us */
6240 	/* FIXME: find a proper value */
6241 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6242 
6243 	ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6244 	ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6245 	ieee80211_hw_set(wl->hw, SUPPORTS_PER_STA_GTK);
6246 	ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6247 	ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6248 	ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6249 	ieee80211_hw_set(wl->hw, AP_LINK_PS);
6250 	ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6251 	ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6252 	ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6253 	ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6254 	ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6255 	ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6256 	ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6257 	ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6258 
6259 	wl->hw->wiphy->cipher_suites = cipher_suites;
6260 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6261 
6262 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6263 					 BIT(NL80211_IFTYPE_AP) |
6264 					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6265 					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6266 #ifdef CONFIG_MAC80211_MESH
6267 					 BIT(NL80211_IFTYPE_MESH_POINT) |
6268 #endif
6269 					 BIT(NL80211_IFTYPE_P2P_GO);
6270 
6271 	wl->hw->wiphy->max_scan_ssids = 1;
6272 	wl->hw->wiphy->max_sched_scan_ssids = 16;
6273 	wl->hw->wiphy->max_match_sets = 16;
6274 	/*
6275 	 * The maximum length of the IEs in a scan probe request template
6276 	 * is the maximum possible template length, minus the IEEE 802.11
6277 	 * header of the template.
6278 	 */
6279 	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6280 			sizeof(struct ieee80211_header);
6281 
6282 	wl->hw->wiphy->max_sched_scan_reqs = 1;
6283 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6284 		sizeof(struct ieee80211_header);
6285 
6286 	wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6287 
6288 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6289 				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6290 				WIPHY_FLAG_HAS_CHANNEL_SWITCH |
6291 				WIPHY_FLAG_IBSS_RSN;
6292 
6293 	wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6294 
6295 	/* make sure all our channels fit in the scanned_ch bitmask */
6296 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6297 		     ARRAY_SIZE(wl1271_channels_5ghz) >
6298 		     WL1271_MAX_CHANNELS);
6299 	/*
6300 	 * clear channel flags from the previous usage
6301 	 * and restore max_power & max_antenna_gain values.
6302 	 */
6303 	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6304 		wl1271_band_2ghz.channels[i].flags = 0;
6305 		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6306 		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6307 	}
6308 
6309 	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6310 		wl1271_band_5ghz.channels[i].flags = 0;
6311 		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6312 		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6313 	}
6314 
6315 	/*
6316 	 * We keep local copies of the band structs because we need to
6317 	 * modify them on a per-device basis.
6318 	 */
6319 	memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6320 	       sizeof(wl1271_band_2ghz));
6321 	memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6322 	       &wl->ht_cap[NL80211_BAND_2GHZ],
6323 	       sizeof(*wl->ht_cap));
6324 	memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6325 	       sizeof(wl1271_band_5ghz));
6326 	memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6327 	       &wl->ht_cap[NL80211_BAND_5GHZ],
6328 	       sizeof(*wl->ht_cap));
6329 
6330 	wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6331 		&wl->bands[NL80211_BAND_2GHZ];
6332 	wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6333 		&wl->bands[NL80211_BAND_5GHZ];
6334 
6335 	/*
6336 	 * allow 4 queues per mac address we support +
6337 	 * 1 cab queue per mac + one global offchannel Tx queue
6338 	 */
6339 	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6340 
6341 	/* the last queue is the offchannel queue */
6342 	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
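	/*
	 * For example, with the usual wlcore values of NUM_TX_QUEUES = 4
	 * and WLCORE_NUM_MAC_ADDRESSES = 3 this gives 16 hw queues, with
	 * queue 15 reserved for offchannel tx.
	 */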
6343 	wl->hw->max_rates = 1;
6344 
6345 	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6346 
6347 	/* the FW answers probe-requests in AP-mode */
6348 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6349 	wl->hw->wiphy->probe_resp_offload =
6350 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6351 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6352 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6353 
6354 	/* allowed interface combinations */
6355 	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6356 	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6357 
6358 	/* register vendor commands */
6359 	wlcore_set_vendor_commands(wl->hw->wiphy);
6360 
6361 	SET_IEEE80211_DEV(wl->hw, wl->dev);
6362 
6363 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
6364 	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6365 
6366 	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6367 
6368 	return 0;
6369 }
6370 
wlcore_alloc_hw(size_t priv_size,u32 aggr_buf_size,u32 mbox_size)6371 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6372 				     u32 mbox_size)
6373 {
6374 	struct ieee80211_hw *hw;
6375 	struct wl1271 *wl;
6376 	int i, j, ret;
6377 	unsigned int order;
6378 
6379 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6380 	if (!hw) {
6381 		wl1271_error("could not alloc ieee80211_hw");
6382 		ret = -ENOMEM;
6383 		goto err_hw_alloc;
6384 	}
6385 
6386 	wl = hw->priv;
6387 	memset(wl, 0, sizeof(*wl));
6388 
6389 	wl->priv = kzalloc(priv_size, GFP_KERNEL);
6390 	if (!wl->priv) {
6391 		wl1271_error("could not alloc wl priv");
6392 		ret = -ENOMEM;
6393 		goto err_priv_alloc;
6394 	}
6395 
6396 	INIT_LIST_HEAD(&wl->wlvif_list);
6397 
6398 	wl->hw = hw;
6399 
6400 	/*
6401 	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6402 	 * we don't allocate any additional resource here, so that's fine.
6403 	 */
6404 	for (i = 0; i < NUM_TX_QUEUES; i++)
6405 		for (j = 0; j < WLCORE_MAX_LINKS; j++)
6406 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
6407 
6408 	skb_queue_head_init(&wl->deferred_rx_queue);
6409 	skb_queue_head_init(&wl->deferred_tx_queue);
6410 
6411 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6412 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
6413 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6414 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6415 	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6416 	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6417 
6418 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6419 	if (!wl->freezable_wq) {
6420 		ret = -ENOMEM;
6421 		goto err_hw;
6422 	}
6423 
6424 	wl->channel = 0;
6425 	wl->rx_counter = 0;
6426 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6427 	wl->band = NL80211_BAND_2GHZ;
6428 	wl->channel_type = NL80211_CHAN_NO_HT;
6429 	wl->flags = 0;
6430 	wl->sg_enabled = true;
6431 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
6432 	wl->recovery_count = 0;
6433 	wl->hw_pg_ver = -1;
6434 	wl->ap_ps_map = 0;
6435 	wl->ap_fw_ps_map = 0;
6436 	wl->quirks = 0;
6437 	wl->system_hlid = WL12XX_SYSTEM_HLID;
6438 	wl->active_sta_count = 0;
6439 	wl->active_link_count = 0;
6440 	wl->fwlog_size = 0;
6441 
6442 	/* The system link is always allocated */
6443 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6444 
6445 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6446 	for (i = 0; i < wl->num_tx_desc; i++)
6447 		wl->tx_frames[i] = NULL;
6448 
6449 	spin_lock_init(&wl->wl_lock);
6450 
6451 	wl->state = WLCORE_STATE_OFF;
6452 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6453 	mutex_init(&wl->mutex);
6454 	mutex_init(&wl->flush_mutex);
6455 	init_completion(&wl->nvs_loading_complete);
6456 
6457 	order = get_order(aggr_buf_size);
6458 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6459 	if (!wl->aggr_buf) {
6460 		ret = -ENOMEM;
6461 		goto err_wq;
6462 	}
6463 	wl->aggr_buf_size = aggr_buf_size;
6464 
6465 	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6466 	if (!wl->dummy_packet) {
6467 		ret = -ENOMEM;
6468 		goto err_aggr;
6469 	}
6470 
6471 	/* Allocate one page for the FW log */
6472 	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6473 	if (!wl->fwlog) {
6474 		ret = -ENOMEM;
6475 		goto err_dummy_packet;
6476 	}
6477 
6478 	wl->mbox_size = mbox_size;
6479 	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6480 	if (!wl->mbox) {
6481 		ret = -ENOMEM;
6482 		goto err_fwlog;
6483 	}
6484 
6485 	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6486 	if (!wl->buffer_32) {
6487 		ret = -ENOMEM;
6488 		goto err_mbox;
6489 	}
6490 
6491 	return hw;
6492 
6493 err_mbox:
6494 	kfree(wl->mbox);
6495 
6496 err_fwlog:
6497 	free_page((unsigned long)wl->fwlog);
6498 
6499 err_dummy_packet:
6500 	dev_kfree_skb(wl->dummy_packet);
6501 
6502 err_aggr:
6503 	free_pages((unsigned long)wl->aggr_buf, order);
6504 
6505 err_wq:
6506 	destroy_workqueue(wl->freezable_wq);
6507 
6508 err_hw:
6509 	wl1271_debugfs_exit(wl);
6510 	kfree(wl->priv);
6511 
6512 err_priv_alloc:
6513 	ieee80211_free_hw(hw);
6514 
6515 err_hw_alloc:
6516 
6517 	return ERR_PTR(ret);
6518 }
6519 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6520 
wlcore_free_hw(struct wl1271 * wl)6521 int wlcore_free_hw(struct wl1271 *wl)
6522 {
6523 	/* Unblock any fwlog readers */
6524 	mutex_lock(&wl->mutex);
6525 	wl->fwlog_size = -1;
6526 	mutex_unlock(&wl->mutex);
6527 
6528 	wlcore_sysfs_free(wl);
6529 
6530 	kfree(wl->buffer_32);
6531 	kfree(wl->mbox);
6532 	free_page((unsigned long)wl->fwlog);
6533 	dev_kfree_skb(wl->dummy_packet);
6534 	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6535 
6536 	wl1271_debugfs_exit(wl);
6537 
6538 	vfree(wl->fw);
6539 	wl->fw = NULL;
6540 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6541 	kfree(wl->nvs);
6542 	wl->nvs = NULL;
6543 
6544 	kfree(wl->raw_fw_status);
6545 	kfree(wl->fw_status);
6546 	kfree(wl->tx_res_if);
6547 	destroy_workqueue(wl->freezable_wq);
6548 
6549 	kfree(wl->priv);
6550 	ieee80211_free_hw(wl->hw);
6551 
6552 	return 0;
6553 }
6554 EXPORT_SYMBOL_GPL(wlcore_free_hw);
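/*
 * Minimal usage sketch of the alloc/probe/free API above, loosely modelled
 * on how a lower driver (e.g. wl18xx) consumes it. It is illustrative only
 * and never compiled: struct example_priv and the buffer sizes are
 * hypothetical placeholders.
 */
#if 0
static int example_lower_driver_probe(struct platform_device *pdev)
{
	struct ieee80211_hw *hw;
	struct wl1271 *wl;
	int ret;

	hw = wlcore_alloc_hw(sizeof(struct example_priv), SZ_64K, SZ_4K);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	wl = hw->priv;
	/* the lower driver fills wl->ops, wl->ptable, wl->num_tx_desc, ... */

	ret = wlcore_probe(wl, pdev);
	if (ret)
		wlcore_free_hw(wl);

	return ret;
}
#endif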
6555 
6556 #ifdef CONFIG_PM
6557 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6558 	.flags = WIPHY_WOWLAN_ANY,
6559 	.n_patterns = WL1271_MAX_RX_FILTERS,
6560 	.pattern_min_len = 1,
6561 	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6562 };
6563 #endif
6564 
wlcore_hardirq(int irq,void * cookie)6565 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6566 {
6567 	return IRQ_WAKE_THREAD;
6568 }
6569 
wlcore_nvs_cb(const struct firmware * fw,void * context)6570 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6571 {
6572 	struct wl1271 *wl = context;
6573 	struct platform_device *pdev = wl->pdev;
6574 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6575 	struct resource *res;
6576 
6577 	int ret;
6578 	irq_handler_t hardirq_fn = NULL;
6579 
6580 	if (fw) {
6581 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6582 		if (!wl->nvs) {
6583 			wl1271_error("Could not allocate nvs data");
6584 			goto out;
6585 		}
6586 		wl->nvs_len = fw->size;
6587 	} else if (pdev_data->family->nvs_name) {
6588 		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6589 			     pdev_data->family->nvs_name);
6590 		wl->nvs = NULL;
6591 		wl->nvs_len = 0;
6592 	} else {
6593 		wl->nvs = NULL;
6594 		wl->nvs_len = 0;
6595 	}
6596 
6597 	ret = wl->ops->setup(wl);
6598 	if (ret < 0)
6599 		goto out_free_nvs;
6600 
6601 	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6602 
6603 	/* adjust some runtime configuration parameters */
6604 	wlcore_adjust_conf(wl);
6605 
6606 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6607 	if (!res) {
6608 		wl1271_error("Could not get IRQ resource");
6609 		goto out_free_nvs;
6610 	}
6611 
6612 	wl->irq = res->start;
6613 	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6614 	wl->if_ops = pdev_data->if_ops;
6615 
6616 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6617 		hardirq_fn = wlcore_hardirq;
6618 	else
6619 		wl->irq_flags |= IRQF_ONESHOT;
6620 
6621 	ret = wl12xx_set_power_on(wl);
6622 	if (ret < 0)
6623 		goto out_free_nvs;
6624 
6625 	ret = wl12xx_get_hw_info(wl);
6626 	if (ret < 0) {
6627 		wl1271_error("couldn't get hw info");
6628 		wl1271_power_off(wl);
6629 		goto out_free_nvs;
6630 	}
6631 
6632 	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6633 				   wl->irq_flags, pdev->name, wl);
6634 	if (ret < 0) {
6635 		wl1271_error("interrupt configuration failed");
6636 		wl1271_power_off(wl);
6637 		goto out_free_nvs;
6638 	}
6639 
6640 #ifdef CONFIG_PM
6641 	device_init_wakeup(wl->dev, true);
6642 
6643 	ret = enable_irq_wake(wl->irq);
6644 	if (!ret) {
6645 		wl->irq_wake_enabled = true;
6646 		if (pdev_data->pwr_in_suspend)
6647 			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6648 	}
6649 
6650 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
6651 	if (res) {
6652 		wl->wakeirq = res->start;
6653 		wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK;
6654 		ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq);
6655 		if (ret)
6656 			wl->wakeirq = -ENODEV;
6657 	} else {
6658 		wl->wakeirq = -ENODEV;
6659 	}
6660 #endif
6661 	disable_irq(wl->irq);
6662 	wl1271_power_off(wl);
6663 
6664 	ret = wl->ops->identify_chip(wl);
6665 	if (ret < 0)
6666 		goto out_irq;
6667 
6668 	ret = wl1271_init_ieee80211(wl);
6669 	if (ret)
6670 		goto out_irq;
6671 
6672 	ret = wl1271_register_hw(wl);
6673 	if (ret)
6674 		goto out_irq;
6675 
6676 	ret = wlcore_sysfs_init(wl);
6677 	if (ret)
6678 		goto out_unreg;
6679 
6680 	wl->initialized = true;
6681 	goto out;
6682 
6683 out_unreg:
6684 	wl1271_unregister_hw(wl);
6685 
6686 out_irq:
6687 	if (wl->wakeirq >= 0)
6688 		dev_pm_clear_wake_irq(wl->dev);
6689 	device_init_wakeup(wl->dev, false);
6690 	free_irq(wl->irq, wl);
6691 
6692 out_free_nvs:
6693 	kfree(wl->nvs);
6694 
6695 out:
6696 	release_firmware(fw);
6697 	complete_all(&wl->nvs_loading_complete);
6698 }
6699 
wlcore_runtime_suspend(struct device * dev)6700 static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
6701 {
6702 	struct wl1271 *wl = dev_get_drvdata(dev);
6703 	struct wl12xx_vif *wlvif;
6704 	int error;
6705 
6706 	/* We do not enter elp sleep in PLT mode */
6707 	if (wl->plt)
6708 		return 0;
6709 
6710 	/* Nothing to do if no ELP mode requested */
6711 	if (wl->sleep_auth != WL1271_PSM_ELP)
6712 		return 0;
6713 
6714 	wl12xx_for_each_wlvif(wl, wlvif) {
6715 		if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
6716 		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
6717 			return -EBUSY;
6718 	}
6719 
6720 	wl1271_debug(DEBUG_PSM, "chip to elp");
6721 	error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
6722 	if (error < 0) {
6723 		wl12xx_queue_recovery_work(wl);
6724 
6725 		return error;
6726 	}
6727 
6728 	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6729 
6730 	return 0;
6731 }
6732 
wlcore_runtime_resume(struct device * dev)6733 static int __maybe_unused wlcore_runtime_resume(struct device *dev)
6734 {
6735 	struct wl1271 *wl = dev_get_drvdata(dev);
6736 	DECLARE_COMPLETION_ONSTACK(compl);
6737 	unsigned long flags;
6738 	int ret;
6739 	unsigned long start_time = jiffies;
6740 	bool recovery = false;
6741 
6742 	/* Nothing to do if the chip is not in ELP */
6743 	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
6744 		return 0;
6745 
6746 	wl1271_debug(DEBUG_PSM, "waking up chip from elp");
6747 
6748 	spin_lock_irqsave(&wl->wl_lock, flags);
6749 	wl->elp_compl = &compl;
6750 	spin_unlock_irqrestore(&wl->wl_lock, flags);
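	/* the interrupt path completes wl->elp_compl once the chip wakes */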
6751 
6752 	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
6753 	if (ret < 0) {
6754 		recovery = true;
6755 	} else if (!test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) {
6756 		ret = wait_for_completion_timeout(&compl,
6757 			msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
6758 		if (ret == 0) {
6759 			wl1271_warning("ELP wakeup timeout!");
6760 			recovery = true;
6761 		}
6762 	}
6763 
6764 	spin_lock_irqsave(&wl->wl_lock, flags);
6765 	wl->elp_compl = NULL;
6766 	spin_unlock_irqrestore(&wl->wl_lock, flags);
6767 	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6768 
6769 	if (recovery) {
6770 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
6771 		wl12xx_queue_recovery_work(wl);
6772 	} else {
6773 		wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
6774 			     jiffies_to_msecs(jiffies - start_time));
6775 	}
6776 
6777 	return 0;
6778 }
6779 
6780 static const struct dev_pm_ops wlcore_pm_ops = {
6781 	SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
6782 			   wlcore_runtime_resume,
6783 			   NULL)
6784 };
6785 
wlcore_probe(struct wl1271 * wl,struct platform_device * pdev)6786 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6787 {
6788 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6789 	const char *nvs_name;
6790 	int ret = 0;
6791 
6792 	if (!wl->ops || !wl->ptable || !pdev_data)
6793 		return -EINVAL;
6794 
6795 	wl->dev = &pdev->dev;
6796 	wl->pdev = pdev;
6797 	platform_set_drvdata(pdev, wl);
6798 
6799 	if (pdev_data->family && pdev_data->family->nvs_name) {
6800 		nvs_name = pdev_data->family->nvs_name;
6801 		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
6802 					      nvs_name, &pdev->dev, GFP_KERNEL,
6803 					      wl, wlcore_nvs_cb);
6804 		if (ret < 0) {
6805 			wl1271_error("request_firmware_nowait failed for %s: %d",
6806 				     nvs_name, ret);
6807 			complete_all(&wl->nvs_loading_complete);
6808 		}
6809 	} else {
6810 		wlcore_nvs_cb(NULL, wl);
6811 	}
6812 
6813 	wl->dev->driver->pm = &wlcore_pm_ops;
6814 	pm_runtime_set_autosuspend_delay(wl->dev, 50);
6815 	pm_runtime_use_autosuspend(wl->dev);
6816 	pm_runtime_enable(wl->dev);
6817 
6818 	return ret;
6819 }
6820 EXPORT_SYMBOL_GPL(wlcore_probe);
6821 
wlcore_remove(struct platform_device * pdev)6822 void wlcore_remove(struct platform_device *pdev)
6823 {
6824 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6825 	struct wl1271 *wl = platform_get_drvdata(pdev);
6826 	int error;
6827 
6828 	error = pm_runtime_get_sync(wl->dev);
6829 	if (error < 0)
6830 		dev_warn(wl->dev, "PM runtime failed: %i\n", error);
6831 
6832 	wl->dev->driver->pm = NULL;
6833 
6834 	if (pdev_data->family && pdev_data->family->nvs_name)
6835 		wait_for_completion(&wl->nvs_loading_complete);
6836 	if (!wl->initialized)
6837 		return;
6838 
6839 	if (wl->wakeirq >= 0) {
6840 		dev_pm_clear_wake_irq(wl->dev);
6841 		wl->wakeirq = -ENODEV;
6842 	}
6843 
6844 	device_init_wakeup(wl->dev, false);
6845 
6846 	if (wl->irq_wake_enabled)
6847 		disable_irq_wake(wl->irq);
6848 
6849 	wl1271_unregister_hw(wl);
6850 
6851 	pm_runtime_put_sync(wl->dev);
6852 	pm_runtime_dont_use_autosuspend(wl->dev);
6853 	pm_runtime_disable(wl->dev);
6854 
6855 	free_irq(wl->irq, wl);
6856 	wlcore_free_hw(wl);
6857 }
6858 EXPORT_SYMBOL_GPL(wlcore_remove);
6859 
6860 u32 wl12xx_debug_level = DEBUG_NONE;
6861 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6862 module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
6863 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6864 
6865 module_param_named(fwlog, fwlog_param, charp, 0);
6866 MODULE_PARM_DESC(fwlog,
6867 		 "FW logger options: continuous, dbgpins or disable");
6868 
6869 module_param(fwlog_mem_blocks, int, 0600);
6870 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6871 
6872 module_param(bug_on_recovery, int, 0600);
6873 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6874 
6875 module_param(no_recovery, int, 0600);
6876 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
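/*
 * Example module load (illustrative; pick the values you actually need):
 *   modprobe wlcore debug_level=0x3 fwlog=continuous no_recovery=1
 */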
6877 
6878 MODULE_DESCRIPTION("TI WLAN core driver");
6879 MODULE_LICENSE("GPL");
6880 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6881 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6882