1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This file is part of wlcore
4  *
5  * Copyright (C) 2008-2010 Nokia Corporation
6  * Copyright (C) 2011-2013 Texas Instruments Inc.
7  */
8 
9 #include <linux/module.h>
10 #include <linux/firmware.h>
11 #include <linux/etherdevice.h>
12 #include <linux/vmalloc.h>
13 #include <linux/interrupt.h>
14 #include <linux/irq.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/pm_wakeirq.h>
17 
18 #include "wlcore.h"
19 #include "debug.h"
20 #include "wl12xx_80211.h"
21 #include "io.h"
22 #include "tx.h"
23 #include "ps.h"
24 #include "init.h"
25 #include "debugfs.h"
26 #include "testmode.h"
27 #include "vendor_cmd.h"
28 #include "scan.h"
29 #include "hw_ops.h"
30 #include "sysfs.h"
31 
32 #define WL1271_BOOT_RETRIES 3
33 #define WL1271_WAKEUP_TIMEOUT 500
34 
35 static char *fwlog_param;
36 static int fwlog_mem_blocks = -1;
37 static int bug_on_recovery = -1;
38 static int no_recovery     = -1;
39 
40 static void __wl1271_op_remove_interface(struct wl1271 *wl,
41 					 struct ieee80211_vif *vif,
42 					 bool reset_tx_queues);
43 static void wlcore_op_stop_locked(struct wl1271 *wl);
44 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
45 
46 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
47 {
48 	int ret;
49 
50 	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
51 		return -EINVAL;
52 
53 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
54 		return 0;
55 
56 	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
57 		return 0;
58 
59 	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
60 	if (ret < 0)
61 		return ret;
62 
63 	wl1271_info("Association completed.");
64 	return 0;
65 }
66 
67 static void wl1271_reg_notify(struct wiphy *wiphy,
68 			      struct regulatory_request *request)
69 {
70 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
71 	struct wl1271 *wl = hw->priv;
72 
73 	/* copy the current dfs region */
74 	if (request)
75 		wl->dfs_region = request->dfs_region;
76 
77 	wlcore_regdomain_config(wl);
78 }
79 
80 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
81 				   bool enable)
82 {
83 	int ret = 0;
84 
85 	/* we should hold wl->mutex */
86 	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
87 	if (ret < 0)
88 		goto out;
89 
90 	if (enable)
91 		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
92 	else
93 		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
94 out:
95 	return ret;
96 }
97 
98 /*
99  * this function is called when the rx_streaming interval
100  * has been changed or rx_streaming should be disabled
101  */
102 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
103 {
104 	int ret = 0;
105 	int period = wl->conf.rx_streaming.interval;
106 
107 	/* don't reconfigure if rx_streaming is disabled */
108 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
109 		goto out;
110 
111 	/* reconfigure/disable according to new streaming_period */
112 	if (period &&
113 	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
114 	    (wl->conf.rx_streaming.always ||
115 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
116 		ret = wl1271_set_rx_streaming(wl, wlvif, true);
117 	else {
118 		ret = wl1271_set_rx_streaming(wl, wlvif, false);
119 		/* don't cancel_work_sync since we might deadlock */
120 		timer_delete_sync(&wlvif->rx_streaming_timer);
121 	}
122 out:
123 	return ret;
124 }
125 
126 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
127 {
128 	int ret;
129 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
130 						rx_streaming_enable_work);
131 	struct wl1271 *wl = wlvif->wl;
132 
133 	mutex_lock(&wl->mutex);
134 
135 	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
136 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
137 	    (!wl->conf.rx_streaming.always &&
138 	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
139 		goto out;
140 
141 	if (!wl->conf.rx_streaming.interval)
142 		goto out;
143 
144 	ret = pm_runtime_resume_and_get(wl->dev);
145 	if (ret < 0)
146 		goto out;
147 
148 	ret = wl1271_set_rx_streaming(wl, wlvif, true);
149 	if (ret < 0)
150 		goto out_sleep;
151 
152 	/* stop it after some time of inactivity */
153 	mod_timer(&wlvif->rx_streaming_timer,
154 		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
155 
156 out_sleep:
157 	pm_runtime_mark_last_busy(wl->dev);
158 	pm_runtime_put_autosuspend(wl->dev);
159 out:
160 	mutex_unlock(&wl->mutex);
161 }
162 
163 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
164 {
165 	int ret;
166 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
167 						rx_streaming_disable_work);
168 	struct wl1271 *wl = wlvif->wl;
169 
170 	mutex_lock(&wl->mutex);
171 
172 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
173 		goto out;
174 
175 	ret = pm_runtime_resume_and_get(wl->dev);
176 	if (ret < 0)
177 		goto out;
178 
179 	ret = wl1271_set_rx_streaming(wl, wlvif, false);
180 	if (ret)
181 		goto out_sleep;
182 
183 out_sleep:
184 	pm_runtime_mark_last_busy(wl->dev);
185 	pm_runtime_put_autosuspend(wl->dev);
186 out:
187 	mutex_unlock(&wl->mutex);
188 }
189 
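/*
 * Rx streaming inactivity timer: once it expires, queue the work that
 * disables rx streaming (the actual command is sent from work context,
 * where wl->mutex can be taken).
 */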
190 static void wl1271_rx_streaming_timer(struct timer_list *t)
191 {
192 	struct wl12xx_vif *wlvif = timer_container_of(wlvif, t,
193 						      rx_streaming_timer);
194 	struct wl1271 *wl = wlvif->wl;
195 	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
196 }
197 
198 /* wl->mutex must be taken */
199 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
200 {
201 	/* if the watchdog is not armed, don't do anything */
202 	if (wl->tx_allocated_blocks == 0)
203 		return;
204 
205 	cancel_delayed_work(&wl->tx_watchdog_work);
206 	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
207 		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
208 }
209 
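/*
 * Worker for rate-control updates: for mesh interfaces the new HT
 * capabilities are pushed to the firmware via ACX; otherwise the
 * chip-specific sta_rc_update op is invoked.
 */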
210 static void wlcore_rc_update_work(struct work_struct *work)
211 {
212 	int ret;
213 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
214 						rc_update_work);
215 	struct wl1271 *wl = wlvif->wl;
216 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
217 
218 	mutex_lock(&wl->mutex);
219 
220 	if (unlikely(wl->state != WLCORE_STATE_ON))
221 		goto out;
222 
223 	ret = pm_runtime_resume_and_get(wl->dev);
224 	if (ret < 0)
225 		goto out;
226 
227 	if (ieee80211_vif_is_mesh(vif)) {
228 		ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
229 						     true, wlvif->sta.hlid);
230 		if (ret < 0)
231 			goto out_sleep;
232 	} else {
233 		wlcore_hw_sta_rc_update(wl, wlvif);
234 	}
235 
236 out_sleep:
237 	pm_runtime_mark_last_busy(wl->dev);
238 	pm_runtime_put_autosuspend(wl->dev);
239 out:
240 	mutex_unlock(&wl->mutex);
241 }
242 
243 static void wl12xx_tx_watchdog_work(struct work_struct *work)
244 {
245 	struct delayed_work *dwork;
246 	struct wl1271 *wl;
247 
248 	dwork = to_delayed_work(work);
249 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
250 
251 	mutex_lock(&wl->mutex);
252 
253 	if (unlikely(wl->state != WLCORE_STATE_ON))
254 		goto out;
255 
256 	/* Tx went out in the meantime - everything is ok */
257 	if (unlikely(wl->tx_allocated_blocks == 0))
258 		goto out;
259 
260 	/*
261 	 * if a ROC is in progress, we might not have any Tx for a long
262 	 * time (e.g. pending Tx on the non-ROC channels)
263 	 */
264 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
265 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
266 			     wl->conf.tx.tx_watchdog_timeout);
267 		wl12xx_rearm_tx_watchdog_locked(wl);
268 		goto out;
269 	}
270 
271 	/*
272 	 * if a scan is in progress, we might not have any Tx for a long
273 	 * time
274 	 */
275 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
276 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
277 			     wl->conf.tx.tx_watchdog_timeout);
278 		wl12xx_rearm_tx_watchdog_locked(wl);
279 		goto out;
280 	}
281 
282 	/*
283 	* AP might cache a frame for a long time for a sleeping station,
284 	* so rearm the timer if there's an AP interface with stations. If
285 	* Tx is genuinely stuck we will hopefully discover it when all
286 	* stations are removed due to inactivity.
287 	*/
288 	if (wl->active_sta_count) {
289 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
290 			     " %d stations",
291 			      wl->conf.tx.tx_watchdog_timeout,
292 			      wl->active_sta_count);
293 		wl12xx_rearm_tx_watchdog_locked(wl);
294 		goto out;
295 	}
296 
297 	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
298 		     wl->conf.tx.tx_watchdog_timeout);
299 	wl12xx_queue_recovery_work(wl);
300 
301 out:
302 	mutex_unlock(&wl->mutex);
303 }
304 
305 static void wlcore_adjust_conf(struct wl1271 *wl)
306 {
307 
308 	if (fwlog_param) {
309 		if (!strcmp(fwlog_param, "continuous")) {
310 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
311 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
312 		} else if (!strcmp(fwlog_param, "dbgpins")) {
313 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
314 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
315 		} else if (!strcmp(fwlog_param, "disable")) {
316 			wl->conf.fwlog.mem_blocks = 0;
317 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
318 		} else {
319 			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
320 		}
321 	}
322 
323 	if (bug_on_recovery != -1)
324 		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
325 
326 	if (no_recovery != -1)
327 		wl->conf.recovery.no_recovery = (u8) no_recovery;
328 }
329 
330 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
331 					struct wl12xx_vif *wlvif,
332 					u8 hlid, u8 tx_pkts)
333 {
334 	bool fw_ps;
335 
336 	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
337 
338 	/*
339 	 * Wake up from high level PS if the STA is asleep with too few
340 	 * packets in FW or if the STA is awake.
341 	 */
342 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
343 		wl12xx_ps_link_end(wl, wlvif, hlid);
344 
345 	/*
346 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
347 	 * Make an exception if this is the only connected link. In this
348 	 * case FW-memory congestion is less of a problem.
349 	 * Note that a single connected STA means 2*ap_count + 1 active links,
350 	 * since we must account for the global and broadcast AP links
351 	 * for each AP. The "fw_ps" check assures us the other link is a STA
352 	 * connected to the AP. Otherwise the FW would not set the PSM bit.
353 	 */
354 	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
355 		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
356 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
357 }
358 
359 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
360 					   struct wl12xx_vif *wlvif,
361 					   struct wl_fw_status *status)
362 {
363 	unsigned long cur_fw_ps_map;
364 	u8 hlid;
365 
366 	cur_fw_ps_map = status->link_ps_bitmap;
367 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
368 		wl1271_debug(DEBUG_PSM,
369 			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
370 			     wl->ap_fw_ps_map, cur_fw_ps_map,
371 			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
372 
373 		wl->ap_fw_ps_map = cur_fw_ps_map;
374 	}
375 
376 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
377 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
378 					    wl->links[hlid].allocated_pkts);
379 }
380 
381 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
382 {
383 	struct wl12xx_vif *wlvifsta;
384 	struct wl12xx_vif *wlvifap;
385 	struct wl12xx_vif *wlvif;
386 	u32 old_tx_blk_count = wl->tx_blocks_available;
387 	int avail, freed_blocks;
388 	int i;
389 	int ret;
390 	struct wl1271_link *lnk;
391 
392 	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
393 				   wl->raw_fw_status,
394 				   wl->fw_status_len, false);
395 	if (ret < 0)
396 		return ret;
397 
398 	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, status);
399 
400 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
401 		     "drv_rx_counter = %d, tx_results_counter = %d)",
402 		     status->intr,
403 		     status->fw_rx_counter,
404 		     status->drv_rx_counter,
405 		     status->tx_results_counter);
406 
407 	for (i = 0; i < NUM_TX_QUEUES; i++) {
408 		/* prevent wrap-around in freed-packets counter */
409 		wl->tx_allocated_pkts[i] -=
410 				(status->counters.tx_released_pkts[i] -
411 				wl->tx_pkts_freed[i]) & 0xff;
412 
413 		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
414 	}
415 
416 	/* Find an authorized STA vif */
417 	wlvifsta = NULL;
418 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
419 		if (wlvif->sta.hlid != WL12XX_INVALID_LINK_ID &&
420 		    test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags)) {
421 			wlvifsta = wlvif;
422 			break;
423 		}
424 	}
425 
426 	/* Find a started AP vif */
427 	wlvifap = NULL;
428 	wl12xx_for_each_wlvif(wl, wlvif) {
429 		if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
430 		    wlvif->inconn_count == 0 &&
431 		    test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
432 			wlvifap = wlvif;
433 			break;
434 		}
435 	}
436 
437 	for_each_set_bit(i, wl->links_map, wl->num_links) {
438 		u16 diff16, sec_pn16;
439 		u8 diff, tx_lnk_free_pkts;
440 
441 		lnk = &wl->links[i];
442 
443 		/* prevent wrap-around in freed-packets counter */
444 		tx_lnk_free_pkts = status->counters.tx_lnk_free_pkts[i];
445 		diff = (tx_lnk_free_pkts - lnk->prev_freed_pkts) & 0xff;
446 
447 		if (diff) {
448 			lnk->allocated_pkts -= diff;
449 			lnk->prev_freed_pkts = tx_lnk_free_pkts;
450 		}
451 
452 		/* Get the current sec_pn16 value if present */
453 		if (status->counters.tx_lnk_sec_pn16)
454 			sec_pn16 = __le16_to_cpu(status->counters.tx_lnk_sec_pn16[i]);
455 		else
456 			sec_pn16 = 0;
457 		/* prevent wrap-around in pn16 counter */
458 		diff16 = (sec_pn16 - lnk->prev_sec_pn16) & 0xffff;
459 
460 		/* FIXME: since free_pkts is a 8-bit counter of packets that
461 		 * rolls over, it can become zero. If it is zero, then we
462 		 * omit processing below. Is that really correct?
463 		 */
464 		if (tx_lnk_free_pkts <= 0)
465 			continue;
466 
467 		/* For a station that has an authorized link: */
468 		if (wlvifsta && wlvifsta->sta.hlid == i) {
469 			if (wlvifsta->encryption_type == KEY_TKIP ||
470 			    wlvifsta->encryption_type == KEY_AES) {
471 				if (diff16) {
472 					lnk->prev_sec_pn16 = sec_pn16;
473 					/* accumulate the prev_freed_pkts
474 					 * counter according to the PN from
475 					 * firmware
476 					 */
477 					lnk->total_freed_pkts += diff16;
478 				}
479 			} else {
480 				if (diff)
481 					/* accumulate the prev_freed_pkts
482 					 * counter according to the free packets
483 					 * count from firmware
484 					 */
485 					lnk->total_freed_pkts += diff;
486 			}
487 		}
488 
489 		/* For an AP that has been started */
490 		if (wlvifap && test_bit(i, wlvifap->ap.sta_hlid_map)) {
491 			if (wlvifap->encryption_type == KEY_TKIP ||
492 			    wlvifap->encryption_type == KEY_AES) {
493 				if (diff16) {
494 					lnk->prev_sec_pn16 = sec_pn16;
495 					/* accumulate the prev_freed_pkts
496 					 * counter according to the PN from
497 					 * firmware
498 					 */
499 					lnk->total_freed_pkts += diff16;
500 				}
501 			} else {
502 				if (diff)
503 					/* accumulate the prev_freed_pkts
504 					 * counter according to the free packets
505 					 * count from firmware
506 					 */
507 					lnk->total_freed_pkts += diff;
508 			}
509 		}
510 	}
511 
512 	/* prevent wrap-around in total blocks counter */
513 	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
514 		freed_blocks = status->total_released_blks -
515 			       wl->tx_blocks_freed;
516 	else
517 		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
518 			       status->total_released_blks;
519 
520 	wl->tx_blocks_freed = status->total_released_blks;
521 
522 	wl->tx_allocated_blocks -= freed_blocks;
523 
524 	/*
525 	 * If the FW freed some blocks:
526 	 * If we still have allocated blocks - re-arm the timer, Tx is
527 	 * not stuck. Otherwise, cancel the timer (no Tx currently).
528 	 */
529 	if (freed_blocks) {
530 		if (wl->tx_allocated_blocks)
531 			wl12xx_rearm_tx_watchdog_locked(wl);
532 		else
533 			cancel_delayed_work(&wl->tx_watchdog_work);
534 	}
535 
536 	avail = status->tx_total - wl->tx_allocated_blocks;
537 
538 	/*
539 	 * The FW might change the total number of TX memblocks before
540 	 * we get a notification about blocks being released. Thus, the
541 	 * available blocks calculation might yield a temporary result
542 	 * which is lower than the actual available blocks. Keeping in
543 	 * mind that only blocks that were allocated can be moved from
544 	 * TX to RX, tx_blocks_available should never decrease here.
545 	 */
546 	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
547 				      avail);
548 
549 	/* if more blocks are available now, tx work can be scheduled */
550 	if (wl->tx_blocks_available > old_tx_blk_count)
551 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
552 
553 	/* for AP update num of allocated TX blocks per link and ps status */
554 	wl12xx_for_each_wlvif_ap(wl, wlvif) {
555 		wl12xx_irq_update_links_status(wl, wlvif, status);
556 	}
557 
558 	/* update the host-chipset time offset */
559 	wl->time_offset = (ktime_get_boottime_ns() >> 10) -
560 		(s64)(status->fw_localtime);
561 
562 	wl->fw_fast_lnk_map = status->link_fast_bitmap;
563 
564 	return 0;
565 }
566 
567 static void wl1271_flush_deferred_work(struct wl1271 *wl)
568 {
569 	struct sk_buff *skb;
570 
571 	/* Pass all received frames to the network stack */
572 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
573 		ieee80211_rx_ni(wl->hw, skb);
574 
575 	/* Return sent skbs to the network stack */
576 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
577 		ieee80211_tx_status_ni(wl->hw, skb);
578 }
579 
580 static void wl1271_netstack_work(struct work_struct *work)
581 {
582 	struct wl1271 *wl =
583 		container_of(work, struct wl1271, netstack_work);
584 
585 	do {
586 		wl1271_flush_deferred_work(wl);
587 	} while (skb_queue_len(&wl->deferred_rx_queue));
588 }
589 
590 #define WL1271_IRQ_MAX_LOOPS 256
591 
592 static int wlcore_irq_locked(struct wl1271 *wl)
593 {
594 	int ret = 0;
595 	u32 intr;
596 	int loopcount = WL1271_IRQ_MAX_LOOPS;
597 	bool run_tx_queue = true;
598 	bool done = false;
599 	unsigned int defer_count;
600 	unsigned long flags;
601 
602 	/*
603 	 * If an edge-triggered interrupt must be used, we cannot iterate
604 	 * more than once without introducing race conditions with the hardirq.
605 	 */
606 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
607 		loopcount = 1;
608 
609 	wl1271_debug(DEBUG_IRQ, "IRQ work");
610 
611 	if (unlikely(wl->state != WLCORE_STATE_ON))
612 		goto out;
613 
614 	ret = pm_runtime_resume_and_get(wl->dev);
615 	if (ret < 0)
616 		goto out;
617 
618 	while (!done && loopcount--) {
619 		smp_mb__after_atomic();
620 
621 		ret = wlcore_fw_status(wl, wl->fw_status);
622 		if (ret < 0)
623 			goto err_ret;
624 
625 		wlcore_hw_tx_immediate_compl(wl);
626 
627 		intr = wl->fw_status->intr;
628 		intr &= WLCORE_ALL_INTR_MASK;
629 		if (!intr) {
630 			done = true;
631 			continue;
632 		}
633 
634 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
635 			wl1271_error("HW watchdog interrupt received! starting recovery.");
636 			wl->watchdog_recovery = true;
637 			ret = -EIO;
638 
639 			/* restarting the chip. ignore any other interrupt. */
640 			goto err_ret;
641 		}
642 
643 		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
644 			wl1271_error("SW watchdog interrupt received! "
645 				     "starting recovery.");
646 			wl->watchdog_recovery = true;
647 			ret = -EIO;
648 
649 			/* restarting the chip. ignore any other interrupt. */
650 			goto err_ret;
651 		}
652 
653 		if (likely(intr & WL1271_ACX_INTR_DATA)) {
654 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
655 
656 			ret = wlcore_rx(wl, wl->fw_status);
657 			if (ret < 0)
658 				goto err_ret;
659 
660 			/* Check if any tx blocks were freed */
661 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
662 				if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
663 					if (!wl1271_tx_total_queue_count(wl))
664 						run_tx_queue = false;
665 					spin_unlock_irqrestore(&wl->wl_lock, flags);
666 				}
667 
668 				/*
669 				 * In order to avoid starvation of the TX path,
670 				 * call the work function directly.
671 				 */
672 				if (run_tx_queue) {
673 					ret = wlcore_tx_work_locked(wl);
674 					if (ret < 0)
675 						goto err_ret;
676 				}
677 			}
678 
679 			/* check for tx results */
680 			ret = wlcore_hw_tx_delayed_compl(wl);
681 			if (ret < 0)
682 				goto err_ret;
683 
684 			/* Make sure the deferred queues don't get too long */
685 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
686 				      skb_queue_len(&wl->deferred_rx_queue);
687 			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
688 				wl1271_flush_deferred_work(wl);
689 		}
690 
691 		if (intr & WL1271_ACX_INTR_EVENT_A) {
692 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
693 			ret = wl1271_event_handle(wl, 0);
694 			if (ret < 0)
695 				goto err_ret;
696 		}
697 
698 		if (intr & WL1271_ACX_INTR_EVENT_B) {
699 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
700 			ret = wl1271_event_handle(wl, 1);
701 			if (ret < 0)
702 				goto err_ret;
703 		}
704 
705 		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
706 			wl1271_debug(DEBUG_IRQ,
707 				     "WL1271_ACX_INTR_INIT_COMPLETE");
708 
709 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
710 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
711 	}
712 
713 err_ret:
714 	pm_runtime_mark_last_busy(wl->dev);
715 	pm_runtime_put_autosuspend(wl->dev);
716 
717 out:
718 	return ret;
719 }
720 
721 static irqreturn_t wlcore_irq(int irq, void *cookie)
722 {
723 	int ret;
724 	unsigned long flags;
725 	struct wl1271 *wl = cookie;
726 	bool queue_tx_work = true;
727 
728 	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
729 
730 	/* complete the ELP completion */
731 	if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) {
732 		spin_lock_irqsave(&wl->wl_lock, flags);
733 		if (wl->elp_compl)
734 			complete(wl->elp_compl);
735 		spin_unlock_irqrestore(&wl->wl_lock, flags);
736 	}
737 
738 	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
739 		/* don't enqueue a work right now. mark it as pending */
740 		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
741 		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
742 		spin_lock_irqsave(&wl->wl_lock, flags);
743 		disable_irq_nosync(wl->irq);
744 		pm_wakeup_event(wl->dev, 0);
745 		spin_unlock_irqrestore(&wl->wl_lock, flags);
746 		goto out_handled;
747 	}
748 
749 	/* TX might be handled here, avoid redundant work */
750 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
751 	cancel_work_sync(&wl->tx_work);
752 
753 	mutex_lock(&wl->mutex);
754 
755 	ret = wlcore_irq_locked(wl);
756 	if (ret)
757 		wl12xx_queue_recovery_work(wl);
758 
759 	/* In case TX was not handled in wlcore_irq_locked(), queue TX work */
760 	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
761 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
762 		if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
763 			if (!wl1271_tx_total_queue_count(wl))
764 				queue_tx_work = false;
765 			spin_unlock_irqrestore(&wl->wl_lock, flags);
766 		}
767 		if (queue_tx_work)
768 			ieee80211_queue_work(wl->hw, &wl->tx_work);
769 	}
770 
771 	mutex_unlock(&wl->mutex);
772 
773 out_handled:
774 	clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
775 
776 	return IRQ_HANDLED;
777 }
778 
779 struct vif_counter_data {
780 	u8 counter;
781 
782 	struct ieee80211_vif *cur_vif;
783 	bool cur_vif_running;
784 };
785 
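/*
 * Interface iterator callback: count the active vifs and note whether
 * the vif we are interested in is among them.
 */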
786 static void wl12xx_vif_count_iter(void *data, u8 *mac,
787 				  struct ieee80211_vif *vif)
788 {
789 	struct vif_counter_data *counter = data;
790 
791 	counter->counter++;
792 	if (counter->cur_vif == vif)
793 		counter->cur_vif_running = true;
794 }
795 
796 /* caller must not hold wl->mutex, as it might deadlock */
797 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
798 			       struct ieee80211_vif *cur_vif,
799 			       struct vif_counter_data *data)
800 {
801 	memset(data, 0, sizeof(*data));
802 	data->cur_vif = cur_vif;
803 
804 	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
805 					    wl12xx_vif_count_iter, data);
806 }
807 
808 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
809 {
810 	const struct firmware *fw;
811 	const char *fw_name;
812 	enum wl12xx_fw_type fw_type;
813 	int ret;
814 
815 	if (plt) {
816 		fw_type = WL12XX_FW_TYPE_PLT;
817 		fw_name = wl->plt_fw_name;
818 	} else {
819 		/*
820 		 * we can't call wl12xx_get_vif_count() here because
821 		 * wl->mutex is taken, so use the cached last_vif_count value
822 		 */
823 		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
824 			fw_type = WL12XX_FW_TYPE_MULTI;
825 			fw_name = wl->mr_fw_name;
826 		} else {
827 			fw_type = WL12XX_FW_TYPE_NORMAL;
828 			fw_name = wl->sr_fw_name;
829 		}
830 	}
831 
832 	if (wl->fw_type == fw_type)
833 		return 0;
834 
835 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
836 
837 	ret = request_firmware(&fw, fw_name, wl->dev);
838 
839 	if (ret < 0) {
840 		wl1271_error("could not get firmware %s: %d", fw_name, ret);
841 		return ret;
842 	}
843 
844 	if (fw->size % 4) {
845 		wl1271_error("firmware size is not multiple of 32 bits: %zu",
846 			     fw->size);
847 		ret = -EILSEQ;
848 		goto out;
849 	}
850 
851 	vfree(wl->fw);
852 	wl->fw_type = WL12XX_FW_TYPE_NONE;
853 	wl->fw_len = fw->size;
854 	wl->fw = vmalloc(wl->fw_len);
855 
856 	if (!wl->fw) {
857 		wl1271_error("could not allocate memory for the firmware");
858 		ret = -ENOMEM;
859 		goto out;
860 	}
861 
862 	memcpy(wl->fw, fw->data, wl->fw_len);
863 	ret = 0;
864 	wl->fw_type = fw_type;
865 out:
866 	release_firmware(fw);
867 
868 	return ret;
869 }
870 
871 void wl12xx_queue_recovery_work(struct wl1271 *wl)
872 {
873 	/* Avoid a recursive recovery */
874 	if (wl->state == WLCORE_STATE_ON) {
875 		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
876 				  &wl->flags));
877 
878 		wl->state = WLCORE_STATE_RESTARTING;
879 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
880 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
881 	}
882 }
883 
884 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
885 {
886 	size_t len;
887 
888 	/* Make sure we have enough room */
889 	len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
890 
891 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
892 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
893 	wl->fwlog_size += len;
894 
895 	return len;
896 }
897 
898 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
899 {
900 	u32 end_of_log = 0;
901 	int error;
902 
903 	if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
904 		return;
905 
906 	wl1271_info("Reading FW panic log");
907 
908 	/*
909 	 * Make sure the chip is awake and the logger isn't active.
910 	 * Do not send a stop fwlog command if the fw is hung or if
911 	 * dbgpins are used (due to some fw bug).
912 	 */
913 	error = pm_runtime_resume_and_get(wl->dev);
914 	if (error < 0)
915 		return;
916 	if (!wl->watchdog_recovery &&
917 	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
918 		wl12xx_cmd_stop_fwlog(wl);
919 
920 	/* Traverse the memory blocks linked list */
921 	do {
922 		end_of_log = wlcore_event_fw_logger(wl);
923 		if (end_of_log == 0) {
924 			msleep(100);
925 			end_of_log = wlcore_event_fw_logger(wl);
926 		}
927 	} while (end_of_log != 0);
928 }
929 
930 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
931 				   u8 hlid, struct ieee80211_sta *sta)
932 {
933 	struct wl1271_station *wl_sta;
934 	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
935 
936 	wl_sta = (void *)sta->drv_priv;
937 	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
938 
939 	/*
940 	 * increment the initial seq number on recovery to account for
941 	 * transmitted packets that we haven't yet got in the FW status
942 	 */
943 	if (wlvif->encryption_type == KEY_GEM)
944 		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
945 
946 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
947 		wl_sta->total_freed_pkts += sqn_recovery_padding;
948 }
949 
950 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
951 					struct wl12xx_vif *wlvif,
952 					u8 hlid, const u8 *addr)
953 {
954 	struct ieee80211_sta *sta;
955 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
956 
957 	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
958 		    is_zero_ether_addr(addr)))
959 		return;
960 
961 	rcu_read_lock();
962 	sta = ieee80211_find_sta(vif, addr);
963 	if (sta)
964 		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
965 	rcu_read_unlock();
966 }
967 
968 static void wlcore_print_recovery(struct wl1271 *wl)
969 {
970 	u32 pc = 0;
971 	u32 hint_sts = 0;
972 	int ret;
973 
974 	wl1271_info("Hardware recovery in progress. FW ver: %s",
975 		    wl->chip.fw_ver_str);
976 
977 	/* change partitions momentarily so we can read the FW pc */
978 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
979 	if (ret < 0)
980 		return;
981 
982 	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
983 	if (ret < 0)
984 		return;
985 
986 	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
987 	if (ret < 0)
988 		return;
989 
990 	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
991 				pc, hint_sts, ++wl->recovery_count);
992 
993 	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
994 }
995 
996 
997 static void wl1271_recovery_work(struct work_struct *work)
998 {
999 	struct wl1271 *wl =
1000 		container_of(work, struct wl1271, recovery_work);
1001 	struct wl12xx_vif *wlvif;
1002 	struct ieee80211_vif *vif;
1003 	int error;
1004 
1005 	mutex_lock(&wl->mutex);
1006 
1007 	if (wl->state == WLCORE_STATE_OFF || wl->plt)
1008 		goto out_unlock;
1009 
1010 	error = pm_runtime_resume_and_get(wl->dev);
1011 	if (error < 0)
1012 		wl1271_warning("Enable for recovery failed");
1013 	wlcore_disable_interrupts_nosync(wl);
1014 
1015 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
1016 		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
1017 			wl12xx_read_fwlog_panic(wl);
1018 		wlcore_print_recovery(wl);
1019 	}
1020 
1021 	BUG_ON(wl->conf.recovery.bug_on_recovery &&
1022 	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
1023 
1024 	clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
1025 
1026 	if (wl->conf.recovery.no_recovery) {
1027 		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
1028 		goto out_unlock;
1029 	}
1030 
1031 	/* Prevent spurious TX during FW restart */
1032 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1033 
1034 	/* reboot the chipset */
1035 	while (!list_empty(&wl->wlvif_list)) {
1036 		wlvif = list_first_entry(&wl->wlvif_list,
1037 				       struct wl12xx_vif, list);
1038 		vif = wl12xx_wlvif_to_vif(wlvif);
1039 
1040 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
1041 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
1042 			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
1043 						    vif->bss_conf.bssid);
1044 		}
1045 
1046 		__wl1271_op_remove_interface(wl, vif, false);
1047 	}
1048 
1049 	wlcore_op_stop_locked(wl);
1050 	pm_runtime_mark_last_busy(wl->dev);
1051 	pm_runtime_put_autosuspend(wl->dev);
1052 
1053 	ieee80211_restart_hw(wl->hw);
1054 
1055 	/*
1056 	 * It's safe to enable TX now - the queues are stopped after a request
1057 	 * to restart the HW.
1058 	 */
1059 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1060 
1061 out_unlock:
1062 	wl->watchdog_recovery = false;
1063 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1064 	mutex_unlock(&wl->mutex);
1065 }
1066 
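/* Wake the firmware out of ELP (low-power) state via the ELP control register */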
1067 static int wlcore_fw_wakeup(struct wl1271 *wl)
1068 {
1069 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
1070 }
1071 
1072 static int wl1271_setup(struct wl1271 *wl)
1073 {
1074 	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1075 	if (!wl->raw_fw_status)
1076 		goto err;
1077 
1078 	wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1079 	if (!wl->fw_status)
1080 		goto err;
1081 
1082 	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1083 	if (!wl->tx_res_if)
1084 		goto err;
1085 
1086 	return 0;
1087 err:
1088 	kfree(wl->fw_status);
1089 	kfree(wl->raw_fw_status);
1090 	return -ENOMEM;
1091 }
1092 
1093 static int wl12xx_set_power_on(struct wl1271 *wl)
1094 {
1095 	int ret;
1096 
1097 	msleep(WL1271_PRE_POWER_ON_SLEEP);
1098 	ret = wl1271_power_on(wl);
1099 	if (ret < 0)
1100 		goto out;
1101 	msleep(WL1271_POWER_ON_SLEEP);
1102 	wl1271_io_reset(wl);
1103 	wl1271_io_init(wl);
1104 
1105 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1106 	if (ret < 0)
1107 		goto fail;
1108 
1109 	/* ELP module wake up */
1110 	ret = wlcore_fw_wakeup(wl);
1111 	if (ret < 0)
1112 		goto fail;
1113 
1114 out:
1115 	return ret;
1116 
1117 fail:
1118 	wl1271_power_off(wl);
1119 	return ret;
1120 }
1121 
1122 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1123 {
1124 	int ret = 0;
1125 
1126 	ret = wl12xx_set_power_on(wl);
1127 	if (ret < 0)
1128 		goto out;
1129 
1130 	/*
1131 	 * For wl127x based devices we could use the default block
1132 	 * size (512 bytes), but due to a bug in the sdio driver, we
1133 	 * need to set it explicitly after the chip is powered on.  To
1134 	 * simplify the code and since the performance impact is
1135 	 * negligible, we use the same block size for all different
1136 	 * chip types.
1137 	 *
1138 	 * Check if the bus supports blocksize alignment and, if it
1139 	 * doesn't, make sure we don't have the quirk.
1140 	 */
1141 	if (!wl1271_set_block_size(wl))
1142 		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1143 
1144 	/* TODO: make sure the lower driver has set things up correctly */
1145 
1146 	ret = wl1271_setup(wl);
1147 	if (ret < 0)
1148 		goto out;
1149 
1150 	ret = wl12xx_fetch_firmware(wl, plt);
1151 	if (ret < 0) {
1152 		kfree(wl->fw_status);
1153 		kfree(wl->raw_fw_status);
1154 		kfree(wl->tx_res_if);
1155 	}
1156 
1157 out:
1158 	return ret;
1159 }
1160 
1161 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1162 {
1163 	int retries = WL1271_BOOT_RETRIES;
1164 	struct wiphy *wiphy = wl->hw->wiphy;
1165 
1166 	static const char* const PLT_MODE[] = {
1167 		"PLT_OFF",
1168 		"PLT_ON",
1169 		"PLT_FEM_DETECT",
1170 		"PLT_CHIP_AWAKE"
1171 	};
1172 
1173 	int ret;
1174 
1175 	mutex_lock(&wl->mutex);
1176 
1177 	wl1271_notice("power up");
1178 
1179 	if (wl->state != WLCORE_STATE_OFF) {
1180 		wl1271_error("cannot go into PLT state because not "
1181 			     "in off state: %d", wl->state);
1182 		ret = -EBUSY;
1183 		goto out;
1184 	}
1185 
1186 	/* Indicate to lower levels that we are now in PLT mode */
1187 	wl->plt = true;
1188 	wl->plt_mode = plt_mode;
1189 
1190 	while (retries) {
1191 		retries--;
1192 		ret = wl12xx_chip_wakeup(wl, true);
1193 		if (ret < 0)
1194 			goto power_off;
1195 
1196 		if (plt_mode != PLT_CHIP_AWAKE) {
1197 			ret = wl->ops->plt_init(wl);
1198 			if (ret < 0)
1199 				goto power_off;
1200 		}
1201 
1202 		wl->state = WLCORE_STATE_ON;
1203 		wl1271_notice("firmware booted in PLT mode %s (%s)",
1204 			      PLT_MODE[plt_mode],
1205 			      wl->chip.fw_ver_str);
1206 
1207 		/* update hw/fw version info in wiphy struct */
1208 		wiphy->hw_version = wl->chip.id;
1209 		strscpy(wiphy->fw_version, wl->chip.fw_ver_str,
1210 			sizeof(wiphy->fw_version));
1211 
1212 		goto out;
1213 
1214 power_off:
1215 		wl1271_power_off(wl);
1216 	}
1217 
1218 	wl->plt = false;
1219 	wl->plt_mode = PLT_OFF;
1220 
1221 	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1222 		     WL1271_BOOT_RETRIES);
1223 out:
1224 	mutex_unlock(&wl->mutex);
1225 
1226 	return ret;
1227 }
1228 
1229 int wl1271_plt_stop(struct wl1271 *wl)
1230 {
1231 	int ret = 0;
1232 
1233 	wl1271_notice("power down");
1234 
1235 	/*
1236 	 * Interrupts must be disabled before setting the state to OFF.
1237 	 * Otherwise, the interrupt handler might be called and exit without
1238 	 * reading the interrupt status.
1239 	 */
1240 	wlcore_disable_interrupts(wl);
1241 	mutex_lock(&wl->mutex);
1242 	if (!wl->plt) {
1243 		mutex_unlock(&wl->mutex);
1244 
1245 		/*
1246 		 * This will not necessarily enable interrupts as interrupts
1247 		 * may have been disabled when op_stop was called. It will,
1248 		 * however, balance the above call to disable_interrupts().
1249 		 */
1250 		wlcore_enable_interrupts(wl);
1251 
1252 		wl1271_error("cannot power down because not in PLT "
1253 			     "state: %d", wl->state);
1254 		ret = -EBUSY;
1255 		goto out;
1256 	}
1257 
1258 	mutex_unlock(&wl->mutex);
1259 
1260 	wl1271_flush_deferred_work(wl);
1261 	cancel_work_sync(&wl->netstack_work);
1262 	cancel_work_sync(&wl->recovery_work);
1263 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1264 
1265 	mutex_lock(&wl->mutex);
1266 	wl1271_power_off(wl);
1267 	wl->flags = 0;
1268 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1269 	wl->state = WLCORE_STATE_OFF;
1270 	wl->plt = false;
1271 	wl->plt_mode = PLT_OFF;
1272 	wl->rx_counter = 0;
1273 	mutex_unlock(&wl->mutex);
1274 
1275 out:
1276 	return ret;
1277 }
1278 
1279 static void wl1271_op_tx(struct ieee80211_hw *hw,
1280 			 struct ieee80211_tx_control *control,
1281 			 struct sk_buff *skb)
1282 {
1283 	struct wl1271 *wl = hw->priv;
1284 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1285 	struct ieee80211_vif *vif = info->control.vif;
1286 	struct wl12xx_vif *wlvif = NULL;
1287 	unsigned long flags;
1288 	int q, mapping;
1289 	u8 hlid;
1290 
1291 	if (!vif) {
1292 		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1293 		ieee80211_free_txskb(hw, skb);
1294 		return;
1295 	}
1296 
1297 	wlvif = wl12xx_vif_to_data(vif);
1298 	mapping = skb_get_queue_mapping(skb);
1299 	q = wl1271_tx_get_queue(mapping);
1300 
1301 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1302 
1303 	spin_lock_irqsave(&wl->wl_lock, flags);
1304 
1305 	/*
1306 	 * drop the packet if the link is invalid or the queue is stopped
1307 	 * for any reason but watermark. Watermark is a "soft"-stop so we
1308 	 * allow these packets through.
1309 	 */
1310 	if (hlid == WL12XX_INVALID_LINK_ID ||
1311 	    (!test_bit(hlid, wlvif->links_map)) ||
1312 	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1313 	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1314 			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1315 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1316 		ieee80211_free_txskb(hw, skb);
1317 		goto out;
1318 	}
1319 
1320 	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1321 		     hlid, q, skb->len);
1322 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1323 
1324 	wl->tx_queue_count[q]++;
1325 	wlvif->tx_queue_count[q]++;
1326 
1327 	/*
1328 	 * The workqueue is slow to process the tx_queue and we need to stop
1329 	 * the queue here, otherwise the queue will get too long.
1330 	 */
1331 	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1332 	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1333 					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1334 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1335 		wlcore_stop_queue_locked(wl, wlvif, q,
1336 					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1337 	}
1338 
1339 	/*
1340 	 * The chip specific setup must run before the first TX packet -
1341 	 * before that, the tx_work will not be initialized!
1342 	 */
1343 
1344 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1345 	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1346 		ieee80211_queue_work(wl->hw, &wl->tx_work);
1347 
1348 out:
1349 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1350 }
1351 
1352 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1353 {
1354 	unsigned long flags;
1355 	int q;
1356 
1357 	/* no need to queue a new dummy packet if one is already pending */
1358 	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1359 		return 0;
1360 
1361 	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1362 
1363 	spin_lock_irqsave(&wl->wl_lock, flags);
1364 	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1365 	wl->tx_queue_count[q]++;
1366 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1367 
1368 	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1369 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1370 		return wlcore_tx_work_locked(wl);
1371 
1372 	/*
1373 	 * If the FW TX is busy, TX work will be scheduled by the threaded
1374 	 * interrupt handler function
1375 	 */
1376 	return 0;
1377 }
1378 
1379 /*
1380  * The size of the dummy packet should be at least 1400 bytes. However, in
1381  * order to minimize the number of bus transactions, aligning it to 512 bytes
1382  * boundaries could be beneficial, performance-wise.
1383  */
1384 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1385 
1386 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1387 {
1388 	struct sk_buff *skb;
1389 	struct ieee80211_hdr_3addr *hdr;
1390 	unsigned int dummy_packet_size;
1391 
1392 	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1393 			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1394 
1395 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1396 	if (!skb) {
1397 		wl1271_warning("Failed to allocate a dummy packet skb");
1398 		return NULL;
1399 	}
1400 
1401 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1402 
1403 	hdr = skb_put_zero(skb, sizeof(*hdr));
1404 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1405 					 IEEE80211_STYPE_NULLFUNC |
1406 					 IEEE80211_FCTL_TODS);
1407 
1408 	skb_put_zero(skb, dummy_packet_size);
1409 
1410 	/* Dummy packets require the TID to be management */
1411 	skb->priority = WL1271_TID_MGMT;
1412 
1413 	/* Initialize all fields that might be used */
1414 	skb_set_queue_mapping(skb, 0);
1415 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1416 
1417 	return skb;
1418 }
1419 
1420 
1421 static int
1422 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1423 {
1424 	int num_fields = 0, in_field = 0, fields_size = 0;
1425 	int i, pattern_len = 0;
1426 
1427 	if (!p->mask) {
1428 		wl1271_warning("No mask in WoWLAN pattern");
1429 		return -EINVAL;
1430 	}
1431 
1432 	/*
1433 	 * The pattern is broken up into segments of bytes at different offsets
1434 	 * that need to be checked by the FW filter. Each segment is called
1435 	 * a field in the FW API. We verify that the total number of fields
1436 	 * required for this pattern won't exceed FW limits (8)
1437 	 * as well as the total fields buffer won't exceed the FW limit.
1438 	 * Note that if there's a pattern which crosses Ethernet/IP header
1439 	 * boundary a new field is required.
1440 	 */
1441 	for (i = 0; i < p->pattern_len; i++) {
1442 		if (test_bit(i, (unsigned long *)p->mask)) {
1443 			if (!in_field) {
1444 				in_field = 1;
1445 				pattern_len = 1;
1446 			} else {
1447 				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1448 					num_fields++;
1449 					fields_size += pattern_len +
1450 						RX_FILTER_FIELD_OVERHEAD;
1451 					pattern_len = 1;
1452 				} else
1453 					pattern_len++;
1454 			}
1455 		} else {
1456 			if (in_field) {
1457 				in_field = 0;
1458 				fields_size += pattern_len +
1459 					RX_FILTER_FIELD_OVERHEAD;
1460 				num_fields++;
1461 			}
1462 		}
1463 	}
1464 
1465 	if (in_field) {
1466 		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1467 		num_fields++;
1468 	}
1469 
1470 	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1471 		wl1271_warning("RX Filter too complex. Too many segments");
1472 		return -EINVAL;
1473 	}
1474 
1475 	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1476 		wl1271_warning("RX filter pattern is too big");
1477 		return -E2BIG;
1478 	}
1479 
1480 	return 0;
1481 }
1482 
1483 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1484 {
1485 	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1486 }
1487 
1488 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1489 {
1490 	int i;
1491 
1492 	if (filter == NULL)
1493 		return;
1494 
1495 	for (i = 0; i < filter->num_fields; i++)
1496 		kfree(filter->fields[i].pattern);
1497 
1498 	kfree(filter);
1499 }
1500 
1501 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1502 				 u16 offset, u8 flags,
1503 				 const u8 *pattern, u8 len)
1504 {
1505 	struct wl12xx_rx_filter_field *field;
1506 
1507 	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1508 		wl1271_warning("Max fields per RX filter. can't alloc another");
1509 		return -EINVAL;
1510 	}
1511 
1512 	field = &filter->fields[filter->num_fields];
1513 
1514 	field->pattern = kmemdup(pattern, len, GFP_KERNEL);
1515 	if (!field->pattern) {
1516 		wl1271_warning("Failed to allocate RX filter pattern");
1517 		return -ENOMEM;
1518 	}
1519 
1520 	filter->num_fields++;
1521 
1522 	field->offset = cpu_to_le16(offset);
1523 	field->flags = flags;
1524 	field->len = len;
1525 
1526 	return 0;
1527 }
1528 
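/*
 * Total size of the filter fields when flattened for the firmware:
 * each field contributes its fixed header (the struct minus the
 * pattern pointer) plus the pattern bytes themselves.
 */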
1529 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1530 {
1531 	int i, fields_size = 0;
1532 
1533 	for (i = 0; i < filter->num_fields; i++)
1534 		fields_size += filter->fields[i].len +
1535 			sizeof(struct wl12xx_rx_filter_field) -
1536 			sizeof(u8 *);
1537 
1538 	return fields_size;
1539 }
1540 
1541 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1542 				    u8 *buf)
1543 {
1544 	int i;
1545 	struct wl12xx_rx_filter_field *field;
1546 
1547 	for (i = 0; i < filter->num_fields; i++) {
1548 		field = (struct wl12xx_rx_filter_field *)buf;
1549 
1550 		field->offset = filter->fields[i].offset;
1551 		field->flags = filter->fields[i].flags;
1552 		field->len = filter->fields[i].len;
1553 
1554 		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1555 		buf += sizeof(struct wl12xx_rx_filter_field) -
1556 			sizeof(u8 *) + field->len;
1557 	}
1558 }
1559 
1560 /*
1561  * Allocates an RX filter returned through f
1562  * which needs to be freed using rx_filter_free()
1563  */
1564 static int
1565 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1566 					   struct wl12xx_rx_filter **f)
1567 {
1568 	int i, j, ret = 0;
1569 	struct wl12xx_rx_filter *filter;
1570 	u16 offset;
1571 	u8 flags, len;
1572 
1573 	filter = wl1271_rx_filter_alloc();
1574 	if (!filter) {
1575 		wl1271_warning("Failed to alloc rx filter");
1576 		ret = -ENOMEM;
1577 		goto err;
1578 	}
1579 
1580 	i = 0;
1581 	while (i < p->pattern_len) {
1582 		if (!test_bit(i, (unsigned long *)p->mask)) {
1583 			i++;
1584 			continue;
1585 		}
1586 
1587 		for (j = i; j < p->pattern_len; j++) {
1588 			if (!test_bit(j, (unsigned long *)p->mask))
1589 				break;
1590 
1591 			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1592 			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1593 				break;
1594 		}
1595 
1596 		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1597 			offset = i;
1598 			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1599 		} else {
1600 			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1601 			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1602 		}
1603 
1604 		len = j - i;
1605 
1606 		ret = wl1271_rx_filter_alloc_field(filter,
1607 						   offset,
1608 						   flags,
1609 						   &p->pattern[i], len);
1610 		if (ret)
1611 			goto err;
1612 
1613 		i = j;
1614 	}
1615 
1616 	filter->action = FILTER_SIGNAL;
1617 
1618 	*f = filter;
1619 	return 0;
1620 
1621 err:
1622 	wl1271_rx_filter_free(filter);
1623 	*f = NULL;
1624 
1625 	return ret;
1626 }
1627 
1628 static int wl1271_configure_wowlan(struct wl1271 *wl,
1629 				   struct cfg80211_wowlan *wow)
1630 {
1631 	int i, ret;
1632 
1633 	if (!wow || wow->any || !wow->n_patterns) {
1634 		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1635 							  FILTER_SIGNAL);
1636 		if (ret)
1637 			goto out;
1638 
1639 		ret = wl1271_rx_filter_clear_all(wl);
1640 		if (ret)
1641 			goto out;
1642 
1643 		return 0;
1644 	}
1645 
1646 	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1647 		return -EINVAL;
1648 
1649 	/* Validate all incoming patterns before clearing current FW state */
1650 	for (i = 0; i < wow->n_patterns; i++) {
1651 		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1652 		if (ret) {
1653 			wl1271_warning("Bad wowlan pattern %d", i);
1654 			return ret;
1655 		}
1656 	}
1657 
1658 	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1659 	if (ret)
1660 		goto out;
1661 
1662 	ret = wl1271_rx_filter_clear_all(wl);
1663 	if (ret)
1664 		goto out;
1665 
1666 	/* Translate WoWLAN patterns into filters */
1667 	for (i = 0; i < wow->n_patterns; i++) {
1668 		struct cfg80211_pkt_pattern *p;
1669 		struct wl12xx_rx_filter *filter = NULL;
1670 
1671 		p = &wow->patterns[i];
1672 
1673 		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1674 		if (ret) {
1675 			wl1271_warning("Failed to create an RX filter from "
1676 				       "wowlan pattern %d", i);
1677 			goto out;
1678 		}
1679 
1680 		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1681 
1682 		wl1271_rx_filter_free(filter);
1683 		if (ret)
1684 			goto out;
1685 	}
1686 
1687 	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1688 
1689 out:
1690 	return ret;
1691 }
1692 
1693 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1694 					struct wl12xx_vif *wlvif,
1695 					struct cfg80211_wowlan *wow)
1696 {
1697 	int ret = 0;
1698 
1699 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1700 		goto out;
1701 
1702 	ret = wl1271_configure_wowlan(wl, wow);
1703 	if (ret < 0)
1704 		goto out;
1705 
1706 	if ((wl->conf.conn.suspend_wake_up_event ==
1707 	     wl->conf.conn.wake_up_event) &&
1708 	    (wl->conf.conn.suspend_listen_interval ==
1709 	     wl->conf.conn.listen_interval))
1710 		goto out;
1711 
1712 	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1713 				    wl->conf.conn.suspend_wake_up_event,
1714 				    wl->conf.conn.suspend_listen_interval);
1715 
1716 	if (ret < 0)
1717 		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1718 out:
1719 	return ret;
1720 
1721 }
1722 
1723 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1724 					struct wl12xx_vif *wlvif,
1725 					struct cfg80211_wowlan *wow)
1726 {
1727 	int ret = 0;
1728 
1729 	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1730 		goto out;
1731 
1732 	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1733 	if (ret < 0)
1734 		goto out;
1735 
1736 	ret = wl1271_configure_wowlan(wl, wow);
1737 	if (ret < 0)
1738 		goto out;
1739 
1740 out:
1741 	return ret;
1742 
1743 }
1744 
1745 static int wl1271_configure_suspend(struct wl1271 *wl,
1746 				    struct wl12xx_vif *wlvif,
1747 				    struct cfg80211_wowlan *wow)
1748 {
1749 	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1750 		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1751 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1752 		return wl1271_configure_suspend_ap(wl, wlvif, wow);
1753 	return 0;
1754 }
1755 
1756 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1757 {
1758 	int ret = 0;
1759 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1760 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1761 
1762 	if ((!is_ap) && (!is_sta))
1763 		return;
1764 
1765 	if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1766 	    (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1767 		return;
1768 
1769 	wl1271_configure_wowlan(wl, NULL);
1770 
1771 	if (is_sta) {
1772 		if ((wl->conf.conn.suspend_wake_up_event ==
1773 		     wl->conf.conn.wake_up_event) &&
1774 		    (wl->conf.conn.suspend_listen_interval ==
1775 		     wl->conf.conn.listen_interval))
1776 			return;
1777 
1778 		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1779 				    wl->conf.conn.wake_up_event,
1780 				    wl->conf.conn.listen_interval);
1781 
1782 		if (ret < 0)
1783 			wl1271_error("resume: wake up conditions failed: %d",
1784 				     ret);
1785 
1786 	} else if (is_ap) {
1787 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1788 	}
1789 }
1790 
1791 static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
1792 					    struct cfg80211_wowlan *wow)
1793 {
1794 	struct wl1271 *wl = hw->priv;
1795 	struct wl12xx_vif *wlvif;
1796 	unsigned long flags;
1797 	int ret;
1798 
1799 	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1800 	WARN_ON(!wow);
1801 
1802 	/* we want to perform the recovery before suspending */
1803 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1804 		wl1271_warning("postponing suspend to perform recovery");
1805 		return -EBUSY;
1806 	}
1807 
1808 	wl1271_tx_flush(wl);
1809 
1810 	mutex_lock(&wl->mutex);
1811 
1812 	ret = pm_runtime_resume_and_get(wl->dev);
1813 	if (ret < 0) {
1814 		mutex_unlock(&wl->mutex);
1815 		return ret;
1816 	}
1817 
1818 	wl->wow_enabled = true;
1819 	wl12xx_for_each_wlvif(wl, wlvif) {
1820 		if (wlcore_is_p2p_mgmt(wlvif))
1821 			continue;
1822 
1823 		ret = wl1271_configure_suspend(wl, wlvif, wow);
1824 		if (ret < 0) {
1825 			goto out_sleep;
1826 		}
1827 	}
1828 
1829 	/* disable fast link flow control notifications from FW */
1830 	ret = wlcore_hw_interrupt_notify(wl, false);
1831 	if (ret < 0)
1832 		goto out_sleep;
1833 
1834 	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1835 	ret = wlcore_hw_rx_ba_filter(wl,
1836 				     !!wl->conf.conn.suspend_rx_ba_activity);
1837 	if (ret < 0)
1838 		goto out_sleep;
1839 
1840 out_sleep:
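	/*
	 * Balance the pm_runtime_resume_and_get() above without triggering
	 * runtime idle; on success the chip is powered down by
	 * pm_runtime_force_suspend() below.
	 */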
1841 	pm_runtime_put_noidle(wl->dev);
1842 	mutex_unlock(&wl->mutex);
1843 
1844 	if (ret < 0) {
1845 		wl1271_warning("couldn't prepare device to suspend");
1846 		return ret;
1847 	}
1848 
1849 	/* flush any remaining work */
1850 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1851 
1852 	flush_work(&wl->tx_work);
1853 
1854 	/*
1855 	 * Cancel the watchdog even if the tx_flush above failed. We will detect
1856 	 * it on resume anyway.
1857 	 */
1858 	cancel_delayed_work(&wl->tx_watchdog_work);
1859 
1860 	/*
1861 	 * set suspended flag to avoid triggering a new threaded_irq
1862 	 * work.
1863 	 */
1864 	spin_lock_irqsave(&wl->wl_lock, flags);
1865 	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1866 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1867 
1868 	return pm_runtime_force_suspend(wl->dev);
1869 }
1870 
1871 static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
1872 {
1873 	struct wl1271 *wl = hw->priv;
1874 	struct wl12xx_vif *wlvif;
1875 	unsigned long flags;
1876 	bool run_irq_work = false, pending_recovery;
1877 	int ret;
1878 
1879 	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1880 		     wl->wow_enabled);
1881 	WARN_ON(!wl->wow_enabled);
1882 
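	/* counterpart of the pm_runtime_force_suspend() done in wl1271_op_suspend() */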
1883 	ret = pm_runtime_force_resume(wl->dev);
1884 	if (ret < 0) {
1885 		wl1271_error("ELP wakeup failure!");
1886 		goto out_sleep;
1887 	}
1888 
1889 	/*
1890 	 * re-enable irq_work enqueuing, and call irq_work directly if
1891 	 * there is pending work.
1892 	 */
1893 	spin_lock_irqsave(&wl->wl_lock, flags);
1894 	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1895 	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1896 		run_irq_work = true;
1897 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1898 
1899 	mutex_lock(&wl->mutex);
1900 
1901 	/* test the recovery flag before calling any SDIO functions */
1902 	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1903 				    &wl->flags);
1904 
1905 	if (run_irq_work) {
1906 		wl1271_debug(DEBUG_MAC80211,
1907 			     "run postponed irq_work directly");
1908 
1909 		/* don't talk to the HW if recovery is pending */
1910 		if (!pending_recovery) {
1911 			ret = wlcore_irq_locked(wl);
1912 			if (ret)
1913 				wl12xx_queue_recovery_work(wl);
1914 		}
1915 
1916 		wlcore_enable_interrupts(wl);
1917 	}
1918 
1919 	if (pending_recovery) {
1920 		wl1271_warning("queuing forgotten recovery on resume");
1921 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1922 		goto out_sleep;
1923 	}
1924 
1925 	ret = pm_runtime_resume_and_get(wl->dev);
1926 	if (ret < 0)
1927 		goto out;
1928 
1929 	wl12xx_for_each_wlvif(wl, wlvif) {
1930 		if (wlcore_is_p2p_mgmt(wlvif))
1931 			continue;
1932 
1933 		wl1271_configure_resume(wl, wlvif);
1934 	}
1935 
1936 	ret = wlcore_hw_interrupt_notify(wl, true);
1937 	if (ret < 0)
1938 		goto out_sleep;
1939 
1940 	/* disable the RX BA filtering that may have been configured for suspend */
1941 	ret = wlcore_hw_rx_ba_filter(wl, false);
1942 	if (ret < 0)
1943 		goto out_sleep;
1944 
1945 out_sleep:
1946 	pm_runtime_mark_last_busy(wl->dev);
1947 	pm_runtime_put_autosuspend(wl->dev);
1948 
1949 out:
1950 	wl->wow_enabled = false;
1951 
1952 	/*
1953 	 * Set a flag to re-init the watchdog on the first Tx after resume.
1954 	 * That way we avoid possible conditions where Tx-complete interrupts
1955 	 * fail to arrive and we perform a spurious recovery.
1956 	 */
1957 	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1958 	mutex_unlock(&wl->mutex);
1959 
1960 	return 0;
1961 }
1962 
1963 static int wl1271_op_start(struct ieee80211_hw *hw)
1964 {
1965 	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1966 
1967 	/*
1968 	 * We have to delay the booting of the hardware because
1969 	 * we need to know the local MAC address before downloading and
1970 	 * initializing the firmware. The MAC address cannot be changed
1971 	 * after boot, and without the proper MAC address, the firmware
1972 	 * will not function properly.
1973 	 *
1974 	 * The MAC address is first known when the corresponding interface
1975 	 * is added. That is where we will initialize the hardware.
1976 	 */
1977 
1978 	return 0;
1979 }
1980 
1981 static void wlcore_op_stop_locked(struct wl1271 *wl)
1982 {
1983 	int i;
1984 
1985 	if (wl->state == WLCORE_STATE_OFF) {
1986 		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1987 					&wl->flags))
1988 			wlcore_enable_interrupts(wl);
1989 
1990 		return;
1991 	}
1992 
1993 	/*
1994 	 * this must be before the cancel_work calls below, so that the work
1995 	 * functions don't perform further work.
1996 	 */
1997 	wl->state = WLCORE_STATE_OFF;
1998 
1999 	/*
2000 	 * Use the nosync variant to disable interrupts, so the mutex could be
2001 	 * held while doing so without deadlocking.
2002 	 */
2003 	wlcore_disable_interrupts_nosync(wl);
2004 
2005 	mutex_unlock(&wl->mutex);
2006 
2007 	wlcore_synchronize_interrupts(wl);
2008 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
2009 		cancel_work_sync(&wl->recovery_work);
2010 	wl1271_flush_deferred_work(wl);
2011 	cancel_delayed_work_sync(&wl->scan_complete_work);
2012 	cancel_work_sync(&wl->netstack_work);
2013 	cancel_work_sync(&wl->tx_work);
2014 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
2015 
2016 	/* let's notify MAC80211 about the remaining pending TX frames */
2017 	mutex_lock(&wl->mutex);
2018 	wl12xx_tx_reset(wl);
2019 
2020 	wl1271_power_off(wl);
2021 	/*
2022 	 * In case a recovery was scheduled, interrupts were disabled to avoid
2023 	 * an interrupt storm. Now that the power is down, it is safe to
2024 	 * re-enable interrupts to balance the disable depth
2025 	 */
2026 	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
2027 		wlcore_enable_interrupts(wl);
2028 
2029 	wl->band = NL80211_BAND_2GHZ;
2030 
2031 	wl->rx_counter = 0;
2032 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
2033 	wl->channel_type = NL80211_CHAN_NO_HT;
2034 	wl->tx_blocks_available = 0;
2035 	wl->tx_allocated_blocks = 0;
2036 	wl->tx_results_count = 0;
2037 	wl->tx_packets_count = 0;
2038 	wl->time_offset = 0;
2039 	wl->ap_fw_ps_map = 0;
2040 	wl->ap_ps_map = 0;
2041 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
2042 	memset(wl->roles_map, 0, sizeof(wl->roles_map));
2043 	memset(wl->links_map, 0, sizeof(wl->links_map));
2044 	memset(wl->roc_map, 0, sizeof(wl->roc_map));
2045 	memset(wl->session_ids, 0, sizeof(wl->session_ids));
2046 	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
2047 	wl->active_sta_count = 0;
2048 	wl->active_link_count = 0;
2049 
2050 	/* The system link is always allocated */
2051 	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
2052 	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
2053 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
2054 
2055 	/*
2056 	 * this is performed after the cancel_work calls and the associated
2057 	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
2058 	 * get executed before all these vars have been reset.
2059 	 */
2060 	wl->flags = 0;
2061 
2062 	wl->tx_blocks_freed = 0;
2063 
2064 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2065 		wl->tx_pkts_freed[i] = 0;
2066 		wl->tx_allocated_pkts[i] = 0;
2067 	}
2068 
2069 	wl1271_debugfs_reset(wl);
2070 
2071 	kfree(wl->raw_fw_status);
2072 	wl->raw_fw_status = NULL;
2073 	kfree(wl->fw_status);
2074 	wl->fw_status = NULL;
2075 	kfree(wl->tx_res_if);
2076 	wl->tx_res_if = NULL;
2077 	kfree(wl->target_mem_map);
2078 	wl->target_mem_map = NULL;
2079 
2080 	/*
2081 	 * FW channels must be re-calibrated after recovery, so
2082 	 * save the current Reg-Domain channel configuration and clear it.
2083 	 */
2084 	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2085 	       sizeof(wl->reg_ch_conf_pending));
2086 	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2087 }
2088 
2089 static void wlcore_op_stop(struct ieee80211_hw *hw, bool suspend)
2090 {
2091 	struct wl1271 *wl = hw->priv;
2092 
2093 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2094 
2095 	mutex_lock(&wl->mutex);
2096 
2097 	wlcore_op_stop_locked(wl);
2098 
2099 	mutex_unlock(&wl->mutex);
2100 }
2101 
2102 static void wlcore_channel_switch_work(struct work_struct *work)
2103 {
2104 	struct delayed_work *dwork;
2105 	struct wl1271 *wl;
2106 	struct ieee80211_vif *vif;
2107 	struct wl12xx_vif *wlvif;
2108 	int ret;
2109 
2110 	dwork = to_delayed_work(work);
2111 	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2112 	wl = wlvif->wl;
2113 
2114 	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2115 
2116 	mutex_lock(&wl->mutex);
2117 
2118 	if (unlikely(wl->state != WLCORE_STATE_ON))
2119 		goto out;
2120 
2121 	/* check the channel switch is still ongoing */
2122 	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2123 		goto out;
2124 
2125 	vif = wl12xx_wlvif_to_vif(wlvif);
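	/* report the channel switch back to mac80211 as failed (success = false) */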
2126 	ieee80211_chswitch_done(vif, false, 0);
2127 
2128 	ret = pm_runtime_resume_and_get(wl->dev);
2129 	if (ret < 0)
2130 		goto out;
2131 
2132 	wl12xx_cmd_stop_channel_switch(wl, wlvif);
2133 
2134 	pm_runtime_mark_last_busy(wl->dev);
2135 	pm_runtime_put_autosuspend(wl->dev);
2136 out:
2137 	mutex_unlock(&wl->mutex);
2138 }
2139 
2140 static void wlcore_connection_loss_work(struct work_struct *work)
2141 {
2142 	struct delayed_work *dwork;
2143 	struct wl1271 *wl;
2144 	struct ieee80211_vif *vif;
2145 	struct wl12xx_vif *wlvif;
2146 
2147 	dwork = to_delayed_work(work);
2148 	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2149 	wl = wlvif->wl;
2150 
2151 	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2152 
2153 	mutex_lock(&wl->mutex);
2154 
2155 	if (unlikely(wl->state != WLCORE_STATE_ON))
2156 		goto out;
2157 
2158 	/* Call mac80211 connection loss */
2159 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2160 		goto out;
2161 
2162 	vif = wl12xx_wlvif_to_vif(wlvif);
2163 	ieee80211_connection_loss(vif);
2164 out:
2165 	mutex_unlock(&wl->mutex);
2166 }
2167 
2168 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2169 {
2170 	struct delayed_work *dwork;
2171 	struct wl1271 *wl;
2172 	struct wl12xx_vif *wlvif;
2173 	unsigned long time_spare;
2174 	int ret;
2175 
2176 	dwork = to_delayed_work(work);
2177 	wlvif = container_of(dwork, struct wl12xx_vif,
2178 			     pending_auth_complete_work);
2179 	wl = wlvif->wl;
2180 
2181 	mutex_lock(&wl->mutex);
2182 
2183 	if (unlikely(wl->state != WLCORE_STATE_ON))
2184 		goto out;
2185 
2186 	/*
2187 	 * Make sure a second really passed since the last auth reply. Maybe
2188 	 * a second auth reply arrived while we were stuck on the mutex.
2189 	 * Check for a little less than the timeout to protect from scheduler
2190 	 * irregularities.
2191 	 */
2192 	time_spare = jiffies +
2193 			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2194 	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2195 		goto out;
2196 
2197 	ret = pm_runtime_resume_and_get(wl->dev);
2198 	if (ret < 0)
2199 		goto out;
2200 
2201 	/* cancel the ROC if active */
2202 	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2203 
2204 	pm_runtime_mark_last_busy(wl->dev);
2205 	pm_runtime_put_autosuspend(wl->dev);
2206 out:
2207 	mutex_unlock(&wl->mutex);
2208 }
2209 
2210 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2211 {
2212 	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2213 					WL12XX_MAX_RATE_POLICIES);
2214 	if (policy >= WL12XX_MAX_RATE_POLICIES)
2215 		return -EBUSY;
2216 
2217 	__set_bit(policy, wl->rate_policies_map);
2218 	*idx = policy;
2219 	return 0;
2220 }
2221 
2222 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2223 {
2224 	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2225 		return;
2226 
2227 	__clear_bit(*idx, wl->rate_policies_map);
2228 	*idx = WL12XX_MAX_RATE_POLICIES;
2229 }
2230 
2231 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2232 {
2233 	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2234 					WLCORE_MAX_KLV_TEMPLATES);
2235 	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2236 		return -EBUSY;
2237 
2238 	__set_bit(policy, wl->klv_templates_map);
2239 	*idx = policy;
2240 	return 0;
2241 }
2242 
2243 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2244 {
2245 	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2246 		return;
2247 
2248 	__clear_bit(*idx, wl->klv_templates_map);
2249 	*idx = WLCORE_MAX_KLV_TEMPLATES;
2250 }
2251 
2252 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2253 {
2254 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2255 
2256 	switch (wlvif->bss_type) {
2257 	case BSS_TYPE_AP_BSS:
2258 		if (wlvif->p2p)
2259 			return WL1271_ROLE_P2P_GO;
2260 		else if (ieee80211_vif_is_mesh(vif))
2261 			return WL1271_ROLE_MESH_POINT;
2262 		else
2263 			return WL1271_ROLE_AP;
2264 
2265 	case BSS_TYPE_STA_BSS:
2266 		if (wlvif->p2p)
2267 			return WL1271_ROLE_P2P_CL;
2268 		else
2269 			return WL1271_ROLE_STA;
2270 
2271 	case BSS_TYPE_IBSS:
2272 		return WL1271_ROLE_IBSS;
2273 
2274 	default:
2275 		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2276 	}
2277 	return WL12XX_INVALID_ROLE_TYPE;
2278 }
2279 
2280 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2281 {
2282 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2283 	int i;
2284 
2285 	/* clear everything but the persistent data */
2286 	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2287 
2288 	switch (ieee80211_vif_type_p2p(vif)) {
2289 	case NL80211_IFTYPE_P2P_CLIENT:
2290 		wlvif->p2p = 1;
2291 		fallthrough;
2292 	case NL80211_IFTYPE_STATION:
2293 	case NL80211_IFTYPE_P2P_DEVICE:
2294 		wlvif->bss_type = BSS_TYPE_STA_BSS;
2295 		break;
2296 	case NL80211_IFTYPE_ADHOC:
2297 		wlvif->bss_type = BSS_TYPE_IBSS;
2298 		break;
2299 	case NL80211_IFTYPE_P2P_GO:
2300 		wlvif->p2p = 1;
2301 		fallthrough;
2302 	case NL80211_IFTYPE_AP:
2303 	case NL80211_IFTYPE_MESH_POINT:
2304 		wlvif->bss_type = BSS_TYPE_AP_BSS;
2305 		break;
2306 	default:
2307 		wlvif->bss_type = MAX_BSS_TYPE;
2308 		return -EOPNOTSUPP;
2309 	}
2310 
2311 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2312 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2313 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2314 
2315 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2316 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2317 		/* init sta/ibss data */
2318 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2319 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2320 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2321 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2322 		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2323 		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2324 		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2325 		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2326 	} else {
2327 		/* init ap data */
2328 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2329 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2330 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2331 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2332 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2333 			wl12xx_allocate_rate_policy(wl,
2334 						&wlvif->ap.ucast_rate_idx[i]);
2335 		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2336 		/*
2337 		 * TODO: check if basic_rate shouldn't be
2338 		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2339 		 * instead (the same thing for STA above).
2340 		 */
2341 		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2342 		/* TODO: this seems to be used only for STA, check it */
2343 		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2344 	}
2345 
2346 	wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2347 	wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2348 	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2349 
2350 	/*
2351 	 * mac80211 configures some values globally, while we treat them
2352 	 * per-interface. thus, on init, we have to copy them from wl
2353 	 */
2354 	wlvif->band = wl->band;
2355 	wlvif->channel = wl->channel;
2356 	wlvif->power_level = wl->power_level;
2357 	wlvif->channel_type = wl->channel_type;
2358 
2359 	INIT_WORK(&wlvif->rx_streaming_enable_work,
2360 		  wl1271_rx_streaming_enable_work);
2361 	INIT_WORK(&wlvif->rx_streaming_disable_work,
2362 		  wl1271_rx_streaming_disable_work);
2363 	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2364 	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2365 			  wlcore_channel_switch_work);
2366 	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2367 			  wlcore_connection_loss_work);
2368 	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2369 			  wlcore_pending_auth_complete_work);
2370 	INIT_LIST_HEAD(&wlvif->list);
2371 
2372 	timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
2373 	return 0;
2374 }
2375 
2376 static int wl12xx_init_fw(struct wl1271 *wl)
2377 {
2378 	int retries = WL1271_BOOT_RETRIES;
2379 	bool booted = false;
2380 	struct wiphy *wiphy = wl->hw->wiphy;
2381 	int ret;
2382 
2383 	while (retries) {
2384 		retries--;
2385 		ret = wl12xx_chip_wakeup(wl, false);
2386 		if (ret < 0)
2387 			goto power_off;
2388 
2389 		ret = wl->ops->boot(wl);
2390 		if (ret < 0)
2391 			goto power_off;
2392 
2393 		ret = wl1271_hw_init(wl);
2394 		if (ret < 0)
2395 			goto irq_disable;
2396 
2397 		booted = true;
2398 		break;
2399 
2400 irq_disable:
2401 		mutex_unlock(&wl->mutex);
2402 		/* Unlocking the mutex in the middle of handling is
2403 		   inherently unsafe. In this case we deem it safe to do,
2404 		   because we need to let any possibly pending IRQ out of
2405 		   the system (and while we are WLCORE_STATE_OFF the IRQ
2406 		   work function will not do anything.) Also, any other
2407 		   possible concurrent operations will fail due to the
2408 		   current state, hence the wl1271 struct should be safe. */
2409 		wlcore_disable_interrupts(wl);
2410 		wl1271_flush_deferred_work(wl);
2411 		cancel_work_sync(&wl->netstack_work);
2412 		mutex_lock(&wl->mutex);
2413 power_off:
2414 		wl1271_power_off(wl);
2415 	}
2416 
2417 	if (!booted) {
2418 		wl1271_error("firmware boot failed despite %d retries",
2419 			     WL1271_BOOT_RETRIES);
2420 		goto out;
2421 	}
2422 
2423 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2424 
2425 	/* update hw/fw version info in wiphy struct */
2426 	wiphy->hw_version = wl->chip.id;
2427 	strscpy(wiphy->fw_version, wl->chip.fw_ver_str,
2428 		sizeof(wiphy->fw_version));
2429 
2430 	/*
2431 	 * Now we know if 11a is supported (info from the NVS), so disable
2432 	 * 11a channels if not supported
2433 	 */
2434 	if (!wl->enable_11a)
2435 		wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2436 
2437 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2438 		     wl->enable_11a ? "" : "not ");
2439 
2440 	wl->state = WLCORE_STATE_ON;
2441 out:
2442 	return ret;
2443 }
2444 
2445 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2446 {
2447 	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2448 }
2449 
2450 /*
2451  * Check whether a fw switch (i.e. moving from one loaded
2452  * fw to another) is needed. This function is also responsible
2453  * for updating wl->last_vif_count, so it must be called before
2454  * loading a non-plt fw (so the correct fw (single-role/multi-role)
2455  * will be used).
2456  */
2457 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2458 				  struct vif_counter_data vif_counter_data,
2459 				  bool add)
2460 {
2461 	enum wl12xx_fw_type current_fw = wl->fw_type;
2462 	u8 vif_count = vif_counter_data.counter;
2463 
2464 	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2465 		return false;
2466 
2467 	/* increase the vif count if this is a new vif */
2468 	if (add && !vif_counter_data.cur_vif_running)
2469 		vif_count++;
2470 
2471 	wl->last_vif_count = vif_count;
2472 
2473 	/* no need for fw change if the device is OFF */
2474 	if (wl->state == WLCORE_STATE_OFF)
2475 		return false;
2476 
2477 	/* no need for fw change if a single fw is used */
2478 	if (!wl->mr_fw_name)
2479 		return false;
2480 
2481 	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2482 		return true;
2483 	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2484 		return true;
2485 
2486 	return false;
2487 }
2488 
2489 /*
2490  * Enter "forced psm". Make sure the sta is in psm against the ap,
2491  * to make the fw switch a bit more disconnection-persistent.
2492  */
2493 static void wl12xx_force_active_psm(struct wl1271 *wl)
2494 {
2495 	struct wl12xx_vif *wlvif;
2496 
2497 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2498 		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2499 	}
2500 }
2501 
2502 struct wlcore_hw_queue_iter_data {
2503 	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2504 	/* current vif */
2505 	struct ieee80211_vif *vif;
2506 	/* is the current vif among those iterated */
2507 	bool cur_running;
2508 };
2509 
2510 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2511 				 struct ieee80211_vif *vif)
2512 {
2513 	struct wlcore_hw_queue_iter_data *iter_data = data;
2514 
2515 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2516 	    WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2517 		return;
2518 
2519 	if (iter_data->cur_running || vif == iter_data->vif) {
2520 		iter_data->cur_running = true;
2521 		return;
2522 	}
2523 
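	/*
	 * Each vif owns a contiguous block of NUM_TX_QUEUES hw queues, so the
	 * first queue index divided by NUM_TX_QUEUES identifies its block.
	 */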
2524 	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2525 }
2526 
2527 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2528 					 struct wl12xx_vif *wlvif)
2529 {
2530 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2531 	struct wlcore_hw_queue_iter_data iter_data = {};
2532 	int i, q_base;
2533 
2534 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2535 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2536 		return 0;
2537 	}
2538 
2539 	iter_data.vif = vif;
2540 
2541 	/* mark all bits taken by active interfaces */
2542 	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2543 					IEEE80211_IFACE_ITER_RESUME_ALL,
2544 					wlcore_hw_queue_iter, &iter_data);
2545 
2546 	/* the current vif is already running in mac80211 (resume/recovery) */
2547 	if (iter_data.cur_running) {
2548 		wlvif->hw_queue_base = vif->hw_queue[0];
2549 		wl1271_debug(DEBUG_MAC80211,
2550 			     "using pre-allocated hw queue base %d",
2551 			     wlvif->hw_queue_base);
2552 
2553 		/* the interface might have changed type */
2554 		goto adjust_cab_queue;
2555 	}
2556 
2557 	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2558 				     WLCORE_NUM_MAC_ADDRESSES);
2559 	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2560 		return -EBUSY;
2561 
2562 	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2563 	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2564 		     wlvif->hw_queue_base);
2565 
2566 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2567 		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2568 		/* register hw queues in mac80211 */
2569 		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2570 	}
2571 
2572 adjust_cab_queue:
2573 	/* the last places are reserved for cab queues per interface */
2574 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2575 		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2576 				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2577 	else
2578 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2579 
2580 	return 0;
2581 }
2582 
2583 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2584 				   struct ieee80211_vif *vif)
2585 {
2586 	struct wl1271 *wl = hw->priv;
2587 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2588 	struct vif_counter_data vif_count;
2589 	int ret = 0;
2590 	u8 role_type;
2591 
2592 	if (wl->plt) {
2593 		wl1271_error("Adding Interface not allowed while in PLT mode");
2594 		return -EBUSY;
2595 	}
2596 
2597 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2598 			     IEEE80211_VIF_SUPPORTS_UAPSD |
2599 			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2600 
2601 	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2602 		     ieee80211_vif_type_p2p(vif), vif->addr);
2603 
2604 	wl12xx_get_vif_count(hw, vif, &vif_count);
2605 
2606 	mutex_lock(&wl->mutex);
2607 
2608 	/*
2609 	 * in some rare HW recovery corner cases it's possible to
2610 	 * get here before __wl1271_op_remove_interface is complete, so
2611 	 * opt out if that is the case.
2612 	 */
2613 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2614 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2615 		ret = -EBUSY;
2616 		goto out_unlock;
2617 	}
2618 
2619 
2620 	ret = wl12xx_init_vif_data(wl, vif);
2621 	if (ret < 0)
2622 		goto out_unlock;
2623 
2624 	wlvif->wl = wl;
2625 	role_type = wl12xx_get_role_type(wl, wlvif);
2626 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2627 		ret = -EINVAL;
2628 		goto out_unlock;
2629 	}
2630 
2631 	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2632 	if (ret < 0)
2633 		goto out_unlock;
2634 
2635 	/*
2636 	 * TODO: after the nvs issue is solved, move this block
2637 	 * to start(), and make sure the driver is ON here.
2638 	 */
2639 	if (wl->state == WLCORE_STATE_OFF) {
2640 		/*
2641 		 * we still need this in order to configure the fw
2642 		 * while uploading the nvs
2643 		 */
2644 		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2645 
2646 		ret = wl12xx_init_fw(wl);
2647 		if (ret < 0)
2648 			goto out_unlock;
2649 	}
2650 
2651 	/*
2652 	 * Call runtime PM only after possible wl12xx_init_fw() above
2653 	 * is done. Otherwise we do not have interrupts enabled.
2654 	 */
2655 	ret = pm_runtime_resume_and_get(wl->dev);
2656 	if (ret < 0)
2657 		goto out_unlock;
2658 
2659 	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2660 		wl12xx_force_active_psm(wl);
2661 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2662 		mutex_unlock(&wl->mutex);
2663 		wl1271_recovery_work(&wl->recovery_work);
2664 		return 0;
2665 	}
2666 
2667 	if (!wlcore_is_p2p_mgmt(wlvif)) {
2668 		ret = wl12xx_cmd_role_enable(wl, vif->addr,
2669 					     role_type, &wlvif->role_id);
2670 		if (ret < 0)
2671 			goto out;
2672 
2673 		ret = wl1271_init_vif_specific(wl, vif);
2674 		if (ret < 0)
2675 			goto out;
2676 
2677 	} else {
2678 		ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2679 					     &wlvif->dev_role_id);
2680 		if (ret < 0)
2681 			goto out;
2682 
2683 		/* needed mainly for configuring rate policies */
2684 		ret = wl1271_sta_hw_init(wl, wlvif);
2685 		if (ret < 0)
2686 			goto out;
2687 	}
2688 
2689 	list_add(&wlvif->list, &wl->wlvif_list);
2690 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2691 
2692 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2693 		wl->ap_count++;
2694 	else
2695 		wl->sta_count++;
2696 out:
2697 	pm_runtime_mark_last_busy(wl->dev);
2698 	pm_runtime_put_autosuspend(wl->dev);
2699 out_unlock:
2700 	mutex_unlock(&wl->mutex);
2701 
2702 	return ret;
2703 }
2704 
2705 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2706 					 struct ieee80211_vif *vif,
2707 					 bool reset_tx_queues)
2708 {
2709 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2710 	int i, ret;
2711 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2712 
2713 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2714 
2715 	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2716 		return;
2717 
2718 	/* because of hardware recovery, we may get here twice */
2719 	if (wl->state == WLCORE_STATE_OFF)
2720 		return;
2721 
2722 	wl1271_info("down");
2723 
2724 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2725 	    wl->scan_wlvif == wlvif) {
2726 		struct cfg80211_scan_info info = {
2727 			.aborted = true,
2728 		};
2729 
2730 		/*
2731 		 * Rearm the tx watchdog just before idling the scan. This
2732 		 * prevents just-finished scans from triggering the watchdog.
2733 		 */
2734 		wl12xx_rearm_tx_watchdog_locked(wl);
2735 
2736 		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2737 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2738 		wl->scan_wlvif = NULL;
2739 		wl->scan.req = NULL;
2740 		ieee80211_scan_completed(wl->hw, &info);
2741 	}
2742 
2743 	if (wl->sched_vif == wlvif)
2744 		wl->sched_vif = NULL;
2745 
2746 	if (wl->roc_vif == vif) {
2747 		wl->roc_vif = NULL;
2748 		ieee80211_remain_on_channel_expired(wl->hw);
2749 	}
2750 
2751 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2752 		/* disable active roles */
2753 		ret = pm_runtime_resume_and_get(wl->dev);
2754 		if (ret < 0)
2755 			goto deinit;
2756 
2757 		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2758 		    wlvif->bss_type == BSS_TYPE_IBSS) {
2759 			if (wl12xx_dev_role_started(wlvif))
2760 				wl12xx_stop_dev(wl, wlvif);
2761 		}
2762 
2763 		if (!wlcore_is_p2p_mgmt(wlvif)) {
2764 			ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2765 			if (ret < 0) {
2766 				pm_runtime_put_noidle(wl->dev);
2767 				goto deinit;
2768 			}
2769 		} else {
2770 			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2771 			if (ret < 0) {
2772 				pm_runtime_put_noidle(wl->dev);
2773 				goto deinit;
2774 			}
2775 		}
2776 
2777 		pm_runtime_mark_last_busy(wl->dev);
2778 		pm_runtime_put_autosuspend(wl->dev);
2779 	}
2780 deinit:
2781 	wl12xx_tx_reset_wlvif(wl, wlvif);
2782 
2783 	/* clear all hlids (except system_hlid) */
2784 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2785 
2786 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2787 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2788 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2789 		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2790 		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2791 		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2792 		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2793 	} else {
2794 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2795 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2796 		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2797 		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2798 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2799 			wl12xx_free_rate_policy(wl,
2800 						&wlvif->ap.ucast_rate_idx[i]);
2801 		wl1271_free_ap_keys(wl, wlvif);
2802 	}
2803 
2804 	dev_kfree_skb(wlvif->probereq);
2805 	wlvif->probereq = NULL;
2806 	if (wl->last_wlvif == wlvif)
2807 		wl->last_wlvif = NULL;
2808 	list_del(&wlvif->list);
2809 	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2810 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2811 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2812 
2813 	if (is_ap)
2814 		wl->ap_count--;
2815 	else
2816 		wl->sta_count--;
2817 
2818 	/*
2819 	 * Last AP went down but stations remain: configure sleep auth according
2820 	 * to STA. Don't do this on an unintended recovery.
2821 	 */
2822 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2823 	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2824 		goto unlock;
2825 
2826 	if (wl->ap_count == 0 && is_ap) {
2827 		/* mask ap events */
2828 		wl->event_mask &= ~wl->ap_event_mask;
2829 		wl1271_event_unmask(wl);
2830 	}
2831 
2832 	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2833 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2834 		/* Configure for power according to debugfs */
2835 		if (sta_auth != WL1271_PSM_ILLEGAL)
2836 			wl1271_acx_sleep_auth(wl, sta_auth);
2837 		/* Configure for ELP power saving */
2838 		else
2839 			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2840 	}
2841 
2842 unlock:
2843 	mutex_unlock(&wl->mutex);
2844 
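	/*
	 * Cancel the per-vif works with wl->mutex released; the work functions
	 * (e.g. the channel switch and connection loss works above) take the
	 * mutex themselves, so cancelling synchronously while holding it would
	 * deadlock.
	 */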
2845 	timer_delete_sync(&wlvif->rx_streaming_timer);
2846 	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2847 	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2848 	cancel_work_sync(&wlvif->rc_update_work);
2849 	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2850 	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2851 	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2852 
2853 	mutex_lock(&wl->mutex);
2854 }
2855 
2856 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2857 				       struct ieee80211_vif *vif)
2858 {
2859 	struct wl1271 *wl = hw->priv;
2860 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2861 	struct wl12xx_vif *iter;
2862 	struct vif_counter_data vif_count;
2863 
2864 	wl12xx_get_vif_count(hw, vif, &vif_count);
2865 	mutex_lock(&wl->mutex);
2866 
2867 	if (wl->state == WLCORE_STATE_OFF ||
2868 	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2869 		goto out;
2870 
2871 	/*
2872 	 * wl->vif can be null here if someone shuts down the interface
2873 	 * just when hardware recovery has been started.
2874 	 */
2875 	wl12xx_for_each_wlvif(wl, iter) {
2876 		if (iter != wlvif)
2877 			continue;
2878 
2879 		__wl1271_op_remove_interface(wl, vif, true);
2880 		break;
2881 	}
2882 	WARN_ON(iter != wlvif);
2883 	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2884 		wl12xx_force_active_psm(wl);
2885 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2886 		wl12xx_queue_recovery_work(wl);
2887 	}
2888 out:
2889 	mutex_unlock(&wl->mutex);
2890 }
2891 
2892 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2893 				      struct ieee80211_vif *vif,
2894 				      enum nl80211_iftype new_type, bool p2p)
2895 {
2896 	struct wl1271 *wl = hw->priv;
2897 	int ret;
2898 
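	/*
	 * The change-in-progress flag keeps wl12xx_need_fw_change() from
	 * forcing a firmware switch while the vif is removed and re-added.
	 */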
2899 	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2900 	wl1271_op_remove_interface(hw, vif);
2901 
2902 	vif->type = new_type;
2903 	vif->p2p = p2p;
2904 	ret = wl1271_op_add_interface(hw, vif);
2905 
2906 	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2907 	return ret;
2908 }
2909 
2910 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2911 {
2912 	int ret;
2913 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2914 
2915 	/*
2916 	 * One of the side effects of the JOIN command is that it clears
2917 	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2918 	 * to a WPA/WPA2 access point will therefore kill the data-path.
2919 	 * Currently the only valid scenario for JOIN during association
2920 	 * is on roaming, in which case we will also be given new keys.
2921 	 * Keep the below message for now, unless it starts bothering
2922 	 * users who really like to roam a lot :)
2923 	 */
2924 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2925 		wl1271_info("JOIN while associated.");
2926 
2927 	/* clear encryption type */
2928 	wlvif->encryption_type = KEY_NONE;
2929 
2930 	if (is_ibss)
2931 		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2932 	else
2933 		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2934 
2935 	return ret;
2936 }
2937 
2938 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2939 			    int offset)
2940 {
2941 	u8 ssid_len;
2942 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2943 					 skb->len - offset);
2944 
2945 	if (!ptr) {
2946 		wl1271_error("No SSID in IEs!");
2947 		return -ENOENT;
2948 	}
2949 
2950 	ssid_len = ptr[1];
2951 	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2952 		wl1271_error("SSID is too long!");
2953 		return -EINVAL;
2954 	}
2955 
2956 	wlvif->ssid_len = ssid_len;
2957 	memcpy(wlvif->ssid, ptr+2, ssid_len);
2958 	return 0;
2959 }
2960 
2961 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2962 {
2963 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2964 	struct sk_buff *skb;
2965 	int ieoffset;
2966 
2967 	/* we currently only support setting the ssid from the ap probe req */
2968 	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2969 		return -EINVAL;
2970 
2971 	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2972 	if (!skb)
2973 		return -EINVAL;
2974 
2975 	ieoffset = offsetof(struct ieee80211_mgmt,
2976 			    u.probe_req.variable);
2977 	wl1271_ssid_set(wlvif, skb, ieoffset);
2978 	dev_kfree_skb(skb);
2979 
2980 	return 0;
2981 }
2982 
2983 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2984 			    struct ieee80211_bss_conf *bss_conf,
2985 			    u32 sta_rate_set)
2986 {
2987 	struct ieee80211_vif *vif = container_of(bss_conf, struct ieee80211_vif,
2988 						 bss_conf);
2989 	int ieoffset;
2990 	int ret;
2991 
2992 	wlvif->aid = vif->cfg.aid;
2993 	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chanreq.oper);
2994 	wlvif->beacon_int = bss_conf->beacon_int;
2995 	wlvif->wmm_enabled = bss_conf->qos;
2996 
2997 	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2998 
2999 	/*
3000 	 * with wl1271, we don't need to update the
3001 	 * beacon_int and dtim_period, because the firmware
3002 	 * updates them by itself when the first beacon is
3003 	 * received after a join.
3004 	 */
3005 	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
3006 	if (ret < 0)
3007 		return ret;
3008 
3009 	/*
3010 	 * Get a template for hardware connection maintenance
3011 	 */
3012 	dev_kfree_skb(wlvif->probereq);
3013 	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
3014 							wlvif,
3015 							NULL);
3016 	ieoffset = offsetof(struct ieee80211_mgmt,
3017 			    u.probe_req.variable);
3018 	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
3019 
3020 	/* enable the connection monitoring feature */
3021 	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
3022 	if (ret < 0)
3023 		return ret;
3024 
3025 	/*
3026 	 * The join command disables the keep-alive mode, shuts down its process,
3027 	 * and also clears the template config, so we need to reset it all after
3028 	 * the join. The acx_aid starts the keep-alive process, and the order
3029 	 * of the commands below is relevant.
3030 	 */
3031 	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
3032 	if (ret < 0)
3033 		return ret;
3034 
3035 	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
3036 	if (ret < 0)
3037 		return ret;
3038 
3039 	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
3040 	if (ret < 0)
3041 		return ret;
3042 
3043 	ret = wl1271_acx_keep_alive_config(wl, wlvif,
3044 					   wlvif->sta.klv_template_id,
3045 					   ACX_KEEP_ALIVE_TPL_VALID);
3046 	if (ret < 0)
3047 		return ret;
3048 
3049 	/*
3050 	 * The default fw psm configuration is AUTO, while mac80211 default
3051 	 * setting is off (ACTIVE), so sync the fw with the correct value.
3052 	 */
3053 	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
3054 	if (ret < 0)
3055 		return ret;
3056 
3057 	if (sta_rate_set) {
3058 		wlvif->rate_set =
3059 			wl1271_tx_enabled_rates_get(wl,
3060 						    sta_rate_set,
3061 						    wlvif->band);
3062 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3063 		if (ret < 0)
3064 			return ret;
3065 	}
3066 
3067 	return ret;
3068 }
3069 
3070 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3071 {
3072 	int ret;
3073 	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3074 
3075 	/* make sure we are associated (sta) */
3076 	if (sta &&
3077 	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3078 		return false;
3079 
3080 	/* make sure we are joined (ibss) */
3081 	if (!sta &&
3082 	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3083 		return false;
3084 
3085 	if (sta) {
3086 		/* use defaults when not associated */
3087 		wlvif->aid = 0;
3088 
3089 		/* free probe-request template */
3090 		dev_kfree_skb(wlvif->probereq);
3091 		wlvif->probereq = NULL;
3092 
3093 		/* disable connection monitor features */
3094 		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3095 		if (ret < 0)
3096 			return ret;
3097 
3098 		/* Disable the keep-alive feature */
3099 		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3100 		if (ret < 0)
3101 			return ret;
3102 
3103 		/* disable beacon filtering */
3104 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3105 		if (ret < 0)
3106 			return ret;
3107 	}
3108 
3109 	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3110 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3111 
3112 		wl12xx_cmd_stop_channel_switch(wl, wlvif);
3113 		ieee80211_chswitch_done(vif, false, 0);
3114 		cancel_delayed_work(&wlvif->channel_switch_work);
3115 	}
3116 
3117 	/* invalidate keep-alive template */
3118 	wl1271_acx_keep_alive_config(wl, wlvif,
3119 				     wlvif->sta.klv_template_id,
3120 				     ACX_KEEP_ALIVE_TPL_INVALID);
3121 
3122 	return 0;
3123 }
3124 
3125 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3126 {
3127 	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3128 	wlvif->rate_set = wlvif->basic_rate_set;
3129 }
3130 
3131 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3132 				   bool idle)
3133 {
3134 	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3135 
3136 	if (idle == cur_idle)
3137 		return;
3138 
3139 	if (idle) {
3140 		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3141 	} else {
3142 		/* The current firmware only supports sched_scan in idle */
3143 		if (wl->sched_vif == wlvif)
3144 			wl->ops->sched_scan_stop(wl, wlvif);
3145 
3146 		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3147 	}
3148 }
3149 
3150 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3151 			     struct ieee80211_conf *conf, u32 changed)
3152 {
3153 	int ret;
3154 
3155 	if (wlcore_is_p2p_mgmt(wlvif))
3156 		return 0;
3157 
3158 	if (conf->power_level != wlvif->power_level) {
3159 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3160 		if (ret < 0)
3161 			return ret;
3162 
3163 		wlvif->power_level = conf->power_level;
3164 	}
3165 
3166 	return 0;
3167 }
3168 
3169 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3170 {
3171 	struct wl1271 *wl = hw->priv;
3172 	struct wl12xx_vif *wlvif;
3173 	struct ieee80211_conf *conf = &hw->conf;
3174 	int ret = 0;
3175 
3176 	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3177 		     " changed 0x%x",
3178 		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3179 		     conf->power_level,
3180 		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3181 			 changed);
3182 
3183 	mutex_lock(&wl->mutex);
3184 
3185 	if (changed & IEEE80211_CONF_CHANGE_POWER)
3186 		wl->power_level = conf->power_level;
3187 
3188 	if (unlikely(wl->state != WLCORE_STATE_ON))
3189 		goto out;
3190 
3191 	ret = pm_runtime_resume_and_get(wl->dev);
3192 	if (ret < 0)
3193 		goto out;
3194 
3195 	/* configure each interface */
3196 	wl12xx_for_each_wlvif(wl, wlvif) {
3197 		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3198 		if (ret < 0)
3199 			goto out_sleep;
3200 	}
3201 
3202 out_sleep:
3203 	pm_runtime_mark_last_busy(wl->dev);
3204 	pm_runtime_put_autosuspend(wl->dev);
3205 
3206 out:
3207 	mutex_unlock(&wl->mutex);
3208 
3209 	return ret;
3210 }
3211 
3212 struct wl1271_filter_params {
3213 	bool enabled;
3214 	int mc_list_length;
3215 	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3216 };
3217 
3218 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3219 				       struct netdev_hw_addr_list *mc_list)
3220 {
3221 	struct wl1271_filter_params *fp;
3222 	struct netdev_hw_addr *ha;
3223 
3224 	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3225 	if (!fp) {
3226 		wl1271_error("Out of memory setting filters.");
3227 		return 0;
3228 	}
3229 
3230 	/* update multicast filtering parameters */
3231 	fp->mc_list_length = 0;
3232 	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3233 		fp->enabled = false;
3234 	} else {
3235 		fp->enabled = true;
3236 		netdev_hw_addr_list_for_each(ha, mc_list) {
3237 			memcpy(fp->mc_list[fp->mc_list_length],
3238 					ha->addr, ETH_ALEN);
3239 			fp->mc_list_length++;
3240 		}
3241 	}
3242 
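	/*
	 * The allocated filter params are handed to wl1271_op_configure_filter()
	 * through the opaque u64 multicast argument, which casts them back and
	 * frees them.
	 */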
3243 	return (u64)(unsigned long)fp;
3244 }
3245 
3246 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3247 				  FIF_FCSFAIL | \
3248 				  FIF_BCN_PRBRESP_PROMISC | \
3249 				  FIF_CONTROL | \
3250 				  FIF_OTHER_BSS)
3251 
3252 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3253 				       unsigned int changed,
3254 				       unsigned int *total, u64 multicast)
3255 {
3256 	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3257 	struct wl1271 *wl = hw->priv;
3258 	struct wl12xx_vif *wlvif;
3259 
3260 	int ret;
3261 
3262 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3263 		     " total %x", changed, *total);
3264 
3265 	mutex_lock(&wl->mutex);
3266 
3267 	*total &= WL1271_SUPPORTED_FILTERS;
3268 	changed &= WL1271_SUPPORTED_FILTERS;
3269 
3270 	if (unlikely(wl->state != WLCORE_STATE_ON))
3271 		goto out;
3272 
3273 	ret = pm_runtime_resume_and_get(wl->dev);
3274 	if (ret < 0)
3275 		goto out;
3276 
3277 	wl12xx_for_each_wlvif(wl, wlvif) {
3278 		if (wlcore_is_p2p_mgmt(wlvif))
3279 			continue;
3280 
3281 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3282 			if (*total & FIF_ALLMULTI)
3283 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3284 								   false,
3285 								   NULL, 0);
3286 			else if (fp)
3287 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3288 							fp->enabled,
3289 							fp->mc_list,
3290 							fp->mc_list_length);
3291 			if (ret < 0)
3292 				goto out_sleep;
3293 		}
3294 
3295 		/*
3296 		 * If the interface is in AP mode and was created with allmulticast,
3297 		 * disable the firmware filters so that all multicast packets are
3298 		 * passed. This is mandatory for mDNS-based discovery protocols.
3299 		 */
3300 		if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3301 			if (*total & FIF_ALLMULTI) {
3302 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3303 							false,
3304 							NULL, 0);
3305 				if (ret < 0)
3306 					goto out_sleep;
3307 			}
3308 		}
3309 	}
3310 
3311 	/*
3312 	 * the fw doesn't provide an api to configure the filters. instead,
3313 	 * the filters configuration is based on the active roles / ROC
3314 	 * state.
3315 	 */
3316 
3317 out_sleep:
3318 	pm_runtime_mark_last_busy(wl->dev);
3319 	pm_runtime_put_autosuspend(wl->dev);
3320 
3321 out:
3322 	mutex_unlock(&wl->mutex);
3323 	kfree(fp);
3324 }
3325 
3326 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3327 				u8 id, u8 key_type, u8 key_size,
3328 				const u8 *key, u8 hlid, u32 tx_seq_32,
3329 				u16 tx_seq_16, bool is_pairwise)
3330 {
3331 	struct wl1271_ap_key *ap_key;
3332 	int i;
3333 
3334 	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3335 
3336 	if (key_size > MAX_KEY_SIZE)
3337 		return -EINVAL;
3338 
3339 	/*
3340 	 * Find next free entry in ap_keys. Also check we are not replacing
3341 	 * an existing key.
3342 	 */
3343 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3344 		if (wlvif->ap.recorded_keys[i] == NULL)
3345 			break;
3346 
3347 		if (wlvif->ap.recorded_keys[i]->id == id) {
3348 			wl1271_warning("trying to record key replacement");
3349 			return -EINVAL;
3350 		}
3351 	}
3352 
3353 	if (i == MAX_NUM_KEYS)
3354 		return -EBUSY;
3355 
3356 	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3357 	if (!ap_key)
3358 		return -ENOMEM;
3359 
3360 	ap_key->id = id;
3361 	ap_key->key_type = key_type;
3362 	ap_key->key_size = key_size;
3363 	memcpy(ap_key->key, key, key_size);
3364 	ap_key->hlid = hlid;
3365 	ap_key->tx_seq_32 = tx_seq_32;
3366 	ap_key->tx_seq_16 = tx_seq_16;
3367 	ap_key->is_pairwise = is_pairwise;
3368 
3369 	wlvif->ap.recorded_keys[i] = ap_key;
3370 	return 0;
3371 }
3372 
3373 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3374 {
3375 	int i;
3376 
3377 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3378 		kfree(wlvif->ap.recorded_keys[i]);
3379 		wlvif->ap.recorded_keys[i] = NULL;
3380 	}
3381 }
3382 
3383 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3384 {
3385 	int i, ret = 0;
3386 	struct wl1271_ap_key *key;
3387 	bool wep_key_added = false;
3388 
3389 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3390 		u8 hlid;
3391 		if (wlvif->ap.recorded_keys[i] == NULL)
3392 			break;
3393 
3394 		key = wlvif->ap.recorded_keys[i];
3395 		hlid = key->hlid;
3396 		if (hlid == WL12XX_INVALID_LINK_ID)
3397 			hlid = wlvif->ap.bcast_hlid;
3398 
3399 		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3400 					    key->id, key->key_type,
3401 					    key->key_size, key->key,
3402 					    hlid, key->tx_seq_32,
3403 					    key->tx_seq_16, key->is_pairwise);
3404 		if (ret < 0)
3405 			goto out;
3406 
3407 		if (key->key_type == KEY_WEP)
3408 			wep_key_added = true;
3409 	}
3410 
3411 	if (wep_key_added) {
3412 		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3413 						     wlvif->ap.bcast_hlid);
3414 		if (ret < 0)
3415 			goto out;
3416 	}
3417 
3418 out:
3419 	wl1271_free_ap_keys(wl, wlvif);
3420 	return ret;
3421 }
3422 
3423 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3424 		       u16 action, u8 id, u8 key_type,
3425 		       u8 key_size, const u8 *key, u32 tx_seq_32,
3426 		       u16 tx_seq_16, struct ieee80211_sta *sta,
3427 		       bool is_pairwise)
3428 {
3429 	int ret;
3430 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3431 
3432 	if (is_ap) {
3433 		struct wl1271_station *wl_sta;
3434 		u8 hlid;
3435 
3436 		if (sta) {
3437 			wl_sta = (struct wl1271_station *)sta->drv_priv;
3438 			hlid = wl_sta->hlid;
3439 		} else {
3440 			hlid = wlvif->ap.bcast_hlid;
3441 		}
3442 
3443 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3444 			/*
3445 			 * We do not support removing keys after AP shutdown.
3446 			 * Pretend we do to make mac80211 happy.
3447 			 */
3448 			if (action != KEY_ADD_OR_REPLACE)
3449 				return 0;
3450 
3451 			ret = wl1271_record_ap_key(wl, wlvif, id,
3452 					     key_type, key_size,
3453 					     key, hlid, tx_seq_32,
3454 					     tx_seq_16, is_pairwise);
3455 		} else {
3456 			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3457 					     id, key_type, key_size,
3458 					     key, hlid, tx_seq_32,
3459 					     tx_seq_16, is_pairwise);
3460 		}
3461 
3462 		if (ret < 0)
3463 			return ret;
3464 	} else {
3465 		const u8 *addr;
3466 		static const u8 bcast_addr[ETH_ALEN] = {
3467 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3468 		};
3469 
3470 		addr = sta ? sta->addr : bcast_addr;
3471 
3472 		if (is_zero_ether_addr(addr)) {
3473 			/* We don't support TX-only encryption */
3474 			return -EOPNOTSUPP;
3475 		}
3476 
3477 		/* The wl1271 does not allow removing unicast keys - they
3478 		   will be cleared automatically on the next CMD_JOIN. Ignore the
3479 		   request silently, as we don't want mac80211 to emit
3480 		   an error message. */
3481 		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3482 			return 0;
3483 
3484 		/* don't remove key if hlid was already deleted */
3485 		if (action == KEY_REMOVE &&
3486 		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3487 			return 0;
3488 
3489 		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3490 					     id, key_type, key_size,
3491 					     key, addr, tx_seq_32,
3492 					     tx_seq_16);
3493 		if (ret < 0)
3494 			return ret;
3495 
3496 	}
3497 
3498 	return 0;
3499 }
3500 
3501 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3502 			     struct ieee80211_vif *vif,
3503 			     struct ieee80211_sta *sta,
3504 			     struct ieee80211_key_conf *key_conf)
3505 {
3506 	struct wl1271 *wl = hw->priv;
3507 	int ret;
3508 	bool might_change_spare =
3509 		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3510 		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3511 
3512 	if (might_change_spare) {
3513 		/*
3514 		 * stop the queues and flush to ensure the next packets are
3515 		 * in sync with FW spare block accounting
3516 		 */
3517 		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3518 		wl1271_tx_flush(wl);
3519 	}
3520 
3521 	mutex_lock(&wl->mutex);
3522 
3523 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3524 		ret = -EAGAIN;
3525 		goto out_wake_queues;
3526 	}
3527 
3528 	ret = pm_runtime_resume_and_get(wl->dev);
3529 	if (ret < 0)
3530 		goto out_wake_queues;
3531 
3532 	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3533 
3534 	pm_runtime_mark_last_busy(wl->dev);
3535 	pm_runtime_put_autosuspend(wl->dev);
3536 
3537 out_wake_queues:
3538 	if (might_change_spare)
3539 		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3540 
3541 	mutex_unlock(&wl->mutex);
3542 
3543 	return ret;
3544 }
3545 
3546 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3547 		   struct ieee80211_vif *vif,
3548 		   struct ieee80211_sta *sta,
3549 		   struct ieee80211_key_conf *key_conf)
3550 {
3551 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3552 	int ret;
3553 	u32 tx_seq_32 = 0;
3554 	u16 tx_seq_16 = 0;
3555 	u8 key_type;
3556 	u8 hlid;
3557 	bool is_pairwise;
3558 
3559 	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3560 
3561 	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3562 	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3563 		     key_conf->cipher, key_conf->keyidx,
3564 		     key_conf->keylen, key_conf->flags);
3565 	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3566 
3567 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3568 		if (sta) {
3569 			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3570 			hlid = wl_sta->hlid;
3571 		} else {
3572 			hlid = wlvif->ap.bcast_hlid;
3573 		}
3574 	else
3575 		hlid = wlvif->sta.hlid;
3576 
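	/*
	 * Seed the key's TX sequence counter from the running count of freed
	 * packets on this link; it is split below into the high/low parts the
	 * firmware command expects.
	 */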
3577 	if (hlid != WL12XX_INVALID_LINK_ID) {
3578 		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3579 		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3580 		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3581 	}
3582 
3583 	switch (key_conf->cipher) {
3584 	case WLAN_CIPHER_SUITE_WEP40:
3585 	case WLAN_CIPHER_SUITE_WEP104:
3586 		key_type = KEY_WEP;
3587 
3588 		key_conf->hw_key_idx = key_conf->keyidx;
3589 		break;
3590 	case WLAN_CIPHER_SUITE_TKIP:
3591 		key_type = KEY_TKIP;
3592 		key_conf->hw_key_idx = key_conf->keyidx;
3593 		break;
3594 	case WLAN_CIPHER_SUITE_CCMP:
3595 		key_type = KEY_AES;
3596 		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3597 		break;
3598 	case WL1271_CIPHER_SUITE_GEM:
3599 		key_type = KEY_GEM;
3600 		break;
3601 	default:
3602 		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3603 
3604 		return -EOPNOTSUPP;
3605 	}
3606 
3607 	is_pairwise = key_conf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
3608 
3609 	switch (cmd) {
3610 	case SET_KEY:
3611 		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3612 				 key_conf->keyidx, key_type,
3613 				 key_conf->keylen, key_conf->key,
3614 				 tx_seq_32, tx_seq_16, sta, is_pairwise);
3615 		if (ret < 0) {
3616 			wl1271_error("Could not add or replace key");
3617 			return ret;
3618 		}
3619 
3620 		/* Store AP encryption key type */
3621 		if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3622 			wlvif->encryption_type = key_type;
3623 
3624 		/*
3625 		 * reconfiguring arp response if the unicast (or common)
3626 		 * encryption key type was changed
3627 		 */
3628 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3629 		    (sta || key_type == KEY_WEP) &&
3630 		    wlvif->encryption_type != key_type) {
3631 			wlvif->encryption_type = key_type;
3632 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3633 			if (ret < 0) {
3634 				wl1271_warning("build arp rsp failed: %d", ret);
3635 				return ret;
3636 			}
3637 		}
3638 		break;
3639 
3640 	case DISABLE_KEY:
3641 		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3642 				     key_conf->keyidx, key_type,
3643 				     key_conf->keylen, key_conf->key,
3644 				     0, 0, sta, is_pairwise);
3645 		if (ret < 0) {
3646 			wl1271_error("Could not remove key");
3647 			return ret;
3648 		}
3649 		break;
3650 
3651 	default:
3652 		wl1271_error("Unsupported key cmd 0x%x", cmd);
3653 		return -EOPNOTSUPP;
3654 	}
3655 
3656 	return ret;
3657 }
3658 EXPORT_SYMBOL_GPL(wlcore_set_key);
3659 
wl1271_op_set_default_key_idx(struct ieee80211_hw * hw,struct ieee80211_vif * vif,int key_idx)3660 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3661 					  struct ieee80211_vif *vif,
3662 					  int key_idx)
3663 {
3664 	struct wl1271 *wl = hw->priv;
3665 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3666 	int ret;
3667 
3668 	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3669 		     key_idx);
3670 
3671 	/* we don't handle unsetting of default key */
3672 	if (key_idx == -1)
3673 		return;
3674 
3675 	mutex_lock(&wl->mutex);
3676 
3677 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3678 		ret = -EAGAIN;
3679 		goto out_unlock;
3680 	}
3681 
3682 	ret = pm_runtime_resume_and_get(wl->dev);
3683 	if (ret < 0)
3684 		goto out_unlock;
3685 
3686 	wlvif->default_key = key_idx;
3687 
3688 	/* the default WEP key needs to be configured at least once */
3689 	if (wlvif->encryption_type == KEY_WEP) {
3690 		ret = wl12xx_cmd_set_default_wep_key(wl,
3691 				key_idx,
3692 				wlvif->sta.hlid);
3693 		if (ret < 0)
3694 			goto out_sleep;
3695 	}
3696 
3697 out_sleep:
3698 	pm_runtime_mark_last_busy(wl->dev);
3699 	pm_runtime_put_autosuspend(wl->dev);
3700 
3701 out_unlock:
3702 	mutex_unlock(&wl->mutex);
3703 }
3704 
wlcore_regdomain_config(struct wl1271 * wl)3705 void wlcore_regdomain_config(struct wl1271 *wl)
3706 {
3707 	int ret;
3708 
3709 	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3710 		return;
3711 
3712 	mutex_lock(&wl->mutex);
3713 
3714 	if (unlikely(wl->state != WLCORE_STATE_ON))
3715 		goto out;
3716 
3717 	ret = pm_runtime_resume_and_get(wl->dev);
3718 	if (ret < 0)
3719 		goto out;
3720 
3721 	ret = wlcore_cmd_regdomain_config_locked(wl);
3722 	if (ret < 0) {
3723 		wl12xx_queue_recovery_work(wl);
3724 		goto out;
3725 	}
3726 
3727 	pm_runtime_mark_last_busy(wl->dev);
3728 	pm_runtime_put_autosuspend(wl->dev);
3729 out:
3730 	mutex_unlock(&wl->mutex);
3731 }
3732 
wl1271_op_hw_scan(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_scan_request * hw_req)3733 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3734 			     struct ieee80211_vif *vif,
3735 			     struct ieee80211_scan_request *hw_req)
3736 {
3737 	struct cfg80211_scan_request *req = &hw_req->req;
3738 	struct wl1271 *wl = hw->priv;
3739 	int ret;
3740 	u8 *ssid = NULL;
3741 	size_t len = 0;
3742 
3743 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3744 
3745 	if (req->n_ssids) {
3746 		ssid = req->ssids[0].ssid;
3747 		len = req->ssids[0].ssid_len;
3748 	}
3749 
3750 	mutex_lock(&wl->mutex);
3751 
3752 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3753 		/*
3754 		 * We cannot return -EBUSY here because cfg80211 will expect
3755 		 * a call to ieee80211_scan_completed if we do - in this case
3756 		 * there won't be any call.
3757 		 */
3758 		ret = -EAGAIN;
3759 		goto out;
3760 	}
3761 
3762 	ret = pm_runtime_resume_and_get(wl->dev);
3763 	if (ret < 0)
3764 		goto out;
3765 
3766 	/* fail if there is any role in ROC */
3767 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3768 		/* don't allow scanning right now */
3769 		ret = -EBUSY;
3770 		goto out_sleep;
3771 	}
3772 
3773 	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3774 out_sleep:
3775 	pm_runtime_mark_last_busy(wl->dev);
3776 	pm_runtime_put_autosuspend(wl->dev);
3777 out:
3778 	mutex_unlock(&wl->mutex);
3779 
3780 	return ret;
3781 }
3782 
wl1271_op_cancel_hw_scan(struct ieee80211_hw * hw,struct ieee80211_vif * vif)3783 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3784 				     struct ieee80211_vif *vif)
3785 {
3786 	struct wl1271 *wl = hw->priv;
3787 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3788 	struct cfg80211_scan_info info = {
3789 		.aborted = true,
3790 	};
3791 	int ret;
3792 
3793 	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3794 
3795 	mutex_lock(&wl->mutex);
3796 
3797 	if (unlikely(wl->state != WLCORE_STATE_ON))
3798 		goto out;
3799 
3800 	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3801 		goto out;
3802 
3803 	ret = pm_runtime_resume_and_get(wl->dev);
3804 	if (ret < 0)
3805 		goto out;
3806 
3807 	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3808 		ret = wl->ops->scan_stop(wl, wlvif);
3809 		if (ret < 0)
3810 			goto out_sleep;
3811 	}
3812 
3813 	/*
3814 	 * Rearm the tx watchdog just before idling scan. This
3815 	 * prevents just-finished scans from triggering the watchdog
3816 	 */
3817 	wl12xx_rearm_tx_watchdog_locked(wl);
3818 
3819 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3820 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3821 	wl->scan_wlvif = NULL;
3822 	wl->scan.req = NULL;
3823 	ieee80211_scan_completed(wl->hw, &info);
3824 
3825 out_sleep:
3826 	pm_runtime_mark_last_busy(wl->dev);
3827 	pm_runtime_put_autosuspend(wl->dev);
3828 out:
3829 	mutex_unlock(&wl->mutex);
3830 
3831 	cancel_delayed_work_sync(&wl->scan_complete_work);
3832 }
3833 
wl1271_op_sched_scan_start(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct cfg80211_sched_scan_request * req,struct ieee80211_scan_ies * ies)3834 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3835 				      struct ieee80211_vif *vif,
3836 				      struct cfg80211_sched_scan_request *req,
3837 				      struct ieee80211_scan_ies *ies)
3838 {
3839 	struct wl1271 *wl = hw->priv;
3840 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3841 	int ret;
3842 
3843 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3844 
3845 	mutex_lock(&wl->mutex);
3846 
3847 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3848 		ret = -EAGAIN;
3849 		goto out;
3850 	}
3851 
3852 	ret = pm_runtime_resume_and_get(wl->dev);
3853 	if (ret < 0)
3854 		goto out;
3855 
3856 	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3857 	if (ret < 0)
3858 		goto out_sleep;
3859 
3860 	wl->sched_vif = wlvif;
3861 
3862 out_sleep:
3863 	pm_runtime_mark_last_busy(wl->dev);
3864 	pm_runtime_put_autosuspend(wl->dev);
3865 out:
3866 	mutex_unlock(&wl->mutex);
3867 	return ret;
3868 }
3869 
wl1271_op_sched_scan_stop(struct ieee80211_hw * hw,struct ieee80211_vif * vif)3870 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3871 				     struct ieee80211_vif *vif)
3872 {
3873 	struct wl1271 *wl = hw->priv;
3874 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3875 	int ret;
3876 
3877 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3878 
3879 	mutex_lock(&wl->mutex);
3880 
3881 	if (unlikely(wl->state != WLCORE_STATE_ON))
3882 		goto out;
3883 
3884 	ret = pm_runtime_resume_and_get(wl->dev);
3885 	if (ret < 0)
3886 		goto out;
3887 
3888 	wl->ops->sched_scan_stop(wl, wlvif);
3889 
3890 	pm_runtime_mark_last_busy(wl->dev);
3891 	pm_runtime_put_autosuspend(wl->dev);
3892 out:
3893 	mutex_unlock(&wl->mutex);
3894 
3895 	return 0;
3896 }
3897 
wl1271_op_set_frag_threshold(struct ieee80211_hw * hw,u32 value)3898 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3899 {
3900 	struct wl1271 *wl = hw->priv;
3901 	int ret = 0;
3902 
3903 	mutex_lock(&wl->mutex);
3904 
3905 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3906 		ret = -EAGAIN;
3907 		goto out;
3908 	}
3909 
3910 	ret = pm_runtime_resume_and_get(wl->dev);
3911 	if (ret < 0)
3912 		goto out;
3913 
3914 	ret = wl1271_acx_frag_threshold(wl, value);
3915 	if (ret < 0)
3916 		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3917 
3918 	pm_runtime_mark_last_busy(wl->dev);
3919 	pm_runtime_put_autosuspend(wl->dev);
3920 
3921 out:
3922 	mutex_unlock(&wl->mutex);
3923 
3924 	return ret;
3925 }
3926 
wl1271_op_set_rts_threshold(struct ieee80211_hw * hw,u32 value)3927 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3928 {
3929 	struct wl1271 *wl = hw->priv;
3930 	struct wl12xx_vif *wlvif;
3931 	int ret = 0;
3932 
3933 	mutex_lock(&wl->mutex);
3934 
3935 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3936 		ret = -EAGAIN;
3937 		goto out;
3938 	}
3939 
3940 	ret = pm_runtime_resume_and_get(wl->dev);
3941 	if (ret < 0)
3942 		goto out;
3943 
3944 	wl12xx_for_each_wlvif(wl, wlvif) {
3945 		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3946 		if (ret < 0)
3947 			wl1271_warning("set rts threshold failed: %d", ret);
3948 	}
3949 	pm_runtime_mark_last_busy(wl->dev);
3950 	pm_runtime_put_autosuspend(wl->dev);
3951 
3952 out:
3953 	mutex_unlock(&wl->mutex);
3954 
3955 	return ret;
3956 }
3957 
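/*
 * Strip a single information element from a template frame in place:
 * find the IE, move the rest of the frame over it and trim the skb.
 */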
wl12xx_remove_ie(struct sk_buff * skb,u8 eid,int ieoffset)3958 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3959 {
3960 	int len;
3961 	const u8 *next, *end = skb->data + skb->len;
3962 	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3963 					skb->len - ieoffset);
3964 	if (!ie)
3965 		return;
3966 	len = ie[1] + 2;
3967 	next = ie + len;
3968 	memmove(ie, next, end - next);
3969 	skb_trim(skb, skb->len - len);
3970 }
3971 
wl12xx_remove_vendor_ie(struct sk_buff * skb,unsigned int oui,u8 oui_type,int ieoffset)3972 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3973 					    unsigned int oui, u8 oui_type,
3974 					    int ieoffset)
3975 {
3976 	int len;
3977 	const u8 *next, *end = skb->data + skb->len;
3978 	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3979 					       skb->data + ieoffset,
3980 					       skb->len - ieoffset);
3981 	if (!ie)
3982 		return;
3983 	len = ie[1] + 2;
3984 	next = ie + len;
3985 	memmove(ie, next, end - next);
3986 	skb_trim(skb, skb->len - len);
3987 }
3988 
wl1271_ap_set_probe_resp_tmpl(struct wl1271 * wl,u32 rates,struct ieee80211_vif * vif)3989 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3990 					 struct ieee80211_vif *vif)
3991 {
3992 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3993 	struct sk_buff *skb;
3994 	int ret;
3995 
3996 	skb = ieee80211_proberesp_get(wl->hw, vif);
3997 	if (!skb)
3998 		return -EOPNOTSUPP;
3999 
4000 	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4001 				      CMD_TEMPL_AP_PROBE_RESPONSE,
4002 				      skb->data,
4003 				      skb->len, 0,
4004 				      rates);
4005 	dev_kfree_skb(skb);
4006 
4007 	if (ret < 0)
4008 		goto out;
4009 
4010 	wl1271_debug(DEBUG_AP, "probe response updated");
4011 	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
4012 
4013 out:
4014 	return ret;
4015 }
4016 
wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 * wl,struct ieee80211_vif * vif,u8 * probe_rsp_data,size_t probe_rsp_len,u32 rates)4017 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
4018 					     struct ieee80211_vif *vif,
4019 					     u8 *probe_rsp_data,
4020 					     size_t probe_rsp_len,
4021 					     u32 rates)
4022 {
4023 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4024 	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
4025 	int ssid_ie_offset, ie_offset, templ_len;
4026 	const u8 *ptr;
4027 
4028 	/* no need to change probe response if the SSID is set correctly */
4029 	if (wlvif->ssid_len > 0)
4030 		return wl1271_cmd_template_set(wl, wlvif->role_id,
4031 					       CMD_TEMPL_AP_PROBE_RESPONSE,
4032 					       probe_rsp_data,
4033 					       probe_rsp_len, 0,
4034 					       rates);
4035 
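	/*
	 * A zero wlvif->ssid_len indicates a hidden SSID in the beacon;
	 * rebuild the template below with the real SSID taken from
	 * vif->cfg so the probe response advertises it.
	 */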
4036 	if (probe_rsp_len + vif->cfg.ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
4037 		wl1271_error("probe_rsp template too big");
4038 		return -EINVAL;
4039 	}
4040 
4041 	/* start searching from IE offset */
4042 	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
4043 
4044 	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
4045 			       probe_rsp_len - ie_offset);
4046 	if (!ptr) {
4047 		wl1271_error("No SSID in beacon!");
4048 		return -EINVAL;
4049 	}
4050 
4051 	ssid_ie_offset = ptr - probe_rsp_data;
4052 	ptr += (ptr[1] + 2);
4053 
4054 	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
4055 
4056 	/* insert SSID from bss_conf */
4057 	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
4058 	probe_rsp_templ[ssid_ie_offset + 1] = vif->cfg.ssid_len;
4059 	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
4060 	       vif->cfg.ssid, vif->cfg.ssid_len);
4061 	templ_len = ssid_ie_offset + 2 + vif->cfg.ssid_len;
4062 
4063 	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + vif->cfg.ssid_len,
4064 	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
4065 	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
4066 
4067 	return wl1271_cmd_template_set(wl, wlvif->role_id,
4068 				       CMD_TEMPL_AP_PROBE_RESPONSE,
4069 				       probe_rsp_templ,
4070 				       templ_len, 0,
4071 				       rates);
4072 }
4073 
wl1271_bss_erp_info_changed(struct wl1271 * wl,struct ieee80211_vif * vif,struct ieee80211_bss_conf * bss_conf,u32 changed)4074 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
4075 				       struct ieee80211_vif *vif,
4076 				       struct ieee80211_bss_conf *bss_conf,
4077 				       u32 changed)
4078 {
4079 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4080 	int ret = 0;
4081 
4082 	if (changed & BSS_CHANGED_ERP_SLOT) {
4083 		if (bss_conf->use_short_slot)
4084 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4085 		else
4086 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4087 		if (ret < 0) {
4088 			wl1271_warning("Set slot time failed %d", ret);
4089 			goto out;
4090 		}
4091 	}
4092 
4093 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4094 		if (bss_conf->use_short_preamble)
4095 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4096 		else
4097 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4098 	}
4099 
4100 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4101 		if (bss_conf->use_cts_prot)
4102 			ret = wl1271_acx_cts_protect(wl, wlvif,
4103 						     CTSPROTECT_ENABLE);
4104 		else
4105 			ret = wl1271_acx_cts_protect(wl, wlvif,
4106 						     CTSPROTECT_DISABLE);
4107 		if (ret < 0) {
4108 			wl1271_warning("Set ctsprotect failed %d", ret);
4109 			goto out;
4110 		}
4111 	}
4112 
4113 out:
4114 	return ret;
4115 }
4116 
wlcore_set_beacon_template(struct wl1271 * wl,struct ieee80211_vif * vif,bool is_ap)4117 static int wlcore_set_beacon_template(struct wl1271 *wl,
4118 				      struct ieee80211_vif *vif,
4119 				      bool is_ap)
4120 {
4121 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4122 	struct ieee80211_hdr *hdr;
4123 	u32 min_rate;
4124 	int ret;
4125 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4126 	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif, 0);
4127 	u16 tmpl_id;
4128 
4129 	if (!beacon) {
4130 		ret = -EINVAL;
4131 		goto out;
4132 	}
4133 
4134 	wl1271_debug(DEBUG_MASTER, "beacon updated");
4135 
4136 	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4137 	if (ret < 0) {
4138 		dev_kfree_skb(beacon);
4139 		goto out;
4140 	}
4141 	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4142 	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4143 		CMD_TEMPL_BEACON;
4144 	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4145 				      beacon->data,
4146 				      beacon->len, 0,
4147 				      min_rate);
4148 	if (ret < 0) {
4149 		dev_kfree_skb(beacon);
4150 		goto out;
4151 	}
4152 
4153 	wlvif->wmm_enabled =
4154 		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4155 					WLAN_OUI_TYPE_MICROSOFT_WMM,
4156 					beacon->data + ieoffset,
4157 					beacon->len - ieoffset);
4158 
4159 	/*
4160 	 * In case userspace has already set a probe-resp template
4161 	 * explicitly, don't derive one from the beacon data.
4162 	 */
4163 	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4164 		goto end_bcn;
4165 
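	/*
	 * Otherwise derive a probe-response template from the beacon:
	 * strip the TIM and P2P IEs and rewrite the frame control to
	 * IEEE80211_STYPE_PROBE_RESP before uploading it.
	 */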
4166 	/* remove TIM ie from probe response */
4167 	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4168 
4169 	/*
4170 	 * remove p2p ie from probe response.
4171 	 * the fw responds to probe requests that don't include
4172 	 * the p2p ie. probe requests with a p2p ie will be passed up,
4173 	 * and will be answered by the supplicant (the spec
4174 	 * forbids including the p2p ie when responding to probe
4175 	 * requests that didn't include it).
4176 	 */
4177 	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4178 				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4179 
4180 	hdr = (struct ieee80211_hdr *) beacon->data;
4181 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4182 					 IEEE80211_STYPE_PROBE_RESP);
4183 	if (is_ap)
4184 		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4185 							   beacon->data,
4186 							   beacon->len,
4187 							   min_rate);
4188 	else
4189 		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4190 					      CMD_TEMPL_PROBE_RESPONSE,
4191 					      beacon->data,
4192 					      beacon->len, 0,
4193 					      min_rate);
4194 end_bcn:
4195 	dev_kfree_skb(beacon);
4196 	if (ret < 0)
4197 		goto out;
4198 
4199 out:
4200 	return ret;
4201 }
4202 
wl1271_bss_beacon_info_changed(struct wl1271 * wl,struct ieee80211_vif * vif,struct ieee80211_bss_conf * bss_conf,u32 changed)4203 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4204 					  struct ieee80211_vif *vif,
4205 					  struct ieee80211_bss_conf *bss_conf,
4206 					  u32 changed)
4207 {
4208 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4209 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4210 	int ret = 0;
4211 
4212 	if (changed & BSS_CHANGED_BEACON_INT) {
4213 		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4214 			bss_conf->beacon_int);
4215 
4216 		wlvif->beacon_int = bss_conf->beacon_int;
4217 	}
4218 
4219 	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4220 		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4221 
4222 		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4223 	}
4224 
4225 	if (changed & BSS_CHANGED_BEACON) {
4226 		ret = wlcore_set_beacon_template(wl, vif, is_ap);
4227 		if (ret < 0)
4228 			goto out;
4229 
4230 		if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4231 				       &wlvif->flags)) {
4232 			ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4233 			if (ret < 0)
4234 				goto out;
4235 		}
4236 	}
4237 out:
4238 	if (ret != 0)
4239 		wl1271_error("beacon info change failed: %d", ret);
4240 	return ret;
4241 }
4242 
4243 /* AP mode changes */
wl1271_bss_info_changed_ap(struct wl1271 * wl,struct ieee80211_vif * vif,struct ieee80211_bss_conf * bss_conf,u32 changed)4244 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4245 				       struct ieee80211_vif *vif,
4246 				       struct ieee80211_bss_conf *bss_conf,
4247 				       u32 changed)
4248 {
4249 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4250 	int ret = 0;
4251 
4252 	if (changed & BSS_CHANGED_BASIC_RATES) {
4253 		u32 rates = bss_conf->basic_rates;
4254 
4255 		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4256 								 wlvif->band);
4257 		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4258 							wlvif->basic_rate_set);
4259 
4260 		ret = wl1271_init_ap_rates(wl, wlvif);
4261 		if (ret < 0) {
4262 			wl1271_error("AP rate policy change failed %d", ret);
4263 			goto out;
4264 		}
4265 
4266 		ret = wl1271_ap_init_templates(wl, vif);
4267 		if (ret < 0)
4268 			goto out;
4269 
4270 		/* No need to set probe resp template for mesh */
4271 		if (!ieee80211_vif_is_mesh(vif)) {
4272 			ret = wl1271_ap_set_probe_resp_tmpl(wl,
4273 							    wlvif->basic_rate,
4274 							    vif);
4275 			if (ret < 0)
4276 				goto out;
4277 		}
4278 
4279 		ret = wlcore_set_beacon_template(wl, vif, true);
4280 		if (ret < 0)
4281 			goto out;
4282 	}
4283 
4284 	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4285 	if (ret < 0)
4286 		goto out;
4287 
4288 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
4289 		if (bss_conf->enable_beacon) {
4290 			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4291 				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4292 				if (ret < 0)
4293 					goto out;
4294 
4295 				ret = wl1271_ap_init_hwenc(wl, wlvif);
4296 				if (ret < 0)
4297 					goto out;
4298 
4299 				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4300 				wl1271_debug(DEBUG_AP, "started AP");
4301 			}
4302 		} else {
4303 			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4304 				/*
4305 				 * The AP might be in ROC if we have just
4306 				 * sent an auth reply. Handle it.
4307 				 */
4308 				if (test_bit(wlvif->role_id, wl->roc_map))
4309 					wl12xx_croc(wl, wlvif->role_id);
4310 
4311 				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4312 				if (ret < 0)
4313 					goto out;
4314 
4315 				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4316 				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4317 					  &wlvif->flags);
4318 				wl1271_debug(DEBUG_AP, "stopped AP");
4319 			}
4320 		}
4321 	}
4322 
4323 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4324 	if (ret < 0)
4325 		goto out;
4326 
4327 	/* Handle HT information change */
4328 	if ((changed & BSS_CHANGED_HT) &&
4329 	    (bss_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4330 		ret = wl1271_acx_set_ht_information(wl, wlvif,
4331 					bss_conf->ht_operation_mode);
4332 		if (ret < 0) {
4333 			wl1271_warning("Set ht information failed %d", ret);
4334 			goto out;
4335 		}
4336 	}
4337 
4338 out:
4339 	return;
4340 }
4341 
wlcore_set_bssid(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_vif * vif,u32 sta_rate_set)4342 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4343 			    struct ieee80211_vif *vif, u32 sta_rate_set)
4344 {
4345 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
4346 	u32 rates;
4347 	int ret;
4348 
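	/*
	 * A new BSSID implies a (re)join: refresh the rate policies, stop
	 * any running sched scan, rebuild the null-data templates and mark
	 * the role as in use.
	 */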
4349 	wl1271_debug(DEBUG_MAC80211,
4350 	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4351 	     bss_conf->bssid, vif->cfg.aid,
4352 	     bss_conf->beacon_int,
4353 	     bss_conf->basic_rates, sta_rate_set);
4354 
4355 	wlvif->beacon_int = bss_conf->beacon_int;
4356 	rates = bss_conf->basic_rates;
4357 	wlvif->basic_rate_set =
4358 		wl1271_tx_enabled_rates_get(wl, rates,
4359 					    wlvif->band);
4360 	wlvif->basic_rate =
4361 		wl1271_tx_min_rate_get(wl,
4362 				       wlvif->basic_rate_set);
4363 
4364 	if (sta_rate_set)
4365 		wlvif->rate_set =
4366 			wl1271_tx_enabled_rates_get(wl,
4367 						sta_rate_set,
4368 						wlvif->band);
4369 
4370 	/* we only support sched_scan while not connected */
4371 	if (wl->sched_vif == wlvif)
4372 		wl->ops->sched_scan_stop(wl, wlvif);
4373 
4374 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4375 	if (ret < 0)
4376 		return ret;
4377 
4378 	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4379 	if (ret < 0)
4380 		return ret;
4381 
4382 	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4383 	if (ret < 0)
4384 		return ret;
4385 
4386 	wlcore_set_ssid(wl, wlvif);
4387 
4388 	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4389 
4390 	return 0;
4391 }
4392 
wlcore_clear_bssid(struct wl1271 * wl,struct wl12xx_vif * wlvif)4393 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4394 {
4395 	int ret;
4396 
4397 	/* revert back to minimum rates for the current band */
4398 	wl1271_set_band_rate(wl, wlvif);
4399 	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4400 
4401 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4402 	if (ret < 0)
4403 		return ret;
4404 
4405 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4406 	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4407 		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4408 		if (ret < 0)
4409 			return ret;
4410 	}
4411 
4412 	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4413 	return 0;
4414 }
4415 /* STA/IBSS mode changes */
wl1271_bss_info_changed_sta(struct wl1271 * wl,struct ieee80211_vif * vif,struct ieee80211_bss_conf * bss_conf,u32 changed)4416 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4417 					struct ieee80211_vif *vif,
4418 					struct ieee80211_bss_conf *bss_conf,
4419 					u32 changed)
4420 {
4421 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4422 	bool do_join = false;
4423 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4424 	bool ibss_joined = false;
4425 	u32 sta_rate_set = 0;
4426 	int ret;
4427 	struct ieee80211_sta *sta;
4428 	bool sta_exists = false;
4429 	struct ieee80211_sta_ht_cap sta_ht_cap;
4430 
4431 	if (is_ibss) {
4432 		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4433 						     changed);
4434 		if (ret < 0)
4435 			goto out;
4436 	}
4437 
4438 	if (changed & BSS_CHANGED_IBSS) {
4439 		if (vif->cfg.ibss_joined) {
4440 			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4441 			ibss_joined = true;
4442 		} else {
4443 			wlcore_unset_assoc(wl, wlvif);
4444 			wl12xx_cmd_role_stop_sta(wl, wlvif);
4445 		}
4446 	}
4447 
4448 	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4449 		do_join = true;
4450 
4451 	/* Need to update the SSID (for filtering etc) */
4452 	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4453 		do_join = true;
4454 
4455 	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4456 		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4457 			     bss_conf->enable_beacon ? "enabled" : "disabled");
4458 
4459 		do_join = true;
4460 	}
4461 
4462 	if (changed & BSS_CHANGED_IDLE && !is_ibss)
4463 		wl1271_sta_handle_idle(wl, wlvif, vif->cfg.idle);
4464 
4465 	if (changed & BSS_CHANGED_CQM) {
4466 		bool enable = false;
4467 		if (bss_conf->cqm_rssi_thold)
4468 			enable = true;
4469 		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4470 						  bss_conf->cqm_rssi_thold,
4471 						  bss_conf->cqm_rssi_hyst);
4472 		if (ret < 0)
4473 			goto out;
4474 		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4475 	}
4476 
4477 	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4478 		       BSS_CHANGED_ASSOC)) {
4479 		rcu_read_lock();
4480 		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4481 		if (sta) {
4482 			u8 *rx_mask = sta->deflink.ht_cap.mcs.rx_mask;
4483 
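			/*
			 * Legacy rates sit in the low bits of the rate set;
			 * the HT MCS 0-7 and MCS 8-15 rx masks are packed
			 * above them at HW_HT_RATES_OFFSET and
			 * HW_MIMO_RATES_OFFSET.
			 */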
4484 			/* save the supp_rates of the ap */
4485 			sta_rate_set = sta->deflink.supp_rates[wlvif->band];
4486 			if (sta->deflink.ht_cap.ht_supported)
4487 				sta_rate_set |=
4488 					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4489 					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4490 			sta_ht_cap = sta->deflink.ht_cap;
4491 			sta_exists = true;
4492 		}
4493 
4494 		rcu_read_unlock();
4495 	}
4496 
4497 	if (changed & BSS_CHANGED_BSSID) {
4498 		if (!is_zero_ether_addr(bss_conf->bssid)) {
4499 			ret = wlcore_set_bssid(wl, wlvif, vif,
4500 					       sta_rate_set);
4501 			if (ret < 0)
4502 				goto out;
4503 
4504 			/* Need to update the BSSID (for filtering etc) */
4505 			do_join = true;
4506 		} else {
4507 			ret = wlcore_clear_bssid(wl, wlvif);
4508 			if (ret < 0)
4509 				goto out;
4510 		}
4511 	}
4512 
4513 	if (changed & BSS_CHANGED_IBSS) {
4514 		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4515 			     vif->cfg.ibss_joined);
4516 
4517 		if (vif->cfg.ibss_joined) {
4518 			u32 rates = bss_conf->basic_rates;
4519 			wlvif->basic_rate_set =
4520 				wl1271_tx_enabled_rates_get(wl, rates,
4521 							    wlvif->band);
4522 			wlvif->basic_rate =
4523 				wl1271_tx_min_rate_get(wl,
4524 						       wlvif->basic_rate_set);
4525 
4526 			/* by default, use 11b + OFDM rates */
4527 			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4528 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4529 			if (ret < 0)
4530 				goto out;
4531 		}
4532 	}
4533 
4534 	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4535 		/* enable beacon filtering */
4536 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4537 		if (ret < 0)
4538 			goto out;
4539 	}
4540 
4541 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4542 	if (ret < 0)
4543 		goto out;
4544 
4545 	if (do_join) {
4546 		ret = wlcore_join(wl, wlvif);
4547 		if (ret < 0) {
4548 			wl1271_warning("cmd join failed %d", ret);
4549 			goto out;
4550 		}
4551 	}
4552 
4553 	if (changed & BSS_CHANGED_ASSOC) {
4554 		if (vif->cfg.assoc) {
4555 			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4556 					       sta_rate_set);
4557 			if (ret < 0)
4558 				goto out;
4559 
4560 			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4561 				wl12xx_set_authorized(wl, wlvif);
4562 		} else {
4563 			wlcore_unset_assoc(wl, wlvif);
4564 		}
4565 	}
4566 
4567 	if (changed & BSS_CHANGED_PS) {
4568 		if (vif->cfg.ps &&
4569 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4570 		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4571 			int ps_mode;
4572 			char *ps_mode_str;
4573 
4574 			if (wl->conf.conn.forced_ps) {
4575 				ps_mode = STATION_POWER_SAVE_MODE;
4576 				ps_mode_str = "forced";
4577 			} else {
4578 				ps_mode = STATION_AUTO_PS_MODE;
4579 				ps_mode_str = "auto";
4580 			}
4581 
4582 			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4583 
4584 			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4585 			if (ret < 0)
4586 				wl1271_warning("enter %s ps failed %d",
4587 					       ps_mode_str, ret);
4588 		} else if (!vif->cfg.ps &&
4589 			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4590 			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4591 
4592 			ret = wl1271_ps_set_mode(wl, wlvif,
4593 						 STATION_ACTIVE_MODE);
4594 			if (ret < 0)
4595 				wl1271_warning("exit auto ps failed %d", ret);
4596 		}
4597 	}
4598 
4599 	/* Handle new association with HT. Do this after join. */
4600 	if (sta_exists) {
4601 		bool enabled =
4602 			bss_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT;
4603 
4604 		ret = wlcore_hw_set_peer_cap(wl,
4605 					     &sta_ht_cap,
4606 					     enabled,
4607 					     wlvif->rate_set,
4608 					     wlvif->sta.hlid);
4609 		if (ret < 0) {
4610 			wl1271_warning("Set ht cap failed %d", ret);
4611 			goto out;
4612 
4613 		}
4614 
4615 		if (enabled) {
4616 			ret = wl1271_acx_set_ht_information(wl, wlvif,
4617 						bss_conf->ht_operation_mode);
4618 			if (ret < 0) {
4619 				wl1271_warning("Set ht information failed %d",
4620 					       ret);
4621 				goto out;
4622 			}
4623 		}
4624 	}
4625 
4626 	/* Handle arp filtering. Done after join. */
4627 	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4628 	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4629 		__be32 addr = vif->cfg.arp_addr_list[0];
4630 		wlvif->sta.qos = bss_conf->qos;
4631 		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4632 
4633 		if (vif->cfg.arp_addr_cnt == 1 && vif->cfg.assoc) {
4634 			wlvif->ip_addr = addr;
4635 			/*
4636 			 * The template should have been configured only upon
4637 			 * association. However, it seems that the correct IP
4638 			 * isn't being set (when sending), so we have to
4639 			 * reconfigure the template upon every IP change.
4640 			 */
4641 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4642 			if (ret < 0) {
4643 				wl1271_warning("build arp rsp failed: %d", ret);
4644 				goto out;
4645 			}
4646 
4647 			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4648 				(ACX_ARP_FILTER_ARP_FILTERING |
4649 				 ACX_ARP_FILTER_AUTO_ARP),
4650 				addr);
4651 		} else {
4652 			wlvif->ip_addr = 0;
4653 			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4654 		}
4655 
4656 		if (ret < 0)
4657 			goto out;
4658 	}
4659 
4660 out:
4661 	return;
4662 }
4663 
wl1271_op_bss_info_changed(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_bss_conf * bss_conf,u64 changed)4664 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4665 				       struct ieee80211_vif *vif,
4666 				       struct ieee80211_bss_conf *bss_conf,
4667 				       u64 changed)
4668 {
4669 	struct wl1271 *wl = hw->priv;
4670 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4671 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4672 	int ret;
4673 
4674 	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4675 		     wlvif->role_id, (int)changed);
4676 
4677 	/*
4678 	 * make sure to cancel pending disconnections if our association
4679 	 * state changed
4680 	 */
4681 	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4682 		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4683 
4684 	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4685 	    !bss_conf->enable_beacon)
4686 		wl1271_tx_flush(wl);
4687 
4688 	mutex_lock(&wl->mutex);
4689 
4690 	if (unlikely(wl->state != WLCORE_STATE_ON))
4691 		goto out;
4692 
4693 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4694 		goto out;
4695 
4696 	ret = pm_runtime_resume_and_get(wl->dev);
4697 	if (ret < 0)
4698 		goto out;
4699 
4700 	if ((changed & BSS_CHANGED_TXPOWER) &&
4701 	    bss_conf->txpower != wlvif->power_level) {
4702 
4703 		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4704 		if (ret < 0)
4705 			goto out;
4706 
4707 		wlvif->power_level = bss_conf->txpower;
4708 	}
4709 
4710 	if (is_ap)
4711 		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4712 	else
4713 		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4714 
4715 	pm_runtime_mark_last_busy(wl->dev);
4716 	pm_runtime_put_autosuspend(wl->dev);
4717 
4718 out:
4719 	mutex_unlock(&wl->mutex);
4720 }
4721 
wlcore_op_add_chanctx(struct ieee80211_hw * hw,struct ieee80211_chanctx_conf * ctx)4722 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4723 				 struct ieee80211_chanctx_conf *ctx)
4724 {
4725 	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4726 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4727 		     cfg80211_get_chandef_type(&ctx->def));
4728 	return 0;
4729 }
4730 
wlcore_op_remove_chanctx(struct ieee80211_hw * hw,struct ieee80211_chanctx_conf * ctx)4731 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4732 				     struct ieee80211_chanctx_conf *ctx)
4733 {
4734 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4735 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4736 		     cfg80211_get_chandef_type(&ctx->def));
4737 }
4738 
wlcore_op_change_chanctx(struct ieee80211_hw * hw,struct ieee80211_chanctx_conf * ctx,u32 changed)4739 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4740 				     struct ieee80211_chanctx_conf *ctx,
4741 				     u32 changed)
4742 {
4743 	struct wl1271 *wl = hw->priv;
4744 	struct wl12xx_vif *wlvif;
4745 	int ret;
4746 	int channel = ieee80211_frequency_to_channel(
4747 		ctx->def.chan->center_freq);
4748 
4749 	wl1271_debug(DEBUG_MAC80211,
4750 		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4751 		     channel, cfg80211_get_chandef_type(&ctx->def), changed);
4752 
4753 	mutex_lock(&wl->mutex);
4754 
4755 	ret = pm_runtime_resume_and_get(wl->dev);
4756 	if (ret < 0)
4757 		goto out;
4758 
4759 	wl12xx_for_each_wlvif(wl, wlvif) {
4760 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4761 
4762 		rcu_read_lock();
4763 		if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != ctx) {
4764 			rcu_read_unlock();
4765 			continue;
4766 		}
4767 		rcu_read_unlock();
4768 
4769 		/* start radar if needed */
4770 		if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4771 		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
4772 		    ctx->radar_enabled && !wlvif->radar_enabled &&
4773 		    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4774 			wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4775 			wlcore_hw_set_cac(wl, wlvif, true);
4776 			wlvif->radar_enabled = true;
4777 		}
4778 	}
4779 
4780 	pm_runtime_mark_last_busy(wl->dev);
4781 	pm_runtime_put_autosuspend(wl->dev);
4782 out:
4783 	mutex_unlock(&wl->mutex);
4784 }
4785 
wlcore_op_assign_vif_chanctx(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_bss_conf * link_conf,struct ieee80211_chanctx_conf * ctx)4786 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4787 					struct ieee80211_vif *vif,
4788 					struct ieee80211_bss_conf *link_conf,
4789 					struct ieee80211_chanctx_conf *ctx)
4790 {
4791 	struct wl1271 *wl = hw->priv;
4792 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4793 	int channel = ieee80211_frequency_to_channel(
4794 		ctx->def.chan->center_freq);
4795 	int ret = -EINVAL;
4796 
4797 	wl1271_debug(DEBUG_MAC80211,
4798 		     "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4799 		     wlvif->role_id, channel,
4800 		     cfg80211_get_chandef_type(&ctx->def),
4801 		     ctx->radar_enabled, ctx->def.chan->dfs_state);
4802 
4803 	mutex_lock(&wl->mutex);
4804 
4805 	if (unlikely(wl->state != WLCORE_STATE_ON))
4806 		goto out;
4807 
4808 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4809 		goto out;
4810 
4811 	ret = pm_runtime_resume_and_get(wl->dev);
4812 	if (ret < 0)
4813 		goto out;
4814 
4815 	wlvif->band = ctx->def.chan->band;
4816 	wlvif->channel = channel;
4817 	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4818 
4819 	/* update default rates according to the band */
4820 	wl1271_set_band_rate(wl, wlvif);
4821 
4822 	if (ctx->radar_enabled &&
4823 	    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4824 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4825 		wlcore_hw_set_cac(wl, wlvif, true);
4826 		wlvif->radar_enabled = true;
4827 	}
4828 
4829 	pm_runtime_mark_last_busy(wl->dev);
4830 	pm_runtime_put_autosuspend(wl->dev);
4831 out:
4832 	mutex_unlock(&wl->mutex);
4833 
4834 	return 0;
4835 }
4836 
wlcore_op_unassign_vif_chanctx(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_bss_conf * link_conf,struct ieee80211_chanctx_conf * ctx)4837 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4838 					   struct ieee80211_vif *vif,
4839 					   struct ieee80211_bss_conf *link_conf,
4840 					   struct ieee80211_chanctx_conf *ctx)
4841 {
4842 	struct wl1271 *wl = hw->priv;
4843 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4844 	int ret;
4845 
4846 	wl1271_debug(DEBUG_MAC80211,
4847 		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4848 		     wlvif->role_id,
4849 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4850 		     cfg80211_get_chandef_type(&ctx->def));
4851 
4852 	wl1271_tx_flush(wl);
4853 
4854 	mutex_lock(&wl->mutex);
4855 
4856 	if (unlikely(wl->state != WLCORE_STATE_ON))
4857 		goto out;
4858 
4859 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4860 		goto out;
4861 
4862 	ret = pm_runtime_resume_and_get(wl->dev);
4863 	if (ret < 0)
4864 		goto out;
4865 
4866 	if (wlvif->radar_enabled) {
4867 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4868 		wlcore_hw_set_cac(wl, wlvif, false);
4869 		wlvif->radar_enabled = false;
4870 	}
4871 
4872 	pm_runtime_mark_last_busy(wl->dev);
4873 	pm_runtime_put_autosuspend(wl->dev);
4874 out:
4875 	mutex_unlock(&wl->mutex);
4876 }
4877 
__wlcore_switch_vif_chan(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_chanctx_conf * new_ctx)4878 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4879 				    struct wl12xx_vif *wlvif,
4880 				    struct ieee80211_chanctx_conf *new_ctx)
4881 {
4882 	int channel = ieee80211_frequency_to_channel(
4883 		new_ctx->def.chan->center_freq);
4884 
4885 	wl1271_debug(DEBUG_MAC80211,
4886 		     "switch vif (role %d) %d -> %d chan_type: %d",
4887 		     wlvif->role_id, wlvif->channel, channel,
4888 		     cfg80211_get_chandef_type(&new_ctx->def));
4889 
4890 	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4891 		return 0;
4892 
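	/* beaconing must already be disabled while the channel is switched */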
4893 	WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4894 
4895 	if (wlvif->radar_enabled) {
4896 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4897 		wlcore_hw_set_cac(wl, wlvif, false);
4898 		wlvif->radar_enabled = false;
4899 	}
4900 
4901 	wlvif->band = new_ctx->def.chan->band;
4902 	wlvif->channel = channel;
4903 	wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4904 
4905 	/* start radar if needed */
4906 	if (new_ctx->radar_enabled) {
4907 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4908 		wlcore_hw_set_cac(wl, wlvif, true);
4909 		wlvif->radar_enabled = true;
4910 	}
4911 
4912 	return 0;
4913 }
4914 
4915 static int
wlcore_op_switch_vif_chanctx(struct ieee80211_hw * hw,struct ieee80211_vif_chanctx_switch * vifs,int n_vifs,enum ieee80211_chanctx_switch_mode mode)4916 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4917 			     struct ieee80211_vif_chanctx_switch *vifs,
4918 			     int n_vifs,
4919 			     enum ieee80211_chanctx_switch_mode mode)
4920 {
4921 	struct wl1271 *wl = hw->priv;
4922 	int i, ret;
4923 
4924 	wl1271_debug(DEBUG_MAC80211,
4925 		     "mac80211 switch chanctx n_vifs %d mode %d",
4926 		     n_vifs, mode);
4927 
4928 	mutex_lock(&wl->mutex);
4929 
4930 	ret = pm_runtime_resume_and_get(wl->dev);
4931 	if (ret < 0)
4932 		goto out;
4933 
4934 	for (i = 0; i < n_vifs; i++) {
4935 		struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4936 
4937 		ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4938 		if (ret)
4939 			goto out_sleep;
4940 	}
4941 out_sleep:
4942 	pm_runtime_mark_last_busy(wl->dev);
4943 	pm_runtime_put_autosuspend(wl->dev);
4944 out:
4945 	mutex_unlock(&wl->mutex);
4946 
4947 	return 0;
4948 }
4949 
wl1271_op_conf_tx(struct ieee80211_hw * hw,struct ieee80211_vif * vif,unsigned int link_id,u16 queue,const struct ieee80211_tx_queue_params * params)4950 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4951 			     struct ieee80211_vif *vif,
4952 			     unsigned int link_id, u16 queue,
4953 			     const struct ieee80211_tx_queue_params *params)
4954 {
4955 	struct wl1271 *wl = hw->priv;
4956 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4957 	u8 ps_scheme;
4958 	int ret = 0;
4959 
4960 	if (wlcore_is_p2p_mgmt(wlvif))
4961 		return 0;
4962 
4963 	mutex_lock(&wl->mutex);
4964 
4965 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4966 
4967 	if (params->uapsd)
4968 		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4969 	else
4970 		ps_scheme = CONF_PS_SCHEME_LEGACY;
4971 
4972 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4973 		goto out;
4974 
4975 	ret = pm_runtime_resume_and_get(wl->dev);
4976 	if (ret < 0)
4977 		goto out;
4978 
4979 	/*
4980 	 * mac80211 configures the txop in units of 32us, while the
4981 	 * firmware expects microseconds - hence the << 5 (x32) below.
4982 	 */
4983 	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4984 				params->cw_min, params->cw_max,
4985 				params->aifs, params->txop << 5);
4986 	if (ret < 0)
4987 		goto out_sleep;
4988 
4989 	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4990 				 CONF_CHANNEL_TYPE_EDCF,
4991 				 wl1271_tx_get_queue(queue),
4992 				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4993 				 0, 0);
4994 
4995 out_sleep:
4996 	pm_runtime_mark_last_busy(wl->dev);
4997 	pm_runtime_put_autosuspend(wl->dev);
4998 
4999 out:
5000 	mutex_unlock(&wl->mutex);
5001 
5002 	return ret;
5003 }
5004 
wl1271_op_get_tsf(struct ieee80211_hw * hw,struct ieee80211_vif * vif)5005 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
5006 			     struct ieee80211_vif *vif)
5007 {
5008 
5009 	struct wl1271 *wl = hw->priv;
5010 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5011 	u64 mactime = ULLONG_MAX;
5012 	int ret;
5013 
5014 	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
5015 
5016 	mutex_lock(&wl->mutex);
5017 
5018 	if (unlikely(wl->state != WLCORE_STATE_ON))
5019 		goto out;
5020 
5021 	ret = pm_runtime_resume_and_get(wl->dev);
5022 	if (ret < 0)
5023 		goto out;
5024 
5025 	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
5026 	if (ret < 0)
5027 		goto out_sleep;
5028 
5029 out_sleep:
5030 	pm_runtime_mark_last_busy(wl->dev);
5031 	pm_runtime_put_autosuspend(wl->dev);
5032 
5033 out:
5034 	mutex_unlock(&wl->mutex);
5035 	return mactime;
5036 }
5037 
wl1271_op_get_survey(struct ieee80211_hw * hw,int idx,struct survey_info * survey)5038 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
5039 				struct survey_info *survey)
5040 {
5041 	struct ieee80211_conf *conf = &hw->conf;
5042 
5043 	if (idx != 0)
5044 		return -ENOENT;
5045 
5046 	survey->channel = conf->chandef.chan;
5047 	survey->filled = 0;
5048 	return 0;
5049 }
5050 
wl1271_allocate_sta(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_sta * sta)5051 static int wl1271_allocate_sta(struct wl1271 *wl,
5052 			     struct wl12xx_vif *wlvif,
5053 			     struct ieee80211_sta *sta)
5054 {
5055 	struct wl1271_station *wl_sta;
5056 	int ret;
5057 
5058 
5059 	if (wl->active_sta_count >= wl->max_ap_stations) {
5060 		wl1271_warning("could not allocate HLID - too many stations");
5061 		return -EBUSY;
5062 	}
5063 
5064 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5065 	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
5066 	if (ret < 0) {
5067 		wl1271_warning("could not allocate HLID - too many links");
5068 		return -EBUSY;
5069 	}
5070 
5071 	/* use the previous security seq, if this is a recovery/resume */
5072 	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
5073 
5074 	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
5075 	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
5076 	wl->active_sta_count++;
5077 	return 0;
5078 }
5079 
wl1271_free_sta(struct wl1271 * wl,struct wl12xx_vif * wlvif,u8 hlid)5080 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
5081 {
5082 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
5083 		return;
5084 
5085 	clear_bit(hlid, wlvif->ap.sta_hlid_map);
5086 	__clear_bit(hlid, &wl->ap_ps_map);
5087 	__clear_bit(hlid, &wl->ap_fw_ps_map);
5088 
5089 	/*
5090 	 * save the last used PN in the private part of ieee80211_sta,
5091 	 * in case of recovery/suspend
5092 	 */
5093 	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5094 
5095 	wl12xx_free_link(wl, wlvif, &hlid);
5096 	wl->active_sta_count--;
5097 
5098 	/*
5099 	 * rearm the tx watchdog when the last STA is freed - give the FW a
5100 	 * chance to return STA-buffered packets before complaining.
5101 	 */
5102 	if (wl->active_sta_count == 0)
5103 		wl12xx_rearm_tx_watchdog_locked(wl);
5104 }
5105 
wl12xx_sta_add(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_sta * sta)5106 static int wl12xx_sta_add(struct wl1271 *wl,
5107 			  struct wl12xx_vif *wlvif,
5108 			  struct ieee80211_sta *sta)
5109 {
5110 	struct wl1271_station *wl_sta;
5111 	int ret = 0;
5112 	u8 hlid;
5113 
5114 	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5115 
5116 	ret = wl1271_allocate_sta(wl, wlvif, sta);
5117 	if (ret < 0)
5118 		return ret;
5119 
5120 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5121 	hlid = wl_sta->hlid;
5122 
5123 	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5124 	if (ret < 0)
5125 		wl1271_free_sta(wl, wlvif, hlid);
5126 
5127 	return ret;
5128 }
5129 
wl12xx_sta_remove(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_sta * sta)5130 static int wl12xx_sta_remove(struct wl1271 *wl,
5131 			     struct wl12xx_vif *wlvif,
5132 			     struct ieee80211_sta *sta)
5133 {
5134 	struct wl1271_station *wl_sta;
5135 	int ret = 0, id;
5136 
5137 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5138 
5139 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5140 	id = wl_sta->hlid;
5141 	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5142 		return -EINVAL;
5143 
5144 	ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5145 	if (ret < 0)
5146 		return ret;
5147 
5148 	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5149 	return ret;
5150 }
5151 
wlcore_roc_if_possible(struct wl1271 * wl,struct wl12xx_vif * wlvif)5152 static void wlcore_roc_if_possible(struct wl1271 *wl,
5153 				   struct wl12xx_vif *wlvif)
5154 {
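	/* only one role may be in ROC at a time; bail out if any is */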
5155 	if (find_first_bit(wl->roc_map,
5156 			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5157 		return;
5158 
5159 	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5160 		return;
5161 
5162 	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5163 }
5164 
5165 /*
5166  * when wl_sta is NULL, we treat this call as if coming from a
5167  * pending auth reply.
5168  * wl->mutex must be taken and the FW must be awake when the call
5169  * takes place.
5170  */
wlcore_update_inconn_sta(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct wl1271_station * wl_sta,bool in_conn)5171 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5172 			      struct wl1271_station *wl_sta, bool in_conn)
5173 {
5174 	if (in_conn) {
5175 		if (WARN_ON(wl_sta && wl_sta->in_connection))
5176 			return;
5177 
5178 		if (!wlvif->ap_pending_auth_reply &&
5179 		    !wlvif->inconn_count)
5180 			wlcore_roc_if_possible(wl, wlvif);
5181 
5182 		if (wl_sta) {
5183 			wl_sta->in_connection = true;
5184 			wlvif->inconn_count++;
5185 		} else {
5186 			wlvif->ap_pending_auth_reply = true;
5187 		}
5188 	} else {
5189 		if (wl_sta && !wl_sta->in_connection)
5190 			return;
5191 
5192 		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5193 			return;
5194 
5195 		if (WARN_ON(wl_sta && !wlvif->inconn_count))
5196 			return;
5197 
5198 		if (wl_sta) {
5199 			wl_sta->in_connection = false;
5200 			wlvif->inconn_count--;
5201 		} else {
5202 			wlvif->ap_pending_auth_reply = false;
5203 		}
5204 
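		/* release ROC once the last connection attempt has settled */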
5205 		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5206 		    test_bit(wlvif->role_id, wl->roc_map))
5207 			wl12xx_croc(wl, wlvif->role_id);
5208 	}
5209 }
5210 
wl12xx_update_sta_state(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_sta * sta,enum ieee80211_sta_state old_state,enum ieee80211_sta_state new_state)5211 static int wl12xx_update_sta_state(struct wl1271 *wl,
5212 				   struct wl12xx_vif *wlvif,
5213 				   struct ieee80211_sta *sta,
5214 				   enum ieee80211_sta_state old_state,
5215 				   enum ieee80211_sta_state new_state)
5216 {
5217 	struct wl1271_station *wl_sta;
5218 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5219 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5220 	int ret;
5221 
5222 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5223 
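	/*
	 * Mirror mac80211's station state machine into the FW: AP roles
	 * add/remove/authorize peers on the transitions below, while STA
	 * roles track authorization, preserve the TX security counter
	 * across disassoc/assoc and manage ROC around connection setup.
	 */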
5224 	/* Add station (AP mode) */
5225 	if (is_ap &&
5226 	    old_state == IEEE80211_STA_AUTH &&
5227 	    new_state == IEEE80211_STA_ASSOC) {
5228 		ret = wl12xx_sta_add(wl, wlvif, sta);
5229 		if (ret)
5230 			return ret;
5231 
5232 		wl_sta->fw_added = true;
5233 
5234 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5235 	}
5236 
5237 	/* Remove station (AP mode) */
5238 	if (is_ap &&
5239 	    old_state == IEEE80211_STA_ASSOC &&
5240 	    new_state == IEEE80211_STA_AUTH) {
5241 		wl_sta->fw_added = false;
5242 
5243 		/* must not fail */
5244 		wl12xx_sta_remove(wl, wlvif, sta);
5245 
5246 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5247 	}
5248 
5249 	/* Authorize station (AP mode) */
5250 	if (is_ap &&
5251 	    new_state == IEEE80211_STA_AUTHORIZED) {
5252 		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5253 		if (ret < 0)
5254 			return ret;
5255 
5256 		ret = wl1271_acx_set_ht_capabilities(wl, &sta->deflink.ht_cap,
5257 						     true,
5258 						     wl_sta->hlid);
5259 		if (ret)
5260 			return ret;
5261 
5262 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5263 	}
5264 
5265 	/* Authorize station */
5266 	if (is_sta &&
5267 	    new_state == IEEE80211_STA_AUTHORIZED) {
5268 		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5269 		ret = wl12xx_set_authorized(wl, wlvif);
5270 		if (ret)
5271 			return ret;
5272 	}
5273 
5274 	if (is_sta &&
5275 	    old_state == IEEE80211_STA_AUTHORIZED &&
5276 	    new_state == IEEE80211_STA_ASSOC) {
5277 		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5278 		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5279 	}
5280 
5281 	/* save seq number on disassoc (suspend) */
5282 	if (is_sta &&
5283 	    old_state == IEEE80211_STA_ASSOC &&
5284 	    new_state == IEEE80211_STA_AUTH) {
5285 		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5286 		wlvif->total_freed_pkts = 0;
5287 	}
5288 
5289 	/* restore seq number on assoc (resume) */
5290 	if (is_sta &&
5291 	    old_state == IEEE80211_STA_AUTH &&
5292 	    new_state == IEEE80211_STA_ASSOC) {
5293 		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5294 	}
5295 
5296 	/* clear ROCs on failure or authorization */
5297 	if (is_sta &&
5298 	    (new_state == IEEE80211_STA_AUTHORIZED ||
5299 	     new_state == IEEE80211_STA_NOTEXIST)) {
5300 		if (test_bit(wlvif->role_id, wl->roc_map))
5301 			wl12xx_croc(wl, wlvif->role_id);
5302 	}
5303 
5304 	if (is_sta &&
5305 	    old_state == IEEE80211_STA_NOTEXIST &&
5306 	    new_state == IEEE80211_STA_NONE) {
5307 		if (find_first_bit(wl->roc_map,
5308 				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5309 			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5310 			wl12xx_roc(wl, wlvif, wlvif->role_id,
5311 				   wlvif->band, wlvif->channel);
5312 		}
5313 	}
5314 	return 0;
5315 }
5316 
wl12xx_op_sta_state(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_sta * sta,enum ieee80211_sta_state old_state,enum ieee80211_sta_state new_state)5317 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5318 			       struct ieee80211_vif *vif,
5319 			       struct ieee80211_sta *sta,
5320 			       enum ieee80211_sta_state old_state,
5321 			       enum ieee80211_sta_state new_state)
5322 {
5323 	struct wl1271 *wl = hw->priv;
5324 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5325 	int ret;
5326 
5327 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5328 		     sta->aid, old_state, new_state);
5329 
5330 	mutex_lock(&wl->mutex);
5331 
5332 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5333 		ret = -EBUSY;
5334 		goto out;
5335 	}
5336 
5337 	ret = pm_runtime_resume_and_get(wl->dev);
5338 	if (ret < 0)
5339 		goto out;
5340 
5341 	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5342 
5343 	pm_runtime_mark_last_busy(wl->dev);
5344 	pm_runtime_put_autosuspend(wl->dev);
5345 out:
5346 	mutex_unlock(&wl->mutex);
5347 	if (new_state < old_state)
5348 		return 0;
5349 	return ret;
5350 }
5351 
wl1271_op_ampdu_action(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_ampdu_params * params)5352 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5353 				  struct ieee80211_vif *vif,
5354 				  struct ieee80211_ampdu_params *params)
5355 {
5356 	struct wl1271 *wl = hw->priv;
5357 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5358 	int ret;
5359 	u8 hlid, *ba_bitmap;
5360 	struct ieee80211_sta *sta = params->sta;
5361 	enum ieee80211_ampdu_mlme_action action = params->action;
5362 	u16 tid = params->tid;
5363 	u16 *ssn = &params->ssn;
5364 
5365 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5366 		     tid);
5367 
5368 	/* sanity check - the fields in FW are only 8bits wide */
5369 	/* sanity check - the fields in the FW are only 8 bits wide */
5370 		return -ENOTSUPP;
5371 
5372 	mutex_lock(&wl->mutex);
5373 
5374 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5375 		ret = -EAGAIN;
5376 		goto out;
5377 	}
5378 
5379 	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5380 		hlid = wlvif->sta.hlid;
5381 	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5382 		struct wl1271_station *wl_sta;
5383 
5384 		wl_sta = (struct wl1271_station *)sta->drv_priv;
5385 		hlid = wl_sta->hlid;
5386 	} else {
5387 		ret = -EINVAL;
5388 		goto out;
5389 	}
5390 
5391 	ba_bitmap = &wl->links[hlid].ba_bitmap;
5392 
5393 	ret = pm_runtime_resume_and_get(wl->dev);
5394 	if (ret < 0)
5395 		goto out;
5396 
5397 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5398 		     tid, action);
5399 
5400 	switch (action) {
5401 	case IEEE80211_AMPDU_RX_START:
5402 		if (!wlvif->ba_support || !wlvif->ba_allowed) {
5403 			ret = -ENOTSUPP;
5404 			break;
5405 		}
5406 
5407 		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5408 			ret = -EBUSY;
5409 			wl1271_debug(DEBUG_RX, "exceeded max RX BA sessions");
5410 			break;
5411 		}
5412 
5413 		if (*ba_bitmap & BIT(tid)) {
5414 			ret = -EINVAL;
5415 			wl1271_error("cannot enable RX BA session on active "
5416 				     "tid: %d", tid);
5417 			break;
5418 		}
5419 
5420 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5421 				hlid,
5422 				params->buf_size);
5423 
5424 		if (!ret) {
5425 			*ba_bitmap |= BIT(tid);
5426 			wl->ba_rx_session_count++;
5427 		}
5428 		break;
5429 
5430 	case IEEE80211_AMPDU_RX_STOP:
5431 		if (!(*ba_bitmap & BIT(tid))) {
5432 			/*
5433 			 * this happens on reconfig - so only output a debug
5434 			 * message for now, and don't fail the function.
5435 			 */
5436 			wl1271_debug(DEBUG_MAC80211,
5437 				     "no active RX BA session on tid: %d",
5438 				     tid);
5439 			ret = 0;
5440 			break;
5441 		}
5442 
5443 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5444 							 hlid, 0);
5445 		if (!ret) {
5446 			*ba_bitmap &= ~BIT(tid);
5447 			wl->ba_rx_session_count--;
5448 		}
5449 		break;
5450 
5451 	/*
5452 	 * The BA initiator session is managed by the FW independently.
5453 	 * All TX AMPDU actions intentionally fall through to a common break.
5454 	 */
5455 	case IEEE80211_AMPDU_TX_START:
5456 	case IEEE80211_AMPDU_TX_STOP_CONT:
5457 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
5458 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5459 	case IEEE80211_AMPDU_TX_OPERATIONAL:
5460 		ret = -EINVAL;
5461 		break;
5462 
5463 	default:
5464 		wl1271_error("Incorrect ampdu action id=%x\n", action);
5465 		ret = -EINVAL;
5466 	}
5467 
5468 	pm_runtime_mark_last_busy(wl->dev);
5469 	pm_runtime_put_autosuspend(wl->dev);
5470 
5471 out:
5472 	mutex_unlock(&wl->mutex);
5473 
5474 	return ret;
5475 }
5476 
5477 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5478 				   struct ieee80211_vif *vif,
5479 				   const struct cfg80211_bitrate_mask *mask)
5480 {
5481 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5482 	struct wl1271 *wl = hw->priv;
5483 	int i, ret = 0;
5484 
5485 	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5486 		mask->control[NL80211_BAND_2GHZ].legacy,
5487 		mask->control[NL80211_BAND_5GHZ].legacy);
5488 
5489 	mutex_lock(&wl->mutex);
5490 
5491 	for (i = 0; i < WLCORE_NUM_BANDS; i++)
5492 		wlvif->bitrate_masks[i] =
5493 			wl1271_tx_enabled_rates_get(wl,
5494 						    mask->control[i].legacy,
5495 						    i);
5496 
5497 	if (unlikely(wl->state != WLCORE_STATE_ON))
5498 		goto out;
5499 
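	/* push the updated rate policy to the FW only for a STA that is not
	 * yet associated; the stored per-band masks are picked up later when
	 * the rates are reconfigured.
	 */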
5500 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5501 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5502 
5503 		ret = pm_runtime_resume_and_get(wl->dev);
5504 		if (ret < 0)
5505 			goto out;
5506 
5507 		wl1271_set_band_rate(wl, wlvif);
5508 		wlvif->basic_rate =
5509 			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5510 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5511 
5512 		pm_runtime_mark_last_busy(wl->dev);
5513 		pm_runtime_put_autosuspend(wl->dev);
5514 	}
5515 out:
5516 	mutex_unlock(&wl->mutex);
5517 
5518 	return ret;
5519 }
5520 
5521 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5522 				     struct ieee80211_vif *vif,
5523 				     struct ieee80211_channel_switch *ch_switch)
5524 {
5525 	struct wl1271 *wl = hw->priv;
5526 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5527 	int ret;
5528 
5529 	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5530 
5531 	wl1271_tx_flush(wl);
5532 
5533 	mutex_lock(&wl->mutex);
5534 
5535 	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5536 		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5537 			ieee80211_chswitch_done(vif, false, 0);
5538 		goto out;
5539 	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5540 		goto out;
5541 	}
5542 
5543 	ret = pm_runtime_resume_and_get(wl->dev);
5544 	if (ret < 0)
5545 		goto out;
5546 
5547 	/* TODO: change mac80211 to pass vif as param */
5548 
5549 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5550 		unsigned long delay_usec;
5551 
5552 		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5553 		if (ret)
5554 			goto out_sleep;
5555 
5556 		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5557 
5558 		/* indicate failure 5 seconds after channel switch time */
5559 		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5560 			ch_switch->count;
5561 		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5562 					     usecs_to_jiffies(delay_usec) +
5563 					     msecs_to_jiffies(5000));
5564 	}
5565 
5566 out_sleep:
5567 	pm_runtime_mark_last_busy(wl->dev);
5568 	pm_runtime_put_autosuspend(wl->dev);
5569 
5570 out:
5571 	mutex_unlock(&wl->mutex);
5572 }
5573 
5574 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5575 					struct wl12xx_vif *wlvif,
5576 					u8 eid)
5577 {
5578 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5579 	struct sk_buff *beacon =
5580 		ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif), 0);
5581 
5582 	if (!beacon)
5583 		return NULL;
5584 
5585 	return cfg80211_find_ie(eid,
5586 				beacon->data + ieoffset,
5587 				beacon->len - ieoffset);
5588 }
5589 
5590 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5591 				u8 *csa_count)
5592 {
5593 	const u8 *ie;
5594 	const struct ieee80211_channel_sw_ie *ie_csa;
5595 
5596 	ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5597 	if (!ie)
5598 		return -EINVAL;
5599 
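	/* skip the two-byte element header (ID + length) */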
5600 	ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5601 	*csa_count = ie_csa->count;
5602 
5603 	return 0;
5604 }
5605 
5606 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5607 					    struct ieee80211_vif *vif,
5608 					    struct cfg80211_chan_def *chandef)
5609 {
5610 	struct wl1271 *wl = hw->priv;
5611 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5612 	struct ieee80211_channel_switch ch_switch = {
5613 		.block_tx = true,
5614 		.chandef = *chandef,
5615 	};
5616 	int ret;
5617 
5618 	wl1271_debug(DEBUG_MAC80211,
5619 		     "mac80211 channel switch beacon (role %d)",
5620 		     wlvif->role_id);
5621 
5622 	ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5623 	if (ret < 0) {
5624 		wl1271_error("error getting beacon (for CSA counter)");
5625 		return;
5626 	}
5627 
5628 	mutex_lock(&wl->mutex);
5629 
5630 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5631 		ret = -EBUSY;
5632 		goto out;
5633 	}
5634 
5635 	ret = pm_runtime_resume_and_get(wl->dev);
5636 	if (ret < 0)
5637 		goto out;
5638 
5639 	ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5640 	if (ret)
5641 		goto out_sleep;
5642 
5643 	set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5644 
5645 out_sleep:
5646 	pm_runtime_mark_last_busy(wl->dev);
5647 	pm_runtime_put_autosuspend(wl->dev);
5648 out:
5649 	mutex_unlock(&wl->mutex);
5650 }
5651 
5652 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5653 			    u32 queues, bool drop)
5654 {
5655 	struct wl1271 *wl = hw->priv;
5656 
5657 	wl1271_tx_flush(wl);
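	/* the queues and drop arguments are ignored; all pending TX is flushed */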
5658 }
5659 
5660 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5661 				       struct ieee80211_vif *vif,
5662 				       struct ieee80211_channel *chan,
5663 				       int duration,
5664 				       enum ieee80211_roc_type type)
5665 {
5666 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5667 	struct wl1271 *wl = hw->priv;
5668 	int channel, active_roc, ret = 0;
5669 
5670 	channel = ieee80211_frequency_to_channel(chan->center_freq);
5671 
5672 	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5673 		     channel, wlvif->role_id);
5674 
5675 	mutex_lock(&wl->mutex);
5676 
5677 	if (unlikely(wl->state != WLCORE_STATE_ON))
5678 		goto out;
5679 
5680 	/* return EBUSY if we can't ROC right now */
5681 	active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5682 	if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5683 		wl1271_warning("active roc on role %d", active_roc);
5684 		ret = -EBUSY;
5685 		goto out;
5686 	}
5687 
5688 	ret = pm_runtime_resume_and_get(wl->dev);
5689 	if (ret < 0)
5690 		goto out;
5691 
5692 	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5693 	if (ret < 0)
5694 		goto out_sleep;
5695 
5696 	wl->roc_vif = vif;
5697 	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5698 				     msecs_to_jiffies(duration));
5699 out_sleep:
5700 	pm_runtime_mark_last_busy(wl->dev);
5701 	pm_runtime_put_autosuspend(wl->dev);
5702 out:
5703 	mutex_unlock(&wl->mutex);
5704 	return ret;
5705 }
5706 
5707 static int __wlcore_roc_completed(struct wl1271 *wl)
5708 {
5709 	struct wl12xx_vif *wlvif;
5710 	int ret;
5711 
5712 	/* already completed */
5713 	if (unlikely(!wl->roc_vif))
5714 		return 0;
5715 
5716 	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5717 
5718 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5719 		return -EBUSY;
5720 
5721 	ret = wl12xx_stop_dev(wl, wlvif);
5722 	if (ret < 0)
5723 		return ret;
5724 
5725 	wl->roc_vif = NULL;
5726 
5727 	return 0;
5728 }
5729 
5730 static int wlcore_roc_completed(struct wl1271 *wl)
5731 {
5732 	int ret;
5733 
5734 	wl1271_debug(DEBUG_MAC80211, "roc complete");
5735 
5736 	mutex_lock(&wl->mutex);
5737 
5738 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5739 		ret = -EBUSY;
5740 		goto out;
5741 	}
5742 
5743 	ret = pm_runtime_resume_and_get(wl->dev);
5744 	if (ret < 0)
5745 		goto out;
5746 
5747 	ret = __wlcore_roc_completed(wl);
5748 
5749 	pm_runtime_mark_last_busy(wl->dev);
5750 	pm_runtime_put_autosuspend(wl->dev);
5751 out:
5752 	mutex_unlock(&wl->mutex);
5753 
5754 	return ret;
5755 }
5756 
5757 static void wlcore_roc_complete_work(struct work_struct *work)
5758 {
5759 	struct delayed_work *dwork;
5760 	struct wl1271 *wl;
5761 	int ret;
5762 
5763 	dwork = to_delayed_work(work);
5764 	wl = container_of(dwork, struct wl1271, roc_complete_work);
5765 
5766 	ret = wlcore_roc_completed(wl);
5767 	if (!ret)
5768 		ieee80211_remain_on_channel_expired(wl->hw);
5769 }
5770 
5771 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
5772 					      struct ieee80211_vif *vif)
5773 {
5774 	struct wl1271 *wl = hw->priv;
5775 
5776 	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5777 
5778 	/* TODO: per-vif */
5779 	wl1271_tx_flush(wl);
5780 
5781 	/*
5782 	 * we can't just flush_work here, because it might deadlock
5783 	 * (as we might get called from the same workqueue)
5784 	 */
5785 	cancel_delayed_work_sync(&wl->roc_complete_work);
5786 	wlcore_roc_completed(wl);
5787 
5788 	return 0;
5789 }
5790 
5791 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5792 				    struct ieee80211_vif *vif,
5793 				    struct ieee80211_link_sta *link_sta,
5794 				    u32 changed)
5795 {
5796 	struct ieee80211_sta *sta = link_sta->sta;
5797 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5798 
5799 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5800 
5801 	if (!(changed & IEEE80211_RC_BW_CHANGED))
5802 		return;
5803 
5804 	/* this callback is atomic, so schedule a new work */
5805 	wlvif->rc_update_bw = sta->deflink.bandwidth;
5806 	memcpy(&wlvif->rc_ht_cap, &sta->deflink.ht_cap,
5807 	       sizeof(sta->deflink.ht_cap));
5808 	ieee80211_queue_work(hw, &wlvif->rc_update_work);
5809 }
5810 
5811 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5812 				     struct ieee80211_vif *vif,
5813 				     struct ieee80211_sta *sta,
5814 				     struct station_info *sinfo)
5815 {
5816 	struct wl1271 *wl = hw->priv;
5817 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5818 	s8 rssi_dbm;
5819 	int ret;
5820 
5821 	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5822 
5823 	mutex_lock(&wl->mutex);
5824 
5825 	if (unlikely(wl->state != WLCORE_STATE_ON))
5826 		goto out;
5827 
5828 	ret = pm_runtime_resume_and_get(wl->dev);
5829 	if (ret < 0)
5830 		goto out_sleep;
5831 
5832 	ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5833 	if (ret < 0)
5834 		goto out_sleep;
5835 
5836 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
5837 	sinfo->signal = rssi_dbm;
5838 
5839 out_sleep:
5840 	pm_runtime_mark_last_busy(wl->dev);
5841 	pm_runtime_put_autosuspend(wl->dev);
5842 
5843 out:
5844 	mutex_unlock(&wl->mutex);
5845 }
5846 
5847 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5848 					     struct ieee80211_sta *sta)
5849 {
5850 	struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5851 	struct wl1271 *wl = hw->priv;
5852 	u8 hlid = wl_sta->hlid;
5853 
5854 	/* return in units of Kbps */
5855 	return (wl->links[hlid].fw_rate_mbps * 1000);
5856 }
5857 
5858 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5859 {
5860 	struct wl1271 *wl = hw->priv;
5861 	bool ret = false;
5862 
5863 	mutex_lock(&wl->mutex);
5864 
5865 	if (unlikely(wl->state != WLCORE_STATE_ON))
5866 		goto out;
5867 
5868 	/* packets are considered pending if in the TX queue or the FW */
5869 	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5870 out:
5871 	mutex_unlock(&wl->mutex);
5872 
5873 	return ret;
5874 }
5875 
5876 /* can't be const, mac80211 writes to this */
5877 static struct ieee80211_rate wl1271_rates[] = {
5878 	{ .bitrate = 10,
5879 	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5880 	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5881 	{ .bitrate = 20,
5882 	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5883 	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5884 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5885 	{ .bitrate = 55,
5886 	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5887 	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5888 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5889 	{ .bitrate = 110,
5890 	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5891 	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5892 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5893 	{ .bitrate = 60,
5894 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5895 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5896 	{ .bitrate = 90,
5897 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5898 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5899 	{ .bitrate = 120,
5900 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5901 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5902 	{ .bitrate = 180,
5903 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5904 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5905 	{ .bitrate = 240,
5906 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5907 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5908 	{ .bitrate = 360,
5909 	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5910 	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5911 	{ .bitrate = 480,
5912 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5913 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5914 	{ .bitrate = 540,
5915 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5916 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5917 };
5918 
5919 /* can't be const, mac80211 writes to this */
5920 static struct ieee80211_channel wl1271_channels[] = {
5921 	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5922 	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5923 	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5924 	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5925 	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5926 	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5927 	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5928 	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5929 	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5930 	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5931 	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5932 	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5933 	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5934 	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5935 };
5936 
5937 /* can't be const, mac80211 writes to this */
5938 static struct ieee80211_supported_band wl1271_band_2ghz = {
5939 	.channels = wl1271_channels,
5940 	.n_channels = ARRAY_SIZE(wl1271_channels),
5941 	.bitrates = wl1271_rates,
5942 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5943 };
5944 
5945 /* 5 GHz data rates for WL1273 */
5946 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5947 	{ .bitrate = 60,
5948 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5949 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5950 	{ .bitrate = 90,
5951 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5952 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5953 	{ .bitrate = 120,
5954 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5955 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5956 	{ .bitrate = 180,
5957 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5958 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5959 	{ .bitrate = 240,
5960 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5961 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5962 	{ .bitrate = 360,
5963 	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5964 	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5965 	{ .bitrate = 480,
5966 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5967 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5968 	{ .bitrate = 540,
5969 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5970 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5971 };
5972 
5973 /* 5 GHz band channels for WL1273 */
5974 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5975 	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5976 	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5977 	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5978 	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5979 	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5980 	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5981 	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5982 	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5983 	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5984 	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5985 	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5986 	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5987 	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5988 	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5989 	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5990 	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5991 	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5992 	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5993 	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5994 	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5995 	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5996 	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5997 	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5998 	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5999 	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
6000 	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
6001 	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
6002 	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
6003 	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
6004 	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
6005 	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
6006 };
6007 
6008 static struct ieee80211_supported_band wl1271_band_5ghz = {
6009 	.channels = wl1271_channels_5ghz,
6010 	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
6011 	.bitrates = wl1271_rates_5ghz,
6012 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
6013 };
6014 
6015 static const struct ieee80211_ops wl1271_ops = {
6016 	.start = wl1271_op_start,
6017 	.stop = wlcore_op_stop,
6018 	.add_interface = wl1271_op_add_interface,
6019 	.remove_interface = wl1271_op_remove_interface,
6020 	.change_interface = wl12xx_op_change_interface,
6021 #ifdef CONFIG_PM
6022 	.suspend = wl1271_op_suspend,
6023 	.resume = wl1271_op_resume,
6024 #endif
6025 	.config = wl1271_op_config,
6026 	.prepare_multicast = wl1271_op_prepare_multicast,
6027 	.configure_filter = wl1271_op_configure_filter,
6028 	.tx = wl1271_op_tx,
6029 	.wake_tx_queue = ieee80211_handle_wake_tx_queue,
6030 	.set_key = wlcore_op_set_key,
6031 	.hw_scan = wl1271_op_hw_scan,
6032 	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
6033 	.sched_scan_start = wl1271_op_sched_scan_start,
6034 	.sched_scan_stop = wl1271_op_sched_scan_stop,
6035 	.bss_info_changed = wl1271_op_bss_info_changed,
6036 	.set_frag_threshold = wl1271_op_set_frag_threshold,
6037 	.set_rts_threshold = wl1271_op_set_rts_threshold,
6038 	.conf_tx = wl1271_op_conf_tx,
6039 	.get_tsf = wl1271_op_get_tsf,
6040 	.get_survey = wl1271_op_get_survey,
6041 	.sta_state = wl12xx_op_sta_state,
6042 	.ampdu_action = wl1271_op_ampdu_action,
6043 	.tx_frames_pending = wl1271_tx_frames_pending,
6044 	.set_bitrate_mask = wl12xx_set_bitrate_mask,
6045 	.set_default_unicast_key = wl1271_op_set_default_key_idx,
6046 	.channel_switch = wl12xx_op_channel_switch,
6047 	.channel_switch_beacon = wlcore_op_channel_switch_beacon,
6048 	.flush = wlcore_op_flush,
6049 	.remain_on_channel = wlcore_op_remain_on_channel,
6050 	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
6051 	.add_chanctx = wlcore_op_add_chanctx,
6052 	.remove_chanctx = wlcore_op_remove_chanctx,
6053 	.change_chanctx = wlcore_op_change_chanctx,
6054 	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
6055 	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
6056 	.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
6057 	.link_sta_rc_update = wlcore_op_sta_rc_update,
6058 	.sta_statistics = wlcore_op_sta_statistics,
6059 	.get_expected_throughput = wlcore_op_get_expected_throughput,
6060 	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
6061 };
6062 
6063 
6064 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
6065 {
6066 	u8 idx;
6067 
6068 	BUG_ON(band >= 2);
6069 
6070 	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
6071 		wl1271_error("Illegal RX rate from HW: %d", rate);
6072 		return 0;
6073 	}
6074 
6075 	idx = wl->band_rate_to_idx[band][rate];
6076 	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
6077 		wl1271_error("Unsupported RX rate from HW: %d", rate);
6078 		return 0;
6079 	}
6080 
6081 	return idx;
6082 }
6083 
6084 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
6085 {
6086 	int i;
6087 
6088 	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
6089 		     oui, nic);
6090 
6091 	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
6092 		wl1271_warning("NIC part of the MAC address wraps around!");
6093 
6094 	for (i = 0; i < wl->num_mac_addr; i++) {
6095 		wl->addresses[i].addr[0] = (u8)(oui >> 16);
6096 		wl->addresses[i].addr[1] = (u8)(oui >> 8);
6097 		wl->addresses[i].addr[2] = (u8) oui;
6098 		wl->addresses[i].addr[3] = (u8)(nic >> 16);
6099 		wl->addresses[i].addr[4] = (u8)(nic >> 8);
6100 		wl->addresses[i].addr[5] = (u8) nic;
6101 		nic++;
6102 	}
6103 
6104 	/* we may be one address short at the most */
6105 	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
6106 
6107 	/*
6108 	 * turn on the LAA bit in the first address and use it as
6109 	 * the last address.
6110 	 */
6111 	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
6112 		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
6113 		memcpy(&wl->addresses[idx], &wl->addresses[0],
6114 		       sizeof(wl->addresses[0]));
6115 		/* LAA bit */
6116 		wl->addresses[idx].addr[0] |= BIT(1);
6117 	}
6118 
6119 	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
6120 	wl->hw->wiphy->addresses = wl->addresses;
6121 }
6122 
6123 static int wl12xx_get_hw_info(struct wl1271 *wl)
6124 {
6125 	int ret;
6126 
6127 	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6128 	if (ret < 0)
6129 		goto out;
6130 
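	/* default the fuse addresses to zero; ops->get_mac, when implemented,
	 * fills them in from the hardware fuses below.
	 */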
6131 	wl->fuse_oui_addr = 0;
6132 	wl->fuse_nic_addr = 0;
6133 
6134 	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6135 	if (ret < 0)
6136 		goto out;
6137 
6138 	if (wl->ops->get_mac)
6139 		ret = wl->ops->get_mac(wl);
6140 
6141 out:
6142 	return ret;
6143 }
6144 
6145 static int wl1271_register_hw(struct wl1271 *wl)
6146 {
6147 	int ret;
6148 	u32 oui_addr = 0, nic_addr = 0;
6149 	struct platform_device *pdev = wl->pdev;
6150 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6151 
6152 	if (wl->mac80211_registered)
6153 		return 0;
6154 
6155 	if (wl->nvs_len >= 12) {
6156 		/* NOTE: to simplify the casting, the wl->nvs->nvs element
6157 		 * must come first; we assume it is at the beginning of
6158 		 * the wl->nvs structure.
6159 		 */
6160 		u8 *nvs_ptr = (u8 *)wl->nvs;
6161 
6162 		oui_addr =
6163 			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6164 		nic_addr =
6165 			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6166 	}
6167 
6168 	/* if the MAC address is zeroed in the NVS, derive it from the fuse */
6169 	if (oui_addr == 0 && nic_addr == 0) {
6170 		oui_addr = wl->fuse_oui_addr;
6171 		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6172 		nic_addr = wl->fuse_nic_addr + 1;
6173 	}
6174 
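	/* de:ad:be:ef:00:00 is the placeholder address found in the default
	 * NVS image, so treat it as unconfigured as well.
	 */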
6175 	if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6176 		wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
6177 		if (!strcmp(pdev_data->family->name, "wl18xx")) {
6178 			wl1271_warning("This default nvs file can be removed from the file system");
6179 		} else {
6180 			wl1271_warning("Your device performance is not optimized.");
6181 			wl1271_warning("Please use the calibrator tool to configure your device.");
6182 		}
6183 
6184 		if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6185 			wl1271_warning("Fuse mac address is zero. using random mac");
6186 			/* Use TI oui and a random nic */
6187 			oui_addr = WLCORE_TI_OUI_ADDRESS;
6188 			nic_addr = get_random_u32();
6189 		} else {
6190 			oui_addr = wl->fuse_oui_addr;
6191 			/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6192 			nic_addr = wl->fuse_nic_addr + 1;
6193 		}
6194 	}
6195 
6196 	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6197 
6198 	ret = ieee80211_register_hw(wl->hw);
6199 	if (ret < 0) {
6200 		wl1271_error("unable to register mac80211 hw: %d", ret);
6201 		goto out;
6202 	}
6203 
6204 	wl->mac80211_registered = true;
6205 
6206 	wl1271_debugfs_init(wl);
6207 
6208 	wl1271_notice("loaded");
6209 
6210 out:
6211 	return ret;
6212 }
6213 
6214 static void wl1271_unregister_hw(struct wl1271 *wl)
6215 {
6216 	if (wl->plt)
6217 		wl1271_plt_stop(wl);
6218 
6219 	ieee80211_unregister_hw(wl->hw);
6220 	wl->mac80211_registered = false;
6221 
6222 }
6223 
6224 static int wl1271_init_ieee80211(struct wl1271 *wl)
6225 {
6226 	int i;
6227 	static const u32 cipher_suites[] = {
6228 		WLAN_CIPHER_SUITE_WEP40,
6229 		WLAN_CIPHER_SUITE_WEP104,
6230 		WLAN_CIPHER_SUITE_TKIP,
6231 		WLAN_CIPHER_SUITE_CCMP,
6232 		WL1271_CIPHER_SUITE_GEM,
6233 	};
6234 
6235 	/* The tx descriptor buffer */
6236 	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6237 
6238 	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6239 		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6240 
6241 	/* unit us */
6242 	/* FIXME: find a proper value */
6243 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6244 
6245 	ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6246 	ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6247 	ieee80211_hw_set(wl->hw, SUPPORTS_PER_STA_GTK);
6248 	ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6249 	ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6250 	ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6251 	ieee80211_hw_set(wl->hw, AP_LINK_PS);
6252 	ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6253 	ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6254 	ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6255 	ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6256 	ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6257 	ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6258 	ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6259 	ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6260 
6261 	wl->hw->wiphy->cipher_suites = cipher_suites;
6262 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6263 
6264 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6265 					 BIT(NL80211_IFTYPE_AP) |
6266 					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6267 					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6268 #ifdef CONFIG_MAC80211_MESH
6269 					 BIT(NL80211_IFTYPE_MESH_POINT) |
6270 #endif
6271 					 BIT(NL80211_IFTYPE_P2P_GO);
6272 
6273 	wl->hw->wiphy->max_scan_ssids = 1;
6274 	wl->hw->wiphy->max_sched_scan_ssids = 16;
6275 	wl->hw->wiphy->max_match_sets = 16;
6276 	/*
6277 	 * Maximum length of elements in scanning probe request templates
6278 	 * should be the maximum length possible for a template, without
6279 	 * the IEEE80211 header of the template
6280 	 */
6281 	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6282 			sizeof(struct ieee80211_header);
6283 
6284 	wl->hw->wiphy->max_sched_scan_reqs = 1;
6285 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6286 		sizeof(struct ieee80211_header);
6287 
6288 	wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6289 
6290 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6291 				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6292 				WIPHY_FLAG_HAS_CHANNEL_SWITCH |
6293 				WIPHY_FLAG_IBSS_RSN;
6294 
6295 	wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6296 
6297 	/* make sure all our channels fit in the scanned_ch bitmask */
6298 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6299 		     ARRAY_SIZE(wl1271_channels_5ghz) >
6300 		     WL1271_MAX_CHANNELS);
6301 	/*
6302 	 * clear channel flags from the previous usage
6303 	 * and restore max_power & max_antenna_gain values.
6304 	 */
6305 	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6306 		wl1271_band_2ghz.channels[i].flags = 0;
6307 		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6308 		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6309 	}
6310 
6311 	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6312 		wl1271_band_5ghz.channels[i].flags = 0;
6313 		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6314 		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6315 	}
6316 
6317 	/*
6318 	 * We keep local copies of the band structs because we need to
6319 	 * modify them on a per-device basis.
6320 	 */
6321 	memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6322 	       sizeof(wl1271_band_2ghz));
6323 	memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6324 	       &wl->ht_cap[NL80211_BAND_2GHZ],
6325 	       sizeof(*wl->ht_cap));
6326 	memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6327 	       sizeof(wl1271_band_5ghz));
6328 	memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6329 	       &wl->ht_cap[NL80211_BAND_5GHZ],
6330 	       sizeof(*wl->ht_cap));
6331 
6332 	wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6333 		&wl->bands[NL80211_BAND_2GHZ];
6334 	wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6335 		&wl->bands[NL80211_BAND_5GHZ];
6336 
6337 	/*
6338 	 * allow 4 queues per mac address we support +
6339 	 * 1 cab queue per mac + one global offchannel Tx queue
6340 	 */
6341 	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6342 
6343 	/* the last queue is the offchannel queue */
6344 	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6345 	wl->hw->max_rates = 1;
6346 
6347 	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6348 
6349 	/* the FW answers probe-requests in AP-mode */
6350 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6351 	wl->hw->wiphy->probe_resp_offload =
6352 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6353 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6354 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6355 
6356 	/* allowed interface combinations */
6357 	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6358 	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6359 
6360 	/* register vendor commands */
6361 	wlcore_set_vendor_commands(wl->hw->wiphy);
6362 
6363 	SET_IEEE80211_DEV(wl->hw, wl->dev);
6364 
6365 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
6366 	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6367 
6368 	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6369 
6370 	return 0;
6371 }
6372 
6373 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6374 				     u32 mbox_size)
6375 {
6376 	struct ieee80211_hw *hw;
6377 	struct wl1271 *wl;
6378 	int i, j, ret;
6379 	unsigned int order;
6380 
6381 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6382 	if (!hw) {
6383 		wl1271_error("could not alloc ieee80211_hw");
6384 		ret = -ENOMEM;
6385 		goto err_hw_alloc;
6386 	}
6387 
6388 	wl = hw->priv;
6389 	memset(wl, 0, sizeof(*wl));
6390 
6391 	wl->priv = kzalloc(priv_size, GFP_KERNEL);
6392 	if (!wl->priv) {
6393 		wl1271_error("could not alloc wl priv");
6394 		ret = -ENOMEM;
6395 		goto err_priv_alloc;
6396 	}
6397 
6398 	INIT_LIST_HEAD(&wl->wlvif_list);
6399 
6400 	wl->hw = hw;
6401 
6402 	/*
6403 	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6404 	 * we don't allocate any additional resource here, so that's fine.
6405 	 */
6406 	for (i = 0; i < NUM_TX_QUEUES; i++)
6407 		for (j = 0; j < WLCORE_MAX_LINKS; j++)
6408 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
6409 
6410 	skb_queue_head_init(&wl->deferred_rx_queue);
6411 	skb_queue_head_init(&wl->deferred_tx_queue);
6412 
6413 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6414 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
6415 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6416 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6417 	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6418 	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6419 
6420 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6421 	if (!wl->freezable_wq) {
6422 		ret = -ENOMEM;
6423 		goto err_hw;
6424 	}
6425 
6426 	wl->channel = 0;
6427 	wl->rx_counter = 0;
6428 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6429 	wl->band = NL80211_BAND_2GHZ;
6430 	wl->channel_type = NL80211_CHAN_NO_HT;
6431 	wl->flags = 0;
6432 	wl->sg_enabled = true;
6433 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
6434 	wl->recovery_count = 0;
6435 	wl->hw_pg_ver = -1;
6436 	wl->ap_ps_map = 0;
6437 	wl->ap_fw_ps_map = 0;
6438 	wl->quirks = 0;
6439 	wl->system_hlid = WL12XX_SYSTEM_HLID;
6440 	wl->active_sta_count = 0;
6441 	wl->active_link_count = 0;
6442 	wl->fwlog_size = 0;
6443 
6444 	/* The system link is always allocated */
6445 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6446 
6447 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6448 	for (i = 0; i < wl->num_tx_desc; i++)
6449 		wl->tx_frames[i] = NULL;
6450 
6451 	spin_lock_init(&wl->wl_lock);
6452 
6453 	wl->state = WLCORE_STATE_OFF;
6454 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6455 	mutex_init(&wl->mutex);
6456 	mutex_init(&wl->flush_mutex);
6457 	init_completion(&wl->nvs_loading_complete);
6458 
6459 	order = get_order(aggr_buf_size);
6460 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6461 	if (!wl->aggr_buf) {
6462 		ret = -ENOMEM;
6463 		goto err_wq;
6464 	}
6465 	wl->aggr_buf_size = aggr_buf_size;
6466 
6467 	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6468 	if (!wl->dummy_packet) {
6469 		ret = -ENOMEM;
6470 		goto err_aggr;
6471 	}
6472 
6473 	/* Allocate one page for the FW log */
6474 	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6475 	if (!wl->fwlog) {
6476 		ret = -ENOMEM;
6477 		goto err_dummy_packet;
6478 	}
6479 
6480 	wl->mbox_size = mbox_size;
6481 	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6482 	if (!wl->mbox) {
6483 		ret = -ENOMEM;
6484 		goto err_fwlog;
6485 	}
6486 
6487 	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6488 	if (!wl->buffer_32) {
6489 		ret = -ENOMEM;
6490 		goto err_mbox;
6491 	}
6492 
6493 	return hw;
6494 
6495 err_mbox:
6496 	kfree(wl->mbox);
6497 
6498 err_fwlog:
6499 	free_page((unsigned long)wl->fwlog);
6500 
6501 err_dummy_packet:
6502 	dev_kfree_skb(wl->dummy_packet);
6503 
6504 err_aggr:
6505 	free_pages((unsigned long)wl->aggr_buf, order);
6506 
6507 err_wq:
6508 	destroy_workqueue(wl->freezable_wq);
6509 
6510 err_hw:
6511 	wl1271_debugfs_exit(wl);
6512 	kfree(wl->priv);
6513 
6514 err_priv_alloc:
6515 	ieee80211_free_hw(hw);
6516 
6517 err_hw_alloc:
6518 
6519 	return ERR_PTR(ret);
6520 }
6521 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6522 
6523 int wlcore_free_hw(struct wl1271 *wl)
6524 {
6525 	/* Unblock any fwlog readers */
6526 	mutex_lock(&wl->mutex);
6527 	wl->fwlog_size = -1;
6528 	mutex_unlock(&wl->mutex);
6529 
6530 	wlcore_sysfs_free(wl);
6531 
6532 	kfree(wl->buffer_32);
6533 	kfree(wl->mbox);
6534 	free_page((unsigned long)wl->fwlog);
6535 	dev_kfree_skb(wl->dummy_packet);
6536 	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6537 
6538 	wl1271_debugfs_exit(wl);
6539 
6540 	vfree(wl->fw);
6541 	wl->fw = NULL;
6542 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6543 	kfree(wl->nvs);
6544 	wl->nvs = NULL;
6545 
6546 	kfree(wl->raw_fw_status);
6547 	kfree(wl->fw_status);
6548 	kfree(wl->tx_res_if);
6549 	destroy_workqueue(wl->freezable_wq);
6550 
6551 	kfree(wl->priv);
6552 	ieee80211_free_hw(wl->hw);
6553 
6554 	return 0;
6555 }
6556 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6557 
6558 #ifdef CONFIG_PM
6559 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6560 	.flags = WIPHY_WOWLAN_ANY,
6561 	.n_patterns = WL1271_MAX_RX_FILTERS,
6562 	.pattern_min_len = 1,
6563 	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6564 };
6565 #endif
6566 
6567 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6568 {
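	/* nothing to do in hard-irq context; just wake the threaded handler */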
6569 	return IRQ_WAKE_THREAD;
6570 }
6571 
6572 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6573 {
6574 	struct wl1271 *wl = context;
6575 	struct platform_device *pdev = wl->pdev;
6576 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6577 	struct resource *res;
6578 
6579 	int ret;
6580 	irq_handler_t hardirq_fn = NULL;
6581 
6582 	if (fw) {
6583 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6584 		if (!wl->nvs) {
6585 			wl1271_error("Could not allocate nvs data");
6586 			goto out;
6587 		}
6588 		wl->nvs_len = fw->size;
6589 	} else if (pdev_data->family->nvs_name) {
6590 		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6591 			     pdev_data->family->nvs_name);
6592 		wl->nvs = NULL;
6593 		wl->nvs_len = 0;
6594 	} else {
6595 		wl->nvs = NULL;
6596 		wl->nvs_len = 0;
6597 	}
6598 
6599 	ret = wl->ops->setup(wl);
6600 	if (ret < 0)
6601 		goto out_free_nvs;
6602 
6603 	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6604 
6605 	/* adjust some runtime configuration parameters */
6606 	wlcore_adjust_conf(wl);
6607 
6608 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6609 	if (!res) {
6610 		wl1271_error("Could not get IRQ resource");
6611 		goto out_free_nvs;
6612 	}
6613 
6614 	wl->irq = res->start;
6615 	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6616 	wl->if_ops = pdev_data->if_ops;
6617 
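	/* edge-triggered interrupts need a primary handler so that edges are
	 * not missed while the thread runs; level-triggered interrupts use
	 * the default primary handler, which requires IRQF_ONESHOT.
	 */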
6618 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6619 		hardirq_fn = wlcore_hardirq;
6620 	else
6621 		wl->irq_flags |= IRQF_ONESHOT;
6622 
6623 	ret = wl12xx_set_power_on(wl);
6624 	if (ret < 0)
6625 		goto out_free_nvs;
6626 
6627 	ret = wl12xx_get_hw_info(wl);
6628 	if (ret < 0) {
6629 		wl1271_error("couldn't get hw info");
6630 		wl1271_power_off(wl);
6631 		goto out_free_nvs;
6632 	}
6633 
6634 	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6635 				   wl->irq_flags, pdev->name, wl);
6636 	if (ret < 0) {
6637 		wl1271_error("interrupt configuration failed");
6638 		wl1271_power_off(wl);
6639 		goto out_free_nvs;
6640 	}
6641 
6642 #ifdef CONFIG_PM
6643 	device_init_wakeup(wl->dev, true);
6644 
6645 	ret = enable_irq_wake(wl->irq);
6646 	if (!ret) {
6647 		wl->irq_wake_enabled = true;
6648 		if (pdev_data->pwr_in_suspend)
6649 			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6650 	}
6651 
6652 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
6653 	if (res) {
6654 		wl->wakeirq = res->start;
6655 		wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK;
6656 		ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq);
6657 		if (ret)
6658 			wl->wakeirq = -ENODEV;
6659 	} else {
6660 		wl->wakeirq = -ENODEV;
6661 	}
6662 #endif
6663 	disable_irq(wl->irq);
6664 	wl1271_power_off(wl);
6665 
6666 	ret = wl->ops->identify_chip(wl);
6667 	if (ret < 0)
6668 		goto out_irq;
6669 
6670 	ret = wl1271_init_ieee80211(wl);
6671 	if (ret)
6672 		goto out_irq;
6673 
6674 	ret = wl1271_register_hw(wl);
6675 	if (ret)
6676 		goto out_irq;
6677 
6678 	ret = wlcore_sysfs_init(wl);
6679 	if (ret)
6680 		goto out_unreg;
6681 
6682 	wl->initialized = true;
6683 	goto out;
6684 
6685 out_unreg:
6686 	wl1271_unregister_hw(wl);
6687 
6688 out_irq:
6689 	if (wl->wakeirq >= 0)
6690 		dev_pm_clear_wake_irq(wl->dev);
6691 	device_init_wakeup(wl->dev, false);
6692 	free_irq(wl->irq, wl);
6693 
6694 out_free_nvs:
6695 	kfree(wl->nvs);
6696 
6697 out:
6698 	release_firmware(fw);
6699 	complete_all(&wl->nvs_loading_complete);
6700 }
6701 
6702 static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
6703 {
6704 	struct wl1271 *wl = dev_get_drvdata(dev);
6705 	struct wl12xx_vif *wlvif;
6706 	int error;
6707 
6708 	/* We do not enter elp sleep in PLT mode */
6709 	if (wl->plt)
6710 		return 0;
6711 
6712 	/* Nothing to do if no ELP mode requested */
6713 	if (wl->sleep_auth != WL1271_PSM_ELP)
6714 		return 0;
6715 
6716 	wl12xx_for_each_wlvif(wl, wlvif) {
6717 		if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
6718 		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
6719 			return -EBUSY;
6720 	}
6721 
6722 	wl1271_debug(DEBUG_PSM, "chip to elp");
6723 	error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
6724 	if (error < 0) {
6725 		wl12xx_queue_recovery_work(wl);
6726 
6727 		return error;
6728 	}
6729 
6730 	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6731 
6732 	return 0;
6733 }
6734 
6735 static int __maybe_unused wlcore_runtime_resume(struct device *dev)
6736 {
6737 	struct wl1271 *wl = dev_get_drvdata(dev);
6738 	DECLARE_COMPLETION_ONSTACK(compl);
6739 	unsigned long flags;
6740 	int ret;
6741 	unsigned long start_time = jiffies;
6742 	bool recovery = false;
6743 
6744 	/* Nothing to do if no ELP mode requested */
6745 	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
6746 		return 0;
6747 
6748 	wl1271_debug(DEBUG_PSM, "waking up chip from elp");
6749 
6750 	spin_lock_irqsave(&wl->wl_lock, flags);
6751 	wl->elp_compl = &compl;
6752 	spin_unlock_irqrestore(&wl->wl_lock, flags);
6753 
6754 	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
6755 	if (ret < 0) {
6756 		recovery = true;
6757 	} else if (!test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) {
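		/* if the IRQ thread is already running it will complete the
		 * wakeup for us, so only wait for the completion when it is
		 * not running.
		 */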
6758 		ret = wait_for_completion_timeout(&compl,
6759 			msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
6760 		if (ret == 0) {
6761 			wl1271_warning("ELP wakeup timeout!");
6762 			recovery = true;
6763 		}
6764 	}
6765 
6766 	spin_lock_irqsave(&wl->wl_lock, flags);
6767 	wl->elp_compl = NULL;
6768 	spin_unlock_irqrestore(&wl->wl_lock, flags);
6769 	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6770 
6771 	if (recovery) {
6772 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
6773 		wl12xx_queue_recovery_work(wl);
6774 	} else {
6775 		wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
6776 			     jiffies_to_msecs(jiffies - start_time));
6777 	}
6778 
6779 	return 0;
6780 }
6781 
6782 static const struct dev_pm_ops wlcore_pm_ops = {
6783 	SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
6784 			   wlcore_runtime_resume,
6785 			   NULL)
6786 };
6787 
6788 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6789 {
6790 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6791 	const char *nvs_name;
6792 	int ret = 0;
6793 
6794 	if (!wl->ops || !wl->ptable || !pdev_data)
6795 		return -EINVAL;
6796 
6797 	wl->dev = &pdev->dev;
6798 	wl->pdev = pdev;
6799 	platform_set_drvdata(pdev, wl);
6800 
6801 	if (pdev_data->family && pdev_data->family->nvs_name) {
6802 		nvs_name = pdev_data->family->nvs_name;
6803 		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
6804 					      nvs_name, &pdev->dev, GFP_KERNEL,
6805 					      wl, wlcore_nvs_cb);
6806 		if (ret < 0) {
6807 			wl1271_error("request_firmware_nowait failed for %s: %d",
6808 				     nvs_name, ret);
6809 			complete_all(&wl->nvs_loading_complete);
6810 		}
6811 	} else {
6812 		wlcore_nvs_cb(NULL, wl);
6813 	}
6814 
6815 	wl->dev->driver->pm = &wlcore_pm_ops;
6816 	pm_runtime_set_autosuspend_delay(wl->dev, 50);
6817 	pm_runtime_use_autosuspend(wl->dev);
6818 	pm_runtime_enable(wl->dev);
6819 
6820 	return ret;
6821 }
6822 EXPORT_SYMBOL_GPL(wlcore_probe);
6823 
6824 void wlcore_remove(struct platform_device *pdev)
6825 {
6826 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6827 	struct wl1271 *wl = platform_get_drvdata(pdev);
6828 	int error;
6829 
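	/* make sure the device is runtime-resumed while we tear it down */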
6830 	error = pm_runtime_get_sync(wl->dev);
6831 	if (error < 0)
6832 		dev_warn(wl->dev, "PM runtime failed: %i\n", error);
6833 
6834 	wl->dev->driver->pm = NULL;
6835 
6836 	if (pdev_data->family && pdev_data->family->nvs_name)
6837 		wait_for_completion(&wl->nvs_loading_complete);
6838 	if (!wl->initialized)
6839 		return;
6840 
6841 	if (wl->wakeirq >= 0) {
6842 		dev_pm_clear_wake_irq(wl->dev);
6843 		wl->wakeirq = -ENODEV;
6844 	}
6845 
6846 	device_init_wakeup(wl->dev, false);
6847 
6848 	if (wl->irq_wake_enabled)
6849 		disable_irq_wake(wl->irq);
6850 
6851 	wl1271_unregister_hw(wl);
6852 
6853 	pm_runtime_put_sync(wl->dev);
6854 	pm_runtime_dont_use_autosuspend(wl->dev);
6855 	pm_runtime_disable(wl->dev);
6856 
6857 	free_irq(wl->irq, wl);
6858 	wlcore_free_hw(wl);
6859 }
6860 EXPORT_SYMBOL_GPL(wlcore_remove);
6861 
6862 u32 wl12xx_debug_level = DEBUG_NONE;
6863 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6864 module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
6865 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6866 
6867 module_param_named(fwlog, fwlog_param, charp, 0);
6868 MODULE_PARM_DESC(fwlog,
6869 		 "FW logger options: continuous, dbgpins or disable");
6870 
6871 module_param(fwlog_mem_blocks, int, 0600);
6872 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6873 
6874 module_param(bug_on_recovery, int, 0600);
6875 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6876 
6877 module_param(no_recovery, int, 0600);
6878 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6879 
6880 MODULE_DESCRIPTION("TI WLAN core driver");
6881 MODULE_LICENSE("GPL");
6882 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6883 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6884