xref: /linux/drivers/net/wireless/ti/wlcore/main.c (revision 8be4d31cb8aaeea27bde4b7ddb26e28a89062ebf)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This file is part of wlcore
4  *
5  * Copyright (C) 2008-2010 Nokia Corporation
6  * Copyright (C) 2011-2013 Texas Instruments Inc.
7  */
8 
9 #include <linux/module.h>
10 #include <linux/firmware.h>
11 #include <linux/etherdevice.h>
12 #include <linux/vmalloc.h>
13 #include <linux/interrupt.h>
14 #include <linux/irq.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/pm_wakeirq.h>
17 
18 #include "wlcore.h"
19 #include "debug.h"
20 #include "wl12xx_80211.h"
21 #include "io.h"
22 #include "tx.h"
23 #include "ps.h"
24 #include "init.h"
25 #include "debugfs.h"
26 #include "testmode.h"
27 #include "vendor_cmd.h"
28 #include "scan.h"
29 #include "hw_ops.h"
30 #include "sysfs.h"
31 
32 #define WL1271_BOOT_RETRIES 3
33 #define WL1271_WAKEUP_TIMEOUT 500
34 
35 static char *fwlog_param;
36 static int fwlog_mem_blocks = -1;
37 static int bug_on_recovery = -1;
38 static int no_recovery     = -1;
39 
40 static void __wl1271_op_remove_interface(struct wl1271 *wl,
41 					 struct ieee80211_vif *vif,
42 					 bool reset_tx_queues);
43 static void wlcore_op_stop_locked(struct wl1271 *wl);
44 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
45 
46 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
47 {
48 	int ret;
49 
50 	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
51 		return -EINVAL;
52 
53 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
54 		return 0;
55 
56 	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
57 		return 0;
58 
59 	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
60 	if (ret < 0)
61 		return ret;
62 
63 	wl1271_info("Association completed.");
64 	return 0;
65 }
66 
67 static void wl1271_reg_notify(struct wiphy *wiphy,
68 			      struct regulatory_request *request)
69 {
70 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
71 	struct wl1271 *wl = hw->priv;
72 
73 	/* copy the current dfs region */
74 	if (request)
75 		wl->dfs_region = request->dfs_region;
76 
77 	wlcore_regdomain_config(wl);
78 }
79 
80 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
81 				   bool enable)
82 {
83 	int ret = 0;
84 
85 	/* we should hold wl->mutex */
86 	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
87 	if (ret < 0)
88 		goto out;
89 
90 	if (enable)
91 		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
92 	else
93 		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
94 out:
95 	return ret;
96 }
97 
98 /*
99  * This function is called when the rx_streaming interval
100  * has been changed or rx_streaming should be disabled.
101  */
102 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
103 {
104 	int ret = 0;
105 	int period = wl->conf.rx_streaming.interval;
106 
107 	/* don't reconfigure if rx_streaming is disabled */
108 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
109 		goto out;
110 
111 	/* reconfigure/disable according to new streaming_period */
112 	if (period &&
113 	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
114 	    (wl->conf.rx_streaming.always ||
115 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
116 		ret = wl1271_set_rx_streaming(wl, wlvif, true);
117 	else {
118 		ret = wl1271_set_rx_streaming(wl, wlvif, false);
119 		/* don't cancel_work_sync since we might deadlock */
120 		timer_delete_sync(&wlvif->rx_streaming_timer);
121 	}
122 out:
123 	return ret;
124 }
125 
126 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
127 {
128 	int ret;
129 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
130 						rx_streaming_enable_work);
131 	struct wl1271 *wl = wlvif->wl;
132 
133 	mutex_lock(&wl->mutex);
134 
135 	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
136 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
137 	    (!wl->conf.rx_streaming.always &&
138 	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
139 		goto out;
140 
141 	if (!wl->conf.rx_streaming.interval)
142 		goto out;
143 
144 	ret = pm_runtime_resume_and_get(wl->dev);
145 	if (ret < 0)
146 		goto out;
147 
148 	ret = wl1271_set_rx_streaming(wl, wlvif, true);
149 	if (ret < 0)
150 		goto out_sleep;
151 
152 	/* stop it after some time of inactivity */
153 	mod_timer(&wlvif->rx_streaming_timer,
154 		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
155 
156 out_sleep:
157 	pm_runtime_mark_last_busy(wl->dev);
158 	pm_runtime_put_autosuspend(wl->dev);
159 out:
160 	mutex_unlock(&wl->mutex);
161 }
162 
163 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
164 {
165 	int ret;
166 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
167 						rx_streaming_disable_work);
168 	struct wl1271 *wl = wlvif->wl;
169 
170 	mutex_lock(&wl->mutex);
171 
172 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
173 		goto out;
174 
175 	ret = pm_runtime_resume_and_get(wl->dev);
176 	if (ret < 0)
177 		goto out;
178 
179 	ret = wl1271_set_rx_streaming(wl, wlvif, false);
180 	if (ret)
181 		goto out_sleep;
182 
183 out_sleep:
184 	pm_runtime_mark_last_busy(wl->dev);
185 	pm_runtime_put_autosuspend(wl->dev);
186 out:
187 	mutex_unlock(&wl->mutex);
188 }
189 
190 static void wl1271_rx_streaming_timer(struct timer_list *t)
191 {
192 	struct wl12xx_vif *wlvif = timer_container_of(wlvif, t,
193 						      rx_streaming_timer);
194 	struct wl1271 *wl = wlvif->wl;
195 	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
196 }
197 
198 /* wl->mutex must be taken */
199 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
200 {
201 	/* if the watchdog is not armed, don't do anything */
202 	if (wl->tx_allocated_blocks == 0)
203 		return;
204 
205 	cancel_delayed_work(&wl->tx_watchdog_work);
206 	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
207 		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
208 }
209 
210 static void wlcore_rc_update_work(struct work_struct *work)
211 {
212 	int ret;
213 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
214 						rc_update_work);
215 	struct wl1271 *wl = wlvif->wl;
216 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
217 
218 	mutex_lock(&wl->mutex);
219 
220 	if (unlikely(wl->state != WLCORE_STATE_ON))
221 		goto out;
222 
223 	ret = pm_runtime_resume_and_get(wl->dev);
224 	if (ret < 0)
225 		goto out;
226 
227 	if (ieee80211_vif_is_mesh(vif)) {
228 		ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
229 						     true, wlvif->sta.hlid);
230 		if (ret < 0)
231 			goto out_sleep;
232 	} else {
233 		wlcore_hw_sta_rc_update(wl, wlvif);
234 	}
235 
236 out_sleep:
237 	pm_runtime_mark_last_busy(wl->dev);
238 	pm_runtime_put_autosuspend(wl->dev);
239 out:
240 	mutex_unlock(&wl->mutex);
241 }
242 
243 static void wl12xx_tx_watchdog_work(struct work_struct *work)
244 {
245 	struct delayed_work *dwork;
246 	struct wl1271 *wl;
247 
248 	dwork = to_delayed_work(work);
249 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
250 
251 	mutex_lock(&wl->mutex);
252 
253 	if (unlikely(wl->state != WLCORE_STATE_ON))
254 		goto out;
255 
256 	/* Tx went out in the meantime - everything is ok */
257 	if (unlikely(wl->tx_allocated_blocks == 0))
258 		goto out;
259 
260 	/*
261 	 * if a ROC is in progress, we might not have any Tx for a long
262 	 * time (e.g. pending Tx on the non-ROC channels)
263 	 */
264 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
265 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
266 			     wl->conf.tx.tx_watchdog_timeout);
267 		wl12xx_rearm_tx_watchdog_locked(wl);
268 		goto out;
269 	}
270 
271 	/*
272 	 * if a scan is in progress, we might not have any Tx for a long
273 	 * time
274 	 */
275 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
276 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
277 			     wl->conf.tx.tx_watchdog_timeout);
278 		wl12xx_rearm_tx_watchdog_locked(wl);
279 		goto out;
280 	}
281 
282 	/*
283 	 * An AP might cache a frame for a long time for a sleeping station,
284 	 * so rearm the timer if there's an AP interface with stations. If
285 	 * Tx is genuinely stuck we will hopefully discover it when all
286 	 * stations are removed due to inactivity.
287 	 */
288 	if (wl->active_sta_count) {
289 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
290 			     "%d stations",
291 			      wl->conf.tx.tx_watchdog_timeout,
292 			      wl->active_sta_count);
293 		wl12xx_rearm_tx_watchdog_locked(wl);
294 		goto out;
295 	}
296 
297 	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
298 		     wl->conf.tx.tx_watchdog_timeout);
299 	wl12xx_queue_recovery_work(wl);
300 
301 out:
302 	mutex_unlock(&wl->mutex);
303 }
304 
305 static void wlcore_adjust_conf(struct wl1271 *wl)
306 {
307 
308 	if (fwlog_param) {
309 		if (!strcmp(fwlog_param, "continuous")) {
310 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
311 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
312 		} else if (!strcmp(fwlog_param, "dbgpins")) {
313 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
314 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
315 		} else if (!strcmp(fwlog_param, "disable")) {
316 			wl->conf.fwlog.mem_blocks = 0;
317 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
318 		} else {
319 			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
320 		}
321 	}
322 
323 	if (bug_on_recovery != -1)
324 		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
325 
326 	if (no_recovery != -1)
327 		wl->conf.recovery.no_recovery = (u8) no_recovery;
328 }
329 
330 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
331 					struct wl12xx_vif *wlvif,
332 					u8 hlid, u8 tx_pkts)
333 {
334 	bool fw_ps;
335 
336 	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
337 
338 	/*
339 	 * Wake up from high-level PS if the STA is asleep with too few
340 	 * packets in FW or if the STA is awake.
341 	 */
342 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
343 		wl12xx_ps_link_end(wl, wlvif, hlid);
344 
345 	/*
346 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
347 	 * Make an exception if this is the only connected link. In this
348 	 * case FW-memory congestion is less of a problem.
349 	 * Note that a single connected STA means 2*ap_count + 1 active links,
350 	 * since we must account for the global and broadcast AP links
351 	 * for each AP. The "fw_ps" check assures us the other link is a STA
352 	 * connected to the AP. Otherwise the FW would not set the PSM bit.
353 	 */
354 	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
355 		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
356 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
357 }
358 
359 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
360 					   struct wl12xx_vif *wlvif,
361 					   struct wl_fw_status *status)
362 {
363 	unsigned long cur_fw_ps_map;
364 	u8 hlid;
365 
366 	cur_fw_ps_map = status->link_ps_bitmap;
367 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
368 		wl1271_debug(DEBUG_PSM,
369 			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
370 			     wl->ap_fw_ps_map, cur_fw_ps_map,
371 			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
372 
373 		wl->ap_fw_ps_map = cur_fw_ps_map;
374 	}
375 
376 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
377 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
378 					    wl->links[hlid].allocated_pkts);
379 }
380 
381 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
382 {
383 	struct wl12xx_vif *wlvifsta;
384 	struct wl12xx_vif *wlvifap;
385 	struct wl12xx_vif *wlvif;
386 	u32 old_tx_blk_count = wl->tx_blocks_available;
387 	int avail, freed_blocks;
388 	int i;
389 	int ret;
390 	struct wl1271_link *lnk;
391 
392 	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
393 				   wl->raw_fw_status,
394 				   wl->fw_status_len, false);
395 	if (ret < 0)
396 		return ret;
397 
398 	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, status);
399 
400 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
401 		     "drv_rx_counter = %d, tx_results_counter = %d)",
402 		     status->intr,
403 		     status->fw_rx_counter,
404 		     status->drv_rx_counter,
405 		     status->tx_results_counter);
406 
407 	for (i = 0; i < NUM_TX_QUEUES; i++) {
408 		/* prevent wrap-around in freed-packets counter */
409 		wl->tx_allocated_pkts[i] -=
410 				(status->counters.tx_released_pkts[i] -
411 				wl->tx_pkts_freed[i]) & 0xff;
412 
413 		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
414 	}
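	/*
	 * Editorial note (not part of the original source): the "& 0xff" in
	 * the loop above masks the subtraction so it stays correct across the
	 * 8-bit counter wrap. For example, if tx_pkts_freed[i] was 250 and
	 * the firmware now reports tx_released_pkts[i] == 5, then
	 * (5 - 250) & 0xff == 11 packets are accounted as freed.
	 */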
415 
416 	/* Find an authorized STA vif */
417 	wlvifsta = NULL;
418 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
419 		if (wlvif->sta.hlid != WL12XX_INVALID_LINK_ID &&
420 		    test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags)) {
421 			wlvifsta = wlvif;
422 			break;
423 		}
424 	}
425 
426 	/* Find a started AP vif */
427 	wlvifap = NULL;
428 	wl12xx_for_each_wlvif(wl, wlvif) {
429 		if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
430 		    wlvif->inconn_count == 0 &&
431 		    test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
432 			wlvifap = wlvif;
433 			break;
434 		}
435 	}
436 
437 	for_each_set_bit(i, wl->links_map, wl->num_links) {
438 		u16 diff16, sec_pn16;
439 		u8 diff, tx_lnk_free_pkts;
440 
441 		lnk = &wl->links[i];
442 
443 		/* prevent wrap-around in freed-packets counter */
444 		tx_lnk_free_pkts = status->counters.tx_lnk_free_pkts[i];
445 		diff = (tx_lnk_free_pkts - lnk->prev_freed_pkts) & 0xff;
446 
447 		if (diff) {
448 			lnk->allocated_pkts -= diff;
449 			lnk->prev_freed_pkts = tx_lnk_free_pkts;
450 		}
451 
452 		/* Get the current sec_pn16 value if present */
453 		if (status->counters.tx_lnk_sec_pn16)
454 			sec_pn16 = __le16_to_cpu(status->counters.tx_lnk_sec_pn16[i]);
455 		else
456 			sec_pn16 = 0;
457 		/* prevent wrap-around in pn16 counter */
458 		diff16 = (sec_pn16 - lnk->prev_sec_pn16) & 0xffff;
459 
460 		/* FIXME: since free_pkts is an 8-bit counter of packets that
461 		 * rolls over, it can become zero. If it is zero, then we
462 		 * omit processing below. Is that really correct?
463 		 */
464 		if (tx_lnk_free_pkts <= 0)
465 			continue;
466 
467 		/* For a station that has an authorized link: */
468 		if (wlvifsta && wlvifsta->sta.hlid == i) {
469 			if (wlvifsta->encryption_type == KEY_TKIP ||
470 			    wlvifsta->encryption_type == KEY_AES) {
471 				if (diff16) {
472 					lnk->prev_sec_pn16 = sec_pn16;
473 					/* accumulate the prev_freed_pkts
474 					 * counter according to the PN from
475 					 * firmware
476 					 */
477 					lnk->total_freed_pkts += diff16;
478 				}
479 			} else {
480 				if (diff)
481 					/* accumulate the prev_freed_pkts
482 					 * counter according to the free packets
483 					 * count from firmware
484 					 */
485 					lnk->total_freed_pkts += diff;
486 			}
487 		}
488 
489 		/* For an AP that has been started */
490 		if (wlvifap && test_bit(i, wlvifap->ap.sta_hlid_map)) {
491 			if (wlvifap->encryption_type == KEY_TKIP ||
492 			    wlvifap->encryption_type == KEY_AES) {
493 				if (diff16) {
494 					lnk->prev_sec_pn16 = sec_pn16;
495 					/* accumulate the prev_freed_pkts
496 					 * counter according to the PN from
497 					 * firmware
498 					 */
499 					lnk->total_freed_pkts += diff16;
500 				}
501 			} else {
502 				if (diff)
503 					/* accumulate the prev_freed_pkts
504 					 * counter according to the free packets
505 					 * count from firmware
506 					 */
507 					lnk->total_freed_pkts += diff;
508 			}
509 		}
510 	}
511 
512 	/* prevent wrap-around in total blocks counter */
513 	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
514 		freed_blocks = status->total_released_blks -
515 			       wl->tx_blocks_freed;
516 	else
517 		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
518 			       status->total_released_blks;
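	/*
	 * Editorial note (not part of the original source): a worked example
	 * of the 32-bit wrap handling above. If tx_blocks_freed was
	 * 0xfffffff0 and the firmware now reports total_released_blks ==
	 * 0x10, the counter wrapped, so freed_blocks =
	 * 0x100000000 - 0xfffffff0 + 0x10 = 0x20 (32 blocks).
	 */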
519 
520 	wl->tx_blocks_freed = status->total_released_blks;
521 
522 	wl->tx_allocated_blocks -= freed_blocks;
523 
524 	/*
525 	 * If the FW freed some blocks:
526 	 * If we still have allocated blocks - re-arm the timer, Tx is
527 	 * not stuck. Otherwise, cancel the timer (no Tx currently).
528 	 */
529 	if (freed_blocks) {
530 		if (wl->tx_allocated_blocks)
531 			wl12xx_rearm_tx_watchdog_locked(wl);
532 		else
533 			cancel_delayed_work(&wl->tx_watchdog_work);
534 	}
535 
536 	avail = status->tx_total - wl->tx_allocated_blocks;
537 
538 	/*
539 	 * The FW might change the total number of TX memblocks before
540 	 * we get a notification about blocks being released. Thus, the
541 	 * available blocks calculation might yield a temporary result
542 	 * which is lower than the actual available blocks. Keeping in
543 	 * mind that only blocks that were allocated can be moved from
544 	 * TX to RX, tx_blocks_available should never decrease here.
545 	 */
546 	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
547 				      avail);
548 
549 	/* if more blocks are available now, tx work can be scheduled */
550 	if (wl->tx_blocks_available > old_tx_blk_count)
551 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
552 
553 	/* for AP update num of allocated TX blocks per link and ps status */
554 	wl12xx_for_each_wlvif_ap(wl, wlvif) {
555 		wl12xx_irq_update_links_status(wl, wlvif, status);
556 	}
557 
558 	/* update the host-chipset time offset */
559 	wl->time_offset = (ktime_get_boottime_ns() >> 10) -
560 		(s64)(status->fw_localtime);
561 
562 	wl->fw_fast_lnk_map = status->link_fast_bitmap;
563 
564 	return 0;
565 }
566 
567 static void wl1271_flush_deferred_work(struct wl1271 *wl)
568 {
569 	struct sk_buff *skb;
570 
571 	/* Pass all received frames to the network stack */
572 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
573 		ieee80211_rx_ni(wl->hw, skb);
574 
575 	/* Return sent skbs to the network stack */
576 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
577 		ieee80211_tx_status_ni(wl->hw, skb);
578 }
579 
580 static void wl1271_netstack_work(struct work_struct *work)
581 {
582 	struct wl1271 *wl =
583 		container_of(work, struct wl1271, netstack_work);
584 
585 	do {
586 		wl1271_flush_deferred_work(wl);
587 	} while (skb_queue_len(&wl->deferred_rx_queue));
588 }
589 
590 #define WL1271_IRQ_MAX_LOOPS 256
591 
592 static int wlcore_irq_locked(struct wl1271 *wl)
593 {
594 	int ret = 0;
595 	u32 intr;
596 	int loopcount = WL1271_IRQ_MAX_LOOPS;
597 	bool run_tx_queue = true;
598 	bool done = false;
599 	unsigned int defer_count;
600 	unsigned long flags;
601 
602 	/*
603 	 * In case edge triggered interrupt must be used, we cannot iterate
604 	 * more than once without introducing race conditions with the hardirq.
605 	 */
606 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
607 		loopcount = 1;
608 
609 	wl1271_debug(DEBUG_IRQ, "IRQ work");
610 
611 	if (unlikely(wl->state != WLCORE_STATE_ON))
612 		goto out;
613 
614 	ret = pm_runtime_resume_and_get(wl->dev);
615 	if (ret < 0)
616 		goto out;
617 
618 	while (!done && loopcount--) {
619 		smp_mb__after_atomic();
620 
621 		ret = wlcore_fw_status(wl, wl->fw_status);
622 		if (ret < 0)
623 			goto err_ret;
624 
625 		wlcore_hw_tx_immediate_compl(wl);
626 
627 		intr = wl->fw_status->intr;
628 		intr &= WLCORE_ALL_INTR_MASK;
629 		if (!intr) {
630 			done = true;
631 			continue;
632 		}
633 
634 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
635 			wl1271_error("HW watchdog interrupt received! starting recovery.");
636 			wl->watchdog_recovery = true;
637 			ret = -EIO;
638 
639 			/* restarting the chip. ignore any other interrupt. */
640 			goto err_ret;
641 		}
642 
643 		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
644 			wl1271_error("SW watchdog interrupt received! "
645 				     "starting recovery.");
646 			wl->watchdog_recovery = true;
647 			ret = -EIO;
648 
649 			/* restarting the chip. ignore any other interrupt. */
650 			goto err_ret;
651 		}
652 
653 		if (likely(intr & WL1271_ACX_INTR_DATA)) {
654 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
655 
656 			ret = wlcore_rx(wl, wl->fw_status);
657 			if (ret < 0)
658 				goto err_ret;
659 
660 			/* Check if any tx blocks were freed */
661 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
662 				if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
663 					if (!wl1271_tx_total_queue_count(wl))
664 						run_tx_queue = false;
665 					spin_unlock_irqrestore(&wl->wl_lock, flags);
666 				}
667 
668 				/*
669 				 * In order to avoid starvation of the TX path,
670 				 * call the work function directly.
671 				 */
672 				if (run_tx_queue) {
673 					ret = wlcore_tx_work_locked(wl);
674 					if (ret < 0)
675 						goto err_ret;
676 				}
677 			}
678 
679 			/* check for tx results */
680 			ret = wlcore_hw_tx_delayed_compl(wl);
681 			if (ret < 0)
682 				goto err_ret;
683 
684 			/* Make sure the deferred queues don't get too long */
685 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
686 				      skb_queue_len(&wl->deferred_rx_queue);
687 			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
688 				wl1271_flush_deferred_work(wl);
689 		}
690 
691 		if (intr & WL1271_ACX_INTR_EVENT_A) {
692 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
693 			ret = wl1271_event_handle(wl, 0);
694 			if (ret < 0)
695 				goto err_ret;
696 		}
697 
698 		if (intr & WL1271_ACX_INTR_EVENT_B) {
699 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
700 			ret = wl1271_event_handle(wl, 1);
701 			if (ret < 0)
702 				goto err_ret;
703 		}
704 
705 		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
706 			wl1271_debug(DEBUG_IRQ,
707 				     "WL1271_ACX_INTR_INIT_COMPLETE");
708 
709 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
710 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
711 	}
712 
713 err_ret:
714 	pm_runtime_mark_last_busy(wl->dev);
715 	pm_runtime_put_autosuspend(wl->dev);
716 
717 out:
718 	return ret;
719 }
720 
721 static irqreturn_t wlcore_irq(int irq, void *cookie)
722 {
723 	int ret;
724 	unsigned long flags;
725 	struct wl1271 *wl = cookie;
726 	bool queue_tx_work = true;
727 
728 	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
729 
730 	/* complete the ELP completion */
731 	if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) {
732 		spin_lock_irqsave(&wl->wl_lock, flags);
733 		if (wl->elp_compl)
734 			complete(wl->elp_compl);
735 		spin_unlock_irqrestore(&wl->wl_lock, flags);
736 	}
737 
738 	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
739 		/* don't enqueue a work right now. mark it as pending */
740 		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
741 		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
742 		spin_lock_irqsave(&wl->wl_lock, flags);
743 		disable_irq_nosync(wl->irq);
744 		pm_wakeup_event(wl->dev, 0);
745 		spin_unlock_irqrestore(&wl->wl_lock, flags);
746 		goto out_handled;
747 	}
748 
749 	/* TX might be handled here, avoid redundant work */
750 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
751 	cancel_work_sync(&wl->tx_work);
752 
753 	mutex_lock(&wl->mutex);
754 
755 	ret = wlcore_irq_locked(wl);
756 	if (ret)
757 		wl12xx_queue_recovery_work(wl);
758 
759 	/* In case TX was not handled in wlcore_irq_locked(), queue TX work */
760 	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
761 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
762 		if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
763 			if (!wl1271_tx_total_queue_count(wl))
764 				queue_tx_work = false;
765 			spin_unlock_irqrestore(&wl->wl_lock, flags);
766 		}
767 		if (queue_tx_work)
768 			ieee80211_queue_work(wl->hw, &wl->tx_work);
769 	}
770 
771 	mutex_unlock(&wl->mutex);
772 
773 out_handled:
774 	clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
775 
776 	return IRQ_HANDLED;
777 }
778 
779 struct vif_counter_data {
780 	u8 counter;
781 
782 	struct ieee80211_vif *cur_vif;
783 	bool cur_vif_running;
784 };
785 
786 static void wl12xx_vif_count_iter(void *data, u8 *mac,
787 				  struct ieee80211_vif *vif)
788 {
789 	struct vif_counter_data *counter = data;
790 
791 	counter->counter++;
792 	if (counter->cur_vif == vif)
793 		counter->cur_vif_running = true;
794 }
795 
796 /* caller must not hold wl->mutex, as it might deadlock */
797 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
798 			       struct ieee80211_vif *cur_vif,
799 			       struct vif_counter_data *data)
800 {
801 	memset(data, 0, sizeof(*data));
802 	data->cur_vif = cur_vif;
803 
804 	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
805 					    wl12xx_vif_count_iter, data);
806 }
807 
808 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
809 {
810 	const struct firmware *fw;
811 	const char *fw_name;
812 	enum wl12xx_fw_type fw_type;
813 	int ret;
814 
815 	if (plt) {
816 		fw_type = WL12XX_FW_TYPE_PLT;
817 		fw_name = wl->plt_fw_name;
818 	} else {
819 		/*
820 		 * we can't call wl12xx_get_vif_count() here because
821 		 * wl->mutex is taken, so use the cached last_vif_count value
822 		 */
823 		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
824 			fw_type = WL12XX_FW_TYPE_MULTI;
825 			fw_name = wl->mr_fw_name;
826 		} else {
827 			fw_type = WL12XX_FW_TYPE_NORMAL;
828 			fw_name = wl->sr_fw_name;
829 		}
830 	}
831 
832 	if (wl->fw_type == fw_type)
833 		return 0;
834 
835 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
836 
837 	ret = request_firmware(&fw, fw_name, wl->dev);
838 
839 	if (ret < 0) {
840 		wl1271_error("could not get firmware %s: %d", fw_name, ret);
841 		return ret;
842 	}
843 
844 	if (fw->size % 4) {
845 		wl1271_error("firmware size is not a multiple of 32 bits: %zu",
846 			     fw->size);
847 		ret = -EILSEQ;
848 		goto out;
849 	}
850 
851 	vfree(wl->fw);
852 	wl->fw_type = WL12XX_FW_TYPE_NONE;
853 	wl->fw_len = fw->size;
854 	wl->fw = vmalloc(wl->fw_len);
855 
856 	if (!wl->fw) {
857 		wl1271_error("could not allocate memory for the firmware");
858 		ret = -ENOMEM;
859 		goto out;
860 	}
861 
862 	memcpy(wl->fw, fw->data, wl->fw_len);
863 	ret = 0;
864 	wl->fw_type = fw_type;
865 out:
866 	release_firmware(fw);
867 
868 	return ret;
869 }
870 
871 void wl12xx_queue_recovery_work(struct wl1271 *wl)
872 {
873 	/* Avoid a recursive recovery */
874 	if (wl->state == WLCORE_STATE_ON) {
875 		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
876 				  &wl->flags));
877 
878 		wl->state = WLCORE_STATE_RESTARTING;
879 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
880 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
881 	}
882 }
883 
884 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
885 {
886 	size_t len;
887 
888 	/* Make sure we have enough room */
889 	len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
890 
891 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
892 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
893 	wl->fwlog_size += len;
894 
895 	return len;
896 }
897 
898 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
899 {
900 	u32 end_of_log = 0;
901 	int error;
902 
903 	if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
904 		return;
905 
906 	wl1271_info("Reading FW panic log");
907 
908 	/*
909 	 * Make sure the chip is awake and the logger isn't active.
910 	 * Do not send a stop fwlog command if the fw is hung or if
911 	 * dbgpins are used (due to some fw bug).
912 	 */
913 	error = pm_runtime_resume_and_get(wl->dev);
914 	if (error < 0)
915 		return;
916 	if (!wl->watchdog_recovery &&
917 	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
918 		wl12xx_cmd_stop_fwlog(wl);
919 
920 	/* Traverse the memory blocks linked list */
921 	do {
922 		end_of_log = wlcore_event_fw_logger(wl);
923 		if (end_of_log == 0) {
924 			msleep(100);
925 			end_of_log = wlcore_event_fw_logger(wl);
926 		}
927 	} while (end_of_log != 0);
928 }
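/*
 * Editorial note (not part of the original source): the loop above keeps
 * draining firmware log memory blocks; a zero return from
 * wlcore_event_fw_logger() is retried once after a 100 ms delay before the
 * log is treated as fully read.
 */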
929 
930 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
931 				   u8 hlid, struct ieee80211_sta *sta)
932 {
933 	struct wl1271_station *wl_sta;
934 	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
935 
936 	wl_sta = (void *)sta->drv_priv;
937 	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
938 
939 	/*
940 	 * increment the initial seq number on recovery to account for
941 	 * transmitted packets that we haven't yet got in the FW status
942 	 */
943 	if (wlvif->encryption_type == KEY_GEM)
944 		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
945 
946 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
947 		wl_sta->total_freed_pkts += sqn_recovery_padding;
948 }
949 
950 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
951 					struct wl12xx_vif *wlvif,
952 					u8 hlid, const u8 *addr)
953 {
954 	struct ieee80211_sta *sta;
955 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
956 
957 	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
958 		    is_zero_ether_addr(addr)))
959 		return;
960 
961 	rcu_read_lock();
962 	sta = ieee80211_find_sta(vif, addr);
963 	if (sta)
964 		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
965 	rcu_read_unlock();
966 }
967 
968 static void wlcore_print_recovery(struct wl1271 *wl)
969 {
970 	u32 pc = 0;
971 	u32 hint_sts = 0;
972 	int ret;
973 
974 	wl1271_info("Hardware recovery in progress. FW ver: %s",
975 		    wl->chip.fw_ver_str);
976 
977 	/* change partitions momentarily so we can read the FW pc */
978 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
979 	if (ret < 0)
980 		return;
981 
982 	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
983 	if (ret < 0)
984 		return;
985 
986 	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
987 	if (ret < 0)
988 		return;
989 
990 	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
991 				pc, hint_sts, ++wl->recovery_count);
992 
993 	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
994 }
995 
996 
997 static void wl1271_recovery_work(struct work_struct *work)
998 {
999 	struct wl1271 *wl =
1000 		container_of(work, struct wl1271, recovery_work);
1001 	struct wl12xx_vif *wlvif;
1002 	struct ieee80211_vif *vif;
1003 	int error;
1004 
1005 	mutex_lock(&wl->mutex);
1006 
1007 	if (wl->state == WLCORE_STATE_OFF || wl->plt)
1008 		goto out_unlock;
1009 
1010 	error = pm_runtime_resume_and_get(wl->dev);
1011 	if (error < 0)
1012 		wl1271_warning("Enable for recovery failed");
1013 	wlcore_disable_interrupts_nosync(wl);
1014 
1015 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
1016 		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
1017 			wl12xx_read_fwlog_panic(wl);
1018 		wlcore_print_recovery(wl);
1019 	}
1020 
1021 	BUG_ON(wl->conf.recovery.bug_on_recovery &&
1022 	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
1023 
1024 	clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
1025 
1026 	if (wl->conf.recovery.no_recovery) {
1027 		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
1028 		goto out_unlock;
1029 	}
1030 
1031 	/* Prevent spurious TX during FW restart */
1032 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1033 
1034 	/* reboot the chipset */
1035 	while (!list_empty(&wl->wlvif_list)) {
1036 		wlvif = list_first_entry(&wl->wlvif_list,
1037 				       struct wl12xx_vif, list);
1038 		vif = wl12xx_wlvif_to_vif(wlvif);
1039 
1040 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
1041 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
1042 			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
1043 						    vif->bss_conf.bssid);
1044 		}
1045 
1046 		__wl1271_op_remove_interface(wl, vif, false);
1047 	}
1048 
1049 	wlcore_op_stop_locked(wl);
1050 	pm_runtime_mark_last_busy(wl->dev);
1051 	pm_runtime_put_autosuspend(wl->dev);
1052 
1053 	ieee80211_restart_hw(wl->hw);
1054 
1055 	/*
1056 	 * It's safe to enable TX now - the queues are stopped after a request
1057 	 * to restart the HW.
1058 	 */
1059 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1060 
1061 out_unlock:
1062 	wl->watchdog_recovery = false;
1063 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1064 	mutex_unlock(&wl->mutex);
1065 }
1066 
1067 static int wlcore_fw_wakeup(struct wl1271 *wl)
1068 {
1069 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
1070 }
1071 
1072 static int wl1271_setup(struct wl1271 *wl)
1073 {
1074 	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1075 	if (!wl->raw_fw_status)
1076 		goto err;
1077 
1078 	wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1079 	if (!wl->fw_status)
1080 		goto err;
1081 
1082 	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1083 	if (!wl->tx_res_if)
1084 		goto err;
1085 
1086 	return 0;
1087 err:
1088 	kfree(wl->fw_status);
1089 	kfree(wl->raw_fw_status);
1090 	return -ENOMEM;
1091 }
1092 
1093 static int wl12xx_set_power_on(struct wl1271 *wl)
1094 {
1095 	int ret;
1096 
1097 	msleep(WL1271_PRE_POWER_ON_SLEEP);
1098 	ret = wl1271_power_on(wl);
1099 	if (ret < 0)
1100 		goto out;
1101 	msleep(WL1271_POWER_ON_SLEEP);
1102 	wl1271_io_reset(wl);
1103 	wl1271_io_init(wl);
1104 
1105 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1106 	if (ret < 0)
1107 		goto fail;
1108 
1109 	/* ELP module wake up */
1110 	ret = wlcore_fw_wakeup(wl);
1111 	if (ret < 0)
1112 		goto fail;
1113 
1114 out:
1115 	return ret;
1116 
1117 fail:
1118 	wl1271_power_off(wl);
1119 	return ret;
1120 }
1121 
1122 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1123 {
1124 	int ret = 0;
1125 
1126 	ret = wl12xx_set_power_on(wl);
1127 	if (ret < 0)
1128 		goto out;
1129 
1130 	/*
1131 	 * For wl127x based devices we could use the default block
1132 	 * size (512 bytes), but due to a bug in the sdio driver, we
1133 	 * need to set it explicitly after the chip is powered on.  To
1134 	 * simplify the code and since the performance impact is
1135 	 * negligible, we use the same block size for all different
1136 	 * chip types.
1137 	 *
1138 	 * Check if the bus supports blocksize alignment and, if it
1139 	 * doesn't, make sure we don't have the quirk.
1140 	 */
1141 	if (!wl1271_set_block_size(wl))
1142 		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1143 
1144 	/* TODO: make sure the lower driver has set things up correctly */
1145 
1146 	ret = wl1271_setup(wl);
1147 	if (ret < 0)
1148 		goto out;
1149 
1150 	ret = wl12xx_fetch_firmware(wl, plt);
1151 	if (ret < 0) {
1152 		kfree(wl->fw_status);
1153 		kfree(wl->raw_fw_status);
1154 		kfree(wl->tx_res_if);
1155 	}
1156 
1157 out:
1158 	return ret;
1159 }
1160 
1161 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1162 {
1163 	int retries = WL1271_BOOT_RETRIES;
1164 	struct wiphy *wiphy = wl->hw->wiphy;
1165 
1166 	static const char * const PLT_MODE[] = {
1167 		"PLT_OFF",
1168 		"PLT_ON",
1169 		"PLT_FEM_DETECT",
1170 		"PLT_CHIP_AWAKE"
1171 	};
1172 
1173 	int ret;
1174 
1175 	mutex_lock(&wl->mutex);
1176 
1177 	wl1271_notice("power up");
1178 
1179 	if (wl->state != WLCORE_STATE_OFF) {
1180 		wl1271_error("cannot go into PLT state because not "
1181 			     "in off state: %d", wl->state);
1182 		ret = -EBUSY;
1183 		goto out;
1184 	}
1185 
1186 	/* Indicate to lower levels that we are now in PLT mode */
1187 	wl->plt = true;
1188 	wl->plt_mode = plt_mode;
1189 
1190 	while (retries) {
1191 		retries--;
1192 		ret = wl12xx_chip_wakeup(wl, true);
1193 		if (ret < 0)
1194 			goto power_off;
1195 
1196 		if (plt_mode != PLT_CHIP_AWAKE) {
1197 			ret = wl->ops->plt_init(wl);
1198 			if (ret < 0)
1199 				goto power_off;
1200 		}
1201 
1202 		wl->state = WLCORE_STATE_ON;
1203 		wl1271_notice("firmware booted in PLT mode %s (%s)",
1204 			      PLT_MODE[plt_mode],
1205 			      wl->chip.fw_ver_str);
1206 
1207 		/* update hw/fw version info in wiphy struct */
1208 		wiphy->hw_version = wl->chip.id;
1209 		strscpy(wiphy->fw_version, wl->chip.fw_ver_str,
1210 			sizeof(wiphy->fw_version));
1211 
1212 		goto out;
1213 
1214 power_off:
1215 		wl1271_power_off(wl);
1216 	}
1217 
1218 	wl->plt = false;
1219 	wl->plt_mode = PLT_OFF;
1220 
1221 	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1222 		     WL1271_BOOT_RETRIES);
1223 out:
1224 	mutex_unlock(&wl->mutex);
1225 
1226 	return ret;
1227 }
1228 
1229 int wl1271_plt_stop(struct wl1271 *wl)
1230 {
1231 	int ret = 0;
1232 
1233 	wl1271_notice("power down");
1234 
1235 	/*
1236 	 * Interrupts must be disabled before setting the state to OFF.
1237 	 * Otherwise, the interrupt handler might be called and exit without
1238 	 * reading the interrupt status.
1239 	 */
1240 	wlcore_disable_interrupts(wl);
1241 	mutex_lock(&wl->mutex);
1242 	if (!wl->plt) {
1243 		mutex_unlock(&wl->mutex);
1244 
1245 		/*
1246 		 * This will not necessarily enable interrupts as interrupts
1247 		 * may have been disabled when op_stop was called. It will,
1248 		 * however, balance the above call to disable_interrupts().
1249 		 */
1250 		wlcore_enable_interrupts(wl);
1251 
1252 		wl1271_error("cannot power down because not in PLT "
1253 			     "state: %d", wl->state);
1254 		ret = -EBUSY;
1255 		goto out;
1256 	}
1257 
1258 	mutex_unlock(&wl->mutex);
1259 
1260 	wl1271_flush_deferred_work(wl);
1261 	cancel_work_sync(&wl->netstack_work);
1262 	cancel_work_sync(&wl->recovery_work);
1263 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1264 
1265 	mutex_lock(&wl->mutex);
1266 	wl1271_power_off(wl);
1267 	wl->flags = 0;
1268 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1269 	wl->state = WLCORE_STATE_OFF;
1270 	wl->plt = false;
1271 	wl->plt_mode = PLT_OFF;
1272 	wl->rx_counter = 0;
1273 	mutex_unlock(&wl->mutex);
1274 
1275 out:
1276 	return ret;
1277 }
1278 
1279 static void wl1271_op_tx(struct ieee80211_hw *hw,
1280 			 struct ieee80211_tx_control *control,
1281 			 struct sk_buff *skb)
1282 {
1283 	struct wl1271 *wl = hw->priv;
1284 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1285 	struct ieee80211_vif *vif = info->control.vif;
1286 	struct wl12xx_vif *wlvif = NULL;
1287 	unsigned long flags;
1288 	int q, mapping;
1289 	u8 hlid;
1290 
1291 	if (!vif) {
1292 		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1293 		ieee80211_free_txskb(hw, skb);
1294 		return;
1295 	}
1296 
1297 	wlvif = wl12xx_vif_to_data(vif);
1298 	mapping = skb_get_queue_mapping(skb);
1299 	q = wl1271_tx_get_queue(mapping);
1300 
1301 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1302 
1303 	spin_lock_irqsave(&wl->wl_lock, flags);
1304 
1305 	/*
1306 	 * drop the packet if the link is invalid or the queue is stopped
1307 	 * for any reason but watermark. Watermark is a "soft"-stop so we
1308 	 * allow these packets through.
1309 	 */
1310 	if (hlid == WL12XX_INVALID_LINK_ID ||
1311 	    (!test_bit(hlid, wlvif->links_map)) ||
1312 	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1313 	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1314 			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1315 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1316 		ieee80211_free_txskb(hw, skb);
1317 		goto out;
1318 	}
1319 
1320 	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1321 		     hlid, q, skb->len);
1322 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1323 
1324 	wl->tx_queue_count[q]++;
1325 	wlvif->tx_queue_count[q]++;
1326 
1327 	/*
1328 	 * The workqueue is slow to process the tx_queue and we need to stop
1329 	 * the queue here, otherwise the queue will get too long.
1330 	 */
1331 	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1332 	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1333 					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1334 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1335 		wlcore_stop_queue_locked(wl, wlvif, q,
1336 					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1337 	}
1338 
1339 	/*
1340 	 * The chip specific setup must run before the first TX packet -
1341 	 * before that, the tx_work will not be initialized!
1342 	 */
1343 
1344 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1345 	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1346 		ieee80211_queue_work(wl->hw, &wl->tx_work);
1347 
1348 out:
1349 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1350 }
1351 
1352 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1353 {
1354 	unsigned long flags;
1355 	int q;
1356 
1357 	/* no need to queue a new dummy packet if one is already pending */
1358 	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1359 		return 0;
1360 
1361 	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1362 
1363 	spin_lock_irqsave(&wl->wl_lock, flags);
1364 	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1365 	wl->tx_queue_count[q]++;
1366 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1367 
1368 	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1369 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1370 		return wlcore_tx_work_locked(wl);
1371 
1372 	/*
1373 	 * If the FW TX is busy, TX work will be scheduled by the threaded
1374 	 * interrupt handler function
1375 	 */
1376 	return 0;
1377 }
1378 
1379 /*
1380  * The size of the dummy packet should be at least 1400 bytes. However, in
1381  * order to minimize the number of bus transactions, aligning it to 512-byte
1382  * boundaries could be beneficial, performance-wise.
1383  */
1384 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
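/*
 * Editorial note (not part of the original source): ALIGN(1400, 512) rounds
 * 1400 up to the next multiple of 512, so the dummy packet buffer
 * (TX descriptor + 802.11 header + zero padding) spans 1536 bytes.
 */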
1385 
1386 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1387 {
1388 	struct sk_buff *skb;
1389 	struct ieee80211_hdr_3addr *hdr;
1390 	unsigned int dummy_packet_size;
1391 
1392 	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1393 			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1394 
1395 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1396 	if (!skb) {
1397 		wl1271_warning("Failed to allocate a dummy packet skb");
1398 		return NULL;
1399 	}
1400 
1401 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1402 
1403 	hdr = skb_put_zero(skb, sizeof(*hdr));
1404 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1405 					 IEEE80211_STYPE_NULLFUNC |
1406 					 IEEE80211_FCTL_TODS);
1407 
1408 	skb_put_zero(skb, dummy_packet_size);
1409 
1410 	/* Dummy packets require the TID to be management */
1411 	skb->priority = WL1271_TID_MGMT;
1412 
1413 	/* Initialize all fields that might be used */
1414 	skb_set_queue_mapping(skb, 0);
1415 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1416 
1417 	return skb;
1418 }
1419 
1420 
1421 static int
1422 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1423 {
1424 	int num_fields = 0, in_field = 0, fields_size = 0;
1425 	int i, pattern_len = 0;
1426 
1427 	if (!p->mask) {
1428 		wl1271_warning("No mask in WoWLAN pattern");
1429 		return -EINVAL;
1430 	}
1431 
1432 	/*
1433 	 * The pattern is broken up into segments of bytes at different offsets
1434 	 * that need to be checked by the FW filter. Each segment is called
1435 	 * a field in the FW API. We verify that the total number of fields
1436 	 * required for this pattern won't exceed FW limits (8)
1437 	 * as well as the total fields buffer won't exceed the FW limit.
1438 	 * Note that if there's a pattern which crosses Ethernet/IP header
1439 	 * boundary a new field is required.
1440 	 */
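	/*
	 * Editorial example (not part of the original source), assuming the
	 * usual 14-byte Ethernet header behind
	 * WL1271_RX_FILTER_ETH_HEADER_SIZE: a mask selecting bytes 12..15 of
	 * the pattern crosses the Ethernet/IP boundary, so it is counted as
	 * two fields: bytes 12..13 against the Ethernet header and bytes
	 * 14..15 at offset 0 of the IP header.
	 */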
1441 	for (i = 0; i < p->pattern_len; i++) {
1442 		if (test_bit(i, (unsigned long *)p->mask)) {
1443 			if (!in_field) {
1444 				in_field = 1;
1445 				pattern_len = 1;
1446 			} else {
1447 				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1448 					num_fields++;
1449 					fields_size += pattern_len +
1450 						RX_FILTER_FIELD_OVERHEAD;
1451 					pattern_len = 1;
1452 				} else
1453 					pattern_len++;
1454 			}
1455 		} else {
1456 			if (in_field) {
1457 				in_field = 0;
1458 				fields_size += pattern_len +
1459 					RX_FILTER_FIELD_OVERHEAD;
1460 				num_fields++;
1461 			}
1462 		}
1463 	}
1464 
1465 	if (in_field) {
1466 		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1467 		num_fields++;
1468 	}
1469 
1470 	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1471 		wl1271_warning("RX Filter too complex. Too many segments");
1472 		return -EINVAL;
1473 	}
1474 
1475 	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1476 		wl1271_warning("RX filter pattern is too big");
1477 		return -E2BIG;
1478 	}
1479 
1480 	return 0;
1481 }
1482 
1483 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1484 {
1485 	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1486 }
1487 
1488 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1489 {
1490 	int i;
1491 
1492 	if (filter == NULL)
1493 		return;
1494 
1495 	for (i = 0; i < filter->num_fields; i++)
1496 		kfree(filter->fields[i].pattern);
1497 
1498 	kfree(filter);
1499 }
1500 
1501 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1502 				 u16 offset, u8 flags,
1503 				 const u8 *pattern, u8 len)
1504 {
1505 	struct wl12xx_rx_filter_field *field;
1506 
1507 	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1508 		wl1271_warning("Max fields per RX filter reached, can't alloc another");
1509 		return -EINVAL;
1510 	}
1511 
1512 	field = &filter->fields[filter->num_fields];
1513 
1514 	field->pattern = kmemdup(pattern, len, GFP_KERNEL);
1515 	if (!field->pattern) {
1516 		wl1271_warning("Failed to allocate RX filter pattern");
1517 		return -ENOMEM;
1518 	}
1519 
1520 	filter->num_fields++;
1521 
1522 	field->offset = cpu_to_le16(offset);
1523 	field->flags = flags;
1524 	field->len = len;
1525 
1526 	return 0;
1527 }
1528 
1529 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1530 {
1531 	int i, fields_size = 0;
1532 
1533 	for (i = 0; i < filter->num_fields; i++)
1534 		fields_size += filter->fields[i].len +
1535 			sizeof(struct wl12xx_rx_filter_field) -
1536 			sizeof(u8 *);
1537 
1538 	return fields_size;
1539 }
1540 
1541 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1542 				    u8 *buf)
1543 {
1544 	int i;
1545 	struct wl12xx_rx_filter_field *field;
1546 
1547 	for (i = 0; i < filter->num_fields; i++) {
1548 		field = (struct wl12xx_rx_filter_field *)buf;
1549 
1550 		field->offset = filter->fields[i].offset;
1551 		field->flags = filter->fields[i].flags;
1552 		field->len = filter->fields[i].len;
1553 
1554 		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1555 		buf += sizeof(struct wl12xx_rx_filter_field) -
1556 			sizeof(u8 *) + field->len;
1557 	}
1558 }
1559 
1560 /*
1561  * Allocates an RX filter returned through f
1562  * which needs to be freed using rx_filter_free()
1563  */
1564 static int
1565 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1566 					   struct wl12xx_rx_filter **f)
1567 {
1568 	int i, j, ret = 0;
1569 	struct wl12xx_rx_filter *filter;
1570 	u16 offset;
1571 	u8 flags, len;
1572 
1573 	filter = wl1271_rx_filter_alloc();
1574 	if (!filter) {
1575 		wl1271_warning("Failed to alloc rx filter");
1576 		ret = -ENOMEM;
1577 		goto err;
1578 	}
1579 
1580 	i = 0;
1581 	while (i < p->pattern_len) {
1582 		if (!test_bit(i, (unsigned long *)p->mask)) {
1583 			i++;
1584 			continue;
1585 		}
1586 
1587 		for (j = i; j < p->pattern_len; j++) {
1588 			if (!test_bit(j, (unsigned long *)p->mask))
1589 				break;
1590 
1591 			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1592 			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1593 				break;
1594 		}
1595 
1596 		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1597 			offset = i;
1598 			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1599 		} else {
1600 			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1601 			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1602 		}
1603 
1604 		len = j - i;
1605 
1606 		ret = wl1271_rx_filter_alloc_field(filter,
1607 						   offset,
1608 						   flags,
1609 						   &p->pattern[i], len);
1610 		if (ret)
1611 			goto err;
1612 
1613 		i = j;
1614 	}
1615 
1616 	filter->action = FILTER_SIGNAL;
1617 
1618 	*f = filter;
1619 	return 0;
1620 
1621 err:
1622 	wl1271_rx_filter_free(filter);
1623 	*f = NULL;
1624 
1625 	return ret;
1626 }
1627 
1628 static int wl1271_configure_wowlan(struct wl1271 *wl,
1629 				   struct cfg80211_wowlan *wow)
1630 {
1631 	int i, ret;
1632 
1633 	if (!wow || wow->any || !wow->n_patterns) {
1634 		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1635 							  FILTER_SIGNAL);
1636 		if (ret)
1637 			goto out;
1638 
1639 		ret = wl1271_rx_filter_clear_all(wl);
1640 		if (ret)
1641 			goto out;
1642 
1643 		return 0;
1644 	}
1645 
1646 	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1647 		return -EINVAL;
1648 
1649 	/* Validate all incoming patterns before clearing current FW state */
1650 	for (i = 0; i < wow->n_patterns; i++) {
1651 		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1652 		if (ret) {
1653 			wl1271_warning("Bad wowlan pattern %d", i);
1654 			return ret;
1655 		}
1656 	}
1657 
1658 	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1659 	if (ret)
1660 		goto out;
1661 
1662 	ret = wl1271_rx_filter_clear_all(wl);
1663 	if (ret)
1664 		goto out;
1665 
1666 	/* Translate WoWLAN patterns into filters */
1667 	for (i = 0; i < wow->n_patterns; i++) {
1668 		struct cfg80211_pkt_pattern *p;
1669 		struct wl12xx_rx_filter *filter = NULL;
1670 
1671 		p = &wow->patterns[i];
1672 
1673 		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1674 		if (ret) {
1675 			wl1271_warning("Failed to create an RX filter from "
1676 				       "wowlan pattern %d", i);
1677 			goto out;
1678 		}
1679 
1680 		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1681 
1682 		wl1271_rx_filter_free(filter);
1683 		if (ret)
1684 			goto out;
1685 	}
1686 
1687 	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1688 
1689 out:
1690 	return ret;
1691 }
1692 
1693 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1694 					struct wl12xx_vif *wlvif,
1695 					struct cfg80211_wowlan *wow)
1696 {
1697 	int ret = 0;
1698 
1699 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1700 		goto out;
1701 
1702 	ret = wl1271_configure_wowlan(wl, wow);
1703 	if (ret < 0)
1704 		goto out;
1705 
1706 	if ((wl->conf.conn.suspend_wake_up_event ==
1707 	     wl->conf.conn.wake_up_event) &&
1708 	    (wl->conf.conn.suspend_listen_interval ==
1709 	     wl->conf.conn.listen_interval))
1710 		goto out;
1711 
1712 	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1713 				    wl->conf.conn.suspend_wake_up_event,
1714 				    wl->conf.conn.suspend_listen_interval);
1715 
1716 	if (ret < 0)
1717 		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1718 out:
1719 	return ret;
1720 
1721 }
1722 
1723 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1724 					struct wl12xx_vif *wlvif,
1725 					struct cfg80211_wowlan *wow)
1726 {
1727 	int ret = 0;
1728 
1729 	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1730 		goto out;
1731 
1732 	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1733 	if (ret < 0)
1734 		goto out;
1735 
1736 	ret = wl1271_configure_wowlan(wl, wow);
1737 	if (ret < 0)
1738 		goto out;
1739 
1740 out:
1741 	return ret;
1742 
1743 }
1744 
1745 static int wl1271_configure_suspend(struct wl1271 *wl,
1746 				    struct wl12xx_vif *wlvif,
1747 				    struct cfg80211_wowlan *wow)
1748 {
1749 	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1750 		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1751 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1752 		return wl1271_configure_suspend_ap(wl, wlvif, wow);
1753 	return 0;
1754 }
1755 
1756 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1757 {
1758 	int ret = 0;
1759 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1760 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1761 
1762 	if ((!is_ap) && (!is_sta))
1763 		return;
1764 
1765 	if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1766 	    (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1767 		return;
1768 
1769 	wl1271_configure_wowlan(wl, NULL);
1770 
1771 	if (is_sta) {
1772 		if ((wl->conf.conn.suspend_wake_up_event ==
1773 		     wl->conf.conn.wake_up_event) &&
1774 		    (wl->conf.conn.suspend_listen_interval ==
1775 		     wl->conf.conn.listen_interval))
1776 			return;
1777 
1778 		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1779 				    wl->conf.conn.wake_up_event,
1780 				    wl->conf.conn.listen_interval);
1781 
1782 		if (ret < 0)
1783 			wl1271_error("resume: wake up conditions failed: %d",
1784 				     ret);
1785 
1786 	} else if (is_ap) {
1787 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1788 	}
1789 }
1790 
1791 static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
1792 					    struct cfg80211_wowlan *wow)
1793 {
1794 	struct wl1271 *wl = hw->priv;
1795 	struct wl12xx_vif *wlvif;
1796 	unsigned long flags;
1797 	int ret;
1798 
1799 	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1800 	WARN_ON(!wow);
1801 
1802 	/* we want to perform the recovery before suspending */
1803 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1804 		wl1271_warning("postponing suspend to perform recovery");
1805 		return -EBUSY;
1806 	}
1807 
1808 	wl1271_tx_flush(wl);
1809 
1810 	mutex_lock(&wl->mutex);
1811 
1812 	ret = pm_runtime_resume_and_get(wl->dev);
1813 	if (ret < 0) {
1814 		mutex_unlock(&wl->mutex);
1815 		return ret;
1816 	}
1817 
1818 	wl->wow_enabled = true;
1819 	wl12xx_for_each_wlvif(wl, wlvif) {
1820 		if (wlcore_is_p2p_mgmt(wlvif))
1821 			continue;
1822 
1823 		ret = wl1271_configure_suspend(wl, wlvif, wow);
1824 		if (ret < 0) {
1825 			goto out_sleep;
1826 		}
1827 	}
1828 
1829 	/* disable fast link flow control notifications from FW */
1830 	ret = wlcore_hw_interrupt_notify(wl, false);
1831 	if (ret < 0)
1832 		goto out_sleep;
1833 
1834 	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1835 	ret = wlcore_hw_rx_ba_filter(wl,
1836 				     !!wl->conf.conn.suspend_rx_ba_activity);
1837 	if (ret < 0)
1838 		goto out_sleep;
1839 
1840 out_sleep:
1841 	pm_runtime_put_noidle(wl->dev);
1842 	mutex_unlock(&wl->mutex);
1843 
1844 	if (ret < 0) {
1845 		wl1271_warning("couldn't prepare device to suspend");
1846 		return ret;
1847 	}
1848 
1849 	/* flush any remaining work */
1850 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1851 
1852 	flush_work(&wl->tx_work);
1853 
1854 	/*
1855 	 * Cancel the watchdog even if above tx_flush failed. We will detect
1856 	 * it on resume anyway.
1857 	 */
1858 	cancel_delayed_work(&wl->tx_watchdog_work);
1859 
1860 	/*
1861 	 * set suspended flag to avoid triggering a new threaded_irq
1862 	 * work.
1863 	 */
1864 	spin_lock_irqsave(&wl->wl_lock, flags);
1865 	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1866 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1867 
1868 	return pm_runtime_force_suspend(wl->dev);
1869 }
1870 
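/*
 * mac80211 resume handler: wake the device, replay IRQ work that was
 * postponed while suspended, queue a pending recovery if one was
 * deferred and restore the pre-suspend configuration per interface.
 */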
1871 static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
1872 {
1873 	struct wl1271 *wl = hw->priv;
1874 	struct wl12xx_vif *wlvif;
1875 	unsigned long flags;
1876 	bool run_irq_work = false, pending_recovery;
1877 	int ret;
1878 
1879 	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1880 		     wl->wow_enabled);
1881 	WARN_ON(!wl->wow_enabled);
1882 
1883 	ret = pm_runtime_force_resume(wl->dev);
1884 	if (ret < 0) {
1885 		wl1271_error("ELP wakeup failure!");
1886 		goto out_sleep;
1887 	}
1888 
1889 	/*
1890 	 * re-enable irq_work enqueuing, and call irq_work directly if
1891 	 * there is a pending work.
1892 	 */
1893 	spin_lock_irqsave(&wl->wl_lock, flags);
1894 	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1895 	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1896 		run_irq_work = true;
1897 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1898 
1899 	mutex_lock(&wl->mutex);
1900 
1901 	/* test the recovery flag before calling any SDIO functions */
1902 	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1903 				    &wl->flags);
1904 
1905 	if (run_irq_work) {
1906 		wl1271_debug(DEBUG_MAC80211,
1907 			     "run postponed irq_work directly");
1908 
1909 		/* don't talk to the HW if recovery is pending */
1910 		if (!pending_recovery) {
1911 			ret = wlcore_irq_locked(wl);
1912 			if (ret)
1913 				wl12xx_queue_recovery_work(wl);
1914 		}
1915 
1916 		wlcore_enable_interrupts(wl);
1917 	}
1918 
1919 	if (pending_recovery) {
1920 		wl1271_warning("queuing forgotten recovery on resume");
1921 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1922 		goto out_sleep;
1923 	}
1924 
1925 	ret = pm_runtime_resume_and_get(wl->dev);
1926 	if (ret < 0)
1927 		goto out;
1928 
1929 	wl12xx_for_each_wlvif(wl, wlvif) {
1930 		if (wlcore_is_p2p_mgmt(wlvif))
1931 			continue;
1932 
1933 		wl1271_configure_resume(wl, wlvif);
1934 	}
1935 
1936 	ret = wlcore_hw_interrupt_notify(wl, true);
1937 	if (ret < 0)
1938 		goto out_sleep;
1939 
1940 	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1941 	ret = wlcore_hw_rx_ba_filter(wl, false);
1942 	if (ret < 0)
1943 		goto out_sleep;
1944 
1945 out_sleep:
1946 	pm_runtime_mark_last_busy(wl->dev);
1947 	pm_runtime_put_autosuspend(wl->dev);
1948 
1949 out:
1950 	wl->wow_enabled = false;
1951 
1952 	/*
1953 	 * Set a flag to re-init the watchdog on the first Tx after resume.
1954 	 * That way we avoid possible conditions where Tx-complete interrupts
1955 	 * fail to arrive and we perform a spurious recovery.
1956 	 */
1957 	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1958 	mutex_unlock(&wl->mutex);
1959 
1960 	return 0;
1961 }
1962 
1963 static int wl1271_op_start(struct ieee80211_hw *hw)
1964 {
1965 	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1966 
1967 	/*
1968 	 * We have to delay the booting of the hardware because
1969 	 * we need to know the local MAC address before downloading and
1970 	 * initializing the firmware. The MAC address cannot be changed
1971 	 * after boot, and without the proper MAC address, the firmware
1972 	 * will not function properly.
1973 	 *
1974 	 * The MAC address is first known when the corresponding interface
1975 	 * is added. That is where we will initialize the hardware.
1976 	 */
1977 
1978 	return 0;
1979 }
1980 
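/*
 * Bring the chip down with wl->mutex held: disable interrupts, cancel
 * outstanding work (temporarily dropping the mutex to do so), power
 * off and reset the per-device state to its defaults.
 */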
1981 static void wlcore_op_stop_locked(struct wl1271 *wl)
1982 {
1983 	int i;
1984 
1985 	if (wl->state == WLCORE_STATE_OFF) {
1986 		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1987 					&wl->flags))
1988 			wlcore_enable_interrupts(wl);
1989 
1990 		return;
1991 	}
1992 
1993 	/*
1994 	 * this must be before the cancel_work calls below, so that the work
1995 	 * functions don't perform further work.
1996 	 */
1997 	wl->state = WLCORE_STATE_OFF;
1998 
1999 	/*
2000 	 * Use the nosync variant to disable interrupts, so the mutex could be
2001 	 * held while doing so without deadlocking.
2002 	 */
2003 	wlcore_disable_interrupts_nosync(wl);
2004 
2005 	mutex_unlock(&wl->mutex);
2006 
2007 	wlcore_synchronize_interrupts(wl);
2008 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
2009 		cancel_work_sync(&wl->recovery_work);
2010 	wl1271_flush_deferred_work(wl);
2011 	cancel_delayed_work_sync(&wl->scan_complete_work);
2012 	cancel_work_sync(&wl->netstack_work);
2013 	cancel_work_sync(&wl->tx_work);
2014 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
2015 
2016 	/* let's notify MAC80211 about the remaining pending TX frames */
2017 	mutex_lock(&wl->mutex);
2018 	wl12xx_tx_reset(wl);
2019 
2020 	wl1271_power_off(wl);
2021 	/*
2022 	 * In case a recovery was scheduled, interrupts were disabled to avoid
2023 	 * an interrupt storm. Now that the power is down, it is safe to
2024 	 * re-enable interrupts to balance the disable depth
2025 	 */
2026 	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
2027 		wlcore_enable_interrupts(wl);
2028 
2029 	wl->band = NL80211_BAND_2GHZ;
2030 
2031 	wl->rx_counter = 0;
2032 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
2033 	wl->channel_type = NL80211_CHAN_NO_HT;
2034 	wl->tx_blocks_available = 0;
2035 	wl->tx_allocated_blocks = 0;
2036 	wl->tx_results_count = 0;
2037 	wl->tx_packets_count = 0;
2038 	wl->time_offset = 0;
2039 	wl->ap_fw_ps_map = 0;
2040 	wl->ap_ps_map = 0;
2041 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
2042 	memset(wl->roles_map, 0, sizeof(wl->roles_map));
2043 	memset(wl->links_map, 0, sizeof(wl->links_map));
2044 	memset(wl->roc_map, 0, sizeof(wl->roc_map));
2045 	memset(wl->session_ids, 0, sizeof(wl->session_ids));
2046 	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
2047 	wl->active_sta_count = 0;
2048 	wl->active_link_count = 0;
2049 
2050 	/* The system link is always allocated */
2051 	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
2052 	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
2053 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
2054 
2055 	/*
2056 	 * this is performed after the cancel_work calls and the associated
2057 	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
2058 	 * get executed before all these vars have been reset.
2059 	 */
2060 	wl->flags = 0;
2061 
2062 	wl->tx_blocks_freed = 0;
2063 
2064 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2065 		wl->tx_pkts_freed[i] = 0;
2066 		wl->tx_allocated_pkts[i] = 0;
2067 	}
2068 
2069 	wl1271_debugfs_reset(wl);
2070 
2071 	kfree(wl->raw_fw_status);
2072 	wl->raw_fw_status = NULL;
2073 	kfree(wl->fw_status);
2074 	wl->fw_status = NULL;
2075 	kfree(wl->tx_res_if);
2076 	wl->tx_res_if = NULL;
2077 	kfree(wl->target_mem_map);
2078 	wl->target_mem_map = NULL;
2079 
2080 	/*
2081 	 * FW channels must be re-calibrated after recovery,
2082 	 * save current Reg-Domain channel configuration and clear it.
2083 	 */
2084 	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2085 	       sizeof(wl->reg_ch_conf_pending));
2086 	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2087 }
2088 
2089 static void wlcore_op_stop(struct ieee80211_hw *hw, bool suspend)
2090 {
2091 	struct wl1271 *wl = hw->priv;
2092 
2093 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2094 
2095 	mutex_lock(&wl->mutex);
2096 
2097 	wlcore_op_stop_locked(wl);
2098 
2099 	mutex_unlock(&wl->mutex);
2100 }
2101 
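/*
 * Delayed work that fires when a channel switch did not complete in
 * time: report the failure to mac80211 and tell the firmware to stop
 * the switch.
 */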
2102 static void wlcore_channel_switch_work(struct work_struct *work)
2103 {
2104 	struct delayed_work *dwork;
2105 	struct wl1271 *wl;
2106 	struct ieee80211_vif *vif;
2107 	struct wl12xx_vif *wlvif;
2108 	int ret;
2109 
2110 	dwork = to_delayed_work(work);
2111 	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2112 	wl = wlvif->wl;
2113 
2114 	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2115 
2116 	mutex_lock(&wl->mutex);
2117 
2118 	if (unlikely(wl->state != WLCORE_STATE_ON))
2119 		goto out;
2120 
2121 	/* check the channel switch is still ongoing */
2122 	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2123 		goto out;
2124 
2125 	vif = wl12xx_wlvif_to_vif(wlvif);
2126 	ieee80211_chswitch_done(vif, false, 0);
2127 
2128 	ret = pm_runtime_resume_and_get(wl->dev);
2129 	if (ret < 0)
2130 		goto out;
2131 
2132 	wl12xx_cmd_stop_channel_switch(wl, wlvif);
2133 
2134 	pm_runtime_mark_last_busy(wl->dev);
2135 	pm_runtime_put_autosuspend(wl->dev);
2136 out:
2137 	mutex_unlock(&wl->mutex);
2138 }
2139 
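/*
 * Delayed work that reports a connection loss to mac80211 if the
 * station is still marked as associated when it fires.
 */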
2140 static void wlcore_connection_loss_work(struct work_struct *work)
2141 {
2142 	struct delayed_work *dwork;
2143 	struct wl1271 *wl;
2144 	struct ieee80211_vif *vif;
2145 	struct wl12xx_vif *wlvif;
2146 
2147 	dwork = to_delayed_work(work);
2148 	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2149 	wl = wlvif->wl;
2150 
2151 	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2152 
2153 	mutex_lock(&wl->mutex);
2154 
2155 	if (unlikely(wl->state != WLCORE_STATE_ON))
2156 		goto out;
2157 
2158 	/* Call mac80211 connection loss */
2159 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2160 		goto out;
2161 
2162 	vif = wl12xx_wlvif_to_vif(wlvif);
2163 	ieee80211_connection_loss(vif);
2164 out:
2165 	mutex_unlock(&wl->mutex);
2166 }
2167 
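/*
 * Delayed work that cancels the pending-auth ROC once
 * WLCORE_PEND_AUTH_ROC_TIMEOUT has passed since the last auth reply.
 */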
2168 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2169 {
2170 	struct delayed_work *dwork;
2171 	struct wl1271 *wl;
2172 	struct wl12xx_vif *wlvif;
2173 	unsigned long time_spare;
2174 	int ret;
2175 
2176 	dwork = to_delayed_work(work);
2177 	wlvif = container_of(dwork, struct wl12xx_vif,
2178 			     pending_auth_complete_work);
2179 	wl = wlvif->wl;
2180 
2181 	mutex_lock(&wl->mutex);
2182 
2183 	if (unlikely(wl->state != WLCORE_STATE_ON))
2184 		goto out;
2185 
2186 	/*
2187 	 * Make sure a second really passed since the last auth reply. Maybe
2188 	 * a second auth reply arrived while we were stuck on the mutex.
2189 	 * Check for a little less than the timeout to protect from scheduler
2190 	 * irregularities.
2191 	 */
2192 	time_spare = jiffies +
2193 			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2194 	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2195 		goto out;
2196 
2197 	ret = pm_runtime_resume_and_get(wl->dev);
2198 	if (ret < 0)
2199 		goto out;
2200 
2201 	/* cancel the ROC if active */
2202 	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2203 
2204 	pm_runtime_mark_last_busy(wl->dev);
2205 	pm_runtime_put_autosuspend(wl->dev);
2206 out:
2207 	mutex_unlock(&wl->mutex);
2208 }
2209 
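/*
 * Rate policy and keep-alive (KLV) template indices are handed out
 * from small bitmaps; the helpers below allocate and release entries
 * in those maps.
 */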
2210 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2211 {
2212 	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2213 					WL12XX_MAX_RATE_POLICIES);
2214 	if (policy >= WL12XX_MAX_RATE_POLICIES)
2215 		return -EBUSY;
2216 
2217 	__set_bit(policy, wl->rate_policies_map);
2218 	*idx = policy;
2219 	return 0;
2220 }
2221 
2222 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2223 {
2224 	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2225 		return;
2226 
2227 	__clear_bit(*idx, wl->rate_policies_map);
2228 	*idx = WL12XX_MAX_RATE_POLICIES;
2229 }
2230 
2231 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2232 {
2233 	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2234 					WLCORE_MAX_KLV_TEMPLATES);
2235 	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2236 		return -EBUSY;
2237 
2238 	__set_bit(policy, wl->klv_templates_map);
2239 	*idx = policy;
2240 	return 0;
2241 }
2242 
2243 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2244 {
2245 	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2246 		return;
2247 
2248 	__clear_bit(*idx, wl->klv_templates_map);
2249 	*idx = WLCORE_MAX_KLV_TEMPLATES;
2250 }
2251 
2252 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2253 {
2254 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2255 
2256 	switch (wlvif->bss_type) {
2257 	case BSS_TYPE_AP_BSS:
2258 		if (wlvif->p2p)
2259 			return WL1271_ROLE_P2P_GO;
2260 		else if (ieee80211_vif_is_mesh(vif))
2261 			return WL1271_ROLE_MESH_POINT;
2262 		else
2263 			return WL1271_ROLE_AP;
2264 
2265 	case BSS_TYPE_STA_BSS:
2266 		if (wlvif->p2p)
2267 			return WL1271_ROLE_P2P_CL;
2268 		else
2269 			return WL1271_ROLE_STA;
2270 
2271 	case BSS_TYPE_IBSS:
2272 		return WL1271_ROLE_IBSS;
2273 
2274 	default:
2275 		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2276 	}
2277 	return WL12XX_INVALID_ROLE_TYPE;
2278 }
2279 
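/*
 * Reset the per-vif state and derive the BSS type, rate policies,
 * link ids and work items from the mac80211 interface type.
 */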
2280 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2281 {
2282 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2283 	int i;
2284 
2285 	/* clear everything but the persistent data */
2286 	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2287 
2288 	switch (ieee80211_vif_type_p2p(vif)) {
2289 	case NL80211_IFTYPE_P2P_CLIENT:
2290 		wlvif->p2p = 1;
2291 		fallthrough;
2292 	case NL80211_IFTYPE_STATION:
2293 	case NL80211_IFTYPE_P2P_DEVICE:
2294 		wlvif->bss_type = BSS_TYPE_STA_BSS;
2295 		break;
2296 	case NL80211_IFTYPE_ADHOC:
2297 		wlvif->bss_type = BSS_TYPE_IBSS;
2298 		break;
2299 	case NL80211_IFTYPE_P2P_GO:
2300 		wlvif->p2p = 1;
2301 		fallthrough;
2302 	case NL80211_IFTYPE_AP:
2303 	case NL80211_IFTYPE_MESH_POINT:
2304 		wlvif->bss_type = BSS_TYPE_AP_BSS;
2305 		break;
2306 	default:
2307 		wlvif->bss_type = MAX_BSS_TYPE;
2308 		return -EOPNOTSUPP;
2309 	}
2310 
2311 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2312 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2313 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2314 
2315 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2316 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2317 		/* init sta/ibss data */
2318 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2319 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2320 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2321 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2322 		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2323 		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2324 		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2325 		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2326 	} else {
2327 		/* init ap data */
2328 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2329 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2330 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2331 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2332 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2333 			wl12xx_allocate_rate_policy(wl,
2334 						&wlvif->ap.ucast_rate_idx[i]);
2335 		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2336 		/*
2337 		 * TODO: check if basic_rate shouldn't be
2338 		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2339 		 * instead (the same thing for STA above).
2340 		*/
2341 		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2342 		/* TODO: this seems to be used only for STA, check it */
2343 		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2344 	}
2345 
2346 	wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2347 	wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2348 	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2349 
2350 	/*
2351 	 * mac80211 configures some values globally, while we treat them
2352 	 * per-interface. thus, on init, we have to copy them from wl
2353 	 */
2354 	wlvif->band = wl->band;
2355 	wlvif->channel = wl->channel;
2356 	wlvif->power_level = wl->power_level;
2357 	wlvif->channel_type = wl->channel_type;
2358 
2359 	INIT_WORK(&wlvif->rx_streaming_enable_work,
2360 		  wl1271_rx_streaming_enable_work);
2361 	INIT_WORK(&wlvif->rx_streaming_disable_work,
2362 		  wl1271_rx_streaming_disable_work);
2363 	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2364 	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2365 			  wlcore_channel_switch_work);
2366 	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2367 			  wlcore_connection_loss_work);
2368 	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2369 			  wlcore_pending_auth_complete_work);
2370 	INIT_LIST_HEAD(&wlvif->list);
2371 
2372 	timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
2373 	return 0;
2374 }
2375 
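/*
 * Power the chip up and boot the firmware, retrying up to
 * WL1271_BOOT_RETRIES times, then publish the hw/fw version info and
 * mark the core as ON.
 */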
2376 static int wl12xx_init_fw(struct wl1271 *wl)
2377 {
2378 	int retries = WL1271_BOOT_RETRIES;
2379 	bool booted = false;
2380 	struct wiphy *wiphy = wl->hw->wiphy;
2381 	int ret;
2382 
2383 	while (retries) {
2384 		retries--;
2385 		ret = wl12xx_chip_wakeup(wl, false);
2386 		if (ret < 0)
2387 			goto power_off;
2388 
2389 		ret = wl->ops->boot(wl);
2390 		if (ret < 0)
2391 			goto power_off;
2392 
2393 		ret = wl1271_hw_init(wl);
2394 		if (ret < 0)
2395 			goto irq_disable;
2396 
2397 		booted = true;
2398 		break;
2399 
2400 irq_disable:
2401 		mutex_unlock(&wl->mutex);
2402 		/* Unlocking the mutex in the middle of handling is
2403 		   inherently unsafe. In this case we deem it safe to do,
2404 		   because we need to let any possibly pending IRQ out of
2405 		   the system (and while we are WLCORE_STATE_OFF the IRQ
2406 		   work function will not do anything.) Also, any other
2407 		   possible concurrent operations will fail due to the
2408 		   current state, hence the wl1271 struct should be safe. */
2409 		wlcore_disable_interrupts(wl);
2410 		wl1271_flush_deferred_work(wl);
2411 		cancel_work_sync(&wl->netstack_work);
2412 		mutex_lock(&wl->mutex);
2413 power_off:
2414 		wl1271_power_off(wl);
2415 	}
2416 
2417 	if (!booted) {
2418 		wl1271_error("firmware boot failed despite %d retries",
2419 			     WL1271_BOOT_RETRIES);
2420 		goto out;
2421 	}
2422 
2423 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2424 
2425 	/* update hw/fw version info in wiphy struct */
2426 	wiphy->hw_version = wl->chip.id;
2427 	strscpy(wiphy->fw_version, wl->chip.fw_ver_str,
2428 		sizeof(wiphy->fw_version));
2429 
2430 	/*
2431 	 * Now we know if 11a is supported (info from the NVS), so disable
2432 	 * 11a channels if not supported
2433 	 */
2434 	if (!wl->enable_11a)
2435 		wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2436 
2437 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2438 		     wl->enable_11a ? "" : "not ");
2439 
2440 	wl->state = WLCORE_STATE_ON;
2441 out:
2442 	return ret;
2443 }
2444 
2445 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2446 {
2447 	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2448 }
2449 
2450 /*
2451  * Check whether a fw switch (i.e. moving from one loaded
2452  * fw to another) is needed. This function is also responsible
2453  * for updating wl->last_vif_count, so it must be called before
2454  * loading a non-plt fw (so the correct fw (single-role/multi-role)
2455  * will be used).
2456  */
2457 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2458 				  struct vif_counter_data vif_counter_data,
2459 				  bool add)
2460 {
2461 	enum wl12xx_fw_type current_fw = wl->fw_type;
2462 	u8 vif_count = vif_counter_data.counter;
2463 
2464 	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2465 		return false;
2466 
2467 	/* increase the vif count if this is a new vif */
2468 	if (add && !vif_counter_data.cur_vif_running)
2469 		vif_count++;
2470 
2471 	wl->last_vif_count = vif_count;
2472 
2473 	/* no need for fw change if the device is OFF */
2474 	if (wl->state == WLCORE_STATE_OFF)
2475 		return false;
2476 
2477 	/* no need for fw change if a single fw is used */
2478 	if (!wl->mr_fw_name)
2479 		return false;
2480 
2481 	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2482 		return true;
2483 	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2484 		return true;
2485 
2486 	return false;
2487 }
2488 
2489 /*
2490  * Enter "forced psm". Make sure the sta is in psm against the ap,
2491  * to make the fw switch a bit more disconnection-persistent.
2492  */
2493 static void wl12xx_force_active_psm(struct wl1271 *wl)
2494 {
2495 	struct wl12xx_vif *wlvif;
2496 
2497 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2498 		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2499 	}
2500 }
2501 
2502 struct wlcore_hw_queue_iter_data {
2503 	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2504 	/* current vif */
2505 	struct ieee80211_vif *vif;
2506 	/* is the current vif among those iterated */
2507 	bool cur_running;
2508 };
2509 
2510 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2511 				 struct ieee80211_vif *vif)
2512 {
2513 	struct wlcore_hw_queue_iter_data *iter_data = data;
2514 
2515 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2516 	    WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2517 		return;
2518 
2519 	if (iter_data->cur_running || vif == iter_data->vif) {
2520 		iter_data->cur_running = true;
2521 		return;
2522 	}
2523 
2524 	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2525 }
2526 
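/*
 * Pick a free block of hardware queues for this interface, reusing
 * the previously assigned base on resume/recovery, and reserve a cab
 * queue for AP interfaces.
 */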
2527 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2528 					 struct wl12xx_vif *wlvif)
2529 {
2530 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2531 	struct wlcore_hw_queue_iter_data iter_data = {};
2532 	int i, q_base;
2533 
2534 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2535 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2536 		return 0;
2537 	}
2538 
2539 	iter_data.vif = vif;
2540 
2541 	/* mark all bits taken by active interfaces */
2542 	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2543 					IEEE80211_IFACE_ITER_RESUME_ALL,
2544 					wlcore_hw_queue_iter, &iter_data);
2545 
2546 	/* the current vif is already running in mac80211 (resume/recovery) */
2547 	if (iter_data.cur_running) {
2548 		wlvif->hw_queue_base = vif->hw_queue[0];
2549 		wl1271_debug(DEBUG_MAC80211,
2550 			     "using pre-allocated hw queue base %d",
2551 			     wlvif->hw_queue_base);
2552 
2553 		/* the interface might have changed type */
2554 		goto adjust_cab_queue;
2555 	}
2556 
2557 	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2558 				     WLCORE_NUM_MAC_ADDRESSES);
2559 	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2560 		return -EBUSY;
2561 
2562 	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2563 	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2564 		     wlvif->hw_queue_base);
2565 
2566 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2567 		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2568 		/* register hw queues in mac80211 */
2569 		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2570 	}
2571 
2572 adjust_cab_queue:
2573 	/* the last places are reserved for cab queues per interface */
2574 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2575 		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2576 				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2577 	else
2578 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2579 
2580 	return 0;
2581 }
2582 
2583 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2584 				   struct ieee80211_vif *vif)
2585 {
2586 	struct wl1271 *wl = hw->priv;
2587 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2588 	struct vif_counter_data vif_count;
2589 	int ret = 0;
2590 	u8 role_type;
2591 
2592 	if (wl->plt) {
2593 		wl1271_error("Adding Interface not allowed while in PLT mode");
2594 		return -EBUSY;
2595 	}
2596 
2597 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2598 			     IEEE80211_VIF_SUPPORTS_UAPSD |
2599 			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2600 
2601 	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2602 		     ieee80211_vif_type_p2p(vif), vif->addr);
2603 
2604 	wl12xx_get_vif_count(hw, vif, &vif_count);
2605 
2606 	mutex_lock(&wl->mutex);
2607 
2608 	/*
2609 	 * in some corner-case HW recovery scenarios it's possible to
2610 	 * get here before __wl1271_op_remove_interface is complete, so
2611 	 * opt out if that is the case.
2612 	 */
2613 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2614 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2615 		ret = -EBUSY;
2616 		goto out_unlock;
2617 	}
2618 
2619 
2620 	ret = wl12xx_init_vif_data(wl, vif);
2621 	if (ret < 0)
2622 		goto out_unlock;
2623 
2624 	wlvif->wl = wl;
2625 	role_type = wl12xx_get_role_type(wl, wlvif);
2626 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2627 		ret = -EINVAL;
2628 		goto out_unlock;
2629 	}
2630 
2631 	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2632 	if (ret < 0)
2633 		goto out_unlock;
2634 
2635 	/*
2636 	 * TODO: after the nvs issue is solved, move this block
2637 	 * to start(), and make sure here the driver is ON.
2638 	 */
2639 	if (wl->state == WLCORE_STATE_OFF) {
2640 		/*
2641 		 * we still need this in order to configure the fw
2642 		 * while uploading the nvs
2643 		 */
2644 		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2645 
2646 		ret = wl12xx_init_fw(wl);
2647 		if (ret < 0)
2648 			goto out_unlock;
2649 	}
2650 
2651 	/*
2652 	 * Call runtime PM only after possible wl12xx_init_fw() above
2653 	 * is done. Otherwise we do not have interrupts enabled.
2654 	 */
2655 	ret = pm_runtime_resume_and_get(wl->dev);
2656 	if (ret < 0)
2657 		goto out_unlock;
2658 
2659 	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2660 		wl12xx_force_active_psm(wl);
2661 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2662 		mutex_unlock(&wl->mutex);
2663 		wl1271_recovery_work(&wl->recovery_work);
2664 		return 0;
2665 	}
2666 
2667 	if (!wlcore_is_p2p_mgmt(wlvif)) {
2668 		ret = wl12xx_cmd_role_enable(wl, vif->addr,
2669 					     role_type, &wlvif->role_id);
2670 		if (ret < 0)
2671 			goto out;
2672 
2673 		ret = wl1271_init_vif_specific(wl, vif);
2674 		if (ret < 0)
2675 			goto out;
2676 
2677 	} else {
2678 		ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2679 					     &wlvif->dev_role_id);
2680 		if (ret < 0)
2681 			goto out;
2682 
2683 		/* needed mainly for configuring rate policies */
2684 		ret = wl1271_sta_hw_init(wl, wlvif);
2685 		if (ret < 0)
2686 			goto out;
2687 	}
2688 
2689 	list_add(&wlvif->list, &wl->wlvif_list);
2690 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2691 
2692 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2693 		wl->ap_count++;
2694 	else
2695 		wl->sta_count++;
2696 out:
2697 	pm_runtime_mark_last_busy(wl->dev);
2698 	pm_runtime_put_autosuspend(wl->dev);
2699 out_unlock:
2700 	mutex_unlock(&wl->mutex);
2701 
2702 	return ret;
2703 }
2704 
2705 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2706 					 struct ieee80211_vif *vif,
2707 					 bool reset_tx_queues)
2708 {
2709 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2710 	int i, ret;
2711 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2712 
2713 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2714 
2715 	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2716 		return;
2717 
2718 	/* because of hardware recovery, we may get here twice */
2719 	if (wl->state == WLCORE_STATE_OFF)
2720 		return;
2721 
2722 	wl1271_info("down");
2723 
2724 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2725 	    wl->scan_wlvif == wlvif) {
2726 		struct cfg80211_scan_info info = {
2727 			.aborted = true,
2728 		};
2729 
2730 		/*
2731 		 * Rearm the tx watchdog just before idling scan. This
2732 		 * prevents just-finished scans from triggering the watchdog
2733 		 */
2734 		wl12xx_rearm_tx_watchdog_locked(wl);
2735 
2736 		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2737 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2738 		wl->scan_wlvif = NULL;
2739 		wl->scan.req = NULL;
2740 		ieee80211_scan_completed(wl->hw, &info);
2741 	}
2742 
2743 	if (wl->sched_vif == wlvif)
2744 		wl->sched_vif = NULL;
2745 
2746 	if (wl->roc_vif == vif) {
2747 		wl->roc_vif = NULL;
2748 		ieee80211_remain_on_channel_expired(wl->hw);
2749 	}
2750 
2751 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2752 		/* disable active roles */
2753 		ret = pm_runtime_resume_and_get(wl->dev);
2754 		if (ret < 0)
2755 			goto deinit;
2756 
2757 		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2758 		    wlvif->bss_type == BSS_TYPE_IBSS) {
2759 			if (wl12xx_dev_role_started(wlvif))
2760 				wl12xx_stop_dev(wl, wlvif);
2761 		}
2762 
2763 		if (!wlcore_is_p2p_mgmt(wlvif)) {
2764 			ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2765 			if (ret < 0) {
2766 				pm_runtime_put_noidle(wl->dev);
2767 				goto deinit;
2768 			}
2769 		} else {
2770 			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2771 			if (ret < 0) {
2772 				pm_runtime_put_noidle(wl->dev);
2773 				goto deinit;
2774 			}
2775 		}
2776 
2777 		pm_runtime_mark_last_busy(wl->dev);
2778 		pm_runtime_put_autosuspend(wl->dev);
2779 	}
2780 deinit:
2781 	wl12xx_tx_reset_wlvif(wl, wlvif);
2782 
2783 	/* clear all hlids (except system_hlid) */
2784 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2785 
2786 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2787 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2788 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2789 		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2790 		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2791 		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2792 		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2793 	} else {
2794 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2795 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2796 		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2797 		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2798 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2799 			wl12xx_free_rate_policy(wl,
2800 						&wlvif->ap.ucast_rate_idx[i]);
2801 		wl1271_free_ap_keys(wl, wlvif);
2802 	}
2803 
2804 	dev_kfree_skb(wlvif->probereq);
2805 	wlvif->probereq = NULL;
2806 	if (wl->last_wlvif == wlvif)
2807 		wl->last_wlvif = NULL;
2808 	list_del(&wlvif->list);
2809 	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2810 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2811 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2812 
2813 	if (is_ap)
2814 		wl->ap_count--;
2815 	else
2816 		wl->sta_count--;
2817 
2818 	/*
2819 	 * Last AP removed but stations remain: configure sleep auth according
2820 	 * to the STA setting. Don't do this on unintended recovery.
2821 	 */
2822 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2823 	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2824 		goto unlock;
2825 
2826 	if (wl->ap_count == 0 && is_ap) {
2827 		/* mask ap events */
2828 		wl->event_mask &= ~wl->ap_event_mask;
2829 		wl1271_event_unmask(wl);
2830 	}
2831 
2832 	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2833 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2834 		/* Configure for power according to debugfs */
2835 		if (sta_auth != WL1271_PSM_ILLEGAL)
2836 			wl1271_acx_sleep_auth(wl, sta_auth);
2837 		/* Configure for ELP power saving */
2838 		else
2839 			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2840 	}
2841 
2842 unlock:
2843 	mutex_unlock(&wl->mutex);
2844 
2845 	timer_delete_sync(&wlvif->rx_streaming_timer);
2846 	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2847 	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2848 	cancel_work_sync(&wlvif->rc_update_work);
2849 	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2850 	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2851 	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2852 
2853 	mutex_lock(&wl->mutex);
2854 }
2855 
2856 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2857 				       struct ieee80211_vif *vif)
2858 {
2859 	struct wl1271 *wl = hw->priv;
2860 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2861 	struct wl12xx_vif *iter;
2862 	struct vif_counter_data vif_count;
2863 
2864 	wl12xx_get_vif_count(hw, vif, &vif_count);
2865 	mutex_lock(&wl->mutex);
2866 
2867 	if (wl->state == WLCORE_STATE_OFF ||
2868 	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2869 		goto out;
2870 
2871 	/*
2872 	 * wl->vif can be null here if someone shuts down the interface
2873 	 * just when hardware recovery has been started.
2874 	 */
2875 	wl12xx_for_each_wlvif(wl, iter) {
2876 		if (iter != wlvif)
2877 			continue;
2878 
2879 		__wl1271_op_remove_interface(wl, vif, true);
2880 		break;
2881 	}
2882 	WARN_ON(iter != wlvif);
2883 	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2884 		wl12xx_force_active_psm(wl);
2885 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2886 		wl12xx_queue_recovery_work(wl);
2887 	}
2888 out:
2889 	mutex_unlock(&wl->mutex);
2890 }
2891 
2892 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2893 				      struct ieee80211_vif *vif,
2894 				      enum nl80211_iftype new_type, bool p2p)
2895 {
2896 	struct wl1271 *wl = hw->priv;
2897 	int ret;
2898 
2899 	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2900 	wl1271_op_remove_interface(hw, vif);
2901 
2902 	vif->type = new_type;
2903 	vif->p2p = p2p;
2904 	ret = wl1271_op_add_interface(hw, vif);
2905 
2906 	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2907 	return ret;
2908 }
2909 
2910 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2911 {
2912 	int ret;
2913 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2914 
2915 	/*
2916 	 * One of the side effects of the JOIN command is that it clears
2917 	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2918 	 * to a WPA/WPA2 access point will therefore kill the data-path.
2919 	 * Currently the only valid scenario for JOIN during association
2920 	 * is on roaming, in which case we will also be given new keys.
2921 	 * Keep the below message for now, unless it starts bothering
2922 	 * users who really like to roam a lot :)
2923 	 */
2924 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2925 		wl1271_info("JOIN while associated.");
2926 
2927 	/* clear encryption type */
2928 	wlvif->encryption_type = KEY_NONE;
2929 
2930 	if (is_ibss)
2931 		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2932 	else
2933 		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2934 
2935 	return ret;
2936 }
2937 
2938 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2939 			    int offset)
2940 {
2941 	u8 ssid_len;
2942 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2943 					 skb->len - offset);
2944 
2945 	if (!ptr) {
2946 		wl1271_error("No SSID in IEs!");
2947 		return -ENOENT;
2948 	}
2949 
2950 	ssid_len = ptr[1];
2951 	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2952 		wl1271_error("SSID is too long!");
2953 		return -EINVAL;
2954 	}
2955 
2956 	wlvif->ssid_len = ssid_len;
2957 	memcpy(wlvif->ssid, ptr+2, ssid_len);
2958 	return 0;
2959 }
2960 
2961 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2962 {
2963 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2964 	struct sk_buff *skb;
2965 	int ieoffset;
2966 
2967 	/* we currently only support setting the ssid from the ap probe req */
2968 	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2969 		return -EINVAL;
2970 
2971 	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2972 	if (!skb)
2973 		return -EINVAL;
2974 
2975 	ieoffset = offsetof(struct ieee80211_mgmt,
2976 			    u.probe_req.variable);
2977 	wl1271_ssid_set(wlvif, skb, ieoffset);
2978 	dev_kfree_skb(skb);
2979 
2980 	return 0;
2981 }
2982 
2983 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2984 			    struct ieee80211_bss_conf *bss_conf,
2985 			    u32 sta_rate_set)
2986 {
2987 	struct ieee80211_vif *vif = container_of(bss_conf, struct ieee80211_vif,
2988 						 bss_conf);
2989 	int ieoffset;
2990 	int ret;
2991 
2992 	wlvif->aid = vif->cfg.aid;
2993 	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chanreq.oper);
2994 	wlvif->beacon_int = bss_conf->beacon_int;
2995 	wlvif->wmm_enabled = bss_conf->qos;
2996 
2997 	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2998 
2999 	/*
3000 	 * with wl1271, we don't need to update the
3001 	 * beacon_int and dtim_period, because the firmware
3002 	 * updates them by itself when the first beacon is
3003 	 * received after a join.
3004 	 */
3005 	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
3006 	if (ret < 0)
3007 		return ret;
3008 
3009 	/*
3010 	 * Get a template for hardware connection maintenance
3011 	 */
3012 	dev_kfree_skb(wlvif->probereq);
3013 	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
3014 							wlvif,
3015 							NULL);
3016 	ieoffset = offsetof(struct ieee80211_mgmt,
3017 			    u.probe_req.variable);
3018 	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
3019 
3020 	/* enable the connection monitoring feature */
3021 	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
3022 	if (ret < 0)
3023 		return ret;
3024 
3025 	/*
3026 	 * The join command disables the keep-alive mode, shuts down its process,
3027 	 * and also clear the template config, so we need to reset it all after
3028 	 * the join. The acx_aid starts the keep-alive process, and the order
3029 	 * of the commands below is relevant.
3030 	 */
3031 	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
3032 	if (ret < 0)
3033 		return ret;
3034 
3035 	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
3036 	if (ret < 0)
3037 		return ret;
3038 
3039 	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
3040 	if (ret < 0)
3041 		return ret;
3042 
3043 	ret = wl1271_acx_keep_alive_config(wl, wlvif,
3044 					   wlvif->sta.klv_template_id,
3045 					   ACX_KEEP_ALIVE_TPL_VALID);
3046 	if (ret < 0)
3047 		return ret;
3048 
3049 	/*
3050 	 * The default fw psm configuration is AUTO, while mac80211 default
3051 	 * setting is off (ACTIVE), so sync the fw with the correct value.
3052 	 */
3053 	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
3054 	if (ret < 0)
3055 		return ret;
3056 
3057 	if (sta_rate_set) {
3058 		wlvif->rate_set =
3059 			wl1271_tx_enabled_rates_get(wl,
3060 						    sta_rate_set,
3061 						    wlvif->band);
3062 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3063 		if (ret < 0)
3064 			return ret;
3065 	}
3066 
3067 	return ret;
3068 }
3069 
3070 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3071 {
3072 	int ret;
3073 	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3074 
3075 	/* make sure we are associated (sta) */
3076 	if (sta &&
3077 	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3078 		return false;
3079 
3080 	/* make sure we are joined (ibss) */
3081 	if (!sta &&
3082 	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3083 		return false;
3084 
3085 	if (sta) {
3086 		/* use defaults when not associated */
3087 		wlvif->aid = 0;
3088 
3089 		/* free probe-request template */
3090 		dev_kfree_skb(wlvif->probereq);
3091 		wlvif->probereq = NULL;
3092 
3093 		/* disable connection monitor features */
3094 		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3095 		if (ret < 0)
3096 			return ret;
3097 
3098 		/* Disable the keep-alive feature */
3099 		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3100 		if (ret < 0)
3101 			return ret;
3102 
3103 		/* disable beacon filtering */
3104 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3105 		if (ret < 0)
3106 			return ret;
3107 	}
3108 
3109 	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3110 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3111 
3112 		wl12xx_cmd_stop_channel_switch(wl, wlvif);
3113 		ieee80211_chswitch_done(vif, false, 0);
3114 		cancel_delayed_work(&wlvif->channel_switch_work);
3115 	}
3116 
3117 	/* invalidate keep-alive template */
3118 	wl1271_acx_keep_alive_config(wl, wlvif,
3119 				     wlvif->sta.klv_template_id,
3120 				     ACX_KEEP_ALIVE_TPL_INVALID);
3121 
3122 	return 0;
3123 }
3124 
3125 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3126 {
3127 	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3128 	wlvif->rate_set = wlvif->basic_rate_set;
3129 }
3130 
3131 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3132 				   bool idle)
3133 {
3134 	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3135 
3136 	if (idle == cur_idle)
3137 		return;
3138 
3139 	if (idle) {
3140 		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3141 	} else {
3142 		/* The current firmware only supports sched_scan in idle */
3143 		if (wl->sched_vif == wlvif)
3144 			wl->ops->sched_scan_stop(wl, wlvif);
3145 
3146 		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3147 	}
3148 }
3149 
3150 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3151 			     struct ieee80211_conf *conf, u32 changed)
3152 {
3153 	int ret;
3154 
3155 	if (wlcore_is_p2p_mgmt(wlvif))
3156 		return 0;
3157 
3158 	if (conf->power_level != wlvif->power_level) {
3159 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3160 		if (ret < 0)
3161 			return ret;
3162 
3163 		wlvif->power_level = conf->power_level;
3164 	}
3165 
3166 	return 0;
3167 }
3168 
3169 static int wl1271_op_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
3170 {
3171 	struct wl1271 *wl = hw->priv;
3172 	struct wl12xx_vif *wlvif;
3173 	struct ieee80211_conf *conf = &hw->conf;
3174 	int ret = 0;
3175 
3176 	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3177 		     " changed 0x%x",
3178 		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3179 		     conf->power_level,
3180 		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3181 			 changed);
3182 
3183 	mutex_lock(&wl->mutex);
3184 
3185 	if (changed & IEEE80211_CONF_CHANGE_POWER)
3186 		wl->power_level = conf->power_level;
3187 
3188 	if (unlikely(wl->state != WLCORE_STATE_ON))
3189 		goto out;
3190 
3191 	ret = pm_runtime_resume_and_get(wl->dev);
3192 	if (ret < 0)
3193 		goto out;
3194 
3195 	/* configure each interface */
3196 	wl12xx_for_each_wlvif(wl, wlvif) {
3197 		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3198 		if (ret < 0)
3199 			goto out_sleep;
3200 	}
3201 
3202 out_sleep:
3203 	pm_runtime_mark_last_busy(wl->dev);
3204 	pm_runtime_put_autosuspend(wl->dev);
3205 
3206 out:
3207 	mutex_unlock(&wl->mutex);
3208 
3209 	return ret;
3210 }
3211 
3212 struct wl1271_filter_params {
3213 	bool enabled;
3214 	int mc_list_length;
3215 	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3216 };
3217 
3218 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3219 				       struct netdev_hw_addr_list *mc_list)
3220 {
3221 	struct wl1271_filter_params *fp;
3222 	struct netdev_hw_addr *ha;
3223 
3224 	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3225 	if (!fp) {
3226 		wl1271_error("Out of memory setting filters.");
3227 		return 0;
3228 	}
3229 
3230 	/* update multicast filtering parameters */
3231 	fp->mc_list_length = 0;
3232 	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3233 		fp->enabled = false;
3234 	} else {
3235 		fp->enabled = true;
3236 		netdev_hw_addr_list_for_each(ha, mc_list) {
3237 			memcpy(fp->mc_list[fp->mc_list_length],
3238 					ha->addr, ETH_ALEN);
3239 			fp->mc_list_length++;
3240 		}
3241 	}
3242 
3243 	return (u64)(unsigned long)fp;
3244 }
3245 
3246 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3247 				  FIF_FCSFAIL | \
3248 				  FIF_BCN_PRBRESP_PROMISC | \
3249 				  FIF_CONTROL | \
3250 				  FIF_OTHER_BSS)
3251 
3252 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3253 				       unsigned int changed,
3254 				       unsigned int *total, u64 multicast)
3255 {
3256 	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3257 	struct wl1271 *wl = hw->priv;
3258 	struct wl12xx_vif *wlvif;
3259 
3260 	int ret;
3261 
3262 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3263 		     " total %x", changed, *total);
3264 
3265 	mutex_lock(&wl->mutex);
3266 
3267 	*total &= WL1271_SUPPORTED_FILTERS;
3268 	changed &= WL1271_SUPPORTED_FILTERS;
3269 
3270 	if (unlikely(wl->state != WLCORE_STATE_ON))
3271 		goto out;
3272 
3273 	ret = pm_runtime_resume_and_get(wl->dev);
3274 	if (ret < 0)
3275 		goto out;
3276 
3277 	wl12xx_for_each_wlvif(wl, wlvif) {
3278 		if (wlcore_is_p2p_mgmt(wlvif))
3279 			continue;
3280 
3281 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3282 			if (*total & FIF_ALLMULTI)
3283 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3284 								   false,
3285 								   NULL, 0);
3286 			else if (fp)
3287 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3288 							fp->enabled,
3289 							fp->mc_list,
3290 							fp->mc_list_length);
3291 			if (ret < 0)
3292 				goto out_sleep;
3293 		}
3294 
3295 		/*
3296 		 * If the interface is in AP mode and was created with allmulticast,
3297 		 * disable the firmware filters so that all multicast packets are
3298 		 * passed. This is mandatory for mDNS-based discovery protocols.
3299 		 */
3300 		if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3301 			if (*total & FIF_ALLMULTI) {
3302 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3303 							false,
3304 							NULL, 0);
3305 				if (ret < 0)
3306 					goto out_sleep;
3307 			}
3308 		}
3309 	}
3310 
3311 	/*
3312 	 * the fw doesn't provide an api to configure the filters. instead,
3313 	 * the filters configuration is based on the active roles / ROC
3314 	 * state.
3315 	 */
3316 
3317 out_sleep:
3318 	pm_runtime_mark_last_busy(wl->dev);
3319 	pm_runtime_put_autosuspend(wl->dev);
3320 
3321 out:
3322 	mutex_unlock(&wl->mutex);
3323 	kfree(fp);
3324 }
3325 
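/*
 * AP keys configured before the AP role is started cannot be
 * programmed into the firmware yet; record them so that
 * wl1271_ap_init_hwenc() can replay them later.
 */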
3326 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3327 				u8 id, u8 key_type, u8 key_size,
3328 				const u8 *key, u8 hlid, u32 tx_seq_32,
3329 				u16 tx_seq_16, bool is_pairwise)
3330 {
3331 	struct wl1271_ap_key *ap_key;
3332 	int i;
3333 
3334 	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3335 
3336 	if (key_size > MAX_KEY_SIZE)
3337 		return -EINVAL;
3338 
3339 	/*
3340 	 * Find next free entry in ap_keys. Also check we are not replacing
3341 	 * an existing key.
3342 	 */
3343 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3344 		if (wlvif->ap.recorded_keys[i] == NULL)
3345 			break;
3346 
3347 		if (wlvif->ap.recorded_keys[i]->id == id) {
3348 			wl1271_warning("trying to record key replacement");
3349 			return -EINVAL;
3350 		}
3351 	}
3352 
3353 	if (i == MAX_NUM_KEYS)
3354 		return -EBUSY;
3355 
3356 	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3357 	if (!ap_key)
3358 		return -ENOMEM;
3359 
3360 	ap_key->id = id;
3361 	ap_key->key_type = key_type;
3362 	ap_key->key_size = key_size;
3363 	memcpy(ap_key->key, key, key_size);
3364 	ap_key->hlid = hlid;
3365 	ap_key->tx_seq_32 = tx_seq_32;
3366 	ap_key->tx_seq_16 = tx_seq_16;
3367 	ap_key->is_pairwise = is_pairwise;
3368 
3369 	wlvif->ap.recorded_keys[i] = ap_key;
3370 	return 0;
3371 }
3372 
3373 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3374 {
3375 	int i;
3376 
3377 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3378 		kfree(wlvif->ap.recorded_keys[i]);
3379 		wlvif->ap.recorded_keys[i] = NULL;
3380 	}
3381 }
3382 
3383 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3384 {
3385 	int i, ret = 0;
3386 	struct wl1271_ap_key *key;
3387 	bool wep_key_added = false;
3388 
3389 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3390 		u8 hlid;
3391 		if (wlvif->ap.recorded_keys[i] == NULL)
3392 			break;
3393 
3394 		key = wlvif->ap.recorded_keys[i];
3395 		hlid = key->hlid;
3396 		if (hlid == WL12XX_INVALID_LINK_ID)
3397 			hlid = wlvif->ap.bcast_hlid;
3398 
3399 		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3400 					    key->id, key->key_type,
3401 					    key->key_size, key->key,
3402 					    hlid, key->tx_seq_32,
3403 					    key->tx_seq_16, key->is_pairwise);
3404 		if (ret < 0)
3405 			goto out;
3406 
3407 		if (key->key_type == KEY_WEP)
3408 			wep_key_added = true;
3409 	}
3410 
3411 	if (wep_key_added) {
3412 		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3413 						     wlvif->ap.bcast_hlid);
3414 		if (ret < 0)
3415 			goto out;
3416 	}
3417 
3418 out:
3419 	wl1271_free_ap_keys(wl, wlvif);
3420 	return ret;
3421 }
3422 
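/*
 * Program a key into the firmware. For AP interfaces the key is
 * either recorded for later (AP not started yet) or set per-link;
 * for STA/IBSS it is set against the peer or broadcast address.
 */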
3423 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3424 		       u16 action, u8 id, u8 key_type,
3425 		       u8 key_size, const u8 *key, u32 tx_seq_32,
3426 		       u16 tx_seq_16, struct ieee80211_sta *sta,
3427 		       bool is_pairwise)
3428 {
3429 	int ret;
3430 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3431 
3432 	if (is_ap) {
3433 		struct wl1271_station *wl_sta;
3434 		u8 hlid;
3435 
3436 		if (sta) {
3437 			wl_sta = (struct wl1271_station *)sta->drv_priv;
3438 			hlid = wl_sta->hlid;
3439 		} else {
3440 			hlid = wlvif->ap.bcast_hlid;
3441 		}
3442 
3443 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3444 			/*
3445 			 * We do not support removing keys after AP shutdown.
3446 			 * Pretend we do to make mac80211 happy.
3447 			 */
3448 			if (action != KEY_ADD_OR_REPLACE)
3449 				return 0;
3450 
3451 			ret = wl1271_record_ap_key(wl, wlvif, id,
3452 					     key_type, key_size,
3453 					     key, hlid, tx_seq_32,
3454 					     tx_seq_16, is_pairwise);
3455 		} else {
3456 			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3457 					     id, key_type, key_size,
3458 					     key, hlid, tx_seq_32,
3459 					     tx_seq_16, is_pairwise);
3460 		}
3461 
3462 		if (ret < 0)
3463 			return ret;
3464 	} else {
3465 		const u8 *addr;
3466 		static const u8 bcast_addr[ETH_ALEN] = {
3467 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3468 		};
3469 
3470 		addr = sta ? sta->addr : bcast_addr;
3471 
3472 		if (is_zero_ether_addr(addr)) {
3473 			/* We don't support TX-only encryption */
3474 			return -EOPNOTSUPP;
3475 		}
3476 
3477 		/* The wl1271 does not allow removing unicast keys - they
3478 		   will be cleared automatically on the next CMD_JOIN. Ignore the
3479 		   request silently, as we don't want mac80211 to emit
3480 		   an error message. */
3481 		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3482 			return 0;
3483 
3484 		/* don't remove key if hlid was already deleted */
3485 		if (action == KEY_REMOVE &&
3486 		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3487 			return 0;
3488 
3489 		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3490 					     id, key_type, key_size,
3491 					     key, addr, tx_seq_32,
3492 					     tx_seq_16);
3493 		if (ret < 0)
3494 			return ret;
3495 
3496 	}
3497 
3498 	return 0;
3499 }
3500 
3501 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3502 			     struct ieee80211_vif *vif,
3503 			     struct ieee80211_sta *sta,
3504 			     struct ieee80211_key_conf *key_conf)
3505 {
3506 	struct wl1271 *wl = hw->priv;
3507 	int ret;
3508 	bool might_change_spare =
3509 		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3510 		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3511 
3512 	if (might_change_spare) {
3513 		/*
3514 		 * stop the queues and flush to ensure the next packets are
3515 		 * in sync with FW spare block accounting
3516 		 */
3517 		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3518 		wl1271_tx_flush(wl);
3519 	}
3520 
3521 	mutex_lock(&wl->mutex);
3522 
3523 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3524 		ret = -EAGAIN;
3525 		goto out_wake_queues;
3526 	}
3527 
3528 	ret = pm_runtime_resume_and_get(wl->dev);
3529 	if (ret < 0)
3530 		goto out_wake_queues;
3531 
3532 	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3533 
3534 	pm_runtime_mark_last_busy(wl->dev);
3535 	pm_runtime_put_autosuspend(wl->dev);
3536 
3537 out_wake_queues:
3538 	if (might_change_spare)
3539 		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3540 
3541 	mutex_unlock(&wl->mutex);
3542 
3543 	return ret;
3544 }
3545 
3546 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3547 		   struct ieee80211_vif *vif,
3548 		   struct ieee80211_sta *sta,
3549 		   struct ieee80211_key_conf *key_conf)
3550 {
3551 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3552 	int ret;
3553 	u32 tx_seq_32 = 0;
3554 	u16 tx_seq_16 = 0;
3555 	u8 key_type;
3556 	u8 hlid;
3557 	bool is_pairwise;
3558 
3559 	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3560 
3561 	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3562 	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3563 		     key_conf->cipher, key_conf->keyidx,
3564 		     key_conf->keylen, key_conf->flags);
3565 	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3566 
3567 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3568 		if (sta) {
3569 			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3570 			hlid = wl_sta->hlid;
3571 		} else {
3572 			hlid = wlvif->ap.bcast_hlid;
3573 		}
3574 	else
3575 		hlid = wlvif->sta.hlid;
3576 
3577 	if (hlid != WL12XX_INVALID_LINK_ID) {
3578 		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3579 		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3580 		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3581 	}
3582 
3583 	switch (key_conf->cipher) {
3584 	case WLAN_CIPHER_SUITE_WEP40:
3585 	case WLAN_CIPHER_SUITE_WEP104:
3586 		key_type = KEY_WEP;
3587 
3588 		key_conf->hw_key_idx = key_conf->keyidx;
3589 		break;
3590 	case WLAN_CIPHER_SUITE_TKIP:
3591 		key_type = KEY_TKIP;
3592 		key_conf->hw_key_idx = key_conf->keyidx;
3593 		break;
3594 	case WLAN_CIPHER_SUITE_CCMP:
3595 		key_type = KEY_AES;
3596 		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3597 		break;
3598 	case WL1271_CIPHER_SUITE_GEM:
3599 		key_type = KEY_GEM;
3600 		break;
3601 	default:
3602 		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3603 
3604 		return -EOPNOTSUPP;
3605 	}
3606 
3607 	is_pairwise = key_conf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
3608 
3609 	switch (cmd) {
3610 	case SET_KEY:
3611 		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3612 				 key_conf->keyidx, key_type,
3613 				 key_conf->keylen, key_conf->key,
3614 				 tx_seq_32, tx_seq_16, sta, is_pairwise);
3615 		if (ret < 0) {
3616 			wl1271_error("Could not add or replace key");
3617 			return ret;
3618 		}
3619 
3620 		/* Store AP encryption key type */
3621 		if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3622 			wlvif->encryption_type = key_type;
3623 
3624 		/*
3625 		 * reconfigure the ARP response template if the unicast
3626 		 * (or common) encryption key type was changed
3627 		 */
3628 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3629 		    (sta || key_type == KEY_WEP) &&
3630 		    wlvif->encryption_type != key_type) {
3631 			wlvif->encryption_type = key_type;
3632 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3633 			if (ret < 0) {
3634 				wl1271_warning("build arp rsp failed: %d", ret);
3635 				return ret;
3636 			}
3637 		}
3638 		break;
3639 
3640 	case DISABLE_KEY:
3641 		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3642 				     key_conf->keyidx, key_type,
3643 				     key_conf->keylen, key_conf->key,
3644 				     0, 0, sta, is_pairwise);
3645 		if (ret < 0) {
3646 			wl1271_error("Could not remove key");
3647 			return ret;
3648 		}
3649 		break;
3650 
3651 	default:
3652 		wl1271_error("Unsupported key cmd 0x%x", cmd);
3653 		return -EOPNOTSUPP;
3654 	}
3655 
3656 	return ret;
3657 }
3658 EXPORT_SYMBOL_GPL(wlcore_set_key);
3659 
3660 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3661 					  struct ieee80211_vif *vif,
3662 					  int key_idx)
3663 {
3664 	struct wl1271 *wl = hw->priv;
3665 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3666 	int ret;
3667 
3668 	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3669 		     key_idx);
3670 
3671 	/* we don't handle unsetting of default key */
3672 	if (key_idx == -1)
3673 		return;
3674 
3675 	mutex_lock(&wl->mutex);
3676 
3677 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3678 		ret = -EAGAIN;
3679 		goto out_unlock;
3680 	}
3681 
3682 	ret = pm_runtime_resume_and_get(wl->dev);
3683 	if (ret < 0)
3684 		goto out_unlock;
3685 
3686 	wlvif->default_key = key_idx;
3687 
3688 	/* the default WEP key needs to be configured at least once */
3689 	if (wlvif->encryption_type == KEY_WEP) {
3690 		ret = wl12xx_cmd_set_default_wep_key(wl,
3691 				key_idx,
3692 				wlvif->sta.hlid);
3693 		if (ret < 0)
3694 			goto out_sleep;
3695 	}
3696 
3697 out_sleep:
3698 	pm_runtime_mark_last_busy(wl->dev);
3699 	pm_runtime_put_autosuspend(wl->dev);
3700 
3701 out_unlock:
3702 	mutex_unlock(&wl->mutex);
3703 }
3704 
3705 void wlcore_regdomain_config(struct wl1271 *wl)
3706 {
3707 	int ret;
3708 
3709 	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3710 		return;
3711 
3712 	mutex_lock(&wl->mutex);
3713 
3714 	if (unlikely(wl->state != WLCORE_STATE_ON))
3715 		goto out;
3716 
3717 	ret = pm_runtime_resume_and_get(wl->dev);
3718 	if (ret < 0)
3719 		goto out;
3720 
3721 	ret = wlcore_cmd_regdomain_config_locked(wl);
3722 	if (ret < 0) {
3723 		wl12xx_queue_recovery_work(wl);
3724 		goto out;
3725 	}
3726 
3727 	pm_runtime_mark_last_busy(wl->dev);
3728 	pm_runtime_put_autosuspend(wl->dev);
3729 out:
3730 	mutex_unlock(&wl->mutex);
3731 }
3732 
3733 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3734 			     struct ieee80211_vif *vif,
3735 			     struct ieee80211_scan_request *hw_req)
3736 {
3737 	struct cfg80211_scan_request *req = &hw_req->req;
3738 	struct wl1271 *wl = hw->priv;
3739 	int ret;
3740 	u8 *ssid = NULL;
3741 	size_t len = 0;
3742 
3743 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3744 
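	/* only the first requested SSID (if any) is passed to the scan */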
3745 	if (req->n_ssids) {
3746 		ssid = req->ssids[0].ssid;
3747 		len = req->ssids[0].ssid_len;
3748 	}
3749 
3750 	mutex_lock(&wl->mutex);
3751 
3752 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3753 		/*
3754 		 * We cannot return -EBUSY here because cfg80211 will expect
3755 		 * a call to ieee80211_scan_completed if we do - in this case
3756 		 * there won't be any call.
3757 		 */
3758 		ret = -EAGAIN;
3759 		goto out;
3760 	}
3761 
3762 	ret = pm_runtime_resume_and_get(wl->dev);
3763 	if (ret < 0)
3764 		goto out;
3765 
3766 	/* fail if there is any role in ROC */
3767 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3768 		/* don't allow scanning right now */
3769 		ret = -EBUSY;
3770 		goto out_sleep;
3771 	}
3772 
3773 	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3774 out_sleep:
3775 	pm_runtime_mark_last_busy(wl->dev);
3776 	pm_runtime_put_autosuspend(wl->dev);
3777 out:
3778 	mutex_unlock(&wl->mutex);
3779 
3780 	return ret;
3781 }
3782 
3783 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3784 				     struct ieee80211_vif *vif)
3785 {
3786 	struct wl1271 *wl = hw->priv;
3787 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3788 	struct cfg80211_scan_info info = {
3789 		.aborted = true,
3790 	};
3791 	int ret;
3792 
3793 	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3794 
3795 	mutex_lock(&wl->mutex);
3796 
3797 	if (unlikely(wl->state != WLCORE_STATE_ON))
3798 		goto out;
3799 
3800 	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3801 		goto out;
3802 
3803 	ret = pm_runtime_resume_and_get(wl->dev);
3804 	if (ret < 0)
3805 		goto out;
3806 
3807 	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3808 		ret = wl->ops->scan_stop(wl, wlvif);
3809 		if (ret < 0)
3810 			goto out_sleep;
3811 	}
3812 
3813 	/*
3814 	 * Rearm the tx watchdog just before idling scan. This
3815 	 * prevents just-finished scans from triggering the watchdog.
3816 	 */
3817 	wl12xx_rearm_tx_watchdog_locked(wl);
3818 
3819 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3820 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3821 	wl->scan_wlvif = NULL;
3822 	wl->scan.req = NULL;
3823 	ieee80211_scan_completed(wl->hw, &info);
3824 
3825 out_sleep:
3826 	pm_runtime_mark_last_busy(wl->dev);
3827 	pm_runtime_put_autosuspend(wl->dev);
3828 out:
3829 	mutex_unlock(&wl->mutex);
3830 
3831 	cancel_delayed_work_sync(&wl->scan_complete_work);
3832 }
3833 
3834 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3835 				      struct ieee80211_vif *vif,
3836 				      struct cfg80211_sched_scan_request *req,
3837 				      struct ieee80211_scan_ies *ies)
3838 {
3839 	struct wl1271 *wl = hw->priv;
3840 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3841 	int ret;
3842 
3843 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3844 
3845 	mutex_lock(&wl->mutex);
3846 
3847 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3848 		ret = -EAGAIN;
3849 		goto out;
3850 	}
3851 
3852 	ret = pm_runtime_resume_and_get(wl->dev);
3853 	if (ret < 0)
3854 		goto out;
3855 
3856 	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3857 	if (ret < 0)
3858 		goto out_sleep;
3859 
3860 	wl->sched_vif = wlvif;
3861 
3862 out_sleep:
3863 	pm_runtime_mark_last_busy(wl->dev);
3864 	pm_runtime_put_autosuspend(wl->dev);
3865 out:
3866 	mutex_unlock(&wl->mutex);
3867 	return ret;
3868 }
3869 
3870 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3871 				     struct ieee80211_vif *vif)
3872 {
3873 	struct wl1271 *wl = hw->priv;
3874 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3875 	int ret;
3876 
3877 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3878 
3879 	mutex_lock(&wl->mutex);
3880 
3881 	if (unlikely(wl->state != WLCORE_STATE_ON))
3882 		goto out;
3883 
3884 	ret = pm_runtime_resume_and_get(wl->dev);
3885 	if (ret < 0)
3886 		goto out;
3887 
3888 	wl->ops->sched_scan_stop(wl, wlvif);
3889 
3890 	pm_runtime_mark_last_busy(wl->dev);
3891 	pm_runtime_put_autosuspend(wl->dev);
3892 out:
3893 	mutex_unlock(&wl->mutex);
3894 
3895 	return 0;
3896 }
3897 
3898 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw,
3899 					int radio_idx, u32 value)
3900 {
3901 	struct wl1271 *wl = hw->priv;
3902 	int ret = 0;
3903 
3904 	mutex_lock(&wl->mutex);
3905 
3906 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3907 		ret = -EAGAIN;
3908 		goto out;
3909 	}
3910 
3911 	ret = pm_runtime_resume_and_get(wl->dev);
3912 	if (ret < 0)
3913 		goto out;
3914 
3915 	ret = wl1271_acx_frag_threshold(wl, value);
3916 	if (ret < 0)
3917 		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3918 
3919 	pm_runtime_mark_last_busy(wl->dev);
3920 	pm_runtime_put_autosuspend(wl->dev);
3921 
3922 out:
3923 	mutex_unlock(&wl->mutex);
3924 
3925 	return ret;
3926 }
3927 
3928 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
3929 				       u32 value)
3930 {
3931 	struct wl1271 *wl = hw->priv;
3932 	struct wl12xx_vif *wlvif;
3933 	int ret = 0;
3934 
3935 	mutex_lock(&wl->mutex);
3936 
3937 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3938 		ret = -EAGAIN;
3939 		goto out;
3940 	}
3941 
3942 	ret = pm_runtime_resume_and_get(wl->dev);
3943 	if (ret < 0)
3944 		goto out;
3945 
3946 	wl12xx_for_each_wlvif(wl, wlvif) {
3947 		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3948 		if (ret < 0)
3949 			wl1271_warning("set rts threshold failed: %d", ret);
3950 	}
3951 	pm_runtime_mark_last_busy(wl->dev);
3952 	pm_runtime_put_autosuspend(wl->dev);
3953 
3954 out:
3955 	mutex_unlock(&wl->mutex);
3956 
3957 	return ret;
3958 }
3959 
3960 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3961 {
3962 	int len;
3963 	const u8 *next, *end = skb->data + skb->len;
3964 	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3965 					skb->len - ieoffset);
3966 	if (!ie)
3967 		return;
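	/* strip the IE in place: move the remaining IEs down and trim the skb */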
3968 	len = ie[1] + 2;
3969 	next = ie + len;
3970 	memmove(ie, next, end - next);
3971 	skb_trim(skb, skb->len - len);
3972 }
3973 
3974 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3975 					    unsigned int oui, u8 oui_type,
3976 					    int ieoffset)
3977 {
3978 	int len;
3979 	const u8 *next, *end = skb->data + skb->len;
3980 	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3981 					       skb->data + ieoffset,
3982 					       skb->len - ieoffset);
3983 	if (!ie)
3984 		return;
3985 	len = ie[1] + 2;
3986 	next = ie + len;
3987 	memmove(ie, next, end - next);
3988 	skb_trim(skb, skb->len - len);
3989 }
3990 
3991 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3992 					 struct ieee80211_vif *vif)
3993 {
3994 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3995 	struct sk_buff *skb;
3996 	int ret;
3997 
3998 	skb = ieee80211_proberesp_get(wl->hw, vif);
3999 	if (!skb)
4000 		return -EOPNOTSUPP;
4001 
4002 	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4003 				      CMD_TEMPL_AP_PROBE_RESPONSE,
4004 				      skb->data,
4005 				      skb->len, 0,
4006 				      rates);
4007 	dev_kfree_skb(skb);
4008 
4009 	if (ret < 0)
4010 		goto out;
4011 
4012 	wl1271_debug(DEBUG_AP, "probe response updated");
4013 	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
4014 
4015 out:
4016 	return ret;
4017 }
4018 
4019 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
4020 					     struct ieee80211_vif *vif,
4021 					     u8 *probe_rsp_data,
4022 					     size_t probe_rsp_len,
4023 					     u32 rates)
4024 {
4025 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4026 	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
4027 	int ssid_ie_offset, ie_offset, templ_len;
4028 	const u8 *ptr;
4029 
4030 	/* no need to change probe response if the SSID is set correctly */
4031 	if (wlvif->ssid_len > 0)
4032 		return wl1271_cmd_template_set(wl, wlvif->role_id,
4033 					       CMD_TEMPL_AP_PROBE_RESPONSE,
4034 					       probe_rsp_data,
4035 					       probe_rsp_len, 0,
4036 					       rates);
4037 
4038 	if (probe_rsp_len + vif->cfg.ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
4039 		wl1271_error("probe_rsp template too big");
4040 		return -EINVAL;
4041 	}
4042 
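	/*
	 * wlvif->ssid_len is 0 at this point, so rebuild the template:
	 * copy everything up to the SSID IE, insert the SSID from bss_conf,
	 * then append the IEs that follow it.
	 */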
4043 	/* start searching from IE offset */
4044 	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
4045 
4046 	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
4047 			       probe_rsp_len - ie_offset);
4048 	if (!ptr) {
4049 		wl1271_error("No SSID in beacon!");
4050 		return -EINVAL;
4051 	}
4052 
4053 	ssid_ie_offset = ptr - probe_rsp_data;
4054 	ptr += (ptr[1] + 2);
4055 
4056 	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
4057 
4058 	/* insert SSID from bss_conf */
4059 	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
4060 	probe_rsp_templ[ssid_ie_offset + 1] = vif->cfg.ssid_len;
4061 	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
4062 	       vif->cfg.ssid, vif->cfg.ssid_len);
4063 	templ_len = ssid_ie_offset + 2 + vif->cfg.ssid_len;
4064 
4065 	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + vif->cfg.ssid_len,
4066 	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
4067 	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
4068 
4069 	return wl1271_cmd_template_set(wl, wlvif->role_id,
4070 				       CMD_TEMPL_AP_PROBE_RESPONSE,
4071 				       probe_rsp_templ,
4072 				       templ_len, 0,
4073 				       rates);
4074 }
4075 
4076 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
4077 				       struct ieee80211_vif *vif,
4078 				       struct ieee80211_bss_conf *bss_conf,
4079 				       u32 changed)
4080 {
4081 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4082 	int ret = 0;
4083 
4084 	if (changed & BSS_CHANGED_ERP_SLOT) {
4085 		if (bss_conf->use_short_slot)
4086 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4087 		else
4088 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4089 		if (ret < 0) {
4090 			wl1271_warning("Set slot time failed %d", ret);
4091 			goto out;
4092 		}
4093 	}
4094 
4095 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4096 		if (bss_conf->use_short_preamble)
4097 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4098 		else
4099 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4100 	}
4101 
4102 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4103 		if (bss_conf->use_cts_prot)
4104 			ret = wl1271_acx_cts_protect(wl, wlvif,
4105 						     CTSPROTECT_ENABLE);
4106 		else
4107 			ret = wl1271_acx_cts_protect(wl, wlvif,
4108 						     CTSPROTECT_DISABLE);
4109 		if (ret < 0) {
4110 			wl1271_warning("Set ctsprotect failed %d", ret);
4111 			goto out;
4112 		}
4113 	}
4114 
4115 out:
4116 	return ret;
4117 }
4118 
4119 static int wlcore_set_beacon_template(struct wl1271 *wl,
4120 				      struct ieee80211_vif *vif,
4121 				      bool is_ap)
4122 {
4123 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4124 	struct ieee80211_hdr *hdr;
4125 	u32 min_rate;
4126 	int ret;
4127 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4128 	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif, 0);
4129 	u16 tmpl_id;
4130 
4131 	if (!beacon) {
4132 		ret = -EINVAL;
4133 		goto out;
4134 	}
4135 
4136 	wl1271_debug(DEBUG_MASTER, "beacon updated");
4137 
4138 	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4139 	if (ret < 0) {
4140 		dev_kfree_skb(beacon);
4141 		goto out;
4142 	}
4143 	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4144 	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4145 		CMD_TEMPL_BEACON;
4146 	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4147 				      beacon->data,
4148 				      beacon->len, 0,
4149 				      min_rate);
4150 	if (ret < 0) {
4151 		dev_kfree_skb(beacon);
4152 		goto out;
4153 	}
4154 
4155 	wlvif->wmm_enabled =
4156 		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4157 					WLAN_OUI_TYPE_MICROSOFT_WMM,
4158 					beacon->data + ieoffset,
4159 					beacon->len - ieoffset);
4160 
4161 	/*
4162 	 * If a probe response template was already set explicitly by
4163 	 * userspace, don't derive one from the beacon data.
4164 	 */
4165 	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4166 		goto end_bcn;
4167 
4168 	/* remove TIM ie from probe response */
4169 	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4170 
4171 	/*
4172 	 * remove the p2p ie from the probe response.
4173 	 * the fw responds to probe requests that don't include
4174 	 * the p2p ie. probe requests with a p2p ie will be passed up,
4175 	 * and will be answered by the supplicant (the spec
4176 	 * forbids including the p2p ie when responding to probe
4177 	 * requests that didn't include it).
4178 	 */
4179 	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4180 				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4181 
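	/* turn the beacon into a probe response by rewriting the frame control */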
4182 	hdr = (struct ieee80211_hdr *) beacon->data;
4183 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4184 					 IEEE80211_STYPE_PROBE_RESP);
4185 	if (is_ap)
4186 		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4187 							   beacon->data,
4188 							   beacon->len,
4189 							   min_rate);
4190 	else
4191 		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4192 					      CMD_TEMPL_PROBE_RESPONSE,
4193 					      beacon->data,
4194 					      beacon->len, 0,
4195 					      min_rate);
4196 end_bcn:
4197 	dev_kfree_skb(beacon);
4198 	if (ret < 0)
4199 		goto out;
4200 
4201 out:
4202 	return ret;
4203 }
4204 
4205 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4206 					  struct ieee80211_vif *vif,
4207 					  struct ieee80211_bss_conf *bss_conf,
4208 					  u32 changed)
4209 {
4210 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4211 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4212 	int ret = 0;
4213 
4214 	if (changed & BSS_CHANGED_BEACON_INT) {
4215 		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4216 			bss_conf->beacon_int);
4217 
4218 		wlvif->beacon_int = bss_conf->beacon_int;
4219 	}
4220 
4221 	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4222 		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4223 
4224 		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4225 	}
4226 
4227 	if (changed & BSS_CHANGED_BEACON) {
4228 		ret = wlcore_set_beacon_template(wl, vif, is_ap);
4229 		if (ret < 0)
4230 			goto out;
4231 
4232 		if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4233 				       &wlvif->flags)) {
4234 			ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4235 			if (ret < 0)
4236 				goto out;
4237 		}
4238 	}
4239 out:
4240 	if (ret != 0)
4241 		wl1271_error("beacon info change failed: %d", ret);
4242 	return ret;
4243 }
4244 
4245 /* AP mode changes */
4246 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4247 				       struct ieee80211_vif *vif,
4248 				       struct ieee80211_bss_conf *bss_conf,
4249 				       u32 changed)
4250 {
4251 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4252 	int ret = 0;
4253 
4254 	if (changed & BSS_CHANGED_BASIC_RATES) {
4255 		u32 rates = bss_conf->basic_rates;
4256 
4257 		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4258 								 wlvif->band);
4259 		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4260 							wlvif->basic_rate_set);
4261 
4262 		ret = wl1271_init_ap_rates(wl, wlvif);
4263 		if (ret < 0) {
4264 			wl1271_error("AP rate policy change failed %d", ret);
4265 			goto out;
4266 		}
4267 
4268 		ret = wl1271_ap_init_templates(wl, vif);
4269 		if (ret < 0)
4270 			goto out;
4271 
4272 		/* No need to set probe resp template for mesh */
4273 		if (!ieee80211_vif_is_mesh(vif)) {
4274 			ret = wl1271_ap_set_probe_resp_tmpl(wl,
4275 							    wlvif->basic_rate,
4276 							    vif);
4277 			if (ret < 0)
4278 				goto out;
4279 		}
4280 
4281 		ret = wlcore_set_beacon_template(wl, vif, true);
4282 		if (ret < 0)
4283 			goto out;
4284 	}
4285 
4286 	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4287 	if (ret < 0)
4288 		goto out;
4289 
4290 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
4291 		if (bss_conf->enable_beacon) {
4292 			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4293 				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4294 				if (ret < 0)
4295 					goto out;
4296 
4297 				ret = wl1271_ap_init_hwenc(wl, wlvif);
4298 				if (ret < 0)
4299 					goto out;
4300 
4301 				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4302 				wl1271_debug(DEBUG_AP, "started AP");
4303 			}
4304 		} else {
4305 			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4306 				/*
4307 				 * The AP might be in ROC if we have just
4308 				 * sent an auth reply. Handle it.
4309 				 */
4310 				if (test_bit(wlvif->role_id, wl->roc_map))
4311 					wl12xx_croc(wl, wlvif->role_id);
4312 
4313 				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4314 				if (ret < 0)
4315 					goto out;
4316 
4317 				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4318 				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4319 					  &wlvif->flags);
4320 				wl1271_debug(DEBUG_AP, "stopped AP");
4321 			}
4322 		}
4323 	}
4324 
4325 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4326 	if (ret < 0)
4327 		goto out;
4328 
4329 	/* Handle HT information change */
4330 	if ((changed & BSS_CHANGED_HT) &&
4331 	    (bss_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4332 		ret = wl1271_acx_set_ht_information(wl, wlvif,
4333 					bss_conf->ht_operation_mode);
4334 		if (ret < 0) {
4335 			wl1271_warning("Set ht information failed %d", ret);
4336 			goto out;
4337 		}
4338 	}
4339 
4340 out:
4341 	return;
4342 }
4343 
4344 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4345 			    struct ieee80211_vif *vif, u32 sta_rate_set)
4346 {
4347 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
4348 	u32 rates;
4349 	int ret;
4350 
4351 	wl1271_debug(DEBUG_MAC80211,
4352 	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4353 	     bss_conf->bssid, vif->cfg.aid,
4354 	     bss_conf->beacon_int,
4355 	     bss_conf->basic_rates, sta_rate_set);
4356 
4357 	wlvif->beacon_int = bss_conf->beacon_int;
4358 	rates = bss_conf->basic_rates;
4359 	wlvif->basic_rate_set =
4360 		wl1271_tx_enabled_rates_get(wl, rates,
4361 					    wlvif->band);
4362 	wlvif->basic_rate =
4363 		wl1271_tx_min_rate_get(wl,
4364 				       wlvif->basic_rate_set);
4365 
4366 	if (sta_rate_set)
4367 		wlvif->rate_set =
4368 			wl1271_tx_enabled_rates_get(wl,
4369 						sta_rate_set,
4370 						wlvif->band);
4371 
4372 	/* we only support sched_scan while not connected */
4373 	if (wl->sched_vif == wlvif)
4374 		wl->ops->sched_scan_stop(wl, wlvif);
4375 
4376 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4377 	if (ret < 0)
4378 		return ret;
4379 
4380 	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4381 	if (ret < 0)
4382 		return ret;
4383 
4384 	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4385 	if (ret < 0)
4386 		return ret;
4387 
4388 	wlcore_set_ssid(wl, wlvif);
4389 
4390 	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4391 
4392 	return 0;
4393 }
4394 
4395 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4396 {
4397 	int ret;
4398 
4399 	/* revert to the minimum rates for the current band */
4400 	wl1271_set_band_rate(wl, wlvif);
4401 	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4402 
4403 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4404 	if (ret < 0)
4405 		return ret;
4406 
4407 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4408 	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4409 		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4410 		if (ret < 0)
4411 			return ret;
4412 	}
4413 
4414 	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4415 	return 0;
4416 }
4417 /* STA/IBSS mode changes */
4418 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4419 					struct ieee80211_vif *vif,
4420 					struct ieee80211_bss_conf *bss_conf,
4421 					u32 changed)
4422 {
4423 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4424 	bool do_join = false;
4425 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4426 	bool ibss_joined = false;
4427 	u32 sta_rate_set = 0;
4428 	int ret;
4429 	struct ieee80211_sta *sta;
4430 	bool sta_exists = false;
4431 	struct ieee80211_sta_ht_cap sta_ht_cap;
4432 
4433 	if (is_ibss) {
4434 		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4435 						     changed);
4436 		if (ret < 0)
4437 			goto out;
4438 	}
4439 
4440 	if (changed & BSS_CHANGED_IBSS) {
4441 		if (vif->cfg.ibss_joined) {
4442 			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4443 			ibss_joined = true;
4444 		} else {
4445 			wlcore_unset_assoc(wl, wlvif);
4446 			wl12xx_cmd_role_stop_sta(wl, wlvif);
4447 		}
4448 	}
4449 
4450 	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4451 		do_join = true;
4452 
4453 	/* Need to update the SSID (for filtering etc) */
4454 	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4455 		do_join = true;
4456 
4457 	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4458 		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4459 			     bss_conf->enable_beacon ? "enabled" : "disabled");
4460 
4461 		do_join = true;
4462 	}
4463 
4464 	if (changed & BSS_CHANGED_IDLE && !is_ibss)
4465 		wl1271_sta_handle_idle(wl, wlvif, vif->cfg.idle);
4466 
4467 	if (changed & BSS_CHANGED_CQM) {
4468 		bool enable = false;
4469 		if (bss_conf->cqm_rssi_thold)
4470 			enable = true;
4471 		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4472 						  bss_conf->cqm_rssi_thold,
4473 						  bss_conf->cqm_rssi_hyst);
4474 		if (ret < 0)
4475 			goto out;
4476 		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4477 	}
4478 
4479 	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4480 		       BSS_CHANGED_ASSOC)) {
4481 		rcu_read_lock();
4482 		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4483 		if (sta) {
4484 			u8 *rx_mask = sta->deflink.ht_cap.mcs.rx_mask;
4485 
4486 			/* save the supp_rates of the ap */
4487 			sta_rate_set = sta->deflink.supp_rates[wlvif->band];
4488 			if (sta->deflink.ht_cap.ht_supported)
4489 				sta_rate_set |=
4490 					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4491 					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4492 			sta_ht_cap = sta->deflink.ht_cap;
4493 			sta_exists = true;
4494 		}
4495 
4496 		rcu_read_unlock();
4497 	}
4498 
4499 	if (changed & BSS_CHANGED_BSSID) {
4500 		if (!is_zero_ether_addr(bss_conf->bssid)) {
4501 			ret = wlcore_set_bssid(wl, wlvif, vif,
4502 					       sta_rate_set);
4503 			if (ret < 0)
4504 				goto out;
4505 
4506 			/* Need to update the BSSID (for filtering etc) */
4507 			do_join = true;
4508 		} else {
4509 			ret = wlcore_clear_bssid(wl, wlvif);
4510 			if (ret < 0)
4511 				goto out;
4512 		}
4513 	}
4514 
4515 	if (changed & BSS_CHANGED_IBSS) {
4516 		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4517 			     vif->cfg.ibss_joined);
4518 
4519 		if (vif->cfg.ibss_joined) {
4520 			u32 rates = bss_conf->basic_rates;
4521 			wlvif->basic_rate_set =
4522 				wl1271_tx_enabled_rates_get(wl, rates,
4523 							    wlvif->band);
4524 			wlvif->basic_rate =
4525 				wl1271_tx_min_rate_get(wl,
4526 						       wlvif->basic_rate_set);
4527 
4528 			/* by default, use 11b + OFDM rates */
4529 			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4530 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4531 			if (ret < 0)
4532 				goto out;
4533 		}
4534 	}
4535 
4536 	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4537 		/* enable beacon filtering */
4538 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4539 		if (ret < 0)
4540 			goto out;
4541 	}
4542 
4543 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4544 	if (ret < 0)
4545 		goto out;
4546 
4547 	if (do_join) {
4548 		ret = wlcore_join(wl, wlvif);
4549 		if (ret < 0) {
4550 			wl1271_warning("cmd join failed %d", ret);
4551 			goto out;
4552 		}
4553 	}
4554 
4555 	if (changed & BSS_CHANGED_ASSOC) {
4556 		if (vif->cfg.assoc) {
4557 			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4558 					       sta_rate_set);
4559 			if (ret < 0)
4560 				goto out;
4561 
4562 			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4563 				wl12xx_set_authorized(wl, wlvif);
4564 		} else {
4565 			wlcore_unset_assoc(wl, wlvif);
4566 		}
4567 	}
4568 
4569 	if (changed & BSS_CHANGED_PS) {
4570 		if (vif->cfg.ps &&
4571 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4572 		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4573 			int ps_mode;
4574 			char *ps_mode_str;
4575 
4576 			if (wl->conf.conn.forced_ps) {
4577 				ps_mode = STATION_POWER_SAVE_MODE;
4578 				ps_mode_str = "forced";
4579 			} else {
4580 				ps_mode = STATION_AUTO_PS_MODE;
4581 				ps_mode_str = "auto";
4582 			}
4583 
4584 			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4585 
4586 			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4587 			if (ret < 0)
4588 				wl1271_warning("enter %s ps failed %d",
4589 					       ps_mode_str, ret);
4590 		} else if (!vif->cfg.ps &&
4591 			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4592 			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4593 
4594 			ret = wl1271_ps_set_mode(wl, wlvif,
4595 						 STATION_ACTIVE_MODE);
4596 			if (ret < 0)
4597 				wl1271_warning("exit auto ps failed %d", ret);
4598 		}
4599 	}
4600 
4601 	/* Handle new association with HT. Do this after join. */
4602 	if (sta_exists) {
4603 		bool enabled =
4604 			bss_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT;
4605 
4606 		ret = wlcore_hw_set_peer_cap(wl,
4607 					     &sta_ht_cap,
4608 					     enabled,
4609 					     wlvif->rate_set,
4610 					     wlvif->sta.hlid);
4611 		if (ret < 0) {
4612 			wl1271_warning("Set ht cap failed %d", ret);
4613 			goto out;
4614 
4615 		}
4616 
4617 		if (enabled) {
4618 			ret = wl1271_acx_set_ht_information(wl, wlvif,
4619 						bss_conf->ht_operation_mode);
4620 			if (ret < 0) {
4621 				wl1271_warning("Set ht information failed %d",
4622 					       ret);
4623 				goto out;
4624 			}
4625 		}
4626 	}
4627 
4628 	/* Handle arp filtering. Done after join. */
4629 	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4630 	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4631 		__be32 addr = vif->cfg.arp_addr_list[0];
4632 		wlvif->sta.qos = bss_conf->qos;
4633 		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4634 
4635 		if (vif->cfg.arp_addr_cnt == 1 && vif->cfg.assoc) {
4636 			wlvif->ip_addr = addr;
4637 			/*
4638 			 * The template should have been configured only upon
4639 			 * association. However, it seems that the correct IP
4640 			 * isn't being set (when sending), so we have to
4641 			 * reconfigure the template upon every IP change.
4642 			 */
4643 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4644 			if (ret < 0) {
4645 				wl1271_warning("build arp rsp failed: %d", ret);
4646 				goto out;
4647 			}
4648 
4649 			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4650 				(ACX_ARP_FILTER_ARP_FILTERING |
4651 				 ACX_ARP_FILTER_AUTO_ARP),
4652 				addr);
4653 		} else {
4654 			wlvif->ip_addr = 0;
4655 			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4656 		}
4657 
4658 		if (ret < 0)
4659 			goto out;
4660 	}
4661 
4662 out:
4663 	return;
4664 }
4665 
4666 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4667 				       struct ieee80211_vif *vif,
4668 				       struct ieee80211_bss_conf *bss_conf,
4669 				       u64 changed)
4670 {
4671 	struct wl1271 *wl = hw->priv;
4672 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4673 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4674 	int ret;
4675 
4676 	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4677 		     wlvif->role_id, (int)changed);
4678 
4679 	/*
4680 	 * make sure to cancel pending disconnections if our association
4681 	 * state changed
4682 	 */
4683 	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4684 		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4685 
4686 	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4687 	    !bss_conf->enable_beacon)
4688 		wl1271_tx_flush(wl);
4689 
4690 	mutex_lock(&wl->mutex);
4691 
4692 	if (unlikely(wl->state != WLCORE_STATE_ON))
4693 		goto out;
4694 
4695 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4696 		goto out;
4697 
4698 	ret = pm_runtime_resume_and_get(wl->dev);
4699 	if (ret < 0)
4700 		goto out;
4701 
4702 	if ((changed & BSS_CHANGED_TXPOWER) &&
4703 	    bss_conf->txpower != wlvif->power_level) {
4704 
4705 		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4706 		if (ret < 0)
4707 			goto out;
4708 
4709 		wlvif->power_level = bss_conf->txpower;
4710 	}
4711 
4712 	if (is_ap)
4713 		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4714 	else
4715 		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4716 
4717 	pm_runtime_mark_last_busy(wl->dev);
4718 	pm_runtime_put_autosuspend(wl->dev);
4719 
4720 out:
4721 	mutex_unlock(&wl->mutex);
4722 }
4723 
4724 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4725 				 struct ieee80211_chanctx_conf *ctx)
4726 {
4727 	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4728 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4729 		     cfg80211_get_chandef_type(&ctx->def));
4730 	return 0;
4731 }
4732 
4733 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4734 				     struct ieee80211_chanctx_conf *ctx)
4735 {
4736 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4737 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4738 		     cfg80211_get_chandef_type(&ctx->def));
4739 }
4740 
4741 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4742 				     struct ieee80211_chanctx_conf *ctx,
4743 				     u32 changed)
4744 {
4745 	struct wl1271 *wl = hw->priv;
4746 	struct wl12xx_vif *wlvif;
4747 	int ret;
4748 	int channel = ieee80211_frequency_to_channel(
4749 		ctx->def.chan->center_freq);
4750 
4751 	wl1271_debug(DEBUG_MAC80211,
4752 		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4753 		     channel, cfg80211_get_chandef_type(&ctx->def), changed);
4754 
4755 	mutex_lock(&wl->mutex);
4756 
4757 	ret = pm_runtime_resume_and_get(wl->dev);
4758 	if (ret < 0)
4759 		goto out;
4760 
4761 	wl12xx_for_each_wlvif(wl, wlvif) {
4762 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4763 
4764 		rcu_read_lock();
4765 		if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != ctx) {
4766 			rcu_read_unlock();
4767 			continue;
4768 		}
4769 		rcu_read_unlock();
4770 
4771 		/* start radar if needed */
4772 		if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4773 		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
4774 		    ctx->radar_enabled && !wlvif->radar_enabled &&
4775 		    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4776 			wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4777 			wlcore_hw_set_cac(wl, wlvif, true);
4778 			wlvif->radar_enabled = true;
4779 		}
4780 	}
4781 
4782 	pm_runtime_mark_last_busy(wl->dev);
4783 	pm_runtime_put_autosuspend(wl->dev);
4784 out:
4785 	mutex_unlock(&wl->mutex);
4786 }
4787 
4788 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4789 					struct ieee80211_vif *vif,
4790 					struct ieee80211_bss_conf *link_conf,
4791 					struct ieee80211_chanctx_conf *ctx)
4792 {
4793 	struct wl1271 *wl = hw->priv;
4794 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4795 	int channel = ieee80211_frequency_to_channel(
4796 		ctx->def.chan->center_freq);
4797 	int ret = -EINVAL;
4798 
4799 	wl1271_debug(DEBUG_MAC80211,
4800 		     "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4801 		     wlvif->role_id, channel,
4802 		     cfg80211_get_chandef_type(&ctx->def),
4803 		     ctx->radar_enabled, ctx->def.chan->dfs_state);
4804 
4805 	mutex_lock(&wl->mutex);
4806 
4807 	if (unlikely(wl->state != WLCORE_STATE_ON))
4808 		goto out;
4809 
4810 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4811 		goto out;
4812 
4813 	ret = pm_runtime_resume_and_get(wl->dev);
4814 	if (ret < 0)
4815 		goto out;
4816 
4817 	wlvif->band = ctx->def.chan->band;
4818 	wlvif->channel = channel;
4819 	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4820 
4821 	/* update default rates according to the band */
4822 	wl1271_set_band_rate(wl, wlvif);
4823 
4824 	if (ctx->radar_enabled &&
4825 	    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4826 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4827 		wlcore_hw_set_cac(wl, wlvif, true);
4828 		wlvif->radar_enabled = true;
4829 	}
4830 
4831 	pm_runtime_mark_last_busy(wl->dev);
4832 	pm_runtime_put_autosuspend(wl->dev);
4833 out:
4834 	mutex_unlock(&wl->mutex);
4835 
4836 	return 0;
4837 }
4838 
4839 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4840 					   struct ieee80211_vif *vif,
4841 					   struct ieee80211_bss_conf *link_conf,
4842 					   struct ieee80211_chanctx_conf *ctx)
4843 {
4844 	struct wl1271 *wl = hw->priv;
4845 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4846 	int ret;
4847 
4848 	wl1271_debug(DEBUG_MAC80211,
4849 		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4850 		     wlvif->role_id,
4851 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4852 		     cfg80211_get_chandef_type(&ctx->def));
4853 
4854 	wl1271_tx_flush(wl);
4855 
4856 	mutex_lock(&wl->mutex);
4857 
4858 	if (unlikely(wl->state != WLCORE_STATE_ON))
4859 		goto out;
4860 
4861 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4862 		goto out;
4863 
4864 	ret = pm_runtime_resume_and_get(wl->dev);
4865 	if (ret < 0)
4866 		goto out;
4867 
4868 	if (wlvif->radar_enabled) {
4869 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4870 		wlcore_hw_set_cac(wl, wlvif, false);
4871 		wlvif->radar_enabled = false;
4872 	}
4873 
4874 	pm_runtime_mark_last_busy(wl->dev);
4875 	pm_runtime_put_autosuspend(wl->dev);
4876 out:
4877 	mutex_unlock(&wl->mutex);
4878 }
4879 
4880 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4881 				    struct wl12xx_vif *wlvif,
4882 				    struct ieee80211_chanctx_conf *new_ctx)
4883 {
4884 	int channel = ieee80211_frequency_to_channel(
4885 		new_ctx->def.chan->center_freq);
4886 
4887 	wl1271_debug(DEBUG_MAC80211,
4888 		     "switch vif (role %d) %d -> %d chan_type: %d",
4889 		     wlvif->role_id, wlvif->channel, channel,
4890 		     cfg80211_get_chandef_type(&new_ctx->def));
4891 
4892 	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4893 		return 0;
4894 
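	/* beaconing is expected to already be disabled during a channel switch */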
4895 	WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4896 
4897 	if (wlvif->radar_enabled) {
4898 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4899 		wlcore_hw_set_cac(wl, wlvif, false);
4900 		wlvif->radar_enabled = false;
4901 	}
4902 
4903 	wlvif->band = new_ctx->def.chan->band;
4904 	wlvif->channel = channel;
4905 	wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4906 
4907 	/* start radar if needed */
4908 	if (new_ctx->radar_enabled) {
4909 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4910 		wlcore_hw_set_cac(wl, wlvif, true);
4911 		wlvif->radar_enabled = true;
4912 	}
4913 
4914 	return 0;
4915 }
4916 
4917 static int
4918 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4919 			     struct ieee80211_vif_chanctx_switch *vifs,
4920 			     int n_vifs,
4921 			     enum ieee80211_chanctx_switch_mode mode)
4922 {
4923 	struct wl1271 *wl = hw->priv;
4924 	int i, ret;
4925 
4926 	wl1271_debug(DEBUG_MAC80211,
4927 		     "mac80211 switch chanctx n_vifs %d mode %d",
4928 		     n_vifs, mode);
4929 
4930 	mutex_lock(&wl->mutex);
4931 
4932 	ret = pm_runtime_resume_and_get(wl->dev);
4933 	if (ret < 0)
4934 		goto out;
4935 
4936 	for (i = 0; i < n_vifs; i++) {
4937 		struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4938 
4939 		ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4940 		if (ret)
4941 			goto out_sleep;
4942 	}
4943 out_sleep:
4944 	pm_runtime_mark_last_busy(wl->dev);
4945 	pm_runtime_put_autosuspend(wl->dev);
4946 out:
4947 	mutex_unlock(&wl->mutex);
4948 
4949 	return 0;
4950 }
4951 
4952 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4953 			     struct ieee80211_vif *vif,
4954 			     unsigned int link_id, u16 queue,
4955 			     const struct ieee80211_tx_queue_params *params)
4956 {
4957 	struct wl1271 *wl = hw->priv;
4958 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4959 	u8 ps_scheme;
4960 	int ret = 0;
4961 
4962 	if (wlcore_is_p2p_mgmt(wlvif))
4963 		return 0;
4964 
4965 	mutex_lock(&wl->mutex);
4966 
4967 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4968 
4969 	if (params->uapsd)
4970 		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4971 	else
4972 		ps_scheme = CONF_PS_SCHEME_LEGACY;
4973 
4974 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4975 		goto out;
4976 
4977 	ret = pm_runtime_resume_and_get(wl->dev);
4978 	if (ret < 0)
4979 		goto out;
4980 
4981 	/*
4982 	 * mac80211 configures the txop in units of 32us;
4983 	 * the firmware expects microseconds
4984 	 */
4985 	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4986 				params->cw_min, params->cw_max,
4987 				params->aifs, params->txop << 5);
4988 	if (ret < 0)
4989 		goto out_sleep;
4990 
4991 	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4992 				 CONF_CHANNEL_TYPE_EDCF,
4993 				 wl1271_tx_get_queue(queue),
4994 				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4995 				 0, 0);
4996 
4997 out_sleep:
4998 	pm_runtime_mark_last_busy(wl->dev);
4999 	pm_runtime_put_autosuspend(wl->dev);
5000 
5001 out:
5002 	mutex_unlock(&wl->mutex);
5003 
5004 	return ret;
5005 }
5006 
5007 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
5008 			     struct ieee80211_vif *vif)
5009 {
5010 
5011 	struct wl1271 *wl = hw->priv;
5012 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5013 	u64 mactime = ULLONG_MAX;
5014 	int ret;
5015 
5016 	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
5017 
5018 	mutex_lock(&wl->mutex);
5019 
5020 	if (unlikely(wl->state != WLCORE_STATE_ON))
5021 		goto out;
5022 
5023 	ret = pm_runtime_resume_and_get(wl->dev);
5024 	if (ret < 0)
5025 		goto out;
5026 
5027 	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
5028 	if (ret < 0)
5029 		goto out_sleep;
5030 
5031 out_sleep:
5032 	pm_runtime_mark_last_busy(wl->dev);
5033 	pm_runtime_put_autosuspend(wl->dev);
5034 
5035 out:
5036 	mutex_unlock(&wl->mutex);
5037 	return mactime;
5038 }
5039 
5040 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
5041 				struct survey_info *survey)
5042 {
5043 	struct ieee80211_conf *conf = &hw->conf;
5044 
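	/* report only the current channel, with no populated statistics */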
5045 	if (idx != 0)
5046 		return -ENOENT;
5047 
5048 	survey->channel = conf->chandef.chan;
5049 	survey->filled = 0;
5050 	return 0;
5051 }
5052 
5053 static int wl1271_allocate_sta(struct wl1271 *wl,
5054 			     struct wl12xx_vif *wlvif,
5055 			     struct ieee80211_sta *sta)
5056 {
5057 	struct wl1271_station *wl_sta;
5058 	int ret;
5059 
5060 
5061 	if (wl->active_sta_count >= wl->max_ap_stations) {
5062 		wl1271_warning("could not allocate HLID - too many stations");
5063 		return -EBUSY;
5064 	}
5065 
5066 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5067 	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
5068 	if (ret < 0) {
5069 		wl1271_warning("could not allocate HLID - too many links");
5070 		return -EBUSY;
5071 	}
5072 
5073 	/* use the previous security seq, if this is a recovery/resume */
5074 	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
5075 
5076 	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
5077 	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
5078 	wl->active_sta_count++;
5079 	return 0;
5080 }
5081 
5082 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
5083 {
5084 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
5085 		return;
5086 
5087 	clear_bit(hlid, wlvif->ap.sta_hlid_map);
5088 	__clear_bit(hlid, &wl->ap_ps_map);
5089 	__clear_bit(hlid, &wl->ap_fw_ps_map);
5090 
5091 	/*
5092 	 * save the last used PN in the private part of ieee80211_sta,
5093 	 * in case of recovery/suspend
5094 	 */
5095 	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5096 
5097 	wl12xx_free_link(wl, wlvif, &hlid);
5098 	wl->active_sta_count--;
5099 
5100 	/*
5101 	 * rearm the tx watchdog when the last STA is freed - give the FW a
5102 	 * chance to return STA-buffered packets before complaining.
5103 	 */
5104 	if (wl->active_sta_count == 0)
5105 		wl12xx_rearm_tx_watchdog_locked(wl);
5106 }
5107 
5108 static int wl12xx_sta_add(struct wl1271 *wl,
5109 			  struct wl12xx_vif *wlvif,
5110 			  struct ieee80211_sta *sta)
5111 {
5112 	struct wl1271_station *wl_sta;
5113 	int ret = 0;
5114 	u8 hlid;
5115 
5116 	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5117 
5118 	ret = wl1271_allocate_sta(wl, wlvif, sta);
5119 	if (ret < 0)
5120 		return ret;
5121 
5122 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5123 	hlid = wl_sta->hlid;
5124 
5125 	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5126 	if (ret < 0)
5127 		wl1271_free_sta(wl, wlvif, hlid);
5128 
5129 	return ret;
5130 }
5131 
5132 static int wl12xx_sta_remove(struct wl1271 *wl,
5133 			     struct wl12xx_vif *wlvif,
5134 			     struct ieee80211_sta *sta)
5135 {
5136 	struct wl1271_station *wl_sta;
5137 	int ret = 0, id;
5138 
5139 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5140 
5141 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5142 	id = wl_sta->hlid;
5143 	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5144 		return -EINVAL;
5145 
5146 	ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5147 	if (ret < 0)
5148 		return ret;
5149 
5150 	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5151 	return ret;
5152 }
5153 
5154 static void wlcore_roc_if_possible(struct wl1271 *wl,
5155 				   struct wl12xx_vif *wlvif)
5156 {
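	/* bail out if any role is already on-channel (ROC) */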
5157 	if (find_first_bit(wl->roc_map,
5158 			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5159 		return;
5160 
5161 	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5162 		return;
5163 
5164 	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5165 }
5166 
5167 /*
5168  * when wl_sta is NULL, we treat this call as if coming from a
5169  * pending auth reply.
5170  * wl->mutex must be taken and the FW must be awake when the call
5171  * takes place.
5172  */
5173 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5174 			      struct wl1271_station *wl_sta, bool in_conn)
5175 {
5176 	if (in_conn) {
5177 		if (WARN_ON(wl_sta && wl_sta->in_connection))
5178 			return;
5179 
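		/* first in-progress connection on this vif: request ROC so we stay on channel */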
5180 		if (!wlvif->ap_pending_auth_reply &&
5181 		    !wlvif->inconn_count)
5182 			wlcore_roc_if_possible(wl, wlvif);
5183 
5184 		if (wl_sta) {
5185 			wl_sta->in_connection = true;
5186 			wlvif->inconn_count++;
5187 		} else {
5188 			wlvif->ap_pending_auth_reply = true;
5189 		}
5190 	} else {
5191 		if (wl_sta && !wl_sta->in_connection)
5192 			return;
5193 
5194 		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5195 			return;
5196 
5197 		if (WARN_ON(wl_sta && !wlvif->inconn_count))
5198 			return;
5199 
5200 		if (wl_sta) {
5201 			wl_sta->in_connection = false;
5202 			wlvif->inconn_count--;
5203 		} else {
5204 			wlvif->ap_pending_auth_reply = false;
5205 		}
5206 
5207 		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5208 		    test_bit(wlvif->role_id, wl->roc_map))
5209 			wl12xx_croc(wl, wlvif->role_id);
5210 	}
5211 }
5212 
5213 static int wl12xx_update_sta_state(struct wl1271 *wl,
5214 				   struct wl12xx_vif *wlvif,
5215 				   struct ieee80211_sta *sta,
5216 				   enum ieee80211_sta_state old_state,
5217 				   enum ieee80211_sta_state new_state)
5218 {
5219 	struct wl1271_station *wl_sta;
5220 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5221 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5222 	int ret;
5223 
5224 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5225 
5226 	/* Add station (AP mode) */
5227 	if (is_ap &&
5228 	    old_state == IEEE80211_STA_AUTH &&
5229 	    new_state == IEEE80211_STA_ASSOC) {
5230 		ret = wl12xx_sta_add(wl, wlvif, sta);
5231 		if (ret)
5232 			return ret;
5233 
5234 		wl_sta->fw_added = true;
5235 
5236 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5237 	}
5238 
5239 	/* Remove station (AP mode) */
5240 	if (is_ap &&
5241 	    old_state == IEEE80211_STA_ASSOC &&
5242 	    new_state == IEEE80211_STA_AUTH) {
5243 		wl_sta->fw_added = false;
5244 
5245 		/* must not fail */
5246 		wl12xx_sta_remove(wl, wlvif, sta);
5247 
5248 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5249 	}
5250 
5251 	/* Authorize station (AP mode) */
5252 	if (is_ap &&
5253 	    new_state == IEEE80211_STA_AUTHORIZED) {
5254 		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5255 		if (ret < 0)
5256 			return ret;
5257 
5258 		ret = wl1271_acx_set_ht_capabilities(wl, &sta->deflink.ht_cap,
5259 						     true,
5260 						     wl_sta->hlid);
5261 		if (ret)
5262 			return ret;
5263 
5264 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5265 	}
5266 
5267 	/* Authorize station */
5268 	if (is_sta &&
5269 	    new_state == IEEE80211_STA_AUTHORIZED) {
5270 		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5271 		ret = wl12xx_set_authorized(wl, wlvif);
5272 		if (ret)
5273 			return ret;
5274 	}
5275 
5276 	if (is_sta &&
5277 	    old_state == IEEE80211_STA_AUTHORIZED &&
5278 	    new_state == IEEE80211_STA_ASSOC) {
5279 		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5280 		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5281 	}
5282 
5283 	/* save seq number on disassoc (suspend) */
5284 	if (is_sta &&
5285 	    old_state == IEEE80211_STA_ASSOC &&
5286 	    new_state == IEEE80211_STA_AUTH) {
5287 		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5288 		wlvif->total_freed_pkts = 0;
5289 	}
5290 
5291 	/* restore seq number on assoc (resume) */
5292 	if (is_sta &&
5293 	    old_state == IEEE80211_STA_AUTH &&
5294 	    new_state == IEEE80211_STA_ASSOC) {
5295 		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5296 	}
5297 
5298 	/* clear ROCs on failure or authorization */
5299 	if (is_sta &&
5300 	    (new_state == IEEE80211_STA_AUTHORIZED ||
5301 	     new_state == IEEE80211_STA_NOTEXIST)) {
5302 		if (test_bit(wlvif->role_id, wl->roc_map))
5303 			wl12xx_croc(wl, wlvif->role_id);
5304 	}
5305 
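	/* a peer entry was just created (STA mode): go on-channel if no role is in ROC yet */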
5306 	if (is_sta &&
5307 	    old_state == IEEE80211_STA_NOTEXIST &&
5308 	    new_state == IEEE80211_STA_NONE) {
5309 		if (find_first_bit(wl->roc_map,
5310 				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5311 			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5312 			wl12xx_roc(wl, wlvif, wlvif->role_id,
5313 				   wlvif->band, wlvif->channel);
5314 		}
5315 	}
5316 	return 0;
5317 }
5318 
5319 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5320 			       struct ieee80211_vif *vif,
5321 			       struct ieee80211_sta *sta,
5322 			       enum ieee80211_sta_state old_state,
5323 			       enum ieee80211_sta_state new_state)
5324 {
5325 	struct wl1271 *wl = hw->priv;
5326 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5327 	int ret;
5328 
5329 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5330 		     sta->aid, old_state, new_state);
5331 
5332 	mutex_lock(&wl->mutex);
5333 
5334 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5335 		ret = -EBUSY;
5336 		goto out;
5337 	}
5338 
5339 	ret = pm_runtime_resume_and_get(wl->dev);
5340 	if (ret < 0)
5341 		goto out;
5342 
5343 	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5344 
5345 	pm_runtime_mark_last_busy(wl->dev);
5346 	pm_runtime_put_autosuspend(wl->dev);
5347 out:
5348 	mutex_unlock(&wl->mutex);
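	/* never report failure for downward (teardown) state transitions */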
5349 	if (new_state < old_state)
5350 		return 0;
5351 	return ret;
5352 }
5353 
5354 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5355 				  struct ieee80211_vif *vif,
5356 				  struct ieee80211_ampdu_params *params)
5357 {
5358 	struct wl1271 *wl = hw->priv;
5359 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5360 	int ret;
5361 	u8 hlid, *ba_bitmap;
5362 	struct ieee80211_sta *sta = params->sta;
5363 	enum ieee80211_ampdu_mlme_action action = params->action;
5364 	u16 tid = params->tid;
5365 	u16 *ssn = &params->ssn;
5366 
5367 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5368 		     tid);
5369 
5370 	/* sanity check - the fields in FW are only 8 bits wide */
5371 	if (WARN_ON(tid > 0xFF))
5372 		return -ENOTSUPP;
5373 
5374 	mutex_lock(&wl->mutex);
5375 
5376 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5377 		ret = -EAGAIN;
5378 		goto out;
5379 	}
5380 
5381 	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5382 		hlid = wlvif->sta.hlid;
5383 	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5384 		struct wl1271_station *wl_sta;
5385 
5386 		wl_sta = (struct wl1271_station *)sta->drv_priv;
5387 		hlid = wl_sta->hlid;
5388 	} else {
5389 		ret = -EINVAL;
5390 		goto out;
5391 	}
5392 
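	/* one bit per TID in this bitmap tracks the link's active RX BA sessions */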
5393 	ba_bitmap = &wl->links[hlid].ba_bitmap;
5394 
5395 	ret = pm_runtime_resume_and_get(wl->dev);
5396 	if (ret < 0)
5397 		goto out;
5398 
5399 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5400 		     tid, action);
5401 
5402 	switch (action) {
5403 	case IEEE80211_AMPDU_RX_START:
5404 		if (!wlvif->ba_support || !wlvif->ba_allowed) {
5405 			ret = -ENOTSUPP;
5406 			break;
5407 		}
5408 
5409 		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5410 			ret = -EBUSY;
5411 			wl1271_debug(DEBUG_RX, "exceeded max RX BA sessions");
5412 			break;
5413 		}
5414 
5415 		if (*ba_bitmap & BIT(tid)) {
5416 			ret = -EINVAL;
5417 			wl1271_error("cannot enable RX BA session on active "
5418 				     "tid: %d", tid);
5419 			break;
5420 		}
5421 
5422 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5423 				hlid,
5424 				params->buf_size);
5425 
5426 		if (!ret) {
5427 			*ba_bitmap |= BIT(tid);
5428 			wl->ba_rx_session_count++;
5429 		}
5430 		break;
5431 
5432 	case IEEE80211_AMPDU_RX_STOP:
5433 		if (!(*ba_bitmap & BIT(tid))) {
5434 			/*
5435 			 * this happens on reconfig - so only output a debug
5436 			 * message for now, and don't fail the function.
5437 			 */
5438 			wl1271_debug(DEBUG_MAC80211,
5439 				     "no active RX BA session on tid: %d",
5440 				     tid);
5441 			ret = 0;
5442 			break;
5443 		}
5444 
5445 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5446 							 hlid, 0);
5447 		if (!ret) {
5448 			*ba_bitmap &= ~BIT(tid);
5449 			wl->ba_rx_session_count--;
5450 		}
5451 		break;
5452 
5453 	/*
5454 	 * BA initiator (TX) sessions are managed by the FW independently,
5455 	 * so all TX AMPDU actions deliberately fall through to -EINVAL.
5456 	 */
5457 	case IEEE80211_AMPDU_TX_START:
5458 	case IEEE80211_AMPDU_TX_STOP_CONT:
5459 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
5460 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5461 	case IEEE80211_AMPDU_TX_OPERATIONAL:
5462 		ret = -EINVAL;
5463 		break;
5464 
5465 	default:
5466 		wl1271_error("Incorrect ampdu action id=%x\n", action);
5467 		ret = -EINVAL;
5468 	}
5469 
5470 	pm_runtime_mark_last_busy(wl->dev);
5471 	pm_runtime_put_autosuspend(wl->dev);
5472 
5473 out:
5474 	mutex_unlock(&wl->mutex);
5475 
5476 	return ret;
5477 }
5478 
5479 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5480 				   struct ieee80211_vif *vif,
5481 				   const struct cfg80211_bitrate_mask *mask)
5482 {
5483 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5484 	struct wl1271 *wl = hw->priv;
5485 	int i, ret = 0;
5486 
5487 	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5488 		mask->control[NL80211_BAND_2GHZ].legacy,
5489 		mask->control[NL80211_BAND_5GHZ].legacy);
5490 
5491 	mutex_lock(&wl->mutex);
5492 
5493 	for (i = 0; i < WLCORE_NUM_BANDS; i++)
5494 		wlvif->bitrate_masks[i] =
5495 			wl1271_tx_enabled_rates_get(wl,
5496 						    mask->control[i].legacy,
5497 						    i);
5498 
5499 	if (unlikely(wl->state != WLCORE_STATE_ON))
5500 		goto out;
5501 
5502 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5503 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5504 
5505 		ret = pm_runtime_resume_and_get(wl->dev);
5506 		if (ret < 0)
5507 			goto out;
5508 
5509 		wl1271_set_band_rate(wl, wlvif);
5510 		wlvif->basic_rate =
5511 			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5512 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5513 
5514 		pm_runtime_mark_last_busy(wl->dev);
5515 		pm_runtime_put_autosuspend(wl->dev);
5516 	}
5517 out:
5518 	mutex_unlock(&wl->mutex);
5519 
5520 	return ret;
5521 }
5522 
5523 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5524 				     struct ieee80211_vif *vif,
5525 				     struct ieee80211_channel_switch *ch_switch)
5526 {
5527 	struct wl1271 *wl = hw->priv;
5528 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5529 	int ret;
5530 
5531 	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5532 
5533 	wl1271_tx_flush(wl);
5534 
5535 	mutex_lock(&wl->mutex);
5536 
5537 	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5538 		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5539 			ieee80211_chswitch_done(vif, false, 0);
5540 		goto out;
5541 	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5542 		goto out;
5543 	}
5544 
5545 	ret = pm_runtime_resume_and_get(wl->dev);
5546 	if (ret < 0)
5547 		goto out;
5548 
5549 	/* TODO: change mac80211 to pass vif as param */
5550 
5551 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5552 		unsigned long delay_usec;
5553 
5554 		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5555 		if (ret)
5556 			goto out_sleep;
5557 
5558 		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5559 
5560 		/* indicate failure 5 seconds after channel switch time */
5561 		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5562 			ch_switch->count;
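		/*
		 * Worked example (illustrative numbers): with a beacon
		 * interval of 100 TU (1 TU = 1024 usec) and a CSA count of
		 * 10, delay_usec is ~1.02 s, so the failure check runs
		 * roughly 6 s from now.
		 */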
5563 		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5564 					     usecs_to_jiffies(delay_usec) +
5565 					     msecs_to_jiffies(5000));
5566 	}
5567 
5568 out_sleep:
5569 	pm_runtime_mark_last_busy(wl->dev);
5570 	pm_runtime_put_autosuspend(wl->dev);
5571 
5572 out:
5573 	mutex_unlock(&wl->mutex);
5574 }
5575 
5576 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5577 					struct wl12xx_vif *wlvif,
5578 					u8 eid)
5579 {
5580 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5581 	struct sk_buff *beacon =
5582 		ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif), 0);
5583 
5584 	if (!beacon)
5585 		return NULL;
5586 
5587 	return cfg80211_find_ie(eid,
5588 				beacon->data + ieoffset,
5589 				beacon->len - ieoffset);
5590 }
5591 
5592 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5593 				u8 *csa_count)
5594 {
5595 	const u8 *ie;
5596 	const struct ieee80211_channel_sw_ie *ie_csa;
5597 
5598 	ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5599 	if (!ie)
5600 		return -EINVAL;
5601 
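	/* ie[0]/ie[1] are the element id and length; the CSA payload follows */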
5602 	ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5603 	*csa_count = ie_csa->count;
5604 
5605 	return 0;
5606 }
5607 
5608 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5609 					    struct ieee80211_vif *vif,
5610 					    struct cfg80211_chan_def *chandef)
5611 {
5612 	struct wl1271 *wl = hw->priv;
5613 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5614 	struct ieee80211_channel_switch ch_switch = {
5615 		.block_tx = true,
5616 		.chandef = *chandef,
5617 	};
5618 	int ret;
5619 
5620 	wl1271_debug(DEBUG_MAC80211,
5621 		     "mac80211 channel switch beacon (role %d)",
5622 		     wlvif->role_id);
5623 
5624 	ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5625 	if (ret < 0) {
5626 		wl1271_error("error getting beacon (for CSA counter)");
5627 		return;
5628 	}
5629 
5630 	mutex_lock(&wl->mutex);
5631 
5632 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5633 		ret = -EBUSY;
5634 		goto out;
5635 	}
5636 
5637 	ret = pm_runtime_resume_and_get(wl->dev);
5638 	if (ret < 0)
5639 		goto out;
5640 
5641 	ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5642 	if (ret)
5643 		goto out_sleep;
5644 
5645 	set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5646 
5647 out_sleep:
5648 	pm_runtime_mark_last_busy(wl->dev);
5649 	pm_runtime_put_autosuspend(wl->dev);
5650 out:
5651 	mutex_unlock(&wl->mutex);
5652 }
5653 
5654 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5655 			    u32 queues, bool drop)
5656 {
5657 	struct wl1271 *wl = hw->priv;
5658 
5659 	wl1271_tx_flush(wl);
5660 }
5661 
5662 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5663 				       struct ieee80211_vif *vif,
5664 				       struct ieee80211_channel *chan,
5665 				       int duration,
5666 				       enum ieee80211_roc_type type)
5667 {
5668 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5669 	struct wl1271 *wl = hw->priv;
5670 	int channel, active_roc, ret = 0;
5671 
5672 	channel = ieee80211_frequency_to_channel(chan->center_freq);
5673 
5674 	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5675 		     channel, wlvif->role_id);
5676 
5677 	mutex_lock(&wl->mutex);
5678 
5679 	if (unlikely(wl->state != WLCORE_STATE_ON))
5680 		goto out;
5681 
5682 	/* return EBUSY if we can't ROC right now */
5683 	active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5684 	if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5685 		wl1271_warning("active roc on role %d", active_roc);
5686 		ret = -EBUSY;
5687 		goto out;
5688 	}
5689 
5690 	ret = pm_runtime_resume_and_get(wl->dev);
5691 	if (ret < 0)
5692 		goto out;
5693 
5694 	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5695 	if (ret < 0)
5696 		goto out_sleep;
5697 
5698 	wl->roc_vif = vif;
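	/*
	 * The delayed work runs wlcore_roc_complete_work() below, which
	 * tears the ROC down and notifies mac80211 via
	 * ieee80211_remain_on_channel_expired().
	 */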
5699 	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5700 				     msecs_to_jiffies(duration));
5701 out_sleep:
5702 	pm_runtime_mark_last_busy(wl->dev);
5703 	pm_runtime_put_autosuspend(wl->dev);
5704 out:
5705 	mutex_unlock(&wl->mutex);
5706 	return ret;
5707 }
5708 
5709 static int __wlcore_roc_completed(struct wl1271 *wl)
5710 {
5711 	struct wl12xx_vif *wlvif;
5712 	int ret;
5713 
5714 	/* already completed */
5715 	if (unlikely(!wl->roc_vif))
5716 		return 0;
5717 
5718 	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5719 
5720 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5721 		return -EBUSY;
5722 
5723 	ret = wl12xx_stop_dev(wl, wlvif);
5724 	if (ret < 0)
5725 		return ret;
5726 
5727 	wl->roc_vif = NULL;
5728 
5729 	return 0;
5730 }
5731 
5732 static int wlcore_roc_completed(struct wl1271 *wl)
5733 {
5734 	int ret;
5735 
5736 	wl1271_debug(DEBUG_MAC80211, "roc complete");
5737 
5738 	mutex_lock(&wl->mutex);
5739 
5740 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5741 		ret = -EBUSY;
5742 		goto out;
5743 	}
5744 
5745 	ret = pm_runtime_resume_and_get(wl->dev);
5746 	if (ret < 0)
5747 		goto out;
5748 
5749 	ret = __wlcore_roc_completed(wl);
5750 
5751 	pm_runtime_mark_last_busy(wl->dev);
5752 	pm_runtime_put_autosuspend(wl->dev);
5753 out:
5754 	mutex_unlock(&wl->mutex);
5755 
5756 	return ret;
5757 }
5758 
5759 static void wlcore_roc_complete_work(struct work_struct *work)
5760 {
5761 	struct delayed_work *dwork;
5762 	struct wl1271 *wl;
5763 	int ret;
5764 
5765 	dwork = to_delayed_work(work);
5766 	wl = container_of(dwork, struct wl1271, roc_complete_work);
5767 
5768 	ret = wlcore_roc_completed(wl);
5769 	if (!ret)
5770 		ieee80211_remain_on_channel_expired(wl->hw);
5771 }
5772 
5773 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
5774 					      struct ieee80211_vif *vif)
5775 {
5776 	struct wl1271 *wl = hw->priv;
5777 
5778 	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5779 
5780 	/* TODO: per-vif */
5781 	wl1271_tx_flush(wl);
5782 
5783 	/*
5784 	 * we can't just flush_work here, because it might deadlock
5785 	 * (as we might get called from the same workqueue)
5786 	 */
5787 	cancel_delayed_work_sync(&wl->roc_complete_work);
5788 	wlcore_roc_completed(wl);
5789 
5790 	return 0;
5791 }
5792 
5793 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5794 				    struct ieee80211_vif *vif,
5795 				    struct ieee80211_link_sta *link_sta,
5796 				    u32 changed)
5797 {
5798 	struct ieee80211_sta *sta = link_sta->sta;
5799 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5800 
5801 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5802 
5803 	if (!(changed & IEEE80211_RC_BW_CHANGED))
5804 		return;
5805 
5806 	/* this callback is atomic, so schedule a new work */
5807 	wlvif->rc_update_bw = sta->deflink.bandwidth;
5808 	memcpy(&wlvif->rc_ht_cap, &sta->deflink.ht_cap,
5809 	       sizeof(sta->deflink.ht_cap));
5810 	ieee80211_queue_work(hw, &wlvif->rc_update_work);
5811 }
5812 
5813 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5814 				     struct ieee80211_vif *vif,
5815 				     struct ieee80211_sta *sta,
5816 				     struct station_info *sinfo)
5817 {
5818 	struct wl1271 *wl = hw->priv;
5819 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5820 	s8 rssi_dbm;
5821 	int ret;
5822 
5823 	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5824 
5825 	mutex_lock(&wl->mutex);
5826 
5827 	if (unlikely(wl->state != WLCORE_STATE_ON))
5828 		goto out;
5829 
5830 	ret = pm_runtime_resume_and_get(wl->dev);
5831 	if (ret < 0)
5832 		goto out_sleep;
5833 
5834 	ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5835 	if (ret < 0)
5836 		goto out_sleep;
5837 
5838 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
5839 	sinfo->signal = rssi_dbm;
5840 
5841 out_sleep:
5842 	pm_runtime_mark_last_busy(wl->dev);
5843 	pm_runtime_put_autosuspend(wl->dev);
5844 
5845 out:
5846 	mutex_unlock(&wl->mutex);
5847 }
5848 
5849 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5850 					     struct ieee80211_sta *sta)
5851 {
5852 	struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5853 	struct wl1271 *wl = hw->priv;
5854 	u8 hlid = wl_sta->hlid;
5855 
5856 	/* return in units of Kbps */
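	/* e.g. a firmware-reported 54 Mbps link is returned as 54000 Kbps */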
5857 	return (wl->links[hlid].fw_rate_mbps * 1000);
5858 }
5859 
5860 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5861 {
5862 	struct wl1271 *wl = hw->priv;
5863 	bool ret = false;
5864 
5865 	mutex_lock(&wl->mutex);
5866 
5867 	if (unlikely(wl->state != WLCORE_STATE_ON))
5868 		goto out;
5869 
5870 	/* packets are considered pending if in the TX queue or the FW */
5871 	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5872 out:
5873 	mutex_unlock(&wl->mutex);
5874 
5875 	return ret;
5876 }
5877 
5878 /* can't be const, mac80211 writes to this */
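/* .bitrate values are in units of 100 Kbps (10 == 1 Mbps, 540 == 54 Mbps) */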
5879 static struct ieee80211_rate wl1271_rates[] = {
5880 	{ .bitrate = 10,
5881 	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5882 	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5883 	{ .bitrate = 20,
5884 	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5885 	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5886 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5887 	{ .bitrate = 55,
5888 	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5889 	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5890 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5891 	{ .bitrate = 110,
5892 	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5893 	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5894 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5895 	{ .bitrate = 60,
5896 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5897 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5898 	{ .bitrate = 90,
5899 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5900 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5901 	{ .bitrate = 120,
5902 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5903 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5904 	{ .bitrate = 180,
5905 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5906 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5907 	{ .bitrate = 240,
5908 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5909 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5910 	{ .bitrate = 360,
5911 	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5912 	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5913 	{ .bitrate = 480,
5914 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5915 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5916 	{ .bitrate = 540,
5917 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5918 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5919 };
5920 
5921 /* can't be const, mac80211 writes to this */
5922 static struct ieee80211_channel wl1271_channels[] = {
5923 	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5924 	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5925 	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5926 	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5927 	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5928 	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5929 	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5930 	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5931 	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5932 	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5933 	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5934 	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5935 	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5936 	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5937 };
5938 
5939 /* can't be const, mac80211 writes to this */
5940 static struct ieee80211_supported_band wl1271_band_2ghz = {
5941 	.channels = wl1271_channels,
5942 	.n_channels = ARRAY_SIZE(wl1271_channels),
5943 	.bitrates = wl1271_rates,
5944 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5945 };
5946 
5947 /* 5 GHz data rates for WL1273 */
5948 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5949 	{ .bitrate = 60,
5950 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5951 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5952 	{ .bitrate = 90,
5953 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5954 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5955 	{ .bitrate = 120,
5956 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5957 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5958 	{ .bitrate = 180,
5959 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5960 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5961 	{ .bitrate = 240,
5962 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5963 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5964 	{ .bitrate = 360,
5965 	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5966 	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5967 	{ .bitrate = 480,
5968 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5969 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5970 	{ .bitrate = 540,
5971 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5972 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5973 };
5974 
5975 /* 5 GHz band channels for WL1273 */
5976 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5977 	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5978 	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5979 	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5980 	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5981 	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5982 	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5983 	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5984 	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5985 	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5986 	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5987 	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5988 	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5989 	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5990 	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5991 	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5992 	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5993 	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5994 	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5995 	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5996 	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5997 	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5998 	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5999 	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
6000 	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
6001 	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
6002 	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
6003 	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
6004 	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
6005 	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
6006 	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
6007 	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
6008 };
6009 
6010 static struct ieee80211_supported_band wl1271_band_5ghz = {
6011 	.channels = wl1271_channels_5ghz,
6012 	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
6013 	.bitrates = wl1271_rates_5ghz,
6014 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
6015 };
6016 
6017 static const struct ieee80211_ops wl1271_ops = {
6018 	.start = wl1271_op_start,
6019 	.stop = wlcore_op_stop,
6020 	.add_interface = wl1271_op_add_interface,
6021 	.remove_interface = wl1271_op_remove_interface,
6022 	.change_interface = wl12xx_op_change_interface,
6023 #ifdef CONFIG_PM
6024 	.suspend = wl1271_op_suspend,
6025 	.resume = wl1271_op_resume,
6026 #endif
6027 	.config = wl1271_op_config,
6028 	.prepare_multicast = wl1271_op_prepare_multicast,
6029 	.configure_filter = wl1271_op_configure_filter,
6030 	.tx = wl1271_op_tx,
6031 	.wake_tx_queue = ieee80211_handle_wake_tx_queue,
6032 	.set_key = wlcore_op_set_key,
6033 	.hw_scan = wl1271_op_hw_scan,
6034 	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
6035 	.sched_scan_start = wl1271_op_sched_scan_start,
6036 	.sched_scan_stop = wl1271_op_sched_scan_stop,
6037 	.bss_info_changed = wl1271_op_bss_info_changed,
6038 	.set_frag_threshold = wl1271_op_set_frag_threshold,
6039 	.set_rts_threshold = wl1271_op_set_rts_threshold,
6040 	.conf_tx = wl1271_op_conf_tx,
6041 	.get_tsf = wl1271_op_get_tsf,
6042 	.get_survey = wl1271_op_get_survey,
6043 	.sta_state = wl12xx_op_sta_state,
6044 	.ampdu_action = wl1271_op_ampdu_action,
6045 	.tx_frames_pending = wl1271_tx_frames_pending,
6046 	.set_bitrate_mask = wl12xx_set_bitrate_mask,
6047 	.set_default_unicast_key = wl1271_op_set_default_key_idx,
6048 	.channel_switch = wl12xx_op_channel_switch,
6049 	.channel_switch_beacon = wlcore_op_channel_switch_beacon,
6050 	.flush = wlcore_op_flush,
6051 	.remain_on_channel = wlcore_op_remain_on_channel,
6052 	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
6053 	.add_chanctx = wlcore_op_add_chanctx,
6054 	.remove_chanctx = wlcore_op_remove_chanctx,
6055 	.change_chanctx = wlcore_op_change_chanctx,
6056 	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
6057 	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
6058 	.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
6059 	.link_sta_rc_update = wlcore_op_sta_rc_update,
6060 	.sta_statistics = wlcore_op_sta_statistics,
6061 	.get_expected_throughput = wlcore_op_get_expected_throughput,
6062 	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
6063 };
6064 
6065 
6066 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
6067 {
6068 	u8 idx;
6069 
6070 	BUG_ON(band >= 2);
6071 
6072 	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
6073 		wl1271_error("Illegal RX rate from HW: %d", rate);
6074 		return 0;
6075 	}
6076 
6077 	idx = wl->band_rate_to_idx[band][rate];
6078 	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
6079 		wl1271_error("Unsupported RX rate from HW: %d", rate);
6080 		return 0;
6081 	}
6082 
6083 	return idx;
6084 }
6085 
6086 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
6087 {
6088 	int i;
6089 
6090 	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
6091 		     oui, nic);
6092 
6093 	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
6094 		wl1271_warning("NIC part of the MAC address wraps around!");
6095 
6096 	for (i = 0; i < wl->num_mac_addr; i++) {
6097 		wl->addresses[i].addr[0] = (u8)(oui >> 16);
6098 		wl->addresses[i].addr[1] = (u8)(oui >> 8);
6099 		wl->addresses[i].addr[2] = (u8) oui;
6100 		wl->addresses[i].addr[3] = (u8)(nic >> 16);
6101 		wl->addresses[i].addr[4] = (u8)(nic >> 8);
6102 		wl->addresses[i].addr[5] = (u8) nic;
6103 		nic++;
6104 	}
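	/*
	 * Illustrative example (made-up values): oui 0x080028 and nic
	 * 0x000001 yield 08:00:28:00:00:01, 08:00:28:00:00:02, ... -- one
	 * address per supported MAC, with only the NIC part incrementing.
	 */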
6105 
6106 	/* we may be one address short at the most */
6107 	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
6108 
6109 	/*
6110 	 * turn on the LAA bit in the first address and use it as
6111 	 * the last address.
6112 	 */
6113 	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
6114 		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
6115 		memcpy(&wl->addresses[idx], &wl->addresses[0],
6116 		       sizeof(wl->addresses[0]));
6117 		/* LAA bit */
6118 		wl->addresses[idx].addr[0] |= BIT(1);
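		/* e.g. a base address of 08:... becomes a locally administered 0a:... copy */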
6119 	}
6120 
6121 	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
6122 	wl->hw->wiphy->addresses = wl->addresses;
6123 }
6124 
6125 static int wl12xx_get_hw_info(struct wl1271 *wl)
6126 {
6127 	int ret;
6128 
6129 	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6130 	if (ret < 0)
6131 		goto out;
6132 
6133 	wl->fuse_oui_addr = 0;
6134 	wl->fuse_nic_addr = 0;
6135 
6136 	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6137 	if (ret < 0)
6138 		goto out;
6139 
6140 	if (wl->ops->get_mac)
6141 		ret = wl->ops->get_mac(wl);
6142 
6143 out:
6144 	return ret;
6145 }
6146 
6147 static int wl1271_register_hw(struct wl1271 *wl)
6148 {
6149 	int ret;
6150 	u32 oui_addr = 0, nic_addr = 0;
6151 	struct platform_device *pdev = wl->pdev;
6152 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6153 
6154 	if (wl->mac80211_registered)
6155 		return 0;
6156 
6157 	if (wl->nvs_len >= 12) {
6158 		/* NOTE: The wl->nvs->nvs element must be first; to
6159 		 * simplify the casting, we assume it is at the
6160 		 * beginning of the wl->nvs structure.
6161 		 */
6162 		u8 *nvs_ptr = (u8 *)wl->nvs;
6163 
6164 		oui_addr =
6165 			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6166 		nic_addr =
6167 			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6168 	}
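	/*
	 * With this layout the base WLAN MAC address works out to
	 * nvs[11]:nvs[10]:nvs[6]:nvs[5]:nvs[4]:nvs[3] once
	 * wl12xx_derive_mac_addresses() reassembles the OUI and NIC parts.
	 */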
6169 
6170 	/* if the MAC address is zeroed in the NVS derive from fuse */
6171 	if (oui_addr == 0 && nic_addr == 0) {
6172 		oui_addr = wl->fuse_oui_addr;
6173 		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6174 		nic_addr = wl->fuse_nic_addr + 1;
6175 	}
6176 
6177 	if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6178 		wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
6179 		if (!strcmp(pdev_data->family->name, "wl18xx")) {
6180 			wl1271_warning("This default nvs file can be removed from the file system");
6181 		} else {
6182 			wl1271_warning("Your device performance is not optimized.");
6183 			wl1271_warning("Please use the calibrator tool to configure your device.");
6184 		}
6185 
6186 		if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6187 			wl1271_warning("Fuse mac address is zero. using random mac");
6188 			/* Use TI oui and a random nic */
6189 			oui_addr = WLCORE_TI_OUI_ADDRESS;
6190 			nic_addr = get_random_u32();
6191 		} else {
6192 			oui_addr = wl->fuse_oui_addr;
6193 			/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6194 			nic_addr = wl->fuse_nic_addr + 1;
6195 		}
6196 	}
6197 
6198 	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6199 
6200 	ret = ieee80211_register_hw(wl->hw);
6201 	if (ret < 0) {
6202 		wl1271_error("unable to register mac80211 hw: %d", ret);
6203 		goto out;
6204 	}
6205 
6206 	wl->mac80211_registered = true;
6207 
6208 	wl1271_debugfs_init(wl);
6209 
6210 	wl1271_notice("loaded");
6211 
6212 out:
6213 	return ret;
6214 }
6215 
6216 static void wl1271_unregister_hw(struct wl1271 *wl)
6217 {
6218 	if (wl->plt)
6219 		wl1271_plt_stop(wl);
6220 
6221 	ieee80211_unregister_hw(wl->hw);
6222 	wl->mac80211_registered = false;
6223 
6224 }
6225 
6226 static int wl1271_init_ieee80211(struct wl1271 *wl)
6227 {
6228 	int i;
6229 	static const u32 cipher_suites[] = {
6230 		WLAN_CIPHER_SUITE_WEP40,
6231 		WLAN_CIPHER_SUITE_WEP104,
6232 		WLAN_CIPHER_SUITE_TKIP,
6233 		WLAN_CIPHER_SUITE_CCMP,
6234 		WL1271_CIPHER_SUITE_GEM,
6235 	};
6236 
6237 	/* The tx descriptor buffer */
6238 	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6239 
6240 	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6241 		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6242 
6243 	/* unit us */
6244 	/* FIXME: find a proper value */
6245 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6246 
6247 	ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6248 	ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6249 	ieee80211_hw_set(wl->hw, SUPPORTS_PER_STA_GTK);
6250 	ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6251 	ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6252 	ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6253 	ieee80211_hw_set(wl->hw, AP_LINK_PS);
6254 	ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6255 	ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6256 	ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6257 	ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6258 	ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6259 	ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6260 	ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6261 	ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6262 
6263 	wl->hw->wiphy->cipher_suites = cipher_suites;
6264 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6265 
6266 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6267 					 BIT(NL80211_IFTYPE_AP) |
6268 					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6269 					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6270 #ifdef CONFIG_MAC80211_MESH
6271 					 BIT(NL80211_IFTYPE_MESH_POINT) |
6272 #endif
6273 					 BIT(NL80211_IFTYPE_P2P_GO);
6274 
6275 	wl->hw->wiphy->max_scan_ssids = 1;
6276 	wl->hw->wiphy->max_sched_scan_ssids = 16;
6277 	wl->hw->wiphy->max_match_sets = 16;
6278 	/*
6279 	 * Maximum length of elements in scanning probe request templates
6280 	 * should be the maximum length possible for a template, without
6281 	 * the IEEE80211 header of the template
6282 	 */
6283 	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6284 			sizeof(struct ieee80211_header);
6285 
6286 	wl->hw->wiphy->max_sched_scan_reqs = 1;
6287 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6288 		sizeof(struct ieee80211_header);
6289 
6290 	wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6291 
6292 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6293 				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6294 				WIPHY_FLAG_HAS_CHANNEL_SWITCH |
6295 				WIPHY_FLAG_IBSS_RSN;
6296 
6297 	wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6298 
6299 	/* make sure all our channels fit in the scanned_ch bitmask */
6300 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6301 		     ARRAY_SIZE(wl1271_channels_5ghz) >
6302 		     WL1271_MAX_CHANNELS);
6303 	/*
6304 	 * clear channel flags from the previous usage
6305 	 * and restore max_power & max_antenna_gain values.
6306 	 */
6307 	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6308 		wl1271_band_2ghz.channels[i].flags = 0;
6309 		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6310 		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6311 	}
6312 
6313 	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6314 		wl1271_band_5ghz.channels[i].flags = 0;
6315 		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6316 		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6317 	}
6318 
6319 	/*
6320 	 * We keep local copies of the band structs because we need to
6321 	 * modify them on a per-device basis.
6322 	 */
6323 	memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6324 	       sizeof(wl1271_band_2ghz));
6325 	memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6326 	       &wl->ht_cap[NL80211_BAND_2GHZ],
6327 	       sizeof(*wl->ht_cap));
6328 	memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6329 	       sizeof(wl1271_band_5ghz));
6330 	memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6331 	       &wl->ht_cap[NL80211_BAND_5GHZ],
6332 	       sizeof(*wl->ht_cap));
6333 
6334 	wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6335 		&wl->bands[NL80211_BAND_2GHZ];
6336 	wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6337 		&wl->bands[NL80211_BAND_5GHZ];
6338 
6339 	/*
6340 	 * allow 4 queues per mac address we support +
6341 	 * 1 cab queue per mac + one global offchannel Tx queue
6342 	 */
6343 	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
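	/*
	 * For example, assuming the driver's usual NUM_TX_QUEUES of 4 and
	 * three supported MAC addresses, this works out to
	 * (4 + 1) * 3 + 1 = 16 hardware queues.
	 */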
6344 
6345 	/* the last queue is the offchannel queue */
6346 	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6347 	wl->hw->max_rates = 1;
6348 
6349 	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6350 
6351 	/* the FW answers probe-requests in AP-mode */
6352 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6353 	wl->hw->wiphy->probe_resp_offload =
6354 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6355 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6356 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6357 
6358 	/* allowed interface combinations */
6359 	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6360 	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6361 
6362 	/* register vendor commands */
6363 	wlcore_set_vendor_commands(wl->hw->wiphy);
6364 
6365 	SET_IEEE80211_DEV(wl->hw, wl->dev);
6366 
6367 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
6368 	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6369 
6370 	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6371 
6372 	return 0;
6373 }
6374 
6375 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6376 				     u32 mbox_size)
6377 {
6378 	struct ieee80211_hw *hw;
6379 	struct wl1271 *wl;
6380 	int i, j, ret;
6381 	unsigned int order;
6382 
6383 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6384 	if (!hw) {
6385 		wl1271_error("could not alloc ieee80211_hw");
6386 		ret = -ENOMEM;
6387 		goto err_hw_alloc;
6388 	}
6389 
6390 	wl = hw->priv;
6391 	memset(wl, 0, sizeof(*wl));
6392 
6393 	wl->priv = kzalloc(priv_size, GFP_KERNEL);
6394 	if (!wl->priv) {
6395 		wl1271_error("could not alloc wl priv");
6396 		ret = -ENOMEM;
6397 		goto err_priv_alloc;
6398 	}
6399 
6400 	INIT_LIST_HEAD(&wl->wlvif_list);
6401 
6402 	wl->hw = hw;
6403 
6404 	/*
6405 	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6406 	 * we don't allocate any additional resource here, so that's fine.
6407 	 */
6408 	for (i = 0; i < NUM_TX_QUEUES; i++)
6409 		for (j = 0; j < WLCORE_MAX_LINKS; j++)
6410 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
6411 
6412 	skb_queue_head_init(&wl->deferred_rx_queue);
6413 	skb_queue_head_init(&wl->deferred_tx_queue);
6414 
6415 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6416 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
6417 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6418 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6419 	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6420 	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6421 
6422 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6423 	if (!wl->freezable_wq) {
6424 		ret = -ENOMEM;
6425 		goto err_hw;
6426 	}
6427 
6428 	wl->channel = 0;
6429 	wl->rx_counter = 0;
6430 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6431 	wl->band = NL80211_BAND_2GHZ;
6432 	wl->channel_type = NL80211_CHAN_NO_HT;
6433 	wl->flags = 0;
6434 	wl->sg_enabled = true;
6435 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
6436 	wl->recovery_count = 0;
6437 	wl->hw_pg_ver = -1;
6438 	wl->ap_ps_map = 0;
6439 	wl->ap_fw_ps_map = 0;
6440 	wl->quirks = 0;
6441 	wl->system_hlid = WL12XX_SYSTEM_HLID;
6442 	wl->active_sta_count = 0;
6443 	wl->active_link_count = 0;
6444 	wl->fwlog_size = 0;
6445 
6446 	/* The system link is always allocated */
6447 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6448 
6449 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6450 	for (i = 0; i < wl->num_tx_desc; i++)
6451 		wl->tx_frames[i] = NULL;
6452 
6453 	spin_lock_init(&wl->wl_lock);
6454 
6455 	wl->state = WLCORE_STATE_OFF;
6456 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6457 	mutex_init(&wl->mutex);
6458 	mutex_init(&wl->flush_mutex);
6459 	init_completion(&wl->nvs_loading_complete);
6460 
6461 	order = get_order(aggr_buf_size);
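	/*
	 * __get_free_pages() allocates 2^order pages, so the aggregation
	 * buffer is rounded up to a power-of-two number of pages;
	 * aggr_buf_size below still records the size that was requested.
	 */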
6462 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6463 	if (!wl->aggr_buf) {
6464 		ret = -ENOMEM;
6465 		goto err_wq;
6466 	}
6467 	wl->aggr_buf_size = aggr_buf_size;
6468 
6469 	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6470 	if (!wl->dummy_packet) {
6471 		ret = -ENOMEM;
6472 		goto err_aggr;
6473 	}
6474 
6475 	/* Allocate one page for the FW log */
6476 	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6477 	if (!wl->fwlog) {
6478 		ret = -ENOMEM;
6479 		goto err_dummy_packet;
6480 	}
6481 
6482 	wl->mbox_size = mbox_size;
6483 	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6484 	if (!wl->mbox) {
6485 		ret = -ENOMEM;
6486 		goto err_fwlog;
6487 	}
6488 
6489 	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6490 	if (!wl->buffer_32) {
6491 		ret = -ENOMEM;
6492 		goto err_mbox;
6493 	}
6494 
6495 	return hw;
6496 
6497 err_mbox:
6498 	kfree(wl->mbox);
6499 
6500 err_fwlog:
6501 	free_page((unsigned long)wl->fwlog);
6502 
6503 err_dummy_packet:
6504 	dev_kfree_skb(wl->dummy_packet);
6505 
6506 err_aggr:
6507 	free_pages((unsigned long)wl->aggr_buf, order);
6508 
6509 err_wq:
6510 	destroy_workqueue(wl->freezable_wq);
6511 
6512 err_hw:
6513 	wl1271_debugfs_exit(wl);
6514 	kfree(wl->priv);
6515 
6516 err_priv_alloc:
6517 	ieee80211_free_hw(hw);
6518 
6519 err_hw_alloc:
6520 
6521 	return ERR_PTR(ret);
6522 }
6523 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6524 
6525 int wlcore_free_hw(struct wl1271 *wl)
6526 {
6527 	/* Unblock any fwlog readers */
6528 	mutex_lock(&wl->mutex);
6529 	wl->fwlog_size = -1;
6530 	mutex_unlock(&wl->mutex);
6531 
6532 	wlcore_sysfs_free(wl);
6533 
6534 	kfree(wl->buffer_32);
6535 	kfree(wl->mbox);
6536 	free_page((unsigned long)wl->fwlog);
6537 	dev_kfree_skb(wl->dummy_packet);
6538 	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6539 
6540 	wl1271_debugfs_exit(wl);
6541 
6542 	vfree(wl->fw);
6543 	wl->fw = NULL;
6544 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6545 	kfree(wl->nvs);
6546 	wl->nvs = NULL;
6547 
6548 	kfree(wl->raw_fw_status);
6549 	kfree(wl->fw_status);
6550 	kfree(wl->tx_res_if);
6551 	destroy_workqueue(wl->freezable_wq);
6552 
6553 	kfree(wl->priv);
6554 	ieee80211_free_hw(wl->hw);
6555 
6556 	return 0;
6557 }
6558 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6559 
6560 #ifdef CONFIG_PM
6561 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6562 	.flags = WIPHY_WOWLAN_ANY,
6563 	.n_patterns = WL1271_MAX_RX_FILTERS,
6564 	.pattern_min_len = 1,
6565 	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6566 };
6567 #endif
6568 
6569 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6570 {
6571 	return IRQ_WAKE_THREAD;
6572 }
6573 
6574 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6575 {
6576 	struct wl1271 *wl = context;
6577 	struct platform_device *pdev = wl->pdev;
6578 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6579 	struct resource *res;
6580 
6581 	int ret;
6582 	irq_handler_t hardirq_fn = NULL;
6583 
6584 	if (fw) {
6585 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6586 		if (!wl->nvs) {
6587 			wl1271_error("Could not allocate nvs data");
6588 			goto out;
6589 		}
6590 		wl->nvs_len = fw->size;
6591 	} else if (pdev_data->family->nvs_name) {
6592 		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6593 			     pdev_data->family->nvs_name);
6594 		wl->nvs = NULL;
6595 		wl->nvs_len = 0;
6596 	} else {
6597 		wl->nvs = NULL;
6598 		wl->nvs_len = 0;
6599 	}
6600 
6601 	ret = wl->ops->setup(wl);
6602 	if (ret < 0)
6603 		goto out_free_nvs;
6604 
6605 	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6606 
6607 	/* adjust some runtime configuration parameters */
6608 	wlcore_adjust_conf(wl);
6609 
6610 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6611 	if (!res) {
6612 		wl1271_error("Could not get IRQ resource");
6613 		goto out_free_nvs;
6614 	}
6615 
6616 	wl->irq = res->start;
6617 	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6618 	wl->if_ops = pdev_data->if_ops;
6619 
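	/*
	 * For edge-triggered interrupts install a minimal primary handler
	 * that only wakes the IRQ thread, since an edge arriving while the
	 * line is masked by IRQF_ONESHOT could otherwise be missed;
	 * level-triggered lines keep the oneshot behaviour instead.
	 */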
6620 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6621 		hardirq_fn = wlcore_hardirq;
6622 	else
6623 		wl->irq_flags |= IRQF_ONESHOT;
6624 
6625 	ret = wl12xx_set_power_on(wl);
6626 	if (ret < 0)
6627 		goto out_free_nvs;
6628 
6629 	ret = wl12xx_get_hw_info(wl);
6630 	if (ret < 0) {
6631 		wl1271_error("couldn't get hw info");
6632 		wl1271_power_off(wl);
6633 		goto out_free_nvs;
6634 	}
6635 
6636 	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6637 				   wl->irq_flags, pdev->name, wl);
6638 	if (ret < 0) {
6639 		wl1271_error("interrupt configuration failed");
6640 		wl1271_power_off(wl);
6641 		goto out_free_nvs;
6642 	}
6643 
6644 #ifdef CONFIG_PM
6645 	device_init_wakeup(wl->dev, true);
6646 
6647 	ret = enable_irq_wake(wl->irq);
6648 	if (!ret) {
6649 		wl->irq_wake_enabled = true;
6650 		if (pdev_data->pwr_in_suspend)
6651 			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6652 	}
6653 
6654 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
6655 	if (res) {
6656 		wl->wakeirq = res->start;
6657 		wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK;
6658 		ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq);
6659 		if (ret)
6660 			wl->wakeirq = -ENODEV;
6661 	} else {
6662 		wl->wakeirq = -ENODEV;
6663 	}
6664 #endif
6665 	disable_irq(wl->irq);
6666 	wl1271_power_off(wl);
6667 
6668 	ret = wl->ops->identify_chip(wl);
6669 	if (ret < 0)
6670 		goto out_irq;
6671 
6672 	ret = wl1271_init_ieee80211(wl);
6673 	if (ret)
6674 		goto out_irq;
6675 
6676 	ret = wl1271_register_hw(wl);
6677 	if (ret)
6678 		goto out_irq;
6679 
6680 	ret = wlcore_sysfs_init(wl);
6681 	if (ret)
6682 		goto out_unreg;
6683 
6684 	wl->initialized = true;
6685 	goto out;
6686 
6687 out_unreg:
6688 	wl1271_unregister_hw(wl);
6689 
6690 out_irq:
6691 	if (wl->wakeirq >= 0)
6692 		dev_pm_clear_wake_irq(wl->dev);
6693 	device_init_wakeup(wl->dev, false);
6694 	free_irq(wl->irq, wl);
6695 
6696 out_free_nvs:
6697 	kfree(wl->nvs);
6698 
6699 out:
6700 	release_firmware(fw);
6701 	complete_all(&wl->nvs_loading_complete);
6702 }
6703 
6704 static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
6705 {
6706 	struct wl1271 *wl = dev_get_drvdata(dev);
6707 	struct wl12xx_vif *wlvif;
6708 	int error;
6709 
6710 	/* We do not enter elp sleep in PLT mode */
6711 	if (wl->plt)
6712 		return 0;
6713 
6714 	/* Nothing to do if no ELP mode requested */
6715 	if (wl->sleep_auth != WL1271_PSM_ELP)
6716 		return 0;
6717 
6718 	wl12xx_for_each_wlvif(wl, wlvif) {
6719 		if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
6720 		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
6721 			return -EBUSY;
6722 	}
6723 
6724 	wl1271_debug(DEBUG_PSM, "chip to elp");
6725 	error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
6726 	if (error < 0) {
6727 		wl12xx_queue_recovery_work(wl);
6728 
6729 		return error;
6730 	}
6731 
6732 	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6733 
6734 	return 0;
6735 }
6736 
6737 static int __maybe_unused wlcore_runtime_resume(struct device *dev)
6738 {
6739 	struct wl1271 *wl = dev_get_drvdata(dev);
6740 	DECLARE_COMPLETION_ONSTACK(compl);
6741 	unsigned long flags;
6742 	int ret;
6743 	unsigned long start_time = jiffies;
6744 	bool recovery = false;
6745 
6746 	/* Nothing to do if no ELP mode requested */
6747 	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
6748 		return 0;
6749 
6750 	wl1271_debug(DEBUG_PSM, "waking up chip from elp");
6751 
6752 	spin_lock_irqsave(&wl->wl_lock, flags);
6753 	wl->elp_compl = &compl;
6754 	spin_unlock_irqrestore(&wl->wl_lock, flags);
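	/*
	 * The interrupt path is expected to complete wl->elp_compl once the
	 * firmware signals it is awake; if the IRQ thread is already
	 * running, the wait below is skipped.
	 */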
6755 
6756 	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
6757 	if (ret < 0) {
6758 		recovery = true;
6759 	} else if (!test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) {
6760 		ret = wait_for_completion_timeout(&compl,
6761 			msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
6762 		if (ret == 0) {
6763 			wl1271_warning("ELP wakeup timeout!");
6764 			recovery = true;
6765 		}
6766 	}
6767 
6768 	spin_lock_irqsave(&wl->wl_lock, flags);
6769 	wl->elp_compl = NULL;
6770 	spin_unlock_irqrestore(&wl->wl_lock, flags);
6771 	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6772 
6773 	if (recovery) {
6774 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
6775 		wl12xx_queue_recovery_work(wl);
6776 	} else {
6777 		wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
6778 			     jiffies_to_msecs(jiffies - start_time));
6779 	}
6780 
6781 	return 0;
6782 }
6783 
6784 static const struct dev_pm_ops wlcore_pm_ops = {
6785 	SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
6786 			   wlcore_runtime_resume,
6787 			   NULL)
6788 };
6789 
6790 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6791 {
6792 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6793 	const char *nvs_name;
6794 	int ret = 0;
6795 
6796 	if (!wl->ops || !wl->ptable || !pdev_data)
6797 		return -EINVAL;
6798 
6799 	wl->dev = &pdev->dev;
6800 	wl->pdev = pdev;
6801 	platform_set_drvdata(pdev, wl);
6802 
6803 	if (pdev_data->family && pdev_data->family->nvs_name) {
6804 		nvs_name = pdev_data->family->nvs_name;
6805 		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
6806 					      nvs_name, &pdev->dev, GFP_KERNEL,
6807 					      wl, wlcore_nvs_cb);
6808 		if (ret < 0) {
6809 			wl1271_error("request_firmware_nowait failed for %s: %d",
6810 				     nvs_name, ret);
6811 			complete_all(&wl->nvs_loading_complete);
6812 		}
6813 	} else {
6814 		wlcore_nvs_cb(NULL, wl);
6815 	}
6816 
6817 	wl->dev->driver->pm = &wlcore_pm_ops;
6818 	pm_runtime_set_autosuspend_delay(wl->dev, 50);
6819 	pm_runtime_use_autosuspend(wl->dev);
6820 	pm_runtime_enable(wl->dev);
6821 
6822 	return ret;
6823 }
6824 EXPORT_SYMBOL_GPL(wlcore_probe);
6825 
6826 void wlcore_remove(struct platform_device *pdev)
6827 {
6828 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6829 	struct wl1271 *wl = platform_get_drvdata(pdev);
6830 	int error;
6831 
6832 	error = pm_runtime_get_sync(wl->dev);
6833 	if (error < 0)
6834 		dev_warn(wl->dev, "PM runtime failed: %i\n", error);
6835 
6836 	wl->dev->driver->pm = NULL;
6837 
6838 	if (pdev_data->family && pdev_data->family->nvs_name)
6839 		wait_for_completion(&wl->nvs_loading_complete);
6840 	if (!wl->initialized)
6841 		return;
6842 
6843 	if (wl->wakeirq >= 0) {
6844 		dev_pm_clear_wake_irq(wl->dev);
6845 		wl->wakeirq = -ENODEV;
6846 	}
6847 
6848 	device_init_wakeup(wl->dev, false);
6849 
6850 	if (wl->irq_wake_enabled)
6851 		disable_irq_wake(wl->irq);
6852 
6853 	wl1271_unregister_hw(wl);
6854 
6855 	pm_runtime_put_sync(wl->dev);
6856 	pm_runtime_dont_use_autosuspend(wl->dev);
6857 	pm_runtime_disable(wl->dev);
6858 
6859 	free_irq(wl->irq, wl);
6860 	wlcore_free_hw(wl);
6861 }
6862 EXPORT_SYMBOL_GPL(wlcore_remove);
6863 
6864 u32 wl12xx_debug_level = DEBUG_NONE;
6865 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6866 module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
6867 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6868 
6869 module_param_named(fwlog, fwlog_param, charp, 0);
6870 MODULE_PARM_DESC(fwlog,
6871 		 "FW logger options: continuous, dbgpins or disable");
6872 
6873 module_param(fwlog_mem_blocks, int, 0600);
6874 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6875 
6876 module_param(bug_on_recovery, int, 0600);
6877 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6878 
6879 module_param(no_recovery, int, 0600);
6880 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6881 
6882 MODULE_DESCRIPTION("TI WLAN core driver");
6883 MODULE_LICENSE("GPL");
6884 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6885 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6886