1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * This file is part of wlcore
4 *
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
7 */
8
9 #include <linux/module.h>
10 #include <linux/firmware.h>
11 #include <linux/etherdevice.h>
12 #include <linux/vmalloc.h>
13 #include <linux/interrupt.h>
14 #include <linux/irq.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/pm_wakeirq.h>
17
18 #include "wlcore.h"
19 #include "debug.h"
20 #include "wl12xx_80211.h"
21 #include "io.h"
22 #include "tx.h"
23 #include "ps.h"
24 #include "init.h"
25 #include "debugfs.h"
26 #include "testmode.h"
27 #include "vendor_cmd.h"
28 #include "scan.h"
29 #include "hw_ops.h"
30 #include "sysfs.h"
31
32 #define WL1271_BOOT_RETRIES 3
33 #define WL1271_WAKEUP_TIMEOUT 500
34
35 static char *fwlog_param;
36 static int fwlog_mem_blocks = -1;
37 static int bug_on_recovery = -1;
38 static int no_recovery = -1;
39
40 static void __wl1271_op_remove_interface(struct wl1271 *wl,
41 struct ieee80211_vif *vif,
42 bool reset_tx_queues);
43 static void wlcore_op_stop_locked(struct wl1271 *wl);
44 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
45
wl12xx_set_authorized(struct wl1271 * wl,struct wl12xx_vif * wlvif)46 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
47 {
48 int ret;
49
50 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
51 return -EINVAL;
52
53 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
54 return 0;
55
56 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
57 return 0;
58
59 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
60 if (ret < 0)
61 return ret;
62
63 wl1271_info("Association completed.");
64 return 0;
65 }
66
wl1271_reg_notify(struct wiphy * wiphy,struct regulatory_request * request)67 static void wl1271_reg_notify(struct wiphy *wiphy,
68 struct regulatory_request *request)
69 {
70 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
71 struct wl1271 *wl = hw->priv;
72
73 /* copy the current dfs region */
74 if (request)
75 wl->dfs_region = request->dfs_region;
76
77 wlcore_regdomain_config(wl);
78 }
79
/*
 * Enable or disable RX streaming in the firmware and mirror the new
 * state in the vif flags. Caller must hold wl->mutex.
 */
static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   bool enable)
{
	int err;

	/* we should hold wl->mutex */
	err = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
	if (err < 0)
		return err;

	/* only track the flag once the firmware accepted the change */
	if (enable)
		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
	else
		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);

	return err;
}
97
98 /*
99 * this function is being called when the rx_streaming interval
100 * has beed changed or rx_streaming should be disabled
101 */
wl1271_recalc_rx_streaming(struct wl1271 * wl,struct wl12xx_vif * wlvif)102 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
103 {
104 int ret = 0;
105 int period = wl->conf.rx_streaming.interval;
106
107 /* don't reconfigure if rx_streaming is disabled */
108 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
109 goto out;
110
111 /* reconfigure/disable according to new streaming_period */
112 if (period &&
113 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
114 (wl->conf.rx_streaming.always ||
115 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
116 ret = wl1271_set_rx_streaming(wl, wlvif, true);
117 else {
118 ret = wl1271_set_rx_streaming(wl, wlvif, false);
119 /* don't cancel_work_sync since we might deadlock */
120 timer_delete_sync(&wlvif->rx_streaming_timer);
121 }
122 out:
123 return ret;
124 }
125
/*
 * Work item: enable RX streaming on a STA vif and arm the inactivity
 * timer that will disable it again after rx_streaming.duration ms.
 */
static void wl1271_rx_streaming_enable_work(struct work_struct *work)
{
	int ret;
	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
						rx_streaming_enable_work);
	struct wl1271 *wl = wlvif->wl;

	mutex_lock(&wl->mutex);

	/*
	 * Bail out if streaming is already started, the STA is not
	 * associated, or streaming is only wanted under soft-gemini
	 * coexistence and that is not currently active.
	 */
	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
	    (!wl->conf.rx_streaming.always &&
	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
		goto out;

	/* an interval of 0 means rx_streaming is disabled by config */
	if (!wl->conf.rx_streaming.interval)
		goto out;

	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out;

	ret = wl1271_set_rx_streaming(wl, wlvif, true);
	if (ret < 0)
		goto out_sleep;

	/* stop it after some time of inactivity */
	mod_timer(&wlvif->rx_streaming_timer,
		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));

out_sleep:
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
}
161
wl1271_rx_streaming_disable_work(struct work_struct * work)162 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
163 {
164 int ret;
165 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
166 rx_streaming_disable_work);
167 struct wl1271 *wl = wlvif->wl;
168
169 mutex_lock(&wl->mutex);
170
171 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
172 goto out;
173
174 ret = pm_runtime_resume_and_get(wl->dev);
175 if (ret < 0)
176 goto out;
177
178 ret = wl1271_set_rx_streaming(wl, wlvif, false);
179 if (ret)
180 goto out_sleep;
181
182 out_sleep:
183 pm_runtime_put_autosuspend(wl->dev);
184 out:
185 mutex_unlock(&wl->mutex);
186 }
187
/*
 * Inactivity timer callback: runs in atomic (timer) context, so it only
 * queues the work item that actually disables RX streaming.
 */
static void wl1271_rx_streaming_timer(struct timer_list *t)
{
	struct wl12xx_vif *wlvif = timer_container_of(wlvif, t,
						      rx_streaming_timer);
	struct wl1271 *wl = wlvif->wl;
	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
}
195
196 /* wl->mutex must be taken */
void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
{
	/* if the watchdog is not armed, don't do anything */
	if (wl->tx_allocated_blocks == 0)
		return;

	/* restart the timeout window: cancel any pending run and requeue */
	cancel_delayed_work(&wl->tx_watchdog_work);
	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
}
207
/*
 * Work item: propagate a rate-control update to the firmware. Mesh
 * vifs get their HT capabilities programmed explicitly; other vifs go
 * through the chip-specific sta_rc_update op.
 */
static void wlcore_rc_update_work(struct work_struct *work)
{
	int ret;
	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
						rc_update_work);
	struct wl1271 *wl = wlvif->wl;
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

	mutex_lock(&wl->mutex);

	/* the chip may have been stopped/recovered since the work was queued */
	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out;

	if (ieee80211_vif_is_mesh(vif)) {
		ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
						     true, wlvif->sta.hlid);
		if (ret < 0)
			goto out_sleep;
	} else {
		wlcore_hw_sta_rc_update(wl, wlvif);
	}

out_sleep:
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
}
239
/*
 * Delayed work: fires when no Tx blocks were freed by the firmware for
 * tx_watchdog_timeout ms while some are still allocated. Benign causes
 * (ROC, scan, AP buffering for sleeping stations) just re-arm the
 * watchdog; otherwise the firmware is assumed stuck and a recovery is
 * queued.
 */
static void wl12xx_tx_watchdog_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;

	dwork = to_delayed_work(work);
	wl = container_of(dwork, struct wl1271, tx_watchdog_work);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* Tx went out in the meantime - everything is ok */
	if (unlikely(wl->tx_allocated_blocks == 0))
		goto out;

	/*
	 * if a ROC is in progress, we might not have any Tx for a long
	 * time (e.g. pending Tx on the non-ROC channels)
	 */
	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
			     wl->conf.tx.tx_watchdog_timeout);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/*
	 * if a scan is in progress, we might not have any Tx for a long
	 * time
	 */
	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
			     wl->conf.tx.tx_watchdog_timeout);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/*
	 * AP might cache a frame for a long time for a sleeping station,
	 * so rearm the timer if there's an AP interface with stations. If
	 * Tx is genuinely stuck we will most hopefully discover it when all
	 * stations are removed due to inactivity.
	 */
	if (wl->active_sta_count) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
			     " %d stations",
			      wl->conf.tx.tx_watchdog_timeout,
			      wl->active_sta_count);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
		     wl->conf.tx.tx_watchdog_timeout);
	wl12xx_queue_recovery_work(wl);

out:
	mutex_unlock(&wl->mutex);
}
301
/*
 * Apply module-parameter overrides (fwlog mode/output, bug_on_recovery,
 * no_recovery) on top of the compiled-in configuration.
 */
static void wlcore_adjust_conf(struct wl1271 *wl)
{

	if (fwlog_param) {
		if (!strcmp(fwlog_param, "continuous")) {
			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
		} else if (!strcmp(fwlog_param, "dbgpins")) {
			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
		} else if (!strcmp(fwlog_param, "disable")) {
			wl->conf.fwlog.mem_blocks = 0;
			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
		} else {
			/* unknown value: keep the compiled-in default */
			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
		}
	}

	/* -1 means "not given on the command line": keep the default */
	if (bug_on_recovery != -1)
		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;

	if (no_recovery != -1)
		wl->conf.recovery.no_recovery = (u8) no_recovery;
}
326
/*
 * Decide per-link whether to start or end firmware-level power-save
 * buffering, based on the FW-reported PS state and the number of
 * packets queued for the link.
 */
static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					u8 hlid, u8 tx_pkts)
{
	bool fw_ps;

	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);

	/*
	 * Wake up from high level PS if the STA is asleep with too little
	 * packets in FW or if the STA is awake.
	 */
	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_end(wl, wlvif, hlid);

	/*
	 * Start high-level PS if the STA is asleep with enough blocks in FW.
	 * Make an exception if this is the only connected link. In this
	 * case FW-memory congestion is less of a problem.
	 * Note that a single connected STA means 2*ap_count + 1 active links,
	 * since we must account for the global and broadcast AP links
	 * for each AP. The "fw_ps" check assures us the other link is a STA
	 * connected to the AP. Otherwise the FW would not set the PSM bit.
	 */
	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
355
/*
 * Refresh the cached FW power-save bitmap from the status block and
 * re-evaluate PS buffering for every station link of this AP vif.
 */
static void wl12xx_irq_update_links_status(struct wl1271 *wl,
					   struct wl12xx_vif *wlvif,
					   struct wl_fw_status *status)
{
	unsigned long cur_fw_ps_map;
	u8 hlid;

	cur_fw_ps_map = status->link_ps_bitmap;
	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
		wl1271_debug(DEBUG_PSM,
			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
			     wl->ap_fw_ps_map, cur_fw_ps_map,
			     wl->ap_fw_ps_map ^ cur_fw_ps_map);

		wl->ap_fw_ps_map = cur_fw_ps_map;
	}

	/* walk only the station links belonging to this vif */
	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
					    wl->links[hlid].allocated_pkts);
}
377
/*
 * Read and decode the firmware status block. Updates the per-queue and
 * per-link freed-packet accounting (with wrap-around handling for the
 * 8-bit/16-bit FW counters), the Tx block budget, the Tx watchdog and
 * the host/chipset time offset. Returns 0 or a negative bus error.
 */
static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
{
	struct wl12xx_vif *wlvifsta;
	struct wl12xx_vif *wlvifap;
	struct wl12xx_vif *wlvif;
	u32 old_tx_blk_count = wl->tx_blocks_available;
	int avail, freed_blocks;
	int i;
	int ret;
	struct wl1271_link *lnk;

	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
				   wl->raw_fw_status,
				   wl->fw_status_len, false);
	if (ret < 0)
		return ret;

	/* chip-specific conversion of the raw buffer into *status */
	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, status);

	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
		     "drv_rx_counter = %d, tx_results_counter = %d)",
		     status->intr,
		     status->fw_rx_counter,
		     status->drv_rx_counter,
		     status->tx_results_counter);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		/* prevent wrap-around in freed-packets counter */
		wl->tx_allocated_pkts[i] -=
				(status->counters.tx_released_pkts[i] -
				wl->tx_pkts_freed[i]) & 0xff;

		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
	}

	/* Find an authorized STA vif */
	wlvifsta = NULL;
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		if (wlvif->sta.hlid != WL12XX_INVALID_LINK_ID &&
		    test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags)) {
			wlvifsta = wlvif;
			break;
		}
	}

	/* Find a started AP vif */
	wlvifap = NULL;
	wl12xx_for_each_wlvif(wl, wlvif) {
		if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
		    wlvif->inconn_count == 0 &&
		    test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
			wlvifap = wlvif;
			break;
		}
	}

	for_each_set_bit(i, wl->links_map, wl->num_links) {
		u16 diff16, sec_pn16;
		u8 diff, tx_lnk_free_pkts;

		lnk = &wl->links[i];

		/* prevent wrap-around in freed-packets counter */
		tx_lnk_free_pkts = status->counters.tx_lnk_free_pkts[i];
		diff = (tx_lnk_free_pkts - lnk->prev_freed_pkts) & 0xff;

		if (diff) {
			lnk->allocated_pkts -= diff;
			lnk->prev_freed_pkts = tx_lnk_free_pkts;
		}

		/* Get the current sec_pn16 value if present */
		if (status->counters.tx_lnk_sec_pn16)
			sec_pn16 = __le16_to_cpu(status->counters.tx_lnk_sec_pn16[i]);
		else
			sec_pn16 = 0;
		/* prevent wrap-around in pn16 counter */
		diff16 = (sec_pn16 - lnk->prev_sec_pn16) & 0xffff;

		/* FIXME: since free_pkts is a 8-bit counter of packets that
		 * rolls over, it can become zero. If it is zero, then we
		 * omit processing below. Is that really correct?
		 */
		if (tx_lnk_free_pkts <= 0)
			continue;

		/* For a station that has an authorized link: */
		if (wlvifsta && wlvifsta->sta.hlid == i) {
			if (wlvifsta->encryption_type == KEY_TKIP ||
			    wlvifsta->encryption_type == KEY_AES) {
				if (diff16) {
					lnk->prev_sec_pn16 = sec_pn16;
					/* accumulate the prev_freed_pkts
					 * counter according to the PN from
					 * firmware
					 */
					lnk->total_freed_pkts += diff16;
				}
			} else {
				if (diff)
					/* accumulate the prev_freed_pkts
					 * counter according to the free packets
					 * count from firmware
					 */
					lnk->total_freed_pkts += diff;
			}
		}

		/* For an AP that has been started */
		if (wlvifap && test_bit(i, wlvifap->ap.sta_hlid_map)) {
			if (wlvifap->encryption_type == KEY_TKIP ||
			    wlvifap->encryption_type == KEY_AES) {
				if (diff16) {
					lnk->prev_sec_pn16 = sec_pn16;
					/* accumulate the prev_freed_pkts
					 * counter according to the PN from
					 * firmware
					 */
					lnk->total_freed_pkts += diff16;
				}
			} else {
				if (diff)
					/* accumulate the prev_freed_pkts
					 * counter according to the free packets
					 * count from firmware
					 */
					lnk->total_freed_pkts += diff;
			}
		}
	}

	/* prevent wrap-around in total blocks counter */
	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
		freed_blocks = status->total_released_blks -
			       wl->tx_blocks_freed;
	else
		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
			       status->total_released_blks;

	wl->tx_blocks_freed = status->total_released_blks;

	wl->tx_allocated_blocks -= freed_blocks;

	/*
	 * If the FW freed some blocks:
	 * If we still have allocated blocks - re-arm the timer, Tx is
	 * not stuck. Otherwise, cancel the timer (no Tx currently).
	 */
	if (freed_blocks) {
		if (wl->tx_allocated_blocks)
			wl12xx_rearm_tx_watchdog_locked(wl);
		else
			cancel_delayed_work(&wl->tx_watchdog_work);
	}

	avail = status->tx_total - wl->tx_allocated_blocks;

	/*
	 * The FW might change the total number of TX memblocks before
	 * we get a notification about blocks being released. Thus, the
	 * available blocks calculation might yield a temporary result
	 * which is lower than the actual available blocks. Keeping in
	 * mind that only blocks that were allocated can be moved from
	 * TX to RX, tx_blocks_available should never decrease here.
	 */
	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
				      avail);

	/* if more blocks are available now, tx work can be scheduled */
	if (wl->tx_blocks_available > old_tx_blk_count)
		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

	/* for AP update num of allocated TX blocks per link and ps status */
	wl12xx_for_each_wlvif_ap(wl, wlvif) {
		wl12xx_irq_update_links_status(wl, wlvif, status);
	}

	/* update the host-chipset time offset */
	wl->time_offset = (ktime_get_boottime_ns() >> 10) -
		(s64)(status->fw_localtime);

	wl->fw_fast_lnk_map = status->link_fast_bitmap;

	return 0;
}
563
wl1271_flush_deferred_work(struct wl1271 * wl)564 static void wl1271_flush_deferred_work(struct wl1271 *wl)
565 {
566 struct sk_buff *skb;
567
568 /* Pass all received frames to the network stack */
569 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
570 ieee80211_rx_ni(wl->hw, skb);
571
572 /* Return sent skbs to the network stack */
573 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
574 ieee80211_tx_status_ni(wl->hw, skb);
575 }
576
wl1271_netstack_work(struct work_struct * work)577 static void wl1271_netstack_work(struct work_struct *work)
578 {
579 struct wl1271 *wl =
580 container_of(work, struct wl1271, netstack_work);
581
582 do {
583 wl1271_flush_deferred_work(wl);
584 } while (skb_queue_len(&wl->deferred_rx_queue));
585 }
586
587 #define WL1271_IRQ_MAX_LOOPS 256
588
/*
 * Core interrupt handling, called with wl->mutex held. Loops over the
 * firmware status until no interrupt bits remain (or, for edge-
 * triggered IRQ lines, exactly once), dispatching RX, Tx completion
 * and event-mailbox processing. Returns 0 or a negative error that
 * triggers recovery in the caller.
 */
static int wlcore_irq_locked(struct wl1271 *wl)
{
	int ret = 0;
	u32 intr;
	int loopcount = WL1271_IRQ_MAX_LOOPS;
	bool run_tx_queue = true;
	bool done = false;
	unsigned int defer_count;
	unsigned long flags;

	/*
	 * In case edge triggered interrupt must be used, we cannot iterate
	 * more than once without introducing race conditions with the hardirq.
	 */
	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
		loopcount = 1;

	wl1271_debug(DEBUG_IRQ, "IRQ work");

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out;

	while (!done && loopcount--) {
		smp_mb__after_atomic();

		ret = wlcore_fw_status(wl, wl->fw_status);
		if (ret < 0)
			goto err_ret;

		wlcore_hw_tx_immediate_compl(wl);

		intr = wl->fw_status->intr;
		intr &= WLCORE_ALL_INTR_MASK;
		if (!intr) {
			/* no pending interrupt bits - we are done */
			done = true;
			continue;
		}

		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
			wl1271_error("HW watchdog interrupt received! starting recovery.");
			wl->watchdog_recovery = true;
			ret = -EIO;

			/* restarting the chip. ignore any other interrupt. */
			goto err_ret;
		}

		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
			wl1271_error("SW watchdog interrupt received! "
				     "starting recovery.");
			wl->watchdog_recovery = true;
			ret = -EIO;

			/* restarting the chip. ignore any other interrupt. */
			goto err_ret;
		}

		if (likely(intr & WL1271_ACX_INTR_DATA)) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");

			ret = wlcore_rx(wl, wl->fw_status);
			if (ret < 0)
				goto err_ret;

			/* Check if any tx blocks were freed */
			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
				/*
				 * trylock: if the lock is contended, err on
				 * the side of running the Tx path anyway
				 */
				if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
					if (!wl1271_tx_total_queue_count(wl))
						run_tx_queue = false;
					spin_unlock_irqrestore(&wl->wl_lock, flags);
				}

				/*
				 * In order to avoid starvation of the TX path,
				 * call the work function directly.
				 */
				if (run_tx_queue) {
					ret = wlcore_tx_work_locked(wl);
					if (ret < 0)
						goto err_ret;
				}
			}

			/* check for tx results */
			ret = wlcore_hw_tx_delayed_compl(wl);
			if (ret < 0)
				goto err_ret;

			/* Make sure the deferred queues don't get too long */
			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
				      skb_queue_len(&wl->deferred_rx_queue);
			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
				wl1271_flush_deferred_work(wl);
		}

		if (intr & WL1271_ACX_INTR_EVENT_A) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
			ret = wl1271_event_handle(wl, 0);
			if (ret < 0)
				goto err_ret;
		}

		if (intr & WL1271_ACX_INTR_EVENT_B) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
			ret = wl1271_event_handle(wl, 1);
			if (ret < 0)
				goto err_ret;
		}

		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
			wl1271_debug(DEBUG_IRQ,
				     "WL1271_ACX_INTR_INIT_COMPLETE");

		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
	}

err_ret:
	pm_runtime_put_autosuspend(wl->dev);

out:
	return ret;
}
716
/*
 * Threaded IRQ handler. Completes any pending ELP wakeup, defers the
 * work when the device is suspended, and otherwise runs the interrupt
 * processing under wl->mutex, queueing Tx/recovery work as needed.
 */
static irqreturn_t wlcore_irq(int irq, void *cookie)
{
	int ret;
	unsigned long flags;
	struct wl1271 *wl = cookie;
	bool queue_tx_work = true;

	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);

	/* complete the ELP completion */
	if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) {
		spin_lock_irqsave(&wl->wl_lock, flags);
		if (wl->elp_compl)
			complete(wl->elp_compl);
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
		/* don't enqueue a work right now. mark it as pending */
		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
		spin_lock_irqsave(&wl->wl_lock, flags);
		disable_irq_nosync(wl->irq);
		pm_wakeup_event(wl->dev, 0);
		spin_unlock_irqrestore(&wl->wl_lock, flags);
		goto out_handled;
	}

	/* TX might be handled here, avoid redundant work */
	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	cancel_work_sync(&wl->tx_work);

	mutex_lock(&wl->mutex);

	ret = wlcore_irq_locked(wl);
	if (ret)
		wl12xx_queue_recovery_work(wl);

	/* In case TX was not handled in wlcore_irq_locked(), queue TX work */
	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
		/* trylock: on contention assume there is queued Tx */
		if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
			if (!wl1271_tx_total_queue_count(wl))
				queue_tx_work = false;
			spin_unlock_irqrestore(&wl->wl_lock, flags);
		}
		if (queue_tx_work)
			ieee80211_queue_work(wl->hw, &wl->tx_work);
	}

	mutex_unlock(&wl->mutex);

out_handled:
	clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);

	return IRQ_HANDLED;
}
774
/* Accumulator for the active-interface iteration below */
struct vif_counter_data {
	u8 counter;			/* number of active vifs seen */

	struct ieee80211_vif *cur_vif;	/* vif the caller asks about */
	bool cur_vif_running;		/* set if cur_vif was seen active */
};
781
/*
 * mac80211 iterator callback: count every active vif and note whether
 * the vif of interest is among them.
 */
static void wl12xx_vif_count_iter(void *data, u8 *mac,
				  struct ieee80211_vif *vif)
{
	struct vif_counter_data *vdata = data;

	vdata->counter++;
	if (vif == vdata->cur_vif)
		vdata->cur_vif_running = true;
}
791
792 /* caller must not hold wl->mutex, as it might deadlock */
wl12xx_get_vif_count(struct ieee80211_hw * hw,struct ieee80211_vif * cur_vif,struct vif_counter_data * data)793 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
794 struct ieee80211_vif *cur_vif,
795 struct vif_counter_data *data)
796 {
797 memset(data, 0, sizeof(*data));
798 data->cur_vif = cur_vif;
799
800 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
801 wl12xx_vif_count_iter, data);
802 }
803
/*
 * Load the firmware image appropriate for the current mode (PLT,
 * multi-role or single-role) into wl->fw, unless the right type is
 * already loaded. Returns 0 on success or a negative error code.
 */
static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
{
	const struct firmware *fw;
	const char *fw_name;
	enum wl12xx_fw_type fw_type;
	int ret;

	if (plt) {
		fw_type = WL12XX_FW_TYPE_PLT;
		fw_name = wl->plt_fw_name;
	} else {
		/*
		 * we can't call wl12xx_get_vif_count() here because
		 * wl->mutex is taken, so use the cached last_vif_count value
		 */
		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
			fw_type = WL12XX_FW_TYPE_MULTI;
			fw_name = wl->mr_fw_name;
		} else {
			fw_type = WL12XX_FW_TYPE_NORMAL;
			fw_name = wl->sr_fw_name;
		}
	}

	/* correct firmware already loaded - nothing to do */
	if (wl->fw_type == fw_type)
		return 0;

	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);

	ret = request_firmware(&fw, fw_name, wl->dev);

	if (ret < 0) {
		wl1271_error("could not get firmware %s: %d", fw_name, ret);
		return ret;
	}

	/* the boot code transfers the image in 32-bit words */
	if (fw->size % 4) {
		wl1271_error("firmware size is not multiple of 32 bits: %zu",
			     fw->size);
		ret = -EILSEQ;
		goto out;
	}

	/* drop any previously loaded image before copying the new one */
	vfree(wl->fw);
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	wl->fw_len = fw->size;
	wl->fw = vmalloc(wl->fw_len);

	if (!wl->fw) {
		wl1271_error("could not allocate memory for the firmware");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(wl->fw, fw->data, wl->fw_len);
	ret = 0;
	wl->fw_type = fw_type;
out:
	release_firmware(fw);

	return ret;
}
866
/*
 * Queue a full HW recovery, unless one is already underway (state is
 * only WLCORE_STATE_ON when no recovery/restart is in flight).
 */
void wl12xx_queue_recovery_work(struct wl1271 *wl)
{
	/* Avoid a recursive recovery */
	if (wl->state == WLCORE_STATE_ON) {
		/* warn about recoveries that were not deliberately triggered */
		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
				  &wl->flags));

		wl->state = WLCORE_STATE_RESTARTING;
		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
		ieee80211_queue_work(wl->hw, &wl->recovery_work);
	}
}
879
/*
 * Append up to maxlen bytes of firmware log data to wl->fwlog (which
 * backs the sysfs fwlog entry, capped at PAGE_SIZE). Returns the
 * number of bytes actually copied.
 */
size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
{
	size_t room = PAGE_SIZE - wl->fwlog_size;
	size_t len = min_t(size_t, maxlen, room);

	/* Fill the FW log file, consumed by the sysfs fwlog entry */
	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
	wl->fwlog_size += len;

	return len;
}
893
/*
 * After a firmware panic, drain the remaining FW log memory blocks to
 * the host so they can be inspected. No-op on chips without fwlog.
 */
static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
{
	u32 end_of_log = 0;
	int error;

	if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
		return;

	wl1271_info("Reading FW panic log");

	/*
	 * Make sure the chip is awake and the logger isn't active.
	 * Do not send a stop fwlog command if the fw is hanged or if
	 * dbgpins are used (due to some fw bug).
	 */
	error = pm_runtime_resume_and_get(wl->dev);
	if (error < 0)
		return;
	if (!wl->watchdog_recovery &&
	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
		wl12xx_cmd_stop_fwlog(wl);

	/* Traverse the memory blocks linked list */
	do {
		end_of_log = wlcore_event_fw_logger(wl);
		if (end_of_log == 0) {
			/* give the FW a moment to produce more log data */
			msleep(100);
			end_of_log = wlcore_event_fw_logger(wl);
		}
	} while (end_of_log != 0);
}
925
/*
 * Snapshot the link's freed-packet count into the station's private
 * data so sequence numbers stay monotonic across a recovery. During
 * recovery a padding is added to cover packets whose completion was
 * never reported by the (now dead) firmware.
 */
static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   u8 hlid, struct ieee80211_sta *sta)
{
	struct wl1271_station *wl_sta;
	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;

	wl_sta = (void *)sta->drv_priv;
	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;

	/*
	 * increment the initial seq number on recovery to account for
	 * transmitted packets that we haven't yet got in the FW status
	 */
	if (wlvif->encryption_type == KEY_GEM)
		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;

	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		wl_sta->total_freed_pkts += sqn_recovery_padding;
}
945
/*
 * Like wlcore_save_freed_pkts(), but looks the station up by MAC
 * address under RCU. Silently does nothing if the station is gone.
 */
static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					u8 hlid, const u8 *addr)
{
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
		    is_zero_ether_addr(addr)))
		return;

	/* RCU protects the sta entry for the duration of the copy */
	rcu_read_lock();
	sta = ieee80211_find_sta(vif, addr);
	if (sta)
		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
	rcu_read_unlock();
}
963
/*
 * Log diagnostic state (FW program counter, interrupt status, recovery
 * count) at the start of a recovery. Temporarily switches to the BOOT
 * partition to read the registers and restores the WORK partition.
 */
static void wlcore_print_recovery(struct wl1271 *wl)
{
	u32 pc = 0;
	u32 hint_sts = 0;
	int ret;

	wl1271_info("Hardware recovery in progress. FW ver: %s",
		    wl->chip.fw_ver_str);

	/* change partitions momentarily so we can read the FW pc */
	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
	if (ret < 0)
		return;

	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
	if (ret < 0)
		return;

	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
	if (ret < 0)
		return;

	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
		    pc, hint_sts, ++wl->recovery_count);

	/* restore the normal working partition */
	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
}
991
992
/*
 * Work item: perform a full firmware recovery. Dumps the panic log and
 * FW state (for unintended recoveries), preserves per-station sequence
 * numbers, tears down all interfaces, stops the chip and asks mac80211
 * to restart the hardware.
 */
static void wl1271_recovery_work(struct work_struct *work)
{
	struct wl1271 *wl =
		container_of(work, struct wl1271, recovery_work);
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;
	int error;

	mutex_lock(&wl->mutex);

	/* nothing to recover when already off or in PLT mode */
	if (wl->state == WLCORE_STATE_OFF || wl->plt)
		goto out_unlock;

	/* continue even if resume fails - we must still quiesce the IRQ */
	error = pm_runtime_resume_and_get(wl->dev);
	if (error < 0)
		wl1271_warning("Enable for recovery failed");
	wlcore_disable_interrupts_nosync(wl);

	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
			wl12xx_read_fwlog_panic(wl);
		wlcore_print_recovery(wl);
	}

	/* optionally halt the kernel to capture the failure state */
	BUG_ON(wl->conf.recovery.bug_on_recovery &&
	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));

	clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);

	if (wl->conf.recovery.no_recovery) {
		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
		goto out_unlock;
	}

	/* Prevent spurious TX during FW restart */
	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);

	/* reboot the chipset */
	while (!list_empty(&wl->wlvif_list)) {
		wlvif = list_first_entry(&wl->wlvif_list,
				       struct wl12xx_vif, list);
		vif = wl12xx_wlvif_to_vif(wlvif);

		/* keep sequence numbers monotonic across the restart */
		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
						    vif->bss_conf.bssid);
		}

		__wl1271_op_remove_interface(wl, vif, false);
	}

	wlcore_op_stop_locked(wl);
	pm_runtime_put_autosuspend(wl->dev);

	ieee80211_restart_hw(wl->hw);

	/*
	 * Its safe to enable TX now - the queues are stopped after a request
	 * to restart the HW.
	 */
	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);

out_unlock:
	wl->watchdog_recovery = false;
	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
	mutex_unlock(&wl->mutex);
}
1061
/* Poke the ELP control register to wake the firmware from low power */
static int wlcore_fw_wakeup(struct wl1271 *wl)
{
	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
}
1066
wl1271_setup(struct wl1271 * wl)1067 static int wl1271_setup(struct wl1271 *wl)
1068 {
1069 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1070 if (!wl->raw_fw_status)
1071 goto err;
1072
1073 wl->fw_status = kzalloc_obj(*wl->fw_status);
1074 if (!wl->fw_status)
1075 goto err;
1076
1077 wl->tx_res_if = kzalloc_obj(*wl->tx_res_if);
1078 if (!wl->tx_res_if)
1079 goto err;
1080
1081 return 0;
1082 err:
1083 kfree(wl->fw_status);
1084 kfree(wl->raw_fw_status);
1085 return -ENOMEM;
1086 }
1087
wl12xx_set_power_on(struct wl1271 * wl)1088 static int wl12xx_set_power_on(struct wl1271 *wl)
1089 {
1090 int ret;
1091
1092 msleep(WL1271_PRE_POWER_ON_SLEEP);
1093 ret = wl1271_power_on(wl);
1094 if (ret < 0)
1095 goto out;
1096 msleep(WL1271_POWER_ON_SLEEP);
1097 wl1271_io_reset(wl);
1098 wl1271_io_init(wl);
1099
1100 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1101 if (ret < 0)
1102 goto fail;
1103
1104 /* ELP module wake up */
1105 ret = wlcore_fw_wakeup(wl);
1106 if (ret < 0)
1107 goto fail;
1108
1109 out:
1110 return ret;
1111
1112 fail:
1113 wl1271_power_off(wl);
1114 return ret;
1115 }
1116
/*
 * Bring the chip up to the point where firmware can be booted: power it
 * on, configure the bus block size, allocate the FW status buffers and
 * fetch the firmware image (normal or PLT, per @plt).
 *
 * Returns 0 on success or a negative error code. If the firmware fetch
 * fails, the buffers allocated by wl1271_setup() are released and their
 * pointers NULLed so a subsequent stop/teardown cannot double-free them.
 */
static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
{
	int ret = 0;

	ret = wl12xx_set_power_on(wl);
	if (ret < 0)
		goto out;

	/*
	 * For wl127x based devices we could use the default block
	 * size (512 bytes), but due to a bug in the sdio driver, we
	 * need to set it explicitly after the chip is powered on. To
	 * simplify the code and since the performance impact is
	 * negligible, we use the same block size for all different
	 * chip types.
	 *
	 * Check if the bus supports blocksize alignment and, if it
	 * doesn't, make sure we don't have the quirk.
	 */
	if (!wl1271_set_block_size(wl))
		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;

	/* TODO: make sure the lower driver has set things up correctly */

	ret = wl1271_setup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_fetch_firmware(wl, plt);
	if (ret < 0) {
		/* undo wl1271_setup(); NULL the pointers to avoid reuse */
		kfree(wl->fw_status);
		wl->fw_status = NULL;
		kfree(wl->raw_fw_status);
		wl->raw_fw_status = NULL;
		kfree(wl->tx_res_if);
		wl->tx_res_if = NULL;
	}

out:
	return ret;
}
1155
wl1271_plt_start(struct wl1271 * wl,const enum plt_mode plt_mode)1156 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1157 {
1158 int retries = WL1271_BOOT_RETRIES;
1159 struct wiphy *wiphy = wl->hw->wiphy;
1160
1161 static const char* const PLT_MODE[] = {
1162 "PLT_OFF",
1163 "PLT_ON",
1164 "PLT_FEM_DETECT",
1165 "PLT_CHIP_AWAKE"
1166 };
1167
1168 int ret;
1169
1170 mutex_lock(&wl->mutex);
1171
1172 wl1271_notice("power up");
1173
1174 if (wl->state != WLCORE_STATE_OFF) {
1175 wl1271_error("cannot go into PLT state because not "
1176 "in off state: %d", wl->state);
1177 ret = -EBUSY;
1178 goto out;
1179 }
1180
1181 /* Indicate to lower levels that we are now in PLT mode */
1182 wl->plt = true;
1183 wl->plt_mode = plt_mode;
1184
1185 while (retries) {
1186 retries--;
1187 ret = wl12xx_chip_wakeup(wl, true);
1188 if (ret < 0)
1189 goto power_off;
1190
1191 if (plt_mode != PLT_CHIP_AWAKE) {
1192 ret = wl->ops->plt_init(wl);
1193 if (ret < 0)
1194 goto power_off;
1195 }
1196
1197 wl->state = WLCORE_STATE_ON;
1198 wl1271_notice("firmware booted in PLT mode %s (%s)",
1199 PLT_MODE[plt_mode],
1200 wl->chip.fw_ver_str);
1201
1202 /* update hw/fw version info in wiphy struct */
1203 wiphy->hw_version = wl->chip.id;
1204 strscpy(wiphy->fw_version, wl->chip.fw_ver_str,
1205 sizeof(wiphy->fw_version));
1206
1207 goto out;
1208
1209 power_off:
1210 wl1271_power_off(wl);
1211 }
1212
1213 wl->plt = false;
1214 wl->plt_mode = PLT_OFF;
1215
1216 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1217 WL1271_BOOT_RETRIES);
1218 out:
1219 mutex_unlock(&wl->mutex);
1220
1221 return ret;
1222 }
1223
/*
 * Leave PLT mode and power the chip down.
 *
 * Returns 0 on success, -EBUSY if the device is not currently in PLT mode.
 */
int wl1271_plt_stop(struct wl1271 *wl)
{
	int ret = 0;

	wl1271_notice("power down");

	/*
	 * Interrupts must be disabled before setting the state to OFF.
	 * Otherwise, the interrupt handler might be called and exit without
	 * reading the interrupt status.
	 */
	wlcore_disable_interrupts(wl);
	mutex_lock(&wl->mutex);
	if (!wl->plt) {
		mutex_unlock(&wl->mutex);

		/*
		 * This will not necessarily enable interrupts as interrupts
		 * may have been disabled when op_stop was called. It will,
		 * however, balance the above call to disable_interrupts().
		 */
		wlcore_enable_interrupts(wl);

		wl1271_error("cannot power down because not in PLT "
			     "state: %d", wl->state);
		ret = -EBUSY;
		goto out;
	}

	mutex_unlock(&wl->mutex);

	/* drain pending work with the mutex released */
	wl1271_flush_deferred_work(wl);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->recovery_work);
	cancel_delayed_work_sync(&wl->tx_watchdog_work);

	/* power off and reset driver state back to "off" under the lock */
	mutex_lock(&wl->mutex);
	wl1271_power_off(wl);
	wl->flags = 0;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	wl->state = WLCORE_STATE_OFF;
	wl->plt = false;
	wl->plt_mode = PLT_OFF;
	wl->rx_counter = 0;
	mutex_unlock(&wl->mutex);

out:
	return ret;
}
1273
/*
 * mac80211 TX entry point (atomic context): map the frame to a firmware
 * link (hlid) and AC queue, enqueue it on the per-link queue, and kick
 * the TX work unless the firmware TX path is busy. Frames that cannot be
 * mapped to a valid link, or whose queue is hard-stopped, are dropped.
 */
static void wl1271_op_tx(struct ieee80211_hw *hw,
			 struct ieee80211_tx_control *control,
			 struct sk_buff *skb)
{
	struct wl1271 *wl = hw->priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	struct wl12xx_vif *wlvif = NULL;
	unsigned long flags;
	int q, mapping;
	u8 hlid;

	/* without a vif there is no link to attach the frame to */
	if (!vif) {
		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
		ieee80211_free_txskb(hw, skb);
		return;
	}

	wlvif = wl12xx_vif_to_data(vif);
	mapping = skb_get_queue_mapping(skb);
	q = wl1271_tx_get_queue(mapping);

	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);

	spin_lock_irqsave(&wl->wl_lock, flags);

	/*
	 * drop the packet if the link is invalid or the queue is stopped
	 * for any reason but watermark. Watermark is a "soft"-stop so we
	 * allow these packets through.
	 */
	if (hlid == WL12XX_INVALID_LINK_ID ||
	    (!test_bit(hlid, wlvif->links_map)) ||
	    (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
	     !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
		ieee80211_free_txskb(hw, skb);
		goto out;
	}

	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
		     hlid, q, skb->len);
	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);

	/* bookkeeping used by the watermark check below and the TX work */
	wl->tx_queue_count[q]++;
	wlvif->tx_queue_count[q]++;

	/*
	 * The workqueue is slow to process the tx_queue and we need stop
	 * the queue here, otherwise the queue will get too long.
	 */
	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
		wlcore_stop_queue_locked(wl, wlvif, q,
					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
	}

	/*
	 * The chip specific setup must run before the first TX packet -
	 * before that, the tx_work will not be initialized!
	 */

	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
		ieee80211_queue_work(wl->hw, &wl->tx_work);

out:
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
1346
wl1271_tx_dummy_packet(struct wl1271 * wl)1347 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1348 {
1349 unsigned long flags;
1350 int q;
1351
1352 /* no need to queue a new dummy packet if one is already pending */
1353 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1354 return 0;
1355
1356 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1357
1358 spin_lock_irqsave(&wl->wl_lock, flags);
1359 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1360 wl->tx_queue_count[q]++;
1361 spin_unlock_irqrestore(&wl->wl_lock, flags);
1362
1363 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1364 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1365 return wlcore_tx_work_locked(wl);
1366
1367 /*
1368 * If the FW TX is busy, TX work will be scheduled by the threaded
1369 * interrupt handler function
1370 */
1371 return 0;
1372 }
1373
/*
 * The size of the dummy packet should be at least 1400 bytes. However, in
 * order to minimize the number of bus transactions, aligning it to 512-byte
 * boundaries could be beneficial, performance-wise.
 */
1379 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1380
wl12xx_alloc_dummy_packet(struct wl1271 * wl)1381 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1382 {
1383 struct sk_buff *skb;
1384 struct ieee80211_hdr_3addr *hdr;
1385 unsigned int dummy_packet_size;
1386
1387 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1388 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1389
1390 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1391 if (!skb) {
1392 wl1271_warning("Failed to allocate a dummy packet skb");
1393 return NULL;
1394 }
1395
1396 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1397
1398 hdr = skb_put_zero(skb, sizeof(*hdr));
1399 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1400 IEEE80211_STYPE_NULLFUNC |
1401 IEEE80211_FCTL_TODS);
1402
1403 skb_put_zero(skb, dummy_packet_size);
1404
1405 /* Dummy packets require the TID to be management */
1406 skb->priority = WL1271_TID_MGMT;
1407
1408 /* Initialize all fields that might be used */
1409 skb_set_queue_mapping(skb, 0);
1410 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1411
1412 return skb;
1413 }
1414
1415
/*
 * Verify that a WoWLAN packet pattern fits the FW RX filter limits:
 * at most WL1271_RX_FILTER_MAX_FIELDS contiguous masked segments, and a
 * total flattened size within WL1271_RX_FILTER_MAX_FIELDS_SIZE.
 *
 * Returns 0 if the pattern fits, -EINVAL or -E2BIG otherwise.
 */
static int
wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
{
	int num_fields = 0, in_field = 0, fields_size = 0;
	int i, pattern_len = 0;

	if (!p->mask) {
		wl1271_warning("No mask in WoWLAN pattern");
		return -EINVAL;
	}

	/*
	 * The pattern is broken up into segments of bytes at different offsets
	 * that need to be checked by the FW filter. Each segment is called
	 * a field in the FW API. We verify that the total number of fields
	 * required for this pattern won't exceed FW limits (8)
	 * as well as the total fields buffer won't exceed the FW limit.
	 * Note that if there's a pattern which crosses Ethernet/IP header
	 * boundary a new field is required.
	 */
	for (i = 0; i < p->pattern_len; i++) {
		if (test_bit(i, (unsigned long *)p->mask)) {
			if (!in_field) {
				/* start of a new masked segment */
				in_field = 1;
				pattern_len = 1;
			} else {
				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
					/* crossing the Ethernet/IP boundary
					 * closes the current field */
					num_fields++;
					fields_size += pattern_len +
						RX_FILTER_FIELD_OVERHEAD;
					pattern_len = 1;
				} else
					pattern_len++;
			}
		} else {
			if (in_field) {
				/* gap in the mask ends the current field */
				in_field = 0;
				fields_size += pattern_len +
					RX_FILTER_FIELD_OVERHEAD;
				num_fields++;
			}
		}
	}

	/* close a field running to the end of the pattern */
	if (in_field) {
		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
		num_fields++;
	}

	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
		wl1271_warning("RX Filter too complex. Too many segments");
		return -EINVAL;
	}

	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
		wl1271_warning("RX filter pattern is too big");
		return -E2BIG;
	}

	return 0;
}
1477
wl1271_rx_filter_alloc(void)1478 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1479 {
1480 return kzalloc_obj(struct wl12xx_rx_filter);
1481 }
1482
wl1271_rx_filter_free(struct wl12xx_rx_filter * filter)1483 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1484 {
1485 int i;
1486
1487 if (filter == NULL)
1488 return;
1489
1490 for (i = 0; i < filter->num_fields; i++)
1491 kfree(filter->fields[i].pattern);
1492
1493 kfree(filter);
1494 }
1495
/*
 * Append one field to an RX filter, duplicating the pattern bytes so the
 * field owns its copy.
 *
 * Returns 0 on success, -EINVAL when the filter is already full, or
 * -ENOMEM if the pattern copy cannot be allocated.
 */
int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
				 u16 offset, u8 flags,
				 const u8 *pattern, u8 len)
{
	struct wl12xx_rx_filter_field *field;

	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
		wl1271_warning("Max fields per RX filter. can't alloc another");
		return -EINVAL;
	}

	field = &filter->fields[filter->num_fields];

	field->pattern = kmemdup(pattern, len, GFP_KERNEL);
	if (!field->pattern) {
		wl1271_warning("Failed to allocate RX filter pattern");
		return -ENOMEM;
	}

	field->offset = cpu_to_le16(offset);
	field->flags = flags;
	field->len = len;

	/* commit the field only after it is fully populated */
	filter->num_fields++;

	return 0;
}
1523
wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter * filter)1524 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1525 {
1526 int i, fields_size = 0;
1527
1528 for (i = 0; i < filter->num_fields; i++)
1529 fields_size += filter->fields[i].len +
1530 sizeof(struct wl12xx_rx_filter_field) -
1531 sizeof(u8 *);
1532
1533 return fields_size;
1534 }
1535
/*
 * Serialize all filter fields into @buf in the layout the FW expects:
 * the fixed field header followed immediately by the pattern bytes
 * (which start where the pattern pointer sits in the in-memory struct).
 * @buf must be at least wl1271_rx_filter_get_fields_size() bytes.
 */
void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
				     u8 *buf)
{
	struct wl12xx_rx_filter_field *dst;
	int i;

	for (i = 0; i < filter->num_fields; i++) {
		dst = (struct wl12xx_rx_filter_field *)buf;

		/* fixed header first... */
		dst->offset = filter->fields[i].offset;
		dst->flags = filter->fields[i].flags;
		dst->len = filter->fields[i].len;

		/* ...then the pattern bytes in place of the pointer */
		memcpy(&dst->pattern, filter->fields[i].pattern, dst->len);

		buf += sizeof(struct wl12xx_rx_filter_field) -
		       sizeof(u8 *) + dst->len;
	}
}
1554
/*
 * Allocates an RX filter returned through f
 * which needs to be freed using wl1271_rx_filter_free()
 */
/*
 * Translate a cfg80211 WoWLAN pattern into an RX filter. Each maximal run
 * of mask-covered bytes that does not cross the Ethernet/IP header
 * boundary becomes one filter field. On success *f holds the new filter
 * (action FILTER_SIGNAL); on failure *f is NULL and a negative error code
 * is returned. The caller frees the filter with wl1271_rx_filter_free().
 */
static int
wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
					   struct wl12xx_rx_filter **f)
{
	int i, j, ret = 0;
	struct wl12xx_rx_filter *filter;
	u16 offset;
	u8 flags, len;

	filter = wl1271_rx_filter_alloc();
	if (!filter) {
		wl1271_warning("Failed to alloc rx filter");
		ret = -ENOMEM;
		goto err;
	}

	i = 0;
	while (i < p->pattern_len) {
		/* skip bytes not covered by the mask */
		if (!test_bit(i, (unsigned long *)p->mask)) {
			i++;
			continue;
		}

		/* find the end of the current run of masked bytes */
		for (j = i; j < p->pattern_len; j++) {
			if (!test_bit(j, (unsigned long *)p->mask))
				break;

			/* a single field may not straddle the Eth/IP boundary */
			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
				break;
		}

		/* IP-part offsets are relative to the IP header start */
		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
			offset = i;
			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
		} else {
			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
		}

		len = j - i;

		ret = wl1271_rx_filter_alloc_field(filter,
						   offset,
						   flags,
						   &p->pattern[i], len);
		if (ret)
			goto err;

		i = j;
	}

	filter->action = FILTER_SIGNAL;

	*f = filter;
	return 0;

err:
	wl1271_rx_filter_free(filter);
	*f = NULL;

	return ret;
}
1622
/*
 * Program the FW RX filters from a WoWLAN configuration.
 *
 * With no usable patterns (NULL @wow, the "any" trigger, or zero
 * patterns), all filters are cleared and the default action is set to
 * FILTER_SIGNAL. Otherwise every pattern is validated first, then each is
 * translated into an RX filter, and the default action becomes
 * FILTER_DROP so only matching packets signal the host.
 */
static int wl1271_configure_wowlan(struct wl1271 *wl,
				   struct cfg80211_wowlan *wow)
{
	int i, ret;

	if (!wow || wow->any || !wow->n_patterns) {
		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
							  FILTER_SIGNAL);
		if (ret)
			goto out;

		ret = wl1271_rx_filter_clear_all(wl);
		if (ret)
			goto out;

		return 0;
	}

	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
		return -EINVAL;

	/* Validate all incoming patterns before clearing current FW state */
	for (i = 0; i < wow->n_patterns; i++) {
		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
		if (ret) {
			wl1271_warning("Bad wowlan pattern %d", i);
			return ret;
		}
	}

	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
	if (ret)
		goto out;

	ret = wl1271_rx_filter_clear_all(wl);
	if (ret)
		goto out;

	/* Translate WoWLAN patterns into filters */
	for (i = 0; i < wow->n_patterns; i++) {
		struct cfg80211_pkt_pattern *p;
		struct wl12xx_rx_filter *filter = NULL;

		p = &wow->patterns[i];

		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
		if (ret) {
			wl1271_warning("Failed to create an RX filter from "
				       "wowlan pattern %d", i);
			goto out;
		}

		ret = wl1271_rx_filter_enable(wl, i, 1, filter);

		/* local copy is no longer needed once enabled */
		wl1271_rx_filter_free(filter);
		if (ret)
			goto out;
	}

	/* drop everything that did not match a filter */
	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);

out:
	return ret;
}
1687
wl1271_configure_suspend_sta(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct cfg80211_wowlan * wow)1688 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1689 struct wl12xx_vif *wlvif,
1690 struct cfg80211_wowlan *wow)
1691 {
1692 int ret = 0;
1693
1694 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1695 goto out;
1696
1697 ret = wl1271_configure_wowlan(wl, wow);
1698 if (ret < 0)
1699 goto out;
1700
1701 if ((wl->conf.conn.suspend_wake_up_event ==
1702 wl->conf.conn.wake_up_event) &&
1703 (wl->conf.conn.suspend_listen_interval ==
1704 wl->conf.conn.listen_interval))
1705 goto out;
1706
1707 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1708 wl->conf.conn.suspend_wake_up_event,
1709 wl->conf.conn.suspend_listen_interval);
1710
1711 if (ret < 0)
1712 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1713 out:
1714 return ret;
1715
1716 }
1717
wl1271_configure_suspend_ap(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct cfg80211_wowlan * wow)1718 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1719 struct wl12xx_vif *wlvif,
1720 struct cfg80211_wowlan *wow)
1721 {
1722 int ret = 0;
1723
1724 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1725 goto out;
1726
1727 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1728 if (ret < 0)
1729 goto out;
1730
1731 ret = wl1271_configure_wowlan(wl, wow);
1732 if (ret < 0)
1733 goto out;
1734
1735 out:
1736 return ret;
1737
1738 }
1739
wl1271_configure_suspend(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct cfg80211_wowlan * wow)1740 static int wl1271_configure_suspend(struct wl1271 *wl,
1741 struct wl12xx_vif *wlvif,
1742 struct cfg80211_wowlan *wow)
1743 {
1744 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1745 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1746 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1747 return wl1271_configure_suspend_ap(wl, wlvif, wow);
1748 return 0;
1749 }
1750
/*
 * Undo the suspend-time configuration on resume: disable the WoWLAN
 * filters and, for stations, restore the runtime wake-up conditions; for
 * APs, turn beacon filtering back off. Errors are logged, not returned.
 */
static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret = 0;
	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;

	if ((!is_ap) && (!is_sta))
		return;

	/* nothing was configured at suspend time for idle vifs */
	if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
	    (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
		return;

	wl1271_configure_wowlan(wl, NULL);

	if (is_sta) {
		/* skip if suspend used the same wake-up parameters */
		if ((wl->conf.conn.suspend_wake_up_event ==
		     wl->conf.conn.wake_up_event) &&
		    (wl->conf.conn.suspend_listen_interval ==
		     wl->conf.conn.listen_interval))
			return;

		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
				    wl->conf.conn.wake_up_event,
				    wl->conf.conn.listen_interval);

		if (ret < 0)
			wl1271_error("resume: wake up conditions failed: %d",
				     ret);

	} else if (is_ap) {
		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
	}
}
1785
/*
 * mac80211 suspend callback: arm WoWLAN and suspend wake-up conditions on
 * every vif, quiesce the TX path, then force the device into runtime
 * suspend.
 *
 * Returns 0 on success or a negative error; -EBUSY if a recovery is in
 * progress so that it runs before suspending.
 */
static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
					    struct cfg80211_wowlan *wow)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	unsigned long flags;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
	WARN_ON(!wow);

	/* we want to perform the recovery before suspending */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
		wl1271_warning("postponing suspend to perform recovery");
		return -EBUSY;
	}

	wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	/* keep the device awake while configuring the firmware */
	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0) {
		mutex_unlock(&wl->mutex);
		return ret;
	}

	wl->wow_enabled = true;
	wl12xx_for_each_wlvif(wl, wlvif) {
		/* p2p-mgmt vifs are skipped here and on resume alike */
		if (wlcore_is_p2p_mgmt(wlvif))
			continue;

		ret = wl1271_configure_suspend(wl, wlvif, wow);
		if (ret < 0) {
			goto out_sleep;
		}
	}

	/* disable fast link flow control notifications from FW */
	ret = wlcore_hw_interrupt_notify(wl, false);
	if (ret < 0)
		goto out_sleep;

	/* if filtering is enabled, configure the FW to drop all RX BA frames */
	ret = wlcore_hw_rx_ba_filter(wl,
				     !!wl->conf.conn.suspend_rx_ba_activity);
	if (ret < 0)
		goto out_sleep;

out_sleep:
	pm_runtime_put_noidle(wl->dev);
	mutex_unlock(&wl->mutex);

	if (ret < 0) {
		wl1271_warning("couldn't prepare device to suspend");
		return ret;
	}

	/* flush any remaining work */
	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");

	flush_work(&wl->tx_work);

	/*
	 * Cancel the watchdog even if above tx_flush failed. We will detect
	 * it on resume anyway.
	 */
	cancel_delayed_work(&wl->tx_watchdog_work);

	/*
	 * set suspended flag to avoid triggering a new threaded_irq
	 * work.
	 */
	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	return pm_runtime_force_suspend(wl->dev);
}
1865
/*
 * mac80211 resume callback: wake the device, replay IRQ work that was
 * postponed while suspended, hand off a pending recovery if one was
 * queued, and restore the pre-suspend FW configuration on every vif.
 * Always returns 0; FW errors are routed to the recovery machinery.
 */
static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	unsigned long flags;
	bool run_irq_work = false, pending_recovery;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
		     wl->wow_enabled);
	WARN_ON(!wl->wow_enabled);

	ret = pm_runtime_force_resume(wl->dev);
	if (ret < 0) {
		wl1271_error("ELP wakeup failure!");
		goto out_sleep;
	}

	/*
	 * re-enable irq_work enqueuing, and call irq_work directly if
	 * there is a pending work.
	 */
	spin_lock_irqsave(&wl->wl_lock, flags);
	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
		run_irq_work = true;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	mutex_lock(&wl->mutex);

	/* test the recovery flag before calling any SDIO functions */
	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
				    &wl->flags);

	if (run_irq_work) {
		wl1271_debug(DEBUG_MAC80211,
			     "run postponed irq_work directly");

		/* don't talk to the HW if recovery is pending */
		if (!pending_recovery) {
			ret = wlcore_irq_locked(wl);
			if (ret)
				wl12xx_queue_recovery_work(wl);
		}

		wlcore_enable_interrupts(wl);
	}

	if (pending_recovery) {
		wl1271_warning("queuing forgotten recovery on resume");
		ieee80211_queue_work(wl->hw, &wl->recovery_work);
		goto out_sleep;
	}

	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out;

	wl12xx_for_each_wlvif(wl, wlvif) {
		/* p2p-mgmt vifs were skipped at suspend time as well */
		if (wlcore_is_p2p_mgmt(wlvif))
			continue;

		wl1271_configure_resume(wl, wlvif);
	}

	/* re-enable fast link flow control notifications from FW */
	ret = wlcore_hw_interrupt_notify(wl, true);
	if (ret < 0)
		goto out_sleep;

	/* if filtering is enabled, configure the FW to drop all RX BA frames */
	ret = wlcore_hw_rx_ba_filter(wl, false);
	if (ret < 0)
		goto out_sleep;

out_sleep:
	pm_runtime_put_autosuspend(wl->dev);

out:
	wl->wow_enabled = false;

	/*
	 * Set a flag to re-init the watchdog on the first Tx after resume.
	 * That way we avoid possible conditions where Tx-complete interrupts
	 * fail to arrive and we perform a spurious recovery.
	 */
	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
	mutex_unlock(&wl->mutex);

	return 0;
}
1956
wl1271_op_start(struct ieee80211_hw * hw)1957 static int wl1271_op_start(struct ieee80211_hw *hw)
1958 {
1959 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1960
1961 /*
1962 * We have to delay the booting of the hardware because
1963 * we need to know the local MAC address before downloading and
1964 * initializing the firmware. The MAC address cannot be changed
1965 * after boot, and without the proper MAC address, the firmware
1966 * will not function properly.
1967 *
1968 * The MAC address is first known when the corresponding interface
1969 * is added. That is where we will initialize the hardware.
1970 */
1971
1972 return 0;
1973 }
1974
/*
 * Tear the device down with wl->mutex held. The mutex is temporarily
 * dropped while interrupts are synchronized and pending work is cancelled
 * (those may themselves need the mutex), then re-taken to power off and
 * reset all driver state, including the FW status buffers.
 */
static void wlcore_op_stop_locked(struct wl1271 *wl)
{
	int i;

	if (wl->state == WLCORE_STATE_OFF) {
		/* balance a disable done by a now-moot scheduled recovery */
		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
					&wl->flags))
			wlcore_enable_interrupts(wl);

		return;
	}

	/*
	 * this must be before the cancel_work calls below, so that the work
	 * functions don't perform further work.
	 */
	wl->state = WLCORE_STATE_OFF;

	/*
	 * Use the nosync variant to disable interrupts, so the mutex could be
	 * held while doing so without deadlocking.
	 */
	wlcore_disable_interrupts_nosync(wl);

	mutex_unlock(&wl->mutex);

	wlcore_synchronize_interrupts(wl);
	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		cancel_work_sync(&wl->recovery_work);
	wl1271_flush_deferred_work(wl);
	cancel_delayed_work_sync(&wl->scan_complete_work);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->tx_work);
	cancel_delayed_work_sync(&wl->tx_watchdog_work);

	/* let's notify MAC80211 about the remaining pending TX frames */
	mutex_lock(&wl->mutex);
	wl12xx_tx_reset(wl);

	wl1271_power_off(wl);
	/*
	 * In case a recovery was scheduled, interrupts were disabled to avoid
	 * an interrupt storm. Now that the power is down, it is safe to
	 * re-enable interrupts to balance the disable depth
	 */
	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		wlcore_enable_interrupts(wl);

	/* reset all runtime state to its post-probe defaults */
	wl->band = NL80211_BAND_2GHZ;

	wl->rx_counter = 0;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->tx_blocks_available = 0;
	wl->tx_allocated_blocks = 0;
	wl->tx_results_count = 0;
	wl->tx_packets_count = 0;
	wl->time_offset = 0;
	wl->ap_fw_ps_map = 0;
	wl->ap_ps_map = 0;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	memset(wl->roles_map, 0, sizeof(wl->roles_map));
	memset(wl->links_map, 0, sizeof(wl->links_map));
	memset(wl->roc_map, 0, sizeof(wl->roc_map));
	memset(wl->session_ids, 0, sizeof(wl->session_ids));
	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
	wl->active_sta_count = 0;
	wl->active_link_count = 0;

	/* The system link is always allocated */
	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	/*
	 * this is performed after the cancel_work calls and the associated
	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
	 * get executed before all these vars have been reset.
	 */
	wl->flags = 0;

	wl->tx_blocks_freed = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_pkts_freed[i] = 0;
		wl->tx_allocated_pkts[i] = 0;
	}

	wl1271_debugfs_reset(wl);

	/* release the per-boot FW status buffers */
	kfree(wl->raw_fw_status);
	wl->raw_fw_status = NULL;
	kfree(wl->fw_status);
	wl->fw_status = NULL;
	kfree(wl->tx_res_if);
	wl->tx_res_if = NULL;
	kfree(wl->target_mem_map);
	wl->target_mem_map = NULL;

	/*
	 * FW channels must be re-calibrated after recovery,
	 * save current Reg-Domain channel configuration and clear it.
	 */
	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
	       sizeof(wl->reg_ch_conf_pending));
	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
}
2082
/*
 * mac80211 stop callback: take the device mutex and tear everything down.
 * NOTE(review): @suspend is unused here — presumably the suspend-specific
 * handling lives in the suspend/resume callbacks; confirm against the
 * ieee80211_ops contract.
 */
static void wlcore_op_stop(struct ieee80211_hw *hw, bool suspend)
{
	struct wl1271 *wl = hw->priv;

	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");

	mutex_lock(&wl->mutex);

	wlcore_op_stop_locked(wl);

	mutex_unlock(&wl->mutex);
}
2095
/*
 * Delayed work fired when a channel switch did not complete in time:
 * report the failed switch to mac80211 and tell the FW to stop it.
 */
static void wlcore_channel_switch_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	int ret;

	dwork = to_delayed_work(work);
	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
	wl = wlvif->wl;

	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* check the channel switch is still ongoing */
	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
		goto out;

	/* report failure (success == false) to mac80211 */
	vif = wl12xx_wlvif_to_vif(wlvif);
	ieee80211_chswitch_done(vif, false, 0);

	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out;

	wl12xx_cmd_stop_channel_switch(wl, wlvif);

	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
}
2132
/*
 * Delayed work that reports a lost connection to mac80211, unless the
 * device went down or the station is no longer associated by the time
 * the work runs.
 */
static void wlcore_connection_loss_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;

	dwork = to_delayed_work(work);
	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
	wl = wlvif->wl;

	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* Call mac80211 connection loss */
	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		goto out;

	vif = wl12xx_wlvif_to_vif(wlvif);
	ieee80211_connection_loss(vif);
out:
	mutex_unlock(&wl->mutex);
}
2160
/*
 * Delayed work ending the pending-auth grace period: once the full
 * timeout has elapsed since the last auth reply, cancel the ROC that was
 * kept alive for the authentication exchange.
 */
static void wlcore_pending_auth_complete_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct wl12xx_vif *wlvif;
	unsigned long time_spare;
	int ret;

	dwork = to_delayed_work(work);
	wlvif = container_of(dwork, struct wl12xx_vif,
			     pending_auth_complete_work);
	wl = wlvif->wl;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/*
	 * Make sure a second really passed since the last auth reply. Maybe
	 * a second auth reply arrived while we were stuck on the mutex.
	 * Check for a little less than the timeout to protect from scheduler
	 * irregularities.
	 */
	time_spare = jiffies +
			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
		goto out;

	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out;

	/* cancel the ROC if active */
	wlcore_update_inconn_sta(wl, wlvif, NULL, false);

	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
}
2201
/*
 * Claim the first free firmware rate-policy slot.
 * Returns 0 and stores the slot in *idx, or -EBUSY if all are taken.
 */
static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
{
	u8 slot;

	slot = find_first_zero_bit(wl->rate_policies_map,
				   WL12XX_MAX_RATE_POLICIES);
	if (slot >= WL12XX_MAX_RATE_POLICIES)
		return -EBUSY;

	__set_bit(slot, wl->rate_policies_map);
	*idx = slot;
	return 0;
}
2213
/* Release a firmware rate-policy slot previously claimed by
 * wl12xx_allocate_rate_policy() and invalidate the caller's index. */
static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
{
	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
		return;

	__clear_bit(*idx, wl->rate_policies_map);
	/* poison the index so an accidental double-free trips the WARN_ON */
	*idx = WL12XX_MAX_RATE_POLICIES;
}
2222
/*
 * Claim the first free keep-alive (KLV) template slot.
 * Returns 0 and stores the slot in *idx, or -EBUSY if all are taken.
 */
static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
{
	u8 slot;

	slot = find_first_zero_bit(wl->klv_templates_map,
				   WLCORE_MAX_KLV_TEMPLATES);
	if (slot >= WLCORE_MAX_KLV_TEMPLATES)
		return -EBUSY;

	__set_bit(slot, wl->klv_templates_map);
	*idx = slot;
	return 0;
}
2234
/* Release a keep-alive (KLV) template slot and invalidate the caller's
 * index. Counterpart of wlcore_allocate_klv_template(). */
static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
{
	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
		return;

	__clear_bit(*idx, wl->klv_templates_map);
	/* poison the index so an accidental double-free trips the WARN_ON */
	*idx = WLCORE_MAX_KLV_TEMPLATES;
}
2243
wl12xx_get_role_type(struct wl1271 * wl,struct wl12xx_vif * wlvif)2244 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2245 {
2246 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2247
2248 switch (wlvif->bss_type) {
2249 case BSS_TYPE_AP_BSS:
2250 if (wlvif->p2p)
2251 return WL1271_ROLE_P2P_GO;
2252 else if (ieee80211_vif_is_mesh(vif))
2253 return WL1271_ROLE_MESH_POINT;
2254 else
2255 return WL1271_ROLE_AP;
2256
2257 case BSS_TYPE_STA_BSS:
2258 if (wlvif->p2p)
2259 return WL1271_ROLE_P2P_CL;
2260 else
2261 return WL1271_ROLE_STA;
2262
2263 case BSS_TYPE_IBSS:
2264 return WL1271_ROLE_IBSS;
2265
2266 default:
2267 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2268 }
2269 return WL12XX_INVALID_ROLE_TYPE;
2270 }
2271
/*
 * Initialize the driver-private per-vif state for a newly added interface:
 * derive bss_type from the mac80211 interface type, allocate fw rate-policy
 * and keep-alive template slots, seed rate masks and copy the global
 * band/channel/power settings, and set up the per-vif work items.
 * Returns 0 or -EOPNOTSUPP for an unsupported interface type.
 */
static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int i;

	/* clear everything but the persistent data */
	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));

	switch (ieee80211_vif_type_p2p(vif)) {
	case NL80211_IFTYPE_P2P_CLIENT:
		wlvif->p2p = 1;
		fallthrough;
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_P2P_DEVICE:
		wlvif->bss_type = BSS_TYPE_STA_BSS;
		break;
	case NL80211_IFTYPE_ADHOC:
		wlvif->bss_type = BSS_TYPE_IBSS;
		break;
	case NL80211_IFTYPE_P2P_GO:
		wlvif->p2p = 1;
		fallthrough;
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_MESH_POINT:
		wlvif->bss_type = BSS_TYPE_AP_BSS;
		break;
	default:
		wlvif->bss_type = MAX_BSS_TYPE;
		return -EOPNOTSUPP;
	}

	/* all fw ids start out invalid until the role is enabled */
	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
	    wlvif->bss_type == BSS_TYPE_IBSS) {
		/* init sta/ibss data */
		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
	} else {
		/* init ap data */
		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
			wl12xx_allocate_rate_policy(wl,
						&wlvif->ap.ucast_rate_idx[i]);
		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
		/*
		 * TODO: check if basic_rate shouldn't be
		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		 * instead (the same thing for STA above).
		 */
		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
		/* TODO: this seems to be used only for STA, check it */
		wlvif->rate_set = CONF_TX_ENABLED_RATES;
	}

	wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
	wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;

	/*
	 * mac80211 configures some values globally, while we treat them
	 * per-interface. thus, on init, we have to copy them from wl
	 */
	wlvif->band = wl->band;
	wlvif->channel = wl->channel;
	wlvif->power_level = wl->power_level;
	wlvif->channel_type = wl->channel_type;

	INIT_WORK(&wlvif->rx_streaming_enable_work,
		  wl1271_rx_streaming_enable_work);
	INIT_WORK(&wlvif->rx_streaming_disable_work,
		  wl1271_rx_streaming_disable_work);
	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
			  wlcore_channel_switch_work);
	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
			  wlcore_connection_loss_work);
	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
			  wlcore_pending_auth_complete_work);
	INIT_LIST_HEAD(&wlvif->list);

	timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
	return 0;
}
2367
/*
 * Power the chip, boot the firmware and run hw init, retrying up to
 * WL1271_BOOT_RETRIES times. On success, publishes hw/fw version info to
 * the wiphy and moves the driver to WLCORE_STATE_ON.
 * Called with wl->mutex held. Returns 0 or the last boot error.
 */
static int wl12xx_init_fw(struct wl1271 *wl)
{
	int retries = WL1271_BOOT_RETRIES;
	bool booted = false;
	struct wiphy *wiphy = wl->hw->wiphy;
	int ret;

	while (retries) {
		retries--;
		ret = wl12xx_chip_wakeup(wl, false);
		if (ret < 0)
			goto power_off;

		ret = wl->ops->boot(wl);
		if (ret < 0)
			goto power_off;

		ret = wl1271_hw_init(wl);
		if (ret < 0)
			goto irq_disable;

		booted = true;
		break;

irq_disable:
		mutex_unlock(&wl->mutex);
		/* Unlocking the mutex in the middle of handling is
		   inherently unsafe. In this case we deem it safe to do,
		   because we need to let any possibly pending IRQ out of
		   the system (and while we are WLCORE_STATE_OFF the IRQ
		   work function will not do anything.) Also, any other
		   possible concurrent operations will fail due to the
		   current state, hence the wl1271 struct should be safe. */
		wlcore_disable_interrupts(wl);
		wl1271_flush_deferred_work(wl);
		cancel_work_sync(&wl->netstack_work);
		mutex_lock(&wl->mutex);
power_off:
		wl1271_power_off(wl);
	}

	if (!booted) {
		wl1271_error("firmware boot failed despite %d retries",
			     WL1271_BOOT_RETRIES);
		goto out;
	}

	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);

	/* update hw/fw version info in wiphy struct */
	wiphy->hw_version = wl->chip.id;
	strscpy(wiphy->fw_version, wl->chip.fw_ver_str,
		sizeof(wiphy->fw_version));

	/* WLAN_CIPHER_SUITE_AES_CMAC must be last in cipher_suites;
	   support only with firmware 8.9.1 and newer */
	if (wl->chip.fw_ver[FW_VER_MAJOR] < 1)
		wl->hw->wiphy->n_cipher_suites--;

	/*
	 * Now we know if 11a is supported (info from the NVS), so disable
	 * 11a channels if not supported
	 */
	if (!wl->enable_11a)
		wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;

	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
		     wl->enable_11a ? "" : "not ");

	wl->state = WLCORE_STATE_ON;
out:
	return ret;
}
2441
/* A device role is considered started once the fw assigned it a valid
 * link id (hlid). */
static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
{
	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
}
2446
2447 /*
2448 * Check whether a fw switch (i.e. moving from one loaded
2449 * fw to another) is needed. This function is also responsible
2450 * for updating wl->last_vif_count, so it must be called before
2451 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2452 * will be used).
2453 */
static bool wl12xx_need_fw_change(struct wl1271 *wl,
				  struct vif_counter_data vif_counter_data,
				  bool add)
{
	enum wl12xx_fw_type fw = wl->fw_type;
	u8 vif_count = vif_counter_data.counter;

	/* a vif-type change in progress must not trigger a fw switch */
	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
		return false;

	/* a genuinely new vif (not already iterated) bumps the count */
	if (add && !vif_counter_data.cur_vif_running)
		vif_count++;

	wl->last_vif_count = vif_count;

	/* no need for fw change if the device is OFF */
	if (wl->state == WLCORE_STATE_OFF)
		return false;

	/* no need for fw change if a single fw is used */
	if (!wl->mr_fw_name)
		return false;

	/* switch whenever the loaded fw no longer matches the vif count */
	return (vif_count > 1 && fw == WL12XX_FW_TYPE_NORMAL) ||
	       (vif_count <= 1 && fw == WL12XX_FW_TYPE_MULTI);
}
2485
2486 /*
2487 * Enter "forced psm". Make sure the sta is in psm against the ap,
2488 * to make the fw switch a bit more disconnection-persistent.
2489 */
wl12xx_force_active_psm(struct wl1271 * wl)2490 static void wl12xx_force_active_psm(struct wl1271 *wl)
2491 {
2492 struct wl12xx_vif *wlvif;
2493
2494 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2495 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2496 }
2497 }
2498
/* Accumulator for wlcore_hw_queue_iter(): records which hw-queue groups are
 * already claimed by other active interfaces. */
struct wlcore_hw_queue_iter_data {
	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
	/* current vif */
	struct ieee80211_vif *vif;
	/* is the current vif among those iterated */
	bool cur_running;
};
2506
/*
 * Active-interface iterator callback: mark each vif's hw-queue group as
 * taken, and note whether the vif currently being added is already running.
 */
static void wlcore_hw_queue_iter(void *data, u8 *mac,
				 struct ieee80211_vif *vif)
{
	struct wlcore_hw_queue_iter_data *iter_data = data;

	/* P2P device vifs own no hw queues; skip uninitialized queues too */
	if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
	    WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
		return;

	if (vif == iter_data->vif || iter_data->cur_running) {
		iter_data->cur_running = true;
		return;
	}

	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
}
2523
/*
 * Pick a block of NUM_TX_QUEUES mac80211 hw queues for a vif: reuse the
 * pre-allocated base on resume/recovery, otherwise claim the first free
 * group, and assign the per-interface cab (content-after-beacon) queue for
 * AP vifs. Returns 0 or -EBUSY when all groups are taken.
 */
static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
					 struct wl12xx_vif *wlvif)
{
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
	struct wlcore_hw_queue_iter_data iter_data = {};
	int i, q_base;

	/* P2P device vifs transmit no data frames and get no queues */
	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
		return 0;
	}

	iter_data.vif = vif;

	/* mark all bits taken by active interfaces */
	ieee80211_iterate_active_interfaces_atomic(wl->hw,
					IEEE80211_IFACE_ITER_RESUME_ALL,
					wlcore_hw_queue_iter, &iter_data);

	/* the current vif is already running in mac80211 (resume/recovery) */
	if (iter_data.cur_running) {
		wlvif->hw_queue_base = vif->hw_queue[0];
		wl1271_debug(DEBUG_MAC80211,
			     "using pre-allocated hw queue base %d",
			     wlvif->hw_queue_base);

		/* interface type might have changed type */
		goto adjust_cab_queue;
	}

	q_base = find_first_zero_bit(iter_data.hw_queue_map,
				     WLCORE_NUM_MAC_ADDRESSES);
	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
		return -EBUSY;

	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
		     wlvif->hw_queue_base);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
		/* register hw queues in mac80211 */
		vif->hw_queue[i] = wlvif->hw_queue_base + i;
	}

adjust_cab_queue:
	/* the last places are reserved for cab queues per interface */
	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
				 wlvif->hw_queue_base / NUM_TX_QUEUES;
	else
		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;

	return 0;
}
2579
/*
 * mac80211 add_interface handler: initialize per-vif state, boot the fw on
 * the first interface, possibly trigger a single-/multi-role fw switch via
 * an intended recovery, then enable the firmware role and per-vif config.
 */
static int wl1271_op_add_interface(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct vif_counter_data vif_count;
	int ret = 0;
	u8 role_type;

	if (wl->plt) {
		wl1271_error("Adding Interface not allowed while in PLT mode");
		return -EBUSY;
	}

	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
			     IEEE80211_VIF_SUPPORTS_UAPSD |
			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;

	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
		     ieee80211_vif_type_p2p(vif), vif->addr);

	/* count vifs before taking the mutex (iterates mac80211 state) */
	wl12xx_get_vif_count(hw, vif, &vif_count);

	mutex_lock(&wl->mutex);

	/*
	 * in some very corner case HW recovery scenarios its possible to
	 * get here before __wl1271_op_remove_interface is complete, so
	 * opt out if that is the case.
	 */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
		ret = -EBUSY;
		goto out_unlock;
	}


	ret = wl12xx_init_vif_data(wl, vif);
	if (ret < 0)
		goto out_unlock;

	wlvif->wl = wl;
	role_type = wl12xx_get_role_type(wl, wlvif);
	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
	if (ret < 0)
		goto out_unlock;

	/*
	 * TODO: after the nvs issue will be solved, move this block
	 * to start(), and make sure here the driver is ON.
	 */
	if (wl->state == WLCORE_STATE_OFF) {
		/*
		 * we still need this in order to configure the fw
		 * while uploading the nvs
		 */
		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);

		ret = wl12xx_init_fw(wl);
		if (ret < 0)
			goto out_unlock;
	}

	/*
	 * Call runtime PM only after possible wl12xx_init_fw() above
	 * is done. Otherwise we do not have interrupts enabled.
	 */
	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out_unlock;

	if (wl12xx_need_fw_change(wl, vif_count, true)) {
		/* switch fw flavour via a deliberate, synchronous recovery */
		wl12xx_force_active_psm(wl);
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		mutex_unlock(&wl->mutex);
		wl1271_recovery_work(&wl->recovery_work);
		return 0;
	}

	if (!wlcore_is_p2p_mgmt(wlvif)) {
		ret = wl12xx_cmd_role_enable(wl, vif->addr,
					     role_type, &wlvif->role_id);
		if (ret < 0)
			goto out;

		ret = wl1271_init_vif_specific(wl, vif);
		if (ret < 0)
			goto out;

	} else {
		/* p2p-mgmt vifs use the generic DEVICE role */
		ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
					     &wlvif->dev_role_id);
		if (ret < 0)
			goto out;

		/* needed mainly for configuring rate policies */
		ret = wl1271_sta_hw_init(wl, wlvif);
		if (ret < 0)
			goto out;
	}

	list_add(&wlvif->list, &wl->wlvif_list);
	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);

	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		wl->ap_count++;
	else
		wl->sta_count++;
out:
	pm_runtime_put_autosuspend(wl->dev);
out_unlock:
	mutex_unlock(&wl->mutex);

	return ret;
}
2700
/*
 * Tear down a vif: abort any scan/ROC it owns, disable its fw roles (unless
 * a recovery is in progress), free rate policies, templates and link ids,
 * update interface counters, and cancel its work items.
 * Called with wl->mutex held; drops and re-acquires it to flush works.
 */
static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 struct ieee80211_vif *vif,
					 bool reset_tx_queues)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int i, ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");

	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		return;

	/* because of hardware recovery, we may get here twice */
	if (wl->state == WLCORE_STATE_OFF)
		return;

	wl1271_info("down");

	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
	    wl->scan_wlvif == wlvif) {
		struct cfg80211_scan_info info = {
			.aborted = true,
		};

		/*
		 * Rearm the tx watchdog just before idling scan. This
		 * prevents just-finished scans from triggering the watchdog
		 */
		wl12xx_rearm_tx_watchdog_locked(wl);

		wl->scan.state = WL1271_SCAN_STATE_IDLE;
		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
		wl->scan_wlvif = NULL;
		wl->scan.req = NULL;
		ieee80211_scan_completed(wl->hw, &info);
	}

	if (wl->sched_vif == wlvif)
		wl->sched_vif = NULL;

	/* expire any remain-on-channel owned by this vif */
	if (wl->roc_vif == vif) {
		wl->roc_vif = NULL;
		ieee80211_remain_on_channel_expired(wl->hw);
	}

	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
		/* disable active roles */
		ret = pm_runtime_resume_and_get(wl->dev);
		if (ret < 0)
			goto deinit;

		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
		    wlvif->bss_type == BSS_TYPE_IBSS) {
			if (wl12xx_dev_role_started(wlvif))
				wl12xx_stop_dev(wl, wlvif);
		}

		if (!wlcore_is_p2p_mgmt(wlvif)) {
			ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
			if (ret < 0) {
				pm_runtime_put_noidle(wl->dev);
				goto deinit;
			}
		} else {
			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
			if (ret < 0) {
				pm_runtime_put_noidle(wl->dev);
				goto deinit;
			}
		}

		pm_runtime_put_autosuspend(wl->dev);
	}
deinit:
	wl12xx_tx_reset_wlvif(wl, wlvif);

	/* clear all hlids (except system_hlid) */
	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
	    wlvif->bss_type == BSS_TYPE_IBSS) {
		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
	} else {
		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
			wl12xx_free_rate_policy(wl,
						&wlvif->ap.ucast_rate_idx[i]);
		wl1271_free_ap_keys(wl, wlvif);
	}

	dev_kfree_skb(wlvif->probereq);
	wlvif->probereq = NULL;
	if (wl->last_wlvif == wlvif)
		wl->last_wlvif = NULL;
	list_del(&wlvif->list);
	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;

	if (is_ap)
		wl->ap_count--;
	else
		wl->sta_count--;

	/*
	 * Last AP, have more stations. Configure sleep auth according to STA.
	 * Don't do this on unintended recovery.
	 */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
		goto unlock;

	if (wl->ap_count == 0 && is_ap) {
		/* mask ap events */
		wl->event_mask &= ~wl->ap_event_mask;
		wl1271_event_unmask(wl);
	}

	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
		/* Configure for power according to debugfs */
		if (sta_auth != WL1271_PSM_ILLEGAL)
			wl1271_acx_sleep_auth(wl, sta_auth);
		/* Configure for ELP power saving */
		else
			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
	}

unlock:
	/* drop the mutex so the per-vif works can be cancelled safely */
	mutex_unlock(&wl->mutex);

	timer_delete_sync(&wlvif->rx_streaming_timer);
	cancel_work_sync(&wlvif->rx_streaming_enable_work);
	cancel_work_sync(&wlvif->rx_streaming_disable_work);
	cancel_work_sync(&wlvif->rc_update_work);
	cancel_delayed_work_sync(&wlvif->connection_loss_work);
	cancel_delayed_work_sync(&wlvif->channel_switch_work);
	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);

	mutex_lock(&wl->mutex);
}
2850
/*
 * mac80211 remove_interface handler: remove the vif if it is still on our
 * list, then trigger a fw switch (via intended recovery) when the remaining
 * vif count calls for the other firmware flavour.
 */
static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl12xx_vif *iter;
	struct vif_counter_data vif_count;

	wl12xx_get_vif_count(hw, vif, &vif_count);
	mutex_lock(&wl->mutex);

	if (wl->state == WLCORE_STATE_OFF ||
	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		goto out;

	/*
	 * wl->vif can be null here if someone shuts down the interface
	 * just when hardware recovery has been started.
	 */
	wl12xx_for_each_wlvif(wl, iter) {
		if (iter != wlvif)
			continue;

		__wl1271_op_remove_interface(wl, vif, true);
		break;
	}
	/* the vif was expected to be on the list; warn if it was not found */
	WARN_ON(iter != wlvif);
	if (wl12xx_need_fw_change(wl, vif_count, false)) {
		wl12xx_force_active_psm(wl);
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		wl12xx_queue_recovery_work(wl);
	}
out:
	mutex_unlock(&wl->mutex);
}
2886
/*
 * mac80211 change_interface handler, implemented as a full remove + add
 * cycle. The VIF_CHANGE_IN_PROGRESS flag keeps wl12xx_need_fw_change()
 * from triggering a firmware switch in the middle of the transition.
 */
static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      enum nl80211_iftype new_type, bool p2p)
{
	struct wl1271 *wl = hw->priv;
	int ret;

	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
	wl1271_op_remove_interface(hw, vif);

	vif->type = new_type;
	vif->p2p = p2p;
	ret = wl1271_op_add_interface(hw, vif);

	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
	return ret;
}
2904
wlcore_join(struct wl1271 * wl,struct wl12xx_vif * wlvif)2905 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2906 {
2907 int ret;
2908 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2909
2910 /*
2911 * One of the side effects of the JOIN command is that is clears
2912 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2913 * to a WPA/WPA2 access point will therefore kill the data-path.
2914 * Currently the only valid scenario for JOIN during association
2915 * is on roaming, in which case we will also be given new keys.
2916 * Keep the below message for now, unless it starts bothering
2917 * users who really like to roam a lot :)
2918 */
2919 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2920 wl1271_info("JOIN while associated.");
2921
2922 /* clear encryption type */
2923 wlvif->encryption_type = KEY_NONE;
2924
2925 if (is_ibss)
2926 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2927 else
2928 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2929
2930 return ret;
2931 }
2932
wl1271_ssid_set(struct wl12xx_vif * wlvif,struct sk_buff * skb,int offset)2933 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2934 int offset)
2935 {
2936 u8 ssid_len;
2937 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2938 skb->len - offset);
2939
2940 if (!ptr) {
2941 wl1271_error("No SSID in IEs!");
2942 return -ENOENT;
2943 }
2944
2945 ssid_len = ptr[1];
2946 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2947 wl1271_error("SSID is too long!");
2948 return -EINVAL;
2949 }
2950
2951 wlvif->ssid_len = ssid_len;
2952 memcpy(wlvif->ssid, ptr+2, ssid_len);
2953 return 0;
2954 }
2955
wlcore_set_ssid(struct wl1271 * wl,struct wl12xx_vif * wlvif)2956 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2957 {
2958 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2959 struct sk_buff *skb;
2960 int ieoffset;
2961
2962 /* we currently only support setting the ssid from the ap probe req */
2963 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2964 return -EINVAL;
2965
2966 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2967 if (!skb)
2968 return -EINVAL;
2969
2970 ieoffset = offsetof(struct ieee80211_mgmt,
2971 u.probe_req.variable);
2972 wl1271_ssid_set(wlvif, skb, ieoffset);
2973 dev_kfree_skb(skb);
2974
2975 return 0;
2976 }
2977
/*
 * Apply post-association configuration for a STA vif: record AID and
 * beacon/QoS parameters, build the ps-poll and AP probe-request templates,
 * enable connection monitoring and keep-alive, and sync the fw PS mode and
 * rate policies with mac80211. Returns 0 or a negative error.
 */
static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			    struct ieee80211_bss_conf *bss_conf,
			    u32 sta_rate_set)
{
	struct ieee80211_vif *vif = container_of(bss_conf, struct ieee80211_vif,
						 bss_conf);
	int ieoffset;
	int ret;

	wlvif->aid = vif->cfg.aid;
	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chanreq.oper);
	wlvif->beacon_int = bss_conf->beacon_int;
	wlvif->wmm_enabled = bss_conf->qos;

	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);

	/*
	 * with wl1271, we don't need to update the
	 * beacon_int and dtim_period, because the firmware
	 * updates it by itself when the first beacon is
	 * received after a join.
	 */
	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
	if (ret < 0)
		return ret;

	/*
	 * Get a template for hardware connection maintenance
	 */
	dev_kfree_skb(wlvif->probereq);
	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
							wlvif,
							NULL);
	/*
	 * The template build may fail and return NULL; wl1271_ssid_set()
	 * dereferences the skb unconditionally, so only extract the SSID
	 * when a template was actually produced.
	 */
	if (wlvif->probereq) {
		ieoffset = offsetof(struct ieee80211_mgmt,
				    u.probe_req.variable);
		wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
	}

	/* enable the connection monitoring feature */
	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
	if (ret < 0)
		return ret;

	/*
	 * The join command disable the keep-alive mode, shut down its process,
	 * and also clear the template config, so we need to reset it all after
	 * the join. The acx_aid starts the keep-alive process, and the order
	 * of the commands below is relevant.
	 */
	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
	if (ret < 0)
		return ret;

	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
	if (ret < 0)
		return ret;

	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
	if (ret < 0)
		return ret;

	ret = wl1271_acx_keep_alive_config(wl, wlvif,
					   wlvif->sta.klv_template_id,
					   ACX_KEEP_ALIVE_TPL_VALID);
	if (ret < 0)
		return ret;

	/*
	 * The default fw psm configuration is AUTO, while mac80211 default
	 * setting is off (ACTIVE), so sync the fw with the correct value.
	 */
	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
	if (ret < 0)
		return ret;

	if (sta_rate_set) {
		wlvif->rate_set =
			wl1271_tx_enabled_rates_get(wl,
						    sta_rate_set,
						    wlvif->band);
		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
		if (ret < 0)
			return ret;
	}

	return ret;
}
3064
/*
 * Undo association state for a STA (or joined-IBSS) vif: drop templates,
 * disable connection monitoring, keep-alive and beacon filtering, and abort
 * any in-progress channel switch. Returns 0 (or false when nothing to do).
 */
static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret;
	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;

	/* make sure we are connected (sta) joined */
	if (sta &&
	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		return false;

	/* make sure we are joined (ibss) */
	/*
	 * NOTE(review): this condition is the mirror image of the STA one —
	 * it bails out when IBSS_JOINED *was* set, yet still clears the bit.
	 * Looks intentional (ibss teardown handled elsewhere) but verify.
	 */
	if (!sta &&
	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
		return false;

	if (sta) {
		/* use defaults when not associated */
		wlvif->aid = 0;

		/* free probe-request template */
		dev_kfree_skb(wlvif->probereq);
		wlvif->probereq = NULL;

		/* disable connection monitor features */
		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
		if (ret < 0)
			return ret;

		/* Disable the keep-alive feature */
		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
		if (ret < 0)
			return ret;

		/* disable beacon filtering */
		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
		if (ret < 0)
			return ret;
	}

	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

		/* abort an in-flight channel switch and tell mac80211 */
		wl12xx_cmd_stop_channel_switch(wl, wlvif);
		ieee80211_chswitch_done(vif, false, 0);
		cancel_delayed_work(&wlvif->channel_switch_work);
	}

	/* invalidate keep-alive template */
	wl1271_acx_keep_alive_config(wl, wlvif,
				     wlvif->sta.klv_template_id,
				     ACX_KEEP_ALIVE_TPL_INVALID);

	return 0;
}
3119
wl1271_set_band_rate(struct wl1271 * wl,struct wl12xx_vif * wlvif)3120 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3121 {
3122 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3123 wlvif->rate_set = wlvif->basic_rate_set;
3124 }
3125
/*
 * Track a STA vif's idle/active transitions via WLVIF_FLAG_ACTIVE and stop
 * scheduled scanning when the vif leaves idle.
 */
static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   bool idle)
{
	bool was_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);

	if (idle == was_idle)
		return;

	if (idle) {
		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
		return;
	}

	/* The current firmware only supports sched_scan in idle */
	if (wl->sched_vif == wlvif)
		wl->ops->sched_scan_stop(wl, wlvif);

	set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
}
3144
/*
 * Apply a mac80211 config change to one vif. Currently only the tx power
 * level is per-vif configurable; p2p-mgmt vifs are skipped entirely.
 */
static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			     struct ieee80211_conf *conf, u32 changed)
{
	int ret;

	if (wlcore_is_p2p_mgmt(wlvif))
		return 0;

	/* nothing to do if the requested power level is already set */
	if (conf->power_level == wlvif->power_level)
		return 0;

	ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
	if (ret < 0)
		return ret;

	wlvif->power_level = conf->power_level;
	return 0;
}
3163
/*
 * mac80211 config handler: cache the global power level, then — if the
 * driver is ON — wake the chip and push the change to every vif.
 */
static int wl1271_op_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	struct ieee80211_conf *conf = &hw->conf;
	int ret = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
		     " changed 0x%x",
		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
		     conf->power_level,
		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
		     changed);

	mutex_lock(&wl->mutex);

	/* remember the level even while OFF, so new vifs inherit it */
	if (changed & IEEE80211_CONF_CHANGE_POWER)
		wl->power_level = conf->power_level;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out;

	/* configure each interface */
	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
		if (ret < 0)
			goto out_sleep;
	}

out_sleep:
	pm_runtime_put_autosuspend(wl->dev);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
3205
/*
 * Snapshot of the multicast address list, built in
 * wl1271_op_prepare_multicast() and handed to
 * wl1271_op_configure_filter() through the u64 multicast cookie.
 */
struct wl1271_filter_params {
	bool enabled;		/* false when the list exceeded the fw table */
	int mc_list_length;	/* number of valid entries in mc_list */
	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
};
3211
wl1271_op_prepare_multicast(struct ieee80211_hw * hw,struct netdev_hw_addr_list * mc_list)3212 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3213 struct netdev_hw_addr_list *mc_list)
3214 {
3215 struct wl1271_filter_params *fp;
3216 struct netdev_hw_addr *ha;
3217
3218 fp = kzalloc_obj(*fp, GFP_ATOMIC);
3219 if (!fp) {
3220 wl1271_error("Out of memory setting filters.");
3221 return 0;
3222 }
3223
3224 /* update multicast filtering parameters */
3225 fp->mc_list_length = 0;
3226 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3227 fp->enabled = false;
3228 } else {
3229 fp->enabled = true;
3230 netdev_hw_addr_list_for_each(ha, mc_list) {
3231 memcpy(fp->mc_list[fp->mc_list_length],
3232 ha->addr, ETH_ALEN);
3233 fp->mc_list_length++;
3234 }
3235 }
3236
3237 return (u64)(unsigned long)fp;
3238 }
3239
/* rx filter flags the driver can honour; configure_filter() masks out
 * everything else from both 'changed' and '*total' */
#define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
				  FIF_FCSFAIL | \
				  FIF_BCN_PRBRESP_PROMISC | \
				  FIF_CONTROL | \
				  FIF_OTHER_BSS)
3245
/*
 * mac80211 configure_filter handler.
 *
 * @multicast carries the wl1271_filter_params cookie allocated by
 * wl1271_op_prepare_multicast() (0 if its allocation failed); it is
 * always freed here.  Only the multicast group-address table is
 * programmable; the remaining filters are derived by the firmware from
 * the active roles / ROC state, so they are only masked here.
 */
static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
				       unsigned int changed,
				       unsigned int *total, u64 multicast)
{
	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;

	/*
	 * Initialize: if fp is NULL and FIF_ALLMULTI is not requested,
	 * neither branch below assigns ret before it is tested.
	 */
	int ret = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
		     " total %x", changed, *total);

	mutex_lock(&wl->mutex);

	*total &= WL1271_SUPPORTED_FILTERS;
	changed &= WL1271_SUPPORTED_FILTERS;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out;

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (wlcore_is_p2p_mgmt(wlvif))
			continue;

		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
			if (*total & FIF_ALLMULTI)
				ret = wl1271_acx_group_address_tbl(wl, wlvif,
								   false,
								   NULL, 0);
			else if (fp)
				ret = wl1271_acx_group_address_tbl(wl, wlvif,
							fp->enabled,
							fp->mc_list,
							fp->mc_list_length);
			if (ret < 0)
				goto out_sleep;
		}

		/*
		 * If interface in AP mode and created with allmulticast then disable
		 * the firmware filters so that all multicast packets are passed
		 * This is mandatory for MDNS based discovery protocols
		 */
 		if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
 			if (*total & FIF_ALLMULTI) {
				ret = wl1271_acx_group_address_tbl(wl, wlvif,
							false,
							NULL, 0);
				if (ret < 0)
					goto out_sleep;
			}
		}
	}

	/*
	 * the fw doesn't provide an api to configure the filters. instead,
	 * the filters configuration is based on the active roles / ROC
	 * state.
	 */

out_sleep:
	pm_runtime_put_autosuspend(wl->dev);

out:
	mutex_unlock(&wl->mutex);
	kfree(fp);
}
3318
/*
 * Queue a key for an AP role that has not started yet; the recorded
 * keys are pushed to the firmware later by wl1271_ap_init_hwenc().
 * Returns 0, -EINVAL on oversized key or attempted replacement,
 * -EBUSY when all slots are taken, or -ENOMEM.
 */
static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				u8 id, u8 key_type, u8 key_size,
				const u8 *key, u8 hlid, u32 tx_seq_32,
				u16 tx_seq_16, bool is_pairwise)
{
	struct wl1271_ap_key *ap_key;
	int slot;

	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);

	if (key_size > MAX_KEY_SIZE)
		return -EINVAL;

	/*
	 * Locate the first free slot, making sure on the way that this
	 * key id was not recorded before - replacement is unsupported.
	 */
	for (slot = 0; slot < MAX_NUM_KEYS; slot++) {
		struct wl1271_ap_key *cur = wlvif->ap.recorded_keys[slot];

		if (!cur)
			break;

		if (cur->id == id) {
			wl1271_warning("trying to record key replacement");
			return -EINVAL;
		}
	}

	if (slot == MAX_NUM_KEYS)
		return -EBUSY;

	ap_key = kzalloc_obj(*ap_key);
	if (!ap_key)
		return -ENOMEM;

	ap_key->id = id;
	ap_key->key_type = key_type;
	ap_key->key_size = key_size;
	memcpy(ap_key->key, key, key_size);
	ap_key->hlid = hlid;
	ap_key->tx_seq_32 = tx_seq_32;
	ap_key->tx_seq_16 = tx_seq_16;
	ap_key->is_pairwise = is_pairwise;

	wlvif->ap.recorded_keys[slot] = ap_key;
	return 0;
}
3365
wl1271_free_ap_keys(struct wl1271 * wl,struct wl12xx_vif * wlvif)3366 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3367 {
3368 int i;
3369
3370 for (i = 0; i < MAX_NUM_KEYS; i++) {
3371 kfree(wlvif->ap.recorded_keys[i]);
3372 wlvif->ap.recorded_keys[i] = NULL;
3373 }
3374 }
3375
wl1271_ap_init_hwenc(struct wl1271 * wl,struct wl12xx_vif * wlvif)3376 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3377 {
3378 int i, ret = 0;
3379 struct wl1271_ap_key *key;
3380 bool wep_key_added = false;
3381
3382 for (i = 0; i < MAX_NUM_KEYS; i++) {
3383 u8 hlid;
3384 if (wlvif->ap.recorded_keys[i] == NULL)
3385 break;
3386
3387 key = wlvif->ap.recorded_keys[i];
3388 hlid = key->hlid;
3389 if (hlid == WL12XX_INVALID_LINK_ID)
3390 hlid = wlvif->ap.bcast_hlid;
3391
3392 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3393 key->id, key->key_type,
3394 key->key_size, key->key,
3395 hlid, key->tx_seq_32,
3396 key->tx_seq_16, key->is_pairwise);
3397 if (ret < 0)
3398 goto out;
3399
3400 if (key->key_type == KEY_WEP)
3401 wep_key_added = true;
3402 }
3403
3404 if (wep_key_added) {
3405 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3406 wlvif->ap.bcast_hlid);
3407 if (ret < 0)
3408 goto out;
3409 }
3410
3411 out:
3412 wl1271_free_ap_keys(wl, wlvif);
3413 return ret;
3414 }
3415
/*
 * Install or remove a key for either an AP or a STA role.
 *
 * AP role: keys set before the AP has started are only recorded and
 * get flushed to the firmware by wl1271_ap_init_hwenc() at AP start.
 * STA role: the key is addressed by the peer's MAC (broadcast address
 * for group keys), with two silent-ignore cases for KEY_REMOVE.
 *
 * Returns 0 on success or a negative error from the command layer.
 */
static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			  u16 action, u8 id, u8 key_type,
			  u8 key_size, const u8 *key, u32 tx_seq_32,
			  u16 tx_seq_16, struct ieee80211_sta *sta,
			  bool is_pairwise)
{
	int ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	if (is_ap) {
		struct wl1271_station *wl_sta;
		u8 hlid;

		/* pairwise keys go to the peer's link; group keys to bcast */
		if (sta) {
			wl_sta = (struct wl1271_station *)sta->drv_priv;
			hlid = wl_sta->hlid;
		} else {
			hlid = wlvif->ap.bcast_hlid;
		}

		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
			/*
			 * We do not support removing keys after AP shutdown.
			 * Pretend we do to make mac80211 happy.
			 */
			if (action != KEY_ADD_OR_REPLACE)
				return 0;

			/* AP not started yet: just record for later */
			ret = wl1271_record_ap_key(wl, wlvif, id,
					     key_type, key_size,
					     key, hlid, tx_seq_32,
					     tx_seq_16, is_pairwise);
		} else {
			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
					     id, key_type, key_size,
					     key, hlid, tx_seq_32,
					     tx_seq_16, is_pairwise);
		}

		if (ret < 0)
			return ret;
	} else {
		const u8 *addr;
		static const u8 bcast_addr[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
		};

		addr = sta ? sta->addr : bcast_addr;

		if (is_zero_ether_addr(addr)) {
			/* We dont support TX only encryption */
			return -EOPNOTSUPP;
		}

		/* The wl1271 does not allow to remove unicast keys - they
		   will be cleared automatically on next CMD_JOIN. Ignore the
		   request silently, as we dont want the mac80211 to emit
		   an error message. */
		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
			return 0;

		/* don't remove key if hlid was already deleted */
		if (action == KEY_REMOVE &&
		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
			return 0;

		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
					     id, key_type, key_size,
					     key, addr, tx_seq_32,
					     tx_seq_16);
		if (ret < 0)
			return ret;

	}

	return 0;
}
3493
wlcore_op_set_key(struct ieee80211_hw * hw,enum set_key_cmd cmd,struct ieee80211_vif * vif,struct ieee80211_sta * sta,struct ieee80211_key_conf * key_conf)3494 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3495 struct ieee80211_vif *vif,
3496 struct ieee80211_sta *sta,
3497 struct ieee80211_key_conf *key_conf)
3498 {
3499 struct wl1271 *wl = hw->priv;
3500 int ret;
3501 bool might_change_spare =
3502 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3503 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3504
3505 if (might_change_spare) {
3506 /*
3507 * stop the queues and flush to ensure the next packets are
3508 * in sync with FW spare block accounting
3509 */
3510 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3511 wl1271_tx_flush(wl);
3512 }
3513
3514 mutex_lock(&wl->mutex);
3515
3516 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3517 ret = -EAGAIN;
3518 goto out_wake_queues;
3519 }
3520
3521 ret = pm_runtime_resume_and_get(wl->dev);
3522 if (ret < 0)
3523 goto out_wake_queues;
3524
3525 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3526
3527 pm_runtime_put_autosuspend(wl->dev);
3528
3529 out_wake_queues:
3530 if (might_change_spare)
3531 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3532
3533 mutex_unlock(&wl->mutex);
3534
3535 return ret;
3536 }
3537
wlcore_set_key(struct wl1271 * wl,enum set_key_cmd cmd,struct ieee80211_vif * vif,struct ieee80211_sta * sta,struct ieee80211_key_conf * key_conf)3538 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3539 struct ieee80211_vif *vif,
3540 struct ieee80211_sta *sta,
3541 struct ieee80211_key_conf *key_conf)
3542 {
3543 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3544 int ret;
3545 u32 tx_seq_32 = 0;
3546 u16 tx_seq_16 = 0;
3547 u8 key_type;
3548 u8 hlid;
3549 bool is_pairwise;
3550
3551 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3552
3553 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3554 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3555 key_conf->cipher, key_conf->keyidx,
3556 key_conf->keylen, key_conf->flags);
3557 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3558
3559 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3560 if (sta) {
3561 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3562 hlid = wl_sta->hlid;
3563 } else {
3564 hlid = wlvif->ap.bcast_hlid;
3565 }
3566 else
3567 hlid = wlvif->sta.hlid;
3568
3569 if (hlid != WL12XX_INVALID_LINK_ID) {
3570 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3571 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3572 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3573 }
3574
3575 switch (key_conf->cipher) {
3576 case WLAN_CIPHER_SUITE_WEP40:
3577 case WLAN_CIPHER_SUITE_WEP104:
3578 key_type = KEY_WEP;
3579
3580 key_conf->hw_key_idx = key_conf->keyidx;
3581 break;
3582 case WLAN_CIPHER_SUITE_TKIP:
3583 key_type = KEY_TKIP;
3584 key_conf->hw_key_idx = key_conf->keyidx;
3585 break;
3586 case WLAN_CIPHER_SUITE_CCMP:
3587 key_type = KEY_AES;
3588 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3589 break;
3590 case WL1271_CIPHER_SUITE_GEM:
3591 key_type = KEY_GEM;
3592 break;
3593 case WLAN_CIPHER_SUITE_AES_CMAC:
3594 key_type = KEY_IGTK;
3595 break;
3596 default:
3597 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3598
3599 return -EOPNOTSUPP;
3600 }
3601
3602 is_pairwise = key_conf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
3603
3604 switch (cmd) {
3605 case SET_KEY:
3606 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3607 key_conf->keyidx, key_type,
3608 key_conf->keylen, key_conf->key,
3609 tx_seq_32, tx_seq_16, sta, is_pairwise);
3610 if (ret < 0) {
3611 wl1271_error("Could not add or replace key");
3612 return ret;
3613 }
3614
3615 /* Store AP encryption key type */
3616 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3617 wlvif->encryption_type = key_type;
3618
3619 /*
3620 * reconfiguring arp response if the unicast (or common)
3621 * encryption key type was changed
3622 */
3623 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3624 (sta || key_type == KEY_WEP) &&
3625 wlvif->encryption_type != key_type) {
3626 wlvif->encryption_type = key_type;
3627 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3628 if (ret < 0) {
3629 wl1271_warning("build arp rsp failed: %d", ret);
3630 return ret;
3631 }
3632 }
3633 break;
3634
3635 case DISABLE_KEY:
3636 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3637 key_conf->keyidx, key_type,
3638 key_conf->keylen, key_conf->key,
3639 0, 0, sta, is_pairwise);
3640 if (ret < 0) {
3641 wl1271_error("Could not remove key");
3642 return ret;
3643 }
3644 break;
3645
3646 default:
3647 wl1271_error("Unsupported key cmd 0x%x", cmd);
3648 return -EOPNOTSUPP;
3649 }
3650
3651 return ret;
3652 }
3653 EXPORT_SYMBOL_GPL(wlcore_set_key);
3654
wl1271_op_set_default_key_idx(struct ieee80211_hw * hw,struct ieee80211_vif * vif,int key_idx)3655 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3656 struct ieee80211_vif *vif,
3657 int key_idx)
3658 {
3659 struct wl1271 *wl = hw->priv;
3660 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3661 int ret;
3662
3663 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3664 key_idx);
3665
3666 /* we don't handle unsetting of default key */
3667 if (key_idx == -1)
3668 return;
3669
3670 mutex_lock(&wl->mutex);
3671
3672 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3673 ret = -EAGAIN;
3674 goto out_unlock;
3675 }
3676
3677 ret = pm_runtime_resume_and_get(wl->dev);
3678 if (ret < 0)
3679 goto out_unlock;
3680
3681 wlvif->default_key = key_idx;
3682
3683 /* the default WEP key needs to be configured at least once */
3684 if (wlvif->encryption_type == KEY_WEP) {
3685 ret = wl12xx_cmd_set_default_wep_key(wl,
3686 key_idx,
3687 wlvif->sta.hlid);
3688 if (ret < 0)
3689 goto out_sleep;
3690 }
3691
3692 out_sleep:
3693 pm_runtime_put_autosuspend(wl->dev);
3694
3695 out_unlock:
3696 mutex_unlock(&wl->mutex);
3697 }
3698
wlcore_regdomain_config(struct wl1271 * wl)3699 void wlcore_regdomain_config(struct wl1271 *wl)
3700 {
3701 int ret;
3702
3703 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3704 return;
3705
3706 mutex_lock(&wl->mutex);
3707
3708 if (unlikely(wl->state != WLCORE_STATE_ON))
3709 goto out;
3710
3711 ret = pm_runtime_resume_and_get(wl->dev);
3712 if (ret < 0)
3713 goto out;
3714
3715 ret = wlcore_cmd_regdomain_config_locked(wl);
3716 if (ret < 0) {
3717 wl12xx_queue_recovery_work(wl);
3718 goto out;
3719 }
3720
3721 pm_runtime_put_autosuspend(wl->dev);
3722 out:
3723 mutex_unlock(&wl->mutex);
3724 }
3725
wl1271_op_hw_scan(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_scan_request * hw_req)3726 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3727 struct ieee80211_vif *vif,
3728 struct ieee80211_scan_request *hw_req)
3729 {
3730 struct cfg80211_scan_request *req = &hw_req->req;
3731 struct wl1271 *wl = hw->priv;
3732 int ret;
3733 u8 *ssid = NULL;
3734 size_t len = 0;
3735
3736 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3737
3738 if (req->n_ssids) {
3739 ssid = req->ssids[0].ssid;
3740 len = req->ssids[0].ssid_len;
3741 }
3742
3743 mutex_lock(&wl->mutex);
3744
3745 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3746 /*
3747 * We cannot return -EBUSY here because cfg80211 will expect
3748 * a call to ieee80211_scan_completed if we do - in this case
3749 * there won't be any call.
3750 */
3751 ret = -EAGAIN;
3752 goto out;
3753 }
3754
3755 ret = pm_runtime_resume_and_get(wl->dev);
3756 if (ret < 0)
3757 goto out;
3758
3759 /* fail if there is any role in ROC */
3760 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3761 /* don't allow scanning right now */
3762 ret = -EBUSY;
3763 goto out_sleep;
3764 }
3765
3766 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3767 out_sleep:
3768 pm_runtime_put_autosuspend(wl->dev);
3769 out:
3770 mutex_unlock(&wl->mutex);
3771
3772 return ret;
3773 }
3774
/*
 * mac80211 cancel_hw_scan handler: abort a running scan, reset the
 * driver's scan state and report the (aborted) completion to mac80211.
 */
static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct cfg80211_scan_info info = {
		.aborted = true,
	};
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* nothing to cancel */
	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
		goto out;

	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out;

	/* only tell the fw to stop if the scan hasn't already finished */
	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
		ret = wl->ops->scan_stop(wl, wlvif);
		if (ret < 0)
			goto out_sleep;
	}

	/*
	 * Rearm the tx watchdog just before idling scan. This
	 * prevents just-finished scans from triggering the watchdog
	 */
	wl12xx_rearm_tx_watchdog_locked(wl);

	wl->scan.state = WL1271_SCAN_STATE_IDLE;
	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
	wl->scan_wlvif = NULL;
	wl->scan.req = NULL;
	ieee80211_scan_completed(wl->hw, &info);

out_sleep:
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);

	/* must run outside wl->mutex: the work itself takes the mutex */
	cancel_delayed_work_sync(&wl->scan_complete_work);
}
3824
wl1271_op_sched_scan_start(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct cfg80211_sched_scan_request * req,struct ieee80211_scan_ies * ies)3825 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3826 struct ieee80211_vif *vif,
3827 struct cfg80211_sched_scan_request *req,
3828 struct ieee80211_scan_ies *ies)
3829 {
3830 struct wl1271 *wl = hw->priv;
3831 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3832 int ret;
3833
3834 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3835
3836 mutex_lock(&wl->mutex);
3837
3838 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3839 ret = -EAGAIN;
3840 goto out;
3841 }
3842
3843 ret = pm_runtime_resume_and_get(wl->dev);
3844 if (ret < 0)
3845 goto out;
3846
3847 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3848 if (ret < 0)
3849 goto out_sleep;
3850
3851 wl->sched_vif = wlvif;
3852
3853 out_sleep:
3854 pm_runtime_put_autosuspend(wl->dev);
3855 out:
3856 mutex_unlock(&wl->mutex);
3857 return ret;
3858 }
3859
wl1271_op_sched_scan_stop(struct ieee80211_hw * hw,struct ieee80211_vif * vif)3860 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3861 struct ieee80211_vif *vif)
3862 {
3863 struct wl1271 *wl = hw->priv;
3864 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3865 int ret;
3866
3867 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3868
3869 mutex_lock(&wl->mutex);
3870
3871 if (unlikely(wl->state != WLCORE_STATE_ON))
3872 goto out;
3873
3874 ret = pm_runtime_resume_and_get(wl->dev);
3875 if (ret < 0)
3876 goto out;
3877
3878 wl->ops->sched_scan_stop(wl, wlvif);
3879
3880 pm_runtime_put_autosuspend(wl->dev);
3881 out:
3882 mutex_unlock(&wl->mutex);
3883
3884 return 0;
3885 }
3886
/*
 * mac80211 set_frag_threshold handler: push the new fragmentation
 * threshold to the firmware (a failure is logged but still returned).
 */
static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw,
					int radio_idx, u32 value)
{
	struct wl1271 *wl = hw->priv;
	int ret;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out_unlock;

	ret = wl1271_acx_frag_threshold(wl, value);
	if (ret < 0)
		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);

	pm_runtime_put_autosuspend(wl->dev);

out_unlock:
	mutex_unlock(&wl->mutex);

	return ret;
}
3915
/*
 * mac80211 set_rts_threshold handler: apply the new RTS threshold to
 * every interface; per-vif failures are logged and the last status is
 * returned.
 */
static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
				       u32 value)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out_unlock;

	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
		if (ret < 0)
			wl1271_warning("set rts threshold failed: %d", ret);
	}

	pm_runtime_put_autosuspend(wl->dev);

out_unlock:
	mutex_unlock(&wl->mutex);

	return ret;
}
3946
/*
 * Strip the first information element matching @eid from a template
 * skb, searching from @ieoffset.  No-op if the IE is absent.
 */
static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
{
	const u8 *end = skb->data + skb->len;
	u8 *ie;
	int ie_len;

	ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
				    skb->len - ieoffset);
	if (!ie)
		return;

	/* total IE size: 2-byte header plus payload */
	ie_len = ie[1] + 2;
	memmove(ie, ie + ie_len, end - (ie + ie_len));
	skb_trim(skb, skb->len - ie_len);
}
3960
/*
 * Strip the first vendor-specific IE matching @oui/@oui_type from a
 * template skb, searching from @ieoffset.  No-op if absent.
 */
static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
				    unsigned int oui, u8 oui_type,
				    int ieoffset)
{
	const u8 *end = skb->data + skb->len;
	u8 *ie;
	int ie_len;

	ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
					   skb->data + ieoffset,
					   skb->len - ieoffset);
	if (!ie)
		return;

	/* total IE size: 2-byte header plus payload */
	ie_len = ie[1] + 2;
	memmove(ie, ie + ie_len, end - (ie + ie_len));
	skb_trim(skb, skb->len - ie_len);
}
3977
/*
 * Upload the probe-response template built by mac80211 for @vif.
 * Returns -EOPNOTSUPP when mac80211 has no template to offer.
 */
static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
					 struct ieee80211_vif *vif)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct sk_buff *skb;
	int ret;

	skb = ieee80211_proberesp_get(wl->hw, vif);
	if (!skb)
		return -EOPNOTSUPP;

	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
				      CMD_TEMPL_AP_PROBE_RESPONSE,
				      skb->data, skb->len, 0, rates);
	dev_kfree_skb(skb);

	if (ret < 0)
		return ret;

	wl1271_debug(DEBUG_AP, "probe response updated");
	/* from now on, beacon data is not reused for the probe response */
	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);

	return ret;
}
4005
/*
 * Upload a probe-response template derived from beacon data.  If the
 * vif has no SSID of its own (hidden-SSID style setups), the SSID IE
 * in the beacon-derived data is replaced with the SSID from bss_conf
 * before uploading.
 */
static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
						struct ieee80211_vif *vif,
						u8 *probe_rsp_data,
						size_t probe_rsp_len,
						u32 rates)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
	int ssid_ie_offset, ie_offset, templ_len;
	const u8 *ptr;

	/* no need to change probe response if the SSID is set correctly */
	if (wlvif->ssid_len > 0)
		return wl1271_cmd_template_set(wl, wlvif->role_id,
					       CMD_TEMPL_AP_PROBE_RESPONSE,
					       probe_rsp_data,
					       probe_rsp_len, 0,
					       rates);

	/* worst case: whole frame plus the inserted SSID must fit */
	if (probe_rsp_len + vif->cfg.ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
		wl1271_error("probe_rsp template too big");
		return -EINVAL;
	}

	/* start searching from IE offset */
	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);

	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
			       probe_rsp_len - ie_offset);
	if (!ptr) {
		wl1271_error("No SSID in beacon!");
		return -EINVAL;
	}

	/* ptr now points just past the original SSID IE */
	ssid_ie_offset = ptr - probe_rsp_data;
	ptr += (ptr[1] + 2);

	/* copy everything up to (but excluding) the original SSID IE */
	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);

	/* insert SSID from bss_conf */
	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
	probe_rsp_templ[ssid_ie_offset + 1] = vif->cfg.ssid_len;
	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
	       vif->cfg.ssid, vif->cfg.ssid_len);
	templ_len = ssid_ie_offset + 2 + vif->cfg.ssid_len;

	/* append the remaining IEs after the replaced SSID */
	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + vif->cfg.ssid_len,
	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
	templ_len += probe_rsp_len - (ptr - probe_rsp_data);

	return wl1271_cmd_template_set(wl, wlvif->role_id,
				       CMD_TEMPL_AP_PROBE_RESPONSE,
				       probe_rsp_templ,
				       templ_len, 0,
				       rates);
}
4062
/*
 * Apply ERP-related bss_conf changes (slot time, preamble, CTS
 * protection) to the firmware.  Returns 0 or a negative ACX error.
 */
static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret = 0;

	if (changed & BSS_CHANGED_ERP_SLOT) {
		if (bss_conf->use_short_slot)
			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
		else
			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
		if (ret < 0) {
			wl1271_warning("Set slot time failed %d", ret);
			goto out;
		}
	}

	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
		if (bss_conf->use_short_preamble)
			ret = wl1271_acx_set_preamble(wl, wlvif,
						      ACX_PREAMBLE_SHORT);
		else
			ret = wl1271_acx_set_preamble(wl, wlvif,
						      ACX_PREAMBLE_LONG);
		/*
		 * the return value used to be silently dropped; report
		 * failures like the other ACX calls in this function
		 */
		if (ret < 0) {
			wl1271_warning("Set preamble failed %d", ret);
			goto out;
		}
	}

	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
		if (bss_conf->use_cts_prot)
			ret = wl1271_acx_cts_protect(wl, wlvif,
						     CTSPROTECT_ENABLE);
		else
			ret = wl1271_acx_cts_protect(wl, wlvif,
						     CTSPROTECT_DISABLE);
		if (ret < 0) {
			wl1271_warning("Set ctsprotect failed %d", ret);
			goto out;
		}
	}

out:
	return ret;
}
4105
/*
 * Build and upload the beacon template for @vif and, unless usermode
 * installed an explicit probe response (WLVIF_FLAG_AP_PROBE_RESP_SET),
 * derive a probe-response template from the same beacon data.
 * The beacon skb is freed on every path before returning.
 */
static int wlcore_set_beacon_template(struct wl1271 *wl,
				      struct ieee80211_vif *vif,
				      bool is_ap)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct ieee80211_hdr *hdr;
	u32 min_rate;
	int ret;
	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif, 0);
	u16 tmpl_id;

	if (!beacon) {
		ret = -EINVAL;
		goto out;
	}

	wl1271_debug(DEBUG_MASTER, "beacon updated");

	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
	if (ret < 0) {
		dev_kfree_skb(beacon);
		goto out;
	}
	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
			  CMD_TEMPL_BEACON;
	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
				      beacon->data,
				      beacon->len, 0,
				      min_rate);
	if (ret < 0) {
		dev_kfree_skb(beacon);
		goto out;
	}

	/* remember whether the beacon advertises WMM */
	wlvif->wmm_enabled =
		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
					WLAN_OUI_TYPE_MICROSOFT_WMM,
					beacon->data + ieoffset,
					beacon->len - ieoffset);

	/*
	 * In case we already have a probe-resp beacon set explicitly
	 * by usermode, don't use the beacon data.
	 */
	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
		goto end_bcn;

	/* remove TIM ie from probe response */
	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);

	/*
	 * remove p2p ie from probe response.
	 * the fw reponds to probe requests that don't include
	 * the p2p ie. probe requests with p2p ie will be passed,
	 * and will be responded by the supplicant (the spec
	 * forbids including the p2p ie when responding to probe
	 * requests that didn't include it).
	 */
	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
				WLAN_OUI_TYPE_WFA_P2P, ieoffset);

	/* turn the stripped beacon into a probe-response frame */
	hdr = (struct ieee80211_hdr *) beacon->data;
	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					 IEEE80211_STYPE_PROBE_RESP);
	if (is_ap)
		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
							   beacon->data,
							   beacon->len,
							   min_rate);
	else
		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
					      CMD_TEMPL_PROBE_RESPONSE,
					      beacon->data,
					      beacon->len, 0,
					      min_rate);
end_bcn:
	/* dead "if (ret < 0) goto out;" removed: both paths reached out */
	dev_kfree_skb(beacon);
out:
	return ret;
}
4191
/*
 * Handle beacon-related bss_conf changes: beacon interval, explicit
 * probe-response template (AP only) and the beacon template itself.
 */
static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
					  struct ieee80211_vif *vif,
					  struct ieee80211_bss_conf *bss_conf,
					  u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
	int ret = 0;

	if (changed & BSS_CHANGED_BEACON_INT) {
		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
			bss_conf->beacon_int);

		wlvif->beacon_int = bss_conf->beacon_int;
	}

	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
		u32 min_rate = wl1271_tx_min_rate_get(wl,
						      wlvif->basic_rate_set);

		/* NOTE(review): return value ignored here, as before */
		wl1271_ap_set_probe_resp_tmpl(wl, min_rate, vif);
	}

	if (changed & BSS_CHANGED_BEACON) {
		ret = wlcore_set_beacon_template(wl, vif, is_ap);
		if (ret < 0)
			goto out;

		/* restart the DFS master if beaconing had been disabled */
		if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
				       &wlvif->flags)) {
			ret = wlcore_hw_dfs_master_restart(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}
out:
	if (ret != 0)
		wl1271_error("beacon info change failed: %d", ret);
	return ret;
}
4231
4232 /* AP mode changes */
wl1271_bss_info_changed_ap(struct wl1271 * wl,struct ieee80211_vif * vif,struct ieee80211_bss_conf * bss_conf,u32 changed)4233 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4234 struct ieee80211_vif *vif,
4235 struct ieee80211_bss_conf *bss_conf,
4236 u32 changed)
4237 {
4238 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4239 int ret = 0;
4240
4241 if (changed & BSS_CHANGED_BASIC_RATES) {
4242 u32 rates = bss_conf->basic_rates;
4243
4244 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4245 wlvif->band);
4246 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4247 wlvif->basic_rate_set);
4248
4249 ret = wl1271_init_ap_rates(wl, wlvif);
4250 if (ret < 0) {
4251 wl1271_error("AP rate policy change failed %d", ret);
4252 goto out;
4253 }
4254
4255 ret = wl1271_ap_init_templates(wl, vif);
4256 if (ret < 0)
4257 goto out;
4258
4259 /* No need to set probe resp template for mesh */
4260 if (!ieee80211_vif_is_mesh(vif)) {
4261 ret = wl1271_ap_set_probe_resp_tmpl(wl,
4262 wlvif->basic_rate,
4263 vif);
4264 if (ret < 0)
4265 goto out;
4266 }
4267
4268 ret = wlcore_set_beacon_template(wl, vif, true);
4269 if (ret < 0)
4270 goto out;
4271 }
4272
4273 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4274 if (ret < 0)
4275 goto out;
4276
4277 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4278 if (bss_conf->enable_beacon) {
4279 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4280 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4281 if (ret < 0)
4282 goto out;
4283
4284 ret = wl1271_ap_init_hwenc(wl, wlvif);
4285 if (ret < 0)
4286 goto out;
4287
4288 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4289 wl1271_debug(DEBUG_AP, "started AP");
4290 }
4291 } else {
4292 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4293 /*
4294 * AP might be in ROC in case we have just
4295 * sent auth reply. handle it.
4296 */
4297 if (test_bit(wlvif->role_id, wl->roc_map))
4298 wl12xx_croc(wl, wlvif->role_id);
4299
4300 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4301 if (ret < 0)
4302 goto out;
4303
4304 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4305 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4306 &wlvif->flags);
4307 wl1271_debug(DEBUG_AP, "stopped AP");
4308 }
4309 }
4310 }
4311
4312 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4313 if (ret < 0)
4314 goto out;
4315
4316 /* Handle HT information change */
4317 if ((changed & BSS_CHANGED_HT) &&
4318 (bss_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4319 ret = wl1271_acx_set_ht_information(wl, wlvif,
4320 bss_conf->ht_operation_mode);
4321 if (ret < 0) {
4322 wl1271_warning("Set ht information failed %d", ret);
4323 goto out;
4324 }
4325 }
4326
4327 out:
4328 return;
4329 }
4330
/*
 * Program a new (non-zero) BSSID into the vif state and FW: update the
 * rate policies and null-data templates for the new BSS.
 *
 * Called with wl->mutex held and the chip awake. Returns 0 on success
 * or a negative error code from the failing FW command.
 */
static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			    struct ieee80211_vif *vif, u32 sta_rate_set)
{
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	u32 rates;
	int ret;

	wl1271_debug(DEBUG_MAC80211,
	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
	     bss_conf->bssid, vif->cfg.aid,
	     bss_conf->beacon_int,
	     bss_conf->basic_rates, sta_rate_set);

	/* cache beacon interval and derive the enabled/minimum rates */
	wlvif->beacon_int = bss_conf->beacon_int;
	rates = bss_conf->basic_rates;
	wlvif->basic_rate_set =
		wl1271_tx_enabled_rates_get(wl, rates,
					    wlvif->band);
	wlvif->basic_rate =
		wl1271_tx_min_rate_get(wl,
				       wlvif->basic_rate_set);

	/* the AP's supported rates (if known) further restrict our set */
	if (sta_rate_set)
		wlvif->rate_set =
			wl1271_tx_enabled_rates_get(wl,
						sta_rate_set,
						wlvif->band);

	/* we only support sched_scan while not connected */
	if (wl->sched_vif == wlvif)
		wl->ops->sched_scan_stop(wl, wlvif);

	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
	if (ret < 0)
		return ret;

	/* templates must be rebuilt with the new rate policy in place */
	ret = wl12xx_cmd_build_null_data(wl, wlvif);
	if (ret < 0)
		return ret;

	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
	if (ret < 0)
		return ret;

	wlcore_set_ssid(wl, wlvif);

	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);

	return 0;
}
4381
wlcore_clear_bssid(struct wl1271 * wl,struct wl12xx_vif * wlvif)4382 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4383 {
4384 int ret;
4385
4386 /* revert back to minimum rates for the current band */
4387 wl1271_set_band_rate(wl, wlvif);
4388 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4389
4390 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4391 if (ret < 0)
4392 return ret;
4393
4394 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4395 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4396 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4397 if (ret < 0)
4398 return ret;
4399 }
4400
4401 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4402 return 0;
4403 }
4404 /* STA/IBSS mode changes */
/* STA/IBSS mode changes */
/*
 * Apply mac80211 BSS configuration changes for a STA or IBSS interface.
 *
 * Called with wl->mutex held and the chip awake (see
 * wl1271_op_bss_info_changed). Order matters: BSSID/rate setup and the
 * join are done before association, HT peer capabilities and ARP
 * filtering. Errors abort further processing but are not propagated
 * (void return).
 */
static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
					struct ieee80211_vif *vif,
					struct ieee80211_bss_conf *bss_conf,
					u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool do_join = false;
	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
	bool ibss_joined = false;
	u32 sta_rate_set = 0;
	int ret;
	struct ieee80211_sta *sta;
	bool sta_exists = false;
	struct ieee80211_sta_ht_cap sta_ht_cap;

	if (is_ibss) {
		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
						     changed);
		if (ret < 0)
			goto out;
	}

	if (changed & BSS_CHANGED_IBSS) {
		if (vif->cfg.ibss_joined) {
			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
			ibss_joined = true;
		} else {
			/* leaving the IBSS: tear down assoc state and role */
			wlcore_unset_assoc(wl, wlvif);
			wl12xx_cmd_role_stop_sta(wl, wlvif);
		}
	}

	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
		do_join = true;

	/* Need to update the SSID (for filtering etc) */
	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
		do_join = true;

	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
			     bss_conf->enable_beacon ? "enabled" : "disabled");

		do_join = true;
	}

	if (changed & BSS_CHANGED_IDLE && !is_ibss)
		wl1271_sta_handle_idle(wl, wlvif, vif->cfg.idle);

	if (changed & BSS_CHANGED_CQM) {
		bool enable = false;
		/* a zero threshold means CQM reporting is disabled */
		if (bss_conf->cqm_rssi_thold)
			enable = true;
		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
						  bss_conf->cqm_rssi_thold,
						  bss_conf->cqm_rssi_hyst);
		if (ret < 0)
			goto out;
		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
	}

	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
		       BSS_CHANGED_ASSOC)) {
		/* snapshot AP rate/HT info under RCU for use after unlock */
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (sta) {
			u8 *rx_mask = sta->deflink.ht_cap.mcs.rx_mask;

			/* save the supp_rates of the ap */
			sta_rate_set = sta->deflink.supp_rates[wlvif->band];
			if (sta->deflink.ht_cap.ht_supported)
				sta_rate_set |=
					(rx_mask[0] << HW_HT_RATES_OFFSET) |
					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
			sta_ht_cap = sta->deflink.ht_cap;
			sta_exists = true;
		}

		rcu_read_unlock();
	}

	if (changed & BSS_CHANGED_BSSID) {
		if (!is_zero_ether_addr(bss_conf->bssid)) {
			ret = wlcore_set_bssid(wl, wlvif, vif,
					       sta_rate_set);
			if (ret < 0)
				goto out;

			/* Need to update the BSSID (for filtering etc) */
			do_join = true;
		} else {
			ret = wlcore_clear_bssid(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}

	if (changed & BSS_CHANGED_IBSS) {
		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
			     vif->cfg.ibss_joined);

		if (vif->cfg.ibss_joined) {
			u32 rates = bss_conf->basic_rates;
			wlvif->basic_rate_set =
				wl1271_tx_enabled_rates_get(wl, rates,
							    wlvif->band);
			wlvif->basic_rate =
				wl1271_tx_min_rate_get(wl,
						       wlvif->basic_rate_set);

			/* by default, use 11b + OFDM rates */
			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}

	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
		/* enable beacon filtering */
		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
		if (ret < 0)
			goto out;
	}

	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	if (do_join) {
		ret = wlcore_join(wl, wlvif);
		if (ret < 0) {
			wl1271_warning("cmd join failed %d", ret);
			goto out;
		}
	}

	if (changed & BSS_CHANGED_ASSOC) {
		if (vif->cfg.assoc) {
			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
					       sta_rate_set);
			if (ret < 0)
				goto out;

			/* replay a previously recorded authorization */
			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
				wl12xx_set_authorized(wl, wlvif);
		} else {
			wlcore_unset_assoc(wl, wlvif);
		}
	}

	if (changed & BSS_CHANGED_PS) {
		if (vif->cfg.ps &&
		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
			int ps_mode;
			char *ps_mode_str;

			if (wl->conf.conn.forced_ps) {
				ps_mode = STATION_POWER_SAVE_MODE;
				ps_mode_str = "forced";
			} else {
				ps_mode = STATION_AUTO_PS_MODE;
				ps_mode_str = "auto";
			}

			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);

			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
			if (ret < 0)
				wl1271_warning("enter %s ps failed %d",
					       ps_mode_str, ret);
		} else if (!vif->cfg.ps &&
			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
			wl1271_debug(DEBUG_PSM, "auto ps disabled");

			ret = wl1271_ps_set_mode(wl, wlvif,
						 STATION_ACTIVE_MODE);
			if (ret < 0)
				wl1271_warning("exit auto ps failed %d", ret);
		}
	}

	/* Handle new association with HT. Do this after join. */
	if (sta_exists) {
		bool enabled =
			bss_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT;

		ret = wlcore_hw_set_peer_cap(wl,
					     &sta_ht_cap,
					     enabled,
					     wlvif->rate_set,
					     wlvif->sta.hlid);
		if (ret < 0) {
			wl1271_warning("Set ht cap failed %d", ret);
			goto out;

		}

		if (enabled) {
			ret = wl1271_acx_set_ht_information(wl, wlvif,
						bss_conf->ht_operation_mode);
			if (ret < 0) {
				wl1271_warning("Set ht information failed %d",
					       ret);
				goto out;
			}
		}
	}

	/* Handle arp filtering. Done after join. */
	if ((changed & BSS_CHANGED_ARP_FILTER) ||
	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
		__be32 addr = vif->cfg.arp_addr_list[0];
		wlvif->sta.qos = bss_conf->qos;
		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);

		/* ARP offload only for exactly one address while associated */
		if (vif->cfg.arp_addr_cnt == 1 && vif->cfg.assoc) {
			wlvif->ip_addr = addr;
			/*
			 * The template should have been configured only upon
			 * association. however, it seems that the correct ip
			 * isn't being set (when sending), so we have to
			 * reconfigure the template upon every ip change.
			 */
			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
			if (ret < 0) {
				wl1271_warning("build arp rsp failed: %d", ret);
				goto out;
			}

			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
				(ACX_ARP_FILTER_ARP_FILTERING |
				 ACX_ARP_FILTER_AUTO_ARP),
				addr);
		} else {
			wlvif->ip_addr = 0;
			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
		}

		if (ret < 0)
			goto out;
	}

out:
	return;
}
4652
/*
 * mac80211 bss_info_changed callback: take wl->mutex, wake the chip and
 * dispatch the change set to the AP- or STA/IBSS-specific handler.
 */
static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u64 changed)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
		     wlvif->role_id, (int)changed);

	/*
	 * make sure to cancel pending disconnections if our association
	 * state changed
	 */
	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
		cancel_delayed_work_sync(&wlvif->connection_loss_work);

	/* flush pending frames before beaconing is turned off on an AP */
	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
	    !bss_conf->enable_beacon)
		wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
		goto out;

	/* wake the chip; errors from the helpers below are not propagated */
	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out;

	if ((changed & BSS_CHANGED_TXPOWER) &&
	    bss_conf->txpower != wlvif->power_level) {

		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
		if (ret < 0)
			goto out;

		wlvif->power_level = bss_conf->txpower;
	}

	if (is_ap)
		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
	else
		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);

	pm_runtime_put_autosuspend(wl->dev);

out:
	mutex_unlock(&wl->mutex);
}
4709
wlcore_op_add_chanctx(struct ieee80211_hw * hw,struct ieee80211_chanctx_conf * ctx)4710 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4711 struct ieee80211_chanctx_conf *ctx)
4712 {
4713 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4714 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4715 cfg80211_get_chandef_type(&ctx->def));
4716 return 0;
4717 }
4718
wlcore_op_remove_chanctx(struct ieee80211_hw * hw,struct ieee80211_chanctx_conf * ctx)4719 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4720 struct ieee80211_chanctx_conf *ctx)
4721 {
4722 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4723 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4724 cfg80211_get_chandef_type(&ctx->def));
4725 }
4726
/*
 * mac80211 change_chanctx callback: for every vif bound to this channel
 * context, start radar detection (CAC) if the context now requires it.
 */
static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
				     struct ieee80211_chanctx_conf *ctx,
				     u32 changed)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret;
	int channel = ieee80211_frequency_to_channel(
		ctx->def.chan->center_freq);

	wl1271_debug(DEBUG_MAC80211,
		     "mac80211 change chanctx %d (type %d) changed 0x%x",
		     channel, cfg80211_get_chandef_type(&ctx->def), changed);

	mutex_lock(&wl->mutex);

	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out;

	wl12xx_for_each_wlvif(wl, wlvif) {
		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

		/* skip vifs that are not bound to this channel context */
		rcu_read_lock();
		if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != ctx) {
			rcu_read_unlock();
			continue;
		}
		rcu_read_unlock();

		/* start radar if needed */
		if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
		    ctx->radar_enabled && !wlvif->radar_enabled &&
		    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
			wl1271_debug(DEBUG_MAC80211, "Start radar detection");
			wlcore_hw_set_cac(wl, wlvif, true);
			wlvif->radar_enabled = true;
		}
	}

	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
}
4772
/*
 * mac80211 assign_vif_chanctx callback: bind a vif to a channel context.
 * Records band/channel/channel-type on the vif, refreshes the default
 * rate set for the band and kicks off radar detection when required.
 *
 * NOTE(review): always returns 0, even when the early-out paths leave
 * ret at -EINVAL or pm_runtime fails — presumably intentional since
 * mac80211 offers no good recovery here, but worth confirming.
 */
static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_bss_conf *link_conf,
					struct ieee80211_chanctx_conf *ctx)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int channel = ieee80211_frequency_to_channel(
		ctx->def.chan->center_freq);
	int ret = -EINVAL;

	wl1271_debug(DEBUG_MAC80211,
		     "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
		     wlvif->role_id, channel,
		     cfg80211_get_chandef_type(&ctx->def),
		     ctx->radar_enabled, ctx->def.chan->dfs_state);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
		goto out;

	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out;

	wlvif->band = ctx->def.chan->band;
	wlvif->channel = channel;
	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);

	/* update default rates according to the band */
	wl1271_set_band_rate(wl, wlvif);

	if (ctx->radar_enabled &&
	    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
		wlcore_hw_set_cac(wl, wlvif, true);
		wlvif->radar_enabled = true;
	}

	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);

	return 0;
}
4822
/*
 * mac80211 unassign_vif_chanctx callback: unbind a vif from its channel
 * context. Flushes pending TX and stops radar detection (CAC) if it was
 * running on this vif.
 */
static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif,
					   struct ieee80211_bss_conf *link_conf,
					   struct ieee80211_chanctx_conf *ctx)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211,
		     "mac80211 unassign chanctx (role %d) %d (type %d)",
		     wlvif->role_id,
		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
		     cfg80211_get_chandef_type(&ctx->def));

	/* flush outside the mutex; wl1271_tx_flush takes its own locks */
	wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
		goto out;

	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out;

	if (wlvif->radar_enabled) {
		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
		wlcore_hw_set_cac(wl, wlvif, false);
		wlvif->radar_enabled = false;
	}

	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
}
4862
/*
 * Move a single (AP) vif to a new channel context: stop any running
 * radar detection, adopt the new band/channel/type, and restart radar
 * detection if the new context requires it.
 *
 * Called with wl->mutex held and the chip awake. Only AP vifs are
 * supported; anything else trips the WARN and is ignored. Always
 * returns 0.
 */
static int __wlcore_switch_vif_chan(struct wl1271 *wl,
				    struct wl12xx_vif *wlvif,
				    struct ieee80211_chanctx_conf *new_ctx)
{
	int channel = ieee80211_frequency_to_channel(
		new_ctx->def.chan->center_freq);

	wl1271_debug(DEBUG_MAC80211,
		     "switch vif (role %d) %d -> %d chan_type: %d",
		     wlvif->role_id, wlvif->channel, channel,
		     cfg80211_get_chandef_type(&new_ctx->def));

	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
		return 0;

	/* beaconing is expected to be disabled during a channel switch */
	WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));

	if (wlvif->radar_enabled) {
		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
		wlcore_hw_set_cac(wl, wlvif, false);
		wlvif->radar_enabled = false;
	}

	wlvif->band = new_ctx->def.chan->band;
	wlvif->channel = channel;
	wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);

	/* start radar if needed */
	if (new_ctx->radar_enabled) {
		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
		wlcore_hw_set_cac(wl, wlvif, true);
		wlvif->radar_enabled = true;
	}

	return 0;
}
4899
/*
 * mac80211 switch_vif_chanctx callback: switch each listed vif to its
 * new channel context via __wlcore_switch_vif_chan().
 *
 * NOTE(review): returns 0 unconditionally, even if pm_runtime or a
 * per-vif switch fails — presumably deliberate (the switch helper
 * itself always returns 0), but worth confirming.
 */
static int
wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
			     struct ieee80211_vif_chanctx_switch *vifs,
			     int n_vifs,
			     enum ieee80211_chanctx_switch_mode mode)
{
	struct wl1271 *wl = hw->priv;
	int i, ret;

	wl1271_debug(DEBUG_MAC80211,
		     "mac80211 switch chanctx n_vifs %d mode %d",
		     n_vifs, mode);

	mutex_lock(&wl->mutex);

	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out;

	for (i = 0; i < n_vifs; i++) {
		struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);

		ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
		if (ret)
			goto out_sleep;
	}
out_sleep:
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);

	return 0;
}
4933
/*
 * mac80211 conf_tx callback: program EDCA/AC parameters and the TID
 * configuration (including the U-APSD power-save scheme) for one queue.
 * Returns 0 on success or a negative error code.
 */
static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     unsigned int link_id, u16 queue,
			     const struct ieee80211_tx_queue_params *params)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	u8 ps_scheme;
	int ret = 0;

	/* p2p-mgmt interfaces carry no data queues to configure */
	if (wlcore_is_p2p_mgmt(wlvif))
		return 0;

	mutex_lock(&wl->mutex);

	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);

	if (params->uapsd)
		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
	else
		ps_scheme = CONF_PS_SCHEME_LEGACY;

	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		goto out;

	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out;

	/*
	 * the txop is confed in units of 32us by the mac80211,
	 * we need us
	 */
	/* << 5 converts the 32us units into microseconds for the FW */
	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
				params->cw_min, params->cw_max,
				params->aifs, params->txop << 5);
	if (ret < 0)
		goto out_sleep;

	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
				 CONF_CHANNEL_TYPE_EDCF,
				 wl1271_tx_get_queue(queue),
				 ps_scheme, CONF_ACK_POLICY_LEGACY,
				 0, 0);

out_sleep:
	pm_runtime_put_autosuspend(wl->dev);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
4987
/*
 * mac80211 get_tsf callback: read the current TSF from the FW.
 * Returns the TSF value, or ULLONG_MAX if the chip is down or the
 * ACX query fails.
 */
static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif)
{

	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	u64 mactime = ULLONG_MAX;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out;

	/* on failure, mactime keeps its ULLONG_MAX sentinel */
	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
	if (ret < 0)
		goto out_sleep;

out_sleep:
	pm_runtime_put_autosuspend(wl->dev);

out:
	mutex_unlock(&wl->mutex);
	return mactime;
}
5019
wl1271_op_get_survey(struct ieee80211_hw * hw,int idx,struct survey_info * survey)5020 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
5021 struct survey_info *survey)
5022 {
5023 struct ieee80211_conf *conf = &hw->conf;
5024
5025 if (idx != 0)
5026 return -ENOENT;
5027
5028 survey->channel = conf->chandef.chan;
5029 survey->filled = 0;
5030 return 0;
5031 }
5032
wl1271_allocate_sta(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_sta * sta)5033 static int wl1271_allocate_sta(struct wl1271 *wl,
5034 struct wl12xx_vif *wlvif,
5035 struct ieee80211_sta *sta)
5036 {
5037 struct wl1271_station *wl_sta;
5038 int ret;
5039
5040
5041 if (wl->active_sta_count >= wl->max_ap_stations) {
5042 wl1271_warning("could not allocate HLID - too much stations");
5043 return -EBUSY;
5044 }
5045
5046 wl_sta = (struct wl1271_station *)sta->drv_priv;
5047 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
5048 if (ret < 0) {
5049 wl1271_warning("could not allocate HLID - too many links");
5050 return -EBUSY;
5051 }
5052
5053 /* use the previous security seq, if this is a recovery/resume */
5054 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
5055
5056 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
5057 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
5058 wl->active_sta_count++;
5059 return 0;
5060 }
5061
/*
 * Release a station's HLID and all per-link bookkeeping on an AP vif.
 * No-op if the HLID is not currently allocated on this vif.
 */
void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
		return;

	clear_bit(hlid, wlvif->ap.sta_hlid_map);
	__clear_bit(hlid, &wl->ap_ps_map);
	__clear_bit(hlid, &wl->ap_fw_ps_map);

	/*
	 * save the last used PN in the private part of iee80211_sta,
	 * in case of recovery/suspend
	 */
	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);

	wl12xx_free_link(wl, wlvif, &hlid);
	wl->active_sta_count--;

	/*
	 * rearm the tx watchdog when the last STA is freed - give the FW a
	 * chance to return STA-buffered packets before complaining.
	 */
	if (!wl->active_sta_count)
		wl12xx_rearm_tx_watchdog_locked(wl);
}
5087
wl12xx_sta_add(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_sta * sta)5088 static int wl12xx_sta_add(struct wl1271 *wl,
5089 struct wl12xx_vif *wlvif,
5090 struct ieee80211_sta *sta)
5091 {
5092 struct wl1271_station *wl_sta;
5093 int ret = 0;
5094 u8 hlid;
5095
5096 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5097
5098 ret = wl1271_allocate_sta(wl, wlvif, sta);
5099 if (ret < 0)
5100 return ret;
5101
5102 wl_sta = (struct wl1271_station *)sta->drv_priv;
5103 hlid = wl_sta->hlid;
5104
5105 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5106 if (ret < 0)
5107 wl1271_free_sta(wl, wlvif, hlid);
5108
5109 return ret;
5110 }
5111
wl12xx_sta_remove(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_sta * sta)5112 static int wl12xx_sta_remove(struct wl1271 *wl,
5113 struct wl12xx_vif *wlvif,
5114 struct ieee80211_sta *sta)
5115 {
5116 struct wl1271_station *wl_sta;
5117 int ret = 0, id;
5118
5119 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5120
5121 wl_sta = (struct wl1271_station *)sta->drv_priv;
5122 id = wl_sta->hlid;
5123 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5124 return -EINVAL;
5125
5126 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5127 if (ret < 0)
5128 return ret;
5129
5130 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5131 return ret;
5132 }
5133
wlcore_roc_if_possible(struct wl1271 * wl,struct wl12xx_vif * wlvif)5134 static void wlcore_roc_if_possible(struct wl1271 *wl,
5135 struct wl12xx_vif *wlvif)
5136 {
5137 if (find_first_bit(wl->roc_map,
5138 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5139 return;
5140
5141 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5142 return;
5143
5144 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5145 }
5146
/*
 * when wl_sta is NULL, we treat this call as if coming from a
 * pending auth reply.
 * wl->mutex must be taken and the FW must be awake when the call
 * takes place.
 *
 * Tracks stations that are mid-connection on an AP vif: a ROC is
 * started when the first such station (or a pending auth reply)
 * appears, and cancelled once the last one completes.
 */
void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      struct wl1271_station *wl_sta, bool in_conn)
{
	if (in_conn) {
		/* ignore duplicate "entering connection" notifications */
		if (WARN_ON(wl_sta && wl_sta->in_connection))
			return;

		/* first in-connection entity on this vif - go on channel */
		if (!wlvif->ap_pending_auth_reply &&
		    !wlvif->inconn_count)
			wlcore_roc_if_possible(wl, wlvif);

		if (wl_sta) {
			wl_sta->in_connection = true;
			wlvif->inconn_count++;
		} else {
			wlvif->ap_pending_auth_reply = true;
		}
	} else {
		/* ignore stations that were never marked in-connection */
		if (wl_sta && !wl_sta->in_connection)
			return;

		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
			return;

		if (WARN_ON(wl_sta && !wlvif->inconn_count))
			return;

		if (wl_sta) {
			wl_sta->in_connection = false;
			wlvif->inconn_count--;
		} else {
			wlvif->ap_pending_auth_reply = false;
		}

		/* last in-connection entity gone - cancel our ROC if set */
		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
		    test_bit(wlvif->role_id, wl->roc_map))
			wl12xx_croc(wl, wlvif->role_id);
	}
}
5192
/*
 * React to a mac80211 station state transition for AP and STA vifs:
 * add/remove/authorize peers in FW, manage ROC, and save/restore the
 * security sequence counters across disassoc/assoc (suspend/resume).
 *
 * Called with wl->mutex held and the chip awake (see
 * wl12xx_op_sta_state). Returns 0 on success or a negative error code.
 */
static int wl12xx_update_sta_state(struct wl1271 *wl,
				   struct wl12xx_vif *wlvif,
				   struct ieee80211_sta *sta,
				   enum ieee80211_sta_state old_state,
				   enum ieee80211_sta_state new_state)
{
	struct wl1271_station *wl_sta;
	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
	int ret;

	wl_sta = (struct wl1271_station *)sta->drv_priv;

	/* Add station (AP mode) */
	if (is_ap &&
	    old_state == IEEE80211_STA_AUTH &&
	    new_state == IEEE80211_STA_ASSOC) {
		ret = wl12xx_sta_add(wl, wlvif, sta);
		if (ret)
			return ret;

		wl_sta->fw_added = true;

		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
	}

	/* Remove station (AP mode) */
	if (is_ap &&
	    old_state == IEEE80211_STA_ASSOC &&
	    new_state == IEEE80211_STA_AUTH) {
		wl_sta->fw_added = false;

		/* must not fail */
		wl12xx_sta_remove(wl, wlvif, sta);

		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
	}

	/* Authorize station (AP mode) */
	if (is_ap &&
	    new_state == IEEE80211_STA_AUTHORIZED) {
		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
		if (ret < 0)
			return ret;

		/* reconfigure peer sta ht caps on authorization */
		ret = wl1271_acx_set_ht_capabilities(wl, &sta->deflink.ht_cap,
						     true,
						     wl_sta->hlid);
		if (ret)
			return ret;

		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
	}

	/* Authorize station */
	if (is_sta &&
	    new_state == IEEE80211_STA_AUTHORIZED) {
		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
		ret = wl12xx_set_authorized(wl, wlvif);
		if (ret)
			return ret;
	}

	if (is_sta &&
	    old_state == IEEE80211_STA_AUTHORIZED &&
	    new_state == IEEE80211_STA_ASSOC) {
		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
	}

	/* save seq number on disassoc (suspend) */
	if (is_sta &&
	    old_state == IEEE80211_STA_ASSOC &&
	    new_state == IEEE80211_STA_AUTH) {
		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
		wlvif->total_freed_pkts = 0;
	}

	/* restore seq number on assoc (resume) */
	if (is_sta &&
	    old_state == IEEE80211_STA_AUTH &&
	    new_state == IEEE80211_STA_ASSOC) {
		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
	}

	/* clear ROCs on failure or authorization */
	if (is_sta &&
	    (new_state == IEEE80211_STA_AUTHORIZED ||
	     new_state == IEEE80211_STA_NOTEXIST)) {
		if (test_bit(wlvif->role_id, wl->roc_map))
			wl12xx_croc(wl, wlvif->role_id);
	}

	if (is_sta &&
	    old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE) {
		/* go on channel while connecting, if no ROC is active yet */
		if (find_first_bit(wl->roc_map,
				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
			wl12xx_roc(wl, wlvif, wlvif->role_id,
				   wlvif->band, wlvif->channel);
		}
	}
	return 0;
}
5298
/*
 * mac80211 sta_state callback: take the mutex, wake the chip and
 * delegate the state transition to wl12xx_update_sta_state().
 *
 * Errors on downward transitions (new_state < old_state) are masked to
 * 0, since mac80211 does not allow teardown transitions to fail.
 */
static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       enum ieee80211_sta_state old_state,
			       enum ieee80211_sta_state new_state)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
		     sta->aid, old_state, new_state);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EBUSY;
		goto out;
	}

	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out;

	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);

	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
	if (new_state < old_state)
		return 0;
	return ret;
}
5332
/*
 * mac80211 ampdu_action callback: manage RX BlockAck sessions in the FW.
 *
 * Only the RX side is driven from here; TX BA sessions are handled
 * autonomously by the FW (see the comment above the TX cases below).
 * Per-link RX BA state is tracked in wl->links[hlid].ba_bitmap (one bit
 * per TID) plus a global active-session counter.
 */
static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  struct ieee80211_ampdu_params *params)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;
	u8 hlid, *ba_bitmap;
	struct ieee80211_sta *sta = params->sta;
	enum ieee80211_ampdu_mlme_action action = params->action;
	u16 tid = params->tid;
	u16 *ssn = &params->ssn;

	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
		     tid);

	/* sanity check - the fields in FW are only 8bits wide */
	if (WARN_ON(tid > 0xFF))
		return -ENOTSUPP;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	/* resolve the FW host link id: our own link in STA mode, the
	 * peer's link in AP mode */
	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
		hlid = wlvif->sta.hlid;
	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
		struct wl1271_station *wl_sta;

		wl_sta = (struct wl1271_station *)sta->drv_priv;
		hlid = wl_sta->hlid;
	} else {
		ret = -EINVAL;
		goto out;
	}

	ba_bitmap = &wl->links[hlid].ba_bitmap;

	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out;

	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
		     tid, action);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		if (!wlvif->ba_support || !wlvif->ba_allowed) {
			ret = -ENOTSUPP;
			break;
		}

		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
			ret = -EBUSY;
			wl1271_debug(DEBUG_RX, "exceeded max RX BA sessions");
			break;
		}

		if (*ba_bitmap & BIT(tid)) {
			ret = -EINVAL;
			wl1271_error("cannot enable RX BA session on active "
				     "tid: %d", tid);
			break;
		}

		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
							 hlid,
							 params->buf_size);

		if (!ret) {
			*ba_bitmap |= BIT(tid);
			wl->ba_rx_session_count++;
		}
		break;

	case IEEE80211_AMPDU_RX_STOP:
		if (!(*ba_bitmap & BIT(tid))) {
			/*
			 * this happens on reconfig - so only output a debug
			 * message for now, and don't fail the function.
			 */
			wl1271_debug(DEBUG_MAC80211,
				     "no active RX BA session on tid: %d",
				     tid);
			ret = 0;
			break;
		}

		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
							 hlid, 0);
		if (!ret) {
			*ba_bitmap &= ~BIT(tid);
			wl->ba_rx_session_count--;
		}
		break;

	/*
	 * The FW manages BA initiator (TX) sessions on its own, so all
	 * TX AMPDU requests from mac80211 are deliberately rejected here.
	 */
	case IEEE80211_AMPDU_TX_START:
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ret = -EINVAL;
		break;

	default:
		wl1271_error("Incorrect ampdu action id=%x\n", action);
		ret = -EINVAL;
	}

	pm_runtime_put_autosuspend(wl->dev);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
5456
/*
 * mac80211 set_bitrate_mask callback.
 *
 * The per-band masks are always cached in the vif (they are applied to
 * the FW later, e.g. on association), which is why ret stays 0 when the
 * chip is off.  The FW rate policy is only reprogrammed immediately for
 * a not-yet-associated STA vif.
 */
static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   const struct cfg80211_bitrate_mask *mask)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl1271 *wl = hw->priv;
	int i, ret = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
		     mask->control[NL80211_BAND_2GHZ].legacy,
		     mask->control[NL80211_BAND_5GHZ].legacy);

	mutex_lock(&wl->mutex);

	/* cache the requested masks per band, for later use */
	for (i = 0; i < WLCORE_NUM_BANDS; i++)
		wlvif->bitrate_masks[i] =
			wl1271_tx_enabled_rates_get(wl,
						    mask->control[i].legacy,
						    i);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {

		ret = pm_runtime_resume_and_get(wl->dev);
		if (ret < 0)
			goto out;

		wl1271_set_band_rate(wl, wlvif);
		wlvif->basic_rate =
			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		ret = wl1271_acx_sta_rate_policies(wl, wlvif);

		pm_runtime_put_autosuspend(wl->dev);
	}
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
5499
/*
 * mac80211 channel_switch callback (STA side): hand the CSA request to
 * the FW and arm a watchdog that fails the switch if the FW never
 * reports completion.
 */
static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct ieee80211_channel_switch *ch_switch)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");

	wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
		/* chip is down: report the switch as failed to mac80211 */
		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
			ieee80211_chswitch_done(vif, false, 0);
		goto out;
	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
		goto out;
	}

	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out;

	/* TODO: change mac80211 to pass vif as param */

	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
		unsigned long delay_usec;

		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
		if (ret)
			goto out_sleep;

		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);

		/* indicate failure 5 seconds after channel switch time */
		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
			ch_switch->count;
		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
					     usecs_to_jiffies(delay_usec) +
					     msecs_to_jiffies(5000));
	}

out_sleep:
	pm_runtime_put_autosuspend(wl->dev);

out:
	mutex_unlock(&wl->mutex);
}
5551
/*
 * Return a pointer to information element @eid inside the vif's beacon
 * template, or NULL if no beacon template / no such IE exists.
 *
 * NOTE(review): the skb returned by ieee80211_beacon_get() is never
 * freed here, and the returned pointer aliases the skb's data, so it
 * cannot simply be freed before returning either.  This looks like a
 * leak of one beacon skb per call -- confirm, and consider
 * restructuring so the caller copies what it needs and frees the skb.
 */
static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					u8 eid)
{
	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
	struct sk_buff *beacon =
		ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif), 0);

	if (!beacon)
		return NULL;

	return cfg80211_find_ie(eid,
				beacon->data + ieoffset,
				beacon->len - ieoffset);
}
5567
/*
 * Read the CSA countdown value from the Channel Switch Announcement IE
 * in the vif's beacon template.  Returns 0 and stores the count in
 * *csa_count on success, -EINVAL when the beacon has no CSA element.
 */
static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				u8 *csa_count)
{
	const struct ieee80211_channel_sw_ie *csa;
	const u8 *elem;

	elem = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
	if (!elem)
		return -EINVAL;

	/* skip the 2-byte IE header (id + length) to reach the payload */
	csa = (const struct ieee80211_channel_sw_ie *)(elem + 2);
	*csa_count = csa->count;

	return 0;
}
5583
/*
 * mac80211 channel_switch_beacon callback (AP side): extract the CSA
 * countdown from the current beacon template and start the FW channel
 * switch with it.
 */
static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
					    struct ieee80211_vif *vif,
					    struct cfg80211_chan_def *chandef)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct ieee80211_channel_switch ch_switch = {
		.block_tx = true,
		.chandef = *chandef,
	};
	int ret;

	wl1271_debug(DEBUG_MAC80211,
		     "mac80211 channel switch beacon (role %d)",
		     wlvif->role_id);

	ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
	if (ret < 0) {
		wl1271_error("error getting beacon (for CSA counter)");
		return;
	}

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EBUSY;
		goto out;
	}

	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out;

	ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
	if (ret)
		goto out_sleep;

	set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);

out_sleep:
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
}
5628
/*
 * mac80211 flush callback: drain all pending TX frames.  The queues and
 * drop arguments are ignored -- wl1271_tx_flush() always flushes
 * everything.
 */
static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			    u32 queues, bool drop)
{
	struct wl1271 *wl;

	wl = hw->priv;
	wl1271_tx_flush(wl);
}
5636
/*
 * mac80211 remain_on_channel callback: start the device role on the
 * requested channel and schedule delayed completion after @duration ms.
 * Only one ROC may be active at a time; -EBUSY is returned otherwise.
 */
static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_channel *chan,
				       int duration,
				       enum ieee80211_roc_type type)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl1271 *wl = hw->priv;
	int channel, active_roc, ret = 0;

	channel = ieee80211_frequency_to_channel(chan->center_freq);

	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
		     channel, wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* return EBUSY if we can't ROC right now */
	active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
	if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
		wl1271_warning("active roc on role %d", active_roc);
		ret = -EBUSY;
		goto out;
	}

	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out;

	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
	if (ret < 0)
		goto out_sleep;

	/* remember the vif and arm the completion watchdog */
	wl->roc_vif = vif;
	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
				     msecs_to_jiffies(duration));
out_sleep:
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
	return ret;
}
5682
__wlcore_roc_completed(struct wl1271 * wl)5683 static int __wlcore_roc_completed(struct wl1271 *wl)
5684 {
5685 struct wl12xx_vif *wlvif;
5686 int ret;
5687
5688 /* already completed */
5689 if (unlikely(!wl->roc_vif))
5690 return 0;
5691
5692 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5693
5694 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5695 return -EBUSY;
5696
5697 ret = wl12xx_stop_dev(wl, wlvif);
5698 if (ret < 0)
5699 return ret;
5700
5701 wl->roc_vif = NULL;
5702
5703 return 0;
5704 }
5705
/*
 * Locked/powered wrapper around __wlcore_roc_completed(): takes the
 * mutex and a runtime-PM reference, then finishes the ROC.
 */
static int wlcore_roc_completed(struct wl1271 *wl)
{
	int ret;

	wl1271_debug(DEBUG_MAC80211, "roc complete");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EBUSY;
		goto out;
	}

	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out;

	ret = __wlcore_roc_completed(wl);

	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
5731
wlcore_roc_complete_work(struct work_struct * work)5732 static void wlcore_roc_complete_work(struct work_struct *work)
5733 {
5734 struct delayed_work *dwork;
5735 struct wl1271 *wl;
5736 int ret;
5737
5738 dwork = to_delayed_work(work);
5739 wl = container_of(dwork, struct wl1271, roc_complete_work);
5740
5741 ret = wlcore_roc_completed(wl);
5742 if (!ret)
5743 ieee80211_remain_on_channel_expired(wl->hw);
5744 }
5745
wlcore_op_cancel_remain_on_channel(struct ieee80211_hw * hw,struct ieee80211_vif * vif)5746 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
5747 struct ieee80211_vif *vif)
5748 {
5749 struct wl1271 *wl = hw->priv;
5750
5751 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5752
5753 /* TODO: per-vif */
5754 wl1271_tx_flush(wl);
5755
5756 /*
5757 * we can't just flush_work here, because it might deadlock
5758 * (as we might get called from the same workqueue)
5759 */
5760 cancel_delayed_work_sync(&wl->roc_complete_work);
5761 wlcore_roc_completed(wl);
5762
5763 return 0;
5764 }
5765
/*
 * mac80211 link_sta_rc_update callback.  Runs in atomic context, so the
 * new bandwidth/HT parameters are only stashed in the vif and the heavy
 * lifting is deferred to rc_update_work.
 */
static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_link_sta *link_sta,
				    u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct ieee80211_sta *sta = link_sta->sta;

	wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");

	/* only bandwidth changes are of interest here */
	if (!(changed & IEEE80211_RC_BW_CHANGED))
		return;

	wlvif->rc_update_bw = sta->deflink.bandwidth;
	memcpy(&wlvif->rc_ht_cap, &sta->deflink.ht_cap,
	       sizeof(sta->deflink.ht_cap));
	ieee80211_queue_work(hw, &wlvif->rc_update_work);
}
5785
wlcore_op_sta_statistics(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_sta * sta,struct station_info * sinfo)5786 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5787 struct ieee80211_vif *vif,
5788 struct ieee80211_sta *sta,
5789 struct station_info *sinfo)
5790 {
5791 struct wl1271 *wl = hw->priv;
5792 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5793 s8 rssi_dbm;
5794 int ret;
5795
5796 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5797
5798 mutex_lock(&wl->mutex);
5799
5800 if (unlikely(wl->state != WLCORE_STATE_ON))
5801 goto out;
5802
5803 ret = pm_runtime_resume_and_get(wl->dev);
5804 if (ret < 0)
5805 goto out_sleep;
5806
5807 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5808 if (ret < 0)
5809 goto out_sleep;
5810
5811 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
5812 sinfo->signal = rssi_dbm;
5813
5814 out_sleep:
5815 pm_runtime_put_autosuspend(wl->dev);
5816
5817 out:
5818 mutex_unlock(&wl->mutex);
5819 }
5820
wlcore_op_get_expected_throughput(struct ieee80211_hw * hw,struct ieee80211_sta * sta)5821 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5822 struct ieee80211_sta *sta)
5823 {
5824 struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5825 struct wl1271 *wl = hw->priv;
5826 u8 hlid = wl_sta->hlid;
5827
5828 /* return in units of Kbps */
5829 return (wl->links[hlid].fw_rate_mbps * 1000);
5830 }
5831
wl1271_tx_frames_pending(struct ieee80211_hw * hw)5832 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5833 {
5834 struct wl1271 *wl = hw->priv;
5835 bool ret = false;
5836
5837 mutex_lock(&wl->mutex);
5838
5839 if (unlikely(wl->state != WLCORE_STATE_ON))
5840 goto out;
5841
5842 /* packets are considered pending if in the TX queue or the FW */
5843 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5844 out:
5845 mutex_unlock(&wl->mutex);
5846
5847 return ret;
5848 }
5849
/* can't be const, mac80211 writes to this */
/* 2.4 GHz legacy rates (CCK then OFDM); .bitrate is in units of 100 kbps */
static struct ieee80211_rate wl1271_rates[] = {
	{ .bitrate = 10,
	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
	{ .bitrate = 20,
	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
	{ .bitrate = 90,
	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
	{ .bitrate = 120,
	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
	{ .bitrate = 180,
	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
	{ .bitrate = 240,
	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
	{ .bitrate = 360,
	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
	{ .bitrate = 480,
	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
	{ .bitrate = 540,
	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};
5892
/* can't be const, mac80211 writes to this */
/* 2.4 GHz channels 1-14; .center_freq is in MHz */
static struct ieee80211_channel wl1271_channels[] = {
	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
};
5910
/* can't be const, mac80211 writes to this */
/*
 * 2.4 GHz band descriptor.  A per-device copy is made in
 * wl1271_init_ieee80211(), where the HT capabilities are filled in.
 */
static struct ieee80211_supported_band wl1271_band_2ghz = {
	.channels = wl1271_channels,
	.n_channels = ARRAY_SIZE(wl1271_channels),
	.bitrates = wl1271_rates,
	.n_bitrates = ARRAY_SIZE(wl1271_rates),
};
5918
/* 5 GHz data rates for WL1273 (OFDM only); .bitrate is in units of 100 kbps */
static struct ieee80211_rate wl1271_rates_5ghz[] = {
	{ .bitrate = 60,
	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
	{ .bitrate = 90,
	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
	{ .bitrate = 120,
	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
	{ .bitrate = 180,
	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
	{ .bitrate = 240,
	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
	{ .bitrate = 360,
	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
	{ .bitrate = 480,
	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
	{ .bitrate = 540,
	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};
5946
/* 5 GHz band channels for WL1273; .hw_value is the channel number,
 * .center_freq is in MHz */
static struct ieee80211_channel wl1271_channels_5ghz[] = {
	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
};
5981
/*
 * 5 GHz band descriptor.  Like the 2.4 GHz one, a per-device copy is
 * made in wl1271_init_ieee80211(), where the HT capabilities are added.
 */
static struct ieee80211_supported_band wl1271_band_5ghz = {
	.channels = wl1271_channels_5ghz,
	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
	.bitrates = wl1271_rates_5ghz,
	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
};
5988
/* mac80211 callback table for all wlcore-based chips */
static const struct ieee80211_ops wl1271_ops = {
	.start = wl1271_op_start,
	.stop = wlcore_op_stop,
	.add_interface = wl1271_op_add_interface,
	.remove_interface = wl1271_op_remove_interface,
	.change_interface = wl12xx_op_change_interface,
#ifdef CONFIG_PM
	.suspend = wl1271_op_suspend,
	.resume = wl1271_op_resume,
#endif
	.config = wl1271_op_config,
	.prepare_multicast = wl1271_op_prepare_multicast,
	.configure_filter = wl1271_op_configure_filter,
	.tx = wl1271_op_tx,
	.wake_tx_queue = ieee80211_handle_wake_tx_queue,
	.set_key = wlcore_op_set_key,
	.hw_scan = wl1271_op_hw_scan,
	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
	.sched_scan_start = wl1271_op_sched_scan_start,
	.sched_scan_stop = wl1271_op_sched_scan_stop,
	.bss_info_changed = wl1271_op_bss_info_changed,
	.set_frag_threshold = wl1271_op_set_frag_threshold,
	.set_rts_threshold = wl1271_op_set_rts_threshold,
	.conf_tx = wl1271_op_conf_tx,
	.get_tsf = wl1271_op_get_tsf,
	.get_survey = wl1271_op_get_survey,
	.sta_state = wl12xx_op_sta_state,
	.ampdu_action = wl1271_op_ampdu_action,
	.tx_frames_pending = wl1271_tx_frames_pending,
	.set_bitrate_mask = wl12xx_set_bitrate_mask,
	.set_default_unicast_key = wl1271_op_set_default_key_idx,
	.channel_switch = wl12xx_op_channel_switch,
	.channel_switch_beacon = wlcore_op_channel_switch_beacon,
	.flush = wlcore_op_flush,
	.remain_on_channel = wlcore_op_remain_on_channel,
	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
	.add_chanctx = wlcore_op_add_chanctx,
	.remove_chanctx = wlcore_op_remove_chanctx,
	.change_chanctx = wlcore_op_change_chanctx,
	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
	.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
	.link_sta_rc_update = wlcore_op_sta_rc_update,
	.sta_statistics = wlcore_op_sta_statistics,
	.get_expected_throughput = wlcore_op_get_expected_throughput,
	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
6036
6037
/*
 * Translate a HW rate value reported by the chip into the mac80211 rate
 * index for @band.  Out-of-range or unsupported HW rates map to index 0.
 */
u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
{
	u8 rate_idx;

	BUG_ON(band >= 2);

	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
		wl1271_error("Illegal RX rate from HW: %d", rate);
		return 0;
	}

	rate_idx = wl->band_rate_to_idx[band][rate];
	if (unlikely(rate_idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
		wl1271_error("Unsupported RX rate from HW: %d", rate);
		return 0;
	}

	return rate_idx;
}
6057
/*
 * Build the set of virtual MAC addresses from a base OUI + NIC value:
 * consecutive NIC values for each supported address, plus -- when the
 * chip supports fewer than WLCORE_NUM_MAC_ADDRESSES -- a locally
 * administered copy of the first address used as the last slot.
 */
static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
{
	int i;

	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
		     oui, nic);

	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
		wl1271_warning("NIC part of the MAC address wraps around!");

	for (i = 0; i < wl->num_mac_addr; i++, nic++) {
		u8 *addr = wl->addresses[i].addr;

		addr[0] = (u8)(oui >> 16);
		addr[1] = (u8)(oui >> 8);
		addr[2] = (u8) oui;
		addr[3] = (u8)(nic >> 16);
		addr[4] = (u8)(nic >> 8);
		addr[5] = (u8) nic;
	}

	/* we may be one address short at the most */
	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);

	/*
	 * turn on the LAA bit in the first address and use it as
	 * the last address.
	 */
	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
		int last = WLCORE_NUM_MAC_ADDRESSES - 1;

		memcpy(&wl->addresses[last], &wl->addresses[0],
		       sizeof(wl->addresses[0]));
		/* LAA bit */
		wl->addresses[last].addr[0] |= BIT(1);
	}

	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
	wl->hw->wiphy->addresses = wl->addresses;
}
6096
wl12xx_get_hw_info(struct wl1271 * wl)6097 static int wl12xx_get_hw_info(struct wl1271 *wl)
6098 {
6099 int ret;
6100
6101 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6102 if (ret < 0)
6103 goto out;
6104
6105 wl->fuse_oui_addr = 0;
6106 wl->fuse_nic_addr = 0;
6107
6108 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6109 if (ret < 0)
6110 goto out;
6111
6112 if (wl->ops->get_mac)
6113 ret = wl->ops->get_mac(wl);
6114
6115 out:
6116 return ret;
6117 }
6118
/*
 * Derive the device MAC addresses (from NVS, fuse, or as a last resort
 * a random NIC part) and register the hw with mac80211.  Idempotent:
 * returns 0 immediately if already registered.
 */
static int wl1271_register_hw(struct wl1271 *wl)
{
	int ret;
	u32 oui_addr = 0, nic_addr = 0;
	struct platform_device *pdev = wl->pdev;
	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);

	if (wl->mac80211_registered)
		return 0;

	if (wl->nvs_len >= 12) {
		/* NOTE: The wl->nvs->nvs element must be first, in
		 * order to simplify the casting, we assume it is at
		 * the beginning of the wl->nvs structure.
		 */
		u8 *nvs_ptr = (u8 *)wl->nvs;

		/* the MAC bytes are scattered across the NVS header --
		 * presumably dictated by the NVS file format; do not
		 * "simplify" these indices */
		oui_addr =
			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
		nic_addr =
			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
	}

	/* if the MAC address is zeroed in the NVS derive from fuse */
	if (oui_addr == 0 && nic_addr == 0) {
		oui_addr = wl->fuse_oui_addr;
		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
		nic_addr = wl->fuse_nic_addr + 1;
	}

	/* de:ad:be:ef:00:00 is the placeholder shipped in default nvs files */
	if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
		wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
		if (!strcmp(pdev_data->family->name, "wl18xx")) {
			wl1271_warning("This default nvs file can be removed from the file system");
		} else {
			wl1271_warning("Your device performance is not optimized.");
			wl1271_warning("Please use the calibrator tool to configure your device.");
		}

		if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
			wl1271_warning("Fuse mac address is zero. using random mac");
			/* Use TI oui and a random nic */
			oui_addr = WLCORE_TI_OUI_ADDRESS;
			nic_addr = get_random_u32();
		} else {
			oui_addr = wl->fuse_oui_addr;
			/* fuse has the BD_ADDR, the WLAN addresses are the next two */
			nic_addr = wl->fuse_nic_addr + 1;
		}
	}

	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);

	ret = ieee80211_register_hw(wl->hw);
	if (ret < 0) {
		wl1271_error("unable to register mac80211 hw: %d", ret);
		goto out;
	}

	wl->mac80211_registered = true;

	wl1271_debugfs_init(wl);

	wl1271_notice("loaded");

out:
	return ret;
}
6187
wl1271_unregister_hw(struct wl1271 * wl)6188 static void wl1271_unregister_hw(struct wl1271 *wl)
6189 {
6190 if (wl->plt)
6191 wl1271_plt_stop(wl);
6192
6193 ieee80211_unregister_hw(wl->hw);
6194 wl->mac80211_registered = false;
6195
6196 }
6197
/*
 * One-time per-device mac80211/wiphy configuration: hw flags, supported
 * interface types and combinations, scan limits, band/rate tables and
 * TX queue layout.  Called before ieee80211_register_hw().
 */
static int wl1271_init_ieee80211(struct wl1271 *wl)
{
	int i;
	static const u32 cipher_suites[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,
		WL1271_CIPHER_SUITE_GEM,
		WLAN_CIPHER_SUITE_AES_CMAC,
	};

	/* The tx descriptor buffer */
	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);

	/* some chips need extra room for the TKIP MIC/header */
	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;

	/* unit us */
	/* FIXME: find a proper value */
	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;

	ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
	ieee80211_hw_set(wl->hw, SUPPORTS_PER_STA_GTK);
	ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
	ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
	ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(wl->hw, AP_LINK_PS);
	ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
	ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
	ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
	ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
	ieee80211_hw_set(wl->hw, SIGNAL_DBM);
	ieee80211_hw_set(wl->hw, SUPPORTS_PS);
	ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);

	wl->hw->wiphy->cipher_suites = cipher_suites;
	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);

	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
					 BIT(NL80211_IFTYPE_AP) |
					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
#ifdef CONFIG_MAC80211_MESH
					 BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
					 BIT(NL80211_IFTYPE_P2P_GO);

	wl->hw->wiphy->max_scan_ssids = 1;
	wl->hw->wiphy->max_sched_scan_ssids = 16;
	wl->hw->wiphy->max_match_sets = 16;
	/*
	 * Maximum length of elements in scanning probe request templates
	 * should be the maximum length possible for a template, without
	 * the IEEE80211 header of the template
	 */
	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
			sizeof(struct ieee80211_header);

	wl->hw->wiphy->max_sched_scan_reqs = 1;
	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
		sizeof(struct ieee80211_header);

	wl->hw->wiphy->max_remain_on_channel_duration = 30000;

	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
				WIPHY_FLAG_HAS_CHANNEL_SWITCH |
				WIPHY_FLAG_IBSS_RSN;

	wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;

	/* make sure all our channels fit in the scanned_ch bitmask */
	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
		     ARRAY_SIZE(wl1271_channels_5ghz) >
		     WL1271_MAX_CHANNELS);
	/*
	 * clear channel flags from the previous usage
	 * and restore max_power & max_antenna_gain values.
	 */
	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
		wl1271_band_2ghz.channels[i].flags = 0;
		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
	}

	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
		wl1271_band_5ghz.channels[i].flags = 0;
		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
	}

	/*
	 * We keep local copies of the band structs because we need to
	 * modify them on a per-device basis.
	 */
	memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
	       sizeof(wl1271_band_2ghz));
	memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
	       &wl->ht_cap[NL80211_BAND_2GHZ],
	       sizeof(*wl->ht_cap));
	memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
	       sizeof(wl1271_band_5ghz));
	memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
	       &wl->ht_cap[NL80211_BAND_5GHZ],
	       sizeof(*wl->ht_cap));

	wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
		&wl->bands[NL80211_BAND_2GHZ];
	wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
		&wl->bands[NL80211_BAND_5GHZ];

	/*
	 * allow 4 queues per mac address we support +
	 * 1 cab queue per mac + one global offchannel Tx queue
	 */
	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;

	/* the last queue is the offchannel queue */
	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
	wl->hw->max_rates = 1;

	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;

	/* the FW answers probe-requests in AP-mode */
	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
	wl->hw->wiphy->probe_resp_offload =
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;

	/* allowed interface combinations */
	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;

	/* register vendor commands */
	wlcore_set_vendor_commands(wl->hw->wiphy);

	SET_IEEE80211_DEV(wl->hw, wl->dev);

	wl->hw->sta_data_size = sizeof(struct wl1271_station);
	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);

	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;

	return 0;
}
6347
/**
 * wlcore_alloc_hw - allocate and pre-initialize the hw/wl1271 core state
 * @priv_size: size of the chip-family private area (wl->priv)
 * @aggr_buf_size: size in bytes of the TX/RX aggregation buffer
 * @mbox_size: size in bytes of the event mailbox buffer
 *
 * Allocates the ieee80211_hw (with the wl1271 core state living in
 * hw->priv), the chip-private area, the freezable workqueue and all core
 * buffers, and initializes queues, work items, locks and default field
 * values.  On any failure everything already allocated is unwound via
 * the goto ladder at the bottom.
 *
 * Return: the new &struct ieee80211_hw on success, or an ERR_PTR() on
 * failure.
 */
struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
				     u32 mbox_size)
{
	struct ieee80211_hw *hw;
	struct wl1271 *wl;
	int i, j, ret;
	unsigned int order;

	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
	if (!hw) {
		wl1271_error("could not alloc ieee80211_hw");
		ret = -ENOMEM;
		goto err_hw_alloc;
	}

	wl = hw->priv;
	memset(wl, 0, sizeof(*wl));

	wl->priv = kzalloc(priv_size, GFP_KERNEL);
	if (!wl->priv) {
		wl1271_error("could not alloc wl priv");
		ret = -ENOMEM;
		goto err_priv_alloc;
	}

	INIT_LIST_HEAD(&wl->wlvif_list);

	wl->hw = hw;

	/*
	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
	 * we don't allocate any additional resource here, so that's fine.
	 */
	for (i = 0; i < NUM_TX_QUEUES; i++)
		for (j = 0; j < WLCORE_MAX_LINKS; j++)
			skb_queue_head_init(&wl->links[j].tx_queue[i]);

	skb_queue_head_init(&wl->deferred_rx_queue);
	skb_queue_head_init(&wl->deferred_tx_queue);

	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
	INIT_WORK(&wl->tx_work, wl1271_tx_work);
	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);

	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
	if (!wl->freezable_wq) {
		ret = -ENOMEM;
		goto err_hw;
	}

	/* Default state until the chip is identified and brought up. */
	wl->channel = 0;
	wl->rx_counter = 0;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->band = NL80211_BAND_2GHZ;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->flags = 0;
	wl->sg_enabled = true;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	wl->recovery_count = 0;
	wl->hw_pg_ver = -1;
	wl->ap_ps_map = 0;
	wl->ap_fw_ps_map = 0;
	wl->quirks = 0;
	wl->system_hlid = WL12XX_SYSTEM_HLID;
	wl->active_sta_count = 0;
	wl->active_link_count = 0;
	wl->fwlog_size = 0;

	/* The system link is always allocated */
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
	/*
	 * NOTE(review): wl->num_tx_desc is still 0 at this point (wl was
	 * just zeroed above and ops->setup() has not run), so this loop is
	 * currently a no-op — confirm whether it was meant to run later.
	 */
	for (i = 0; i < wl->num_tx_desc; i++)
		wl->tx_frames[i] = NULL;

	spin_lock_init(&wl->wl_lock);

	wl->state = WLCORE_STATE_OFF;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	mutex_init(&wl->mutex);
	mutex_init(&wl->flush_mutex);
	init_completion(&wl->nvs_loading_complete);

	order = get_order(aggr_buf_size);
	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
	if (!wl->aggr_buf) {
		ret = -ENOMEM;
		goto err_wq;
	}
	wl->aggr_buf_size = aggr_buf_size;

	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
	if (!wl->dummy_packet) {
		ret = -ENOMEM;
		goto err_aggr;
	}

	/* Allocate one page for the FW log */
	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
	if (!wl->fwlog) {
		ret = -ENOMEM;
		goto err_dummy_packet;
	}

	wl->mbox_size = mbox_size;
	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
	if (!wl->mbox) {
		ret = -ENOMEM;
		goto err_fwlog;
	}

	/* Scratch buffer for 32-bit register accesses. */
	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
	if (!wl->buffer_32) {
		ret = -ENOMEM;
		goto err_mbox;
	}

	return hw;

err_mbox:
	kfree(wl->mbox);

err_fwlog:
	free_page((unsigned long)wl->fwlog);

err_dummy_packet:
	dev_kfree_skb(wl->dummy_packet);

err_aggr:
	free_pages((unsigned long)wl->aggr_buf, order);

err_wq:
	destroy_workqueue(wl->freezable_wq);

err_hw:
	wl1271_debugfs_exit(wl);
	kfree(wl->priv);

err_priv_alloc:
	ieee80211_free_hw(hw);

err_hw_alloc:

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6497
/*
 * Tear down everything wlcore_alloc_hw() set up, roughly in reverse
 * order of allocation, then release the ieee80211_hw itself.
 * Called from wlcore_remove() once the device is fully unregistered.
 */
int wlcore_free_hw(struct wl1271 *wl)
{
	/* Unblock any fwlog readers */
	mutex_lock(&wl->mutex);
	wl->fwlog_size = -1;
	mutex_unlock(&wl->mutex);

	wlcore_sysfs_free(wl);

	kfree(wl->buffer_32);
	kfree(wl->mbox);
	free_page((unsigned long)wl->fwlog);
	dev_kfree_skb(wl->dummy_packet);
	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));

	wl1271_debugfs_exit(wl);

	/* Drop the firmware image and NVS copy; clear the stale pointers. */
	vfree(wl->fw);
	wl->fw = NULL;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	kfree(wl->nvs);
	wl->nvs = NULL;

	kfree(wl->raw_fw_status);
	kfree(wl->fw_status);
	kfree(wl->tx_res_if);
	destroy_workqueue(wl->freezable_wq);

	kfree(wl->priv);
	/* wl itself lives in hw->priv, so this must be the last use of wl. */
	ieee80211_free_hw(wl->hw);

	return 0;
}
EXPORT_SYMBOL_GPL(wlcore_free_hw);
6532
#ifdef CONFIG_PM
/*
 * WoWLAN capabilities advertised to mac80211; installed by
 * wlcore_nvs_cb() only when the IRQ can wake the system and the
 * platform keeps power in suspend (pdev_data->pwr_in_suspend).
 */
static const struct wiphy_wowlan_support wlcore_wowlan_support = {
	.flags = WIPHY_WOWLAN_ANY,
	.n_patterns = WL1271_MAX_RX_FILTERS,
	.pattern_min_len = 1,
	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
};
#endif
6541
/*
 * Hard-IRQ handler installed only for edge-triggered interrupt lines
 * (see wlcore_nvs_cb()): it does no work itself and simply defers
 * everything to the threaded handler, wlcore_irq().
 */
static irqreturn_t wlcore_hardirq(int irq, void *cookie)
{
	return IRQ_WAKE_THREAD;
}
6546
/*
 * Completion callback for the asynchronous NVS firmware request issued
 * from wlcore_probe(); also called directly with fw == NULL when the
 * chip family has no NVS file.
 *
 * Performs the bulk of device bring-up: chip-specific setup, IRQ wiring,
 * a power-on probe of the hardware, and finally mac80211 and sysfs
 * registration.  Signals nvs_loading_complete on every exit path so
 * that wlcore_remove() never blocks on it forever.
 */
static void wlcore_nvs_cb(const struct firmware *fw, void *context)
{
	struct wl1271 *wl = context;
	struct platform_device *pdev = wl->pdev;
	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
	struct resource *res;

	int ret;
	irq_handler_t hardirq_fn = NULL;

	if (fw) {
		/* Keep a private copy; the fw blob is released at "out". */
		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
		if (!wl->nvs) {
			wl1271_error("Could not allocate nvs data");
			goto out;
		}
		wl->nvs_len = fw->size;
	} else if (pdev_data->family->nvs_name) {
		/* NVS file expected but not found: continue without it. */
		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
			     pdev_data->family->nvs_name);
		wl->nvs = NULL;
		wl->nvs_len = 0;
	} else {
		wl->nvs = NULL;
		wl->nvs_len = 0;
	}

	ret = wl->ops->setup(wl);
	if (ret < 0)
		goto out_free_nvs;

	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);

	/* adjust some runtime configuration parameters */
	wlcore_adjust_conf(wl);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		wl1271_error("Could not get IRQ resource");
		goto out_free_nvs;
	}

	wl->irq = res->start;
	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
	wl->if_ops = pdev_data->if_ops;

	/*
	 * Edge-triggered lines get a hard-IRQ handler that wakes the
	 * IRQ thread; otherwise use a oneshot threaded IRQ.
	 */
	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
		hardirq_fn = wlcore_hardirq;
	else
		wl->irq_flags |= IRQF_ONESHOT;

	ret = wl12xx_set_power_on(wl);
	if (ret < 0)
		goto out_free_nvs;

	ret = wl12xx_get_hw_info(wl);
	if (ret < 0) {
		wl1271_error("couldn't get hw info");
		wl1271_power_off(wl);
		goto out_free_nvs;
	}

	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
				   wl->irq_flags, pdev->name, wl);
	if (ret < 0) {
		wl1271_error("interrupt configuration failed");
		wl1271_power_off(wl);
		goto out_free_nvs;
	}

#ifdef CONFIG_PM
	device_init_wakeup(wl->dev, true);

	/* Advertise WoWLAN only if the IRQ can actually wake the system. */
	ret = enable_irq_wake(wl->irq);
	if (!ret) {
		wl->irq_wake_enabled = true;
		if (pdev_data->pwr_in_suspend)
			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
	}

	/* Optional dedicated wake IRQ: the second IRQ resource, if any. */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	if (res) {
		wl->wakeirq = res->start;
		wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK;
		ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq);
		if (ret)
			wl->wakeirq = -ENODEV;
	} else {
		wl->wakeirq = -ENODEV;
	}
#endif
	/* Probe done; leave the chip off until an interface comes up. */
	disable_irq(wl->irq);
	wl1271_power_off(wl);

	ret = wl->ops->identify_chip(wl);
	if (ret < 0)
		goto out_irq;

	ret = wl1271_init_ieee80211(wl);
	if (ret)
		goto out_irq;

	ret = wl1271_register_hw(wl);
	if (ret)
		goto out_irq;

	ret = wlcore_sysfs_init(wl);
	if (ret)
		goto out_unreg;

	/* wlcore_remove() checks this flag before tearing down. */
	wl->initialized = true;
	goto out;

out_unreg:
	wl1271_unregister_hw(wl);

out_irq:
	if (wl->wakeirq >= 0)
		dev_pm_clear_wake_irq(wl->dev);
	device_init_wakeup(wl->dev, false);
	free_irq(wl->irq, wl);

out_free_nvs:
	kfree(wl->nvs);

out:
	release_firmware(fw);
	complete_all(&wl->nvs_loading_complete);
}
6676
wlcore_runtime_suspend(struct device * dev)6677 static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
6678 {
6679 struct wl1271 *wl = dev_get_drvdata(dev);
6680 struct wl12xx_vif *wlvif;
6681 int error;
6682
6683 /* We do not enter elp sleep in PLT mode */
6684 if (wl->plt)
6685 return 0;
6686
6687 /* Nothing to do if no ELP mode requested */
6688 if (wl->sleep_auth != WL1271_PSM_ELP)
6689 return 0;
6690
6691 wl12xx_for_each_wlvif(wl, wlvif) {
6692 if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
6693 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
6694 return -EBUSY;
6695 }
6696
6697 wl1271_debug(DEBUG_PSM, "chip to elp");
6698 error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
6699 if (error < 0) {
6700 wl12xx_queue_recovery_work(wl);
6701
6702 return error;
6703 }
6704
6705 set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6706
6707 return 0;
6708 }
6709
/*
 * Runtime-PM resume callback: wake the chip from ELP by writing the
 * wake-up bit, then wait (up to WL1271_WAKEUP_TIMEOUT ms) for the
 * completion to be signalled.  On write failure or timeout, full fw
 * recovery is scheduled.  Always returns 0 so runtime resume itself
 * never fails.
 */
static int __maybe_unused wlcore_runtime_resume(struct device *dev)
{
	struct wl1271 *wl = dev_get_drvdata(dev);
	DECLARE_COMPLETION_ONSTACK(compl);
	unsigned long flags;
	int ret;
	unsigned long start_time = jiffies;
	bool recovery = false;

	/* Nothing to do if no ELP mode requested */
	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
		return 0;

	wl1271_debug(DEBUG_PSM, "waking up chip from elp");

	/*
	 * Publish the on-stack completion under wl_lock so the wake-up
	 * event path (presumably the IRQ handler — note the
	 * WL1271_FLAG_IRQ_RUNNING check below) can complete it.
	 */
	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->elp_compl = &compl;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
	if (ret < 0) {
		recovery = true;
	} else if (!test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) {
		/* Only wait when the IRQ thread isn't already running. */
		ret = wait_for_completion_timeout(&compl,
			msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
		if (ret == 0) {
			wl1271_warning("ELP wakeup timeout!");
			recovery = true;
		}
	}

	/*
	 * Unpublish before the on-stack completion goes out of scope,
	 * so no other context can touch a dangling pointer.
	 */
	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->elp_compl = NULL;
	spin_unlock_irqrestore(&wl->wl_lock, flags);
	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);

	if (recovery) {
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		wl12xx_queue_recovery_work(wl);
	} else {
		wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
			     jiffies_to_msecs(jiffies - start_time));
	}

	return 0;
}
6756
/* Runtime-PM callbacks for entering/leaving ELP sleep; no idle hook. */
static const struct dev_pm_ops wlcore_pm_ops = {
	SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
			   wlcore_runtime_resume,
			   NULL)
};
6762
wlcore_probe(struct wl1271 * wl,struct platform_device * pdev)6763 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6764 {
6765 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6766 const char *nvs_name;
6767 int ret = 0;
6768
6769 if (!wl->ops || !wl->ptable || !pdev_data)
6770 return -EINVAL;
6771
6772 wl->dev = &pdev->dev;
6773 wl->pdev = pdev;
6774 platform_set_drvdata(pdev, wl);
6775
6776 if (pdev_data->family && pdev_data->family->nvs_name) {
6777 nvs_name = pdev_data->family->nvs_name;
6778 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
6779 nvs_name, &pdev->dev, GFP_KERNEL,
6780 wl, wlcore_nvs_cb);
6781 if (ret < 0) {
6782 wl1271_error("request_firmware_nowait failed for %s: %d",
6783 nvs_name, ret);
6784 complete_all(&wl->nvs_loading_complete);
6785 }
6786 } else {
6787 wlcore_nvs_cb(NULL, wl);
6788 }
6789
6790 wl->dev->driver->pm = &wlcore_pm_ops;
6791 pm_runtime_set_autosuspend_delay(wl->dev, 50);
6792 pm_runtime_use_autosuspend(wl->dev);
6793 pm_runtime_enable(wl->dev);
6794
6795 return ret;
6796 }
6797 EXPORT_SYMBOL_GPL(wlcore_probe);
6798
/*
 * Platform-device removal: unwind wlcore_probe().  Waits for any
 * in-flight NVS load, then bails out early if bring-up never finished
 * (wl->initialized is only set at the end of wlcore_nvs_cb()).
 */
void wlcore_remove(struct platform_device *pdev)
{
	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
	struct wl1271 *wl = platform_get_drvdata(pdev);
	int error;

	/* Keep the device resumed while tearing it down. */
	error = pm_runtime_get_sync(wl->dev);
	if (error < 0)
		dev_warn(wl->dev, "PM runtime failed: %i\n", error);

	wl->dev->driver->pm = NULL;

	if (pdev_data->family && pdev_data->family->nvs_name)
		wait_for_completion(&wl->nvs_loading_complete);
	if (!wl->initialized)
		return;

	if (wl->wakeirq >= 0) {
		dev_pm_clear_wake_irq(wl->dev);
		wl->wakeirq = -ENODEV;
	}

	device_init_wakeup(wl->dev, false);

	if (wl->irq_wake_enabled)
		disable_irq_wake(wl->irq);

	wl1271_unregister_hw(wl);

	/* Balance the pm_runtime_get_sync() above, then stop runtime PM. */
	pm_runtime_put_sync(wl->dev);
	pm_runtime_dont_use_autosuspend(wl->dev);
	pm_runtime_disable(wl->dev);

	free_irq(wl->irq, wl);
	wlcore_free_hw(wl);
}
EXPORT_SYMBOL_GPL(wlcore_remove);
6836
/* Debug verbosity; adjustable at runtime via the debug_level parameter. */
u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");

/* Load-time only (perm 0, not visible in sysfs). */
module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
		 "FW logger options: continuous, dbgpins or disable");

module_param(fwlog_mem_blocks, int, 0600);
MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");

module_param(bug_on_recovery, int, 0600);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");

module_param(no_recovery, int, 0600);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");

MODULE_DESCRIPTION("TI WLAN core driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6859